aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/00-INDEX2
-rw-r--r--Documentation/ABI/stable/sysfs-hypervisor-xen119
-rw-r--r--Documentation/ABI/testing/sysfs-firmware-ofw26
-rw-r--r--Documentation/ABI/testing/sysfs-hypervisor-xen (renamed from Documentation/ABI/testing/sysfs-hypervisor-pmu)24
-rw-r--r--Documentation/ABI/testing/sysfs-platform-ideapad-laptop8
-rw-r--r--Documentation/DMA-API-HOWTO.txt31
-rw-r--r--Documentation/acpi/gpio-properties.txt65
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt29
-rw-r--r--Documentation/arm64/silicon-errata.txt1
-rw-r--r--Documentation/cgroup-v2.txt108
-rw-r--r--Documentation/dev-tools/index.rst1
-rw-r--r--Documentation/dev-tools/kmemleak.rst1
-rw-r--r--Documentation/dev-tools/kselftest.rst (renamed from Documentation/kselftest.txt)82
-rw-r--r--Documentation/device-mapper/dm-zoned.txt144
-rw-r--r--Documentation/devicetree/bindings/arm/cci.txt15
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.txt13
-rw-r--r--Documentation/devicetree/bindings/arm/idle-states.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/l2c2x0.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt85
-rw-r--r--Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt144
-rw-r--r--Documentation/devicetree/bindings/arm/topology.txt4
-rw-r--r--Documentation/devicetree/bindings/ata/cortina,gemini-sata-bridge.txt55
-rw-r--r--Documentation/devicetree/bindings/ata/faraday,ftide010.txt38
-rw-r--r--Documentation/devicetree/bindings/bus/simple-pm-bus.txt2
-rw-r--r--Documentation/devicetree/bindings/chosen.txt3
-rw-r--r--Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt11
-rw-r--r--Documentation/devicetree/bindings/clock/hi6220-clock.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/qoriq-clock.txt6
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt10
-rw-r--r--Documentation/devicetree/bindings/clock/rockchip,rk3128-cru.txt56
-rw-r--r--Documentation/devicetree/bindings/clock/sun8i-de2.txt31
-rw-r--r--Documentation/devicetree/bindings/clock/sunxi-ccu.txt5
-rw-r--r--Documentation/devicetree/bindings/clock/ti,sci-clk.txt37
-rw-r--r--Documentation/devicetree/bindings/clock/ti-clkctrl.txt56
-rw-r--r--Documentation/devicetree/bindings/common-properties.txt2
-rw-r--r--Documentation/devicetree/bindings/crypto/fsl-sec4.txt4
-rw-r--r--Documentation/devicetree/bindings/crypto/fsl-sec6.txt4
-rw-r--r--Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt8
-rw-r--r--Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt9
-rw-r--r--Documentation/devicetree/bindings/display/panel/display-timing.txt16
-rw-r--r--Documentation/devicetree/bindings/dma/arm-pl08x.txt9
-rw-r--r--Documentation/devicetree/bindings/dma/brcm,iproc-sba.txt29
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt5
-rw-r--r--Documentation/devicetree/bindings/dma/shdma.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mvebu.txt24
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio.txt13
-rw-r--r--Documentation/devicetree/bindings/gpio/ingenic,gpio.txt46
-rw-r--r--Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt1
-rw-r--r--Documentation/devicetree/bindings/graph.txt13
-rw-r--r--Documentation/devicetree/bindings/hwlock/sprd-hwspinlock.txt23
-rw-r--r--Documentation/devicetree/bindings/iio/proximity/as3935.txt2
-rw-r--r--Documentation/devicetree/bindings/input/dlink,dir685-touchkeys.txt21
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/st,stmfts.txt43
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/open-pic.txt5
-rw-r--r--Documentation/devicetree/bindings/leds/pca963x.txt1
-rw-r--r--Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt46
-rw-r--r--Documentation/devicetree/bindings/media/cec.txt8
-rw-r--r--Documentation/devicetree/bindings/media/i2c/adv7180.txt15
-rw-r--r--Documentation/devicetree/bindings/media/i2c/max2175.txt59
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ov5640.txt45
-rw-r--r--Documentation/devicetree/bindings/media/imx.txt53
-rw-r--r--Documentation/devicetree/bindings/media/mediatek-mdp.txt12
-rw-r--r--Documentation/devicetree/bindings/media/qcom,venus.txt107
-rw-r--r--Documentation/devicetree/bindings/media/rcar_vin.txt4
-rw-r--r--Documentation/devicetree/bindings/media/renesas,drif.txt176
-rw-r--r--Documentation/devicetree/bindings/media/s5p-cec.txt6
-rw-r--r--Documentation/devicetree/bindings/media/st,stm32-cec.txt19
-rw-r--r--Documentation/devicetree/bindings/media/st,stm32-dcmi.txt45
-rw-r--r--Documentation/devicetree/bindings/media/stih-cec.txt2
-rw-r--r--Documentation/devicetree/bindings/media/video-mux.txt60
-rw-r--r--Documentation/devicetree/bindings/mfd/arizona.txt3
-rw-r--r--Documentation/devicetree/bindings/mfd/lp87565.txt43
-rw-r--r--Documentation/devicetree/bindings/net/ethernet.txt9
-rw-r--r--Documentation/devicetree/bindings/net/fsl-fman.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/fman.txt)0
-rw-r--r--Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt2
-rw-r--r--Documentation/devicetree/bindings/pci/faraday,ftpci100.txt7
-rw-r--r--Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt4
-rw-r--r--Documentation/devicetree/bindings/pci/mediatek,mt7623-pcie.txt130
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie.txt20
-rw-r--r--Documentation/devicetree/bindings/pci/rcar-pci.txt2
-rw-r--r--Documentation/devicetree/bindings/pci/tango-pcie.txt29
-rw-r--r--Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.txt41
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt25
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-zx.txt85
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,ipq8074-pinctrl.txt172
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,rza1-pinctrl.txt221
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/cpus.txt6
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/l2cache.txt2
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/srio-rmu.txt4
-rw-r--r--Documentation/devicetree/bindings/powerpc/fsl/srio.txt3
-rw-r--r--Documentation/devicetree/bindings/property-units.txt1
-rw-r--r--Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt133
-rw-r--r--Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt3
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt73
-rw-r--r--Documentation/devicetree/bindings/sound/audio-graph-card.txt129
-rw-r--r--Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt122
-rw-r--r--Documentation/devicetree/bindings/sound/cs35l35.txt3
-rw-r--r--Documentation/devicetree/bindings/sound/nau8825.txt3
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,rsnd.txt37
-rw-r--r--Documentation/devicetree/bindings/sound/rockchip,pdm.txt39
-rw-r--r--Documentation/devicetree/bindings/sound/rockchip-spdif.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/samsung,odroid.txt15
-rw-r--r--Documentation/devicetree/bindings/sound/simple-scu-card.txt67
-rw-r--r--Documentation/devicetree/bindings/sound/st,stm32-i2s.txt62
-rw-r--r--Documentation/devicetree/bindings/sound/st,stm32-sai.txt41
-rw-r--r--Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt56
-rw-r--r--Documentation/devicetree/bindings/sound/sun4i-codec.txt11
-rw-r--r--Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt1
-rw-r--r--Documentation/devicetree/bindings/sound/zte,zx-aud96p22.txt24
-rw-r--r--Documentation/devicetree/bindings/thermal/brcm,ns-thermal.txt (renamed from Documentation/devicetree/bindings/thermal/brcm,ns-thermal)0
-rw-r--r--Documentation/devicetree/bindings/usb/exynos-usb.txt4
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt7
-rw-r--r--Documentation/devicetree/booting-without-of.txt2
-rw-r--r--Documentation/devicetree/overlay-notes.txt8
-rw-r--r--Documentation/devicetree/usage-model.txt2
-rw-r--r--Documentation/driver-api/index.rst1
-rw-r--r--Documentation/driver-api/pinctl.rst (renamed from Documentation/pinctrl.txt)1124
-rw-r--r--Documentation/driver-model/devres.txt4
-rw-r--r--Documentation/filesystems/vfs.txt44
-rw-r--r--Documentation/ioctl/ioctl-number.txt2
-rw-r--r--Documentation/kbuild/kbuild.txt8
-rw-r--r--Documentation/kbuild/makefiles.txt41
-rw-r--r--Documentation/media/kapi/cec-core.rst18
-rw-r--r--Documentation/media/kapi/v4l2-core.rst2
-rw-r--r--Documentation/media/kapi/v4l2-fwnode.rst3
-rw-r--r--Documentation/media/kapi/v4l2-of.rst3
-rw-r--r--Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst8
-rw-r--r--Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst4
-rw-r--r--Documentation/media/uapi/dvb/fe-set-tone.rst4
-rw-r--r--Documentation/media/uapi/dvb/fe-set-voltage.rst7
-rw-r--r--Documentation/media/uapi/mediactl/media-ioc-g-topology.rst8
-rw-r--r--Documentation/media/uapi/mediactl/media-types.rst21
-rw-r--r--Documentation/media/uapi/v4l/control.rst6
-rw-r--r--Documentation/media/uapi/v4l/extended-controls.rst9
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-sdr-pcu16be.rst55
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-sdr-pcu18be.rst55
-rw-r--r--Documentation/media/uapi/v4l/pixfmt-sdr-pcu20be.rst54
-rw-r--r--Documentation/media/uapi/v4l/sdr-formats.rst3
-rw-r--r--Documentation/media/uapi/v4l/vidioc-cropcap.rst23
-rw-r--r--Documentation/media/uapi/v4l/vidioc-g-crop.rst22
-rw-r--r--Documentation/media/uapi/v4l/vidioc-g-selection.rst22
-rw-r--r--Documentation/media/v4l-drivers/imx.rst614
-rw-r--r--Documentation/media/v4l-drivers/index.rst1
-rw-r--r--Documentation/media/v4l-drivers/max2175.rst62
-rw-r--r--Documentation/networking/segmentation-offloads.txt2
-rw-r--r--Documentation/powerpc/cxlflash.txt76
-rw-r--r--Documentation/powerpc/firmware-assisted-dump.txt4
-rw-r--r--Documentation/printk-formats.txt36
-rw-r--r--Documentation/process/changes.rst9
-rw-r--r--Documentation/sound/designs/index.rst1
-rw-r--r--Documentation/sound/designs/tracepoints.rst172
-rw-r--r--Documentation/sound/kernel-api/writing-an-alsa-driver.rst111
-rw-r--r--Documentation/sound/soc/dapm.rst18
-rw-r--r--Documentation/virtual/kvm/api.txt172
-rw-r--r--Documentation/virtual/kvm/devices/s390_flic.txt15
-rw-r--r--Documentation/virtual/kvm/devices/vcpu.txt41
-rw-r--r--Documentation/virtual/kvm/devices/vm.txt33
-rw-r--r--Documentation/virtual/kvm/mmu.txt4
-rw-r--r--Documentation/virtual/kvm/vcpu-requests.rst307
-rw-r--r--Documentation/vm/ksm.txt63
-rw-r--r--Documentation/xtensa/mmu.txt6
-rw-r--r--MAINTAINERS206
-rw-r--r--Makefile32
-rw-r--r--arch/Kconfig2
-rw-r--r--arch/alpha/include/asm/uaccess.h1
-rw-r--r--arch/alpha/include/asm/unistd.h1
-rw-r--r--arch/alpha/kernel/osf_sys.c81
-rw-r--r--arch/arm/Kconfig2
-rw-r--r--arch/arm/common/dmabounce.c19
-rw-r--r--arch/arm/configs/lpc32xx_defconfig2
-rw-r--r--arch/arm/include/asm/bitops.h8
-rw-r--r--arch/arm/include/asm/dma-iommu.h4
-rw-r--r--arch/arm/include/asm/dma-mapping.h6
-rw-r--r--arch/arm/include/asm/ftrace.h4
-rw-r--r--arch/arm/include/asm/kvm_host.h28
-rw-r--r--arch/arm/include/asm/mach/pci.h3
-rw-r--r--arch/arm/include/asm/page-nommu.h6
-rw-r--r--arch/arm/include/asm/uaccess.h1
-rw-r--r--arch/arm/include/asm/xen/events.h2
-rw-r--r--arch/arm/include/uapi/asm/Kbuild6
-rw-r--r--arch/arm/include/uapi/asm/kvm.h8
-rw-r--r--arch/arm/kernel/bios32.c44
-rw-r--r--arch/arm/kernel/entry-ftrace.S100
-rw-r--r--arch/arm/kernel/ftrace.c37
-rw-r--r--arch/arm/kernel/process.c16
-rw-r--r--arch/arm/kernel/vdso.c18
-rw-r--r--arch/arm/kvm/guest.c51
-rw-r--r--arch/arm/kvm/handle_exit.c1
-rw-r--r--arch/arm/kvm/hyp/switch.c2
-rw-r--r--arch/arm/kvm/reset.c16
-rw-r--r--arch/arm/mach-davinci/board-da830-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c2
-rw-r--r--arch/arm/mach-dove/pcie.c17
-rw-r--r--arch/arm/mach-iop13xx/pci.c31
-rw-r--r--arch/arm/mach-iop13xx/pci.h3
-rw-r--r--arch/arm/mach-lpc32xx/phy3250.c3
-rw-r--r--arch/arm/mach-mv78xx0/pcie.c16
-rw-r--r--arch/arm/mach-orion5x/common.h3
-rw-r--r--arch/arm/mach-orion5x/pci.c25
-rw-r--r--arch/arm/mach-pxa/balloon3.c2
-rw-r--r--arch/arm/mach-pxa/littleton.c2
-rw-r--r--arch/arm/mach-pxa/stargate2.c2
-rw-r--r--arch/arm/mach-s3c64xx/pl080.c28
-rw-r--r--arch/arm/mach-spear/spear3xx.c14
-rw-r--r--arch/arm/mach-spear/spear6xx.c14
-rw-r--r--arch/arm/mm/Kconfig14
-rw-r--r--arch/arm/mm/Makefile5
-rw-r--r--arch/arm/mm/dma-mapping-nommu.c228
-rw-r--r--arch/arm/mm/dma-mapping.c77
-rw-r--r--arch/arm/xen/mm.c17
-rw-r--r--arch/arm/xen/p2m.c10
-rw-r--r--arch/arm64/Kconfig13
-rw-r--r--arch/arm64/include/asm/arch_gicv3.h2
-rw-r--r--arch/arm64/include/asm/cpucaps.h3
-rw-r--r--arch/arm64/include/asm/cputype.h2
-rw-r--r--arch/arm64/include/asm/dma-mapping.h1
-rw-r--r--arch/arm64/include/asm/esr.h24
-rw-r--r--arch/arm64/include/asm/hugetlb.h4
-rw-r--r--arch/arm64/include/asm/kvm_host.h6
-rw-r--r--arch/arm64/include/asm/kvm_hyp.h1
-rw-r--r--arch/arm64/include/asm/sysreg.h23
-rw-r--r--arch/arm64/include/asm/uaccess.h1
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h3
-rw-r--r--arch/arm64/kernel/cpu_errata.c21
-rw-r--r--arch/arm64/kernel/pci.c10
-rw-r--r--arch/arm64/kvm/guest.c9
-rw-r--r--arch/arm64/kvm/handle_exit.c1
-rw-r--r--arch/arm64/kvm/hyp/switch.c15
-rw-r--r--arch/arm64/kvm/reset.c16
-rw-r--r--arch/arm64/kvm/sys_regs.c27
-rw-r--r--arch/arm64/kvm/trace.h2
-rw-r--r--arch/arm64/kvm/vgic-sys-reg-v3.c45
-rw-r--r--arch/arm64/mm/dma-mapping.c3
-rw-r--r--arch/arm64/mm/hugetlbpage.c53
-rw-r--r--arch/blackfin/Kconfig1
-rw-r--r--arch/blackfin/configs/BF609-EZKIT_defconfig2
-rw-r--r--arch/blackfin/include/asm/uaccess.h7
-rw-r--r--arch/blackfin/mach-bf527/boards/tll6527m.c8
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c6
-rw-r--r--arch/blackfin/mach-bf609/boards/ezkit.c4
-rw-r--r--arch/c6x/include/asm/dma-mapping.h5
-rw-r--r--arch/cris/include/asm/uaccess.h8
-rw-r--r--arch/frv/include/asm/uaccess.h2
-rw-r--r--arch/hexagon/include/asm/dma-mapping.h5
-rw-r--r--arch/hexagon/include/asm/pgtable.h1
-rw-r--r--arch/hexagon/kernel/asm-offsets.c1
-rw-r--r--arch/hexagon/kernel/dma.c21
-rw-r--r--arch/hexagon/kernel/hexagon_ksyms.c1
-rw-r--r--arch/hexagon/mm/vm_tlb.c1
-rw-r--r--arch/ia64/include/asm/dma-mapping.h2
-rw-r--r--arch/ia64/include/asm/uaccess.h12
-rw-r--r--arch/ia64/kernel/Makefile.gate15
-rw-r--r--arch/ia64/lib/Makefile2
-rw-r--r--arch/ia64/lib/strlen_user.S200
-rw-r--r--arch/ia64/mm/hugetlbpage.c4
-rw-r--r--arch/ia64/mm/init.c11
-rw-r--r--arch/m32r/Kconfig1
-rw-r--r--arch/m32r/include/asm/dma-mapping.h2
-rw-r--r--arch/m32r/include/asm/uaccess.h18
-rw-r--r--arch/m32r/include/asm/unistd.h1
-rw-r--r--arch/m32r/kernel/m32r_ksyms.c1
-rw-r--r--arch/m32r/lib/usercopy.c8
-rw-r--r--arch/m68k/Kconfig1
-rw-r--r--arch/m68k/include/asm/uaccess_mm.h1
-rw-r--r--arch/m68k/include/asm/uaccess_no.h2
-rw-r--r--arch/metag/include/asm/uaccess.h2
-rw-r--r--arch/metag/mm/hugetlbpage.c3
-rw-r--r--arch/microblaze/Kconfig1
-rw-r--r--arch/microblaze/include/asm/dma-mapping.h2
-rw-r--r--arch/microblaze/include/asm/uaccess.h4
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/ath79/mach-pb44.c2
-rw-r--r--arch/mips/boot/dts/ingenic/ci20.dts60
-rw-r--r--arch/mips/boot/dts/ingenic/jz4740.dtsi68
-rw-r--r--arch/mips/boot/dts/ingenic/jz4780.dtsi98
-rw-r--r--arch/mips/boot/dts/ingenic/qi_lb60.dts13
-rw-r--r--arch/mips/include/asm/mach-jz4740/gpio.h371
-rw-r--r--arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h1
-rw-r--r--arch/mips/include/asm/pci.h1
-rw-r--r--arch/mips/include/asm/uaccess.h130
-rw-r--r--arch/mips/include/asm/unistd.h1
-rw-r--r--arch/mips/jz4740/Makefile2
-rw-r--r--arch/mips/jz4740/board-qi_lb60.c48
-rw-r--r--arch/mips/jz4740/gpio.c519
-rw-r--r--arch/mips/kvm/trap_emul.c2
-rw-r--r--arch/mips/kvm/vz.c2
-rw-r--r--arch/mips/lantiq/clk.c5
-rw-r--r--arch/mips/lib/Makefile2
-rw-r--r--arch/mips/lib/strlen_user.S65
-rw-r--r--arch/mips/lib/strncpy_user.S6
-rw-r--r--arch/mips/lib/strnlen_user.S6
-rw-r--r--arch/mips/loongson64/common/dma-swiotlb.c19
-rw-r--r--arch/mips/mm/hugetlbpage.c3
-rw-r--r--arch/mips/pci/pci-legacy.c3
-rw-r--r--arch/mn10300/include/asm/Kbuild2
-rw-r--r--arch/mn10300/include/asm/device.h1
-rw-r--r--arch/mn10300/include/asm/fb.h23
-rw-r--r--arch/mn10300/include/asm/uaccess.h2
-rw-r--r--arch/mn10300/kernel/mn10300_ksyms.c1
-rw-r--r--arch/mn10300/lib/usercopy.c8
-rw-r--r--arch/openrisc/configs/or1ksim_defconfig3
-rw-r--r--arch/openrisc/include/asm/dma-mapping.h9
-rw-r--r--arch/openrisc/include/asm/fixmap.h1
-rw-r--r--arch/openrisc/include/asm/uaccess.h1
-rw-r--r--arch/openrisc/kernel/or32_ksyms.c2
-rw-r--r--arch/openrisc/kernel/process.c2
-rw-r--r--arch/openrisc/lib/delay.c3
-rw-r--r--arch/parisc/include/asm/uaccess.h1
-rw-r--r--arch/parisc/include/asm/unistd.h1
-rw-r--r--arch/parisc/mm/hugetlbpage.c3
-rw-r--r--arch/powerpc/Kconfig33
-rw-r--r--arch/powerpc/Makefile11
-rw-r--r--arch/powerpc/Makefile.postlink17
-rw-r--r--arch/powerpc/boot/Makefile5
-rw-r--r--arch/powerpc/boot/crtsavres.S8
-rw-r--r--arch/powerpc/boot/dts/ac14xx.dts2
-rw-r--r--arch/powerpc/boot/dts/digsy_mtc.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/b4qds.dtsi8
-rw-r--r--arch/powerpc/boot/dts/fsl/c293pcie.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/p1010rdb.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/p1023rdb.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/p2041rdb.dts4
-rw-r--r--arch/powerpc/boot/dts/fsl/p3041ds.dts4
-rw-r--r--arch/powerpc/boot/dts/fsl/p4080ds.dts4
-rw-r--r--arch/powerpc/boot/dts/fsl/p5020ds.dts4
-rw-r--r--arch/powerpc/boot/dts/fsl/p5040ds.dts4
-rw-r--r--arch/powerpc/boot/dts/fsl/t208xqds.dtsi8
-rw-r--r--arch/powerpc/boot/dts/fsl/t4240qds.dts12
-rw-r--r--arch/powerpc/boot/dts/fsl/t4240rdb.dts6
-rw-r--r--arch/powerpc/boot/dts/fsp2.dts608
-rw-r--r--arch/powerpc/boot/dts/mpc5121ads.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8308_p1m.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8349emitx.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8377_rdb.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8377_wlan.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8378_rdb.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8379_rdb.dts2
-rw-r--r--arch/powerpc/boot/dts/pcm030.dts2
-rw-r--r--arch/powerpc/boot/dts/pcm032.dts2
-rw-r--r--arch/powerpc/boot/dts/pdm360ng.dts2
-rw-r--r--arch/powerpc/boot/dts/sequoia.dts2
-rw-r--r--arch/powerpc/boot/dts/warp.dts2
-rw-r--r--arch/powerpc/boot/ppc_asm.h12
-rw-r--r--arch/powerpc/configs/44x/fsp2_defconfig126
-rw-r--r--arch/powerpc/include/asm/barrier.h5
-rw-r--r--arch/powerpc/include/asm/bitops.h87
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgalloc.h3
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h2
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash.h3
-rw-r--r--arch/powerpc/include/asm/book3s/64/hugetlb.h10
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgalloc.h16
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h45
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix.h6
-rw-r--r--arch/powerpc/include/asm/code-patching.h10
-rw-r--r--arch/powerpc/include/asm/compat.h1
-rw-r--r--arch/powerpc/include/asm/dbell.h13
-rw-r--r--arch/powerpc/include/asm/delay.h16
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h5
-rw-r--r--arch/powerpc/include/asm/exception-64s.h49
-rw-r--r--arch/powerpc/include/asm/fadump.h4
-rw-r--r--arch/powerpc/include/asm/head-64.h25
-rw-r--r--arch/powerpc/include/asm/hw_irq.h4
-rw-r--r--arch/powerpc/include/asm/iommu.h4
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h1
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h2
-rw-r--r--arch/powerpc/include/asm/kvm_host.h13
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h2
-rw-r--r--arch/powerpc/include/asm/machdep.h1
-rw-r--r--arch/powerpc/include/asm/mce.h15
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgalloc.h3
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h2
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgalloc.h11
-rw-r--r--arch/powerpc/include/asm/opal-api.h76
-rw-r--r--arch/powerpc/include/asm/paca.h14
-rw-r--r--arch/powerpc/include/asm/pgalloc.h14
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h13
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h11
-rw-r--r--arch/powerpc/include/asm/processor.h30
-rw-r--r--arch/powerpc/include/asm/trace.h33
-rw-r--r--arch/powerpc/include/asm/uaccess.h1
-rw-r--r--arch/powerpc/include/uapi/asm/Kbuild6
-rw-r--r--arch/powerpc/include/uapi/asm/kvm.h6
-rw-r--r--arch/powerpc/include/uapi/asm/param.h1
-rw-r--r--arch/powerpc/include/uapi/asm/poll.h1
-rw-r--r--arch/powerpc/include/uapi/asm/resource.h1
-rw-r--r--arch/powerpc/include/uapi/asm/sockios.h20
-rw-r--r--arch/powerpc/include/uapi/asm/statfs.h6
-rw-r--r--arch/powerpc/kernel/Makefile2
-rw-r--r--arch/powerpc/kernel/asm-offsets.c13
-rw-r--r--arch/powerpc/kernel/dma-iommu.c6
-rw-r--r--arch/powerpc/kernel/dma.c17
-rw-r--r--arch/powerpc/kernel/entry_64.S193
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S284
-rw-r--r--arch/powerpc/kernel/fadump.c196
-rw-r--r--arch/powerpc/kernel/idle_book3s.S188
-rw-r--r--arch/powerpc/kernel/iommu.c28
-rw-r--r--arch/powerpc/kernel/irq.c62
-rw-r--r--arch/powerpc/kernel/kprobes.c8
-rw-r--r--arch/powerpc/kernel/mce.c2
-rw-r--r--arch/powerpc/kernel/mce_power.c3
-rw-r--r--arch/powerpc/kernel/misc_32.S6
-rw-r--r--arch/powerpc/kernel/optprobes.c53
-rw-r--r--arch/powerpc/kernel/process.c45
-rw-r--r--arch/powerpc/kernel/setup-common.c4
-rw-r--r--arch/powerpc/kernel/smp.c7
-rw-r--r--arch/powerpc/kernel/time.c96
-rw-r--r--arch/powerpc/kernel/tm.S4
-rw-r--r--arch/powerpc/kernel/traps.c3
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S61
-rw-r--r--arch/powerpc/kvm/book3s_hv.c511
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S8
-rw-r--r--arch/powerpc/kvm/book3s_hv_ras.c18
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c11
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S177
-rw-r--r--arch/powerpc/kvm/book3s_xive.c4
-rw-r--r--arch/powerpc/kvm/booke.c2
-rw-r--r--arch/powerpc/kvm/emulate.c4
-rw-r--r--arch/powerpc/kvm/powerpc.c45
-rw-r--r--arch/powerpc/lib/Makefile15
-rw-r--r--arch/powerpc/lib/code-patching.c171
-rw-r--r--arch/powerpc/lib/copyuser_power7.S4
-rw-r--r--arch/powerpc/lib/crtsavres.S6
-rw-r--r--arch/powerpc/lib/xor_vmx.c53
-rw-r--r--arch/powerpc/lib/xor_vmx.h20
-rw-r--r--arch/powerpc/lib/xor_vmx_glue.c62
-rw-r--r--arch/powerpc/mm/8xx_mmu.c2
-rw-r--r--arch/powerpc/mm/dma-noncoherent.c2
-rw-r--r--arch/powerpc/mm/dump_hashpagetable.c2
-rw-r--r--arch/powerpc/mm/fault.c17
-rw-r--r--arch/powerpc/mm/hash_native_64.c41
-rw-r--r--arch/powerpc/mm/hash_utils_64.c2
-rw-r--r--arch/powerpc/mm/hugetlbpage.c95
-rw-r--r--arch/powerpc/mm/init_64.c82
-rw-r--r--arch/powerpc/mm/mem.c32
-rw-r--r--arch/powerpc/mm/mmu_context_book3s64.c15
-rw-r--r--arch/powerpc/mm/mmu_decl.h1
-rw-r--r--arch/powerpc/mm/pgtable-book3s64.c4
-rw-r--r--arch/powerpc/mm/pgtable-hash64.c115
-rw-r--r--arch/powerpc/mm/pgtable-radix.c90
-rw-r--r--arch/powerpc/mm/pgtable_32.c15
-rw-r--r--arch/powerpc/mm/pgtable_64.c45
-rw-r--r--arch/powerpc/mm/slb.c10
-rw-r--r--arch/powerpc/mm/slb_low.S30
-rw-r--r--arch/powerpc/mm/tlb-radix.c9
-rw-r--r--arch/powerpc/mm/tlb_hash64.c6
-rw-r--r--arch/powerpc/perf/hv-24x7.c242
-rw-r--r--arch/powerpc/perf/hv-24x7.h69
-rw-r--r--arch/powerpc/perf/power9-events-list.h4
-rw-r--r--arch/powerpc/perf/power9-pmu.c8
-rw-r--r--arch/powerpc/platforms/44x/Kconfig12
-rw-r--r--arch/powerpc/platforms/44x/Makefile1
-rw-r--r--arch/powerpc/platforms/44x/fsp2.c62
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype6
-rw-r--r--arch/powerpc/platforms/cell/iommu.c53
-rw-r--r--arch/powerpc/platforms/cell/smp.c3
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c16
-rw-r--r--arch/powerpc/platforms/powernv/idle.c198
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S6
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c133
-rw-r--r--arch/powerpc/platforms/powernv/pci.c160
-rw-r--r--arch/powerpc/platforms/powernv/pci.h13
-rw-r--r--arch/powerpc/platforms/powernv/smp.c34
-rw-r--r--arch/powerpc/platforms/powernv/subcore.c3
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig2
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c2
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c7
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c11
-rw-r--r--arch/powerpc/platforms/pseries/smp.c3
-rw-r--r--arch/powerpc/platforms/pseries/vio.c3
-rw-r--r--arch/powerpc/sysdev/axonram.c8
-rw-r--r--arch/powerpc/sysdev/mpc8xx_pic.c2
-rw-r--r--arch/powerpc/sysdev/xive/common.c2
-rw-r--r--arch/powerpc/sysdev/xive/native.c4
-rw-r--r--arch/powerpc/tools/head_check.sh78
-rwxr-xr-xarch/powerpc/tools/unrel_branch_check.sh57
-rw-r--r--arch/powerpc/xmon/xmon.c15
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/include/asm/compat.h1
-rw-r--r--arch/s390/include/asm/ctl_reg.h4
-rw-r--r--arch/s390/include/asm/dma-mapping.h2
-rw-r--r--arch/s390/include/asm/hugetlb.h5
-rw-r--r--arch/s390/include/asm/kexec.h18
-rw-r--r--arch/s390/include/asm/kvm_host.h33
-rw-r--r--arch/s390/include/asm/nmi.h6
-rw-r--r--arch/s390/include/asm/syscall.h6
-rw-r--r--arch/s390/include/asm/uaccess.h17
-rw-r--r--arch/s390/include/uapi/asm/kvm.h12
-rw-r--r--arch/s390/kvm/gaccess.c43
-rw-r--r--arch/s390/kvm/interrupt.c91
-rw-r--r--arch/s390/kvm/kvm-s390.c373
-rw-r--r--arch/s390/kvm/kvm-s390.h2
-rw-r--r--arch/s390/kvm/priv.c103
-rw-r--r--arch/s390/kvm/vsie.c25
-rw-r--r--arch/s390/mm/hugetlbpage.c3
-rw-r--r--arch/s390/mm/init.c32
-rw-r--r--arch/s390/pci/pci_dma.c18
-rw-r--r--arch/score/include/asm/uaccess.h6
-rw-r--r--arch/score/lib/string.S28
-rw-r--r--arch/sh/Kconfig1
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c2
-rw-r--r--arch/sh/include/asm/dma-mapping.h2
-rw-r--r--arch/sh/include/asm/uaccess.h1
-rw-r--r--arch/sh/kernel/ftrace.c18
-rw-r--r--arch/sh/kernel/vsyscall/Makefile16
-rw-r--r--arch/sh/mm/hugetlbpage.c3
-rw-r--r--arch/sh/mm/init.c10
-rw-r--r--arch/sparc/Kconfig5
-rw-r--r--arch/sparc/include/asm/cmpxchg_64.h76
-rw-r--r--arch/sparc/include/asm/dma-mapping.h8
-rw-r--r--arch/sparc/include/asm/ldc.h8
-rw-r--r--arch/sparc/include/asm/mdesc.h24
-rw-r--r--arch/sparc/include/asm/qrwlock.h7
-rw-r--r--arch/sparc/include/asm/qspinlock.h7
-rw-r--r--arch/sparc/include/asm/setup.h2
-rw-r--r--arch/sparc/include/asm/spinlock_64.h208
-rw-r--r--arch/sparc/include/asm/spinlock_types.h12
-rw-r--r--arch/sparc/include/asm/timer_64.h67
-rw-r--r--arch/sparc/include/asm/uaccess_32.h1
-rw-r--r--arch/sparc/include/asm/uaccess_64.h1
-rw-r--r--arch/sparc/include/asm/vio.h13
-rw-r--r--arch/sparc/kernel/apc.c2
-rw-r--r--arch/sparc/kernel/iommu.c52
-rw-r--r--arch/sparc/kernel/iommu_common.h2
-rw-r--r--arch/sparc/kernel/ioport.c27
-rw-r--r--arch/sparc/kernel/kernel.h3
-rw-r--r--arch/sparc/kernel/ldc.c151
-rw-r--r--arch/sparc/kernel/mdesc.c331
-rw-r--r--arch/sparc/kernel/pci_sun4v.c31
-rw-r--r--arch/sparc/kernel/pmc.c2
-rw-r--r--arch/sparc/kernel/prom_64.c2
-rw-r--r--arch/sparc/kernel/setup_64.c7
-rw-r--r--arch/sparc/kernel/time_32.c2
-rw-r--r--arch/sparc/kernel/time_64.c179
-rw-r--r--arch/sparc/kernel/vio.c244
-rw-r--r--arch/sparc/kernel/viohs.c24
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S5
-rw-r--r--arch/sparc/lib/hweight.S16
-rw-r--r--arch/sparc/mm/hugetlbpage.c3
-rw-r--r--arch/tile/include/asm/uaccess.h1
-rw-r--r--arch/tile/kernel/pci-dma.c30
-rw-r--r--arch/tile/kernel/vdso/Makefile26
-rw-r--r--arch/tile/mm/hugetlbpage.c3
-rw-r--r--arch/tile/mm/pgtable.c11
-rw-r--r--arch/um/kernel/process.c5
-rw-r--r--arch/x86/Kconfig5
-rw-r--r--arch/x86/Makefile33
-rw-r--r--arch/x86/Makefile_32.cpu7
-rw-r--r--arch/x86/entry/vdso/vma.c3
-rw-r--r--arch/x86/include/asm/compat.h1
-rw-r--r--arch/x86/include/asm/dma-mapping.h5
-rw-r--r--arch/x86/include/asm/hugetlb.h4
-rw-r--r--arch/x86/include/asm/iommu.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h50
-rw-r--r--arch/x86/include/asm/msr-index.h2
-rw-r--r--arch/x86/include/asm/pmem.h136
-rw-r--r--arch/x86/include/asm/string_64.h5
-rw-r--r--arch/x86/include/asm/uaccess.h1
-rw-r--r--arch/x86/include/asm/uaccess_64.h11
-rw-r--r--arch/x86/include/asm/xen/hypercall.h13
-rw-r--r--arch/x86/include/uapi/asm/Kbuild6
-rw-r--r--arch/x86/include/uapi/asm/hyperv.h6
-rw-r--r--arch/x86/kernel/amd_gart_64.c1
-rw-r--r--arch/x86/kernel/pci-calgary_64.c25
-rw-r--r--arch/x86/kernel/pci-dma.c8
-rw-r--r--arch/x86/kernel/pci-nommu.c11
-rw-r--r--arch/x86/kvm/cpuid.h8
-rw-r--r--arch/x86/kvm/emulate.c84
-rw-r--r--arch/x86/kvm/lapic.c116
-rw-r--r--arch/x86/kvm/lapic.h2
-rw-r--r--arch/x86/kvm/mmu.c155
-rw-r--r--arch/x86/kvm/mmu.h2
-rw-r--r--arch/x86/kvm/mmutrace.h6
-rw-r--r--arch/x86/kvm/svm.c95
-rw-r--r--arch/x86/kvm/vmx.c83
-rw-r--r--arch/x86/kvm/x86.c14
-rw-r--r--arch/x86/lib/usercopy_64.c134
-rw-r--r--arch/x86/mm/hugetlbpage.c2
-rw-r--r--arch/x86/mm/init_32.c7
-rw-r--r--arch/x86/mm/init_64.c11
-rw-r--r--arch/x86/mm/pageattr.c6
-rw-r--r--arch/x86/pci/common.c27
-rw-r--r--arch/x86/pci/fixup.c47
-rw-r--r--arch/x86/pci/pcbios.c2
-rw-r--r--arch/x86/pci/sta2x11-fixup.c3
-rw-r--r--arch/x86/um/vdso/Makefile2
-rw-r--r--arch/x86/xen/enlighten.c154
-rw-r--r--arch/x86/xen/enlighten_hvm.c64
-rw-r--r--arch/x86/xen/enlighten_pv.c89
-rw-r--r--arch/x86/xen/pci-swiotlb-xen.c14
-rw-r--r--arch/x86/xen/setup.c7
-rw-r--r--arch/x86/xen/smp.c31
-rw-r--r--arch/x86/xen/smp.h2
-rw-r--r--arch/x86/xen/smp_hvm.c14
-rw-r--r--arch/x86/xen/smp_pv.c6
-rw-r--r--arch/x86/xen/suspend_hvm.c11
-rw-r--r--arch/x86/xen/xen-ops.h3
-rw-r--r--arch/xtensa/Kconfig1
-rw-r--r--arch/xtensa/include/asm/dma-mapping.h2
-rw-r--r--arch/xtensa/include/asm/uaccess.h6
-rw-r--r--block/compat_ioctl.c355
-rw-r--r--crypto/async_tx/async_pq.c5
-rw-r--r--drivers/acpi/nfit/core.c167
-rw-r--r--drivers/acpi/nfit/mce.c2
-rw-r--r--drivers/acpi/nfit/nfit.h4
-rw-r--r--drivers/ata/Kconfig21
-rw-r--r--drivers/ata/Makefile2
-rw-r--r--drivers/ata/ahci.c2
-rw-r--r--drivers/ata/ahci.h3
-rw-r--r--drivers/ata/ahci_brcm.c12
-rw-r--r--drivers/ata/ahci_qoriq.c14
-rw-r--r--drivers/ata/libahci.c12
-rw-r--r--drivers/ata/libata-core.c214
-rw-r--r--drivers/ata/libata-eh.c64
-rw-r--r--drivers/ata/libata-scsi.c193
-rw-r--r--drivers/ata/libata-sff.c44
-rw-r--r--drivers/ata/libata.h4
-rw-r--r--drivers/ata/pata_bf54x.c2
-rw-r--r--drivers/ata/pata_ep93xx.c1
-rw-r--r--drivers/ata/pata_ftide010.c567
-rw-r--r--drivers/ata/pata_octeon_cf.c2
-rw-r--r--drivers/ata/pata_rb532_cf.c2
-rw-r--r--drivers/ata/pata_rdc.c2
-rw-r--r--drivers/ata/pata_samsung_cf.c2
-rw-r--r--drivers/ata/pata_sch.c2
-rw-r--r--drivers/ata/sata_dwc_460ex.c1
-rw-r--r--drivers/ata/sata_fsl.c2
-rw-r--r--drivers/ata/sata_gemini.c438
-rw-r--r--drivers/ata/sata_gemini.h21
-rw-r--r--drivers/ata/sata_inic162x.c2
-rw-r--r--drivers/ata/sata_rcar.c2
-rw-r--r--drivers/ata/sata_via.c23
-rw-r--r--drivers/base/core.c6
-rw-r--r--drivers/base/dma-coherent.c74
-rw-r--r--drivers/base/dma-mapping.c60
-rw-r--r--drivers/base/memory.c83
-rw-r--r--drivers/base/node.c72
-rw-r--r--drivers/base/platform.c2
-rw-r--r--drivers/block/brd.c8
-rw-r--r--drivers/block/floppy.c328
-rw-r--r--drivers/block/zram/zram_drv.c2
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c333
-rw-r--r--drivers/char/tpm/tpm-chip.c34
-rw-r--r--drivers/char/tpm/tpm-interface.c70
-rw-r--r--drivers/char/tpm/tpm-sysfs.c6
-rw-r--r--drivers/char/tpm/tpm.h11
-rw-r--r--drivers/char/tpm/tpm2-cmd.c121
-rw-r--r--drivers/char/tpm/tpm_crb.c5
-rw-r--r--drivers/char/tpm/tpm_of.c3
-rw-r--r--drivers/char/tpm/tpm_tis.c113
-rw-r--r--drivers/clk/Kconfig17
-rw-r--r--drivers/clk/Makefile7
-rw-r--r--drivers/clk/at91/clk-generated.c6
-rw-r--r--drivers/clk/at91/clk-peripheral.c4
-rw-r--r--drivers/clk/at91/pmc.c129
-rw-r--r--drivers/clk/at91/pmc.h6
-rw-r--r--drivers/clk/bcm/Kconfig8
-rw-r--r--drivers/clk/bcm/Makefile1
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c65
-rw-r--r--drivers/clk/bcm/clk-iproc-pll.c12
-rw-r--r--drivers/clk/bcm/clk-sr.c327
-rw-r--r--drivers/clk/clk-bulk.c157
-rw-r--r--drivers/clk/clk-conf.c2
-rw-r--r--drivers/clk/clk-devres.c36
-rw-r--r--drivers/clk/clk-divider.c19
-rw-r--r--drivers/clk/clk-gemini.c455
-rw-r--r--drivers/clk/clk-palmas.c1
-rw-r--r--drivers/clk/clk-qoriq.c91
-rw-r--r--drivers/clk/clk-scpi.c14
-rw-r--r--drivers/clk/hisilicon/clk-hi3660.c98
-rw-r--r--drivers/clk/hisilicon/clk-hi6220.c22
-rw-r--r--drivers/clk/hisilicon/crg-hi3798cv200.c21
-rw-r--r--drivers/clk/imx/clk-imx7d.c8
-rw-r--r--drivers/clk/imx/clk-pllv3.c5
-rw-r--r--drivers/clk/imx/clk.h1
-rw-r--r--drivers/clk/keystone/Kconfig16
-rw-r--r--drivers/clk/keystone/Makefile3
-rw-r--r--drivers/clk/keystone/sci-clk.c724
-rw-r--r--drivers/clk/mediatek/Makefile2
-rw-r--r--drivers/clk/mediatek/clk-cpumux.c120
-rw-r--r--drivers/clk/mediatek/clk-cpumux.h30
-rw-r--r--drivers/clk/mediatek/clk-mt2701.c8
-rw-r--r--drivers/clk/mediatek/clk-mt8173.c23
-rw-r--r--drivers/clk/meson/Kconfig6
-rw-r--r--drivers/clk/meson/gxbb.c137
-rw-r--r--drivers/clk/meson/gxbb.h7
-rw-r--r--drivers/clk/meson/meson8b.c7
-rw-r--r--drivers/clk/mvebu/ap806-system-controller.c107
-rw-r--r--drivers/clk/mvebu/armada-38x.c7
-rw-r--r--drivers/clk/mvebu/cp110-system-controller.c199
-rw-r--r--drivers/clk/qcom/Kconfig9
-rw-r--r--drivers/clk/qcom/Makefile1
-rw-r--r--drivers/clk/qcom/gcc-ipq8074.c1007
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c1
-rw-r--r--drivers/clk/renesas/Kconfig139
-rw-r--r--drivers/clk/renesas/Makefile41
-rw-r--r--drivers/clk/renesas/clk-mstp.c2
-rw-r--r--drivers/clk/renesas/clk-rcar-gen2.c23
-rw-r--r--drivers/clk/renesas/r8a7745-cpg-mssr.c17
-rw-r--r--drivers/clk/renesas/r8a7790-cpg-mssr.c278
-rw-r--r--drivers/clk/renesas/r8a7791-cpg-mssr.c286
-rw-r--r--drivers/clk/renesas/r8a7792-cpg-mssr.c221
-rw-r--r--drivers/clk/renesas/r8a7794-cpg-mssr.c255
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c43
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c39
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c43
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.h4
-rw-r--r--drivers/clk/rockchip/Makefile1
-rw-r--r--drivers/clk/rockchip/clk-rk3036.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3128.c612
-rw-r--r--drivers/clk/rockchip/clk-rk3228.c164
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c14
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c5
-rw-r--r--drivers/clk/rockchip/clk-rk3399.c4
-rw-r--r--drivers/clk/samsung/clk-cpu.c17
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c58
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c18
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c27
-rw-r--r--drivers/clk/samsung/clk-pll.c101
-rw-r--r--drivers/clk/samsung/clk-pll.h4
-rw-r--r--drivers/clk/samsung/clk-s3c2410-dclk.c75
-rw-r--r--drivers/clk/samsung/clk-s5pv210-audss.c52
-rw-r--r--drivers/clk/samsung/clk.c91
-rw-r--r--drivers/clk/samsung/clk.h9
-rw-r--r--drivers/clk/socfpga/clk-gate-a10.c2
-rw-r--r--drivers/clk/socfpga/clk.h3
-rw-r--r--drivers/clk/sunxi-ng/Kconfig116
-rw-r--r--drivers/clk/sunxi-ng/Makefile37
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.c10
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun5i.h6
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c10
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23.c10
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a33.c10
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a83t.c922
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a83t.h64
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c260
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.h (renamed from drivers/media/platform/vimc/vimc-capture.h)24
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c10
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r.c117
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c10
-rw-r--r--drivers/clk/sunxi-ng/ccu_div.c38
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.c15
-rw-r--r--drivers/clk/sunxi-ng/ccu_mult.c19
-rw-r--r--drivers/clk/sunxi-ng/ccu_mux.c108
-rw-r--r--drivers/clk/sunxi-ng/ccu_mux.h24
-rw-r--r--drivers/clk/sunxi-ng/ccu_nkm.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu_reset.h1
-rw-r--r--drivers/clk/ti/Makefile3
-rw-r--r--drivers/clk/ti/clk-44xx.c663
-rw-r--r--drivers/clk/ti/clkctrl.c497
-rw-r--r--drivers/clk/ti/clock.h31
-rw-r--r--drivers/clk/uniphier/clk-uniphier-sys.c15
-rw-r--r--drivers/clk/versatile/Makefile1
-rw-r--r--drivers/clk/versatile/clk-realview.c97
-rw-r--r--drivers/clk/zte/clk-zx296718.c8
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c53
-rw-r--r--drivers/cpuidle/cpuidle-pseries.c22
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c40
-rw-r--r--drivers/dax/device.c1
-rw-r--r--drivers/dax/super.c118
-rw-r--r--drivers/dio/dio.c17
-rw-r--r--drivers/dma/Kconfig26
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/amba-pl08x.c970
-rw-r--r--drivers/dma/bcm-sba-raid.c1785
-rw-r--r--drivers/dma/dw/Kconfig7
-rw-r--r--drivers/dma/dw/core.c332
-rw-r--r--drivers/dma/dw/platform.c6
-rw-r--r--drivers/dma/dw/regs.h50
-rw-r--r--drivers/dma/fsl_raid.c2
-rw-r--r--drivers/dma/fsldma.c5
-rw-r--r--drivers/dma/fsldma.h4
-rw-r--r--drivers/dma/imx-dma.c7
-rw-r--r--drivers/dma/imx-sdma.c27
-rw-r--r--drivers/dma/ioat/dca.c8
-rw-r--r--drivers/dma/ioat/init.c24
-rw-r--r--drivers/dma/mv_xor_v2.c62
-rw-r--r--drivers/dma/mxs-dma.c2
-rw-r--r--drivers/dma/pl330.c142
-rw-r--r--drivers/dma/qcom/hidma.c22
-rw-r--r--drivers/dma/qcom/hidma.h1
-rw-r--r--drivers/dma/qcom/hidma_mgmt.c47
-rw-r--r--drivers/dma/sh/rcar-dmac.c27
-rw-r--r--drivers/dma/ste_dma40.c5
-rw-r--r--drivers/dma/tegra20-apb-dma.c50
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c3
-rw-r--r--drivers/firmware/tegra/ivc.c4
-rw-r--r--drivers/gpio/Kconfig48
-rw-r--r--drivers/gpio/Makefile4
-rw-r--r--drivers/gpio/gpio-adp5588.c2
-rw-r--r--drivers/gpio/gpio-arizona.c35
-rw-r--r--drivers/gpio/gpio-davinci.c11
-rw-r--r--drivers/gpio/gpio-dwapb.c3
-rw-r--r--drivers/gpio/gpio-exar.c79
-rw-r--r--drivers/gpio/gpio-ingenic.c394
-rw-r--r--drivers/gpio/gpio-lp87565.c190
-rw-r--r--drivers/gpio/gpio-max732x.c2
-rw-r--r--drivers/gpio/gpio-merrifield.c1
-rw-r--r--drivers/gpio/gpio-ml-ioh.c16
-rw-r--r--drivers/gpio/gpio-mockup.c98
-rw-r--r--drivers/gpio/gpio-mvebu.c540
-rw-r--r--drivers/gpio/gpio-pcf857x.c2
-rw-r--r--drivers/gpio/gpio-pch.c15
-rw-r--r--drivers/gpio/gpio-rcar.c4
-rw-r--r--drivers/gpio/gpio-sta2x11.c12
-rw-r--r--drivers/gpio/gpio-wcove.c89
-rw-r--r--drivers/gpio/gpio-xra1403.c237
-rw-r--r--drivers/gpio/gpio-zynq.c26
-rw-r--r--drivers/gpio/gpiolib-acpi.c185
-rw-r--r--drivers/gpio/gpiolib-of.c5
-rw-r--r--drivers/gpio/gpiolib.c31
-rw-r--r--drivers/gpio/gpiolib.h18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c4
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c2
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c5
-rw-r--r--drivers/gpu/drm/armada/armada_gem.h1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c22
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c21
-rw-r--r--drivers/gpu/drm/drm_bufs.c116
-rw-r--r--drivers/gpu/drm/drm_internal.h3
-rw-r--r--drivers/gpu/drm/drm_ioc32.c750
-rw-r--r--drivers/gpu/drm/drm_ioctl.c48
-rw-r--r--drivers/gpu/drm/drm_legacy.h7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c4
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h2
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c149
-rw-r--r--drivers/gpu/drm/mga/mga_state.c2
-rw-r--r--drivers/gpu/drm/radeon/Makefile1
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_ioc32.c424
-rw-r--r--drivers/hv/channel_mgmt.c1
-rw-r--r--drivers/hwspinlock/Kconfig26
-rw-r--r--drivers/hwspinlock/Makefile1
-rw-r--r--drivers/hwspinlock/sprd_hwspinlock.c183
-rw-r--r--drivers/infiniband/core/device.c3
-rw-r--r--drivers/infiniband/core/security.c20
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c8
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c1
-rw-r--r--drivers/infiniband/hw/hfi1/vnic.h1
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c19
-rw-r--r--drivers/infiniband/hw/mlx5/main.c27
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c8
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c4
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h2
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c16
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c2
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c8
-rw-r--r--drivers/input/input.c12
-rw-r--r--drivers/input/joystick/xpad.c87
-rw-r--r--drivers/input/keyboard/Kconfig11
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/adp5588-keys.c2
-rw-r--r--drivers/input/keyboard/dlink-dir685-touchkeys.c155
-rw-r--r--drivers/input/keyboard/lm8323.c2
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c2
-rw-r--r--drivers/input/misc/axp20x-pek.c28
-rw-r--r--drivers/input/misc/xen-kbdfront.c22
-rw-r--r--drivers/input/mouse/elan_i2c.h3
-rw-r--r--drivers/input/mouse/elan_i2c_core.c40
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c71
-rw-r--r--drivers/input/mouse/elan_i2c_smbus.c9
-rw-r--r--drivers/input/mouse/elantech.c11
-rw-r--r--drivers/input/rmi4/rmi_f34v7.c24
-rw-r--r--drivers/input/serio/hp_sdc.c6
-rw-r--r--drivers/input/sparse-keymap.c14
-rw-r--r--drivers/input/touchscreen/Kconfig11
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/mcs5000_ts.c2
-rw-r--r--drivers/input/touchscreen/mms114.c2
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c8
-rw-r--r--drivers/input/touchscreen/stmfts.c822
-rw-r--r--drivers/input/touchscreen/tsc2007_core.c2
-rw-r--r--drivers/iommu/amd_iommu.c20
-rw-r--r--drivers/iommu/dma-iommu.c18
-rw-r--r--drivers/iommu/intel-iommu.c3
-rw-r--r--drivers/leds/Kconfig18
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/leds-aat1290.c5
-rw-r--r--drivers/leds/leds-lp5523.c10
-rw-r--r--drivers/leds/leds-max77693.c5
-rw-r--r--drivers/leds/leds-pca963x.c17
-rw-r--r--drivers/leds/leds-sead3.c78
-rw-r--r--drivers/leds/leds-versatile.c110
-rw-r--r--drivers/leds/trigger/ledtrig-gpio.c29
-rw-r--r--drivers/mailbox/Kconfig8
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/mailbox.c16
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c129
-rw-r--r--drivers/md/Kconfig17
-rw-r--r--drivers/md/Makefile2
-rw-r--r--drivers/md/dm-bio-prison-v1.c2
-rw-r--r--drivers/md/dm-bio-prison-v2.c2
-rw-r--r--drivers/md/dm-core.h3
-rw-r--r--drivers/md/dm-crypt.c21
-rw-r--r--drivers/md/dm-flakey.c23
-rw-r--r--drivers/md/dm-ioctl.c109
-rw-r--r--drivers/md/dm-kcopyd.c65
-rw-r--r--drivers/md/dm-linear.c48
-rw-r--r--drivers/md/dm-raid.c13
-rw-r--r--drivers/md/dm-stripe.c40
-rw-r--r--drivers/md/dm-table.c162
-rw-r--r--drivers/md/dm-zoned-metadata.c2509
-rw-r--r--drivers/md/dm-zoned-reclaim.c570
-rw-r--r--drivers/md/dm-zoned-target.c967
-rw-r--r--drivers/md/dm-zoned.h228
-rw-r--r--drivers/md/dm.c142
-rw-r--r--drivers/md/faulty.c5
-rw-r--r--drivers/md/linear.c7
-rw-r--r--drivers/md/md.c47
-rw-r--r--drivers/md/md.h7
-rw-r--r--drivers/md/multipath.c8
-rw-r--r--drivers/md/raid0.c7
-rw-r--r--drivers/md/raid1.c20
-rw-r--r--drivers/md/raid10.c16
-rw-r--r--drivers/md/raid5.c22
-rw-r--r--drivers/media/cec/cec-adap.c88
-rw-r--r--drivers/media/cec/cec-api.c5
-rw-r--r--drivers/media/cec/cec-core.c1
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c39
-rw-r--r--drivers/media/dvb-frontends/Kconfig1
-rw-r--r--drivers/media/dvb-frontends/af9013.c1186
-rw-r--r--drivers/media/dvb-frontends/af9013.h86
-rw-r--r--drivers/media/dvb-frontends/af9013_priv.h2
-rw-r--r--drivers/media/dvb-frontends/au8522_common.c1
-rw-r--r--drivers/media/dvb-frontends/au8522_decoder.c74
-rw-r--r--drivers/media/dvb-frontends/au8522_dig.c215
-rw-r--r--drivers/media/dvb-frontends/bcm3510.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c302
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.h10
-rw-r--r--drivers/media/dvb-frontends/cxd2841er_priv.h3
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c6
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c20
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c10
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c20
-rw-r--r--drivers/media/dvb-frontends/mt352.c1
-rw-r--r--drivers/media/dvb-frontends/or51132.c4
-rw-r--r--drivers/media/dvb-frontends/s5h1411.c4
-rw-r--r--drivers/media/dvb-frontends/stv0367.c1168
-rw-r--r--drivers/media/dvb-frontends/stv0367.h13
-rw-r--r--drivers/media/dvb-frontends/stv0367_defs.h1301
-rw-r--r--drivers/media/dvb-frontends/stv0367_regs.h4
-rw-r--r--drivers/media/dvb-frontends/zl10353.c3
-rw-r--r--drivers/media/i2c/Kconfig51
-rw-r--r--drivers/media/i2c/Makefile5
-rw-r--r--drivers/media/i2c/ad5820.c2
-rw-r--r--drivers/media/i2c/adv7180.c2
-rw-r--r--drivers/media/i2c/adv7604.c7
-rw-r--r--drivers/media/i2c/as3645a.c12
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c36
-rw-r--r--drivers/media/i2c/dw9714.c291
-rw-r--r--drivers/media/i2c/max2175.c1453
-rw-r--r--drivers/media/i2c/max2175.h109
-rw-r--r--drivers/media/i2c/msp3400-kthreads.c1
-rw-r--r--drivers/media/i2c/mt9v032.c7
-rw-r--r--drivers/media/i2c/ov13858.c1816
-rw-r--r--drivers/media/i2c/ov2659.c11
-rw-r--r--drivers/media/i2c/ov5640.c2344
-rw-r--r--drivers/media/i2c/ov5645.c7
-rw-r--r--drivers/media/i2c/ov5647.c7
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c7
-rw-r--r--drivers/media/i2c/s5k5baf.c6
-rw-r--r--drivers/media/i2c/s5k6aa.c2
-rw-r--r--drivers/media/i2c/smiapp/Kconfig1
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c29
-rw-r--r--drivers/media/i2c/soc_camera/ov6650.c2
-rw-r--r--drivers/media/i2c/soc_camera/ov772x.c6
-rw-r--r--drivers/media/i2c/tc358743.c77
-rw-r--r--drivers/media/i2c/tvp514x.c6
-rw-r--r--drivers/media/i2c/tvp5150.c7
-rw-r--r--drivers/media/i2c/tvp7002.c6
-rw-r--r--drivers/media/media-entity.c43
-rw-r--r--drivers/media/pci/bt8xx/dst_ca.c1
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c2
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-pcm.c4
-rw-r--r--drivers/media/pci/cx18/cx18-dvb.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c3
-rw-r--r--drivers/media/pci/cx88/cx88-cards.c9
-rw-r--r--drivers/media/pci/cx88/cx88-video.c4
-rw-r--r--drivers/media/pci/ddbridge/Kconfig6
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c531
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-regs.h4
-rw-r--r--drivers/media/pci/ddbridge/ddbridge.h41
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.c4
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c3
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-bus.c13
-rw-r--r--drivers/media/pci/saa7164/saa7164-cmd.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-core.c1
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-g723.c32
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-i2c.c1
-rw-r--r--drivers/media/pci/ttpci/av7110.c5
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c2
-rw-r--r--drivers/media/platform/Kconfig74
-rw-r--r--drivers/media/platform/Makefile13
-rw-r--r--drivers/media/platform/am437x/Kconfig1
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c15
-rw-r--r--drivers/media/platform/atmel/Kconfig2
-rw-r--r--drivers/media/platform/atmel/atmel-isc.c36
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c35
-rw-r--r--drivers/media/platform/coda/coda-bit.c49
-rw-r--r--drivers/media/platform/coda/coda-common.c70
-rw-r--r--drivers/media/platform/coda/coda.h5
-rw-r--r--drivers/media/platform/coda/imx-vdoa.c49
-rw-r--r--drivers/media/platform/davinci/Kconfig1
-rw-r--r--drivers/media/platform/davinci/vpif.c57
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c232
-rw-r--r--drivers/media/platform/davinci/vpif_display.c5
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c13
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h1
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c8
-rw-r--r--drivers/media/platform/exynos4-is/Kconfig2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-capture.c7
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c7
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c4
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c13
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c6
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c1
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_core.c12
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c10
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h20
-rw-r--r--drivers/media/platform/omap3isp/isp.c49
-rw-r--r--drivers/media/platform/pxa_camera.c77
-rw-r--r--drivers/media/platform/qcom/venus/Makefile11
-rw-r--r--drivers/media/platform/qcom/venus/core.c390
-rw-r--r--drivers/media/platform/qcom/venus/core.h324
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c108
-rw-r--r--drivers/media/platform/qcom/venus/firmware.h23
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c725
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h45
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c522
-rw-r--r--drivers/media/platform/qcom/venus/hfi.h175
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c1259
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.h304
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h1050
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c1052
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.h283
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c1572
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.h23
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus_io.h113
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c1162
-rw-r--r--drivers/media/platform/qcom/venus/vdec.h23
-rw-r--r--drivers/media/platform/qcom/venus/vdec_ctrls.c158
-rw-r--r--drivers/media/platform/qcom/venus/venc.c1283
-rw-r--r--drivers/media/platform/qcom/venus/venc.h23
-rw-r--r--drivers/media/platform/qcom/venus/venc_ctrls.c270
-rw-r--r--drivers/media/platform/rcar-vin/Kconfig1
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c66
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c230
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c97
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h9
-rw-r--r--drivers/media/platform/rcar_drif.c1498
-rw-r--r--drivers/media/platform/rcar_fdp1.c12
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c4
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.c6
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.h1
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c20
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c2
-rw-r--r--drivers/media/platform/sh_vou.c2
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c7
-rw-r--r--drivers/media/platform/soc_camera/soc_mediabus.c1
-rw-r--r--drivers/media/platform/sti/cec/stih-cec.c16
-rw-r--r--drivers/media/platform/stm32/Makefile2
-rw-r--r--drivers/media/platform/stm32/stm32-cec.c362
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c1404
-rw-r--r--drivers/media/platform/ti-vpe/cal.c15
-rw-r--r--drivers/media/platform/video-mux.c334
-rw-r--r--drivers/media/platform/vimc/Kconfig1
-rw-r--r--drivers/media/platform/vimc/Makefile10
-rw-r--r--drivers/media/platform/vimc/vimc-capture.c321
-rw-r--r--drivers/media/platform/vimc/vimc-common.c473
-rw-r--r--drivers/media/platform/vimc/vimc-common.h229
-rw-r--r--drivers/media/platform/vimc/vimc-core.c610
-rw-r--r--drivers/media/platform/vimc/vimc-core.h112
-rw-r--r--drivers/media/platform/vimc/vimc-debayer.c601
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c455
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c321
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.h28
-rw-r--r--drivers/media/platform/vivid/vivid-cec.c6
-rw-r--r--drivers/media/platform/xilinx/Kconfig1
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c63
-rw-r--r--drivers/media/rc/Kconfig8
-rw-r--r--drivers/media/rc/ati_remote.c3
-rw-r--r--drivers/media/rc/iguanair.c1
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.c4
-rw-r--r--drivers/media/rc/imon.c2
-rw-r--r--drivers/media/rc/ir-lirc-codec.c37
-rw-r--r--drivers/media/rc/ir-spi.c11
-rw-r--r--drivers/media/rc/lirc_dev.c254
-rw-r--r--drivers/media/rc/mceusb.c158
-rw-r--r--drivers/media/rc/meson-ir.c89
-rw-r--r--drivers/media/rc/rc-core-priv.h2
-rw-r--r--drivers/media/rc/rc-ir-raw.c36
-rw-r--r--drivers/media/rc/rc-main.c160
-rw-r--r--drivers/media/rc/sir_ir.c94
-rw-r--r--drivers/media/tuners/tda18271-fe.c2
-rw-r--r--drivers/media/tuners/xc5000.c27
-rw-r--r--drivers/media/usb/au0828/au0828-dvb.c30
-rw-r--r--drivers/media/usb/au0828/au0828.h2
-rw-r--r--drivers/media/usb/cpia2/cpia2_core.c51
-rw-r--r--drivers/media/usb/cx231xx/Kconfig2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c34
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c49
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-input.c5
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h1
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c199
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c1
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c32
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.h8
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c1
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-remote.c5
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c35
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_s5k83a.c5
-rw-r--r--drivers/media/usb/gspca/ov519.c3
-rw-r--r--drivers/media/usb/pulse8-cec/pulse8-cec.c9
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c2
-rw-r--r--drivers/media/usb/pwc/pwc-v4l.c3
-rw-r--r--drivers/media/usb/rainshadow-cec/rainshadow-cec.c14
-rw-r--r--drivers/media/usb/s2255/s2255drv.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-input.c4
-rw-r--r--drivers/media/usb/usbvision/usbvision-i2c.c3
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c4
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c34
-rw-r--r--drivers/media/usb/uvc/uvc_video.c4
-rw-r--r--drivers/media/v4l2-core/Kconfig3
-rw-r--r--drivers/media/v4l2-core/Makefile4
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c29
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c51
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c8
-rw-r--r--drivers/media/v4l2-core/v4l2-flash-led-class.c12
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c345
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c94
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c37
-rw-r--r--drivers/media/v4l2-core/v4l2-of.c327
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c8
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c40
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c8
-rw-r--r--drivers/mfd/Kconfig44
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/atmel-flexcom.c2
-rw-r--r--drivers/mfd/axp20x.c3
-rw-r--r--drivers/mfd/cros_ec.c5
-rw-r--r--drivers/mfd/da9062-core.c12
-rw-r--r--drivers/mfd/exynos-lpass.c2
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c7
-rw-r--r--drivers/mfd/intel-lpss-pci.c24
-rw-r--r--drivers/mfd/intel_quark_i2c_gpio.c49
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c232
-rw-r--r--drivers/mfd/intel_soc_pmic_chtwc.c230
-rw-r--r--drivers/mfd/ipaq-micro.c5
-rw-r--r--drivers/mfd/lp87565.c100
-rw-r--r--drivers/mfd/motorola-cpcap.c13
-rw-r--r--drivers/mfd/palmas.c2
-rw-r--r--drivers/mfd/qcom-spmi-pmic.c9
-rw-r--r--drivers/mfd/rn5t618.c2
-rw-r--r--drivers/mfd/rtsx_pcr.c17
-rw-r--r--drivers/mfd/smsc-ece1099.c3
-rw-r--r--drivers/mfd/stm32-timers.c10
-rw-r--r--drivers/mfd/tc6393xb.c4
-rw-r--r--drivers/mfd/timberdale.c2
-rw-r--r--drivers/mfd/twl4030-irq.c4
-rw-r--r--drivers/mfd/wm831x-core.c26
-rw-r--r--drivers/mfd/wm831x-i2c.c4
-rw-r--r--drivers/mfd/wm831x-spi.c4
-rw-r--r--drivers/misc/cxl/Kconfig5
-rw-r--r--drivers/misc/cxl/Makefile2
-rw-r--r--drivers/misc/cxl/cxl.h6
-rw-r--r--drivers/misc/cxl/cxllib.c246
-rw-r--r--drivers/misc/cxl/fault.c29
-rw-r--r--drivers/misc/cxl/flash.c8
-rw-r--r--drivers/misc/cxl/native.c16
-rw-r--r--drivers/misc/cxl/pci.c41
-rw-r--r--drivers/misc/enclosure.c14
-rw-r--r--drivers/mmc/host/jz4740_mmc.c44
-rw-r--r--drivers/mtd/nand/jz4740_nand.c23
-rw-r--r--drivers/net/arcnet/com20020-pci.c6
-rw-r--r--drivers/net/bonding/bond_main.c15
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c159
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c36
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c1
-rw-r--r--drivers/net/virtio_net.c1
-rw-r--r--drivers/net/vrf.c11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c71
-rw-r--r--drivers/nvdimm/btt.c45
-rw-r--r--drivers/nvdimm/btt.h2
-rw-r--r--drivers/nvdimm/btt_devs.c54
-rw-r--r--drivers/nvdimm/bus.c15
-rw-r--r--drivers/nvdimm/claim.c38
-rw-r--r--drivers/nvdimm/core.c5
-rw-r--r--drivers/nvdimm/dax_devs.c10
-rw-r--r--drivers/nvdimm/dimm_devs.c10
-rw-r--r--drivers/nvdimm/label.c251
-rw-r--r--drivers/nvdimm/label.h21
-rw-r--r--drivers/nvdimm/namespace_devs.c282
-rw-r--r--drivers/nvdimm/nd-core.h9
-rw-r--r--drivers/nvdimm/nd.h17
-rw-r--r--drivers/nvdimm/pfn_devs.c12
-rw-r--r--drivers/nvdimm/pmem.c63
-rw-r--r--drivers/nvdimm/pmem.h15
-rw-r--r--drivers/nvdimm/region.c17
-rw-r--r--drivers/nvdimm/region_devs.c88
-rw-r--r--drivers/nvme/host/pci.c15
-rw-r--r--drivers/of/Makefile2
-rw-r--r--drivers/of/address.c2
-rw-r--r--drivers/of/base.c771
-rw-r--r--drivers/of/dynamic.c2
-rw-r--r--drivers/of/fdt.c4
-rw-r--r--drivers/of/irq.c5
-rw-r--r--drivers/of/of_pci_irq.c3
-rw-r--r--drivers/of/of_private.h4
-rw-r--r--drivers/of/overlay.c4
-rw-r--r--drivers/of/platform.c2
-rw-r--r--drivers/of/property.c806
-rw-r--r--drivers/of/resolver.c34
-rw-r--r--drivers/of/unittest-data/tests-platform.dtsi4
-rw-r--r--drivers/of/unittest.c58
-rw-r--r--drivers/pci/Makefile17
-rw-r--r--drivers/pci/ats.c87
-rw-r--r--drivers/pci/dwc/Kconfig11
-rw-r--r--drivers/pci/dwc/Makefile1
-rw-r--r--drivers/pci/dwc/pci-dra7xx.c6
-rw-r--r--drivers/pci/dwc/pci-exynos.c2
-rw-r--r--drivers/pci/dwc/pci-imx6.c39
-rw-r--r--drivers/pci/dwc/pci-keystone.c2
-rw-r--r--drivers/pci/dwc/pci-layerscape.c6
-rw-r--r--drivers/pci/dwc/pcie-armada8k.c2
-rw-r--r--drivers/pci/dwc/pcie-artpec6.c2
-rw-r--r--drivers/pci/dwc/pcie-designware-host.c43
-rw-r--r--drivers/pci/dwc/pcie-designware-plat.c5
-rw-r--r--drivers/pci/dwc/pcie-designware.h2
-rw-r--r--drivers/pci/dwc/pcie-kirin.c517
-rw-r--r--drivers/pci/dwc/pcie-qcom.c440
-rw-r--r--drivers/pci/dwc/pcie-spear13xx.c2
-rw-r--r--drivers/pci/host/Kconfig25
-rw-r--r--drivers/pci/host/Makefile2
-rw-r--r--drivers/pci/host/pci-aardvark.c21
-rw-r--r--drivers/pci/host/pci-ftpci100.c143
-rw-r--r--drivers/pci/host/pci-host-common.c27
-rw-r--r--drivers/pci/host/pci-hyperv.c445
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c2
-rw-r--r--drivers/pci/host/pci-tegra.c42
-rw-r--r--drivers/pci/host/pci-versatile.c36
-rw-r--r--drivers/pci/host/pci-xgene.c23
-rw-r--r--drivers/pci/host/pcie-altera.c24
-rw-r--r--drivers/pci/host/pcie-iproc-bcma.c7
-rw-r--r--drivers/pci/host/pcie-iproc-platform.c7
-rw-r--r--drivers/pci/host/pcie-iproc.c135
-rw-r--r--drivers/pci/host/pcie-mediatek.c554
-rw-r--r--drivers/pci/host/pcie-rcar.c40
-rw-r--r--drivers/pci/host/pcie-rockchip.c147
-rw-r--r--drivers/pci/host/pcie-tango.c141
-rw-r--r--drivers/pci/host/pcie-xilinx-nwl.c79
-rw-r--r--drivers/pci/host/pcie-xilinx.c36
-rw-r--r--drivers/pci/host/vmd.c10
-rw-r--r--drivers/pci/iov.c4
-rw-r--r--drivers/pci/msi.c14
-rw-r--r--drivers/pci/pci-driver.c3
-rw-r--r--drivers/pci/pci-label.c7
-rw-r--r--drivers/pci/pci-sysfs.c204
-rw-r--r--drivers/pci/pci.c227
-rw-r--r--drivers/pci/pci.h1
-rw-r--r--drivers/pci/pcie/pcie-dpc.c4
-rw-r--r--drivers/pci/pcie/portdrv.h7
-rw-r--r--drivers/pci/pcie/portdrv_core.c104
-rw-r--r--drivers/pci/probe.c140
-rw-r--r--drivers/pci/quirks.c19
-rw-r--r--drivers/pci/setup-irq.c45
-rw-r--r--drivers/pci/switch/switchtec.c40
-rw-r--r--drivers/pinctrl/Kconfig36
-rw-r--r--drivers/pinctrl/Makefile4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c13
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c24
-rw-r--r--drivers/pinctrl/bcm/pinctrl-cygnus-mux.c12
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c6
-rw-r--r--drivers/pinctrl/core.c30
-rw-r--r--drivers/pinctrl/freescale/Kconfig2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c135
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.h29
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx7d.c6
-rw-r--r--drivers/pinctrl/freescale/pinctrl-vf610.c2
-rw-r--r--drivers/pinctrl/intel/Kconfig8
-rw-r--r--drivers/pinctrl/intel/Makefile1
-rw-r--r--drivers/pinctrl/intel/pinctrl-cannonlake.c442
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c200
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h65
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c1
-rw-r--r--drivers/pinctrl/mediatek/Kconfig9
-rw-r--r--drivers/pinctrl/mediatek/Makefile1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt2701.c1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7623.c379
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h1936
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c48
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c99
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h15
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8.c147
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c32
-rw-r--r--drivers/pinctrl/mvebu/Kconfig12
-rw-r--r--drivers/pinctrl/mvebu/Makefile2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c238
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-ap806.c140
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-cp110.c687
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c6
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.h2
-rw-r--r--drivers/pinctrl/pinconf-generic.c3
-rw-r--r--drivers/pinctrl/pinconf.c35
-rw-r--r--drivers/pinctrl/pinctrl-amd.c4
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c852
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c (renamed from drivers/gpio/gpio-mcp23s08.c)647
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c343
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c1308
-rw-r--r--drivers/pinctrl/pinctrl-single.c8
-rw-r--r--drivers/pinctrl/pinctrl-xway.c2
-rw-r--r--drivers/pinctrl/qcom/Kconfig10
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8074.c1076
-rw-r--r--drivers/pinctrl/samsung/Kconfig10
-rw-r--r--drivers/pinctrl/samsung/Makefile2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm.c815
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c399
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c1173
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h13
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos5440.c15
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c39
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig10
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile2
-rw-r--r--drivers/pinctrl/sh-pfc/core.c12
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c1341
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7792.c55
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7794.c1256
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c30
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c385
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c606
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h2
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c67
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.h5
-rw-r--r--drivers/pinctrl/sunxi/Kconfig10
-rw-r--r--drivers/pinctrl/sunxi/Makefile2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c287
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c1056
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c128
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h3
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c1
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra114.c11
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra124.c11
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra20.c11
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra210.c9
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra30.c11
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c364
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c383
-rw-r--r--drivers/pinctrl/zte/Kconfig13
-rw-r--r--drivers/pinctrl/zte/Makefile2
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.c445
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.h105
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx296718.c1027
-rw-r--r--drivers/platform/x86/Kconfig29
-rw-r--r--drivers/platform/x86/Makefile2
-rw-r--r--drivers/platform/x86/acer-wmi.c15
-rw-r--r--drivers/platform/x86/acerhdf.c2
-rw-r--r--drivers/platform/x86/alienware-wmi.c8
-rw-r--r--drivers/platform/x86/dell-laptop.c6
-rw-r--r--drivers/platform/x86/dell-rbtn.c31
-rw-r--r--drivers/platform/x86/dell-wmi-led.c2
-rw-r--r--drivers/platform/x86/dell-wmi.c144
-rw-r--r--drivers/platform/x86/eeepc-laptop.c2
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c540
-rw-r--r--drivers/platform/x86/ideapad-laptop.c58
-rw-r--r--drivers/platform/x86/intel_bxtwc_tmu.c4
-rw-r--r--drivers/platform/x86/intel_cht_int33fe.c8
-rw-r--r--drivers/platform/x86/intel_menlow.c2
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c2
-rw-r--r--drivers/platform/x86/msi-laptop.c4
-rw-r--r--drivers/platform/x86/panasonic-laptop.c4
-rw-r--r--drivers/platform/x86/peaq-wmi.c100
-rw-r--r--drivers/platform/x86/samsung-laptop.c6
-rw-r--r--drivers/platform/x86/silead_dmi.c70
-rw-r--r--drivers/platform/x86/sony-laptop.c4
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c183
-rw-r--r--drivers/platform/x86/topstar-laptop.c1
-rw-r--r--drivers/platform/x86/toshiba_acpi.c11
-rw-r--r--drivers/platform/x86/toshiba_haps.c2
-rw-r--r--drivers/platform/x86/wmi-bmof.c125
-rw-r--r--drivers/platform/x86/wmi.c686
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c38
-rw-r--r--drivers/pwm/pwm-jz4740.c29
-rw-r--r--drivers/remoteproc/Kconfig22
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c32
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c525
-rw-r--r--drivers/remoteproc/remoteproc_core.c178
-rw-r--r--drivers/rpmsg/Kconfig11
-rw-r--r--drivers/rpmsg/Makefile1
-rw-r--r--drivers/rpmsg/qcom_glink_rpm.c1233
-rw-r--r--drivers/rpmsg/qcom_smd.c11
-rw-r--r--drivers/rpmsg/rpmsg_char.c4
-rw-r--r--drivers/rpmsg/rpmsg_core.c18
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c18
-rw-r--r--drivers/s390/block/dasd.c3
-rw-r--r--drivers/s390/block/dasd_alias.c3
-rw-r--r--drivers/s390/block/dasd_devmap.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c310
-rw-r--r--drivers/s390/block/dasd_eckd.h1
-rw-r--r--drivers/s390/block/dcssblk.c8
-rw-r--r--drivers/s390/block/scm_blk.c10
-rw-r--r--drivers/s390/char/keyboard.c7
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c2
-rw-r--r--drivers/s390/crypto/zcrypt_api.c10
-rw-r--r--drivers/scsi/53c700.c8
-rw-r--r--drivers/scsi/Kconfig11
-rw-r--r--drivers/scsi/aacraid/aachba.c17
-rw-r--r--drivers/scsi/aacraid/aacraid.h22
-rw-r--r--drivers/scsi/aacraid/commctrl.c19
-rw-r--r--drivers/scsi/aacraid/comminit.c18
-rw-r--r--drivers/scsi/aacraid/commsup.c78
-rw-r--r--drivers/scsi/aacraid/linit.c232
-rw-r--r--drivers/scsi/aacraid/src.c136
-rw-r--r--drivers/scsi/atari_scsi.c2
-rw-r--r--drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h5
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_constants.h3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_debug.c3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_debug.h3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c16
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c62
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c14
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c3
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c3
-rw-r--r--drivers/scsi/csiostor/csio_hw.c98
-rw-r--r--drivers/scsi/csiostor/csio_hw.h1
-rw-r--r--drivers/scsi/csiostor/csio_hw_chip.h14
-rw-r--r--drivers/scsi/csiostor/csio_hw_t5.c29
-rw-r--r--drivers/scsi/csiostor/csio_init.c6
-rw-r--r--drivers/scsi/csiostor/csio_init.h2
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c43
-rw-r--r--drivers/scsi/csiostor/csio_wr.c4
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c5
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h1
-rw-r--r--drivers/scsi/cxlflash/common.h48
-rw-r--r--drivers/scsi/cxlflash/main.c1048
-rw-r--r--drivers/scsi/cxlflash/main.h7
-rw-r--r--drivers/scsi/cxlflash/sislite.h27
-rw-r--r--drivers/scsi/cxlflash/superpipe.c34
-rw-r--r--drivers/scsi/cxlflash/vlun.c89
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c27
-rw-r--r--drivers/scsi/esas2r/esas2r.h4
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c4
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c10
-rw-r--r--drivers/scsi/fcoe/fcoe.c12
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c1
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c24
-rw-r--r--drivers/scsi/fnic/fnic_io.h9
-rw-r--r--drivers/scsi/fnic/fnic_main.c14
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c17
-rw-r--r--drivers/scsi/fnic/fnic_stats.h7
-rw-r--r--drivers/scsi/fnic/fnic_trace.c24
-rw-r--r--drivers/scsi/hisi_sas/Kconfig10
-rw-r--r--drivers/scsi/hisi_sas/Makefile1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h91
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c436
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c82
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c216
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c1846
-rw-r--r--drivers/scsi/hpsa.c851
-rw-r--r--drivers/scsi/hpsa.h4
-rw-r--r--drivers/scsi/hpsa_cmd.h20
-rw-r--r--drivers/scsi/hptiop.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c2
-rw-r--r--drivers/scsi/libiscsi.c4
-rw-r--r--drivers/scsi/libsas/sas_event.c36
-rw-r--r--drivers/scsi/libsas/sas_internal.h4
-rw-r--r--drivers/scsi/lpfc/lpfc.h23
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c101
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c31
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c83
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c55
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c163
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c211
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c308
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c19
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c106
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h21
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c8
-rw-r--r--drivers/scsi/qedf/drv_fcoe_fw_funcs.c2
-rw-r--r--drivers/scsi/qedf/drv_fcoe_fw_funcs.h2
-rw-r--r--drivers/scsi/qedf/drv_scsi_fw_funcs.c2
-rw-r--r--drivers/scsi/qedf/drv_scsi_fw_funcs.h2
-rw-r--r--drivers/scsi/qedf/qedf.h2
-rw-r--r--drivers/scsi/qedf/qedf_attr.c59
-rw-r--r--drivers/scsi/qedf/qedf_dbg.h2
-rw-r--r--drivers/scsi/qedf/qedf_debugfs.c2
-rw-r--r--drivers/scsi/qedf/qedf_els.c8
-rw-r--r--drivers/scsi/qedf/qedf_fip.c22
-rw-r--r--drivers/scsi/qedf/qedf_hsi.h2
-rw-r--r--drivers/scsi/qedf/qedf_io.c37
-rw-r--r--drivers/scsi/qedf/qedf_main.c203
-rw-r--r--drivers/scsi/qedf/qedf_version.h8
-rw-r--r--drivers/scsi/qedi/qedi_fw.c4
-rw-r--r--drivers/scsi/qla2xxx/Kconfig1
-rw-r--r--drivers/scsi/qla2xxx/Makefile2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c161
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h17
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h182
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c145
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h35
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h40
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c275
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c606
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h60
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c116
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c229
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c125
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c44
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c761
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.h132
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h17
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c368
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c1438
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h58
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c16
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c36
-rw-r--r--drivers/scsi/scsi.c13
-rw-r--r--drivers/scsi/scsi_error.c8
-rw-r--r--drivers/scsi/scsi_lib.c306
-rw-r--r--drivers/scsi/scsi_priv.h3
-rw-r--r--drivers/scsi/scsi_scan.c46
-rw-r--r--drivers/scsi/scsi_sysfs.c42
-rw-r--r--drivers/scsi/scsi_transport_fc.c12
-rw-r--r--drivers/scsi/scsi_transport_srp.c7
-rw-r--r--drivers/scsi/sd.c131
-rw-r--r--drivers/scsi/sd.h2
-rw-r--r--drivers/scsi/sgiwd93.c10
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h194
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c2325
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sas_transport.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.c100
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.h7
-rw-r--r--drivers/scsi/snic/snic_isr.c4
-rw-r--r--drivers/scsi/snic/snic_scsi.c4
-rw-r--r--drivers/scsi/storvsc_drv.c56
-rw-r--r--drivers/scsi/sun_esp.c9
-rw-r--r--drivers/scsi/ufs/tc-dwc-g210-pci.c2
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c60
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c2
-rw-r--r--drivers/scsi/ufs/ufshcd.c15
-rw-r--r--drivers/scsi/virtio_scsi.c13
-rw-r--r--drivers/scsi/xen-scsifront.c1
-rw-r--r--drivers/sh/intc/virq.c4
-rw-r--r--drivers/spi/spidev.c42
-rw-r--r--drivers/staging/media/Kconfig2
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile6
-rw-r--r--drivers/staging/media/atomisp/i2c/gc0310.c1
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/Makefile7
-rw-r--r--drivers/staging/media/atomisp/i2c/lm3554.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/mt9m114.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2680.c15
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/Makefile7
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ov5693.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/Makefile7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c14
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/Makefile2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h9
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu_private.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_const.h16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_exprs.h23
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c36
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c13
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c10
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c297
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c34
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h7
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_irq.c16
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mmu.c6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c24
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c4
-rw-r--r--drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c227
-rw-r--r--drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c12
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c6
-rw-r--r--drivers/staging/media/imx/Kconfig21
-rw-r--r--drivers/staging/media/imx/Makefile12
-rw-r--r--drivers/staging/media/imx/TODO23
-rw-r--r--drivers/staging/media/imx/imx-ic-common.c113
-rw-r--r--drivers/staging/media/imx/imx-ic-prp.c518
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c1309
-rw-r--r--drivers/staging/media/imx/imx-ic.h38
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c775
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c1817
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c667
-rw-r--r--drivers/staging/media/imx/imx-media-fim.c494
-rw-r--r--drivers/staging/media/imx/imx-media-internal-sd.c349
-rw-r--r--drivers/staging/media/imx/imx-media-of.c270
-rw-r--r--drivers/staging/media/imx/imx-media-utils.c896
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c1009
-rw-r--r--drivers/staging/media/imx/imx-media.h325
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c698
-rw-r--r--drivers/staging/media/lirc/TODO47
-rw-r--r--drivers/staging/media/lirc/TODO.lirc_zilog36
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c136
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c5
-rw-r--r--drivers/thermal/intel_bxt_pmic_thermal.c2
-rw-r--r--drivers/tty/serial/8250/8250_exar.c173
-rw-r--r--drivers/usb/core/devio.c48
-rw-r--r--drivers/usb/gadget/function/u_uac1_legacy.c7
-rw-r--r--drivers/usb/host/xhci-hub.c3
-rw-r--r--drivers/usb/host/xhci-pci.c12
-rw-r--r--drivers/usb/host/xhci.h1
-rw-r--r--drivers/usb/serial/option.c4
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/typec/typec_wcove.c2
-rw-r--r--drivers/video/backlight/adp8860_bl.c2
-rw-r--r--drivers/video/backlight/adp8870_bl.c2
-rw-r--r--drivers/video/backlight/backlight.c15
-rw-r--r--drivers/video/fbdev/au1100fb.c4
-rw-r--r--drivers/video/fbdev/au1200fb.c5
-rw-r--r--drivers/video/fbdev/core/fbmem.c19
-rw-r--r--drivers/video/fbdev/efifb.c2
-rw-r--r--drivers/video/fbdev/hpfb.c6
-rw-r--r--drivers/video/fbdev/jz4740_fb.c104
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--drivers/xen/events/events_base.c6
-rw-r--r--drivers/xen/evtchn.c34
-rw-r--r--drivers/xen/manage.c12
-rw-r--r--drivers/xen/swiotlb-xen.c113
-rw-r--r--drivers/xen/sys-hypervisor.c62
-rw-r--r--drivers/xen/xenbus/xenbus_comms.c21
-rw-r--r--fs/block_dev.c3
-rw-r--r--fs/btrfs/disk-io.c12
-rw-r--r--fs/btrfs/extent_io.c6
-rw-r--r--fs/btrfs/file.c13
-rw-r--r--fs/btrfs/inode.c8
-rw-r--r--fs/buffer.c20
-rw-r--r--fs/cifs/Kconfig9
-rw-r--r--fs/cifs/cifs_unicode.c8
-rw-r--r--fs/cifs/file.c16
-rw-r--r--fs/cifs/smb2ops.c119
-rw-r--r--fs/cifs/smb2pdu.c52
-rw-r--r--fs/cifs/smb2proto.h3
-rw-r--r--fs/cifs/smb2transport.c28
-rw-r--r--fs/cifs/transport.c7
-rw-r--r--fs/compat_ioctl.c19
-rw-r--r--fs/dax.c15
-rw-r--r--fs/dcache.c50
-rw-r--r--fs/debugfs/inode.c10
-rw-r--r--fs/exec.c11
-rw-r--r--fs/exofs/dir.c2
-rw-r--r--fs/ext2/dir.c2
-rw-r--r--fs/ext2/file.c5
-rw-r--r--fs/ext4/fsync.c2
-rw-r--r--fs/fcntl.c30
-rw-r--r--fs/file.c22
-rw-r--r--fs/file_table.c1
-rw-r--r--fs/filesystems.c4
-rw-r--r--fs/gfs2/glock.c11
-rw-r--r--fs/gfs2/incore.h1
-rw-r--r--fs/gfs2/lops.c2
-rw-r--r--fs/inode.c16
-rw-r--r--fs/jbd2/commit.c16
-rw-r--r--fs/jfs/jfs_metapage.c7
-rw-r--r--fs/jfs/jfs_metapage.h1
-rw-r--r--fs/libfs.c6
-rw-r--r--fs/minix/dir.c2
-rw-r--r--fs/minix/itree_common.c2
-rw-r--r--fs/namei.c10
-rw-r--r--fs/namespace.c10
-rw-r--r--fs/ncpfs/mmap.c2
-rw-r--r--fs/notify/fsnotify.c8
-rw-r--r--fs/ocfs2/cluster/netdebug.c1
-rw-r--r--fs/ocfs2/inode.c2
-rw-r--r--fs/ocfs2/ocfs2_fs.h5
-rw-r--r--fs/ocfs2/stackglue.c2
-rw-r--r--fs/open.c3
-rw-r--r--fs/read_write.c6
-rw-r--r--fs/select.c44
-rw-r--r--fs/statfs.c2
-rw-r--r--fs/sync.c2
-rw-r--r--fs/sysv/dir.c2
-rw-r--r--fs/ufs/dir.c2
-rw-r--r--fs/userfaultfd.c12
-rw-r--r--fs/xfs/xfs_file.c2
-rw-r--r--fs/xfs/xfs_mount.c4
-rw-r--r--include/asm-generic/hugetlb.h4
-rw-r--r--include/asm-generic/uaccess.h5
-rw-r--r--include/asm-generic/vmlinux.lds.h18
-rw-r--r--include/drm/drm_ioctl.h1
-rw-r--r--include/dt-bindings/clock/cortina,gemini-clock.h29
-rw-r--r--include/dt-bindings/clock/exynos5420.h3
-rw-r--r--include/dt-bindings/clock/hi3660-clock.h17
-rw-r--r--include/dt-bindings/clock/hi6220-clock.h4
-rw-r--r--include/dt-bindings/clock/histb-clock.h9
-rw-r--r--include/dt-bindings/clock/imx7d-clock.h4
-rw-r--r--include/dt-bindings/clock/mt2701-clk.h3
-rw-r--r--include/dt-bindings/clock/mt8173-clk.h4
-rw-r--r--include/dt-bindings/clock/omap4.h146
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq8074.h152
-rw-r--r--include/dt-bindings/clock/r8a7790-cpg-mssr.h52
-rw-r--r--include/dt-bindings/clock/r8a7791-cpg-mssr.h48
-rw-r--r--include/dt-bindings/clock/r8a7792-cpg-mssr.h43
-rw-r--r--include/dt-bindings/clock/r8a7793-cpg-mssr.h48
-rw-r--r--include/dt-bindings/clock/r8a7794-cpg-mssr.h47
-rw-r--r--include/dt-bindings/clock/rk3128-cru.h282
-rw-r--r--include/dt-bindings/clock/sun5i-ccu.h3
-rw-r--r--include/dt-bindings/clock/sun8i-a83t-ccu.h140
-rw-r--r--include/dt-bindings/clock/sun8i-de2.h18
-rw-r--r--include/dt-bindings/clock/zx296718-clock.h6
-rw-r--r--include/dt-bindings/gpio/gpio.h4
-rw-r--r--include/dt-bindings/reset/sun8i-a83t-ccu.h98
-rw-r--r--include/dt-bindings/reset/sun8i-de2.h14
-rw-r--r--include/kvm/arm_arch_timer.h8
-rw-r--r--include/kvm/arm_pmu.h6
-rw-r--r--include/kvm/arm_vgic.h14
-rw-r--r--include/linux/acpi.h7
-rw-r--r--include/linux/amba/pl080.h107
-rw-r--r--include/linux/amba/pl08x.h30
-rw-r--r--include/linux/ata.h11
-rw-r--r--include/linux/backing-dev.h2
-rw-r--r--include/linux/blk-cgroup.h6
-rw-r--r--include/linux/bootmem.h1
-rw-r--r--include/linux/buffer_head.h1
-rw-r--r--include/linux/cgroup-defs.h12
-rw-r--r--include/linux/clk-provider.h16
-rw-r--r--include/linux/clk.h136
-rw-r--r--include/linux/compat.h3
-rw-r--r--include/linux/compiler-clang.h8
-rw-r--r--include/linux/compiler-gcc.h18
-rw-r--r--include/linux/crash_core.h5
-rw-r--r--include/linux/dax.h12
-rw-r--r--include/linux/dcache.h6
-rw-r--r--include/linux/device-mapper.h85
-rw-r--r--include/linux/device.h2
-rw-r--r--include/linux/dm-kcopyd.h1
-rw-r--r--include/linux/dma-mapping.h20
-rw-r--r--include/linux/dma/dw.h21
-rw-r--r--include/linux/errseq.h19
-rw-r--r--include/linux/filter.h5
-rw-r--r--include/linux/fs.h67
-rw-r--r--include/linux/fsnotify.h31
-rw-r--r--include/linux/ftrace.h6
-rw-r--r--include/linux/gfp.h11
-rw-r--r--include/linux/gpio/driver.h3
-rw-r--r--include/linux/gpio/machine.h2
-rw-r--r--include/linux/huge_mm.h7
-rw-r--r--include/linux/hugetlb.h85
-rw-r--r--include/linux/hyperv.h1
-rw-r--r--include/linux/imx-media.h29
-rw-r--r--include/linux/input/sparse-keymap.h1
-rw-r--r--include/linux/interrupt.h4
-rw-r--r--include/linux/irqchip/arm-gic-v3.h6
-rw-r--r--include/linux/kexec.h9
-rw-r--r--include/linux/kmemleak.h7
-rw-r--r--include/linux/kvm_host.h12
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/libnvdimm.h11
-rw-r--r--include/linux/memblock.h25
-rw-r--r--include/linux/memcontrol.h318
-rw-r--r--include/linux/memory_hotplug.h53
-rw-r--r--include/linux/mempolicy.h12
-rw-r--r--include/linux/mfd/intel_soc_pmic.h5
-rw-r--r--include/linux/mfd/lp87565.h270
-rw-r--r--include/linux/mfd/rtsx_pci.h5
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/mman.h2
-rw-r--r--include/linux/mmzone.h59
-rw-r--r--include/linux/module.h4
-rw-r--r--include/linux/nd.h13
-rw-r--r--include/linux/netdevice.h1
-rw-r--r--include/linux/node.h35
-rw-r--r--include/linux/nodemask.h4
-rw-r--r--include/linux/of.h36
-rw-r--r--include/linux/of_fdt.h3
-rw-r--r--include/linux/of_gpio.h1
-rw-r--r--include/linux/of_graph.h21
-rw-r--r--include/linux/page-flags.h7
-rw-r--r--include/linux/pagemap.h31
-rw-r--r--include/linux/pci-ats.h10
-rw-r--r--include/linux/pci.h35
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/percpu_counter.h7
-rw-r--r--include/linux/pinctrl/pinconf-generic.h15
-rw-r--r--include/linux/platform_data/adp5588.h (renamed from include/linux/i2c/adp5588.h)0
-rw-r--r--include/linux/platform_data/adp8860.h (renamed from include/linux/i2c/adp8860.h)0
-rw-r--r--include/linux/platform_data/adp8870.h (renamed from include/linux/i2c/adp8870.h)0
-rw-r--r--include/linux/platform_data/clk-realview.h1
-rw-r--r--include/linux/platform_data/leds-pca963x.h6
-rw-r--r--include/linux/platform_data/lm8323.h (renamed from include/linux/i2c/lm8323.h)0
-rw-r--r--include/linux/platform_data/max732x.h (renamed from include/linux/i2c/max732x.h)0
-rw-r--r--include/linux/platform_data/mcs.h (renamed from include/linux/i2c/mcs.h)0
-rw-r--r--include/linux/platform_data/mms114.h (renamed from include/linux/i2c/mms114.h)0
-rw-r--r--include/linux/platform_data/pcf857x.h (renamed from include/linux/i2c/pcf857x.h)0
-rw-r--r--include/linux/platform_data/tsc2007.h (renamed from include/linux/i2c/tsc2007.h)2
-rw-r--r--include/linux/platform_device.h2
-rw-r--r--include/linux/pmem.h142
-rw-r--r--include/linux/processor.h70
-rw-r--r--include/linux/raid/pq.h1
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/set_memory.h20
-rw-r--r--include/linux/signal.h2
-rw-r--r--include/linux/slub_def.h34
-rw-r--r--include/linux/spi/mcp23s08.h38
-rw-r--r--include/linux/string.h6
-rw-r--r--include/linux/swap.h19
-rw-r--r--include/linux/swap_cgroup.h6
-rw-r--r--include/linux/syscalls.h2
-rw-r--r--include/linux/thread_info.h27
-rw-r--r--include/linux/time.h3
-rw-r--r--include/linux/trace_events.h13
-rw-r--r--include/linux/tracepoint.h7
-rw-r--r--include/linux/uaccess.h50
-rw-r--r--include/linux/uio.h79
-rw-r--r--include/linux/vm_event_item.h1
-rw-r--r--include/linux/vmstat.h1
-rw-r--r--include/linux/wmi.h59
-rw-r--r--include/media/cec.h29
-rw-r--r--include/media/davinci/vpif_types.h9
-rw-r--r--include/media/imx.h15
-rw-r--r--include/media/lirc_dev.h32
-rw-r--r--include/media/media-entity.h28
-rw-r--r--include/media/rc-core.h2
-rw-r--r--include/media/v4l2-async.h8
-rw-r--r--include/media/v4l2-ctrls.h13
-rw-r--r--include/media/v4l2-flash-led-class.h6
-rw-r--r--include/media/v4l2-fwnode.h (renamed from include/media/v4l2-of.h)96
-rw-r--r--include/media/v4l2-mem2mem.h92
-rw-r--r--include/media/v4l2-subdev.h16
-rw-r--r--include/misc/cxllib.h133
-rw-r--r--include/net/inet_frag.h4
-rw-r--r--include/net/sock.h1
-rw-r--r--include/rdma/ib_verbs.h6
-rw-r--r--include/scsi/libsas.h6
-rw-r--r--include/scsi/scsi_cmnd.h1
-rw-r--r--include/scsi/scsi_device.h10
-rw-r--r--include/scsi/scsi_devinfo.h9
-rw-r--r--include/scsi/scsi_proto.h4
-rw-r--r--include/scsi/scsi_transport_fc.h4
-rw-r--r--include/sound/ak4113.h13
-rw-r--r--include/sound/ak4114.h13
-rw-r--r--include/sound/ak4117.h13
-rw-r--r--include/sound/core.h6
-rw-r--r--include/sound/cs35l35.h2
-rw-r--r--include/sound/designware_i2s.h1
-rw-r--r--include/sound/emux_synth.h8
-rw-r--r--include/sound/hdmi-codec.h9
-rw-r--r--include/sound/mixer_oss.h2
-rw-r--r--include/sound/opl3.h8
-rw-r--r--include/sound/pcm-indirect.h10
-rw-r--r--include/sound/pcm.h144
-rw-r--r--include/sound/rawmidi.h4
-rw-r--r--include/sound/rt5645.h6
-rw-r--r--include/sound/simple_card_utils.h43
-rw-r--r--include/sound/soc-dapm.h7
-rw-r--r--include/sound/soc-topology.h5
-rw-r--r--include/sound/soc.h3
-rw-r--r--include/trace/events/filemap.h57
-rw-r--r--include/trace/events/percpu.h125
-rw-r--r--include/trace/events/xen.h13
-rw-r--r--include/trace/trace_events.h26
-rw-r--r--include/uapi/linux/cec.h2
-rw-r--r--include/uapi/linux/dm-ioctl.h4
-rw-r--r--include/uapi/linux/dvb/video.h3
-rw-r--r--include/uapi/linux/input-event-codes.h1
-rw-r--r--include/uapi/linux/kvm.h35
-rw-r--r--include/uapi/linux/magic.h1
-rw-r--r--include/uapi/linux/max2175.h28
-rw-r--r--include/uapi/linux/media.h6
-rw-r--r--include/uapi/linux/mempolicy.h8
-rw-r--r--include/uapi/linux/ndctl.h42
-rw-r--r--include/uapi/linux/pci_regs.h1
-rw-r--r--include/uapi/linux/switchtec_ioctl.h3
-rw-r--r--include/uapi/linux/v4l2-controls.h11
-rw-r--r--include/uapi/linux/videodev2.h3
-rw-r--r--include/uapi/scsi/cxlflash_ioctl.h85
-rw-r--r--include/uapi/sound/asoc.h10
-rw-r--r--include/uapi/sound/asound.h4
-rw-r--r--include/uapi/sound/snd_sst_tokens.h8
-rw-r--r--include/xen/arm/hypercall.h5
-rw-r--r--include/xen/events.h1
-rw-r--r--include/xen/interface/version.h15
-rw-r--r--include/xen/swiotlb-xen.h62
-rw-r--r--include/xen/xen-ops.h2
-rw-r--r--init/Kconfig21
-rw-r--r--ipc/Makefile3
-rw-r--r--ipc/compat_mq.c138
-rw-r--r--ipc/mqueue.c349
-rw-r--r--ipc/shm.c2
-rw-r--r--kernel/cgroup/Makefile1
-rw-r--r--kernel/cgroup/cgroup-internal.h2
-rw-r--r--kernel/cgroup/cgroup-v1.c155
-rw-r--r--kernel/cgroup/cgroup.c155
-rw-r--r--kernel/cgroup/cpuset.c33
-rw-r--r--kernel/cgroup/debug.c357
-rw-r--r--kernel/compat.c267
-rw-r--r--kernel/exit.c1
-rw-r--r--kernel/extable.c2
-rw-r--r--kernel/fork.c8
-rw-r--r--kernel/irq/affinity.c13
-rw-r--r--kernel/locking/qrwlock.c1
-rw-r--r--kernel/locking/qspinlock_paravirt.h3
-rw-r--r--kernel/memremap.c6
-rw-r--r--kernel/module.c10
-rw-r--r--kernel/pid.c7
-rw-r--r--kernel/power/snapshot.c4
-rw-r--r--kernel/signal.c155
-rw-r--r--kernel/sys.c100
-rw-r--r--kernel/trace/Kconfig22
-rw-r--r--kernel/trace/ftrace.c366
-rw-r--r--kernel/trace/trace.c361
-rw-r--r--kernel/trace/trace.h30
-rw-r--r--kernel/trace/trace_events.c66
-rw-r--r--kernel/trace/trace_output.c27
-rw-r--r--kernel/trace/trace_sched_switch.c72
-rw-r--r--lib/Kconfig3
-rw-r--r--lib/Makefile2
-rw-r--r--lib/dma-noop.c21
-rw-r--r--lib/dma-virt.c12
-rw-r--r--lib/errseq.c208
-rw-r--r--lib/flex_proportions.c6
-rw-r--r--lib/iov_iter.c116
-rw-r--r--lib/percpu_counter.c4
-rw-r--r--lib/raid6/mktables.c20
-rw-r--r--lib/strnlen_user.c34
-rw-r--r--lib/usercopy.c10
-rw-r--r--lib/vsprintf.c136
-rw-r--r--mm/Kconfig52
-rw-r--r--mm/Makefile1
-rw-r--r--mm/compaction.c5
-rw-r--r--mm/filemap.c128
-rw-r--r--mm/gup.c201
-rw-r--r--mm/huge_memory.c31
-rw-r--r--mm/hugetlb.c98
-rw-r--r--mm/kmemleak.c136
-rw-r--r--mm/ksm.c820
-rw-r--r--mm/memblock.c3
-rw-r--r--mm/memcontrol.c81
-rw-r--r--mm/memory-failure.c15
-rw-r--r--mm/memory.c6
-rw-r--r--mm/memory_hotplug.c533
-rw-r--r--mm/mempolicy.c181
-rw-r--r--mm/migrate.c21
-rw-r--r--mm/mmap.c6
-rw-r--r--mm/mprotect.c2
-rw-r--r--mm/nobootmem.c2
-rw-r--r--mm/oom_kill.c5
-rw-r--r--mm/page-writeback.c34
-rw-r--r--mm/page_alloc.c134
-rw-r--r--mm/page_isolation.c26
-rw-r--r--mm/page_vma_mapped.c3
-rw-r--r--mm/pagewalk.c3
-rw-r--r--mm/percpu-internal.h166
-rw-r--r--mm/percpu-km.c11
-rw-r--r--mm/percpu-stats.c222
-rw-r--r--mm/percpu-vm.c12
-rw-r--r--mm/percpu.c85
-rw-r--r--mm/rmap.c15
-rw-r--r--mm/shmem.c7
-rw-r--r--mm/slab.c20
-rw-r--r--mm/slab.h18
-rw-r--r--mm/slab_common.c5
-rw-r--r--mm/slub.c113
-rw-r--r--mm/sparse.c104
-rw-r--r--mm/swap.c1
-rw-r--r--mm/swap_cgroup.c40
-rw-r--r--mm/swap_slots.c16
-rw-r--r--mm/swap_state.c98
-rw-r--r--mm/swapfile.c289
-rw-r--r--mm/vmalloc.c7
-rw-r--r--mm/vmscan.c77
-rw-r--r--mm/vmstat.c26
-rw-r--r--mm/workingset.c9
-rw-r--r--mm/zswap.c11
-rw-r--r--net/bridge/netfilter/ebt_nflog.c1
-rw-r--r--net/core/dev.c3
-rw-r--r--net/mpls/af_mpls.c12
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c7
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c7
-rw-r--r--net/rds/tcp_listen.c2
-rw-r--r--net/wireless/nl80211.c10
-rw-r--r--scripts/Kbuild.include19
-rw-r--r--scripts/Makefile.asm-generic2
-rw-r--r--scripts/Makefile.build8
-rw-r--r--scripts/Makefile.host6
-rwxr-xr-xscripts/bootgraph.pl2
-rwxr-xr-xscripts/checkincludes.pl2
-rwxr-xr-xscripts/checkpatch.pl5
-rwxr-xr-xscripts/checkstack.pl2
-rwxr-xr-xscripts/checksyscalls.sh11
-rwxr-xr-xscripts/checkversion.pl2
-rwxr-xr-xscripts/cleanfile3
-rwxr-xr-xscripts/cleanpatch3
-rwxr-xr-xscripts/dtc/dt_to_config2
-rwxr-xr-xscripts/dtc/dtx_diff2
-rwxr-xr-xscripts/export_report.pl3
-rwxr-xr-xscripts/extract-module-sig.pl3
-rwxr-xr-xscripts/extract-sys-certs.pl3
-rwxr-xr-xscripts/extract_xc3028.pl2
-rwxr-xr-xscripts/gen_initramfs_list.sh4
-rwxr-xr-xscripts/get_dvb_firmware2
-rwxr-xr-xscripts/get_maintainer.pl3
-rwxr-xr-xscripts/headerdep.pl2
-rwxr-xr-xscripts/headers_check.pl3
-rwxr-xr-xscripts/kconfig/streamline_config.pl3
-rwxr-xr-xscripts/kernel-doc3
-rwxr-xr-xscripts/link-vmlinux.sh47
-rwxr-xr-xscripts/markup_oops.pl2
-rwxr-xr-xscripts/mkcompile_h2
-rwxr-xr-xscripts/namespace.pl4
-rwxr-xr-xscripts/profile2linkerlist.pl2
-rwxr-xr-xscripts/recordmcount.pl3
-rw-r--r--scripts/spelling.txt31
-rwxr-xr-xscripts/stackdelta2
-rw-r--r--sound/Kconfig6
-rw-r--r--sound/aoa/codecs/tas.c14
-rw-r--r--sound/aoa/fabrics/layout.c8
-rw-r--r--sound/atmel/Kconfig13
-rw-r--r--sound/atmel/Makefile2
-rw-r--r--sound/atmel/abdac.c610
-rw-r--r--sound/atmel/ac97c.c451
-rw-r--r--sound/core/Kconfig59
-rw-r--r--sound/core/Makefile3
-rw-r--r--sound/core/control.c70
-rw-r--r--sound/core/ctljack.c2
-rw-r--r--sound/core/info.c12
-rw-r--r--sound/core/info_oss.c1
-rw-r--r--sound/core/init.c8
-rw-r--r--sound/core/isadma.c3
-rw-r--r--sound/core/memalloc.c13
-rw-r--r--sound/core/memory.c2
-rw-r--r--sound/core/misc.c1
-rw-r--r--sound/core/oss/io.c4
-rw-r--r--sound/core/oss/mixer_oss.c3
-rw-r--r--sound/core/oss/pcm_oss.c101
-rw-r--r--sound/core/oss/pcm_plugin.c5
-rw-r--r--sound/core/oss/pcm_plugin.h10
-rw-r--r--sound/core/pcm.c37
-rw-r--r--sound/core/pcm_compat.c13
-rw-r--r--sound/core/pcm_drm_eld.c8
-rw-r--r--sound/core/pcm_lib.c668
-rw-r--r--sound/core/pcm_local.h50
-rw-r--r--sound/core/pcm_memory.c6
-rw-r--r--sound/core/pcm_misc.c14
-rw-r--r--sound/core/pcm_native.c1013
-rw-r--r--sound/core/pcm_param_trace.h142
-rw-r--r--sound/core/pcm_timer.c14
-rw-r--r--sound/core/pcm_trace.h50
-rw-r--r--sound/core/rawmidi.c4
-rw-r--r--sound/core/seq/Kconfig68
-rw-r--r--sound/core/seq/Makefile21
-rw-r--r--sound/core/seq/oss/Makefile2
-rw-r--r--sound/core/seq/seq_clientmgr.c8
-rw-r--r--sound/core/seq/seq_lock.c1
-rw-r--r--sound/core/seq/seq_memory.c2
-rw-r--r--sound/core/seq/seq_midi_emul.c9
-rw-r--r--sound/core/seq/seq_midi_event.c21
-rw-r--r--sound/core/seq/seq_ports.c2
-rw-r--r--sound/core/seq/seq_virmidi.c3
-rw-r--r--sound/core/seq_device.c (renamed from sound/core/seq/seq_device.c)0
-rw-r--r--sound/core/sound.c2
-rw-r--r--sound/core/sound_oss.c3
-rw-r--r--sound/core/timer.c125
-rw-r--r--sound/drivers/Kconfig15
-rw-r--r--sound/drivers/dummy.c20
-rw-r--r--sound/drivers/opl3/Makefile4
-rw-r--r--sound/drivers/opl3/opl3_lib.c2
-rw-r--r--sound/drivers/opl3/opl3_oss.c14
-rw-r--r--sound/drivers/opl3/opl3_seq.c4
-rw-r--r--sound/drivers/opl3/opl3_voice.h5
-rw-r--r--sound/drivers/opl4/opl4_lib.c4
-rw-r--r--sound/drivers/opl4/opl4_local.h2
-rw-r--r--sound/drivers/pcsp/pcsp_lib.c2
-rw-r--r--sound/drivers/vx/vx_mixer.c24
-rw-r--r--sound/drivers/vx/vx_pcm.c4
-rw-r--r--sound/firewire/amdtp-am824.c71
-rw-r--r--sound/firewire/amdtp-am824.h6
-rw-r--r--sound/firewire/amdtp-stream.c38
-rw-r--r--sound/firewire/amdtp-stream.h1
-rw-r--r--sound/firewire/bebob/bebob_maudio.c8
-rw-r--r--sound/firewire/bebob/bebob_pcm.c41
-rw-r--r--sound/firewire/dice/dice-pcm.c44
-rw-r--r--sound/firewire/digi00x/amdtp-dot.c63
-rw-r--r--sound/firewire/digi00x/digi00x-pcm.c56
-rw-r--r--sound/firewire/digi00x/digi00x.h1
-rw-r--r--sound/firewire/fireface/ff-midi.c22
-rw-r--r--sound/firewire/fireface/ff-pcm.c83
-rw-r--r--sound/firewire/fireworks/fireworks_pcm.c41
-rw-r--r--sound/firewire/motu/motu-pcm.c36
-rw-r--r--sound/firewire/oxfw/oxfw-pcm.c40
-rw-r--r--sound/firewire/tascam/amdtp-tascam.c62
-rw-r--r--sound/firewire/tascam/tascam-pcm.c58
-rw-r--r--sound/firewire/tascam/tascam.h1
-rw-r--r--sound/hda/hdac_bus.c1
-rw-r--r--sound/hda/hdac_device.c1
-rw-r--r--sound/i2c/other/ak4113.c23
-rw-r--r--sound/i2c/other/ak4114.c23
-rw-r--r--sound/i2c/other/ak4117.c23
-rw-r--r--sound/isa/Kconfig8
-rw-r--r--sound/isa/cmi8328.c2
-rw-r--r--sound/isa/cs423x/cs4236_lib.c4
-rw-r--r--sound/isa/es1688/es1688_lib.c4
-rw-r--r--sound/isa/es18xx.c8
-rw-r--r--sound/isa/gus/gus_main.c2
-rw-r--r--sound/isa/gus/gus_pcm.c122
-rw-r--r--sound/isa/sb/emu8000.c2
-rw-r--r--sound/isa/sb/emu8000_callback.c6
-rw-r--r--sound/isa/sb/emu8000_pcm.c214
-rw-r--r--sound/isa/sb/sb16.c2
-rw-r--r--sound/isa/sb/sb16_csp.c4
-rw-r--r--sound/isa/sb/sb16_main.c2
-rw-r--r--sound/isa/sb/sb8_main.c6
-rw-r--r--sound/isa/sscape.c2
-rw-r--r--sound/isa/wss/wss_lib.c4
-rw-r--r--sound/mips/hal2.c38
-rw-r--r--sound/mips/sgio2audio.c14
-rw-r--r--sound/parisc/harmony.c6
-rw-r--r--sound/pci/Kconfig8
-rw-r--r--sound/pci/ali5451/ali5451.c4
-rw-r--r--sound/pci/atiixp_modem.c4
-rw-r--r--sound/pci/au88x0/au88x0_pcm.c4
-rw-r--r--sound/pci/azt3328.c4
-rw-r--r--sound/pci/bt87x.c4
-rw-r--r--sound/pci/cmipci.c12
-rw-r--r--sound/pci/cs4281.c2
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c14
-rw-r--r--sound/pci/emu10k1/emu10k1.c2
-rw-r--r--sound/pci/emu10k1/emufx.c127
-rw-r--r--sound/pci/emu10k1/emupcm.c12
-rw-r--r--sound/pci/ens1370.c16
-rw-r--r--sound/pci/es1938.c37
-rw-r--r--sound/pci/fm801.c8
-rw-r--r--sound/pci/hda/hda_codec.c251
-rw-r--r--sound/pci/hda/hda_controller.c3
-rw-r--r--sound/pci/hda/hda_controller.h6
-rw-r--r--sound/pci/hda/hda_generic.c46
-rw-r--r--sound/pci/hda/hda_intel.c112
-rw-r--r--sound/pci/hda/hda_local.h61
-rw-r--r--sound/pci/hda/hda_sysfs.c2
-rw-r--r--sound/pci/hda/patch_hdmi.c104
-rw-r--r--sound/pci/hda/patch_realtek.c281
-rw-r--r--sound/pci/hda/patch_si3054.c4
-rw-r--r--sound/pci/ice1712/ice1712.c10
-rw-r--r--sound/pci/ice1712/ice1712.h2
-rw-r--r--sound/pci/ice1712/ice1724.c10
-rw-r--r--sound/pci/ice1712/juli.c6
-rw-r--r--sound/pci/ice1712/maya44.c4
-rw-r--r--sound/pci/ice1712/quartet.c6
-rw-r--r--sound/pci/intel8x0.c12
-rw-r--r--sound/pci/intel8x0m.c4
-rw-r--r--sound/pci/korg1212/korg1212.c112
-rw-r--r--sound/pci/mixart/mixart_mixer.c6
-rw-r--r--sound/pci/nm256/nm256.c61
-rw-r--r--sound/pci/rme32.c80
-rw-r--r--sound/pci/rme96.c74
-rw-r--r--sound/pci/rme9652/hdsp.c75
-rw-r--r--sound/pci/rme9652/hdspm.c4
-rw-r--r--sound/pci/rme9652/rme9652.c75
-rw-r--r--sound/pci/sonicvibes.c4
-rw-r--r--sound/pci/via82xx.c4
-rw-r--r--sound/pci/via82xx_modem.c4
-rw-r--r--sound/pci/vx222/vx222.c4
-rw-r--r--sound/pci/vx222/vx222.h2
-rw-r--r--sound/pci/vx222/vx222_ops.c26
-rw-r--r--sound/pcmcia/vx/vxp_mixer.c14
-rw-r--r--sound/pcmcia/vx/vxp_ops.c28
-rw-r--r--sound/pcmcia/vx/vxpocket.c4
-rw-r--r--sound/pcmcia/vx/vxpocket.h2
-rw-r--r--sound/ppc/awacs.c4
-rw-r--r--sound/ppc/beep.c2
-rw-r--r--sound/ppc/tumbler.c8
-rw-r--r--sound/sh/aica.c4
-rw-r--r--sound/sh/sh_dac_audio.c54
-rw-r--r--sound/soc/atmel/atmel-pcm.h6
-rw-r--r--sound/soc/atmel/tse850-pcm5142.c4
-rw-r--r--sound/soc/blackfin/bf5xx-ac97-pcm.c27
-rw-r--r--sound/soc/blackfin/bf5xx-i2s-pcm.c36
-rw-r--r--sound/soc/codecs/Kconfig10
-rw-r--r--sound/soc/codecs/Makefile4
-rw-r--r--sound/soc/codecs/ak4613.c95
-rw-r--r--sound/soc/codecs/ak4642.c4
-rw-r--r--sound/soc/codecs/cs35l34.c4
-rw-r--r--sound/soc/codecs/cs35l35.c94
-rw-r--r--sound/soc/codecs/cs35l35.h6
-rw-r--r--sound/soc/codecs/cs4271.c2
-rw-r--r--sound/soc/codecs/cs53l30.c2
-rw-r--r--sound/soc/codecs/da7213.c37
-rw-r--r--sound/soc/codecs/da7218.c2
-rw-r--r--sound/soc/codecs/da7219-aad.c31
-rw-r--r--sound/soc/codecs/da7219.c53
-rw-r--r--sound/soc/codecs/da7219.h5
-rw-r--r--sound/soc/codecs/es8316.c637
-rw-r--r--sound/soc/codecs/es8316.h129
-rw-r--r--sound/soc/codecs/hdmi-codec.c92
-rw-r--r--sound/soc/codecs/max9867.c2
-rw-r--r--sound/soc/codecs/msm8916-wcd-analog.c6
-rw-r--r--sound/soc/codecs/nau8824.c52
-rw-r--r--sound/soc/codecs/nau8824.h12
-rw-r--r--sound/soc/codecs/nau8825.c76
-rw-r--r--sound/soc/codecs/nau8825.h1
-rw-r--r--sound/soc/codecs/rt5514.c37
-rw-r--r--sound/soc/codecs/rt5514.h6
-rw-r--r--sound/soc/codecs/rt5645.c97
-rw-r--r--sound/soc/codecs/rt5651.c44
-rw-r--r--sound/soc/codecs/rt5663.c4
-rw-r--r--sound/soc/codecs/rt5663.h4
-rw-r--r--sound/soc/codecs/rt5665.c158
-rw-r--r--sound/soc/codecs/rt5670.c25
-rw-r--r--sound/soc/codecs/rt5677.c32
-rw-r--r--sound/soc/codecs/sgtl5000.c89
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c2
-rw-r--r--sound/soc/codecs/wm_adsp.c6
-rw-r--r--sound/soc/codecs/zx_aud96p22.c403
-rw-r--r--sound/soc/davinci/davinci-mcasp.c12
-rw-r--r--sound/soc/dwc/dwc-i2s.c6
-rw-r--r--sound/soc/fsl/mpc5200_dma.c1
-rw-r--r--sound/soc/generic/Kconfig17
-rw-r--r--sound/soc/generic/Makefile4
-rw-r--r--sound/soc/generic/audio-graph-card.c338
-rw-r--r--sound/soc/generic/audio-graph-scu-card.c411
-rw-r--r--sound/soc/generic/simple-card-utils.c198
-rw-r--r--sound/soc/generic/simple-card.c69
-rw-r--r--sound/soc/generic/simple-scu-card.c55
-rw-r--r--sound/soc/hisilicon/hi6210-i2s.c11
-rw-r--r--sound/soc/intel/Kconfig42
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c2
-rw-r--r--sound/soc/intel/atom/sst/sst.c59
-rw-r--r--sound/soc/intel/atom/sst/sst.h30
-rw-r--r--sound/soc/intel/atom/sst/sst_acpi.c232
-rw-r--r--sound/soc/intel/boards/Makefile6
-rw-r--r--sound/soc/intel/boards/bdw-rt5677.c26
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c26
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c19
-rw-r--r--sound/soc/intel/boards/byt-max98090.c24
-rw-r--r--sound/soc/intel/boards/bytcht_es8316.c300
-rw-r--r--sound/soc/intel/boards/bytcht_nocodec.c4
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c4
-rw-r--r--sound/soc/intel/boards/cht_bsw_max98090_ti.c12
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c89
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_max98927.c687
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c640
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_max98357a.c16
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_ssm4567.c16
-rw-r--r--sound/soc/intel/boards/skl_rt286.c30
-rw-r--r--sound/soc/intel/common/sst-acpi.h23
-rw-r--r--sound/soc/intel/common/sst-dsp-priv.h4
-rw-r--r--sound/soc/intel/common/sst-match-acpi.c47
-rw-r--r--sound/soc/intel/skylake/Makefile4
-rw-r--r--sound/soc/intel/skylake/bxt-sst.c4
-rw-r--r--sound/soc/intel/skylake/skl-debug.c261
-rw-r--r--sound/soc/intel/skylake/skl-messages.c31
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c6
-rw-r--r--sound/soc/intel/skylake/skl-sst.c5
-rw-r--r--sound/soc/intel/skylake/skl-topology.c187
-rw-r--r--sound/soc/intel/skylake/skl-topology.h21
-rw-r--r--sound/soc/intel/skylake/skl-tplg-interface.h2
-rw-r--r--sound/soc/intel/skylake/skl.c114
-rw-r--r--sound/soc/intel/skylake/skl.h23
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-cs42448.c2
-rw-r--r--sound/soc/omap/mcbsp.c12
-rw-r--r--sound/soc/pxa/Kconfig2
-rw-r--r--sound/soc/rockchip/Kconfig9
-rw-r--r--sound/soc/rockchip/Makefile2
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c41
-rw-r--r--sound/soc/rockchip/rockchip_i2s.h3
-rw-r--r--sound/soc/rockchip/rockchip_pdm.c516
-rw-r--r--sound/soc/rockchip/rockchip_pdm.h83
-rw-r--r--sound/soc/rockchip/rockchip_spdif.c4
-rw-r--r--sound/soc/samsung/s3c24xx_uda134x.c2
-rw-r--r--sound/soc/sh/Kconfig2
-rw-r--r--sound/soc/sh/fsi.c13
-rw-r--r--sound/soc/sh/rcar/adg.c61
-rw-r--r--sound/soc/sh/rcar/cmd.c7
-rw-r--r--sound/soc/sh/rcar/core.c494
-rw-r--r--sound/soc/sh/rcar/ctu.c6
-rw-r--r--sound/soc/sh/rcar/dma.c32
-rw-r--r--sound/soc/sh/rcar/dvc.c12
-rw-r--r--sound/soc/sh/rcar/gen.c2
-rw-r--r--sound/soc/sh/rcar/rsnd.h68
-rw-r--r--sound/soc/sh/rcar/src.c20
-rw-r--r--sound/soc/sh/rcar/ssi.c241
-rw-r--r--sound/soc/sh/rcar/ssiu.c37
-rw-r--r--sound/soc/sh/siu_dai.c4
-rw-r--r--sound/soc/soc-compress.c6
-rw-r--r--sound/soc/soc-core.c86
-rw-r--r--sound/soc/soc-pcm.c5
-rw-r--r--sound/soc/soc-topology.c37
-rw-r--r--sound/soc/stm/Kconfig29
-rw-r--r--sound/soc/stm/Makefile12
-rw-r--r--sound/soc/stm/stm32_i2s.c946
-rw-r--r--sound/soc/stm/stm32_sai.c15
-rw-r--r--sound/soc/stm/stm32_sai.h73
-rw-r--r--sound/soc/stm/stm32_sai_sub.c143
-rw-r--r--sound/soc/stm/stm32_spdifrx.c998
-rw-r--r--sound/soc/sunxi/sun4i-codec.c63
-rw-r--r--sound/soc/sunxi/sun8i-codec-analog.c145
-rw-r--r--sound/soc/zte/zx-i2s.c15
-rw-r--r--sound/sparc/cs4231.c4
-rw-r--r--sound/synth/Kconfig2
-rw-r--r--sound/synth/emux/Makefile8
-rw-r--r--sound/synth/emux/emux.c6
-rw-r--r--sound/synth/emux/emux_effect.c2
-rw-r--r--sound/synth/emux/emux_oss.c4
-rw-r--r--sound/usb/Kconfig2
-rw-r--r--sound/usb/line6/driver.h2
-rw-r--r--sound/usb/line6/podhd.c27
-rw-r--r--sound/usb/mixer_quirks.c2
-rw-r--r--sound/usb/usx2y/us122l.c36
-rw-r--r--sound/usb/usx2y/us122l.h2
-rw-r--r--sound/x86/intel_hdmi_audio.c4
-rw-r--r--tools/build/Makefile.build8
-rwxr-xr-xtools/kvm/kvm_stat/kvm_stat669
-rw-r--r--tools/kvm/kvm_stat/kvm_stat.txt12
-rw-r--r--tools/scripts/Makefile.include8
-rw-r--r--tools/testing/nvdimm/test/nfit.c2
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test.c45
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test_arm64.c95
-rw-r--r--tools/testing/selftests/breakpoints/step_after_suspend_test.c84
-rw-r--r--tools/testing/selftests/capabilities/test_execve.c7
-rwxr-xr-xtools/testing/selftests/ftrace/ftracetest2
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc8
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc28
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc9
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc19
-rw-r--r--tools/testing/selftests/ftrace/test.d/instances/instance-event.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc1
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi.c3
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c4
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c3
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c5
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_timeout.c4
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c3
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_wouldblock.c3
-rw-r--r--tools/testing/selftests/futex/include/logging.h4
-rw-r--r--tools/testing/selftests/intel_pstate/.gitignore2
-rw-r--r--tools/testing/selftests/intel_pstate/Makefile2
-rw-r--r--tools/testing/selftests/kselftest.h98
-rw-r--r--tools/testing/selftests/kselftest_harness.h (renamed from tools/testing/selftests/seccomp/test_harness.h)689
-rwxr-xr-xtools/testing/selftests/lib/bitmap.sh4
-rwxr-xr-xtools/testing/selftests/lib/printf.sh4
-rw-r--r--tools/testing/selftests/membarrier/membarrier_test.c97
-rw-r--r--tools/testing/selftests/memfd/Makefile2
-rwxr-xr-xtools/testing/selftests/memory-hotplug/mem-on-off-test.sh86
-rw-r--r--tools/testing/selftests/net/Makefile3
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/context_switch.c53
-rw-r--r--tools/testing/selftests/seccomp/Makefile2
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c4
-rw-r--r--tools/testing/selftests/size/get_size.c27
-rw-r--r--tools/testing/selftests/sync/sync_test.c13
-rw-r--r--tools/testing/selftests/sysctl/common_tests22
-rwxr-xr-xtools/testing/selftests/sysctl/run_numerictests2
-rwxr-xr-xtools/testing/selftests/sysctl/run_stringtests2
-rw-r--r--tools/testing/selftests/vm/virtual_address_range.c35
-rw-r--r--usr/Kconfig24
-rw-r--r--virt/kvm/arm/aarch32.c2
-rw-r--r--virt/kvm/arm/arch_timer.c139
-rw-r--r--virt/kvm/arm/arm.c82
-rw-r--r--virt/kvm/arm/hyp/vgic-v3-sr.c823
-rw-r--r--virt/kvm/arm/mmu.c23
-rw-r--r--virt/kvm/arm/pmu.c117
-rw-r--r--virt/kvm/arm/psci.c8
-rw-r--r--virt/kvm/arm/vgic/vgic-irqfd.c2
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c24
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c22
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c68
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.h12
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c45
-rw-r--r--virt/kvm/arm/vgic/vgic.c68
-rw-r--r--virt/kvm/kvm_main.c12
2407 files changed, 134854 insertions, 39501 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index f35473f8c630..3bec49c33bbb 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -242,8 +242,6 @@ kprobes.txt
- documents the kernel probes debugging feature.
kref.txt
- docs on adding reference counters (krefs) to kernel objects.
-kselftest.txt
- - small unittests for (some) individual codepaths in the kernel.
laptops/
- directory with laptop related info and laptop driver documentation.
ldm.txt
diff --git a/Documentation/ABI/stable/sysfs-hypervisor-xen b/Documentation/ABI/stable/sysfs-hypervisor-xen
new file mode 100644
index 000000000000..3cf5cdfcd9a8
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-hypervisor-xen
@@ -0,0 +1,119 @@
+What: /sys/hypervisor/compilation/compile_date
+Date: March 2009
+KernelVersion: 2.6.30
+Description: If running under Xen:
+ Contains the build time stamp of the Xen hypervisor
+ Might return "<denied>" in case of special security settings
+ in the hypervisor.
+
+What: /sys/hypervisor/compilation/compiled_by
+Date: March 2009
+KernelVersion: 2.6.30
+Description: If running under Xen:
+		Contains information about who built the Xen hypervisor
+ Might return "<denied>" in case of special security settings
+ in the hypervisor.
+
+What: /sys/hypervisor/compilation/compiler
+Date: March 2009
+KernelVersion: 2.6.30
+Description: If running under Xen:
+ Compiler which was used to build the Xen hypervisor
+ Might return "<denied>" in case of special security settings
+ in the hypervisor.
+
+What: /sys/hypervisor/properties/capabilities
+Date: March 2009
+KernelVersion: 2.6.30
+Description: If running under Xen:
+ Space separated list of supported guest system types. Each type
+ is in the format: <class>-<major>.<minor>-<arch>
+ With:
+ <class>: "xen" -- x86: paravirtualized, arm: standard
+ "hvm" -- x86 only: fully virtualized
+ <major>: major guest interface version
+ <minor>: minor guest interface version
+ <arch>: architecture, e.g.:
+ "x86_32": 32 bit x86 guest without PAE
+ "x86_32p": 32 bit x86 guest with PAE
+ "x86_64": 64 bit x86 guest
+ "armv7l": 32 bit arm guest
+ "aarch64": 64 bit arm guest
+
+What: /sys/hypervisor/properties/changeset
+Date: March 2009
+KernelVersion: 2.6.30
+Description: If running under Xen:
+ Changeset of the hypervisor (git commit)
+ Might return "<denied>" in case of special security settings
+ in the hypervisor.
+
+What: /sys/hypervisor/properties/features
+Date: March 2009
+KernelVersion: 2.6.30
+Description: If running under Xen:
+ Features the Xen hypervisor supports for the guest as defined
+ in include/xen/interface/features.h printed as a hex value.
+
+What: /sys/hypervisor/properties/pagesize
+Date: March 2009
+KernelVersion: 2.6.30
+Description: If running under Xen:
+ Default page size of the hypervisor printed as a hex value.
+ Might return "0" in case of special security settings
+ in the hypervisor.
+
+What: /sys/hypervisor/properties/virtual_start
+Date: March 2009
+KernelVersion: 2.6.30
+Description: If running under Xen:
+ Virtual address of the hypervisor as a hex value.
+
+What: /sys/hypervisor/type
+Date: March 2009
+KernelVersion: 2.6.30
+Description: If running under Xen:
+ Type of hypervisor:
+ "xen": Xen hypervisor
+
+What: /sys/hypervisor/uuid
+Date: March 2009
+KernelVersion: 2.6.30
+Description: If running under Xen:
+ UUID of the guest as known to the Xen hypervisor.
+
+What: /sys/hypervisor/version/extra
+Date: March 2009
+KernelVersion: 2.6.30
+Description: If running under Xen:
+ The Xen version is in the format <major>.<minor><extra>
+ This is the <extra> part of it.
+ Might return "<denied>" in case of special security settings
+ in the hypervisor.
+
+What: /sys/hypervisor/version/major
+Date: March 2009
+KernelVersion: 2.6.30
+Description: If running under Xen:
+ The Xen version is in the format <major>.<minor><extra>
+ This is the <major> part of it.
+
+What: /sys/hypervisor/version/minor
+Date: March 2009
+KernelVersion: 2.6.30
+Description: If running under Xen:
+ The Xen version is in the format <major>.<minor><extra>
+ This is the <minor> part of it.
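
The capabilities string above is intended to be parsed by userspace tools. A
minimal sketch of such a consumer, assuming only the space-separated
<class>-<major>.<minor>-<arch> format documented here (everything else,
including buffer sizes, is illustrative), could look like:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		FILE *f = fopen("/sys/hypervisor/properties/capabilities", "r");
		char buf[256], *tok;

		if (!f)
			return 1;	/* not running under Xen, or sysfs not mounted */
		if (!fgets(buf, sizeof(buf), f)) {
			fclose(f);
			return 1;
		}
		fclose(f);

		/* Entries are space separated: <class>-<major>.<minor>-<arch> */
		for (tok = strtok(buf, " \n"); tok; tok = strtok(NULL, " \n")) {
			char class[8], arch[16];
			int major, minor;

			if (sscanf(tok, "%7[^-]-%d.%d-%15s",
				   class, &major, &minor, arch) == 4)
				printf("class=%s abi=%d.%d arch=%s\n",
				       class, major, minor, arch);
		}
		return 0;
	}
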
diff --git a/Documentation/ABI/testing/sysfs-firmware-ofw b/Documentation/ABI/testing/sysfs-firmware-ofw
index f562b188e71d..edcab3ccfcc0 100644
--- a/Documentation/ABI/testing/sysfs-firmware-ofw
+++ b/Documentation/ABI/testing/sysfs-firmware-ofw
@@ -1,6 +1,6 @@
What: /sys/firmware/devicetree/*
Date: November 2013
-Contact: Grant Likely <[email protected]>
+Contact: Grant Likely <[email protected]>, [email protected]
Description:
When using OpenFirmware or a Flattened Device Tree to enumerate
hardware, the device tree structure will be exposed in this
@@ -26,3 +26,27 @@ Description:
name plus address). Properties are represented as files
in the directory. The contents of each file is the exact
binary data from the device tree.
+
+What: /sys/firmware/fdt
+Date: February 2015
+KernelVersion: 3.19
+Contact: Frank Rowand <[email protected]>, [email protected]
+Description:
+ Exports the FDT blob that was passed to the kernel by
+ the bootloader. This allows userland applications such
+ as kexec to access the raw binary. This blob is also
+ useful when debugging since it contains any changes
+ made to the blob by the bootloader.
+
+ The fact that this node does not reside under
+ /sys/firmware/device-tree is deliberate: FDT is also used
+ on arm64 UEFI/ACPI systems to communicate just the UEFI
+ and ACPI entry points, but the FDT is never unflattened
+ and used to configure the system.
+
+ A CRC32 checksum is calculated over the entire FDT
+ blob, and verified at late_initcall time. The sysfs
+ entry is instantiated only if the checksum is valid,
+	i.e., if the FDT blob has not been modified in the
+	meantime. Otherwise, a warning is printed.
+Users: kexec, debugging
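
As a rough illustration of the userland access described above, the exported
blob can be read like any other sysfs file; checking the standard big-endian
FDT header magic 0xd00dfeed gives a quick sanity check. The sketch below
assumes nothing beyond the /sys/firmware/fdt path documented here:

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	int main(void)
	{
		FILE *f = fopen("/sys/firmware/fdt", "rb");
		uint32_t magic;

		if (!f) {
			/* node not exposed, or the CRC32 check failed at boot */
			perror("/sys/firmware/fdt");
			return 1;
		}
		if (fread(&magic, sizeof(magic), 1, f) != 1) {
			fclose(f);
			return 1;
		}
		fclose(f);

		/* flattened device tree blobs start with big-endian 0xd00dfeed */
		if (ntohl(magic) != 0xd00dfeed) {
			fprintf(stderr, "not a flattened device tree\n");
			return 1;
		}
		printf("valid FDT header magic\n");
		return 0;
	}
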
diff --git a/Documentation/ABI/testing/sysfs-hypervisor-pmu b/Documentation/ABI/testing/sysfs-hypervisor-xen
index 224faa105e18..53b7b2ea7515 100644
--- a/Documentation/ABI/testing/sysfs-hypervisor-pmu
+++ b/Documentation/ABI/testing/sysfs-hypervisor-xen
@@ -1,8 +1,19 @@
+What: /sys/hypervisor/guest_type
+Date: June 2017
+KernelVersion: 4.13
+Description: If running under Xen:
+ Type of guest:
+ "Xen": standard guest type on arm
+ "HVM": fully virtualized guest (x86)
+ "PV": paravirtualized guest (x86)
+ "PVH": fully virtualized guest without legacy emulation (x86)
+
What: /sys/hypervisor/pmu/pmu_mode
Date: August 2015
KernelVersion: 4.3
Contact: Boris Ostrovsky <[email protected]>
-Description:
+Description: If running under Xen:
Describes mode that Xen's performance-monitoring unit (PMU)
uses. Accepted values are
"off" -- PMU is disabled
@@ -17,7 +28,16 @@ What: /sys/hypervisor/pmu/pmu_features
Date: August 2015
KernelVersion: 4.3
Contact: Boris Ostrovsky <[email protected]>
-Description:
+Description: If running under Xen:
Describes Xen PMU features (as an integer). A set bit indicates
that the corresponding feature is enabled. See
include/xen/interface/xenpmu.h for available features
+
+What: /sys/hypervisor/properties/buildid
+Date: June 2017
+KernelVersion: 4.13
+Description: If running under Xen:
+ Build id of the hypervisor, needed for hypervisor live patching.
+ Might return "<denied>" in case of special security settings
+ in the hypervisor.
diff --git a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
index b31e782bd985..597a2f3d1efc 100644
--- a/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
+++ b/Documentation/ABI/testing/sysfs-platform-ideapad-laptop
@@ -17,3 +17,11 @@ Description:
* 2 -> Dust Cleaning
* 4 -> Efficient Thermal Dissipation Mode
+What: /sys/devices/platform/ideapad/touchpad
+Date: May 2017
+KernelVersion: 4.13
+Contact: "Ritesh Raj Sarraf <[email protected]>"
+Description:
+ Control touchpad mode.
+ * 1 -> Switched On
+ * 0 -> Switched Off
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 979228bc9035..4ed388356898 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -550,32 +550,11 @@ and to unmap it:
dma_unmap_single(dev, dma_handle, size, direction);
You should call dma_mapping_error() as dma_map_single() could fail and return
-error. Not all DMA implementations support the dma_mapping_error() interface.
-However, it is a good practice to call dma_mapping_error() interface, which
-will invoke the generic mapping error check interface. Doing so will ensure
-that the mapping code will work correctly on all DMA implementations without
-any dependency on the specifics of the underlying implementation. Using the
-returned address without checking for errors could result in failures ranging
-from panics to silent data corruption. A couple of examples of incorrect ways
-to check for errors that make assumptions about the underlying DMA
-implementation are as follows and these are applicable to dma_map_page() as
-well.
-
-Incorrect example 1:
- dma_addr_t dma_handle;
-
- dma_handle = dma_map_single(dev, addr, size, direction);
- if ((dma_handle & 0xffff != 0) || (dma_handle >= 0x1000000)) {
- goto map_error;
- }
-
-Incorrect example 2:
- dma_addr_t dma_handle;
-
- dma_handle = dma_map_single(dev, addr, size, direction);
- if (dma_handle == DMA_ERROR_CODE) {
- goto map_error;
- }
+error. Doing so will ensure that the mapping code will work correctly on all
+DMA implementations without any dependency on the specifics of the underlying
+implementation. Using the returned address without checking for errors could
+result in failures ranging from panics to silent data corruption. The same
+applies to dma_map_page() as well.
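+
+For example, a correct check looks like this (a minimal sketch; the
+map_error label stands for whatever recovery path the driver implements):
+
+  dma_addr_t dma_handle;
+
+  dma_handle = dma_map_single(dev, addr, size, direction);
+  if (dma_mapping_error(dev, dma_handle)) {
+          /*
+           * reduce current DMA mapping usage,
+           * delay and try again later or
+           * reset driver
+           */
+          goto map_error;
+  }
+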
You should call dma_unmap_single() when the DMA activity is finished, e.g.,
from the interrupt which told you that the DMA transfer is done.
diff --git a/Documentation/acpi/gpio-properties.txt b/Documentation/acpi/gpio-properties.txt
index 2aff0349facd..88c65cb5bf0a 100644
--- a/Documentation/acpi/gpio-properties.txt
+++ b/Documentation/acpi/gpio-properties.txt
@@ -156,3 +156,68 @@ pointed to by its first argument. That should be done in the driver's .probe()
routine. On removal, the driver should unregister its GPIO mapping table by
calling acpi_dev_remove_driver_gpios() on the ACPI device object where that
table was previously registered.
+
+Using the _CRS fallback
+-----------------------
+
+If a device does not have _DSD or the driver does not create an ACPI GPIO
+mapping, the Linux GPIO framework refuses to return any GPIOs. This is
+because the driver does not know what it would actually get. For example,
+if we have a device like below:
+
+ Device (BTH)
+ {
+ Name (_HID, ...)
+
+ Name (_CRS, ResourceTemplate () {
+ GpioIo (Exclusive, PullNone, 0, 0, IoRestrictionNone,
+ "\\_SB.GPO0", 0, ResourceConsumer) {15}
+ GpioIo (Exclusive, PullNone, 0, 0, IoRestrictionNone,
+ "\\_SB.GPO0", 0, ResourceConsumer) {27}
+ })
+ }
+
+The driver might expect to get the right GPIO when it does:
+
+ desc = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+
+but since there is no way to know the mapping between "reset" and
+the GpioIo() in _CRS, desc will hold ERR_PTR(-ENOENT).
+
+The driver author can solve this by passing the mapping explicitly
+(the recommended way, documented in the above chapter).
+
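+A minimal sketch of such a mapping for the BTH device above (the
+connection ID "reset", the property name "reset-gpios" and the choice of
+the first GpioIo() entry are illustrative assumptions only):
+
+  static const struct acpi_gpio_params reset_gpio = { 0, 0, false };
+
+  static const struct acpi_gpio_mapping bth_acpi_gpios[] = {
+          { "reset-gpios", &reset_gpio, 1 },
+          { }
+  };
+
+  /* In .probe(), before calling gpiod_get(dev, "reset", ...);
+   * the return value check is omitted in this sketch. */
+  acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), bth_acpi_gpios);
+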
+The ACPI GPIO mapping tables should not contaminate drivers that do not
+know which exact device they are servicing. This implies that the ACPI
+GPIO mapping tables are tightly tied to the ACPI ID of the device in
+question and to certain of its objects, as listed in the above chapter.
+
+Getting GPIO descriptor
+-----------------------
+
+There are two main approaches to get a GPIO resource from ACPI:
+ desc = gpiod_get(dev, connection_id, flags);
+ desc = gpiod_get_index(dev, connection_id, index, flags);
+
+We may consider two different cases here, i.e. when a connection ID is
+provided and when it is not.
+
+Case 1:
+ desc = gpiod_get(dev, "non-null-connection-id", flags);
+ desc = gpiod_get_index(dev, "non-null-connection-id", index, flags);
+
+Case 2:
+ desc = gpiod_get(dev, NULL, flags);
+ desc = gpiod_get_index(dev, NULL, index, flags);
+
+Case 1 assumes that the corresponding ACPI device description has
+defined device properties; otherwise no GPIO resources will be
+returned.
+
+Case 2 explicitly tells GPIO core to look for resources in _CRS.
+
+Be aware that gpiod_get_index(), in cases 1 and 2, will return different
+resources if two versions of the ACPI device description are provided
+and no mapping is present in the driver. That's why a driver has to
+handle them carefully, as explained in the previous chapter.
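+
+As an illustration (assuming the BTH device from the _CRS fallback
+section above and that the second GpioIo() entry is the one wanted),
+case 2 lets a driver pick a resource purely by its index in _CRS:
+
+  struct gpio_desc *desc;
+
+  /* second GpioIo() entry in _CRS, i.e. pin 27 of \_SB.GPO0 */
+  desc = gpiod_get_index(dev, NULL, 1, GPIOD_OUT_LOW);
+  if (IS_ERR(desc))
+          return PTR_ERR(desc);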
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index f24ee1c99412..d9c171ce4190 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1862,6 +1862,18 @@
for all guests.
Default is 1 (enabled) if in 64-bit or 32-bit PAE mode.
+ kvm-arm.vgic_v3_group0_trap=
+ [KVM,ARM] Trap guest accesses to GICv3 group-0
+ system registers
+
+ kvm-arm.vgic_v3_group1_trap=
+ [KVM,ARM] Trap guest accesses to GICv3 group-1
+ system registers
+
+ kvm-arm.vgic_v3_common_trap=
+ [KVM,ARM] Trap guest accesses to GICv3 common
+ system registers
+
kvm-intel.ept= [KVM,Intel] Disable extended page tables
(virtualized MMU) support on capable Intel chips.
Default is 1 (enabled)
@@ -2303,8 +2315,11 @@
that the amount of memory usable for all allocations
is not too small.
- movable_node [KNL] Boot-time switch to enable the effects
- of CONFIG_MOVABLE_NODE=y. See mm/Kconfig for details.
+ movable_node [KNL] Boot-time switch to make hotpluggable memory
+ NUMA nodes movable. This means that the memory
+ of such nodes will be usable only for movable
+ allocations, which rules out almost all kernel
+ allocations. Use with caution!
MTD_Partition= [MTD]
Format: <name>,<region-number>,<size>,<offset>
@@ -3760,8 +3775,14 @@
slab_nomerge [MM]
Disable merging of slabs with similar size. May be
necessary if there is some reason to distinguish
- allocs to different slabs. Debug options disable
- merging on their own.
+ allocs to different slabs, especially in hardened
+ environments where heap overflows and attacker
+ control of slab layout can usually be frustrated
+ by disabling merging. This reduces most of the
+ exposure of a heap attack to a single cache
+ (risks via metadata attacks are mostly unchanged).
+ Debug options disable merging on their own.
For more information see Documentation/vm/slub.txt.
slab_max_order= [MM, SLAB]
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 10f2dddbf449..f5f93dca54b7 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -62,6 +62,7 @@ stable kernels.
| Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 |
| Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 |
| Cavium | ThunderX SMMUv2 | #27704 | N/A |
+| Cavium | ThunderX Core | #30115 | CAVIUM_ERRATUM_30115 |
| | | | |
| Freescale/NXP | LS2080A/LS1043A | A-008585 | FSL_ERRATUM_A008585 |
| | | | |
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
index dc5e2dcdbef4..e6101976e0f1 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -149,6 +149,16 @@ during boot, before manual intervention is possible. To make testing
and experimenting easier, the kernel parameter cgroup_no_v1= allows
disabling controllers in v1 and make them always available in v2.
+cgroup v2 currently supports the following mount options.
+
+ nsdelegate
+
+ Consider cgroup namespaces as delegation boundaries. This
+ option is system wide and can only be set on mount or modified
+ through remount from the init namespace. The mount option is
+ ignored on non-init namespace mounts. Please refer to the
+ Delegation section for details.
+
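+For illustration, a minimal sketch of setting the option from the init
+namespace at mount time (the mount point below is only an example):
+
+  #include <stdio.h>
+  #include <sys/mount.h>
+
+  /* fresh cgroup2 mount with the nsdelegate option enabled */
+  if (mount("none", "/sys/fs/cgroup", "cgroup2", 0, "nsdelegate"))
+          perror("mount cgroup2");
+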
2-2. Organizing Processes
@@ -308,18 +318,27 @@ file.
2-5-1. Model of Delegation
-A cgroup can be delegated to a less privileged user by granting write
-access of the directory and its "cgroup.procs" file to the user. Note
-that resource control interface files in a given directory control the
-distribution of the parent's resources and thus must not be delegated
-along with the directory.
-
-Once delegated, the user can build sub-hierarchy under the directory,
-organize processes as it sees fit and further distribute the resources
-it received from the parent. The limits and other settings of all
-resource controllers are hierarchical and regardless of what happens
-in the delegated sub-hierarchy, nothing can escape the resource
-restrictions imposed by the parent.
+A cgroup can be delegated in two ways. First, to a less privileged
+user by granting write access of the directory and its "cgroup.procs"
+and "cgroup.subtree_control" files to the user. Second, if the
+"nsdelegate" mount option is set, automatically to a cgroup namespace
+on namespace creation.
+
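+As a sketch of the first method (the path and the uid/gid are examples
+only, and error handling is omitted), delegation amounts to handing the
+directory and the two files above over to the target user:
+
+  #include <unistd.h>
+
+  /* delegate an example subtree to uid/gid 1000 */
+  chown("/sys/fs/cgroup/delegated", 1000, 1000);
+  chown("/sys/fs/cgroup/delegated/cgroup.procs", 1000, 1000);
+  chown("/sys/fs/cgroup/delegated/cgroup.subtree_control", 1000, 1000);
+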
+Because the resource control interface files in a given directory
+control the distribution of the parent's resources, the delegatee
+shouldn't be allowed to write to them. For the first method, this is
+achieved by not granting access to these files. For the second, the
+kernel rejects writes to all files other than "cgroup.procs" and
+"cgroup.subtree_control" on a namespace root from inside the
+namespace.
+
+The end results are equivalent for both delegation types. Once
+delegated, the user can build sub-hierarchy under the directory,
+organize processes inside it as it sees fit and further distribute the
+resources it received from the parent. The limits and other settings
+of all resource controllers are hierarchical and regardless of what
+happens in the delegated sub-hierarchy, nothing can escape the
+resource restrictions imposed by the parent.
Currently, cgroup doesn't impose any restrictions on the number of
cgroups in or nesting depth of a delegated sub-hierarchy; however,
@@ -329,10 +348,12 @@ this may be limited explicitly in the future.
2-5-2. Delegation Containment
A delegated sub-hierarchy is contained in the sense that processes
-can't be moved into or out of the sub-hierarchy by the delegatee. For
-a process with a non-root euid to migrate a target process into a
-cgroup by writing its PID to the "cgroup.procs" file, the following
-conditions must be met.
+can't be moved into or out of the sub-hierarchy by the delegatee.
+
+For delegations to a less privileged user, this is achieved by
+requiring the following conditions for a process with a non-root euid
+to migrate a target process into a cgroup by writing its PID to the
+"cgroup.procs" file.
- The writer must have write access to the "cgroup.procs" file.
@@ -359,6 +380,11 @@ destination cgroup C00 is above the points of delegation and U0 would
not have write access to its "cgroup.procs" files and thus the write
will be denied with -EACCES.
+For delegations to namespaces, containment is achieved by requiring
+that both the source and destination cgroups are reachable from the
+namespace of the process which is attempting the migration. If either
+is not reachable, the migration is rejected with -ENOENT.
+
2-6. Guidelines
@@ -826,13 +852,25 @@ PAGE_SIZE multiple when read back.
The number of times the cgroup's memory usage was
about to go over the max boundary. If direct reclaim
- fails to bring it down, the OOM killer is invoked.
+ fails to bring it down, the cgroup goes to OOM state.
oom
- The number of times the OOM killer has been invoked in
- the cgroup. This may not exactly match the number of
- processes killed but should generally be close.
+ The number of times the cgroup's memory usage
+ reached the limit and allocation was about to fail.
+
+ Depending on the context, the result could be an
+ invocation of the OOM killer and a retried
+ allocation, or a failed allocation.
+
+ A failed allocation, in its turn, could be returned
+ to userspace as -ENOMEM or silently ignored in cases
+ like disk readahead. For now, OOM in a memory cgroup
+ kills tasks only if the shortage has happened inside
+ a page fault.
+
+ oom_kill
+
+ The number of processes belonging to this cgroup
+ killed by any kind of OOM killer.
memory.stat
@@ -930,6 +968,34 @@ PAGE_SIZE multiple when read back.
Number of times a shadow node has been reclaimed
+ pgrefill
+
+ Amount of scanned pages (in an active LRU list)
+
+ pgscan
+
+ Amount of scanned pages (in an inactive LRU list)
+
+ pgsteal
+
+ Amount of reclaimed pages
+
+ pgactivate
+
+ Amount of pages moved to the active LRU list
+
+ pgdeactivate
+
+ Amount of pages moved to the inactive LRU list
+
+ pglazyfree
+
+ Amount of pages postponed to be freed under memory pressure
+
+ pglazyfreed
+
+ Amount of reclaimed lazyfree pages
+
memory.swap.current
A read-only single value file which exists on non-root
@@ -1413,7 +1479,7 @@ D. Deprecated v1 Core Features
- Multiple hierarchies including named ones are not supported.
-- All mount options and remounting are not supported.
+- None of the v1 mount options are supported.
- The "tasks" file is removed and "cgroup.procs" is not sorted.
diff --git a/Documentation/dev-tools/index.rst b/Documentation/dev-tools/index.rst
index 4ac991dbddb7..a81787cd47d7 100644
--- a/Documentation/dev-tools/index.rst
+++ b/Documentation/dev-tools/index.rst
@@ -24,6 +24,7 @@ whole; patches welcome!
kmemcheck
gdb-kernel-debugging
kgdb
+ kselftest
.. only:: subproject and html
diff --git a/Documentation/dev-tools/kmemleak.rst b/Documentation/dev-tools/kmemleak.rst
index b2391b829169..cb8862659178 100644
--- a/Documentation/dev-tools/kmemleak.rst
+++ b/Documentation/dev-tools/kmemleak.rst
@@ -150,6 +150,7 @@ See the include/linux/kmemleak.h header for the functions prototype.
- ``kmemleak_init`` - initialize kmemleak
- ``kmemleak_alloc`` - notify of a memory block allocation
- ``kmemleak_alloc_percpu`` - notify of a percpu memory block allocation
+- ``kmemleak_vmalloc`` - notify of a vmalloc() memory allocation
- ``kmemleak_free`` - notify of a memory block freeing
- ``kmemleak_free_part`` - notify of a partial memory block freeing
- ``kmemleak_free_percpu`` - notify of a percpu memory block freeing
diff --git a/Documentation/kselftest.txt b/Documentation/dev-tools/kselftest.rst
index 5bd590335839..ebd03d11d2c2 100644
--- a/Documentation/kselftest.txt
+++ b/Documentation/dev-tools/kselftest.rst
@@ -1,4 +1,6 @@
+======================
Linux Kernel Selftests
+======================
The kernel contains a set of "self tests" under the tools/testing/selftests/
directory. These are intended to be small tests to exercise individual code
@@ -15,28 +17,33 @@ hotplug test is run on 2% of hotplug capable memory instead of 10%.
Running the selftests (hotplug tests are run in limited mode)
=============================================================
-To build the tests:
+To build the tests::
+
$ make -C tools/testing/selftests
+To run the tests::
-To run the tests:
$ make -C tools/testing/selftests run_tests
-To build and run the tests with a single command, use:
+To build and run the tests with a single command, use::
+
$ make kselftest
-- note that some tests will require root privileges.
+Note that some tests will require root privileges.
Running a subset of selftests
-========================================
+=============================
+
You can use the "TARGETS" variable on the make command line to specify
single test to run, or a list of tests to run.
-To run only tests targeted for a single subsystem:
- $ make -C tools/testing/selftests TARGETS=ptrace run_tests
+To run only tests targeted for a single subsystem::
+
+ $ make -C tools/testing/selftests TARGETS=ptrace run_tests
+
+You can specify multiple tests to build and run::
-You can specify multiple tests to build and run:
$ make TARGETS="size timers" kselftest
See the top-level tools/testing/selftests/Makefile for the list of all
@@ -46,13 +53,15 @@ possible targets.
Running the full range hotplug selftests
========================================
-To build the hotplug tests:
+To build the hotplug tests::
+
$ make -C tools/testing/selftests hotplug
-To run the hotplug tests:
+To run the hotplug tests::
+
$ make -C tools/testing/selftests run_hotplug
-- note that some tests will require root privileges.
+Note that some tests will require root privileges.
Install selftests
@@ -62,11 +71,13 @@ You can use kselftest_install.sh tool installs selftests in default
location which is tools/testing/selftests/kselftest or a user specified
location.
-To install selftests in default location:
+To install selftests in default location::
+
$ cd tools/testing/selftests
$ ./kselftest_install.sh
-To install selftests in a user specified location:
+To install selftests in a user specified location::
+
$ cd tools/testing/selftests
$ ./kselftest_install.sh install_dir
@@ -77,10 +88,10 @@ Kselftest install as well as the Kselftest tarball provide a script
named "run_kselftest.sh" to run the tests.
You can simply do the following to run the installed Kselftests. Please
-note some tests will require root privileges.
+note some tests will require root privileges::
-cd kselftest
-./run_kselftest.sh
+ $ cd kselftest
+ $ ./run_kselftest.sh
Contributing new tests
======================
@@ -96,14 +107,49 @@ In general, the rules for selftests are
* Don't cause the top-level "make run_tests" to fail if your feature is
unconfigured.
-Contributing new tests(details)
-===============================
+Contributing new tests (details)
+================================
* Use TEST_GEN_XXX if such binaries or files are generated during
compiling.
+
TEST_PROGS, TEST_GEN_PROGS mean it is the excutable tested by
default.
+
TEST_PROGS_EXTENDED, TEST_GEN_PROGS_EXTENDED mean it is the
executable which is not tested by default.
TEST_FILES, TEST_GEN_FILES mean it is the file which is used by
test.
+
+Test Harness
+============
+
+The kselftest_harness.h file contains useful helpers to build tests. The tests
+from tools/testing/selftests/seccomp/seccomp_bpf.c can be used as example.
+
+Example
+-------
+
+.. kernel-doc:: tools/testing/selftests/kselftest_harness.h
+ :doc: example
+
+
+Helpers
+-------
+
+.. kernel-doc:: tools/testing/selftests/kselftest_harness.h
+ :functions: TH_LOG TEST TEST_SIGNAL FIXTURE FIXTURE_DATA FIXTURE_SETUP
+ FIXTURE_TEARDOWN TEST_F TEST_HARNESS_MAIN
+
+Operators
+---------
+
+.. kernel-doc:: tools/testing/selftests/kselftest_harness.h
+ :doc: operators
+
+.. kernel-doc:: tools/testing/selftests/kselftest_harness.h
+ :functions: ASSERT_EQ ASSERT_NE ASSERT_LT ASSERT_LE ASSERT_GT ASSERT_GE
+ ASSERT_NULL ASSERT_TRUE ASSERT_NULL ASSERT_TRUE ASSERT_FALSE
+ ASSERT_STREQ ASSERT_STRNE EXPECT_EQ EXPECT_NE EXPECT_LT
+ EXPECT_LE EXPECT_GT EXPECT_GE EXPECT_NULL EXPECT_TRUE
+ EXPECT_FALSE EXPECT_STREQ EXPECT_STRNE
diff --git a/Documentation/device-mapper/dm-zoned.txt b/Documentation/device-mapper/dm-zoned.txt
new file mode 100644
index 000000000000..736fcc78d193
--- /dev/null
+++ b/Documentation/device-mapper/dm-zoned.txt
@@ -0,0 +1,144 @@
+dm-zoned
+========
+
+The dm-zoned device mapper target exposes a zoned block device (ZBC and
+ZAC compliant devices) as a regular block device without any write
+pattern constraints. In effect, it implements a drive-managed zoned
+block device which hides from the user (a file system or an application
+doing raw block device accesses) the sequential write constraints of
+host-managed zoned block devices and can mitigate the potential
+device-side performance degradation due to excessive random writes on
+host-aware zoned block devices.
+
+For a more detailed description of the zoned block device models and
+their constraints see (for SCSI devices):
+
+http://www.t10.org/drafts.htm#ZBC_Family
+
+and (for ATA devices):
+
+http://www.t13.org/Documents/UploadedDocuments/docs2015/di537r05-Zoned_Device_ATA_Command_Set_ZAC.pdf
+
+The dm-zoned implementation is simple and minimizes system overhead (CPU
+and memory usage as well as storage capacity loss). For a 10TB
+host-managed disk with 256 MB zones, dm-zoned memory usage per disk
+instance is at most 4.5 MB and as little as 5 zones will be used
+internally for storing metadata and performing reclaim operations.
+
+dm-zoned target devices are formatted and checked using the dmzadm
+utility available at:
+
+https://github.com/hgst/dm-zoned-tools
+
+Algorithm
+=========
+
+dm-zoned implements an on-disk buffering scheme to handle non-sequential
+write accesses to the sequential zones of a zoned block device.
+Conventional zones are used for caching as well as for storing internal
+metadata.
+
+The zones of the device are separated into 2 types:
+
+1) Metadata zones: these are conventional zones used to store metadata.
+Metadata zones are not reported as usable capacity to the user.
+
+2) Data zones: all remaining zones, the vast majority of which will be
+sequential zones used exclusively to store user data. The conventional
+zones of the device may also be used for buffering user random writes.
+Data in these zones may be directly mapped to the conventional zone, but
+later moved to a sequential zone so that the conventional zone can be
+reused for buffering incoming random writes.
+
+dm-zoned exposes a logical device with a sector size of 4096 bytes,
+irrespective of the physical sector size of the backend zoned block
+device being used. This allows reducing the amount of metadata needed to
+manage valid blocks (blocks written).
+
+The on-disk metadata format is as follows:
+
+1) The first block of the first conventional zone found contains the
+super block which describes the on disk amount and position of metadata
+blocks.
+
+2) Following the super block, a set of blocks is used to describe the
+mapping of the logical device blocks. The mapping is done per chunk of
+blocks, with the chunk size equal to the size of a zone of the backend
+device. The mapping table is indexed by chunk number and each mapping
+entry indicates the zone number of the device storing the chunk of data.
+Each mapping entry may also indicate the zone number of a conventional
+zone used to buffer random modifications to the data zone.
+
+3) A set of blocks used to store bitmaps indicating the validity of
+blocks in the data zones follows the mapping table. A valid block is
+defined as a block that was written and not discarded. For a buffered
+data chunk, a block is valid either in the data zone mapping the
+chunk or in the buffer zone of the chunk, but never in both.
+
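+As a rough illustration of the sizes involved (illustrative arithmetic
+only, using the 256 MB zone example from the introduction; the macro
+names below are made up for this sketch):
+
+  #define EXAMPLE_ZONE_SIZE     (256ULL << 20)                      /* 256 MB zone */
+  #define EXAMPLE_BLOCK_SIZE    4096ULL                             /* logical block size */
+  #define BLOCKS_PER_ZONE       (EXAMPLE_ZONE_SIZE / EXAMPLE_BLOCK_SIZE)  /* 65536 blocks */
+  #define BITMAP_BYTES_PER_ZONE (BLOCKS_PER_ZONE / 8)               /* 8 KiB of bitmap */
+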
+For a logical chunk mapped to a conventional zone, all write operations
+are processed by directly writing to the zone. If the mapping zone is a
+sequential zone, the write operation is processed directly only if the
+write offset within the logical chunk is equal to the write pointer
+offset within of the sequential data zone (i.e. the write operation is
+aligned on the zone write pointer). Otherwise, write operations are
+processed indirectly using a buffer zone. In that case, an unused
+conventional zone is allocated and assigned to the chunk being
+accessed. Writing a block to the buffer zone of a chunk will
+automatically invalidate the same block in the sequential zone mapping
+the chunk. If all blocks of the sequential zone become invalid, the zone
+is freed and the chunk buffer zone becomes the primary zone mapping the
+chunk, resulting in native random write performance similar to a regular
+block device.
+
+Read operations are processed according to the block validity
+information provided by the bitmaps. Valid blocks are read either from
+the sequential zone mapping a chunk, or if the chunk is buffered, from
+the buffer zone assigned. If the accessed chunk has no mapping, or the
+accessed blocks are invalid, the read buffer is zeroed and the read
+operation terminated.
+
+After some time, the limited number of conventional zones available may
+be exhausted (all used to map chunks or buffer sequential zones) and
+unaligned writes to unbuffered chunks become impossible. To avoid this
+situation, a reclaim process regularly scans used conventional zones and
+tries to reclaim the least recently used zones by copying the valid
+blocks of the buffer zone to a free sequential zone. Once the copy
+completes, the chunk mapping is updated to point to the sequential zone
+and the buffer zone freed for reuse.
+
+Metadata Protection
+===================
+
+To protect metadata against corruption in case of sudden power loss or
+system crash, 2 sets of metadata zones are used. One set, the primary
+set, is used as the main metadata region, while the secondary set is
+used as a staging area. Modified metadata is first written to the
+secondary set and validated by updating the super block in the secondary
+set; a generation counter is used to indicate that this set contains the
+newest metadata. Once this operation completes, in-place metadata block
+updates can be done in the primary metadata set. This ensures that one
+of the sets is always consistent (all modifications committed or none
+at all). Flush operations are used as a commit point. Upon reception of
+a flush request, metadata modification activity is temporarily blocked
+(for both incoming BIO processing and reclaim process) and all dirty
+metadata blocks are staged and updated. Normal operation is then
+resumed. Flushing metadata thus only temporarily delays write and
+discard requests. Read requests can be processed concurrently while
+metadata flush is being executed.
+
+Usage
+=====
+
+A zoned block device must first be formatted using the dmzadm tool. This
+will analyze the device zone configuration, determine where to place the
+metadata sets on the device and initialize the metadata sets.
+
+Ex:
+
+dmzadm --format /dev/sdxx
+
+For a formatted device, the target can be created normally with the
+dmsetup utility. The only parameter that dm-zoned requires is the
+underlying zoned block device name. Ex:
+
+echo "0 `blockdev --getsize ${dev}` zoned ${dev}" | dmsetup create dmz-`basename ${dev}`
diff --git a/Documentation/devicetree/bindings/arm/cci.txt b/Documentation/devicetree/bindings/arm/cci.txt
index 0f2153e8fa7e..9600761f2d5b 100644
--- a/Documentation/devicetree/bindings/arm/cci.txt
+++ b/Documentation/devicetree/bindings/arm/cci.txt
@@ -11,13 +11,6 @@ clusters, through memory mapped interface, with a global control register
space and multiple sets of interface control registers, one per slave
interface.
-Bindings for the CCI node follow the ePAPR standard, available from:
-
-www.power.org/documentation/epapr-version-1-1/
-
-with the addition of the bindings described in this document which are
-specific to ARM.
-
* CCI interconnect node
Description: Describes a CCI cache coherent Interconnect component
@@ -50,10 +43,10 @@ specific to ARM.
as a tuple of cells, containing child address,
parent address and the size of the region in the
child address space.
- Definition: A standard property. Follow rules in the ePAPR for
- hierarchical bus addressing. CCI interfaces
- addresses refer to the parent node addressing
- scheme to declare their register bases.
+ Definition: A standard property. Follow rules in the Devicetree
+ Specification for hierarchical bus addressing. CCI
+ interfaces addresses refer to the parent node
+ addressing scheme to declare their register bases.
CCI interconnect node can define the following child nodes:
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index ad1913bea8d7..a44253cad269 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -6,9 +6,9 @@ The device tree allows to describe the layout of CPUs in a system through
the "cpus" node, which in turn contains a number of subnodes (ie "cpu")
defining properties for every cpu.
-Bindings for CPU nodes follow the ePAPR v1.1 standard, available from:
+Bindings for CPU nodes follow the Devicetree Specification, available from:
-https://www.power.org/documentation/epapr-version-1-1/
+https://www.devicetree.org/specifications/
with updates for 32-bit and 64-bit ARM systems provided in this document.
@@ -16,8 +16,8 @@ with updates for 32-bit and 64-bit ARM systems provided in this document.
Convention used in this document
================================
-This document follows the conventions described in the ePAPR v1.1, with
-the addition:
+This document follows the conventions described in the Devicetree
+Specification, with the addition:
- square brackets define bitfields, eg reg[7:0] value of the bitfield in
the reg property contained in bits 7 down to 0
@@ -26,8 +26,9 @@ the addition:
cpus and cpu node bindings definition
=====================================
-The ARM architecture, in accordance with the ePAPR, requires the cpus and cpu
-nodes to be present and contain the properties described below.
+The ARM architecture, in accordance with the Devicetree Specification,
+requires the cpus and cpu nodes to be present and contain the properties
+described below.
- cpus node
diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt
index b8e41c148a3c..7a591333f2b1 100644
--- a/Documentation/devicetree/bindings/arm/idle-states.txt
+++ b/Documentation/devicetree/bindings/arm/idle-states.txt
@@ -695,5 +695,5 @@ cpus {
[4] ARM Architecture Reference Manuals
http://infocenter.arm.com/help/index.jsp
-[5] ePAPR standard
- https://www.power.org/documentation/epapr-version-1-1/
+[5] Devicetree Specification
+ https://www.devicetree.org/specifications/
diff --git a/Documentation/devicetree/bindings/arm/l2c2x0.txt b/Documentation/devicetree/bindings/arm/l2c2x0.txt
index d9650c1788f4..fbe6cb21f4cf 100644
--- a/Documentation/devicetree/bindings/arm/l2c2x0.txt
+++ b/Documentation/devicetree/bindings/arm/l2c2x0.txt
@@ -4,8 +4,8 @@ ARM cores often have a separate L2C210/L2C220/L2C310 (also known as PL210/PL220/
PL310 and variants) based level 2 cache controller. All these various implementations
of the L2 cache controller have compatible programming models (Note 1).
Some of the properties that are just prefixed "cache-*" are taken from section
-3.7.3 of the ePAPR v1.1 specification which can be found at:
-https://www.power.org/wp-content/uploads/2012/06/Power_ePAPR_APPROVED_v1.1.pdf
+3.7.3 of the Devicetree Specification which can be found at:
+https://www.devicetree.org/specifications/
The ARM L2 cache representation in the device tree should be done as follows:
diff --git a/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt b/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
index 8968371d84e2..0b887440e08a 100644
--- a/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
@@ -7,6 +7,14 @@ registers giving access to numerous features: clocks, pin-muxing and
many other SoC configuration items. This DT binding allows to describe
this system controller.
+For the top level node:
+ - compatible: must be: "syscon", "simple-mfd";
+ - reg: register area of the AP806 system controller
+
+Clocks:
+-------
+
The Device Tree node representing the AP806 system controller provides
a number of clocks:
@@ -17,19 +25,76 @@ a number of clocks:
Required properties:
- - compatible: must be:
- "marvell,ap806-system-controller", "syscon"
- - reg: register area of the AP806 system controller
+ - compatible: must be: "marvell,ap806-clock"
- #clock-cells: must be set to 1
- - clock-output-names: must be defined to:
- "ap-cpu-cluster-0", "ap-cpu-cluster-1", "ap-fixed", "ap-mss"
+
+Pinctrl:
+--------
+
+For common binding part and usage, refer to
+Documentation/devicetree/bindings/pinctrl/marvell,mvebu-pinctrl.txt.
+
+Required properties:
+- compatible: must be "marvell,ap806-pinctrl"
+
+Available mpp pins/groups and functions:
+Note: brackets (x) are not part of the mpp name for marvell,function and are
+given only for a more detailed description in this document.
+
+name pins functions
+================================================================================
+mpp0 0 gpio, sdio(clk), spi0(clk)
+mpp1 1 gpio, sdio(cmd), spi0(miso)
+mpp2 2 gpio, sdio(d0), spi0(mosi)
+mpp3 3 gpio, sdio(d1), spi0(cs0n)
+mpp4 4 gpio, sdio(d2), i2c0(sda)
+mpp5 5 gpio, sdio(d3), i2c0(sdk)
+mpp6 6 gpio, sdio(ds)
+mpp7 7 gpio, sdio(d4), uart1(rxd)
+mpp8 8 gpio, sdio(d5), uart1(txd)
+mpp9 9 gpio, sdio(d6), spi0(cs1n)
+mpp10 10 gpio, sdio(d7)
+mpp11 11 gpio, uart0(txd)
+mpp12 12 gpio, sdio(pw_off), sdio(hw_rst)
+mpp13 13 gpio
+mpp14 14 gpio
+mpp15 15 gpio
+mpp16 16 gpio
+mpp17 17 gpio
+mpp18 18 gpio
+mpp19 19 gpio, uart0(rxd), sdio(pw_off)
+
+GPIO:
+-----
+For common binding part and usage, refer to
+Documentation/devicetree/bindings/gpio/gpio-mvebu.txt.
+
+Required properties:
+
+- compatible: "marvell,armada-8k-gpio"
+
+- offset: offset address inside the syscon block
Example:
+ap_syscon: system-controller@6f4000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x6f4000 0x1000>;
- syscon: system-controller@6f4000 {
- compatible = "marvell,ap806-system-controller", "syscon";
+ ap_clk: clock {
+ compatible = "marvell,ap806-clock";
#clock-cells = <1>;
- clock-output-names = "ap-cpu-cluster-0", "ap-cpu-cluster-1",
- "ap-fixed", "ap-mss";
- reg = <0x6f4000 0x1000>;
};
+
+ ap_pinctrl: pinctrl {
+ compatible = "marvell,ap806-pinctrl";
+ };
+
+ ap_gpio: gpio {
+ compatible = "marvell,armada-8k-gpio";
+ offset = <0x1040>;
+ ngpios = <19>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&ap_pinctrl 0 0 19>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
index 07dbb358182c..171d02cadea4 100644
--- a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
@@ -7,6 +7,13 @@ Controller 0 and System Controller 1. This Device Tree binding allows
to describe the first system controller, which provides registers to
configure various aspects of the SoC.
+For the top level node:
+ - compatible: must be: "syscon", "simple-mfd";
+ - reg: register area of the CP110 system controller 0
+
+Clocks:
+-------
+
The Device Tree node representing this System Controller 0 provides a
number of clocks:
@@ -27,6 +34,7 @@ The following clocks are available:
- 0 2 EIP
- 0 3 Core
- 0 4 NAND core
+ - 0 5 SDIO core
- Gatable clocks
- 1 0 Audio
- 1 1 Comm Unit
@@ -56,28 +64,126 @@ The following clocks are available:
Required properties:
- compatible: must be:
- "marvell,cp110-system-controller0", "syscon";
- - reg: register area of the CP110 system controller 0
+ "marvell,cp110-clock"
- #clock-cells: must be set to 2
- - core-clock-output-names must be set to:
- "cpm-apll", "cpm-ppv2-core", "cpm-eip", "cpm-core", "cpm-nand-core"
- - gate-clock-output-names must be set to:
- "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
- "cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
- "cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
- "cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
- "cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";
+
+Pinctrl:
+--------
+
+For common binding part and usage, refer to the file
+Documentation/devicetree/bindings/pinctrl/marvell,mvebu-pinctrl.txt.
+
+Required properties:
+
+- compatible: "marvell,armada-7k-pinctrl",
+ "marvell,armada-8k-cpm-pinctrl" or "marvell,armada-8k-cps-pinctrl"
+ depending on the specific variant of the SoC being used.
+
+Available mpp pins/groups and functions:
+Note: brackets (x) are not part of the mpp name for marvell,function and are
+given only for a more detailed description in this document.
+
+name pins functions
+================================================================================
+mpp0 0 gpio, dev(ale1), au(i2smclk), ge0(rxd3), tdm(pclk), ptp(pulse), mss_i2c(sda), uart0(rxd), sata0(present_act), ge(mdio)
+mpp1 1 gpio, dev(ale0), au(i2sdo_spdifo), ge0(rxd2), tdm(drx), ptp(clk), mss_i2c(sck), uart0(txd), sata1(present_act), ge(mdc)
+mpp2 2 gpio, dev(ad15), au(i2sextclk), ge0(rxd1), tdm(dtx), mss_uart(rxd), ptp(pclk_out), i2c1(sck), uart1(rxd), sata0(present_act), xg(mdc)
+mpp3 3 gpio, dev(ad14), au(i2slrclk), ge0(rxd0), tdm(fsync), mss_uart(txd), pcie(rstoutn), i2c1(sda), uart1(txd), sata1(present_act), xg(mdio)
+mpp4 4 gpio, dev(ad13), au(i2sbclk), ge0(rxctl), tdm(rstn), mss_uart(rxd), uart1(cts), pcie0(clkreq), uart3(rxd), ge(mdc)
+mpp5 5 gpio, dev(ad12), au(i2sdi), ge0(rxclk), tdm(intn), mss_uart(txd), uart1(rts), pcie1(clkreq), uart3(txd), ge(mdio)
+mpp6 6 gpio, dev(ad11), ge0(txd3), spi0(csn2), au(i2sextclk), sata1(present_act), pcie2(clkreq), uart0(rxd), ptp(pulse)
+mpp7 7 gpio, dev(ad10), ge0(txd2), spi0(csn1), spi1(csn1), sata0(present_act), led(data), uart0(txd), ptp(clk)
+mpp8 8 gpio, dev(ad9), ge0(txd1), spi0(csn0), spi1(csn0), uart0(cts), led(stb), uart2(rxd), ptp(pclk_out), synce1(clk)
+mpp9 9 gpio, dev(ad8), ge0(txd0), spi0(mosi), spi1(mosi), pcie(rstoutn), synce2(clk)
+mpp10 10 gpio, dev(readyn), ge0(txctl), spi0(miso), spi1(miso), uart0(cts), sata1(present_act)
+mpp11 11 gpio, dev(wen1), ge0(txclkout), spi0(clk), spi1(clk), uart0(rts), led(clk), uart2(txd), sata0(present_act)
+mpp12 12 gpio, dev(clk_out), nf(rbn1), spi1(csn1), ge0(rxclk)
+mpp13 13 gpio, dev(burstn), nf(rbn0), spi1(miso), ge0(rxctl), mss_spi(miso)
+mpp14 14 gpio, dev(bootcsn), dev(csn0), spi1(csn0), spi0(csn3), au(i2sextclk), spi0(miso), sata0(present_act), mss_spi(csn)
+mpp15 15 gpio, dev(ad7), spi1(mosi), spi0(mosi), mss_spi(mosi), ptp(pulse_cp2cp)
+mpp16 16 gpio, dev(ad6), spi1(clk), mss_spi(clk)
+mpp17 17 gpio, dev(ad5), ge0(txd3)
+mpp18 18 gpio, dev(ad4), ge0(txd2), ptp(clk_cp2cp)
+mpp19 19 gpio, dev(ad3), ge0(txd1), wakeup(out_cp2cp)
+mpp20 20 gpio, dev(ad2), ge0(txd0)
+mpp21 21 gpio, dev(ad1), ge0(txctl), sei(in_cp2cp)
+mpp22 22 gpio, dev(ad0), ge0(txclkout), wakeup(in_cp2cp)
+mpp23 23 gpio, dev(a1), au(i2smclk), link(rd_in_cp2cp)
+mpp24 24 gpio, dev(a0), au(i2slrclk)
+mpp25 25 gpio, dev(oen), au(i2sdo_spdifo)
+mpp26 26 gpio, dev(wen0), au(i2sbclk)
+mpp27 27 gpio, dev(csn0), spi1(miso), mss_gpio4, ge0(rxd3), spi0(csn4), ge(mdio), sata0(present_act), uart0(rts), rei(in_cp2cp)
+mpp28 28 gpio, dev(csn1), spi1(csn0), mss_gpio5, ge0(rxd2), spi0(csn5), pcie2(clkreq), ptp(pulse), ge(mdc), sata1(present_act), uart0(cts), led(data)
+mpp29 29 gpio, dev(csn2), spi1(mosi), mss_gpio6, ge0(rxd1), spi0(csn6), pcie1(clkreq), ptp(clk), mss_i2c(sda), sata0(present_act), uart0(rxd), led(stb)
+mpp30 30 gpio, dev(csn3), spi1(clk), mss_gpio7, ge0(rxd0), spi0(csn7), pcie0(clkreq), ptp(pclk_out), mss_i2c(sck), sata1(present_act), uart0(txd), led(clk)
+mpp31 31 gpio, dev(a2), mss_gpio4, pcie(rstoutn), ge(mdc)
+mpp32 32 gpio, mii(col), mii(txerr), mss_spi(miso), tdm(drx), au(i2sextclk), au(i2sdi), ge(mdio), sdio(v18_en), pcie1(clkreq), mss_gpio0
+mpp33 33 gpio, mii(txclk), sdio(pwr10), mss_spi(csn), tdm(fsync), au(i2smclk), sdio(bus_pwr), xg(mdio), pcie2(clkreq), mss_gpio1
+mpp34 34 gpio, mii(rxerr), sdio(pwr11), mss_spi(mosi), tdm(dtx), au(i2slrclk), sdio(wr_protect), ge(mdc), pcie0(clkreq), mss_gpio2
+mpp35 35 gpio, sata1(present_act), i2c1(sda), mss_spi(clk), tdm(pclk), au(i2sdo_spdifo), sdio(card_detect), xg(mdio), ge(mdio), pcie(rstoutn), mss_gpio3
+mpp36 36 gpio, synce2(clk), i2c1(sck), ptp(clk), synce1(clk), au(i2sbclk), sata0(present_act), xg(mdc), ge(mdc), pcie2(clkreq), mss_gpio5
+mpp37 37 gpio, uart2(rxd), i2c0(sck), ptp(pclk_out), tdm(intn), mss_i2c(sck), sata1(present_act), ge(mdc), xg(mdc), pcie1(clkreq), mss_gpio6, link(rd_out_cp2cp)
+mpp38 38 gpio, uart2(txd), i2c0(sda), ptp(pulse), tdm(rstn), mss_i2c(sda), sata0(present_act), ge(mdio), xg(mdio), au(i2sextclk), mss_gpio7, ptp(pulse_cp2cp)
+mpp39 39 gpio, sdio(wr_protect), au(i2sbclk), ptp(clk), spi0(csn1), sata1(present_act), mss_gpio0
+mpp40 40 gpio, sdio(pwr11), synce1(clk), mss_i2c(sda), au(i2sdo_spdifo), ptp(pclk_out), spi0(clk), uart1(txd), ge(mdio), sata0(present_act), mss_gpio1
+mpp41 41 gpio, sdio(pwr10), sdio(bus_pwr), mss_i2c(sck), au(i2slrclk), ptp(pulse), spi0(mosi), uart1(rxd), ge(mdc), sata1(present_act), mss_gpio2, rei(out_cp2cp)
+mpp42 42 gpio, sdio(v18_en), sdio(wr_protect), synce2(clk), au(i2smclk), mss_uart(txd), spi0(miso), uart1(cts), xg(mdc), sata0(present_act), mss_gpio4
+mpp43 43 gpio, sdio(card_detect), synce1(clk), au(i2sextclk), mss_uart(rxd), spi0(csn0), uart1(rts), xg(mdio), sata1(present_act), mss_gpio5, wakeup(out_cp2cp)
+mpp44 44 gpio, ge1(txd2), uart0(rts), ptp(clk_cp2cp)
+mpp45 45 gpio, ge1(txd3), uart0(txd), pcie(rstoutn)
+mpp46 46 gpio, ge1(txd1), uart1(rts)
+mpp47 47 gpio, ge1(txd0), spi1(clk), uart1(txd), ge(mdc)
+mpp48 48 gpio, ge1(txctl_txen), spi1(mosi), xg(mdc), wakeup(in_cp2cp)
+mpp49 49 gpio, ge1(txclkout), mii(crs), spi1(miso), uart1(rxd), ge(mdio), pcie0(clkreq), sdio(v18_en), sei(out_cp2cp)
+mpp50 50 gpio, ge1(rxclk), mss_i2c(sda), spi1(csn0), uart2(txd), uart0(rxd), xg(mdio), sdio(pwr11)
+mpp51 51 gpio, ge1(rxd0), mss_i2c(sck), spi1(csn1), uart2(rxd), uart0(cts), sdio(pwr10)
+mpp52 52 gpio, ge1(rxd1), synce1(clk), synce2(clk), spi1(csn2), uart1(cts), led(clk), pcie(rstoutn), pcie0(clkreq)
+mpp53 53 gpio, ge1(rxd2), ptp(clk), spi1(csn3), uart1(rxd), led(stb), sdio(led)
+mpp54 54 gpio, ge1(rxd3), synce2(clk), ptp(pclk_out), synce1(clk), led(data), sdio(hw_rst), sdio(wr_protect)
+mpp55 55 gpio, ge1(rxctl_rxdv), ptp(pulse), sdio(led), sdio(card_detect)
+mpp56 56 gpio, tdm(drx), au(i2sdo_spdifo), spi0(clk), uart1(rxd), sata1(present_act), sdio(clk)
+mpp57 57 gpio, mss_i2c(sda), ptp(pclk_out), tdm(intn), au(i2sbclk), spi0(mosi), uart1(txd), sata0(present_act), sdio(cmd)
+mpp58 58 gpio, mss_i2c(sck), ptp(clk), tdm(rstn), au(i2sdi), spi0(miso), uart1(cts), led(clk), sdio(d0)
+mpp59 59 gpio, mss_gpio7, synce2(clk), tdm(fsync), au(i2slrclk), spi0(csn0), uart0(cts), led(stb), uart1(txd), sdio(d1)
+mpp60 60 gpio, mss_gpio6, ptp(pulse), tdm(dtx), au(i2smclk), spi0(csn1), uart0(rts), led(data), uart1(rxd), sdio(d2)
+mpp61 61 gpio, mss_gpio5, ptp(clk), tdm(pclk), au(i2sextclk), spi0(csn2), uart0(txd), uart2(txd), sata1(present_act), ge(mdio), sdio(d3)
+mpp62 62 gpio, mss_gpio4, synce1(clk), ptp(pclk_out), sata1(present_act), spi0(csn3), uart0(rxd), uart2(rxd), sata0(present_act), ge(mdc)
+
+GPIO:
+-----
+
+For common binding part and usage, refer to
+Documentation/devicetree/bindings/gpio/gpio-mvebu.txt.
+
+Required properties:
+
+- compatible: "marvell,armada-8k-gpio"
+
+- offset: offset address inside the syscon block
Example:
- cpm_syscon0: system-controller@440000 {
- compatible = "marvell,cp110-system-controller0", "syscon";
- reg = <0x440000 0x1000>;
+cpm_syscon0: system-controller@440000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x440000 0x1000>;
+
+ cpm_clk: clock {
+ compatible = "marvell,cp110-clock";
#clock-cells = <2>;
- core-clock-output-names = "cpm-apll", "cpm-ppv2-core", "cpm-eip", "cpm-core", "cpm-nand-core";
- gate-clock-output-names = "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
- "cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
- "cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
- "cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
- "cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";
};
+
+ cpm_pinctrl: pinctrl {
+ compatible = "marvell,armada-8k-cpm-pinctrl";
+ };
+
+ cpm_gpio1: gpio@100 {
+ compatible = "marvell,armada-8k-gpio";
+ offset = <0x100>;
+ ngpios = <32>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&cpm_pinctrl 0 0 32>;
+ status = "disabled";
+ };
+
+};
diff --git a/Documentation/devicetree/bindings/arm/topology.txt b/Documentation/devicetree/bindings/arm/topology.txt
index 1061faf5f602..de9eb0486630 100644
--- a/Documentation/devicetree/bindings/arm/topology.txt
+++ b/Documentation/devicetree/bindings/arm/topology.txt
@@ -29,9 +29,9 @@ corresponding to the system hierarchy; syntactically they are defined as device
tree nodes.
The remainder of this document provides the topology bindings for ARM, based
-on the ePAPR standard, available from:
+on the Devicetree Specification, available from:
-http://www.power.org/documentation/epapr-version-1-1/
+https://www.devicetree.org/specifications/
If not stated otherwise, whenever a reference to a cpu node phandle is made its
value must point to a cpu node compliant with the cpu node bindings as
diff --git a/Documentation/devicetree/bindings/ata/cortina,gemini-sata-bridge.txt b/Documentation/devicetree/bindings/ata/cortina,gemini-sata-bridge.txt
new file mode 100644
index 000000000000..1c3d3cc70051
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/cortina,gemini-sata-bridge.txt
@@ -0,0 +1,55 @@
+* Cortina Systems Gemini SATA Bridge
+
+The Gemini SATA bridge is a SoC-internal PATA to SATA bridge that
+takes two Faraday Technology FTIDE010 PATA controllers and bridges
+them in different configurations to two SATA ports.
+
+Required properties:
+- compatible: should be
+ "cortina,gemini-sata-bridge"
+- reg: registers and size for the block
+- resets: phandles to the reset lines for both SATA bridges
+- reset-names: must be "sata0", "sata1"
+- clocks: phandles to the compulsory peripheral clocks
+- clock-names: must be "SATA0_PCLK", "SATA1_PCLK"
+- syscon: a phandle to the global Gemini system controller
+- cortina,gemini-ata-muxmode: tell the desired multiplexing mode for
+ the ATA controller and SATA bridges. Values 0..3:
+ Mode 0: ata0 master <-> sata0
+ ata1 master <-> sata1
+ ata0 slave interface brought out on IDE pads
+ Mode 1: ata0 master <-> sata0
+ ata1 master <-> sata1
+ ata1 slave interface brought out on IDE pads
+ Mode 2: ata1 master <-> sata1
+ ata1 slave <-> sata0
+ ata0 master and slave interfaces brought out
+ on IDE pads
+ Mode 3: ata0 master <-> sata0
+ ata0 slave <-> sata1
+ ata1 master and slave interfaces brought out
+ on IDE pads
+
+Optional boolean properties:
+- cortina,gemini-enable-ide-pins: enables the PATA to IDE connection.
+ The muxmode setting decides whether ATA0 or ATA1 is brought out,
+ and whether master, slave or both interfaces get brought out.
+- cortina,gemini-enable-sata-bridge: enables the PATA to SATA bridge
+ inside the Gemini SoC. The muxmode decides which PATA blocks will
+ be muxed out and how.
+
+Example:
+
+sata: sata@46000000 {
+ compatible = "cortina,gemini-sata-bridge";
+ reg = <0x46000000 0x100>;
+ resets = <&rcon 26>, <&rcon 27>;
+ reset-names = "sata0", "sata1";
+ clocks = <&gcc GEMINI_CLK_GATE_SATA0>,
+ <&gcc GEMINI_CLK_GATE_SATA1>;
+ clock-names = "SATA0_PCLK", "SATA1_PCLK";
+ syscon = <&syscon>;
+ cortina,gemini-ata-muxmode = <3>;
+ cortina,gemini-enable-ide-pins;
+ cortina,gemini-enable-sata-bridge;
+};
diff --git a/Documentation/devicetree/bindings/ata/faraday,ftide010.txt b/Documentation/devicetree/bindings/ata/faraday,ftide010.txt
new file mode 100644
index 000000000000..a0c64a29104d
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/faraday,ftide010.txt
@@ -0,0 +1,38 @@
+* Faraday Technology FTIDE010 PATA controller
+
+This controller is the first Faraday IDE interface block, used in the
+StorLink SL2312 and SL3516, later known as the Cortina Systems Gemini
+platform. The controller can do PIO modes 0 through 4, Multi-word DMA
+(MWDMA) modes 0 through 2 and Ultra DMA modes 0 through 6.
+
+On the Gemini platform, this PATA block is accompanied by a PATA to
+SATA bridge in order to support SATA. This is why a phandle to that
+controller is compulsory on that platform.
+
+The timing properties are unique per-SoC, not per-board.
+
+Required properties:
+- compatible: should be one of
+ "cortina,gemini-pata", "faraday,ftide010"
+ "faraday,ftide010"
+- interrupts: interrupt for the block
+- reg: registers and size for the block
+
+Optional properties:
+- clocks: a SoC clock running the peripheral.
+- clock-names: should be set to "PCLK" for the peripheral clock.
+
+Required properties for "cortina,gemini-pata" compatible:
+- sata: a phandle to the Gemini PATA to SATA bridge, see
+ cortina,gemini-sata-bridge.txt for details.
+
+Example:
+
+ata@63000000 {
+ compatible = "cortina,gemini-pata", "faraday,ftide010";
+ reg = <0x63000000 0x100>;
+ interrupts = <4 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&gcc GEMINI_CLK_GATE_IDE>;
+ clock-names = "PCLK";
+ sata = <&sata>;
+};
diff --git a/Documentation/devicetree/bindings/bus/simple-pm-bus.txt b/Documentation/devicetree/bindings/bus/simple-pm-bus.txt
index d032237512c2..6f15037131ed 100644
--- a/Documentation/devicetree/bindings/bus/simple-pm-bus.txt
+++ b/Documentation/devicetree/bindings/bus/simple-pm-bus.txt
@@ -10,7 +10,7 @@ enabled for child devices connected to the bus (either on-SoC or externally)
to function.
While "simple-pm-bus" follows the "simple-bus" set of properties, as specified
-in ePAPR, it is not an extension of "simple-bus".
+in the Devicetree Specification, it is not an extension of "simple-bus".
Required properties:
diff --git a/Documentation/devicetree/bindings/chosen.txt b/Documentation/devicetree/bindings/chosen.txt
index b5e39af4ddc0..dee3f5d9df26 100644
--- a/Documentation/devicetree/bindings/chosen.txt
+++ b/Documentation/devicetree/bindings/chosen.txt
@@ -10,7 +10,8 @@ stdout-path property
--------------------
Device trees may specify the device to be used for boot console output
-with a stdout-path property under /chosen, as described in ePAPR, e.g.
+with a stdout-path property under /chosen, as described in the Devicetree
+Specification, e.g.
/ {
chosen {
diff --git a/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt b/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt
index 2b7b3fa588d7..606da38c0959 100644
--- a/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt
+++ b/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt
@@ -1,11 +1,14 @@
-* Amlogic Meson8b Clock and Reset Unit
+* Amlogic Meson8, Meson8b and Meson8m2 Clock and Reset Unit
-The Amlogic Meson8b clock controller generates and supplies clock to various
-controllers within the SoC.
+The Amlogic Meson8 / Meson8b / Meson8m2 clock controller generates and
+supplies clock to various controllers within the SoC.
Required Properties:
-- compatible: should be "amlogic,meson8b-clkc"
+- compatible: must be one of:
+ - "amlogic,meson8-clkc" for Meson8 (S802) SoCs
+ - "amlogic,meson8b-clkc" for Meson8 (S805) SoCs
+ - "amlogic,meson8m2-clkc" for Meson8m2 (S812) SoCs
- reg: it must be composed by two tuples:
0) physical base address of the xtal register and length of memory
mapped region.
diff --git a/Documentation/devicetree/bindings/clock/hi6220-clock.txt b/Documentation/devicetree/bindings/clock/hi6220-clock.txt
index e4d5feaebc29..ef3deb7b86ea 100644
--- a/Documentation/devicetree/bindings/clock/hi6220-clock.txt
+++ b/Documentation/devicetree/bindings/clock/hi6220-clock.txt
@@ -11,6 +11,7 @@ Required Properties:
- compatible: the compatible should be one of the following strings to
indicate the clock controller functionality.
+ - "hisilicon,hi6220-acpu-sctrl"
- "hisilicon,hi6220-aoctrl"
- "hisilicon,hi6220-sysctrl"
- "hisilicon,hi6220-mediactrl"
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index 5b4dfc1ea54f..551d03be9665 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -8,6 +8,7 @@ Required properties :
"qcom,gcc-apq8084"
"qcom,gcc-ipq8064"
"qcom,gcc-ipq4019"
+ "qcom,gcc-ipq8074"
"qcom,gcc-msm8660"
"qcom,gcc-msm8916"
"qcom,gcc-msm8960"
diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
index 6ed469c66b32..6498e1fdbb33 100644
--- a/Documentation/devicetree/bindings/clock/qoriq-clock.txt
+++ b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
@@ -57,6 +57,11 @@ Optional properties:
- clocks: If clock-frequency is not specified, sysclk may be provided
as an input clock. Either clock-frequency or clocks must be
provided.
+ A second input clock, called "coreclk", may be provided if
+ core PLLs are based on a different input clock from the
+ platform PLL.
+- clock-names: Required if a coreclk is present. Valid names are
+ "sysclk" and "coreclk".
2. Clock Provider
@@ -73,6 +78,7 @@ second cell is the clock index for the specified type.
2 hwaccel index (n in CLKCGnHWACSR)
3 fman 0 for fm1, 1 for fm2
4 platform pll 0=pll, 1=pll/2, 2=pll/3, 3=pll/4
+ 5 coreclk must be 0
3. Example
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
index f4f944d81308..0cd894f987a3 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.txt
@@ -15,6 +15,11 @@ Required Properties:
- compatible: Must be one of:
- "renesas,r8a7743-cpg-mssr" for the r8a7743 SoC (RZ/G1M)
- "renesas,r8a7745-cpg-mssr" for the r8a7745 SoC (RZ/G1E)
+ - "renesas,r8a7790-cpg-mssr" for the r8a7790 SoC (R-Car H2)
+ - "renesas,r8a7791-cpg-mssr" for the r8a7791 SoC (R-Car M2-W)
+ - "renesas,r8a7792-cpg-mssr" for the r8a7792 SoC (R-Car V2H)
+ - "renesas,r8a7793-cpg-mssr" for the r8a7793 SoC (R-Car M2-N)
+ - "renesas,r8a7794-cpg-mssr" for the r8a7794 SoC (R-Car E2)
- "renesas,r8a7795-cpg-mssr" for the r8a7795 SoC (R-Car H3)
- "renesas,r8a7796-cpg-mssr" for the r8a7796 SoC (R-Car M3-W)
@@ -24,9 +29,10 @@ Required Properties:
- clocks: References to external parent clocks, one entry for each entry in
clock-names
- clock-names: List of external parent clock names. Valid names are:
- - "extal" (r8a7743, r8a7745, r8a7795, r8a7796)
+ - "extal" (r8a7743, r8a7745, r8a7790, r8a7791, r8a7792, r8a7793, r8a7794,
+ r8a7795, r8a7796)
- "extalr" (r8a7795, r8a7796)
- - "usb_extal" (r8a7743, r8a7745)
+ - "usb_extal" (r8a7743, r8a7745, r8a7790, r8a7791, r8a7793, r8a7794)
- #clock-cells: Must be 2
- For CPG core clocks, the two clock specifier cells must be "CPG_CORE"
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3128-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3128-cru.txt
new file mode 100644
index 000000000000..455a9a00a623
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3128-cru.txt
@@ -0,0 +1,56 @@
+* Rockchip RK3128 Clock and Reset Unit
+
+The RK3128 clock controller generates and supplies clock to various
+controllers within the SoC and also implements a reset controller for SoC
+peripherals.
+
+Required Properties:
+
+- compatible: should be "rockchip,rk3128-cru"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- #clock-cells: should be 1.
+- #reset-cells: should be 1.
+
+Optional Properties:
+
+- rockchip,grf: phandle to the syscon managing the "general register files"
+ If missing, pll rates are not changeable, due to the missing pll lock status.
+
+Each clock is assigned an identifier and client nodes can use this identifier
+to specify the clock which they consume. All available clocks are defined as
+preprocessor macros in the dt-bindings/clock/rk3128-cru.h headers and can be
+used in device tree sources. Similar macros exist for the reset sources in
+these files.
+
+External clocks:
+
+There are several clocks that are generated outside the SoC. It is expected
+that they are defined using standard clock bindings with following
+clock-output-names:
+ - "xin24m" - crystal input - required,
+ - "ext_i2s" - external I2S clock - optional,
+ - "gmac_clkin" - external GMAC clock - optional
+
+Example: Clock controller node:
+
+ cru: cru@20000000 {
+ compatible = "rockchip,rk3128-cru";
+ reg = <0x20000000 0x1000>;
+ rockchip,grf = <&grf>;
+
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+Example: UART controller node that consumes the clock generated by the clock
+ controller:
+
+ uart2: serial@20068000 {
+ compatible = "rockchip,serial";
+ reg = <0x20068000 0x100>;
+ interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <24000000>;
+ clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
+ clock-names = "sclk_uart", "pclk_uart";
+ };
diff --git a/Documentation/devicetree/bindings/clock/sun8i-de2.txt b/Documentation/devicetree/bindings/clock/sun8i-de2.txt
new file mode 100644
index 000000000000..631d27cd89d6
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/sun8i-de2.txt
@@ -0,0 +1,31 @@
+Allwinner Display Engine 2.0 Clock Control Binding
+--------------------------------------------------
+
+Required properties :
+- compatible: must contain one of the following compatibles:
+ - "allwinner,sun8i-a83t-de2-clk"
+ - "allwinner,sun8i-v3s-de2-clk"
+ - "allwinner,sun50i-h5-de2-clk"
+
+- reg: Must contain the registers base address and length
+- clocks: phandle to the clocks feeding the display engine subsystem.
+ Two are needed:
+ - "mod": the display engine module clock
+ - "bus": the bus clock for the whole display engine subsystem
+- clock-names: Must contain the clock names described just above
+- resets: phandle to the reset control for the display engine subsystem.
+- #clock-cells : must contain 1
+- #reset-cells : must contain 1
+
+Example:
+de2_clocks: clock@1000000 {
+ compatible = "allwinner,sun8i-a83t-de2-clk";
+ reg = <0x01000000 0x100000>;
+ clocks = <&ccu CLK_BUS_DE>,
+ <&ccu CLK_DE>;
+ clock-names = "bus",
+ "mod";
+ resets = <&ccu RST_BUS_DE>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
index f465647a4dd2..df9fad58facd 100644
--- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
@@ -6,6 +6,8 @@ Required properties :
- "allwinner,sun6i-a31-ccu"
- "allwinner,sun8i-a23-ccu"
- "allwinner,sun8i-a33-ccu"
+ - "allwinner,sun8i-a83t-ccu"
+ - "allwinner,sun8i-a83t-r-ccu"
- "allwinner,sun8i-h3-ccu"
- "allwinner,sun8i-h3-r-ccu"
- "allwinner,sun8i-v3s-ccu"
@@ -18,11 +20,12 @@ Required properties :
- clocks: phandle to the oscillators feeding the CCU. Two are needed:
- "hosc": the high frequency oscillator (usually at 24MHz)
- "losc": the low frequency oscillator (usually at 32kHz)
+ On the A83T, this is the internal 16MHz oscillator divided by 512
- clock-names: Must contain the clock names described just above
- #clock-cells : must contain 1
- #reset-cells : must contain 1
-For the PRCM CCUs on H3/A64, two more clocks are needed:
+For the PRCM CCUs on A83T/H3/A64, two more clocks are needed:
- "pll-periph": the SoC's peripheral PLL from the main CCU
- "iosc": the SoC's internal frequency oscillator
diff --git a/Documentation/devicetree/bindings/clock/ti,sci-clk.txt b/Documentation/devicetree/bindings/clock/ti,sci-clk.txt
new file mode 100644
index 000000000000..1e884c40ab50
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/ti,sci-clk.txt
@@ -0,0 +1,37 @@
+Texas Instruments TI-SCI Clocks
+===============================
+
+All clocks on Texas Instruments' SoCs that contain a System Controller
+are controlled only by this entity. Communication between a host processor
+running an OS and the System Controller happens through a protocol known
+as TI-SCI[1]. This clock implementation plugs into the common clock
+framework and makes use of the TI-SCI protocol on clock API requests.
+
+[1] Documentation/devicetree/bindings/arm/keystone/ti,sci.txt
+
+Required properties:
+-------------------
+- compatible: Must be "ti,k2g-sci-clk"
+- #clock-cells: Shall be 2.
+ In clock consumers, this cell represents the device ID and clock ID
+ exposed by the PM firmware. The assignments can be found in the header
+ files <dt-bindings/genpd/<soc>.h> (which covers the device IDs) and
+ <dt-bindings/clock/<soc>.h> (which covers the clock IDs), where <soc>
+ is the SoC involved, for example 'k2g'.
+
+Examples:
+--------
+
+pmmc: pmmc {
+ compatible = "ti,k2g-sci";
+
+ k2g_clks: clocks {
+ compatible = "ti,k2g-sci-clk";
+ #clock-cells = <2>;
+ };
+};
+
+uart0: serial@2530c00 {
+ compatible = "ns16550a";
+ clocks = <&k2g_clks 0x2c 0>;
+};
diff --git a/Documentation/devicetree/bindings/clock/ti-clkctrl.txt b/Documentation/devicetree/bindings/clock/ti-clkctrl.txt
new file mode 100644
index 000000000000..48ee6991f2cc
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/ti-clkctrl.txt
@@ -0,0 +1,56 @@
+Texas Instruments clkctrl clock binding
+
+Texas Instruments SoCs can have a clkctrl clock controller for each
+interconnect target module. The clkctrl clock controller manages functional
+and interface clocks for each module. Each clkctrl controller can also
+gate one or more optional functional clocks for a module, and can have one
+or more clock muxes. There is a clkctrl clock controller typically for each
+interconnect target module on omap4 and later variants.
+
+The clock consumers can specify the index of the clkctrl clock using
+the hardware offset from the clkctrl instance register space. The optional
+clocks can be specified by clkctrl hardware offset and the index of the
+optional clock.
+
+For more information, please see the Linux clock framework binding at
+Documentation/devicetree/bindings/clock/clock-bindings.txt.
+
+Required properties :
+- compatible : shall be "ti,clkctrl"
+- #clock-cells : shall contain 2 with the first entry being the instance
+ offset from the clock domain base and the second being the
+ clock index
+
+Example: Clock controller node on omap 4430:
+
+&cm2 {
+ l4per: cm@1400 {
+ cm_l4per@0 {
+ cm_l4per_clkctrl: clk@20 {
+ compatible = "ti,clkctrl";
+ reg = <0x20 0x1b0>;
+ #clock-cells = <2>;
+ };
+ };
+ };
+};
+
+Example: Preprocessor helper macros in dt-bindings/clock/ti-clkctrl.h
+
+#define OMAP4_CLKCTRL_OFFSET 0x20
+#define OMAP4_CLKCTRL_INDEX(offset) ((offset) - OMAP4_CLKCTRL_OFFSET)
+#define MODULEMODE_HWCTRL 1
+#define MODULEMODE_SWCTRL 2
+
+#define OMAP4_GPTIMER10_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28)
+#define OMAP4_GPTIMER11_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30)
+#define OMAP4_GPTIMER2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38)
+...
+#define OMAP4_GPIO2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x60)
+
+Example: Clock consumer node for GPIO2:
+
+&gpio2 {
+ clocks = <&cm_l4per_clkctrl OMAP4_GPIO2_CLKCTRL 0
+ &cm_l4per_clkctrl OMAP4_GPIO2_CLKCTRL 8>;
+};
diff --git a/Documentation/devicetree/bindings/common-properties.txt b/Documentation/devicetree/bindings/common-properties.txt
index 3193979b1d05..697714f8d75c 100644
--- a/Documentation/devicetree/bindings/common-properties.txt
+++ b/Documentation/devicetree/bindings/common-properties.txt
@@ -1,6 +1,6 @@
Common properties
-The ePAPR specification does not define any properties related to hardware
+The Devicetree Specification does not define any properties related to hardware
byteswapping, but endianness issues show up frequently in porting Linux to
different machine types. This document attempts to provide a consistent
way of handling byteswapping across drivers.
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
index 10a425f451fc..7aef0eae58d4 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
@@ -118,8 +118,8 @@ PROPERTIES
Definition: A list of clock name strings in the same order as the
clocks property.
- Note: All other standard properties (see the ePAPR) are allowed
- but are optional.
+ Note: All other standard properties (see the Devicetree Specification)
+ are allowed but are optional.
EXAMPLE
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec6.txt b/Documentation/devicetree/bindings/crypto/fsl-sec6.txt
index baf8a3c1b469..73b0eb950bb3 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-sec6.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec6.txt
@@ -55,8 +55,8 @@ PROPERTIES
triplet that includes the child address, parent address, &
length.
- Note: All other standard properties (see the ePAPR) are allowed
- but are optional.
+ Note: All other standard properties (see the Devicetree Specification)
+ are allowed but are optional.
EXAMPLE
crypto@a0000 {
diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
index 00ea670b8c4d..06668bca7ffc 100644
--- a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
+++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
@@ -78,6 +78,7 @@ graph bindings specified in Documentation/devicetree/bindings/graph.txt.
remote endpoint phandle should be a reference to a valid mipi_dsi_host device
node.
- Video port 1 for the HDMI output
+- Audio port 2 for the HDMI audio input
Example
@@ -112,5 +113,12 @@ Example
remote-endpoint = <&hdmi_connector_in>;
};
};
+
+ port@2 {
+ reg = <2>;
+ codec_endpoint: endpoint {
+ remote-endpoint = <&i2s0_cpu_endpoint>;
+ };
+ };
};
};
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt b/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
index f6b3f36d422b..81b68580e199 100644
--- a/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
@@ -25,7 +25,8 @@ Required properties:
- clock-names: Shall contain "iahb" and "isfr" as defined in dw_hdmi.txt.
- ports: See dw_hdmi.txt. The DWC HDMI shall have one port numbered 0
corresponding to the video input of the controller and one port numbered 1
- corresponding to its HDMI output. Each port shall have a single endpoint.
+ corresponding to its HDMI output, and one port numbered 2 corresponding to
+ the sound input of the controller. Each port shall have a single endpoint.
Optional properties:
@@ -59,6 +60,12 @@ Example:
remote-endpoint = <&hdmi0_con>;
};
};
+ port@2 {
+ reg = <2>;
+ rcar_dw_hdmi0_sound_in: endpoint {
+ remote-endpoint = <&hdmi_sound_out>;
+ };
+ };
};
};
diff --git a/Documentation/devicetree/bindings/display/panel/display-timing.txt b/Documentation/devicetree/bindings/display/panel/display-timing.txt
index 81a75893d1b8..58fa3e48481d 100644
--- a/Documentation/devicetree/bindings/display/panel/display-timing.txt
+++ b/Documentation/devicetree/bindings/display/panel/display-timing.txt
@@ -57,11 +57,11 @@ can be specified.
The parameters are defined as:
+----------+-------------------------------------+----------+-------+
- | | ↑ | | |
+ | | ^ | | |
| | |vback_porch | | |
- | | ↓ | | |
+ | | v | | |
+----------#######################################----------+-------+
- | # ↑ # | |
+ | # ^ # | |
| # | # | |
| hback # | # hfront | hsync |
| porch # | hactive # porch | len |
@@ -69,15 +69,15 @@ The parameters are defined as:
| # | # | |
| # |vactive # | |
| # | # | |
- | # ↓ # | |
+ | # v # | |
+----------#######################################----------+-------+
- | | ↑ | | |
+ | | ^ | | |
| | |vfront_porch | | |
- | | ↓ | | |
+ | | v | | |
+----------+-------------------------------------+----------+-------+
- | | ↑ | | |
+ | | ^ | | |
| | |vsync_len | | |
- | | ↓ | | |
+ | | v | | |
+----------+-------------------------------------+----------+-------+
Example:
diff --git a/Documentation/devicetree/bindings/dma/arm-pl08x.txt b/Documentation/devicetree/bindings/dma/arm-pl08x.txt
index 8a0097a029d3..0ba81f79266f 100644
--- a/Documentation/devicetree/bindings/dma/arm-pl08x.txt
+++ b/Documentation/devicetree/bindings/dma/arm-pl08x.txt
@@ -3,6 +3,11 @@
Required properties:
- compatible: "arm,pl080", "arm,primecell";
"arm,pl081", "arm,primecell";
+ "faraday,ftdmac020", "arm,primecell"
+- arm,primecell-periphid: on the FTDMAC020 the primecell ID is not hard-coded
+ in the hardware and must be specified here as <0x0003b080>. This number
+ follows the PrimeCell standard numbering using the JEP106 vendor code 0x38
+ for Faraday Technology.
- reg: Address range of the PL08x registers
- interrupt: The PL08x interrupt number
- clocks: The clock running the IP core clock
@@ -20,8 +25,8 @@ Optional properties:
- dma-requests: contains the total number of DMA requests supported by the DMAC
- memcpy-burst-size: the size of the bursts for memcpy: 1, 4, 8, 16, 32
64, 128 or 256 bytes are legal values
-- memcpy-bus-width: the bus width used for memcpy: 8, 16 or 32 are legal
- values
+- memcpy-bus-width: the bus width used for memcpy in bits: 8, 16 or 32 are legal
+ values, the Faraday FTDMAC020 can also accept 64 bits
Clients
Required properties:
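A sketch of an FTDMAC020 controller node using the properties described above; the unit address, interrupt, clock phandle and the memcpy values are illustrative assumptions:

	dma-controller@67000000 {
		compatible = "faraday,ftdmac020", "arm,primecell";
		/* the FTDMAC020 has no hard-coded PrimeCell ID in hardware */
		arm,primecell-periphid = <0x0003b080>;
		reg = <0x67000000 0x1000>;
		interrupts = <9 IRQ_TYPE_EDGE_RISING>;
		clocks = <&ahb_clk>;
		memcpy-burst-size = <256>;
		memcpy-bus-width = <32>;
	};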
diff --git a/Documentation/devicetree/bindings/dma/brcm,iproc-sba.txt b/Documentation/devicetree/bindings/dma/brcm,iproc-sba.txt
new file mode 100644
index 000000000000..092913a28457
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/brcm,iproc-sba.txt
@@ -0,0 +1,29 @@
+* Broadcom SBA RAID engine
+
+Required properties:
+- compatible: Should be one of the following
+ "brcm,iproc-sba"
+ "brcm,iproc-sba-v2"
+ The "brcm,iproc-sba" has support for only 6 PQ coefficients
+ The "brcm,iproc-sba-v2" has support for only 30 PQ coefficients
+- mboxes: List of phandle and mailbox channel specifiers
+
+Example:
+
+raid_mbox: mbox@67400000 {
+ ...
+ #mbox-cells = <3>;
+ ...
+};
+
+raid0 {
+ compatible = "brcm,iproc-sba-v2";
+ mboxes = <&raid_mbox 0 0x1 0xffff>,
+ <&raid_mbox 1 0x1 0xffff>,
+ <&raid_mbox 2 0x1 0xffff>,
+ <&raid_mbox 3 0x1 0xffff>,
+ <&raid_mbox 4 0x1 0xffff>,
+ <&raid_mbox 5 0x1 0xffff>,
+ <&raid_mbox 6 0x1 0xffff>,
+ <&raid_mbox 7 0x1 0xffff>;
+};
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index 3316a9c2e638..79a204d50234 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -30,8 +30,9 @@ Required Properties:
- interrupts: interrupt specifiers for the DMAC, one for each entry in
interrupt-names.
-- interrupt-names: one entry per channel, named "ch%u", where %u is the
- channel number ranging from zero to the number of channels minus one.
+- interrupt-names: one entry for the error interrupt, named "error", plus one
+ entry per channel, named "ch%u", where %u is the channel number ranging from
+ zero to the number of channels minus one.
- clock-names: "fck" for the functional clock
- clocks: a list of phandle + clock-specifier pairs, one for each entry
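For instance, a hypothetical 8-channel instance would then name its interrupts as follows (a sketch only; the channel count is an assumption):

	interrupt-names = "error",
			  "ch0", "ch1", "ch2", "ch3",
			  "ch4", "ch5", "ch6", "ch7";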
diff --git a/Documentation/devicetree/bindings/dma/shdma.txt b/Documentation/devicetree/bindings/dma/shdma.txt
index 2a3f3b8946b9..a91920a49433 100644
--- a/Documentation/devicetree/bindings/dma/shdma.txt
+++ b/Documentation/devicetree/bindings/dma/shdma.txt
@@ -1,6 +1,6 @@
* SHDMA Device Tree bindings
-Sh-/r-mobile and r-car systems often have multiple identical DMA controller
+Sh-/r-mobile and R-Car systems often have multiple identical DMA controller
instances, capable of serving any of a common set of DMA slave devices, using
the same configuration. To describe this topology we require all compatible
SHDMA DT nodes to be placed under a DMA multiplexer node. All such compatible
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
index 01e331a5f3e7..38ca2201e8ae 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
@@ -2,17 +2,27 @@
Required properties:
-- compatible : Should be "marvell,orion-gpio", "marvell,mv78200-gpio"
- or "marvell,armadaxp-gpio". "marvell,orion-gpio" should be used for
- Orion, Kirkwood, Dove, Discovery (except MV78200) and Armada
- 370. "marvell,mv78200-gpio" should be used for the Discovery
- MV78200. "marvel,armadaxp-gpio" should be used for all Armada XP
- SoCs (MV78230, MV78260, MV78460).
+- compatible : Should be "marvell,orion-gpio", "marvell,mv78200-gpio",
+ "marvell,armadaxp-gpio" or "marvell,armada-8k-gpio".
+
+ "marvell,orion-gpio" should be used for Orion, Kirkwood, Dove,
+ Discovery (except MV78200) and Armada 370. "marvell,mv78200-gpio"
+ should be used for the Discovery MV78200.
+
+ "marvel,armadaxp-gpio" should be used for all Armada XP SoCs
+ (MV78230, MV78260, MV78460).
+
+ "marvell,armada-8k-gpio" should be used for the Armada 7K and 8K
+ SoCs (either from AP or CP), see
+ Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
+ and
+ Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
+ for specific details about the offset property.
- reg: Address and length of the register set for the device. Only one
entry is expected, except for the "marvell,armadaxp-gpio" variant
for which two entries are expected: one for the general registers,
- one for the per-cpu registers.
+ one for the per-cpu registers. Not used for marvell,armada-8k-gpio.
- interrupts: The list of interrupts that are used for all the pins
managed by this GPIO bank. There can be more than one interrupt
diff --git a/Documentation/devicetree/bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt
index 84ede036f73d..802402f6cc5d 100644
--- a/Documentation/devicetree/bindings/gpio/gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio.txt
@@ -74,11 +74,14 @@ GPIO pin number, and GPIO flags as accepted by the "qe_pio_e" gpio-controller.
Optional standard bitfield specifiers for the last cell:
- Bit 0: 0 means active high, 1 means active low
-- Bit 1: 1 means single-ended wiring, see:
+- Bit 1: 0 means push-pull wiring, see:
+ https://en.wikipedia.org/wiki/Push-pull_output
+ 1 means single-ended wiring, see:
https://en.wikipedia.org/wiki/Single-ended_triode
- When used with active-low, this means open drain/collector, see:
+- Bit 2: 0 means open-source, 1 means open drain, see:
https://en.wikipedia.org/wiki/Open_collector
- When used with active-high, this means open source/emitter
+- Bit 3: 0 means the output should be maintained during sleep/low-power mode
+ 1 means the output state can be lost during sleep/low-power mode
1.1) GPIO specifier best practices
----------------------------------
@@ -282,8 +285,8 @@ Example 1:
};
Here, a single GPIO controller has GPIOs 0..9 routed to pin controller
-pinctrl1's pins 20..29, and GPIOs 10..19 routed to pin controller pinctrl2's
-pins 50..59.
+pinctrl1's pins 20..29, and GPIOs 10..29 routed to pin controller pinctrl2's
+pins 50..69.
Example 2:
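As a consumer-side sketch of the flag bits documented in the first hunk above (independent of Example 2, whose body is unchanged and therefore not shown here), using raw numeric values so that no particular header macros are assumed:

	/* bit 0 (active low) + bit 1 (single-ended) + bit 2 (open drain) = 7 */
	reset-gpios = <&gpioa 17 7>;

	/* bit 3 set (8): the output state may be lost during sleep */
	enable-gpios = <&gpiob 4 8>;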
diff --git a/Documentation/devicetree/bindings/gpio/ingenic,gpio.txt b/Documentation/devicetree/bindings/gpio/ingenic,gpio.txt
new file mode 100644
index 000000000000..7988aeb725f4
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/ingenic,gpio.txt
@@ -0,0 +1,46 @@
+Ingenic jz47xx GPIO controller
+
+The Ingenic GPIO driver node must be a sub-node of the Ingenic pinctrl
+driver node.
+
+Required properties:
+--------------------
+
+ - compatible: Must contain one of:
+ - "ingenic,jz4740-gpio"
+ - "ingenic,jz4770-gpio"
+ - "ingenic,jz4780-gpio"
+ - reg: The GPIO bank number.
+ - interrupt-controller: Marks the device node as an interrupt controller.
+ - interrupts: Interrupt specifier for the controller's interrupt.
+ - #interrupt-cells: Should be 2. Refer to
+ ../interrupt-controller/interrupts.txt for more details.
+ - gpio-controller: Marks the device node as a GPIO controller.
+ - #gpio-cells: Should be 2. The first cell is the GPIO number and the second
+ cell specifies GPIO flags, as defined in <dt-bindings/gpio/gpio.h>. Only the
+ GPIO_ACTIVE_HIGH and GPIO_ACTIVE_LOW flags are supported.
+ - gpio-ranges: Range of pins managed by the GPIO controller. Refer to
+ 'gpio.txt' in this directory for more details.
+
+Example:
+--------
+
+&pinctrl {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpa: gpio@0 {
+ compatible = "ingenic,jz4740-gpio";
+ reg = <0>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <28>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
index 7c1ab3b3254f..6826a371fb69 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+++ b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
@@ -3,6 +3,7 @@
Required Properties:
- compatible: should contain one of the following.
+ - "renesas,gpio-r8a7743": for R8A7743 (RZ/G1M) compatible GPIO controller.
- "renesas,gpio-r8a7778": for R8A7778 (R-Mobile M1) compatible GPIO controller.
- "renesas,gpio-r8a7779": for R8A7779 (R-Car H1) compatible GPIO controller.
- "renesas,gpio-r8a7790": for R8A7790 (R-Car H2) compatible GPIO controller.
diff --git a/Documentation/devicetree/bindings/graph.txt b/Documentation/devicetree/bindings/graph.txt
index fcb1c6a4787b..0415e2c53ba0 100644
--- a/Documentation/devicetree/bindings/graph.txt
+++ b/Documentation/devicetree/bindings/graph.txt
@@ -34,7 +34,7 @@ remote device, an 'endpoint' child node must be provided for each link.
If more than one port is present in a device node or there is more than one
endpoint at a port, or a port node needs to be associated with a selected
hardware interface, a common scheme using '#address-cells', '#size-cells'
-and 'reg' properties is used number the nodes.
+and 'reg' properties is used to number the nodes.
device {
...
@@ -89,9 +89,9 @@ Links between endpoints
Each endpoint should contain a 'remote-endpoint' phandle property that points
to the corresponding endpoint in the port of the remote device. In turn, the
-remote endpoint should contain a 'remote-endpoint' property. If it has one,
-it must not point to another than the local endpoint. Two endpoints with their
-'remote-endpoint' phandles pointing at each other form a link between the
+remote endpoint should contain a 'remote-endpoint' property. If it has one, it
+must not point to anything other than the local endpoint. Two endpoints with
+their 'remote-endpoint' phandles pointing at each other form a link between the
containing ports.
device-1 {
@@ -110,13 +110,12 @@ device-2 {
};
};
-
Required properties
-------------------
If there is more than one 'port' or more than one 'endpoint' node or 'reg'
-property is present in port and/or endpoint nodes the following properties
-are required in a relevant parent node:
+property present in the port and/or endpoint nodes then the following
+properties are required in a relevant parent node:
- #address-cells : number of cells required to define port/endpoint
identifier, should be 1.
diff --git a/Documentation/devicetree/bindings/hwlock/sprd-hwspinlock.txt b/Documentation/devicetree/bindings/hwlock/sprd-hwspinlock.txt
new file mode 100644
index 000000000000..581db9d941ba
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwlock/sprd-hwspinlock.txt
@@ -0,0 +1,23 @@
+SPRD Hardware Spinlock Device Binding
+-------------------------------------
+
+Required properties :
+- compatible : should be "sprd,hwspinlock-r3p0".
+- reg : the register address of hwspinlock.
+- #hwlock-cells : hwlock users only use the hwlock id to represent a specific
+ hwlock, so the number of cells should be <1> here.
+- clock-names : Must contain "enable".
+- clocks : Must contain a phandle entry for the clock in clock-names, see the
+ common clock bindings.
+
+Please look at the generic hwlock binding for usage information for consumers,
+"Documentation/devicetree/bindings/hwlock/hwlock.txt"
+
+Example of hwlock provider:
+ hwspinlock@40500000 {
+ compatible = "sprd,hwspinlock-r3p0";
+ reg = <0 0x40500000 0 0x1000>;
+ #hwlock-cells = <1>;
+ clock-names = "enable";
+ clocks = <&clk_aon_apb_gates0 22>;
+ };
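A matching consumer sketch, following the generic hwlock binding referenced above; the provider label and the client node are assumed for illustration:

	hwlock1: hwspinlock@40500000 {
		/* provider as in the example above, with a label added */
	};

	client-node {
		hwlocks = <&hwlock1 1>;	/* claim hwlock id 1 */
	};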
diff --git a/Documentation/devicetree/bindings/iio/proximity/as3935.txt b/Documentation/devicetree/bindings/iio/proximity/as3935.txt
index ae23dd8da736..38d74314b7ab 100644
--- a/Documentation/devicetree/bindings/iio/proximity/as3935.txt
+++ b/Documentation/devicetree/bindings/iio/proximity/as3935.txt
@@ -3,6 +3,7 @@ Austrian Microsystems AS3935 Franklin lightning sensor device driver
Required properties:
- compatible: must be "ams,as3935"
- reg: SPI chip select number for the device
+ - spi-max-frequency: specifies maximum SPI clock frequency
- spi-cpha: SPI Mode 1. Refer to spi/spi-bus.txt for generic SPI
slave node bindings.
- interrupt-parent : should be the phandle for the interrupt controller
@@ -21,6 +22,7 @@ Example:
as3935@0 {
compatible = "ams,as3935";
reg = <0>;
+ spi-max-frequency = <400000>;
spi-cpha;
interrupt-parent = <&gpio1>;
interrupts = <16 1>;
diff --git a/Documentation/devicetree/bindings/input/dlink,dir685-touchkeys.txt b/Documentation/devicetree/bindings/input/dlink,dir685-touchkeys.txt
new file mode 100644
index 000000000000..10dec1c57abf
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/dlink,dir685-touchkeys.txt
@@ -0,0 +1,21 @@
+* D-Link DIR-685 Touchkeys
+
+This is an I2C one-off touchkey controller based on the Cypress Semiconductor
+CY8C214 MCU with some firmware in its internal 8KB flash. The circuit
+board inside the router is named E119921.
+
+The touchkey device node should be placed inside an I2C bus node.
+
+Required properties:
+- compatible: must be "dlink,dir685-touchkeys"
+- reg: the I2C address of the touchkeys
+- interrupts: reference to the interrupt number
+
+Example:
+
+touchkeys@26 {
+ compatible = "dlink,dir685-touchkeys";
+ reg = <0x26>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <17 IRQ_TYPE_EDGE_FALLING>;
+};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/st,stmfts.txt b/Documentation/devicetree/bindings/input/touchscreen/st,stmfts.txt
new file mode 100644
index 000000000000..9683595cd0f5
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/st,stmfts.txt
@@ -0,0 +1,43 @@
+* ST-Microelectronics FingerTip touchscreen controller
+
+The ST-Microelectronics FingerTip device provides basic touchscreen
+functionality. In addition, the user can enable the touchkey, which can act as
+basic HOME and BACK keys on phones.
+
+The driver also supports hovering, reported as an absolute single-touch event
+with x, y and z coordinates.
+
+Required properties:
+- compatible : must be "st,stmfts"
+- reg : I2C slave address, (e.g. 0x49)
+- interrupt-parent : the phandle to the interrupt controller which provides
+ the interrupt
+- interrupts : interrupt specification
+- avdd-supply : analog power supply
+- vdd-supply : power supply
+- touchscreen-size-x : see touchscreen.txt
+- touchscreen-size-y : see touchscreen.txt
+
+Optional properties:
+- touch-key-connected : specifies whether the touchkey feature is connected
+- ledvdd-supply : power supply to the touch key leds
+
+Example:
+
+i2c@00000000 {
+
+ /* ... */
+
+ touchscreen@49 {
+ compatible = "st,stmfts";
+ reg = <0x49>;
+ interrupt-parent = <&gpa1>;
+ interrupts = <1 IRQ_TYPE_NONE>;
+ touchscreen-size-x = <1599>;
+ touchscreen-size-y = <2559>;
+ touch-key-connected;
+ avdd-supply = <&ldo30_reg>;
+ vdd-supply = <&ldo31_reg>;
+ ledvdd-supply = <&ldo33_reg>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/open-pic.txt b/Documentation/devicetree/bindings/interrupt-controller/open-pic.txt
index 909a902dff85..ccbbfdc53c72 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/open-pic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/open-pic.txt
@@ -92,7 +92,6 @@ Example 2:
* References
-[1] Power.org (TM) Standard for Embedded Power Architecture (TM) Platform
- Requirements (ePAPR), Version 1.0, July 2008.
- (http://www.power.org/resources/downloads/Power_ePAPR_APPROVED_v1.0.pdf)
+[1] Devicetree Specification
+ (https://www.devicetree.org/specifications/)
diff --git a/Documentation/devicetree/bindings/leds/pca963x.txt b/Documentation/devicetree/bindings/leds/pca963x.txt
index dfbdb123a9bf..4eee41482041 100644
--- a/Documentation/devicetree/bindings/leds/pca963x.txt
+++ b/Documentation/devicetree/bindings/leds/pca963x.txt
@@ -10,6 +10,7 @@ Optional properties:
- nxp,period-scale : In some configurations, the chip blinks faster than expected.
This parameter provides a scaling ratio (fixed point, decimal divided
by 1000) to compensate, e.g. 1300=1.3x and 750=0.75x.
+- nxp,inverted-out: invert the polarity of the generated PWM
Each led is represented as a sub-node of the nxp,pca963x device.
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt
new file mode 100644
index 000000000000..fb961c310f44
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt
@@ -0,0 +1,46 @@
+Binding for the Qualcomm APCS global block
+==========================================
+
+This binding describes the APCS "global" block found in various Qualcomm
+platforms.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,msm8916-apcs-kpss-global",
+ "qcom,msm8996-apcs-hmss-global"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: must specify the base address and size of the global block
+
+- #mbox-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: as described in mailbox.txt, must be 1
+
+
+= EXAMPLE
+The following example describes the APCS HMSS found in MSM8996 and part of the
+GLINK RPM referencing the "rpm_hlos" doorbell therein.
+
+ apcs_glb: mailbox@9820000 {
+ compatible = "qcom,msm8996-apcs-hmss-global";
+ reg = <0x9820000 0x1000>;
+
+ #mbox-cells = <1>;
+ };
+
+ rpm-glink {
+ compatible = "qcom,glink-rpm";
+
+ interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+
+ mboxes = <&apcs_glb 0>;
+ mbox-names = "rpm_hlos";
+ };
+
diff --git a/Documentation/devicetree/bindings/media/cec.txt b/Documentation/devicetree/bindings/media/cec.txt
new file mode 100644
index 000000000000..22d7aae3d3d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/cec.txt
@@ -0,0 +1,8 @@
+Common bindings for HDMI CEC adapters
+
+- hdmi-phandle: phandle to the HDMI controller.
+
+- needs-hpd: if present the CEC support is only available when the HPD
+ is high. Some boards only let the CEC pin through if the HPD is high,
+ for example if there is a level converter that uses the HPD to power
+ up or down.
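A sketch of a CEC adapter node carrying these common properties; the compatible string, unit address and the &hdmi phandle are placeholders:

	cec@101b0000 {
		compatible = "vendor,soc-cec";	/* placeholder compatible */
		reg = <0x101b0000 0x1000>;
		hdmi-phandle = <&hdmi>;
		needs-hpd;
	};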
diff --git a/Documentation/devicetree/bindings/media/i2c/adv7180.txt b/Documentation/devicetree/bindings/media/i2c/adv7180.txt
index 4da486f96ff6..552b6a82cb1f 100644
--- a/Documentation/devicetree/bindings/media/i2c/adv7180.txt
+++ b/Documentation/devicetree/bindings/media/i2c/adv7180.txt
@@ -6,6 +6,8 @@ digital interfaces like MIPI CSI-2 or parallel video.
Required Properties :
- compatible : value must be one of
"adi,adv7180"
+ "adi,adv7180cp"
+ "adi,adv7180st"
"adi,adv7182"
"adi,adv7280"
"adi,adv7280-m"
@@ -15,6 +17,19 @@ Required Properties :
"adi,adv7282"
"adi,adv7282-m"
+Device nodes of "adi,adv7180cp" and "adi,adv7180st" must contain one
+'port' child node per device input and output port, in accordance with the
+video interface bindings defined in
+Documentation/devicetree/bindings/media/video-interfaces.txt. The port
+nodes are numbered as follows.
+
+ Port adv7180cp adv7180st
+-------------------------------------------------------------------
+ Input 0-2 0-5
+ Output 3 6
+
+The digital output port node must contain at least one endpoint.
+
Optional Properties :
- powerdown-gpios: reference to the GPIO connected to the powerdown pin,
if any.
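A sketch of an "adi,adv7180cp" node wiring one analog input (port 0) and the digital output (port 3) as per the table above; the I2C address, labels and remote endpoints are assumptions:

	video-receiver@20 {
		compatible = "adi,adv7180cp";
		reg = <0x20>;
		#address-cells = <1>;
		#size-cells = <0>;

		port@0 {
			reg = <0>;
			adv7180_in0: endpoint {
				remote-endpoint = <&composite_con>;
			};
		};

		port@3 {
			reg = <3>;
			adv7180_out: endpoint {
				remote-endpoint = <&vin0_in>;
			};
		};
	};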
diff --git a/Documentation/devicetree/bindings/media/i2c/max2175.txt b/Documentation/devicetree/bindings/media/i2c/max2175.txt
new file mode 100644
index 000000000000..02b4e9cd7b1b
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/max2175.txt
@@ -0,0 +1,59 @@
+Maxim Integrated MAX2175 RF to Bits tuner
+-----------------------------------------
+
+The MAX2175 IC is an advanced analog/digital hybrid-radio receiver with an
+RF to Bits® front-end designed for software-defined radio solutions.
+
+Required properties:
+--------------------
+- compatible: "maxim,max2175" for MAX2175 RF-to-bits tuner.
+- clocks: clock specifier.
+- port: child port node corresponding to the I2S output, in accordance with
+ the video interface bindings defined in
+ Documentation/devicetree/bindings/media/video-interfaces.txt. The port
+ node must contain at least one endpoint.
+
+Optional properties:
+--------------------
+- maxim,master : phandle to the master tuner if it is a slave. This
+ is used to define two tuners in diversity mode
+ (1 master, 1 slave). By default each tuner is an
+ individual master.
+- maxim,refout-load : load capacitance value (in picofarads) on reference
+ output drive level. The possible load values are:
+ 0 (default - refout disabled)
+ 10
+ 20
+ 30
+ 40
+ 60
+ 70
+- maxim,am-hiz-filter : empty property indicates the AM Hi-Z filter is used
+ in this hardware for AM antenna input.
+
+Example:
+--------
+
+Board specific DTS file
+
+/* Fixed XTAL clock node */
+maxim_xtal: clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <36864000>;
+};
+
+/* A tuner device instance under i2c bus */
+max2175_0: tuner@60 {
+ compatible = "maxim,max2175";
+ reg = <0x60>;
+ clocks = <&maxim_xtal>;
+ maxim,refout-load = <10>;
+
+ port {
+ max2175_0_ep: endpoint {
+ remote-endpoint = <&slave_rx_device>;
+ };
+ };
+
+};
diff --git a/Documentation/devicetree/bindings/media/i2c/ov5640.txt b/Documentation/devicetree/bindings/media/i2c/ov5640.txt
new file mode 100644
index 000000000000..540b36c4b1f2
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ov5640.txt
@@ -0,0 +1,45 @@
+* Omnivision OV5640 MIPI CSI-2 sensor
+
+Required Properties:
+- compatible: should be "ovti,ov5640"
+- clocks: reference to the xclk input clock.
+- clock-names: should be "xclk".
+- DOVDD-supply: Digital I/O voltage supply, 1.8 volts
+- AVDD-supply: Analog voltage supply, 2.8 volts
+- DVDD-supply: Digital core voltage supply, 1.5 volts
+
+Optional Properties:
+- reset-gpios: reference to the GPIO connected to the reset pin, if any.
+ This is an active low signal to the OV5640.
+- powerdown-gpios: reference to the GPIO connected to the powerdown pin,
+ if any. This is an active high signal to the OV5640.
+
+The device node must contain one 'port' child node for its digital output
+video port, in accordance with the video interface bindings defined in
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+
+&i2c1 {
+ ov5640: camera@3c {
+ compatible = "ovti,ov5640";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ov5640>;
+ reg = <0x3c>;
+ clocks = <&clks IMX6QDL_CLK_CKO>;
+ clock-names = "xclk";
+ DOVDD-supply = <&vgen4_reg>; /* 1.8v */
+ AVDD-supply = <&vgen3_reg>; /* 2.8v */
+ DVDD-supply = <&vgen2_reg>; /* 1.5v */
+ powerdown-gpios = <&gpio1 19 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio1 20 GPIO_ACTIVE_LOW>;
+
+ port {
+ ov5640_to_mipi_csi2: endpoint {
+ remote-endpoint = <&mipi_csi2_from_ov5640>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/media/imx.txt b/Documentation/devicetree/bindings/media/imx.txt
new file mode 100644
index 000000000000..77f4b0a7fd2b
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/imx.txt
@@ -0,0 +1,53 @@
+Freescale i.MX Media Video Device
+=================================
+
+Video Media Controller node
+---------------------------
+
+This is the media controller node for video capture support. It is a
+virtual device that lists the camera serial interface nodes that the
+media device will control.
+
+Required properties:
+- compatible : "fsl,imx-capture-subsystem";
+- ports : Should contain a list of phandles pointing to camera
+ sensor interface ports of IPU devices
+
+example:
+
+capture-subsystem {
+ compatible = "fsl,imx-capture-subsystem";
+ ports = <&ipu1_csi0>, <&ipu1_csi1>;
+};
+
+
+mipi_csi2 node
+--------------
+
+This is the device node for the MIPI CSI-2 Receiver core in the i.MX
+SoC. This is a Synopsys Designware MIPI CSI-2 host controller core
+combined with a D-PHY core mixed into the same register block. In
+addition this device consists of an i.MX-specific "CSI2IPU gasket"
+glue logic, also controlled from the same register block. The CSI2IPU
+gasket demultiplexes the four virtual channel streams from the host
+controller's 32-bit output image bus onto four 16-bit parallel busses
+to the i.MX IPU CSIs.
+
+Required properties:
+- compatible : "fsl,imx6-mipi-csi2";
+- reg : physical base address and length of the register set;
+- clocks : the MIPI CSI-2 receiver requires three clocks: hsi_tx
+ (the D-PHY clock), video_27m (D-PHY PLL reference
+ clock), and eim_podf;
+- clock-names : must contain "dphy", "ref", "pix";
+- port@* : five port nodes must exist, containing endpoints
+ connecting to the source and sink devices according to
+ of_graph bindings. The first port is an input port,
+ connecting with a MIPI CSI-2 source, and ports 1
+ through 4 are output ports connecting with parallel
+ bus sink endpoint nodes and correspond to the four
+ MIPI CSI-2 virtual channel outputs.
+
+Optional properties:
+- interrupts : must contain two level-triggered interrupts,
+ in order: 100 and 101;
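A sketch of the mipi_csi2 node described above, showing only the CSI-2 input port and the first virtual-channel output; the clock indices and the IPU-side endpoint label are assumptions, and the input endpoint pairs with the ov5640 example earlier in this series:

	mipi_csi2: mipi@21dc000 {
		compatible = "fsl,imx6-mipi-csi2";
		reg = <0x021dc000 0x4000>;
		#address-cells = <1>;
		#size-cells = <0>;
		clocks = <&clks 138>, <&clks 139>,
			 <&clks 140>;	/* hsi_tx, video_27m, eim_podf; indices assumed */
		clock-names = "dphy", "ref", "pix";

		port@0 {
			reg = <0>;
			mipi_csi2_from_ov5640: endpoint {
				remote-endpoint = <&ov5640_to_mipi_csi2>;
				clock-lanes = <0>;
				data-lanes = <1 2>;
			};
		};

		port@1 {
			reg = <1>;
			mipi_vc0_out: endpoint {
				remote-endpoint = <&ipu1_csi0_mux_from_mipi_vc0>;
			};
		};

		/* ports 2 to 4 carry virtual channels 1 to 3 in the same way */
	};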
diff --git a/Documentation/devicetree/bindings/media/mediatek-mdp.txt b/Documentation/devicetree/bindings/media/mediatek-mdp.txt
index 4182063a54db..0d03e3ae2be2 100644
--- a/Documentation/devicetree/bindings/media/mediatek-mdp.txt
+++ b/Documentation/devicetree/bindings/media/mediatek-mdp.txt
@@ -2,7 +2,7 @@
Media Data Path is used for scaling and color space conversion.
-Required properties (controller (parent) node):
+Required properties (controller node):
- compatible: "mediatek,mt8173-mdp"
- mediatek,vpu: the node of video processor unit, see
Documentation/devicetree/bindings/media/mediatek-vpu.txt for details.
@@ -32,21 +32,16 @@ Required properties (DMA function blocks, child node):
for details.
Example:
-mdp {
- compatible = "mediatek,mt8173-mdp";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
- mediatek,vpu = <&vpu>;
-
mdp_rdma0: rdma@14001000 {
compatible = "mediatek,mt8173-mdp-rdma";
+ "mediatek,mt8173-mdp";
reg = <0 0x14001000 0 0x1000>;
clocks = <&mmsys CLK_MM_MDP_RDMA0>,
<&mmsys CLK_MM_MUTEX_32K>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_MM>;
iommus = <&iommu M4U_PORT_MDP_RDMA0>;
mediatek,larb = <&larb0>;
+ mediatek,vpu = <&vpu>;
};
mdp_rdma1: rdma@14002000 {
@@ -106,4 +101,3 @@ mdp {
iommus = <&iommu M4U_PORT_MDP_WROT1>;
mediatek,larb = <&larb4>;
};
-};
diff --git a/Documentation/devicetree/bindings/media/qcom,venus.txt b/Documentation/devicetree/bindings/media/qcom,venus.txt
new file mode 100644
index 000000000000..2693449daf73
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/qcom,venus.txt
@@ -0,0 +1,107 @@
+* Qualcomm Venus video encoder/decoder accelerators
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: Value should contain one of:
+ - "qcom,msm8916-venus"
+ - "qcom,msm8996-venus"
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Register base address and length of the register map.
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Should contain interrupt line number.
+- clocks:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A list of phandle and clock specifier pairs, as listed
+ in the clock-names property.
+- clock-names:
+ Usage: required for msm8916
+ Value type: <stringlist>
+ Definition: Should contain the following entries:
+ - "core" Core video accelerator clock
+ - "iface" Video accelerator AHB clock
+ - "bus" Video accelerator AXI clock
+- clock-names:
+ Usage: required for msm8996
+ Value type: <stringlist>
+ Definition: Should contain the following entries:
+ - "core" Core video accelerator clock
+ - "iface" Video accelerator AHB clock
+ - "bus" Video accelerator AXI clock
+ - "mbus" Video MAXI clock
+- power-domains:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A phandle and power domain specifier pair for the
+ power domain that is responsible for collapsing
+ and restoring power to the peripheral.
+- iommus:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A list of phandle and IOMMU specifier pairs.
+- memory-region:
+ Usage: required
+ Value type: <phandle>
+ Definition: reference to the reserved-memory for the firmware
+ memory region.
+
+* Subnodes
+The Venus video-codec node must contain two subnodes representing
+video-decoder and video-encoder.
+
+Each video-encoder or video-decoder subnode should have:
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: Value should contain "venus-decoder" or "venus-encoder"
+- clocks:
+ Usage: required for msm8996
+ Value type: <prop-encoded-array>
+ Definition: A list of phandle and clock specifier pairs, as listed
+ in the clock-names property.
+- clock-names:
+ Usage: required for msm8996
+ Value type: <stringlist>
+ Definition: Should contain the following entries:
+ - "core" Subcore video accelerator clock
+
+- power-domains:
+ Usage: required for msm8996
+ Value type: <prop-encoded-array>
+ Definition: A phandle and power domain specifier pair for the
+ power domain that is responsible for collapsing
+ and restoring power to the subcore.
+
+* An Example
+ video-codec@1d00000 {
+ compatible = "qcom,msm8916-venus";
+ reg = <0x01d00000 0xff000>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_VENUS0_VCODEC0_CLK>,
+ <&gcc GCC_VENUS0_AHB_CLK>,
+ <&gcc GCC_VENUS0_AXI_CLK>;
+ clock-names = "core", "iface", "bus";
+ power-domains = <&gcc VENUS_GDSC>;
+ iommus = <&apps_iommu 5>;
+ memory-region = <&venus_mem>;
+
+ video-decoder {
+ compatible = "venus-decoder";
+ clocks = <&mmcc VIDEO_SUBCORE0_CLK>;
+ clock-names = "core";
+ power-domains = <&mmcc VENUS_CORE0_GDSC>;
+ };
+
+ video-encoder {
+ compatible = "venus-encoder";
+ clocks = <&mmcc VIDEO_SUBCORE1_CLK>;
+ clock-names = "core";
+ power-domains = <&mmcc VENUS_CORE1_GDSC>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/rcar_vin.txt b/Documentation/devicetree/bindings/media/rcar_vin.txt
index 6a4e61cbe011..6e4ef8caf759 100644
--- a/Documentation/devicetree/bindings/media/rcar_vin.txt
+++ b/Documentation/devicetree/bindings/media/rcar_vin.txt
@@ -1,5 +1,5 @@
-Renesas RCar Video Input driver (rcar_vin)
-------------------------------------------
+Renesas R-Car Video Input driver (rcar_vin)
+-------------------------------------------
The rcar_vin device provides video input capabilities for the Renesas R-Car
family of devices. The current blocks are always slaves and support one input
diff --git a/Documentation/devicetree/bindings/media/renesas,drif.txt b/Documentation/devicetree/bindings/media/renesas,drif.txt
new file mode 100644
index 000000000000..39516b94c28f
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/renesas,drif.txt
@@ -0,0 +1,176 @@
+Renesas R-Car Gen3 Digital Radio Interface controller (DRIF)
+------------------------------------------------------------
+
+R-Car Gen3 DRIF is an SPI-like, receive-only slave device. A general
+representation of DRIF interfacing with a master device is shown below.
+
++---------------------+ +---------------------+
+| |-----SCK------->|CLK |
+| Master |-----SS-------->|SYNC DRIFn (slave) |
+| |-----SD0------->|D0 |
+| |-----SD1------->|D1 |
++---------------------+ +---------------------+
+
+As per the datasheet, each DRIF channel (drifn) is made up of two internal
+channels (drifn0 & drifn1). These two internal channels share the common
+CLK & SYNC. Each internal channel has its own dedicated resources like
+irq, dma channels, address space & clock. This internal split is not
+visible to the external master device.
+
+The device tree model represents each internal channel as a separate node.
+The internal channels sharing the CLK & SYNC are tied together by their
+phandles using a property called "renesas,bonding". For the rest of
+the documentation, unless explicitly stated, the word channel implies an
+internal channel.
+
+When both internal channels are enabled, they need to be managed together
+as one, i.e. they cannot operate alone as independent devices. Out of the
+two, one of them needs to act as a primary device that accepts common
+properties of both the internal channels. This channel is identified by a
+property called "renesas,primary-bond".
+
+To summarize,
+ - When both the internal channels that are bonded together are enabled,
+ the zeroth channel is selected as primary-bond. This channel accepts
+ properties common to all the members of the bond.
+ - When only one of the bonded channels needs to be enabled, the property
+ "renesas,bonding" or "renesas,primary-bond" will have no effect. That
+ enabled channel can act alone as any other independent device.
+
+Required properties of an internal channel:
+-------------------------------------------
+- compatible: "renesas,r8a7795-drif" if DRIF controller is a part of R8A7795 SoC.
+ "renesas,rcar-gen3-drif" for a generic R-Car Gen3 compatible device.
+
+ When compatible with the generic version, nodes must list the
+ SoC-specific version corresponding to the platform first
+ followed by the generic version.
+
+- reg: offset and length of that channel.
+- interrupts: associated with that channel.
+- clocks: phandle and clock specifier of that channel.
+- clock-names: clock input name string: "fck".
+- dmas: phandles to the DMA channels.
+- dma-names: names of the DMA channel: "rx".
+- renesas,bonding: phandle to the other channel.
+
+Optional properties of an internal channel:
+-------------------------------------------
+- power-domains: phandle to the respective power domain.
+
+Required properties of an internal channel when:
+ - It is the only enabled channel of the bond (or)
+ - If it acts as primary among enabled bonds
+--------------------------------------------------------
+- pinctrl-0: pin control group to be used for this channel.
+- pinctrl-names: must be "default".
+- renesas,primary-bond: empty property indicating the channel acts as primary
+ among the bonded channels.
+- port: child port node corresponding to the data input, in accordance with
+ the video interface bindings defined in
+ Documentation/devicetree/bindings/media/video-interfaces.txt. The port
+ node must contain at least one endpoint.
+
+Optional endpoint property:
+---------------------------
+- sync-active: Indicates sync signal polarity, 0/1 for low/high respectively.
+ This property maps to SYNCAC bit in the hardware manual. The
+ default is 1 (active high).
+
+Example:
+--------
+
+(1) Both internal channels enabled:
+-----------------------------------
+
+When interfacing with a third party tuner device with two data pins as shown
+below.
+
++---------------------+ +---------------------+
+| |-----SCK------->|CLK |
+| Master |-----SS-------->|SYNC DRIFn (slave) |
+| |-----SD0------->|D0 |
+| |-----SD1------->|D1 |
++---------------------+ +---------------------+
+
+ drif00: rif@e6f40000 {
+ compatible = "renesas,r8a7795-drif",
+ "renesas,rcar-gen3-drif";
+ reg = <0 0xe6f40000 0 0x64>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 515>;
+ clock-names = "fck";
+ dmas = <&dmac1 0x20>, <&dmac2 0x20>;
+ dma-names = "rx", "rx";
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ renesas,bonding = <&drif01>;
+ renesas,primary-bond;
+ pinctrl-0 = <&drif0_pins>;
+ pinctrl-names = "default";
+ port {
+ drif0_ep: endpoint {
+ remote-endpoint = <&tuner_ep>;
+ };
+ };
+ };
+
+ drif01: rif@e6f50000 {
+ compatible = "renesas,r8a7795-drif",
+ "renesas,rcar-gen3-drif";
+ reg = <0 0xe6f50000 0 0x64>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 514>;
+ clock-names = "fck";
+ dmas = <&dmac1 0x22>, <&dmac2 0x22>;
+ dma-names = "rx", "rx";
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ renesas,bonding = <&drif00>;
+ };
+
+
+(2) Internal channel 1 alone is enabled:
+----------------------------------------
+
+When interfacing with a third party tuner device with one data pin as shown
+below.
+
++---------------------+ +---------------------+
+| |-----SCK------->|CLK |
+| Master |-----SS-------->|SYNC DRIFn (slave) |
+| | |D0 (unused) |
+| |-----SD-------->|D1 |
++---------------------+ +---------------------+
+
+ drif00: rif@e6f40000 {
+ compatible = "renesas,r8a7795-drif",
+ "renesas,rcar-gen3-drif";
+ reg = <0 0xe6f40000 0 0x64>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 515>;
+ clock-names = "fck";
+ dmas = <&dmac1 0x20>, <&dmac2 0x20>;
+ dma-names = "rx", "rx";
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ renesas,bonding = <&drif01>;
+ };
+
+ drif01: rif@e6f50000 {
+ compatible = "renesas,r8a7795-drif",
+ "renesas,rcar-gen3-drif";
+ reg = <0 0xe6f50000 0 0x64>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 514>;
+ clock-names = "fck";
+ dmas = <&dmac1 0x22>, <&dmac2 0x22>;
+ dma-names = "rx", "rx";
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ renesas,bonding = <&drif00>;
+ pinctrl-0 = <&drif0_pins>;
+ pinctrl-names = "default";
+ port {
+ drif0_ep: endpoint {
+ remote-endpoint = <&tuner_ep>;
+ sync-active = <0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/s5p-cec.txt b/Documentation/devicetree/bindings/media/s5p-cec.txt
index 4bb08d9d940b..1b1a10ba48ce 100644
--- a/Documentation/devicetree/bindings/media/s5p-cec.txt
+++ b/Documentation/devicetree/bindings/media/s5p-cec.txt
@@ -15,7 +15,11 @@ Required properties:
- clock-names : from common clock binding: must contain "hdmicec",
corresponding to entry in the clocks property.
- samsung,syscon-phandle - phandle to the PMU system controller
- - hdmi-phandle - phandle to the HDMI controller
+ - hdmi-phandle - phandle to the HDMI controller, see also cec.txt.
+
+Optional:
+ - needs-hpd : if present the CEC support is only available when the HPD
+ is high. See cec.txt for more details.
Example:
diff --git a/Documentation/devicetree/bindings/media/st,stm32-cec.txt b/Documentation/devicetree/bindings/media/st,stm32-cec.txt
new file mode 100644
index 000000000000..6be2381c180d
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/st,stm32-cec.txt
@@ -0,0 +1,19 @@
+STMicroelectronics STM32 CEC driver
+
+Required properties:
+ - compatible : value should be "st,stm32-cec"
+ - reg : Physical base address of the IP registers and length of memory
+ mapped region.
+ - clocks : from common clock binding: handle to CEC clocks
+ - clock-names : from common clock binding: must be "cec" and "hdmi-cec".
+ - interrupts : CEC interrupt number to the CPU.
+
+Example for stm32f746:
+
+cec: cec@40006c00 {
+ compatible = "st,stm32-cec";
+ reg = <0x40006C00 0x400>;
+ interrupts = <94>;
+ clocks = <&rcc 0 STM32F7_APB1_CLOCK(CEC)>, <&rcc 1 CLK_HDMI_CEC>;
+ clock-names = "cec", "hdmi-cec";
+};
diff --git a/Documentation/devicetree/bindings/media/st,stm32-dcmi.txt b/Documentation/devicetree/bindings/media/st,stm32-dcmi.txt
new file mode 100644
index 000000000000..249790a93017
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/st,stm32-dcmi.txt
@@ -0,0 +1,45 @@
+STMicroelectronics STM32 Digital Camera Memory Interface (DCMI)
+
+Required properties:
+- compatible: "st,stm32-dcmi"
+- reg: physical base address and length of the registers set for the device
+- interrupts: should contain IRQ line for the DCMI
+- resets: reference to a reset controller,
+ see Documentation/devicetree/bindings/reset/st,stm32-rcc.txt
+- clocks: list of clock specifiers, corresponding to entries in
+ the clock-names property
+- clock-names: must contain "mclk", which is the DCMI peripheral clock
+- pinctrl: the pin control settings to configure the muxing properly
+ for the pins that connect to the DCMI device.
+ See Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.txt.
+- dmas: phandle to DMA controller node,
+ see Documentation/devicetree/bindings/dma/stm32-dma.txt
+- dma-names: must contain "tx", which is the transmit channel from DCMI to DMA
+
+DCMI supports a single port node with parallel bus. It should contain one
+'port' child node with child 'endpoint' node. Please refer to the bindings
+defined in Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+
+ dcmi: dcmi@50050000 {
+ compatible = "st,stm32-dcmi";
+ reg = <0x50050000 0x400>;
+ interrupts = <78>;
+ resets = <&rcc STM32F4_AHB2_RESET(DCMI)>;
+ clocks = <&rcc 0 STM32F4_AHB2_CLOCK(DCMI)>;
+ clock-names = "mclk";
+ pinctrl-names = "default";
+ pinctrl-0 = <&dcmi_pins>;
+ dmas = <&dma2 1 1 0x414 0x3>;
+ dma-names = "tx";
+ port {
+ dcmi_0: endpoint {
+ remote-endpoint = <...>;
+ bus-width = <8>;
+ hsync-active = <0>;
+ vsync-active = <0>;
+ pclk-sample = <1>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/stih-cec.txt b/Documentation/devicetree/bindings/media/stih-cec.txt
index 289a08b33651..8be2a040c6c6 100644
--- a/Documentation/devicetree/bindings/media/stih-cec.txt
+++ b/Documentation/devicetree/bindings/media/stih-cec.txt
@@ -9,7 +9,7 @@ Required properties:
- pinctrl-names: Contains only one value - "default"
- pinctrl-0: Specifies the pin control groups used for CEC hardware.
- resets: Reference to a reset controller
- - hdmi-phandle: Phandle to the HDMI controller
+ - hdmi-phandle: Phandle to the HDMI controller, see also cec.txt.
Example for STIH407:
diff --git a/Documentation/devicetree/bindings/media/video-mux.txt b/Documentation/devicetree/bindings/media/video-mux.txt
new file mode 100644
index 000000000000..63b9dc913e45
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video-mux.txt
@@ -0,0 +1,60 @@
+Video Multiplexer
+=================
+
+Video multiplexers allow selecting one of multiple input ports. Video received
+on the active input port is passed through to the output port. Muxes described
+by this binding are controlled by a multiplexer controller that is described by
+the bindings in Documentation/devicetree/bindings/mux/mux-controller.txt
+
+Required properties:
+- compatible : should be "video-mux"
+- mux-controls : mux controller node to use for operating the mux
+- #address-cells: should be <1>
+- #size-cells: should be <0>
+- port@*: at least three port nodes containing endpoints connecting to the
+ source and sink devices according to of_graph bindings. The last port is
+ the output port, all others are inputs.
+
+Optionally, #address-cells, #size-cells, and port nodes can be grouped under a
+ports node as described in Documentation/devicetree/bindings/graph.txt.
+
+Example:
+
+ mux: mux-controller {
+ compatible = "gpio-mux";
+ #mux-control-cells = <0>;
+
+ mux-gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
+ };
+
+ video-mux {
+ compatible = "video-mux";
+ mux-controls = <&mux>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ mux_in0: endpoint {
+ remote-endpoint = <&video_source0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ mux_in1: endpoint {
+ remote-endpoint = <&video_source1_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ mux_out: endpoint {
+ remote-endpoint = <&capture_interface_in>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/mfd/arizona.txt b/Documentation/devicetree/bindings/mfd/arizona.txt
index 8f2e2822238d..b37bdde5cfda 100644
--- a/Documentation/devicetree/bindings/mfd/arizona.txt
+++ b/Documentation/devicetree/bindings/mfd/arizona.txt
@@ -30,7 +30,8 @@ Required properties:
- gpio-controller : Indicates this device is a GPIO controller.
- #gpio-cells : Must be 2. The first cell is the pin number and the
- second cell is used to specify optional parameters (currently unused).
+ second cell is used to specify optional parameters, see ../gpio/gpio.txt
+ for details.
- AVDD-supply, DBVDD1-supply, CPVDD-supply : Power supplies for the device,
as covered in Documentation/devicetree/bindings/regulator/regulator.txt
diff --git a/Documentation/devicetree/bindings/mfd/lp87565.txt b/Documentation/devicetree/bindings/mfd/lp87565.txt
new file mode 100644
index 000000000000..a48df7c08ab0
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/lp87565.txt
@@ -0,0 +1,43 @@
+TI LP87565 PMIC MFD driver
+
+Required properties:
+ - compatible: "ti,lp87565", "ti,lp87565-q1"
+ - reg: I2C slave address.
+ - gpio-controller: Marks the device node as a GPIO Controller.
+ - #gpio-cells: Should be two. The first cell is the pin number and
+ the second cell is used to specify flags.
+ See ../gpio/gpio.txt for more information.
+ - xxx-in-supply: Phandle to parent supply node of each regulator
+ populated under the regulators node. xxx should match
+ the supply_name populated in the driver.
+Example:
+
+lp87565_pmic: pmic@60 {
+ compatible = "ti,lp87565-q1";
+ reg = <0x60>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ buck10-in-supply = <&vsys_3v3>;
+ buck23-in-supply = <&vsys_3v3>;
+
+ regulators: regulators {
+ buck10_reg: buck10 {
+ /* VDD_MPU */
+ regulator-name = "buck10";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ buck23_reg: buck23 {
+ /* VDD_GPU */
+ regulator-name = "buck23";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index edd7fd2bbbf9..7da86f22a13b 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -8,7 +8,8 @@ The following properties are common to the Ethernet controllers:
property;
- max-speed: number, specifies maximum speed in Mbit/s supported by the device;
- max-frame-size: number, maximum transfer unit (IEEE defined MTU), rather than
- the maximum frame size (there's contradiction in ePAPR).
+ the maximum frame size (there's a contradiction in the Devicetree
+ Specification).
- phy-mode: string, operation mode of the PHY interface. This is now a de-facto
standard property; supported values are:
* "internal"
@@ -35,9 +36,11 @@ The following properties are common to the Ethernet controllers:
* "rxaui"
* "xaui"
* "10gbase-kr" (10GBASE-KR, XFI, SFI)
-- phy-connection-type: the same as "phy-mode" property but described in ePAPR;
+- phy-connection-type: the same as "phy-mode" property but described in the
+ Devicetree Specification;
- phy-handle: phandle, specifies a reference to a node representing a PHY
- device; this property is described in ePAPR and so preferred;
+ device; this property is described in the Devicetree Specification and so
+ preferred;
- phy: the same as "phy-handle" property, not recommended for new bindings.
- phy-device: the same as "phy-handle" property, not recommended for new
bindings.
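+
+An illustrative MAC node using the common properties above (the controller
+compatible, addresses and the PHY phandle are placeholders, not part of this
+binding):
+
+	ethernet@40000000 {
+		compatible = "vendor,example-mac";	/* placeholder */
+		reg = <0x40000000 0x10000>;
+		max-speed = <1000>;
+		phy-mode = "rgmii-id";
+		/* &ethphy0 is a PHY node on the MDIO bus, defined elsewhere */
+		phy-handle = <&ethphy0>;
+	};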
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt
index df873d1f3b7c..df873d1f3b7c 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/fman.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fman.txt
diff --git a/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt b/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt
index 5dbf169cd81c..590f622188de 100644
--- a/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt
+++ b/Documentation/devicetree/bindings/net/wireless/brcm,bcm43xx-fmac.txt
@@ -31,7 +31,7 @@ mmc3: mmc@01c12000 {
non-removable;
status = "okay";
- brcmf: bcrmf@1 {
+ brcmf: wifi@1 {
reg = <1>;
compatible = "brcm,bcm4329-fmac";
interrupt-parent = <&pio>;
diff --git a/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt b/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
index 35d4a979bb7b..89a84f8aa621 100644
--- a/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
+++ b/Documentation/devicetree/bindings/pci/faraday,ftpci100.txt
@@ -30,6 +30,13 @@ Mandatory properties:
128MB, 256MB, 512MB, 1GB or 2GB in size. The memory should be marked as
pre-fetchable.
+Optional properties:
+- clocks: when present, this should contain the peripheral clock (PCLK) and the
+ PCI clock (PCICLK). If these are not present, they are assumed to be
+ hard-wired enabled and always on. The PCI clock will be 33 or 66 MHz.
+- clock-names: when present, this should contain "PCLK" for the peripheral
+ clock and "PCICLK" for the PCI-side clock.
+
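+Example of the optional clock properties (the clock controller phandle and
+specifier indices are placeholders; use whatever clock provider feeds the
+bridge on a given board):
+
+	pci@50000000 {
+		compatible = "faraday,ftpci100";
+		/* ... mandatory properties as described above ... */
+		clocks = <&syscon 2>, <&syscon 3>;
+		clock-names = "PCLK", "PCICLK";
+	};
+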
Mandatory subnodes:
- For "faraday,ftpci100" a node representing the interrupt-controller inside the
host bridge is mandatory. It has the following mandatory properties:
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
index e3d5680875b1..cf92d3ba5a26 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
@@ -33,6 +33,10 @@ Optional properties:
- reset-gpio-active-high: If present then the reset sequence using the GPIO
specified in the "reset-gpio" property is reversed (H=reset state,
L=operation state).
+- vpcie-supply: Should specify the regulator in charge of PCIe port power.
+ The regulator will be enabled when initializing the PCIe host and
+ disabled either as part of the init process or when shutting down the
+ host.
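+
+ An illustrative fragment (the regulator label is a placeholder defined
+ elsewhere in the board device tree):
+
+	&pcie {
+		vpcie-supply = <&reg_pcie>;
+		status = "okay";
+	};
+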
Additional required properties for imx6sx-pcie:
- clock names: Must include the following additional entries:
diff --git a/Documentation/devicetree/bindings/pci/mediatek,mt7623-pcie.txt b/Documentation/devicetree/bindings/pci/mediatek,mt7623-pcie.txt
new file mode 100644
index 000000000000..fe80dda9bf73
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/mediatek,mt7623-pcie.txt
@@ -0,0 +1,130 @@
+MediaTek Gen2 PCIe controller which is available on MT7623 series SoCs
+
+The PCIe subsystem supports a single root complex (RC) with 3 root ports. Each
+root port supports a Gen2 1-lane link and has a PIPE interface to the PHY.
+
+Required properties:
+- compatible: Should contain "mediatek,mt7623-pcie".
+- device_type: Must be "pci"
+- reg: Base addresses and lengths of the PCIe controller.
+- #address-cells: Address representation for root ports (must be 3)
+- #size-cells: Size representation for root ports (must be 2)
+- #interrupt-cells: Size representation for interrupts (must be 1)
+- interrupt-map-mask and interrupt-map: Standard PCI IRQ mapping properties
+ Please refer to the standard PCI bus binding document for a more detailed
+ explanation.
+- clocks: Must contain an entry for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+ - free_ck : reference clock of the PCIe subsys
+ - sys_ck0 : clock of Port0
+ - sys_ck1 : clock of Port1
+ - sys_ck2 : clock of Port2
+- resets: Must contain an entry for each entry in reset-names.
+ See ../reset/reset.txt for details.
+- reset-names: Must include the following entries:
+ - pcie-rst0 : Port0 reset
+ - pcie-rst1 : Port1 reset
+ - pcie-rst2 : Port2 reset
+- phys: List of PHY specifiers (used by generic PHY framework).
+- phy-names : Must be "pcie-phy0", "pcie-phy1", ..., "pcie-phyN", based on
+ the number of PHYs as specified in the *phys* property.
+- power-domains: A phandle and power domain specifier pair to the power domain
+ which is responsible for collapsing and restoring power to the peripheral.
+- bus-range: Range of bus numbers associated with this controller.
+- ranges: Ranges for the PCI memory and I/O regions.
+
+In addition, the device tree node must have sub-nodes describing each
+PCIe port interface, having the following mandatory properties:
+
+Required properties:
+- device_type: Must be "pci"
+- reg: Only the first four bytes are used to refer to the correct bus number
+ and device number.
+- #address-cells: Must be 3
+- #size-cells: Must be 2
+- #interrupt-cells: Must be 1
+- interrupt-map-mask and interrupt-map: Standard PCI IRQ mapping properties
+ Please refer to the standard PCI bus binding document for a more detailed
+ explanation.
+- ranges: Sub-ranges distributed from the PCIe controller node. An empty
+ property is sufficient.
+- num-lanes: Number of lanes to use for this port.
+
+Examples:
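+An illustrative region using this property (the address, size and the use of
+"no-map" are assumptions for the sketch, not requirements of this binding):
+
+	reserved-memory {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		dma_default: buffer@78000000 {
+			compatible = "shared-dma-pool";
+			reg = <0x78000000 0x800000>;
+			no-map;
+			linux,dma-default;
+		};
+	};
+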
+
+ hifsys: syscon@1a000000 {
+ compatible = "mediatek,mt7623-hifsys",
+ "mediatek,mt2701-hifsys",
+ "syscon";
+ reg = <0 0x1a000000 0 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ pcie: pcie-controller@1a140000 {
+ compatible = "mediatek,mt7623-pcie";
+ device_type = "pci";
+ reg = <0 0x1a140000 0 0x1000>, /* PCIe shared registers */
+ <0 0x1a142000 0 0x1000>, /* Port0 registers */
+ <0 0x1a143000 0 0x1000>, /* Port1 registers */
+ <0 0x1a144000 0 0x1000>; /* Port2 registers */
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xf800 0 0 0>;
+ interrupt-map = <0x0000 0 0 0 &sysirq GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>,
+ <0x0800 0 0 0 &sysirq GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>,
+ <0x1000 0 0 0 &sysirq GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&topckgen CLK_TOP_ETHIF_SEL>,
+ <&hifsys CLK_HIFSYS_PCIE0>,
+ <&hifsys CLK_HIFSYS_PCIE1>,
+ <&hifsys CLK_HIFSYS_PCIE2>;
+ clock-names = "free_ck", "sys_ck0", "sys_ck1", "sys_ck2";
+ resets = <&hifsys MT2701_HIFSYS_PCIE0_RST>,
+ <&hifsys MT2701_HIFSYS_PCIE1_RST>,
+ <&hifsys MT2701_HIFSYS_PCIE2_RST>;
+ reset-names = "pcie-rst0", "pcie-rst1", "pcie-rst2";
+ phys = <&pcie0_phy>, <&pcie1_phy>, <&pcie2_phy>;
+ phy-names = "pcie-phy0", "pcie-phy1", "pcie-phy2";
+ power-domains = <&scpsys MT2701_POWER_DOMAIN_HIF>;
+ bus-range = <0x00 0xff>;
+ ranges = <0x81000000 0 0x1a160000 0 0x1a160000 0 0x00010000 /* I/O space */
+ 0x83000000 0 0x60000000 0 0x60000000 0 0x10000000>; /* memory space */
+
+ pcie@0,0 {
+ device_type = "pci";
+ reg = <0x0000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &sysirq GIC_SPI 193 IRQ_TYPE_LEVEL_LOW>;
+ ranges;
+ num-lanes = <1>;
+ };
+
+ pcie@1,0 {
+ device_type = "pci";
+ reg = <0x0800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &sysirq GIC_SPI 194 IRQ_TYPE_LEVEL_LOW>;
+ ranges;
+ num-lanes = <1>;
+ };
+
+ pcie@2,0 {
+ device_type = "pci";
+ reg = <0x1000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &sysirq GIC_SPI 195 IRQ_TYPE_LEVEL_LOW>;
+ ranges;
+ num-lanes = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie.txt b/Documentation/devicetree/bindings/pci/qcom,pcie.txt
index e15f9b19901f..9d418b71774f 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie.txt
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie.txt
@@ -8,6 +8,7 @@
- "qcom,pcie-apq8064" for apq8064
- "qcom,pcie-apq8084" for apq8084
- "qcom,pcie-msm8996" for msm8996 or apq8096
+ - "qcom,pcie-ipq4019" for ipq4019
- reg:
Usage: required
@@ -87,7 +88,7 @@
- "core" Clocks the pcie hw block
- "phy" Clocks the pcie PHY block
- clock-names:
- Usage: required for apq8084
+ Usage: required for apq8084/ipq4019
Value type: <stringlist>
Definition: Should contain the following entries
- "aux" Auxiliary (AUX) clock
@@ -126,6 +127,23 @@
Definition: Should contain the following entries
- "core" Core reset
+- reset-names:
+ Usage: required for ipq/apq8064
+ Value type: <stringlist>
+ Definition: Should contain the following entries
+ - "axi_m" AXI master reset
+ - "axi_s" AXI slave reset
+ - "pipe" PIPE reset
+ - "axi_m_vmid" VMID reset
+ - "axi_s_xpu" XPU reset
+ - "parf" PARF reset
+ - "phy" PHY reset
+ - "axi_m_sticky" AXI sticky reset
+ - "pipe_sticky" PIPE sticky reset
+ - "pwr" PWR reset
+ - "ahb" AHB reset
+ - "phy_ahb" PHY AHB reset
+
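+Illustrative fragment for ipq/apq8064 (the &gcc phandle and the reset
+specifier indices are placeholders; use the reset identifiers from the SoC's
+clock controller binding):
+
+	resets = <&gcc 0>, <&gcc 1>, <&gcc 2>, <&gcc 3>,
+		 <&gcc 4>, <&gcc 5>, <&gcc 6>, <&gcc 7>,
+		 <&gcc 8>, <&gcc 9>, <&gcc 10>, <&gcc 11>;
+	reset-names = "axi_m", "axi_s", "pipe", "axi_m_vmid",
+		      "axi_s_xpu", "parf", "phy", "axi_m_sticky",
+		      "pipe_sticky", "pwr", "ahb", "phy_ahb";
+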
- power-domains:
Usage: required for apq8084 and msm8996/apq8096
Value type: <prop-encoded-array>
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt
index 34712d6fd253..bd27428dda61 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci.txt
+++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt
@@ -1,4 +1,4 @@
-* Renesas RCar PCIe interface
+* Renesas R-Car PCIe interface
Required properties:
compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC;
diff --git a/Documentation/devicetree/bindings/pci/tango-pcie.txt b/Documentation/devicetree/bindings/pci/tango-pcie.txt
new file mode 100644
index 000000000000..244683836a79
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/tango-pcie.txt
@@ -0,0 +1,29 @@
+Sigma Designs Tango PCIe controller
+
+Required properties:
+
+- compatible: "sigma,smp8759-pcie"
+- reg: address/size of PCI configuration space, address/size of register area
+- bus-range: defined by size of PCI configuration space
+- device_type: "pci"
+- #size-cells: <2>
+- #address-cells: <3>
+- msi-controller
+- ranges: translation from system to bus addresses
+- interrupts: spec for misc interrupts, spec for MSI
+
+Example:
+
+ pcie@2e000 {
+ compatible = "sigma,smp8759-pcie";
+ reg = <0x50000000 0x400000>, <0x2e000 0x100>;
+ bus-range = <0 3>;
+ device_type = "pci";
+ #size-cells = <2>;
+ #address-cells = <3>;
+ msi-controller;
+ ranges = <0x02000000 0x0 0x00400000 0x50400000 0x0 0x3c00000>;
+ interrupts =
+ <54 IRQ_TYPE_LEVEL_HIGH>, /* misc interrupts */
+ <55 IRQ_TYPE_LEVEL_HIGH>; /* MSI */
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
index b53224473672..6f2ec9af0de2 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
@@ -20,8 +20,10 @@ Required properties:
"allwinner,sun9i-a80-pinctrl"
"allwinner,sun9i-a80-r-pinctrl"
"allwinner,sun8i-a83t-pinctrl"
+ "allwinner,sun8i-a83t-r-pinctrl"
"allwinner,sun8i-h3-pinctrl"
"allwinner,sun8i-h3-r-pinctrl"
+ "allwinner,sun8i-r40-pinctrl"
"allwinner,sun50i-a64-pinctrl"
"allwinner,sun50i-a64-r-pinctrl"
"allwinner,sun50i-h5-pinctrl"
diff --git a/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.txt
new file mode 100644
index 000000000000..ca313a7aeaff
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.txt
@@ -0,0 +1,41 @@
+Ingenic jz47xx pin controller
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+For the jz47xx SoCs, pin control is tightly bound to GPIO ports. All pins may
+be used as GPIOs; multiplexed device functions are configured within the
+GPIO port configuration registers, and it is typical to refer to pins using the
+naming scheme "PxN" where x is a character identifying the GPIO port with
+which the pin is associated and N is an integer from 0 to 31 identifying the
+pin within that GPIO port. For example PA0 is the first pin in GPIO port A, and
+PB31 is the last pin in GPIO port B. The jz4740 contains 4 GPIO ports, PA to
+PD, for a total of 128 pins. The jz4780 contains 6 GPIO ports, PA to PF, for a
+total of 192 pins.
+
+
+Required properties:
+--------------------
+
+ - compatible: One of:
+ - "ingenic,jz4740-pinctrl"
+ - "ingenic,jz4770-pinctrl"
+ - "ingenic,jz4780-pinctrl"
+ - reg: Address range of the pinctrl registers.
+
+
+GPIO sub-nodes
+--------------
+
+The pinctrl node can have optional sub-nodes for the Ingenic GPIO driver;
+please refer to ../gpio/ingenic,gpio.txt.
+
+
+Example:
+--------
+
+pinctrl: pin-controller@10010000 {
+ compatible = "ingenic,jz4740-pinctrl";
+ reg = <0x10010000 0x400>;
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
index f01d154090da..62d0f33fa65e 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
@@ -204,21 +204,22 @@ each single pin the number of required sub-nodes containing "pin" and
maintain.
For cases like this, the pin controller driver may use the pinmux helper
-property, where the pin identifier is packed with mux configuration settings
-in a single integer.
+property, where the pin identifier is provided with mux configuration settings
+in a pinmux group. A pinmux group consists of the pin identifier and mux
+settings represented as a single integer or an array of integers.
-The pinmux property accepts an array of integers, each of them describing
+The pinmux property accepts an array of pinmux groups, each of them describing
a single pin multiplexing configuration.
pincontroller {
state_0_node_a {
- pinmux = <PIN_ID_AND_MUX>, <PIN_ID_AND_MUX>, ...;
+ pinmux = <PINMUX_GROUP>, <PINMUX_GROUP>, ...;
};
};
Each individual pin controller driver bindings documentation shall specify
-how those values (pin IDs and pin multiplexing configuration) are defined and
-assembled together.
+how pin IDs and pin multiplexing configuration are defined and assembled
+together in a pinmux group.
== Generic pin configuration node content ==
@@ -251,14 +252,20 @@ drive-push-pull - drive actively high and low
drive-open-drain - drive with open drain
drive-open-source - drive with open source
drive-strength - sink or source at most X mA
-input-enable - enable input on pin (no effect on output)
-input-disable - disable input on pin (no effect on output)
+input-enable - enable input on pin (no effect on output, such as
+ enabling an input buffer)
+input-disable - disable input on pin (no effect on output, such as
+ disabling an input buffer)
input-schmitt-enable - enable schmitt-trigger mode
input-schmitt-disable - disable schmitt-trigger mode
input-debounce - debounce mode with debound time X
power-source - select between different power supplies
low-power-enable - enable low power mode
low-power-disable - disable low power mode
+output-disable - disable output on a pin (such as disabling an output
+ buffer)
+output-enable - enable output on a pin without actively driving it
+ (such as enabling an output buffer)
output-low - set the pin to output mode with low level
output-high - set the pin to output mode with high level
slew-rate - set the slew rate
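+
+A sketch of a pin configuration node using the new output properties (the node
+and pin names are placeholders, not defined by this document):
+
+pincontroller {
+	state_1_node_a {
+		pins = "spi0_clk";
+		/* enable the output buffer without actively driving the pin */
+		output-enable;
+	};
+	state_1_node_b {
+		pins = "spare0";
+		/* turn the output buffer off */
+		output-disable;
+	};
+};
+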
@@ -300,7 +307,7 @@ arguments are described below.
- pinmux takes a list of pin IDs and mux settings as required argument. The
specific bindings for the hardware defines:
- How pin IDs and mux settings are defined and assembled together in a single
- integer.
+ integer or an array of integers.
- bias-pull-up, -down and -pin-default take as optional argument on hardware
supporting it the pull strength in Ohm. bias-disable will disable the pull.
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-zx.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-zx.txt
new file mode 100644
index 000000000000..e219849b21ca
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-zx.txt
@@ -0,0 +1,85 @@
+* ZTE ZX Pin Controller
+
+The pin controller on ZTE ZX platforms is a hybrid. It consists of
+a main controller and an auxiliary one. For example, on the ZX296718 SoC, the
+main controller is TOP_PMM and the auxiliary one is AON_IOCFG. Both
+controllers work together to control pin multiplexing and configuration in
+the way illustrated below.
+
+
+ GMII_RXD3 ---+
+ |
+ DVI1_HS ---+----------------------------- GMII_RXD3 (TOP pin)
+ |
+ BGPIO16 ---+ ^
+ | pinconf
+ ^ |
+ | pinmux |
+ | |
+
+ TOP_PMM (main) AON_IOCFG (aux)
+
+ | | |
+ | pinmux | |
+ | pinmux v |
+ v | pinconf
+ KEY_ROW2 ---+ v
+ PORT1_LCD_TE ---+ |
+ | AGPIO10 ---+------ KEY_ROW2 (AON pin)
+ I2S0_DOUT3 ---+ |
+ |-----------------------+
+ PWM_OUT3 ---+
+ |
+ VGA_VS1 ---+
+
+
+For most pins, like GMII_RXD3 in the figure, the pinmux function is
+controlled by the TOP_PMM block only; this type of pin is referred to as a
+'TOP pin'. For pins like KEY_ROW2, the pinmux is controlled by both the
+TOP_PMM and AON_IOCFG blocks, as the available multiplexing functions for
+the pin are spread across both controllers. This type of pin is called an
+'AON pin'. Though the pinmux implementation is quite different, pinconf is
+the same for both types of pins. Both are controlled by the auxiliary
+controller, i.e. AON_IOCFG on ZX296718.
+
+Required properties:
+- compatible: should be "zte,zx296718-pmm".
+- reg: the register physical address and length.
+- zte,auxiliary-controller: phandle to the auxiliary pin controller which
+ implements pinmux for AON pins and pinconf for all pins.
+
+The following pin configurations are supported. Please refer to
+pinctrl-bindings.txt in this directory for more details of the common
+pinctrl bindings used by client devices.
+
+- bias-pull-up
+- bias-pull-down
+- drive-strength
+- input-enable
+- slew-rate
+
+Examples:
+
+iocfg: pin-controller@119000 {
+ compatible = "zte,zx296718-iocfg";
+ reg = <0x119000 0x1000>;
+};
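+ An illustrative fragment (the node name and unit address are placeholders):
+
+	cs35l35: cs35l35@40 {
+		...
+		cirrus,boost-ind-nanohenry = <1000>;
+	};
+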
+
+pmm: pin-controller@1462000 {
+ compatible = "zte,zx296718-pmm";
+ reg = <0x1462000 0x1000>;
+ zte,auxiliary-controller = <&iocfg>;
+};
+
+&pmm {
+ vga_pins: vga {
+ pins = "KEY_COL1", "KEY_COL2", "KEY_ROW1", "KEY_ROW2";
+ function = "VGA";
+ };
+};
+
+&vga {
+ pinctrl-names = "default";
+ pinctrl-0 = <&vga_pins>;
+ status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8074-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8074-pinctrl.txt
new file mode 100644
index 000000000000..407b9443629d
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8074-pinctrl.txt
@@ -0,0 +1,172 @@
+Qualcomm Technologies, Inc. IPQ8074 TLMM block
+
+This binding describes the Top Level Mode Multiplexer block found in the
+IPQ8074 platform.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be "qcom,ipq8074-pinctrl"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: the base address and size of the TLMM register space.
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: should specify the TLMM summary IRQ.
+
+- interrupt-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as an interrupt controller
+
+- #interrupt-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 2. Specifying the pin number and flags, as defined
+ in <dt-bindings/interrupt-controller/irq.h>
+
+- gpio-controller:
+ Usage: required
+ Value type: <none>
+ Definition: identifies this node as a gpio controller
+
+- #gpio-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 2. Specifying the pin number and flags, as defined
+ in <dt-bindings/gpio/gpio.h>
+
+Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
+a general description of GPIO and interrupt bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, drive strength, etc.
+
+
+PIN CONFIGURATION NODES:
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pin configuration subnode:
+
+- pins:
+ Usage: required
+ Value type: <string-array>
+ Definition: List of gpio pins affected by the properties specified in
+ this subnode. Valid pins are:
+ gpio0-gpio69
+
+- function:
+ Usage: required
+ Value type: <string>
+ Definition: Specify the alternative function to be configured for the
+ specified pins. Functions are only valid for gpio pins.
+ Valid values are:
+ atest_char, atest_char0, atest_char1, atest_char2,
+ atest_char3, audio_rxbclk, audio_rxd, audio_rxfsync,
+ audio_rxmclk, audio_txbclk, audio_txd, audio_txfsync,
+ audio_txmclk, blsp0_i2c, blsp0_spi, blsp0_uart, blsp1_i2c,
+ blsp1_spi, blsp1_uart, blsp2_i2c, blsp2_spi, blsp2_uart,
+ blsp3_i2c, blsp3_spi, blsp3_spi0, blsp3_spi1, blsp3_spi2,
+ blsp3_spi3, blsp3_uart, blsp4_i2c0, blsp4_i2c1, blsp4_spi0,
+ blsp4_spi1, blsp4_uart0, blsp4_uart1, blsp5_i2c, blsp5_spi,
+ blsp5_uart, burn0, burn1, cri_trng, cri_trng0, cri_trng1,
+ cxc0, cxc1, dbg_out, gcc_plltest, gcc_tlmm, gpio, ldo_en,
+ ldo_update, led0, led1, led2, mac0_sa0, mac0_sa1, mac1_sa0,
+ mac1_sa1, mac1_sa2, mac1_sa3, mac2_sa0, mac2_sa1, mdc,
+ mdio, pcie0_clk, pcie0_rst, pcie0_wake, pcie1_clk,
+ pcie1_rst, pcie1_wake, pcm_drx, pcm_dtx, pcm_fsync,
+ pcm_pclk, pcm_zsi0, pcm_zsi1, prng_rosc, pta1_0, pta1_1,
+ pta1_2, pta2_0, pta2_1, pta2_2, pwm0, pwm1, pwm2, pwm3,
+ qdss_cti_trig_in_a0, qdss_cti_trig_in_a1,
+ qdss_cti_trig_in_b0, qdss_cti_trig_in_b1,
+ qdss_cti_trig_out_a0, qdss_cti_trig_out_a1,
+ qdss_cti_trig_out_b0, qdss_cti_trig_out_b1,
+ qdss_traceclk_a, qdss_traceclk_b, qdss_tracectl_a,
+ qdss_tracectl_b, qdss_tracedata_a, qdss_tracedata_b,
+ qpic, rx0, rx1, rx2, sd_card, sd_write, tsens_max, wci2a,
+ wci2b, wci2c, wci2d
+
+- bias-disable:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as no pull.
+
+- bias-pull-down:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull down.
+
+- bias-pull-up:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull up.
+
+- output-high:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ high.
+
+- output-low:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ low.
+
+- drive-strength:
+ Usage: optional
+ Value type: <u32>
+ Definition: Selects the drive strength for the specified pins, in mA.
+ Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16
+
+Example:
+
+ tlmm: pinctrl@1000000 {
+ compatible = "qcom,ipq8074-pinctrl";
+ reg = <0x1000000 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ uart2: uart2-default {
+ mux {
+ pins = "gpio23", "gpio24";
+ function = "blsp4_uart1";
+ };
+
+ rx {
+ pins = "gpio23";
+ drive-strength = <4>;
+ bias-disable;
+ };
+
+ tx {
+ pins = "gpio24";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
index 13df9498311a..645082f03259 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
@@ -13,6 +13,8 @@ Required Properties:
- "renesas,pfc-emev2": for EMEV2 (EMMA Mobile EV2) compatible pin-controller.
- "renesas,pfc-r8a73a4": for R8A73A4 (R-Mobile APE6) compatible pin-controller.
- "renesas,pfc-r8a7740": for R8A7740 (R-Mobile A1) compatible pin-controller.
+ - "renesas,pfc-r8a7743": for R8A7743 (RZ/G1M) compatible pin-controller.
+ - "renesas,pfc-r8a7745": for R8A7745 (RZ/G1E) compatible pin-controller.
- "renesas,pfc-r8a7778": for R8A7778 (R-Mobile M1) compatible pin-controller.
- "renesas,pfc-r8a7779": for R8A7779 (R-Car H1) compatible pin-controller.
- "renesas,pfc-r8a7790": for R8A7790 (R-Car H2) compatible pin-controller.
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rza1-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,rza1-pinctrl.txt
new file mode 100644
index 000000000000..43e21474528a
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,rza1-pinctrl.txt
@@ -0,0 +1,221 @@
+Renesas RZ/A1 combined Pin and GPIO controller
+
+The Renesas SoCs of the RZ/A1 family feature a combined Pin and GPIO controller,
+named "Ports" in the hardware reference manual.
+Pin multiplexing and GPIO configuration are performed on a per-pin basis by
+writing configuration values to per-port register sets.
+Each "port" features up to 16 pins, each of them configurable for GPIO
+function (port mode) or in alternate function mode.
+Up to 8 different alternate function modes exist for each single pin.
+
+Pin controller node
+-------------------
+
+Required properties:
+ - compatible
+ this shall be "renesas,r7s72100-ports".
+
+ - reg
+ address base and length of the memory area where the pin controller
+ hardware is mapped to.
+
+Example:
+Pin controller node for RZ/A1H SoC (r7s72100)
+
+pinctrl: pin-controller@fcfe3000 {
+ compatible = "renesas,r7s72100-ports";
+
+ reg = <0xfcfe3000 0x4230>;
+};
+
+Sub-nodes
+---------
+
+The child nodes of the pin controller node describe either a pin multiplexing
+function or a GPIO controller.
+
+- Pin multiplexing sub-nodes:
+ A pin multiplexing sub-node describes how to configure a set of
+ (or a single) pin in some desired alternate function mode.
+ A single sub-node may define several pin configurations.
+ A few alternate functions require special pin configuration flags to be
+ supplied along with the alternate function configuration number.
+ The hardware reference manual specifies when a pin function requires
+ "software IO driven" mode to be specified. To do so use the generic
+ properties from the <include/linux/pinctrl/pinconf_generic.h> header file
+ to instruct the pin controller to perform the desired pin configuration
+ operation.
+ Please refer to pinctrl-bindings.txt for more information on the usage of
+ generic pin properties.
+
+ The allowed generic formats for a pin multiplexing sub-node are the
+ following ones:
+
+ node-1 {
+ pinmux = <PIN_ID_AND_MUX>, <PIN_ID_AND_MUX>, ... ;
+ GENERIC_PINCONFIG;
+ };
+
+ node-2 {
+ sub-node-1 {
+ pinmux = <PIN_ID_AND_MUX>, <PIN_ID_AND_MUX>, ... ;
+ GENERIC_PINCONFIG;
+ };
+
+ sub-node-2 {
+ pinmux = <PIN_ID_AND_MUX>, <PIN_ID_AND_MUX>, ... ;
+ GENERIC_PINCONFIG;
+ };
+
+ ...
+
+ sub-node-n {
+ pinmux = <PIN_ID_AND_MUX>, <PIN_ID_AND_MUX>, ... ;
+ GENERIC_PINCONFIG;
+ };
+ };
+
+ Use the second format when pins that are part of the same logical group need
+ to have different generic pin configuration flags applied.
+
+ Client sub-nodes shall refer to pin multiplexing sub-nodes using the phandle
+ of the most external one.
+
+ Eg.
+
+ client-1 {
+ ...
+ pinctrl-0 = <&node-1>;
+ ...
+ };
+
+ client-2 {
+ ...
+ pinctrl-0 = <&node-2>;
+ ...
+ };
+
+ Required properties:
+ - pinmux:
+ integer array representing pin number and pin multiplexing configuration.
+ When a pin has to be configured in alternate function mode, use this
+ property to identify the pin by its global index, and provide its
+ alternate function configuration number along with it.
+ When multiple pins are required to be configured as part of the same
+ alternate function they shall be specified as members of the same
+ argument list of a single "pinmux" property.
+ Helper macros to ease assembling the pin index from its position
+ (the port it sits on and its pin number) and the alternate function
+ identifier are provided by the pin controller header file at:
+ <include/dt-bindings/pinctrl/r7s72100-pinctrl.h>
+ Integer values in the "pinmux" argument list are assembled as:
+ ((PORT * 16 + PIN) | MUX_FUNC << 16)
+
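+ As a worked example, pin #0 on port #3 in alternate function #6 (as used in
+ the SCIF2 example below) is encoded as:
+ RZA1_PINMUX(3, 0, 6) = ((3 * 16 + 0) | (6 << 16)) = 0x60030
+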
+ Optional generic properties:
+ - input-enable:
+ enable the input buffer for pins requiring software driven IO input
+ operations.
+ - output-high:
+ enable output buffer for pins requiring software driven IO output
+ operations. output-low can be used alternatively, as line value is
+ ignored by the driver.
+
+ The hardware reference manual specifies when a pin has to be configured to
+ work in bi-directional mode and when the IO direction has to be specified
+ by software. Bi-directional pins are managed by the pin controller driver
+ internally, while software driven IO direction has to be explicitly
+ selected when multiple options are available.
+
+ Example:
+ A serial communication interface with a TX output pin and an RX input pin.
+
+ &pinctrl {
+ scif2_pins: serial2 {
+ pinmux = <RZA1_PINMUX(3, 0, 6)>, <RZA1_PINMUX(3, 2, 4)>;
+ };
+ };
+
+ Pin #0 on port #3 is configured as alternate function #6.
+ Pin #2 on port #3 is configured as alternate function #4.
+
+ Example 2:
+ I2C master: both SDA and SCL pins need bi-directional operations
+
+ &pinctrl {
+ i2c2_pins: i2c2 {
+ pinmux = <RZA1_PINMUX(1, 4, 1)>, <RZA1_PINMUX(1, 5, 1)>;
+ };
+ };
+
+ Pin #4 on port #1 is configured as alternate function #1.
+ Pin #5 on port #1 is configured as alternate function #1.
+ Both need to work in bi-directional mode, the driver manages this internally.
+
+ Example 3:
+ Multi-function timer input and output compare pins.
+ Configure TIOC0A as software driven input and TIOC0B as software driven
+ output.
+
+ &pinctrl {
+ tioc0_pins: tioc0 {
+ tioc0_input_pins {
+ pinmux = <RZA1_PINMUX(4, 0, 2)>;
+ input-enable;
+ };
+
+ tioc0_output_pins {
+ pinmux = <RZA1_PINMUX(4, 1, 1)>;
+ output-enable;
+ };
+ };
+ };
+
+ &tioc0 {
+ ...
+ pinctrl-0 = <&tioc0_pins>;
+ ...
+ };
+
+ Pin #0 on port #4 is configured as alternate function #2 with IO direction
+ specified by software as input.
+ Pin #1 on port #4 is configured as alternate function #1 with IO direction
+ specified by software as output.
+
+- GPIO controller sub-nodes:
+ Each port of the r7s72100 pin controller hardware is itself a GPIO controller.
+ Different SoCs have different numbers of available pins per port, but
+ generally speaking, each of them can be configured in GPIO ("port") mode
+ on this hardware.
+ Describe GPIO controllers using sub-nodes with the following properties.
+
+ Required properties:
+ - gpio-controller
+ empty property as defined by the GPIO bindings documentation.
+ - #gpio-cells
+ number of cells required to identify and configure a GPIO.
+ Shall be 2.
+ - gpio-ranges
+ Describes a GPIO controller specifying its specific pin base, the pin
+ base in the global pin numbering space, and the number of controlled
+ pins, as defined by the GPIO bindings documentation. Refer to
+ Documentation/devicetree/bindings/gpio/gpio.txt file for a more detailed
+ description.
+
+ Example:
+ A GPIO controller node, controlling 16 pins indexed from 0.
+ The GPIO controller base in the global pin indexing space is pin 48, thus
+ pins [0 - 15] on this controller map to pins [48 - 63] in the global pin
+ indexing space.
+
+ port3: gpio-3 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pinctrl 0 48 16>;
+ };
+
+ A device node that uses pins controlled by this GPIO controller shall
+ refer to it as follows:
+
+ led1 {
+ gpios = <&port3 10 GPIO_ACTIVE_LOW>;
+ };
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/cpus.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpus.txt
index f8cd2397aa04..d63ab1dec16d 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/cpus.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/cpus.txt
@@ -3,10 +3,10 @@ Power Architecture CPU Binding
Copyright 2013 Freescale Semiconductor Inc.
Power Architecture CPUs in Freescale SOCs are represented in device trees as
-per the definition in ePAPR.
+per the definition in the Devicetree Specification.
-In addition to the ePAPR definitions, the properties defined below may be
-present on CPU nodes.
+In addition to the Devicetree Specification definitions, the properties
+defined below may be present on CPU nodes.
PROPERTIES
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/l2cache.txt b/Documentation/devicetree/bindings/powerpc/fsl/l2cache.txt
index dc9bb3182525..8a70696395a7 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/l2cache.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/l2cache.txt
@@ -1,7 +1,7 @@
Freescale L2 Cache Controller
L2 cache is present in Freescale's QorIQ and QorIQ Qonverge platforms.
-The cache bindings explained below are ePAPR compliant
+The cache bindings explained below are Devicetree Specification compliant
Required Properties:
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/srio-rmu.txt b/Documentation/devicetree/bindings/powerpc/fsl/srio-rmu.txt
index b9a8a2bcfae7..0496ada4bba4 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/srio-rmu.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/srio-rmu.txt
@@ -124,8 +124,8 @@ Port-Write Unit:
A single IRQ that handles port-write conditions is
specified by this property. (Typically shared with error).
- Note: All other standard properties (see the ePAPR) are allowed
- but are optional.
+ Note: All other standard properties (see the Devicetree Specification)
+ are allowed but are optional.
Example:
rmu: rmu@d3000 {
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/srio.txt b/Documentation/devicetree/bindings/powerpc/fsl/srio.txt
index 07abf0f2f440..86ee6ea73754 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/srio.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/srio.txt
@@ -72,7 +72,8 @@ the following properties:
represents the LIODN associated with maintenance transactions
for the port.
-Note: All other standard properties (see ePAPR) are allowed but are optional.
+Note: All other standard properties (see the Devicetree Specification)
+are allowed but are optional.
Example:
diff --git a/Documentation/devicetree/bindings/property-units.txt b/Documentation/devicetree/bindings/property-units.txt
index 0849618a9df0..45ce054d844d 100644
--- a/Documentation/devicetree/bindings/property-units.txt
+++ b/Documentation/devicetree/bindings/property-units.txt
@@ -30,6 +30,7 @@ Electricity
-micro-ohms : micro Ohms
-microwatt-hours: micro Watt-hours
-microvolt : micro volts
+-picofarads : picofarads
Temperature
----------------------------------------
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt b/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt
new file mode 100644
index 000000000000..2aac1aa4123d
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/ti,keystone-rproc.txt
@@ -0,0 +1,133 @@
+TI Keystone DSP devices
+=======================
+
+The TI Keystone 2 family of SoCs usually has one or more (up to 8) TI DSP Core
+sub-systems that are used to offload some of the processor-intensive tasks or
+algorithms, in order to achieve various system-level goals.
+
+These processor sub-systems usually contain additional sub-modules like L1
+and/or L2 caches/SRAMs, an Interrupt Controller, an external memory controller,
+a dedicated local power/sleep controller etc. The DSP processor core in
+Keystone 2 SoCs is usually a TMS320C66x CorePac processor.
+
+DSP Device Node:
+================
+Each DSP Core sub-system is represented as a single DT node, and should also
+have an alias with the stem 'rproc' defined. Each node has a number of required
+or optional properties that enable the OS running on the host processor (ARM
+CorePac) to perform the device management of the remote processor and to
+communicate with the remote processor.
+
+Required properties:
+--------------------
+The following are the mandatory properties:
+
+- compatible: Should be one of the following,
+ "ti,k2hk-dsp" for DSPs on Keystone 2 66AK2H/K SoCs
+ "ti,k2l-dsp" for DSPs on Keystone 2 66AK2L SoCs
+ "ti,k2e-dsp" for DSPs on Keystone 2 66AK2E SoCs
+
+- reg: Should contain an entry for each value in 'reg-names'.
+ Each entry should have the memory region's start address
+ and the size of the region, the representation matching
+ the parent node's '#address-cells' and '#size-cells' values.
+
+- reg-names: Should contain strings with the following names, each
+ representing a specific internal memory region, and
+ should be defined in this order,
+ "l2sram", "l1pram", "l1dram"
+
+- clocks: Should contain the device's input clock, and should be
+ defined as per the bindings in,
+ Documentation/devicetree/bindings/clock/keystone-gate.txt
+
+- ti,syscon-dev: Should be a pair of the phandle to the Keystone Device
+ State Control node, and the register offset of the DSP
+ boot address register within that node's address space.
+
+- resets: Should contain the phandle to the reset controller node
+ managing the resets for this device, and a reset
+ specifier. Please refer to the following reset bindings
+ for the reset argument specifier as per SoC,
+ Documentation/devicetree/bindings/reset/ti-syscon-reset.txt
+ for 66AK2HK/66AK2L/66AK2E SoCs
+
+- interrupt-parent: Should contain a phandle to the Keystone 2 IRQ controller
+ IP node that is used by the ARM CorePac processor to
+ receive interrupts from the DSP remote processors. See
+ Documentation/devicetree/bindings/interrupt-controller/ti,keystone-irq.txt
+ for details.
+
+- interrupts: Should contain an entry for each value in 'interrupt-names'.
+ Each entry should have the interrupt source number used by
+ the remote processor to the host processor. The values should
+ follow the interrupt-specifier format as dictated by the
+ 'interrupt-parent' node. The purpose of each is as per the
+ description in the 'interrupt-names' property.
+
+- interrupt-names: Should contain strings with the following names, each
+ representing a specific interrupt,
+ "vring" - interrupt for virtio based IPC
+ "exception" - interrupt for exception notification
+
+- kick-gpios: Should specify the gpio device needed for the virtio IPC
+ stack. This will be used to interrupt the remote processor.
+ The gpio device to be used is as per the bindings in,
+ Documentation/devicetree/bindings/gpio/gpio-dsp-keystone.txt
+
+Optional properties:
+--------------------
+
+- memory-region: phandle to the reserved memory node to be associated
+ with the remoteproc device. The reserved memory node
+ can be a CMA memory node, and should be defined as
+ per the bindings in
+ Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
+
+
+Example:
+--------
+ /* 66AK2H/K DSP aliases */
+ aliases {
+ rproc0 = &dsp0;
+ rproc1 = &dsp1;
+ rproc2 = &dsp2;
+ rproc3 = &dsp3;
+ rproc4 = &dsp4;
+ rproc5 = &dsp5;
+ rproc6 = &dsp6;
+ rproc7 = &dsp7;
+ };
+
+ /* 66AK2H/K DSP memory node */
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ dsp_common_memory: dsp-common-memory@81f800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x00000008 0x1f800000 0x00000000 0x800000>;
+ reusable;
+ };
+ };
+
+ /* 66AK2H/K DSP node */
+ soc {
+ dsp0: dsp@10800000 {
+ compatible = "ti,k2hk-dsp";
+ reg = <0x10800000 0x00100000>,
+ <0x10e00000 0x00008000>,
+ <0x10f00000 0x00008000>;
+ reg-names = "l2sram", "l1pram", "l1dram";
+ clocks = <&clkgem0>;
+ ti,syscon-dev = <&devctrl 0x40>;
+ resets = <&pscrst 0>;
+ interrupt-parent = <&kirq0>;
+ interrupts = <0 8>;
+ interrupt-names = "vring", "exception";
+ kick-gpios = <&dspgpio0 27 0>;
+ memory-region = <&dsp_common_memory>;
+ };
+
+ };
diff --git a/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt b/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
index 3da0ebdba8d9..16291f2a4688 100644
--- a/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
+++ b/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
@@ -68,6 +68,9 @@ Linux implementation note:
- If a "linux,cma-default" property is present, then Linux will use the
region for the default pool of the contiguous memory allocator.
+- If a "linux,dma-default" property is present, then Linux will use the
+ region for the default pool of the consistent DMA allocator.
+
Device node references to reserved memory
-----------------------------------------
Regions in the /reserved-memory node may be referenced by other device
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
new file mode 100644
index 000000000000..50fc20c6ce91
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
@@ -0,0 +1,73 @@
+Qualcomm RPM GLINK binding
+
+This binding describes the Qualcomm RPM GLINK, a FIFO-based mechanism for
+communication with the Resource Power Management system on various Qualcomm
+platforms.
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "qcom,glink-rpm"
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: should specify the IRQ used by the remote processor to
+ signal this processor about communication related events
+
+- qcom,rpm-msg-ram:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: handle to RPM message memory resource
+
+- mboxes:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: reference to the "rpm_hlos" mailbox in APCS, as described
+ in mailbox/mailbox.txt
+
+= GLINK DEVICES
+Each subnode of the GLINK node represents a function tied to a virtual
+communication channel. The names of the nodes are not important. The properties
+of these nodes are defined by the individual bindings for the specific function
+- but must contain the following property:
+
+- qcom,glink-channels:
+ Usage: required
+ Value type: <stringlist>
+ Definition: a list of channels tied to this function, used for matching
+ the function to a set of virtual channels
+
+= EXAMPLE
+The following example represents the GLINK RPM node on a MSM8996 device, with
+the function for the "rpm_requests" channel defined, which is used for
+regulators and root clocks.
+
+ apcs_glb: mailbox@9820000 {
+ compatible = "qcom,msm8996-apcs-hmss-global";
+ reg = <0x9820000 0x1000>;
+
+ #mbox-cells = <1>;
+ };
+
+ rpm_msg_ram: memory@68000 {
+ compatible = "qcom,rpm-msg-ram";
+ reg = <0x68000 0x6000>;
+ };
+
+ rpm-glink {
+ compatible = "qcom,glink-rpm";
+
+ interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+
+ mboxes = <&apcs_glb 0>;
+
+ rpm-requests {
+ compatible = "qcom,rpm-msm8996";
+ qcom,glink-channels = "rpm_requests";
+
+ ...
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/audio-graph-card.txt b/Documentation/devicetree/bindings/sound/audio-graph-card.txt
new file mode 100644
index 000000000000..6e6720aa33f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/audio-graph-card.txt
@@ -0,0 +1,129 @@
+Audio Graph Card:
+
+Audio Graph Card specifies audio DAI connections of SoC <-> codec.
+It is based on common bindings for device graphs.
+see ${LINUX}/Documentation/devicetree/bindings/graph.txt
+
+Basically, the Audio Graph Card properties are the same as those of Simple Card.
+see ${LINUX}/Documentation/devicetree/bindings/sound/simple-card.txt
+
+The following properties are the same as for Simple-Card.
+
+- label
+- widgets
+- routing
+- dai-format
+- frame-master
+- bitclock-master
+- bitclock-inversion
+- frame-inversion
+- dai-tdm-slot-num
+- dai-tdm-slot-width
+- clocks / system-clock-frequency
+
+Required properties:
+
+- compatible : "audio-graph-card";
+- dais : list of CPU DAI port{s}
+
+Optional properties:
+- pa-gpios: GPIO used to control external amplifier.
+
+Example: Single DAI case
+
+ sound_card {
+ compatible = "audio-graph-card";
+
+ dais = <&cpu_port>;
+ };
+
+ dai-controller {
+ ...
+ cpu_port: port {
+ cpu_endpoint: endpoint {
+ remote-endpoint = <&codec_endpoint>;
+
+ dai-format = "left_j";
+ ...
+ };
+ };
+ };
+
+ audio-codec {
+ ...
+ port {
+ codec_endpoint: endpoint {
+ remote-endpoint = <&cpu_endpoint>;
+ };
+ };
+ };
+
+Example: Multi DAI case
+
+ sound-card {
+ compatible = "audio-graph-card";
+
+ label = "sound-card";
+
+ dais = <&cpu_port0
+ &cpu_port1
+ &cpu_port2>;
+ };
+
+ audio-codec@0 {
+ ...
+ port {
+ codec0_endpoint: endpoint {
+ remote-endpoint = <&cpu_endpoint0>;
+ };
+ };
+ };
+
+ audio-codec@1 {
+ ...
+ port {
+ codec1_endpoint: endpoint {
+ remote-endpoint = <&cpu_endpoint1>;
+ };
+ };
+ };
+
+ audio-codec@2 {
+ ...
+ port {
+ codec2_endpoint: endpoint {
+ remote-endpoint = <&cpu_endpoint2>;
+ };
+ };
+ };
+
+ dai-controller {
+ ...
+ ports {
+ cpu_port0: port@0 {
+ cpu_endpoint0: endpoint {
+ remote-endpoint = <&codec0_endpoint>;
+
+ dai-format = "left_j";
+ ...
+ };
+ };
+ cpu_port1: port@1 {
+ cpu_endpoint1: endpoint {
+ remote-endpoint = <&codec1_endpoint>;
+
+ dai-format = "i2s";
+ ...
+ };
+ };
+ cpu_port2: port@2 {
+ cpu_endpoint2: endpoint {
+ remote-endpoint = <&codec2_endpoint>;
+
+ dai-format = "i2s";
+ ...
+ };
+ };
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt b/Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt
new file mode 100644
index 000000000000..8b8afe9fcb31
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt
@@ -0,0 +1,122 @@
+Audio-Graph-SCU-Card:
+
+Audio-Graph-SCU-Card is "Audio-Graph-Card" + "ALSA DPCM".
+
+It is based on common bindings for device graphs.
+see ${LINUX}/Documentation/devicetree/bindings/graph.txt
+
+Basically, the Audio-Graph-SCU-Card properties are the same as those of
+Simple-Card / Simple-SCU-Card / Audio-Graph-Card.
+see ${LINUX}/Documentation/devicetree/bindings/sound/simple-card.txt
+ ${LINUX}/Documentation/devicetree/bindings/sound/simple-scu-card.txt
+ ${LINUX}/Documentation/devicetree/bindings/sound/audio-graph-card.txt
+
+The following properties are the same as for Simple-Card / Audio-Graph-Card.
+
+- label
+- dai-format
+- frame-master
+- bitclock-master
+- bitclock-inversion
+- frame-inversion
+- dai-tdm-slot-num
+- dai-tdm-slot-width
+- clocks / system-clock-frequency
+
+The following properties are the same as for Simple-SCU-Card.
+
+- convert-rate
+- convert-channels
+- prefix
+- routing
+
+Required properties:
+
+- compatible : "audio-graph-scu-card";
+- dais : list of CPU DAI port{s}
+
+Example 1. Sampling Rate Conversion
+
+ sound_card {
+ compatible = "audio-graph-scu-card";
+
+ label = "sound-card";
+ prefix = "codec";
+ routing = "codec Playback", "DAI0 Playback",
+ "codec Playback", "DAI1 Playback";
+ convert-rate = <48000>;
+
+ dais = <&cpu_port>;
+ };
+
+ audio-codec {
+ ...
+
+ port {
+ codec_endpoint: endpoint {
+ remote-endpoint = <&cpu_endpoint>;
+ };
+ };
+ };
+
+ dai-controller {
+ ...
+ cpu_port: port {
+ cpu_endpoint: endpoint {
+ remote-endpoint = <&codec_endpoint>;
+
+ dai-format = "left_j";
+ ...
+ };
+ };
+ };
+
+Example 2. 2 CPU 1 Codec (Mixing)
+
+ sound_card {
+ compatible = "audio-graph-scu-card";
+
+ label = "sound-card";
+ prefix = "codec";
+ routing = "codec Playback", "DAI0 Playback",
+ "codec Playback", "DAI1 Playback";
+ convert-rate = <48000>;
+
+ dais = <&cpu_port0
+ &cpu_port1>;
+ };
+
+ audio-codec {
+ ...
+
+ port {
+ codec_endpoint0: endpoint {
+ remote-endpoint = <&cpu_endpoint0>;
+ };
+ codec_endpoint1: endpoint {
+ remote-endpoint = <&cpu_endpoint1>;
+ };
+ };
+ };
+
+ dai-controller {
+ ...
+ ports {
+ cpu_port0: port {
+ cpu_endpoint0: endpoint {
+ remote-endpoint = <&codec_endpoint0>;
+
+ dai-format = "left_j";
+ ...
+ };
+ };
+ cpu_port1: port {
+ cpu_endpoint1: endpoint {
+ remote-endpoint = <&codec_endpoint1>;
+
+ dai-format = "left_j";
+ ...
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/cs35l35.txt b/Documentation/devicetree/bindings/sound/cs35l35.txt
index 016b768bc722..77ee75c39233 100644
--- a/Documentation/devicetree/bindings/sound/cs35l35.txt
+++ b/Documentation/devicetree/bindings/sound/cs35l35.txt
@@ -16,6 +16,9 @@ Required properties:
(See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
for further information relating to interrupt properties)
+ - cirrus,boost-ind-nanohenry: Inductor value for the boost converter. The
+ value is in nH and can be 1000nH, 1200nH, 1500nH, or 2200nH.
+
Optional properties:
- reset-gpios : gpio used to reset the amplifier
diff --git a/Documentation/devicetree/bindings/sound/nau8825.txt b/Documentation/devicetree/bindings/sound/nau8825.txt
index d3374231c871..2f5e973285a6 100644
--- a/Documentation/devicetree/bindings/sound/nau8825.txt
+++ b/Documentation/devicetree/bindings/sound/nau8825.txt
@@ -69,6 +69,8 @@ Optional properties:
- nuvoton,jack-insert-debounce: number from 0 to 7 that sets debounce time to 2^(n+2) ms
- nuvoton,jack-eject-debounce: number from 0 to 7 that sets debounce time to 2^(n+2) ms
+ - nuvoton,crosstalk-bypass: bypass the crosstalk detection function if set.
+
- clocks: list of phandle and clock specifier pairs according to common clock bindings for the
clocks described in clock-names
- clock-names: should include "mclk" for the MCLK master clock
@@ -96,6 +98,7 @@ Example:
nuvoton,short-key-debounce = <2>;
nuvoton,jack-insert-debounce = <7>;
nuvoton,jack-eject-debounce = <7>;
+ nuvoton,crosstalk-bypass;
clock-names = "mclk";
clocks = <&tegra_car TEGRA210_CLK_CLK_OUT_2>;
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index 15a7316e4c91..7246bb268bf9 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -83,11 +83,11 @@ SRC can convert [xx]Hz to [yy]Hz. Then, it has below 2 modes
** Asynchronous mode
------------------
-You need to use "renesas,rsrc-card" sound card for it.
+You need to use "simple-scu-audio-card" sound card for it.
example)
sound {
- compatible = "renesas,rsrc-card";
+ compatible = "simple-scu-audio-card";
...
/*
* SRC Asynchronous mode setting
@@ -97,12 +97,12 @@ example)
* Input 48kHz data will be converted to
* system specified Hz
*/
- convert-rate = <48000>;
+ simple-audio-card,convert-rate = <48000>;
...
- cpu {
+ simple-audio-card,cpu {
sound-dai = <&rcar_sound>;
};
- codec {
+ simple-audio-card,codec {
...
};
};
@@ -141,23 +141,23 @@ For more detail information, see below
${LINUX}/sound/soc/sh/rcar/ctu.c
- comment of header
-You need to use "renesas,rsrc-card" sound card for it.
+You need to use "simple-scu-audio-card" sound card for it.
example)
sound {
- compatible = "renesas,rsrc-card";
+ compatible = "simple-scu-audio-card";
...
/*
* CTU setting
* All input data will be converted to 2ch
* as output data
*/
- convert-channels = <2>;
+ simple-audio-card,convert-channels = <2>;
...
- cpu {
+ simple-audio-card,cpu {
sound-dai = <&rcar_sound>;
};
- codec {
+ simple-audio-card,codec {
...
};
};
@@ -190,22 +190,22 @@ and these sounds will be merged by MIX.
aplay -D plughw:0,0 xxxx.wav &
aplay -D plughw:0,1 yyyy.wav
-You need to use "renesas,rsrc-card" sound card for it.
+You need to use "simple-scu-audio-card" sound card for it.
Ex)
[MEM] -> [SRC1] -> [CTU02] -+-> [MIX0] -> [DVC0] -> [SSI0]
|
[MEM] -> [SRC2] -> [CTU03] -+
sound {
- compatible = "renesas,rsrc-card";
+ compatible = "simple-scu-audio-card";
...
- cpu@0 {
+ simple-audio-card,cpu@0 {
sound-dai = <&rcar_sound 0>;
};
- cpu@1 {
+ simple-audio-card,cpu@1 {
sound-dai = <&rcar_sound 1>;
};
- codec {
+ simple-audio-card,codec {
...
};
};
@@ -368,6 +368,10 @@ Required properties:
see below for detail.
- #sound-dai-cells : it must be 0 if your system is using single DAI
it must be 1 if your system is using multi DAI
+- clocks : References to SSI/SRC/MIX/CTU/DVC/AUDIO_CLK clocks.
+- clock-names : List of necessary clock names.
+ "ssi-all", "ssi.X", "src.X", "mix.X", "ctu.X",
+ "dvc.X", "clk_a", "clk_b", "clk_c", "clk_i"
Optional properties:
- #clock-cells : it must be 0 if your system has audio_clkout
@@ -375,6 +379,9 @@ Optional properties:
- clock-frequency : for all audio_clkout0/1/2/3
- clkout-lr-asynchronous : boolean property. It indicates that audio_clkoutn
is asynchronous with the LR clock.
+- resets : References to SSI resets.
+- reset-names : List of valid reset names.
+ "ssi-all", "ssi.X"
SSI subnode properties:
- interrupts : Should contain SSI interrupt for PIO transfer
diff --git a/Documentation/devicetree/bindings/sound/rockchip,pdm.txt b/Documentation/devicetree/bindings/sound/rockchip,pdm.txt
new file mode 100644
index 000000000000..921729de7346
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rockchip,pdm.txt
@@ -0,0 +1,39 @@
+* Rockchip PDM controller
+
+Required properties:
+
+- compatible: "rockchip,pdm"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- dmas: DMA specifiers for rx dma. See the DMA client binding,
+ Documentation/devicetree/bindings/dma/dma.txt
+- dma-names: should include "rx".
+- clocks: a list of phandle + clock-specifier pairs, one for each entry in clock-names.
+- clock-names: should contain the following:
+ - "pdm_hclk": clock for PDM BUS
+ - "pdm_clk" : clock for PDM controller
+- pinctrl-names: Must contain a "default" entry.
+- pinctrl-N: One property must exist for each entry in
+ pinctrl-names. See ../pinctrl/pinctrl-bindings.txt
+ for details of the property values.
+
+Example for rk3328 PDM controller:
+
+pdm: pdm@ff040000 {
+ compatible = "rockchip,pdm";
+ reg = <0x0 0xff040000 0x0 0x1000>;
+ clocks = <&clk_pdm>, <&clk_gates28 0>;
+ clock-names = "pdm_clk", "pdm_hclk";
+ dmas = <&pdma 16>;
+ #dma-cells = <1>;
+ dma-names = "rx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&pdmm0_clk
+ &pdmm0_fsync
+ &pdmm0_sdi0
+ &pdmm0_sdi1
+ &pdmm0_sdi2
+ &pdmm0_sdi3>;
+ pinctrl-1 = <&pdmm0_sleep>;
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/sound/rockchip-spdif.txt b/Documentation/devicetree/bindings/sound/rockchip-spdif.txt
index 11046429a118..4706b96d450b 100644
--- a/Documentation/devicetree/bindings/sound/rockchip-spdif.txt
+++ b/Documentation/devicetree/bindings/sound/rockchip-spdif.txt
@@ -9,7 +9,9 @@ Required properties:
- compatible: should be one of the following:
- "rockchip,rk3066-spdif"
- "rockchip,rk3188-spdif"
+ - "rockchip,rk3228-spdif"
- "rockchip,rk3288-spdif"
+ - "rockchip,rk3328-spdif"
- "rockchip,rk3366-spdif"
- "rockchip,rk3368-spdif"
- "rockchip,rk3399-spdif"
diff --git a/Documentation/devicetree/bindings/sound/samsung,odroid.txt b/Documentation/devicetree/bindings/sound/samsung,odroid.txt
index c1ac70cb0afb..c30934dd975b 100644
--- a/Documentation/devicetree/bindings/sound/samsung,odroid.txt
+++ b/Documentation/devicetree/bindings/sound/samsung,odroid.txt
@@ -5,11 +5,6 @@ Required properties:
- compatible - "samsung,odroidxu3-audio" - for Odroid XU3 board,
"samsung,odroidxu4-audio" - for Odroid XU4 board
- model - the user-visible name of this sound complex
- - 'cpu' subnode with a 'sound-dai' property containing the phandle of the I2S
- controller
- - 'codec' subnode with a 'sound-dai' property containing list of phandles
- to the CODEC nodes, first entry must be corresponding to the MAX98090
- CODEC and the second entry must be the phandle of the HDMI IP block node
- clocks - should contain entries matching clock names in the clock-names
property
- clock-names - should contain following entries:
@@ -32,12 +27,18 @@ Required properties:
For Odroid XU4:
no entries
+Required sub-nodes:
+
+ - 'cpu' subnode with a 'sound-dai' property containing the phandle of the I2S
+ controller
+ - 'codec' subnode with a 'sound-dai' property containing a list of phandles
+ to the CODEC nodes; the first entry must correspond to the MAX98090
+ CODEC and the second entry must be the phandle of the HDMI IP block node
+
Example:
sound {
compatible = "samsung,odroidxu3-audio";
- samsung,cpu-dai = <&i2s0>;
- samsung,codec-dai = <&max98090>;
model = "Odroid-XU3";
samsung,audio-routing =
"Headphone Jack", "HPL",
diff --git a/Documentation/devicetree/bindings/sound/simple-scu-card.txt b/Documentation/devicetree/bindings/sound/simple-scu-card.txt
index d6fe47ed09af..327d229a51b2 100644
--- a/Documentation/devicetree/bindings/sound/simple-scu-card.txt
+++ b/Documentation/devicetree/bindings/sound/simple-scu-card.txt
@@ -1,35 +1,29 @@
-ASoC simple SCU Sound Card
+ASoC Simple SCU Sound Card
-Simple-Card specifies audio DAI connections of SoC <-> codec.
+Simple SCU Sound Card is "Simple Sound Card" + "ALSA DPCM".
+For example, you can use this driver if you need sampling rate conversion,
+mixing, etc.
Required properties:
- compatible : "simple-scu-audio-card"
"renesas,rsrc-card"
-
Optional properties:
-- simple-audio-card,name : User specified audio sound card name, one string
- property.
-- simple-audio-card,cpu : CPU sub-node
-- simple-audio-card,codec : CODEC sub-node
+- simple-audio-card,name : see simple-audio-card.txt
+- simple-audio-card,cpu : see simple-audio-card.txt
+- simple-audio-card,codec : see simple-audio-card.txt
Optional subnode properties:
-- simple-audio-card,format : CPU/CODEC common audio format.
- "i2s", "right_j", "left_j" , "dsp_a"
- "dsp_b", "ac97", "pdm", "msb", "lsb"
-- simple-audio-card,frame-master : Indicates dai-link frame master.
- phandle to a cpu or codec subnode.
-- simple-audio-card,bitclock-master : Indicates dai-link bit clock master.
- phandle to a cpu or codec subnode.
-- simple-audio-card,bitclock-inversion : bool property. Add this if the
- dai-link uses bit clock inversion.
-- simple-audio-card,frame-inversion : bool property. Add this if the
- dai-link uses frame clock inversion.
+- simple-audio-card,format : see simple-audio-card.txt
+- simple-audio-card,frame-master : see simple-audio-card.txt
+- simple-audio-card,bitclock-master : see simple-audio-card.txt
+- simple-audio-card,bitclock-inversion : see simple-audio-card.txt
+- simple-audio-card,frame-inversion : see simple-audio-card.txt
- simple-audio-card,convert-rate : platform specified sampling rate convert
- simple-audio-card,convert-channels : platform specified converted channel size (2 - 8 ch)
-- simple-audio-card,prefix : see audio-routing
+- simple-audio-card,prefix : see routing
- simple-audio-card,routing : A list of the connections between audio components.
Each entry is a pair of strings, the first being the connection's sink,
the second being the connection's source. Valid names for sources.
@@ -38,32 +32,23 @@ Optional subnode properties:
Required CPU/CODEC subnodes properties:
-- sound-dai : phandle and port of CPU/CODEC
+- sound-dai : see simple-audio-card.txt
Optional CPU/CODEC subnodes properties:
-- clocks / system-clock-frequency : specify subnode's clock if needed.
- it can be specified via "clocks" if system has
- clock node (= common clock), or "system-clock-frequency"
- (if system doens't support common clock)
- If a clock is specified, it is
- enabled with clk_prepare_enable()
- in dai startup() and disabled with
- clk_disable_unprepare() in dai
- shutdown().
+- clocks / system-clock-frequency : see simple-audio-card.txt
-Example 1. Sampling Rate Covert
+Example 1. Sampling Rate Conversion
sound {
compatible = "simple-scu-audio-card";
simple-audio-card,name = "rsnd-ak4643";
simple-audio-card,format = "left_j";
- simple-audio-card,format = "left_j";
simple-audio-card,bitclock-master = <&sndcodec>;
simple-audio-card,frame-master = <&sndcodec>;
- simple-audio-card,convert-rate = <48000>; /* see audio_clk_a */
+ simple-audio-card,convert-rate = <48000>;
simple-audio-card,prefix = "ak4642";
simple-audio-card,routing = "ak4642 Playback", "DAI0 Playback",
@@ -79,20 +64,18 @@ sound {
};
};
-Example 2. 2 CPU 1 Codec
+Example 2. 2 CPU 1 Codec (Mixing)
sound {
- compatible = "renesas,rsrc-card";
-
- card-name = "rsnd-ak4643";
- format = "left_j";
- bitclock-master = <&dpcmcpu>;
- frame-master = <&dpcmcpu>;
+ compatible = "simple-scu-audio-card";
- convert-rate = <48000>; /* see audio_clk_a */
+ simple-audio-card,name = "rsnd-ak4643";
+ simple-audio-card,format = "left_j";
+ simple-audio-card,bitclock-master = <&dpcmcpu>;
+ simple-audio-card,frame-master = <&dpcmcpu>;
- audio-prefix = "ak4642";
- audio-routing = "ak4642 Playback", "DAI0 Playback",
+ simple-audio-card,prefix = "ak4642";
+ simple-audio-card,routing = "ak4642 Playback", "DAI0 Playback",
"ak4642 Playback", "DAI1 Playback";
dpcmcpu: cpu@0 {
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-i2s.txt b/Documentation/devicetree/bindings/sound/st,stm32-i2s.txt
new file mode 100644
index 000000000000..4bda52042402
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/st,stm32-i2s.txt
@@ -0,0 +1,62 @@
+STMicroelectronics STM32 SPI/I2S Controller
+
+The SPI/I2S block supports I2S/PCM protocols when configured in I2S mode.
+Only some SPI instances support I2S.
+
+Required properties:
+ - compatible: Must be "st,stm32h7-i2s"
+ - reg: Offset and length of the device's register set.
+ - interrupts: Must contain the interrupt line id.
+ - clocks: Must contain phandle and clock specifier pairs for each entry
+ in clock-names.
+ - clock-names: Must contain "i2sclk", "pclk", "x8k" and "x11k".
+ "i2sclk": clock which feeds the internal clock generator
+ "pclk": clock which feeds the peripheral bus interface
+ "x8k": I2S parent clock for sampling rates multiple of 8kHz.
+ "x11k": I2S parent clock for sampling rates multiple of 11.025kHz.
+ - dmas: DMA specifiers for tx and rx dma.
+ See Documentation/devicetree/bindings/dma/stm32-dma.txt.
+ - dma-names: Identifier for each DMA request line. Must be "tx" and "rx".
+ - pinctrl-names: should contain only value "default"
+ - pinctrl-0: see Documentation/devicetree/bindings/pinctrl/pinctrl-stm32.txt
+
+Optional properties:
+ - resets: Reference to a reset controller asserting the reset of the I2S controller
+
+The device node should contain one 'port' child node with one child 'endpoint'
+node, according to the bindings defined in Documentation/devicetree/bindings/
+graph.txt.
+
+Example:
+sound_card {
+ compatible = "audio-graph-card";
+ dais = <&i2s2_port>;
+};
+
+i2s2: audio-controller@40003800 {
+ compatible = "st,stm32h7-i2s";
+ reg = <0x40003800 0x400>;
+ interrupts = <36>;
+ clocks = <&rcc PCLK1>, <&rcc SPI2_CK>, <&rcc PLL1_Q>, <&rcc PLL2_P>;
+ clock-names = "pclk", "i2sclk", "x8k", "x11k";
+ dmas = <&dmamux2 2 39 0x400 0x1>,
+ <&dmamux2 3 40 0x400 0x1>;
+ dma-names = "rx", "tx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2s2>;
+
+ i2s2_port: port@0 {
+ cpu_endpoint: endpoint {
+ remote-endpoint = <&codec_endpoint>;
+ format = "i2s";
+ };
+ };
+};
+
+audio-codec {
+ codec_port: port@0 {
+ codec_endpoint: endpoint {
+ remote-endpoint = <&cpu_endpoint>;
+ };
+ };
+};
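
The two parent clocks exist because the 48 kHz and 44.1 kHz rate families need
different PLL sources. A hypothetical helper (not taken from the STM32 driver)
that picks the parent by the names defined above might look like:

    #include <linux/clk.h>
    #include <linux/device.h>

    /* 44.1 kHz-family rates divide evenly by 11025; everything else is
     * assumed to belong to the 8 kHz family. */
    static struct clk *foo_i2s_pick_parent(struct device *dev,
                                           unsigned int rate)
    {
            if (rate % 11025 == 0)
                    return devm_clk_get(dev, "x11k");

            return devm_clk_get(dev, "x8k");
    }
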
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
index c59a3d779e06..f1c5ae59e7c9 100644
--- a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
+++ b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
@@ -6,7 +6,7 @@ The SAI contains two independent audio sub-blocks. Each sub-block has
its own clock generator and I/O lines controller.
Required properties:
- - compatible: Should be "st,stm32f4-sai"
+ - compatible: Should be "st,stm32f4-sai" or "st,stm32h7-sai"
- reg: Base address and size of SAI common register set.
- clocks: Must contain phandle and clock specifier pairs for each entry
in clock-names.
@@ -36,6 +36,10 @@ SAI subnodes required properties:
- pinctrl-names: should contain only value "default"
- pinctrl-0: see Documentation/devicetree/bindings/pinctrl/pinctrl-stm32.txt
+The device node should contain one 'port' child node with one child 'endpoint'
+node, according to the bindings defined in Documentation/devicetree/bindings/
+graph.txt.
+
Example:
sound_card {
compatible = "audio-graph-card";
@@ -43,38 +47,29 @@ sound_card {
};
sai1: sai1@40015800 {
- compatible = "st,stm32f4-sai";
+ compatible = "st,stm32h7-sai";
#address-cells = <1>;
#size-cells = <1>;
- ranges;
+ ranges = <0 0x40015800 0x400>;
reg = <0x40015800 0x4>;
- clocks = <&rcc 1 CLK_SAIQ_PDIV>, <&rcc 1 CLK_I2SQ_PDIV>;
+ clocks = <&rcc PLL1_Q>, <&rcc PLL2_P>;
clock-names = "x8k", "x11k";
interrupts = <87>;
- sai1b: audio-controller@40015824 {
- #sound-dai-cells = <0>;
- compatible = "st,stm32-sai-sub-b";
- reg = <0x40015824 0x1C>;
- clocks = <&rcc 1 CLK_SAI2>;
+ sai1a: audio-controller@40015804 {
+ compatible = "st,stm32-sai-sub-a";
+ reg = <0x4 0x1C>;
+ clocks = <&rcc SAI1_CK>;
clock-names = "sai_ck";
- dmas = <&dma2 5 0 0x400 0x0>;
+ dmas = <&dmamux1 1 87 0x400 0x0>;
dma-names = "tx";
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_sai1b>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
+ pinctrl-0 = <&pinctrl_sai1a>;
- sai1b_port: port@0 {
- reg = <0>;
- cpu_endpoint: endpoint {
- remote-endpoint = <&codec_endpoint>;
- audio-graph-card,format = "i2s";
- audio-graph-card,bitclock-master = <&codec_endpoint>;
- audio-graph-card,frame-master = <&codec_endpoint>;
- };
+ sai1b_port: port {
+ cpu_endpoint: endpoint {
+ remote-endpoint = <&codec_endpoint>;
+ format = "i2s";
};
};
};
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt b/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt
new file mode 100644
index 000000000000..33826f2459fa
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt
@@ -0,0 +1,56 @@
+STMicroelectronics STM32 S/PDIF receiver (SPDIFRX).
+
+The SPDIFRX peripheral is designed to receive an S/PDIF flow compliant with
+IEC-60958 and IEC-61937.
+
+Required properties:
+ - compatible: should be "st,stm32h7-spdifrx"
+ - reg: cpu DAI IP base address and size
+ - clocks: must contain an entry for kclk (used as S/PDIF signal reference)
+ - clock-names: must contain "kclk"
+ - interrupts: cpu DAI interrupt line
+ - dmas: DMA specifiers for the audio data DMA and the IEC control flow DMA
+ See STM32 DMA bindings, Documentation/devicetree/bindings/dma/stm32-dma.txt
+ - dma-names: two DMA channels must be defined, "rx" and "rx-ctrl"
+
+Optional properties:
+ - resets: Reference to a reset controller asserting the SPDIFRX reset
+
+The device node should contain one 'port' child node with one child 'endpoint'
+node, according to the bindings defined in Documentation/devicetree/bindings/
+graph.txt.
+
+Example:
+spdifrx: spdifrx@40004000 {
+ compatible = "st,stm32h7-spdifrx";
+ reg = <0x40004000 0x400>;
+ clocks = <&rcc SPDIFRX_CK>;
+ clock-names = "kclk";
+ interrupts = <97>;
+ dmas = <&dmamux1 2 93 0x400 0x0>,
+ <&dmamux1 3 94 0x400 0x0>;
+ dma-names = "rx", "rx-ctrl";
+ pinctrl-0 = <&spdifrx_pins>;
+ pinctrl-names = "default";
+
+ spdifrx_port: port {
+ cpu_endpoint: endpoint {
+ remote-endpoint = <&codec_endpoint>;
+ };
+ };
+};
+
+spdif_in: spdif-in {
+ compatible = "linux,spdif-dir";
+
+ codec_port: port {
+ codec_endpoint: endpoint {
+ remote-endpoint = <&cpu_endpoint>;
+ };
+ };
+};
+
+soundcard {
+ compatible = "audio-graph-card";
+ dais = <&spdifrx_port>;
+};
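
As a rough sketch of how the two DMA names are consumed (assumed code, not
copied from the driver), the receiver needs one channel for the audio samples
and one for the IEC control/status flow:

    #include <linux/device.h>
    #include <linux/dmaengine.h>
    #include <linux/err.h>

    static int foo_spdifrx_request_dma(struct device *dev)
    {
            struct dma_chan *rx, *rx_ctrl;

            rx = dma_request_chan(dev, "rx");               /* audio data */
            if (IS_ERR(rx))
                    return PTR_ERR(rx);

            rx_ctrl = dma_request_chan(dev, "rx-ctrl");     /* IEC control flow */
            if (IS_ERR(rx_ctrl)) {
                    dma_release_channel(rx);
                    return PTR_ERR(rx_ctrl);
            }

            dma_release_channel(rx_ctrl);
            dma_release_channel(rx);
            return 0;
    }
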
diff --git a/Documentation/devicetree/bindings/sound/sun4i-codec.txt b/Documentation/devicetree/bindings/sound/sun4i-codec.txt
index 3863531d1e6d..2d4e10deb6f4 100644
--- a/Documentation/devicetree/bindings/sound/sun4i-codec.txt
+++ b/Documentation/devicetree/bindings/sound/sun4i-codec.txt
@@ -7,6 +7,7 @@ Required properties:
- "allwinner,sun7i-a20-codec"
- "allwinner,sun8i-a23-codec"
- "allwinner,sun8i-h3-codec"
+ - "allwinner,sun8i-v3s-codec"
- reg: must contain the registers location and length
- interrupts: must contain the codec interrupt
- dmas: DMA channels for tx and rx dma. See the DMA client binding,
@@ -25,6 +26,7 @@ Required properties for the following compatibles:
- "allwinner,sun6i-a31-codec"
- "allwinner,sun8i-a23-codec"
- "allwinner,sun8i-h3-codec"
+ - "allwinner,sun8i-v3s-codec"
- resets: phandle to the reset control for this device
- allwinner,audio-routing: A list of the connections between audio components.
Each entry is a pair of strings, the first being the
@@ -34,15 +36,15 @@ Required properties for the following compatibles:
Audio pins on the SoC:
"HP"
"HPCOM"
- "LINEIN"
- "LINEOUT" (not on sun8i-a23)
+ "LINEIN" (not on sun8i-v3s)
+ "LINEOUT" (not on sun8i-a23 or sun8i-v3s)
"MIC1"
- "MIC2"
+ "MIC2" (not on sun8i-v3s)
"MIC3" (sun6i-a31 only)
Microphone biases from the SoC:
"HBIAS"
- "MBIAS"
+ "MBIAS" (not on sun8i-v3s)
Board connectors:
"Headphone"
@@ -55,6 +57,7 @@ Required properties for the following compatibles:
Required properties for the following compatibles:
- "allwinner,sun8i-a23-codec"
- "allwinner,sun8i-h3-codec"
+ - "allwinner,sun8i-v3s-codec"
- allwinner,codec-analog-controls: A phandle to the codec analog controls
block in the PRCM.
diff --git a/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt b/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt
index 779b735781ba..1b6e7c4e50ab 100644
--- a/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt
+++ b/Documentation/devicetree/bindings/sound/sun8i-codec-analog.txt
@@ -4,6 +4,7 @@ Required properties:
- compatible: must be one of the following compatibles:
- "allwinner,sun8i-a23-codec-analog"
- "allwinner,sun8i-h3-codec-analog"
+ - "allwinner,sun8i-v3s-codec-analog"
Required properties if not a sub-node of the PRCM node:
- reg: must contain the registers location and length
diff --git a/Documentation/devicetree/bindings/sound/zte,zx-aud96p22.txt b/Documentation/devicetree/bindings/sound/zte,zx-aud96p22.txt
new file mode 100644
index 000000000000..41bb1040eb71
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/zte,zx-aud96p22.txt
@@ -0,0 +1,24 @@
+ZTE ZX AUD96P22 Audio Codec
+
+Required properties:
+ - compatible: Must be "zte,zx-aud96p22"
+ - #sound-dai-cells: Should be 0
+ - reg: I2C bus slave address of AUD96P22
+
+Example:
+
+ i2c0: i2c@1486000 {
+ compatible = "zte,zx296718-i2c";
+ reg = <0x01486000 0x1000>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&audiocrm AUDIO_I2C0_WCLK>;
+ clock-frequency = <1600000>;
+
+ aud96p22: codec@22 {
+ compatible = "zte,zx-aud96p22";
+ #sound-dai-cells = <0>;
+ reg = <0x22>;
+ };
+ };
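
A minimal, hypothetical I2C driver skeleton matching this binding is shown
below; the names are made up and this is not the real ZX AUD96P22 driver, it
only illustrates how the compatible string and I2C address from the example
are consumed:

    #include <linux/i2c.h>
    #include <linux/module.h>
    #include <linux/of.h>

    static int foo_aud96p22_probe(struct i2c_client *i2c,
                                  const struct i2c_device_id *id)
    {
            dev_info(&i2c->dev, "codec at address 0x%02x\n", i2c->addr);
            return 0;
    }

    static const struct i2c_device_id foo_aud96p22_ids[] = {
            { "foo-aud96p22", 0 },
            { }
    };
    MODULE_DEVICE_TABLE(i2c, foo_aud96p22_ids);

    static const struct of_device_id foo_aud96p22_dt_ids[] = {
            { .compatible = "zte,zx-aud96p22" },
            { }
    };
    MODULE_DEVICE_TABLE(of, foo_aud96p22_dt_ids);

    static struct i2c_driver foo_aud96p22_driver = {
            .driver = {
                    .name = "foo-aud96p22",
                    .of_match_table = foo_aud96p22_dt_ids,
            },
            .probe = foo_aud96p22_probe,
            .id_table = foo_aud96p22_ids,
    };
    module_i2c_driver(foo_aud96p22_driver);
    MODULE_LICENSE("GPL");
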
diff --git a/Documentation/devicetree/bindings/thermal/brcm,ns-thermal b/Documentation/devicetree/bindings/thermal/brcm,ns-thermal.txt
index 68e047170039..68e047170039 100644
--- a/Documentation/devicetree/bindings/thermal/brcm,ns-thermal
+++ b/Documentation/devicetree/bindings/thermal/brcm,ns-thermal.txt
diff --git a/Documentation/devicetree/bindings/usb/exynos-usb.txt b/Documentation/devicetree/bindings/usb/exynos-usb.txt
index 9b4dbe3b2acc..78ebebb66dad 100644
--- a/Documentation/devicetree/bindings/usb/exynos-usb.txt
+++ b/Documentation/devicetree/bindings/usb/exynos-usb.txt
@@ -92,6 +92,8 @@ Required properties:
parent's address space
- clocks: Clock IDs array as required by the controller.
- clock-names: names of clocks corresponding to IDs in the clock property
+ - vdd10-supply: 1.0V power supply
+ - vdd33-supply: 3.0V/3.3V power supply
Sub-nodes:
The dwc3 core should be added as subnode to Exynos dwc3 glue.
@@ -107,6 +109,8 @@ Example:
#address-cells = <1>;
#size-cells = <1>;
ranges;
+ vdd10-supply = <&ldo11_reg>;
+ vdd33-supply = <&ldo9_reg>;
dwc3 {
compatible = "synopsys,dwc3";
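
A rough sketch (assumed names, not the exact dwc3-exynos code) of how the glue
driver would pick up and enable the two new supplies named above:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/regulator/consumer.h>

    static int foo_dwc3_get_supplies(struct device *dev)
    {
            struct regulator *vdd10, *vdd33;
            int ret;

            vdd10 = devm_regulator_get(dev, "vdd10");   /* vdd10-supply */
            if (IS_ERR(vdd10))
                    return PTR_ERR(vdd10);

            vdd33 = devm_regulator_get(dev, "vdd33");   /* vdd33-supply */
            if (IS_ERR(vdd33))
                    return PTR_ERR(vdd33);

            ret = regulator_enable(vdd10);
            if (ret)
                    return ret;

            ret = regulator_enable(vdd33);
            if (ret)
                    regulator_disable(vdd10);

            return ret;
    }
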
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 286f4e1e111f..52cfd5f97b6b 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -29,6 +29,7 @@ andestech Andes Technology Corporation
apm Applied Micro Circuits Corporation (APM)
aptina Aptina Imaging
arasan Arasan Chip Systems
+arctic Arctic Sand
aries Aries Embedded GmbH
arm ARM Ltd.
armadeus ARMadeus Systems SARL
@@ -45,6 +46,7 @@ avia avia semiconductor
avic Shanghai AVIC Optoelectronics Co., Ltd.
axentia Axentia Technologies AB
axis Axis Communications AB
+bananapi BIPAI KEJI LIMITED
boe BOE Technology Group Co., Ltd.
bosch Bosch Sensortec GmbH
boundary Boundary Devices Inc.
@@ -159,6 +161,8 @@ iom Iomega Corporation
isee ISEE 2007 S.L.
isil Intersil
issi Integrated Silicon Solutions Inc.
+itead ITEAD Intelligent Systems Co.Ltd
+iwave iWave Systems Technologies Pvt. Ltd.
jdi Japan Display Inc.
jedec JEDEC Solid State Technology Association
karo Ka-Ro electronics GmbH
@@ -177,6 +181,7 @@ lg LG Corporation
libretech Shenzhen Libre Technology Co., Ltd
licheepi Lichee Pi
linaro Linaro Limited
+linksys Belkin International, Inc. (Linksys)
linux Linux-specific binding
lltc Linear Technology Corporation
lsi LSI Corp. (LSI Logic)
@@ -269,8 +274,10 @@ renesas Renesas Electronics Corporation
richtek Richtek Technology Corporation
ricoh Ricoh Co. Ltd.
rikomagic Rikomagic Tech Corp. Ltd
+riscv RISC-V Foundation
rockchip Fuzhou Rockchip Electronics Co., Ltd
rohm ROHM Semiconductor Co., Ltd
+roofull Shenzhen Roofull Technology Co, Ltd
samsung Samsung Semiconductor
samtec Samtec/Softing company
sandisk Sandisk Corporation
diff --git a/Documentation/devicetree/booting-without-of.txt b/Documentation/devicetree/booting-without-of.txt
index 280d283304bb..fb740445199f 100644
--- a/Documentation/devicetree/booting-without-of.txt
+++ b/Documentation/devicetree/booting-without-of.txt
@@ -1413,7 +1413,7 @@ Optional property:
from DMA operations originating from the bus. It provides a means of
defining a mapping or translation between the physical address space of
the bus and the physical address space of the parent of the bus.
- (for more information see ePAPR specification)
+ (for more information see the Devicetree Specification)
* DMA Bus child
Optional property:
diff --git a/Documentation/devicetree/overlay-notes.txt b/Documentation/devicetree/overlay-notes.txt
index d418a6ce9812..eb7f2685fda1 100644
--- a/Documentation/devicetree/overlay-notes.txt
+++ b/Documentation/devicetree/overlay-notes.txt
@@ -3,8 +3,7 @@ Device Tree Overlay Notes
This document describes the implementation of the in-kernel
device tree overlay functionality residing in drivers/of/overlay.c and is a
-companion document to Documentation/devicetree/dt-object-internal.txt[1] &
-Documentation/devicetree/dynamic-resolution-notes.txt[2]
+companion document to Documentation/devicetree/dynamic-resolution-notes.txt[1]
How overlays work
-----------------
@@ -16,8 +15,7 @@ Since the kernel mainly deals with devices, any new device node that result
in an active device should have that device created, while if the device node is either
disabled or removed altogether, the affected device should be deregistered.
-Lets take an example where we have a foo board with the following base tree
-which is taken from [1].
+Let's take an example where we have a foo board with the following base tree:
---- foo.dts -----------------------------------------------------------------
/* FOO platform */
@@ -36,7 +34,7 @@ which is taken from [1].
};
---- foo.dts -----------------------------------------------------------------
-The overlay bar.dts, when loaded (and resolved as described in [2]) should
+The overlay bar.dts, when loaded (and resolved as described in [1]) should
---- bar.dts -----------------------------------------------------------------
/plugin/; /* allow undefined label references and record them */
diff --git a/Documentation/devicetree/usage-model.txt b/Documentation/devicetree/usage-model.txt
index 2b6b3d3f0388..33a8aaac02a8 100644
--- a/Documentation/devicetree/usage-model.txt
+++ b/Documentation/devicetree/usage-model.txt
@@ -387,7 +387,7 @@ static void __init harmony_init_machine(void)
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
-"simple-bus" is defined in the ePAPR 1.0 specification as a property
+"simple-bus" is defined in the Devicetree Specification as a property
meaning a simple memory mapped bus, so the of_platform_populate() code
could be written to just assume simple-bus compatible nodes will
always be traversed. However, we pass it in as an argument so that
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 3cf1acebc4ee..7c94ab50afed 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -43,6 +43,7 @@ available subsections can be seen below.
80211/index
uio-howto
firmware/index
+ pinctl
misc_devices
.. only:: subproject and html
diff --git a/Documentation/pinctrl.txt b/Documentation/driver-api/pinctl.rst
index f2af35f6d6b2..48f15b4f9d3e 100644
--- a/Documentation/pinctrl.txt
+++ b/Documentation/driver-api/pinctl.rst
@@ -1,4 +1,7 @@
+===============================
PINCTRL (PIN CONTROL) subsystem
+===============================
+
This document outlines the pin control subsystem in Linux
This subsystem deals with:
@@ -33,7 +36,7 @@ When a PIN CONTROLLER is instantiated, it will register a descriptor to the
pin control framework, and this descriptor contains an array of pin descriptors
describing the pins handled by this specific pin controller.
-Here is an example of a PGA (Pin Grid Array) chip seen from underneath:
+Here is an example of a PGA (Pin Grid Array) chip seen from underneath::
A B C D E F G H
@@ -54,39 +57,40 @@ Here is an example of a PGA (Pin Grid Array) chip seen from underneath:
1 o o o o o o o o
To register a pin controller and name all the pins on this package we can do
-this in our driver:
-
-#include <linux/pinctrl/pinctrl.h>
-
-const struct pinctrl_pin_desc foo_pins[] = {
- PINCTRL_PIN(0, "A8"),
- PINCTRL_PIN(1, "B8"),
- PINCTRL_PIN(2, "C8"),
- ...
- PINCTRL_PIN(61, "F1"),
- PINCTRL_PIN(62, "G1"),
- PINCTRL_PIN(63, "H1"),
-};
+this in our driver::
-static struct pinctrl_desc foo_desc = {
- .name = "foo",
- .pins = foo_pins,
- .npins = ARRAY_SIZE(foo_pins),
- .owner = THIS_MODULE,
-};
+ #include <linux/pinctrl/pinctrl.h>
-int __init foo_probe(void)
-{
- int error;
+ const struct pinctrl_pin_desc foo_pins[] = {
+ PINCTRL_PIN(0, "A8"),
+ PINCTRL_PIN(1, "B8"),
+ PINCTRL_PIN(2, "C8"),
+ ...
+ PINCTRL_PIN(61, "F1"),
+ PINCTRL_PIN(62, "G1"),
+ PINCTRL_PIN(63, "H1"),
+ };
+
+ static struct pinctrl_desc foo_desc = {
+ .name = "foo",
+ .pins = foo_pins,
+ .npins = ARRAY_SIZE(foo_pins),
+ .owner = THIS_MODULE,
+ };
+
+ int __init foo_probe(void)
+ {
+ int error;
- struct pinctrl_dev *pctl;
+ struct pinctrl_dev *pctl;
- error = pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
- if (error)
- return error;
+ error = pinctrl_register_and_init(&foo_desc, <PARENT>,
+ NULL, &pctl);
+ if (error)
+ return error;
- return pinctrl_enable(pctl);
-}
+ return pinctrl_enable(pctl);
+ }
To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and
selected drivers, you need to select them from your machine's Kconfig entry,
@@ -105,7 +109,7 @@ the pin controller.
For a padring with 467 pads, as opposed to actual pins, I used an enumeration
like this, walking around the edge of the chip, which seems to be industry
-standard too (all these pads had names, too):
+standard too (all these pads had names, too)::
0 ..... 104
@@ -128,64 +132,64 @@ on { 0, 8, 16, 24 }, and a group of pins dealing with an I2C interface on pins
on { 24, 25 }.
These two groups are presented to the pin control subsystem by implementing
-some generic pinctrl_ops like this:
-
-#include <linux/pinctrl/pinctrl.h>
-
-struct foo_group {
- const char *name;
- const unsigned int *pins;
- const unsigned num_pins;
-};
-
-static const unsigned int spi0_pins[] = { 0, 8, 16, 24 };
-static const unsigned int i2c0_pins[] = { 24, 25 };
-
-static const struct foo_group foo_groups[] = {
- {
- .name = "spi0_grp",
- .pins = spi0_pins,
- .num_pins = ARRAY_SIZE(spi0_pins),
- },
+some generic pinctrl_ops like this::
+
+ #include <linux/pinctrl/pinctrl.h>
+
+ struct foo_group {
+ const char *name;
+ const unsigned int *pins;
+ const unsigned num_pins;
+ };
+
+ static const unsigned int spi0_pins[] = { 0, 8, 16, 24 };
+ static const unsigned int i2c0_pins[] = { 24, 25 };
+
+ static const struct foo_group foo_groups[] = {
+ {
+ .name = "spi0_grp",
+ .pins = spi0_pins,
+ .num_pins = ARRAY_SIZE(spi0_pins),
+ },
+ {
+ .name = "i2c0_grp",
+ .pins = i2c0_pins,
+ .num_pins = ARRAY_SIZE(i2c0_pins),
+ },
+ };
+
+
+ static int foo_get_groups_count(struct pinctrl_dev *pctldev)
{
- .name = "i2c0_grp",
- .pins = i2c0_pins,
- .num_pins = ARRAY_SIZE(i2c0_pins),
- },
-};
-
-
-static int foo_get_groups_count(struct pinctrl_dev *pctldev)
-{
- return ARRAY_SIZE(foo_groups);
-}
+ return ARRAY_SIZE(foo_groups);
+ }
-static const char *foo_get_group_name(struct pinctrl_dev *pctldev,
- unsigned selector)
-{
- return foo_groups[selector].name;
-}
+ static const char *foo_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+ {
+ return foo_groups[selector].name;
+ }
-static int foo_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
- const unsigned **pins,
- unsigned *num_pins)
-{
- *pins = (unsigned *) foo_groups[selector].pins;
- *num_pins = foo_groups[selector].num_pins;
- return 0;
-}
+ static int foo_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
+ const unsigned **pins,
+ unsigned *num_pins)
+ {
+ *pins = (unsigned *) foo_groups[selector].pins;
+ *num_pins = foo_groups[selector].num_pins;
+ return 0;
+ }
-static struct pinctrl_ops foo_pctrl_ops = {
- .get_groups_count = foo_get_groups_count,
- .get_group_name = foo_get_group_name,
- .get_group_pins = foo_get_group_pins,
-};
+ static struct pinctrl_ops foo_pctrl_ops = {
+ .get_groups_count = foo_get_groups_count,
+ .get_group_name = foo_get_group_name,
+ .get_group_pins = foo_get_group_pins,
+ };
-static struct pinctrl_desc foo_desc = {
- ...
- .pctlops = &foo_pctrl_ops,
-};
+ static struct pinctrl_desc foo_desc = {
+ ...
+ .pctlops = &foo_pctrl_ops,
+ };
The pin control subsystem will call the .get_groups_count() function to
determine the total number of legal selectors, then it will call the other functions
@@ -213,62 +217,62 @@ The format and meaning of the configuration parameter, PLATFORM_X_PULL_UP
above, is entirely defined by the pin controller driver.
The pin configuration driver implements callbacks for changing pin
-configuration in the pin controller ops like this:
+configuration in the pin controller ops like this::
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinconf.h>
-#include "platform_x_pindefs.h"
+ #include <linux/pinctrl/pinctrl.h>
+ #include <linux/pinctrl/pinconf.h>
+ #include "platform_x_pindefs.h"
-static int foo_pin_config_get(struct pinctrl_dev *pctldev,
- unsigned offset,
- unsigned long *config)
-{
- struct my_conftype conf;
+ static int foo_pin_config_get(struct pinctrl_dev *pctldev,
+ unsigned offset,
+ unsigned long *config)
+ {
+ struct my_conftype conf;
- ... Find setting for pin @ offset ...
+ ... Find setting for pin @ offset ...
- *config = (unsigned long) conf;
-}
+ *config = (unsigned long) conf;
+
+ return 0;
+ }
-static int foo_pin_config_set(struct pinctrl_dev *pctldev,
- unsigned offset,
- unsigned long config)
-{
- struct my_conftype *conf = (struct my_conftype *) config;
+ static int foo_pin_config_set(struct pinctrl_dev *pctldev,
+ unsigned offset,
+ unsigned long config)
+ {
+ struct my_conftype *conf = (struct my_conftype *) config;
- switch (conf) {
- case PLATFORM_X_PULL_UP:
- ...
+ switch (conf) {
+ case PLATFORM_X_PULL_UP:
+ ...
+ }
}
}
-}
-static int foo_pin_config_group_get (struct pinctrl_dev *pctldev,
- unsigned selector,
- unsigned long *config)
-{
- ...
-}
+ static int foo_pin_config_group_get (struct pinctrl_dev *pctldev,
+ unsigned selector,
+ unsigned long *config)
+ {
+ ...
+ }
-static int foo_pin_config_group_set (struct pinctrl_dev *pctldev,
- unsigned selector,
- unsigned long config)
-{
- ...
-}
+ static int foo_pin_config_group_set (struct pinctrl_dev *pctldev,
+ unsigned selector,
+ unsigned long config)
+ {
+ ...
+ }
-static struct pinconf_ops foo_pconf_ops = {
- .pin_config_get = foo_pin_config_get,
- .pin_config_set = foo_pin_config_set,
- .pin_config_group_get = foo_pin_config_group_get,
- .pin_config_group_set = foo_pin_config_group_set,
-};
+ static struct pinconf_ops foo_pconf_ops = {
+ .pin_config_get = foo_pin_config_get,
+ .pin_config_set = foo_pin_config_set,
+ .pin_config_group_get = foo_pin_config_group_get,
+ .pin_config_group_set = foo_pin_config_group_set,
+ };
-/* Pin config operations are handled by some pin controller */
-static struct pinctrl_desc foo_desc = {
- ...
- .confops = &foo_pconf_ops,
-};
+ /* Pin config operations are handled by some pin controller */
+ static struct pinctrl_desc foo_desc = {
+ ...
+ .confops = &foo_pconf_ops,
+ };
Since some controllers have special logic for handling entire groups of pins
they can exploit the special whole-group pin control function. The
@@ -296,35 +300,35 @@ controller handles control of a certain GPIO pin. Since a single pin controller
may be muxing several GPIO ranges (typically SoCs that have one set of pins,
but internally several GPIO silicon blocks, each modelled as a struct
gpio_chip) any number of GPIO ranges can be added to a pin controller instance
-like this:
-
-struct gpio_chip chip_a;
-struct gpio_chip chip_b;
-
-static struct pinctrl_gpio_range gpio_range_a = {
- .name = "chip a",
- .id = 0,
- .base = 32,
- .pin_base = 32,
- .npins = 16,
- .gc = &chip_a;
-};
-
-static struct pinctrl_gpio_range gpio_range_b = {
- .name = "chip b",
- .id = 0,
- .base = 48,
- .pin_base = 64,
- .npins = 8,
- .gc = &chip_b;
-};
-
-{
- struct pinctrl_dev *pctl;
- ...
- pinctrl_add_gpio_range(pctl, &gpio_range_a);
- pinctrl_add_gpio_range(pctl, &gpio_range_b);
-}
+like this::
+
+ struct gpio_chip chip_a;
+ struct gpio_chip chip_b;
+
+ static struct pinctrl_gpio_range gpio_range_a = {
+ .name = "chip a",
+ .id = 0,
+ .base = 32,
+ .pin_base = 32,
+ .npins = 16,
+ .gc = &chip_a;
+ };
+
+ static struct pinctrl_gpio_range gpio_range_b = {
+ .name = "chip b",
+ .id = 0,
+ .base = 48,
+ .pin_base = 64,
+ .npins = 8,
+ .gc = &chip_b;
+ };
+
+ {
+ struct pinctrl_dev *pctl;
+ ...
+ pinctrl_add_gpio_range(pctl, &gpio_range_a);
+ pinctrl_add_gpio_range(pctl, &gpio_range_b);
+ }
So this complex system has one pin controller handling two different
GPIO chips. "chip a" has 16 pins and "chip b" has 8 pins. The "chip a" and
@@ -348,25 +352,26 @@ chip b:
The above examples assume the mapping between the GPIOs and pins is
linear. If the mapping is sparse or haphazard, an array of arbitrary pin
-numbers can be encoded in the range like this:
+numbers can be encoded in the range like this::
-static const unsigned range_pins[] = { 14, 1, 22, 17, 10, 8, 6, 2 };
+ static const unsigned range_pins[] = { 14, 1, 22, 17, 10, 8, 6, 2 };
-static struct pinctrl_gpio_range gpio_range = {
- .name = "chip",
- .id = 0,
- .base = 32,
- .pins = &range_pins,
- .npins = ARRAY_SIZE(range_pins),
- .gc = &chip;
-};
+ static struct pinctrl_gpio_range gpio_range = {
+ .name = "chip",
+ .id = 0,
+ .base = 32,
+ .pins = &range_pins,
+ .npins = ARRAY_SIZE(range_pins),
+ .gc = &chip;
+ };
In this case the pin_base property will be ignored. If the name of a pin
group is known, the pins and npins elements of the above structure can be
initialised using the function pinctrl_get_group_pins(), e.g. for pin
-group "foo":
+group "foo"::
-pinctrl_get_group_pins(pctl, "foo", &gpio_range.pins, &gpio_range.npins);
+ pinctrl_get_group_pins(pctl, "foo", &gpio_range.pins,
+ &gpio_range.npins);
When GPIO-specific functions in the pin control subsystem are called, these
ranges will be used to look up the appropriate pin controller by inspecting
@@ -405,7 +410,7 @@ we usually mean a way of soldering or wiring the package into an electronic
system, even though the framework makes it possible to also change the function
at runtime.
-Here is an example of a PGA (Pin Grid Array) chip seen from underneath:
+Here is an example of a PGA (Pin Grid Array) chip seen from underneath::
A B C D E F G H
+---+
@@ -519,12 +524,12 @@ Definitions:
In the example case we can define that this particular machine shall
use device spi0 with pinmux function fspi0 group gspi0 and i2c0 on function
fi2c0 group gi2c0, on the primary pin controller, we get mappings
- like these:
+ like these::
- {
- {"map-spi0", spi0, pinctrl0, fspi0, gspi0},
- {"map-i2c0", i2c0, pinctrl0, fi2c0, gi2c0}
- }
+ {
+ {"map-spi0", spi0, pinctrl0, fspi0, gspi0},
+ {"map-i2c0", i2c0, pinctrl0, fi2c0, gi2c0}
+ }
Every map must be assigned a state name, pin controller, device and
function. The group is not compulsory - if it is omitted the first group
@@ -578,155 +583,155 @@ some certain registers to activate a certain mux setting for a certain pin.
A simple driver for the above example will work by setting bits 0, 1, 2, 3 or 4
into some register named MUX to select a certain function with a certain
-group of pins would work something like this:
-
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
-
-struct foo_group {
- const char *name;
- const unsigned int *pins;
- const unsigned num_pins;
-};
-
-static const unsigned spi0_0_pins[] = { 0, 8, 16, 24 };
-static const unsigned spi0_1_pins[] = { 38, 46, 54, 62 };
-static const unsigned i2c0_pins[] = { 24, 25 };
-static const unsigned mmc0_1_pins[] = { 56, 57 };
-static const unsigned mmc0_2_pins[] = { 58, 59 };
-static const unsigned mmc0_3_pins[] = { 60, 61, 62, 63 };
-
-static const struct foo_group foo_groups[] = {
- {
- .name = "spi0_0_grp",
- .pins = spi0_0_pins,
- .num_pins = ARRAY_SIZE(spi0_0_pins),
- },
+group of pins would work something like this::
+
+ #include <linux/pinctrl/pinctrl.h>
+ #include <linux/pinctrl/pinmux.h>
+
+ struct foo_group {
+ const char *name;
+ const unsigned int *pins;
+ const unsigned num_pins;
+ };
+
+ static const unsigned spi0_0_pins[] = { 0, 8, 16, 24 };
+ static const unsigned spi0_1_pins[] = { 38, 46, 54, 62 };
+ static const unsigned i2c0_pins[] = { 24, 25 };
+ static const unsigned mmc0_1_pins[] = { 56, 57 };
+ static const unsigned mmc0_2_pins[] = { 58, 59 };
+ static const unsigned mmc0_3_pins[] = { 60, 61, 62, 63 };
+
+ static const struct foo_group foo_groups[] = {
+ {
+ .name = "spi0_0_grp",
+ .pins = spi0_0_pins,
+ .num_pins = ARRAY_SIZE(spi0_0_pins),
+ },
+ {
+ .name = "spi0_1_grp",
+ .pins = spi0_1_pins,
+ .num_pins = ARRAY_SIZE(spi0_1_pins),
+ },
+ {
+ .name = "i2c0_grp",
+ .pins = i2c0_pins,
+ .num_pins = ARRAY_SIZE(i2c0_pins),
+ },
+ {
+ .name = "mmc0_1_grp",
+ .pins = mmc0_1_pins,
+ .num_pins = ARRAY_SIZE(mmc0_1_pins),
+ },
+ {
+ .name = "mmc0_2_grp",
+ .pins = mmc0_2_pins,
+ .num_pins = ARRAY_SIZE(mmc0_2_pins),
+ },
+ {
+ .name = "mmc0_3_grp",
+ .pins = mmc0_3_pins,
+ .num_pins = ARRAY_SIZE(mmc0_3_pins),
+ },
+ };
+
+
+ static int foo_get_groups_count(struct pinctrl_dev *pctldev)
{
- .name = "spi0_1_grp",
- .pins = spi0_1_pins,
- .num_pins = ARRAY_SIZE(spi0_1_pins),
- },
- {
- .name = "i2c0_grp",
- .pins = i2c0_pins,
- .num_pins = ARRAY_SIZE(i2c0_pins),
- },
+ return ARRAY_SIZE(foo_groups);
+ }
+
+ static const char *foo_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
{
- .name = "mmc0_1_grp",
- .pins = mmc0_1_pins,
- .num_pins = ARRAY_SIZE(mmc0_1_pins),
- },
+ return foo_groups[selector].name;
+ }
+
+ static int foo_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned ** const pins,
+ unsigned * const num_pins)
{
- .name = "mmc0_2_grp",
- .pins = mmc0_2_pins,
- .num_pins = ARRAY_SIZE(mmc0_2_pins),
- },
+ *pins = (unsigned *) foo_groups[selector].pins;
+ *num_pins = foo_groups[selector].num_pins;
+ return 0;
+ }
+
+ static struct pinctrl_ops foo_pctrl_ops = {
+ .get_groups_count = foo_get_groups_count,
+ .get_group_name = foo_get_group_name,
+ .get_group_pins = foo_get_group_pins,
+ };
+
+ struct foo_pmx_func {
+ const char *name;
+ const char * const *groups;
+ const unsigned num_groups;
+ };
+
+ static const char * const spi0_groups[] = { "spi0_0_grp", "spi0_1_grp" };
+ static const char * const i2c0_groups[] = { "i2c0_grp" };
+ static const char * const mmc0_groups[] = { "mmc0_1_grp", "mmc0_2_grp",
+ "mmc0_3_grp" };
+
+ static const struct foo_pmx_func foo_functions[] = {
+ {
+ .name = "spi0",
+ .groups = spi0_groups,
+ .num_groups = ARRAY_SIZE(spi0_groups),
+ },
+ {
+ .name = "i2c0",
+ .groups = i2c0_groups,
+ .num_groups = ARRAY_SIZE(i2c0_groups),
+ },
+ {
+ .name = "mmc0",
+ .groups = mmc0_groups,
+ .num_groups = ARRAY_SIZE(mmc0_groups),
+ },
+ };
+
+ static int foo_get_functions_count(struct pinctrl_dev *pctldev)
{
- .name = "mmc0_3_grp",
- .pins = mmc0_3_pins,
- .num_pins = ARRAY_SIZE(mmc0_3_pins),
- },
-};
-
-
-static int foo_get_groups_count(struct pinctrl_dev *pctldev)
-{
- return ARRAY_SIZE(foo_groups);
-}
-
-static const char *foo_get_group_name(struct pinctrl_dev *pctldev,
- unsigned selector)
-{
- return foo_groups[selector].name;
-}
-
-static int foo_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned ** const pins,
- unsigned * const num_pins)
-{
- *pins = (unsigned *) foo_groups[selector].pins;
- *num_pins = foo_groups[selector].num_pins;
- return 0;
-}
-
-static struct pinctrl_ops foo_pctrl_ops = {
- .get_groups_count = foo_get_groups_count,
- .get_group_name = foo_get_group_name,
- .get_group_pins = foo_get_group_pins,
-};
-
-struct foo_pmx_func {
- const char *name;
- const char * const *groups;
- const unsigned num_groups;
-};
-
-static const char * const spi0_groups[] = { "spi0_0_grp", "spi0_1_grp" };
-static const char * const i2c0_groups[] = { "i2c0_grp" };
-static const char * const mmc0_groups[] = { "mmc0_1_grp", "mmc0_2_grp",
- "mmc0_3_grp" };
-
-static const struct foo_pmx_func foo_functions[] = {
+ return ARRAY_SIZE(foo_functions);
+ }
+
+ static const char *foo_get_fname(struct pinctrl_dev *pctldev, unsigned selector)
{
- .name = "spi0",
- .groups = spi0_groups,
- .num_groups = ARRAY_SIZE(spi0_groups),
- },
+ return foo_functions[selector].name;
+ }
+
+ static int foo_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
{
- .name = "i2c0",
- .groups = i2c0_groups,
- .num_groups = ARRAY_SIZE(i2c0_groups),
- },
+ *groups = foo_functions[selector].groups;
+ *num_groups = foo_functions[selector].num_groups;
+ return 0;
+ }
+
+ static int foo_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
{
- .name = "mmc0",
- .groups = mmc0_groups,
- .num_groups = ARRAY_SIZE(mmc0_groups),
- },
-};
-
-static int foo_get_functions_count(struct pinctrl_dev *pctldev)
-{
- return ARRAY_SIZE(foo_functions);
-}
-
-static const char *foo_get_fname(struct pinctrl_dev *pctldev, unsigned selector)
-{
- return foo_functions[selector].name;
-}
-
-static int foo_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
- const char * const **groups,
- unsigned * const num_groups)
-{
- *groups = foo_functions[selector].groups;
- *num_groups = foo_functions[selector].num_groups;
- return 0;
-}
-
-static int foo_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
-{
- u8 regbit = (1 << selector + group);
-
- writeb((readb(MUX)|regbit), MUX)
- return 0;
-}
-
-static struct pinmux_ops foo_pmxops = {
- .get_functions_count = foo_get_functions_count,
- .get_function_name = foo_get_fname,
- .get_function_groups = foo_get_groups,
- .set_mux = foo_set_mux,
- .strict = true,
-};
-
-/* Pinmux operations are handled by some pin controller */
-static struct pinctrl_desc foo_desc = {
- ...
- .pctlops = &foo_pctrl_ops,
- .pmxops = &foo_pmxops,
-};
+ u8 regbit = (1 << (selector + group));
+
+ writeb((readb(MUX)|regbit), MUX);
+ return 0;
+ }
+
+ static struct pinmux_ops foo_pmxops = {
+ .get_functions_count = foo_get_functions_count,
+ .get_function_name = foo_get_fname,
+ .get_function_groups = foo_get_groups,
+ .set_mux = foo_set_mux,
+ .strict = true,
+ };
+
+ /* Pinmux operations are handled by some pin controller */
+ static struct pinctrl_desc foo_desc = {
+ ...
+ .pctlops = &foo_pctrl_ops,
+ .pmxops = &foo_pmxops,
+ };
In the example activating muxing 0 and 1 at the same time setting bits
0 and 1, uses one pin in common so they would collide.
@@ -809,9 +814,9 @@ for a device.
The GPIO portions of a pin and its relation to a certain pin controller
configuration and muxing logic can be constructed in several ways. Here
-are two examples:
+are two examples::
-(A)
+ (A)
pin config
logic regs
| +- SPI
@@ -840,7 +845,9 @@ simultaneous access to the same pin from GPIO and pin multiplexing
consumers on hardware of this type. The pinctrl driver should set this flag
accordingly.
-(B)
+::
+
+ (B)
pin config
logic regs
@@ -911,52 +918,55 @@ has to be handled by the <linux/gpio.h> interface. Instead view this as
a certain pin config setting. Look in e.g. <linux/pinctrl/pinconf-generic.h>
and you find this in the documentation:
- PIN_CONFIG_OUTPUT: this will configure the pin in output, use argument
+ PIN_CONFIG_OUTPUT:
+ this will configure the pin in output, use argument
1 to indicate high level, argument 0 to indicate low level.
So it is perfectly possible to push a pin into "GPIO mode" and drive the
line low as part of the usual pin control map. So for example your UART
-driver may look like this:
+driver may look like this::
-#include <linux/pinctrl/consumer.h>
+ #include <linux/pinctrl/consumer.h>
-struct pinctrl *pinctrl;
-struct pinctrl_state *pins_default;
-struct pinctrl_state *pins_sleep;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_sleep;
-pins_default = pinctrl_lookup_state(uap->pinctrl, PINCTRL_STATE_DEFAULT);
-pins_sleep = pinctrl_lookup_state(uap->pinctrl, PINCTRL_STATE_SLEEP);
+ pins_default = pinctrl_lookup_state(uap->pinctrl, PINCTRL_STATE_DEFAULT);
+ pins_sleep = pinctrl_lookup_state(uap->pinctrl, PINCTRL_STATE_SLEEP);
-/* Normal mode */
-retval = pinctrl_select_state(pinctrl, pins_default);
-/* Sleep mode */
-retval = pinctrl_select_state(pinctrl, pins_sleep);
+ /* Normal mode */
+ retval = pinctrl_select_state(pinctrl, pins_default);
+ /* Sleep mode */
+ retval = pinctrl_select_state(pinctrl, pins_sleep);
And your machine configuration may look like this:
--------------------------------------------------
-static unsigned long uart_default_mode[] = {
- PIN_CONF_PACKED(PIN_CONFIG_DRIVE_PUSH_PULL, 0),
-};
-
-static unsigned long uart_sleep_mode[] = {
- PIN_CONF_PACKED(PIN_CONFIG_OUTPUT, 0),
-};
-
-static struct pinctrl_map pinmap[] __initdata = {
- PIN_MAP_MUX_GROUP("uart", PINCTRL_STATE_DEFAULT, "pinctrl-foo",
- "u0_group", "u0"),
- PIN_MAP_CONFIGS_PIN("uart", PINCTRL_STATE_DEFAULT, "pinctrl-foo",
- "UART_TX_PIN", uart_default_mode),
- PIN_MAP_MUX_GROUP("uart", PINCTRL_STATE_SLEEP, "pinctrl-foo",
- "u0_group", "gpio-mode"),
- PIN_MAP_CONFIGS_PIN("uart", PINCTRL_STATE_SLEEP, "pinctrl-foo",
- "UART_TX_PIN", uart_sleep_mode),
-};
-
-foo_init(void) {
- pinctrl_register_mappings(pinmap, ARRAY_SIZE(pinmap));
-}
+::
+
+ static unsigned long uart_default_mode[] = {
+ PIN_CONF_PACKED(PIN_CONFIG_DRIVE_PUSH_PULL, 0),
+ };
+
+ static unsigned long uart_sleep_mode[] = {
+ PIN_CONF_PACKED(PIN_CONFIG_OUTPUT, 0),
+ };
+
+ static struct pinctrl_map pinmap[] __initdata = {
+ PIN_MAP_MUX_GROUP("uart", PINCTRL_STATE_DEFAULT, "pinctrl-foo",
+ "u0_group", "u0"),
+ PIN_MAP_CONFIGS_PIN("uart", PINCTRL_STATE_DEFAULT, "pinctrl-foo",
+ "UART_TX_PIN", uart_default_mode),
+ PIN_MAP_MUX_GROUP("uart", PINCTRL_STATE_SLEEP, "pinctrl-foo",
+ "u0_group", "gpio-mode"),
+ PIN_MAP_CONFIGS_PIN("uart", PINCTRL_STATE_SLEEP, "pinctrl-foo",
+ "UART_TX_PIN", uart_sleep_mode),
+ };
+
+ foo_init(void) {
+ pinctrl_register_mappings(pinmap, ARRAY_SIZE(pinmap));
+ }
Here the pins we want to control are in the "u0_group" and there is some
function called "u0" that can be enabled on this group of pins, and then
@@ -985,7 +995,7 @@ API.
Board/machine configuration
-==================================
+===========================
Boards and machines define how a certain complete running system is put
together, including how GPIOs and devices are muxed, how regulators are
@@ -994,33 +1004,33 @@ part of this.
A pin controller configuration for a machine looks pretty much like a simple
regulator configuration, so for the example array above we want to enable i2c
-and spi on the second function mapping:
-
-#include <linux/pinctrl/machine.h>
-
-static const struct pinctrl_map mapping[] __initconst = {
- {
- .dev_name = "foo-spi.0",
- .name = PINCTRL_STATE_DEFAULT,
- .type = PIN_MAP_TYPE_MUX_GROUP,
- .ctrl_dev_name = "pinctrl-foo",
- .data.mux.function = "spi0",
- },
- {
- .dev_name = "foo-i2c.0",
- .name = PINCTRL_STATE_DEFAULT,
- .type = PIN_MAP_TYPE_MUX_GROUP,
- .ctrl_dev_name = "pinctrl-foo",
- .data.mux.function = "i2c0",
- },
- {
- .dev_name = "foo-mmc.0",
- .name = PINCTRL_STATE_DEFAULT,
- .type = PIN_MAP_TYPE_MUX_GROUP,
- .ctrl_dev_name = "pinctrl-foo",
- .data.mux.function = "mmc0",
- },
-};
+and spi on the second function mapping::
+
+ #include <linux/pinctrl/machine.h>
+
+ static const struct pinctrl_map mapping[] __initconst = {
+ {
+ .dev_name = "foo-spi.0",
+ .name = PINCTRL_STATE_DEFAULT,
+ .type = PIN_MAP_TYPE_MUX_GROUP,
+ .ctrl_dev_name = "pinctrl-foo",
+ .data.mux.function = "spi0",
+ },
+ {
+ .dev_name = "foo-i2c.0",
+ .name = PINCTRL_STATE_DEFAULT,
+ .type = PIN_MAP_TYPE_MUX_GROUP,
+ .ctrl_dev_name = "pinctrl-foo",
+ .data.mux.function = "i2c0",
+ },
+ {
+ .dev_name = "foo-mmc.0",
+ .name = PINCTRL_STATE_DEFAULT,
+ .type = PIN_MAP_TYPE_MUX_GROUP,
+ .ctrl_dev_name = "pinctrl-foo",
+ .data.mux.function = "mmc0",
+ },
+ };
The dev_name here matches to the unique device name that can be used to look
up the device struct (just like with clockdev or regulators). The function name
@@ -1029,76 +1039,81 @@ must match a function provided by the pinmux driver handling this pin range.
As you can see we may have several pin controllers on the system and thus
we need to specify which one of them contains the functions we wish to map.
-You register this pinmux mapping to the pinmux subsystem by simply:
+You register this pinmux mapping to the pinmux subsystem by simply::
ret = pinctrl_register_mappings(mapping, ARRAY_SIZE(mapping));
Since the above construct is pretty common there is a helper macro to make
it even more compact which assumes you want to use pinctrl-foo and position
-0 for mapping, for example:
+0 for mapping, for example::
-static struct pinctrl_map mapping[] __initdata = {
- PIN_MAP_MUX_GROUP("foo-i2c.o", PINCTRL_STATE_DEFAULT, "pinctrl-foo", NULL, "i2c0"),
-};
+ static struct pinctrl_map mapping[] __initdata = {
+ PIN_MAP_MUX_GROUP("foo-i2c.o", PINCTRL_STATE_DEFAULT,
+ "pinctrl-foo", NULL, "i2c0"),
+ };
The mapping table may also contain pin configuration entries. It's common for
each pin/group to have a number of configuration entries that affect it, so
the table entries for configuration reference an array of config parameters
-and values. An example using the convenience macros is shown below:
-
-static unsigned long i2c_grp_configs[] = {
- FOO_PIN_DRIVEN,
- FOO_PIN_PULLUP,
-};
-
-static unsigned long i2c_pin_configs[] = {
- FOO_OPEN_COLLECTOR,
- FOO_SLEW_RATE_SLOW,
-};
-
-static struct pinctrl_map mapping[] __initdata = {
- PIN_MAP_MUX_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", "i2c0"),
- PIN_MAP_CONFIGS_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", i2c_grp_configs),
- PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0scl", i2c_pin_configs),
- PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0sda", i2c_pin_configs),
-};
+and values. An example using the convenience macros is shown below::
+
+ static unsigned long i2c_grp_configs[] = {
+ FOO_PIN_DRIVEN,
+ FOO_PIN_PULLUP,
+ };
+
+ static unsigned long i2c_pin_configs[] = {
+ FOO_OPEN_COLLECTOR,
+ FOO_SLEW_RATE_SLOW,
+ };
+
+ static struct pinctrl_map mapping[] __initdata = {
+ PIN_MAP_MUX_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT,
+ "pinctrl-foo", "i2c0", "i2c0"),
+ PIN_MAP_CONFIGS_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT,
+ "pinctrl-foo", "i2c0", i2c_grp_configs),
+ PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT,
+ "pinctrl-foo", "i2c0scl", i2c_pin_configs),
+ PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT,
+ "pinctrl-foo", "i2c0sda", i2c_pin_configs),
+ };
Finally, some devices expect the mapping table to contain certain specific
named states. When running on hardware that doesn't need any pin controller
configuration, the mapping table must still contain those named states, in
order to explicitly indicate that the states were provided and intended to
be empty. Table entry macro PIN_MAP_DUMMY_STATE serves the purpose of defining
-a named state without causing any pin controller to be programmed:
+a named state without causing any pin controller to be programmed::
-static struct pinctrl_map mapping[] __initdata = {
- PIN_MAP_DUMMY_STATE("foo-i2c.0", PINCTRL_STATE_DEFAULT),
-};
+ static struct pinctrl_map mapping[] __initdata = {
+ PIN_MAP_DUMMY_STATE("foo-i2c.0", PINCTRL_STATE_DEFAULT),
+ };
Complex mappings
================
As it is possible to map a function to different groups of pins an optional
-.group can be specified like this:
-
-...
-{
- .dev_name = "foo-spi.0",
- .name = "spi0-pos-A",
- .type = PIN_MAP_TYPE_MUX_GROUP,
- .ctrl_dev_name = "pinctrl-foo",
- .function = "spi0",
- .group = "spi0_0_grp",
-},
-{
- .dev_name = "foo-spi.0",
- .name = "spi0-pos-B",
- .type = PIN_MAP_TYPE_MUX_GROUP,
- .ctrl_dev_name = "pinctrl-foo",
- .function = "spi0",
- .group = "spi0_1_grp",
-},
-...
+.group can be specified like this::
+
+ ...
+ {
+ .dev_name = "foo-spi.0",
+ .name = "spi0-pos-A",
+ .type = PIN_MAP_TYPE_MUX_GROUP,
+ .ctrl_dev_name = "pinctrl-foo",
+ .function = "spi0",
+ .group = "spi0_0_grp",
+ },
+ {
+ .dev_name = "foo-spi.0",
+ .name = "spi0-pos-B",
+ .type = PIN_MAP_TYPE_MUX_GROUP,
+ .ctrl_dev_name = "pinctrl-foo",
+ .function = "spi0",
+ .group = "spi0_1_grp",
+ },
+ ...
This example mapping is used to switch between two positions for spi0 at
runtime, as described further below under the heading "Runtime pinmuxing".
@@ -1107,67 +1122,67 @@ Further it is possible for one named state to affect the muxing of several
groups of pins, say for example in the mmc0 example above, where you can
additively expand the mmc0 bus from 2 to 4 to 8 pins. If we want to use all
three groups for a total of 2+2+4 = 8 pins (for an 8-bit MMC bus as is the
-case), we define a mapping like this:
-
-...
-{
- .dev_name = "foo-mmc.0",
- .name = "2bit"
- .type = PIN_MAP_TYPE_MUX_GROUP,
- .ctrl_dev_name = "pinctrl-foo",
- .function = "mmc0",
- .group = "mmc0_1_grp",
-},
-{
- .dev_name = "foo-mmc.0",
- .name = "4bit"
- .type = PIN_MAP_TYPE_MUX_GROUP,
- .ctrl_dev_name = "pinctrl-foo",
- .function = "mmc0",
- .group = "mmc0_1_grp",
-},
-{
- .dev_name = "foo-mmc.0",
- .name = "4bit"
- .type = PIN_MAP_TYPE_MUX_GROUP,
- .ctrl_dev_name = "pinctrl-foo",
- .function = "mmc0",
- .group = "mmc0_2_grp",
-},
-{
- .dev_name = "foo-mmc.0",
- .name = "8bit"
- .type = PIN_MAP_TYPE_MUX_GROUP,
- .ctrl_dev_name = "pinctrl-foo",
- .function = "mmc0",
- .group = "mmc0_1_grp",
-},
-{
- .dev_name = "foo-mmc.0",
- .name = "8bit"
- .type = PIN_MAP_TYPE_MUX_GROUP,
- .ctrl_dev_name = "pinctrl-foo",
- .function = "mmc0",
- .group = "mmc0_2_grp",
-},
-{
- .dev_name = "foo-mmc.0",
- .name = "8bit"
- .type = PIN_MAP_TYPE_MUX_GROUP,
- .ctrl_dev_name = "pinctrl-foo",
- .function = "mmc0",
- .group = "mmc0_3_grp",
-},
-...
+case), we define a mapping like this::
+
+ ...
+ {
+ .dev_name = "foo-mmc.0",
+ .name = "2bit"
+ .type = PIN_MAP_TYPE_MUX_GROUP,
+ .ctrl_dev_name = "pinctrl-foo",
+ .function = "mmc0",
+ .group = "mmc0_1_grp",
+ },
+ {
+ .dev_name = "foo-mmc.0",
+ .name = "4bit"
+ .type = PIN_MAP_TYPE_MUX_GROUP,
+ .ctrl_dev_name = "pinctrl-foo",
+ .function = "mmc0",
+ .group = "mmc0_1_grp",
+ },
+ {
+ .dev_name = "foo-mmc.0",
+ .name = "4bit"
+ .type = PIN_MAP_TYPE_MUX_GROUP,
+ .ctrl_dev_name = "pinctrl-foo",
+ .function = "mmc0",
+ .group = "mmc0_2_grp",
+ },
+ {
+ .dev_name = "foo-mmc.0",
+ .name = "8bit"
+ .type = PIN_MAP_TYPE_MUX_GROUP,
+ .ctrl_dev_name = "pinctrl-foo",
+ .function = "mmc0",
+ .group = "mmc0_1_grp",
+ },
+ {
+ .dev_name = "foo-mmc.0",
+ .name = "8bit"
+ .type = PIN_MAP_TYPE_MUX_GROUP,
+ .ctrl_dev_name = "pinctrl-foo",
+ .function = "mmc0",
+ .group = "mmc0_2_grp",
+ },
+ {
+ .dev_name = "foo-mmc.0",
+ .name = "8bit"
+ .type = PIN_MAP_TYPE_MUX_GROUP,
+ .ctrl_dev_name = "pinctrl-foo",
+ .function = "mmc0",
+ .group = "mmc0_3_grp",
+ },
+ ...
The result of grabbing this mapping from the device with something like
-this (see next paragraph):
+this (see next paragraph)::
p = devm_pinctrl_get(dev);
s = pinctrl_lookup_state(p, "8bit");
ret = pinctrl_select_state(p, s);
-or more simply:
+or more simply::
p = devm_pinctrl_get_select(dev, "8bit");
@@ -1205,39 +1220,39 @@ PINCTRL_STATE_SLEEP at runtime, re-biasing or even re-muxing pins to save
current in sleep mode.
A driver may request a certain control state to be activated, usually just the
-default state like this:
+default state like this::
-#include <linux/pinctrl/consumer.h>
+ #include <linux/pinctrl/consumer.h>
-struct foo_state {
- struct pinctrl *p;
- struct pinctrl_state *s;
- ...
-};
+ struct foo_state {
+ struct pinctrl *p;
+ struct pinctrl_state *s;
+ ...
+ };
-foo_probe()
-{
- /* Allocate a state holder named "foo" etc */
- struct foo_state *foo = ...;
+ foo_probe()
+ {
+ /* Allocate a state holder named "foo" etc */
+ struct foo_state *foo = ...;
- foo->p = devm_pinctrl_get(&device);
- if (IS_ERR(foo->p)) {
- /* FIXME: clean up "foo" here */
- return PTR_ERR(foo->p);
- }
+ foo->p = devm_pinctrl_get(&device);
+ if (IS_ERR(foo->p)) {
+ /* FIXME: clean up "foo" here */
+ return PTR_ERR(foo->p);
+ }
- foo->s = pinctrl_lookup_state(foo->p, PINCTRL_STATE_DEFAULT);
- if (IS_ERR(foo->s)) {
- /* FIXME: clean up "foo" here */
- return PTR_ERR(s);
- }
+ foo->s = pinctrl_lookup_state(foo->p, PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(foo->s)) {
+ /* FIXME: clean up "foo" here */
+ return PTR_ERR(foo->s);
+ }
- ret = pinctrl_select_state(foo->s);
- if (ret < 0) {
- /* FIXME: clean up "foo" here */
- return ret;
+ ret = pinctrl_select_state(foo->s);
+ if (ret < 0) {
+ /* FIXME: clean up "foo" here */
+ return ret;
+ }
}
-}
This get/lookup/select/put sequence can just as well be handled by bus drivers
if you don't want each and every driver to handle it and you know the
@@ -1299,16 +1314,16 @@ Drivers needing both pin control and GPIOs
Again, it is discouraged to let drivers lookup and select pin control states
themselves, but again sometimes this is unavoidable.
-So say that your driver is fetching its resources like this:
+So say that your driver is fetching its resources like this::
-#include <linux/pinctrl/consumer.h>
-#include <linux/gpio.h>
+ #include <linux/pinctrl/consumer.h>
+ #include <linux/gpio.h>
-struct pinctrl *pinctrl;
-int gpio;
+ struct pinctrl *pinctrl;
+ int gpio;
-pinctrl = devm_pinctrl_get_select_default(&dev);
-gpio = devm_gpio_request(&dev, 14, "foo");
+ pinctrl = devm_pinctrl_get_select_default(&dev);
+ gpio = devm_gpio_request(&dev, 14, "foo");
Here we first request a certain pin state and then request GPIO 14 to be
used. If you're using the subsystems orthogonally like this, you should
@@ -1347,21 +1362,22 @@ lookup_state() and select_state() on it immediately after the pin control
device has been registered.
This occurs for mapping table entries where the client device name is equal
-to the pin controller device name, and the state name is PINCTRL_STATE_DEFAULT.
+to the pin controller device name, and the state name is PINCTRL_STATE_DEFAULT::
-{
- .dev_name = "pinctrl-foo",
- .name = PINCTRL_STATE_DEFAULT,
- .type = PIN_MAP_TYPE_MUX_GROUP,
- .ctrl_dev_name = "pinctrl-foo",
- .function = "power_func",
-},
+ {
+ .dev_name = "pinctrl-foo",
+ .name = PINCTRL_STATE_DEFAULT,
+ .type = PIN_MAP_TYPE_MUX_GROUP,
+ .ctrl_dev_name = "pinctrl-foo",
+ .function = "power_func",
+ },
Since it may be common to request the core to hog a few always-applicable
mux settings on the primary pin controller, there is a convenience macro for
-this:
+this::
-PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-foo", NULL /* group */, "power_func")
+ PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-foo", NULL /* group */,
+ "power_func")
This gives the exact same result as the above construction.
@@ -1378,45 +1394,45 @@ function, but with different named in the mapping as described under
This snippet first initializes a state object for both groups (in foo_probe()),
then muxes the function in the pins defined by group A, and finally muxes it in
-on the pins defined by group B:
+on the pins defined by group B::
-#include <linux/pinctrl/consumer.h>
+ #include <linux/pinctrl/consumer.h>
-struct pinctrl *p;
-struct pinctrl_state *s1, *s2;
+ struct pinctrl *p;
+ struct pinctrl_state *s1, *s2;
-foo_probe()
-{
- /* Setup */
- p = devm_pinctrl_get(&device);
- if (IS_ERR(p))
- ...
+ foo_probe()
+ {
+ /* Setup */
+ p = devm_pinctrl_get(&device);
+ if (IS_ERR(p))
+ ...
+
+ s1 = pinctrl_lookup_state(foo->p, "pos-A");
+ if (IS_ERR(s1))
+ ...
+
+ s2 = pinctrl_lookup_state(foo->p, "pos-B");
+ if (IS_ERR(s2))
+ ...
+ }
- s1 = pinctrl_lookup_state(foo->p, "pos-A");
- if (IS_ERR(s1))
+ foo_switch()
+ {
+ /* Enable on position A */
+ ret = pinctrl_select_state(s1);
+ if (ret < 0)
...
- s2 = pinctrl_lookup_state(foo->p, "pos-B");
- if (IS_ERR(s2))
...
-}
-
-foo_switch()
-{
- /* Enable on position A */
- ret = pinctrl_select_state(s1);
- if (ret < 0)
- ...
-
- ...
- /* Enable on position B */
- ret = pinctrl_select_state(s2);
- if (ret < 0)
- ...
+ /* Enable on position B */
+ ret = pinctrl_select_state(s2);
+ if (ret < 0)
+ ...
- ...
-}
+ ...
+ }
The above has to be done from process context. The reservation of the pins
will be done when the state is activated, so in effect one specific pin
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 2d132fcea0f8..30e04f7a690d 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -240,10 +240,9 @@ CLOCK
DMA
dmam_alloc_coherent()
- dmam_alloc_noncoherent()
+ dmam_alloc_attrs()
dmam_declare_coherent_memory()
dmam_free_coherent()
- dmam_free_noncoherent()
dmam_pool_create()
dmam_pool_destroy()
@@ -349,6 +348,7 @@ PER-CPU MEM
devm_free_percpu()
PCI
+ devm_pci_alloc_host_bridge() : managed PCI host bridge allocation
devm_pci_remap_cfgspace() : ioremap PCI configuration space
devm_pci_remap_cfg_resource() : ioremap PCI configuration space resource
pcim_enable_device() : after success, all PCI ops become managed
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index f42b90687d40..48c9faa73a76 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -576,7 +576,43 @@ should clear PG_Dirty and set PG_Writeback. It can be actually
written at any point after PG_Dirty is clear. Once it is known to be
safe, PG_Writeback is cleared.
-Writeback makes use of a writeback_control structure...
+Writeback makes use of a writeback_control structure to direct the
+operations. This gives the writepage and writepages operations some
+information about the nature of and reason for the writeback request,
+and the constraints under which it is being done. It is also used to
+return information back to the caller about the result of a writepage or
+writepages request.
+
+Handling errors during writeback
+--------------------------------
+Most applications that do buffered I/O will periodically call a file
+synchronization call (fsync, fdatasync, msync or sync_file_range) to
+ensure that data written has made it to the backing store. When there
+is an error during writeback, they expect that error to be reported when
+a file sync request is made. After an error has been reported on one
+request, subsequent requests on the same file descriptor should return
+0, unless further writeback errors have occurred since the previous file
+synchronization.
+
+Ideally, the kernel would report errors only on file descriptions on
+which writes were done that subsequently failed to be written back.
+However, the generic pagecache infrastructure does not track the file
+descriptions that have dirtied each individual page, so determining
+which file descriptors should get back an error is not possible.
+
+Instead, the generic writeback error tracking infrastructure in the
+kernel settles for reporting errors to fsync on all file descriptions
+that were open at the time that the error occurred. In a situation with
+multiple writers, all of them will get back an error on a subsequent fsync,
+even if all of the writes done through that particular file descriptor
+succeeded (or even if there were no writes on that file descriptor at all).
+
+Filesystems that wish to use this infrastructure should call
+mapping_set_error to record the error in the address_space when it
+occurs. Then, after writing back data from the pagecache in their
+file->fsync operation, they should call file_check_and_advance_wb_err to
+ensure that the struct file's error cursor has advanced to the correct
+point in the stream of errors emitted by the backing device(s).
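+
+As a rough sketch of this recipe (the foo_fsync name is hypothetical,
+and locking and filesystem-specific details are omitted), such an fsync
+method might look like this:
+
+	static int foo_fsync(struct file *file, loff_t start, loff_t end,
+			     int datasync)
+	{
+		int ret, err;
+
+		/*
+		 * Write back dirty pagecache data and wait for completion.
+		 * Failures in the writepage path were recorded there via
+		 * mapping_set_error().
+		 */
+		ret = filemap_write_and_wait_range(file->f_mapping, start, end);
+
+		/* ... flush filesystem-private metadata for the inode ... */
+
+		/*
+		 * Advance this struct file's error cursor and pick up any
+		 * writeback error recorded since it was last checked.
+		 */
+		err = file_check_and_advance_wb_err(file);
+		return ret ? ret : err;
+	}
+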
struct address_space_operations
-------------------------------
@@ -804,7 +840,8 @@ struct address_space_operations {
The File Object
===============
-A file object represents a file opened by a process.
+A file object represents a file opened by a process. This is also known
+as an "open file description" in POSIX parlance.
struct file_operations
@@ -887,7 +924,8 @@ otherwise noted.
release: called when the last reference to an open file is closed
- fsync: called by the fsync(2) system call
+ fsync: called by the fsync(2) system call. Also see the section above
+ entitled "Handling errors during writeback".
fasync: called by the fcntl(2) system call when asynchronous
(non-blocking) mode is enabled for a file
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 1e9fcb4d0ec8..3e3fdae5f3ed 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -326,7 +326,7 @@ Code Seq#(hex) Include File Comments
0xB5 00-0F uapi/linux/rpmsg.h <mailto:[email protected]>
0xC0 00-0F linux/usb/iowarrior.h
0xCA 00-0F uapi/misc/cxl.h
-0xCA 80-8F uapi/scsi/cxlflash_ioctl.h
+0xCA 80-BF uapi/scsi/cxlflash_ioctl.h
0xCB 00-1F CBM serial IEC bus in development:
0xCD 01 linux/reiserfs_fs.h
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index 0ff6a466a05b..ac2363ea05c5 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -236,5 +236,9 @@ Files specified with KBUILD_VMLINUX_INIT are linked first.
KBUILD_VMLINUX_MAIN
--------------------------------------------------
All object files for the main part of vmlinux.
-KBUILD_VMLINUX_INIT and KBUILD_VMLINUX_MAIN together specify
-all the object files used to link vmlinux.
+
+KBUILD_VMLINUX_LIBS
+--------------------------------------------------
+All .a "lib" files for vmlinux.
+KBUILD_VMLINUX_INIT, KBUILD_VMLINUX_MAIN, and KBUILD_VMLINUX_LIBS together
+specify all the object files used to link vmlinux.
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 659afd56ecdb..7003141a6d4f 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -45,10 +45,9 @@ This document describes the Linux kernel Makefiles.
=== 7 Kbuild syntax for exported headers
--- 7.1 no-export-headers
- --- 7.2 genhdr-y
- --- 7.3 generic-y
- --- 7.4 generated-y
- --- 7.5 mandatory-y
+ --- 7.2 generic-y
+ --- 7.3 generated-y
+ --- 7.4 mandatory-y
=== 8 Kbuild Variables
=== 9 Makefile language
@@ -487,22 +486,6 @@ more details, with real examples.
respectively.
Note: cc-option-yn uses KBUILD_CFLAGS for $(CC) options
- cc-option-align
- gcc versions >= 3.0 changed the type of options used to specify
- alignment of functions, loops etc. $(cc-option-align), when used
- as prefix to the align options, will select the right prefix:
- gcc < 3.00
- cc-option-align = -malign
- gcc >= 3.00
- cc-option-align = -falign
-
- Example:
- KBUILD_CFLAGS += $(cc-option-align)-functions=4
-
- In the above example, the option -falign-functions=4 is used for
- gcc >= 3.00. For gcc < 3.00, -malign-functions=4 is used.
- Note: cc-option-align uses KBUILD_CFLAGS for $(CC) options
-
cc-disable-warning
cc-disable-warning checks if gcc supports a given warning and returns
the commandline switch to disable it. This special function is needed,
@@ -1277,18 +1260,7 @@ See subsequent chapter for the syntax of the Kbuild file.
avoid exporting specific headers (e.g. kvm.h) on architectures that do
not support it. It should be avoided as much as possible.
- --- 7.2 genhdr-y
-
- genhdr-y specifies asm files to be generated.
-
- Example:
- #arch/x86/include/uapi/asm/Kbuild
- genhdr-y += unistd_32.h
- genhdr-y += unistd_64.h
- genhdr-y += unistd_x32.h
-
-
- --- 7.3 generic-y
+ --- 7.2 generic-y
If an architecture uses a verbatim copy of a header from
include/asm-generic then this is listed in the file
@@ -1315,11 +1287,10 @@ See subsequent chapter for the syntax of the Kbuild file.
Example: termios.h
#include <asm-generic/termios.h>
- --- 7.4 generated-y
+ --- 7.3 generated-y
If an architecture generates other header files alongside generic-y
- wrappers, and not included in genhdr-y, then generated-y specifies
- them.
+ wrappers, generated-y specifies them.
This prevents them being treated as stale asm-generic wrappers and
removed.
diff --git a/Documentation/media/kapi/cec-core.rst b/Documentation/media/kapi/cec-core.rst
index 7a04c5386dc8..8a65c69ed071 100644
--- a/Documentation/media/kapi/cec-core.rst
+++ b/Documentation/media/kapi/cec-core.rst
@@ -194,6 +194,11 @@ When a transmit finished (successfully or otherwise):
void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt,
u8 nack_cnt, u8 low_drive_cnt, u8 error_cnt);
+or:
+
+.. c:function::
+ void cec_transmit_attempt_done(struct cec_adapter *adap, u8 status);
+
The status can be one of:
CEC_TX_STATUS_OK:
@@ -231,6 +236,11 @@ to 1, if the hardware does support retry then either set these counters to
0 if the hardware provides no feedback of which errors occurred and how many
times, or fill in the correct values as reported by the hardware.
+The cec_transmit_attempt_done() function is a helper for cases where the
+hardware never retries, so the transmit is always for just a single
+attempt. It will call cec_transmit_done() in turn, filling in 1 for the
+count argument corresponding to the status, or all 0 if the status was OK.
+
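+As an illustration only (the foo_* names and registers are hypothetical),
+a driver for hardware that reports a single final status and never
+retries could signal completion from its interrupt handler like this::
+
+	static irqreturn_t foo_cec_irq(int irq, void *priv)
+	{
+		struct foo_cec *foo = priv;
+		u32 stat = readl(foo->base + FOO_CEC_TX_STATUS);
+
+		if (stat & FOO_CEC_TX_OK)
+			cec_transmit_attempt_done(foo->adap, CEC_TX_STATUS_OK);
+		else if (stat & FOO_CEC_TX_NACK)
+			cec_transmit_attempt_done(foo->adap, CEC_TX_STATUS_NACK);
+		else
+			cec_transmit_attempt_done(foo->adap, CEC_TX_STATUS_ERROR);
+
+		return IRQ_HANDLED;
+	}
+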
When a CEC message was received:
.. c:function::
@@ -307,6 +317,14 @@ to another valid physical address, then this function will first set the
address to CEC_PHYS_ADDR_INVALID before enabling the new physical address.
.. c:function::
+ void cec_s_phys_addr_from_edid(struct cec_adapter *adap,
+ const struct edid *edid);
+
+A helper function that extracts the physical address from the edid struct
+and calls cec_s_phys_addr() with that address, or CEC_PHYS_ADDR_INVALID
+if the EDID did not contain a physical address or edid was a NULL pointer.
+
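+A common pattern (sketched here with hypothetical foo_* names) is to
+call this helper whenever the EDID changes, and with a NULL pointer when
+the display is disconnected::
+
+	/* new_edid may be NULL, which sets CEC_PHYS_ADDR_INVALID */
+	static void foo_set_edid(struct foo_dev *foo, const struct edid *new_edid)
+	{
+		cec_s_phys_addr_from_edid(foo->cec_adap, new_edid);
+	}
+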
+.. c:function::
int cec_s_log_addrs(struct cec_adapter *adap,
struct cec_log_addrs *log_addrs, bool block);
diff --git a/Documentation/media/kapi/v4l2-core.rst b/Documentation/media/kapi/v4l2-core.rst
index d8f6c46d26d5..c7434f38fd9c 100644
--- a/Documentation/media/kapi/v4l2-core.rst
+++ b/Documentation/media/kapi/v4l2-core.rst
@@ -19,7 +19,7 @@ Video4Linux devices
v4l2-mc
v4l2-mediabus
v4l2-mem2mem
- v4l2-of
+ v4l2-fwnode
v4l2-rect
v4l2-tuner
v4l2-common
diff --git a/Documentation/media/kapi/v4l2-fwnode.rst b/Documentation/media/kapi/v4l2-fwnode.rst
new file mode 100644
index 000000000000..6c8bccdfeb25
--- /dev/null
+++ b/Documentation/media/kapi/v4l2-fwnode.rst
@@ -0,0 +1,3 @@
+V4L2 fwnode kAPI
+^^^^^^^^^^^^^^^^
+.. kernel-doc:: include/media/v4l2-fwnode.h
diff --git a/Documentation/media/kapi/v4l2-of.rst b/Documentation/media/kapi/v4l2-of.rst
deleted file mode 100644
index 1ddf76b00944..000000000000
--- a/Documentation/media/kapi/v4l2-of.rst
+++ /dev/null
@@ -1,3 +0,0 @@
-V4L2 Open Firmware kAPI
-^^^^^^^^^^^^^^^^^^^^^^^
-.. kernel-doc:: include/media/v4l2-of.h
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
index a0e961f11017..6d7bf7bef3eb 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
@@ -113,6 +113,14 @@ returns the information to the application. The ioctl never fails.
- 0x00000020
- The CEC hardware can monitor all messages, not just directed and
broadcast messages.
+ * .. _`CEC-CAP-NEEDS-HPD`:
+
+ - ``CEC_CAP_NEEDS_HPD``
+ - 0x00000040
+ - The CEC hardware is only active if the HDMI Hotplug Detect pin is
+ high. This makes it impossible to use CEC to wake up displays that
+ set the HPD pin low when in standby mode, but keep the CEC bus
+ alive.
diff --git a/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst b/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst
index 26272f2860bc..e962f6ec5aaf 100644
--- a/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst
+++ b/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst
@@ -15,7 +15,7 @@ FE_DISEQC_SEND_BURST - Sends a 22KHz tone burst for 2x1 mini DiSEqC satellite se
Synopsis
========
-.. c:function:: int ioctl( int fd, FE_DISEQC_SEND_BURST, enum fe_sec_mini_cmd *tone )
+.. c:function:: int ioctl( int fd, FE_DISEQC_SEND_BURST, enum fe_sec_mini_cmd tone )
:name: FE_DISEQC_SEND_BURST
@@ -26,7 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <frontend_f_open>`.
``tone``
- pointer to enum :c:type:`fe_sec_mini_cmd`
+ an enumerated integer value, as described at :c:type:`fe_sec_mini_cmd`
Description
diff --git a/Documentation/media/uapi/dvb/fe-set-tone.rst b/Documentation/media/uapi/dvb/fe-set-tone.rst
index bea193234cb4..84e4da3fd4c9 100644
--- a/Documentation/media/uapi/dvb/fe-set-tone.rst
+++ b/Documentation/media/uapi/dvb/fe-set-tone.rst
@@ -15,7 +15,7 @@ FE_SET_TONE - Sets/resets the generation of the continuous 22kHz tone.
Synopsis
========
-.. c:function:: int ioctl( int fd, FE_SET_TONE, enum fe_sec_tone_mode *tone )
+.. c:function:: int ioctl( int fd, FE_SET_TONE, enum fe_sec_tone_mode tone )
:name: FE_SET_TONE
@@ -26,7 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <frontend_f_open>`.
``tone``
- pointer to enum :c:type:`fe_sec_tone_mode`
+ an enumerated integer value, as described at :c:type:`fe_sec_tone_mode`
Description
diff --git a/Documentation/media/uapi/dvb/fe-set-voltage.rst b/Documentation/media/uapi/dvb/fe-set-voltage.rst
index fcf6f38ef18e..052f316bb4a3 100644
--- a/Documentation/media/uapi/dvb/fe-set-voltage.rst
+++ b/Documentation/media/uapi/dvb/fe-set-voltage.rst
@@ -15,7 +15,7 @@ FE_SET_VOLTAGE - Allow setting the DC level sent to the antenna subsystem.
Synopsis
========
-.. c:function:: int ioctl( int fd, FE_SET_VOLTAGE, enum fe_sec_voltage *voltage )
+.. c:function:: int ioctl( int fd, FE_SET_VOLTAGE, enum fe_sec_voltage voltage )
:name: FE_SET_VOLTAGE
@@ -26,10 +26,7 @@ Arguments
File descriptor returned by :ref:`open() <frontend_f_open>`.
``voltage``
- pointer to enum :c:type:`fe_sec_voltage`
-
- Valid values are described at enum
- :c:type:`fe_sec_voltage`.
+ an enumerated integer value, as described at :c:type:`fe_sec_voltage`
Description
diff --git a/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst b/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst
index 48c9531f4db0..add8281494f8 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst
@@ -241,7 +241,7 @@ desired arrays with the media graph elements.
.. c:type:: media_v2_intf_devnode
-.. flat-table:: struct media_v2_interface
+.. flat-table:: struct media_v2_intf_devnode
:header-rows: 0
:stub-columns: 0
:widths: 1 2 8
@@ -312,7 +312,7 @@ desired arrays with the media graph elements.
.. c:type:: media_v2_link
-.. flat-table:: struct media_v2_pad
+.. flat-table:: struct media_v2_link
:header-rows: 0
:stub-columns: 0
:widths: 1 2 8
@@ -324,7 +324,7 @@ desired arrays with the media graph elements.
- ``id``
- - Unique ID for the pad.
+ - Unique ID for the link.
- .. row 2
@@ -334,7 +334,7 @@ desired arrays with the media graph elements.
- On pad to pad links: unique ID for the source pad.
- On interface to entity links: unique ID for the interface.
+ On interface to entity links: unique ID for the entity.
- .. row 3
diff --git a/Documentation/media/uapi/mediactl/media-types.rst b/Documentation/media/uapi/mediactl/media-types.rst
index 2a5164aea2b4..71078565d644 100644
--- a/Documentation/media/uapi/mediactl/media-types.rst
+++ b/Documentation/media/uapi/mediactl/media-types.rst
@@ -299,6 +299,27 @@ Types and flags used to represent the media graph elements
received on its sink pad and outputs the statistics data on
its source pad.
+ - .. row 29
+
+ .. _MEDIA-ENT-F-VID-MUX:
+
+ - ``MEDIA_ENT_F_VID_MUX``
+
+ - Video multiplexer. An entity capable of multiplexing must have at
+ least two sink pads and one source pad, and must pass the video
+ frame(s) received from the active sink pad to the source pad.
+
+ - .. row 30
+
+ .. _MEDIA-ENT-F-VID-IF-BRIDGE:
+
+ - ``MEDIA_ENT_F_VID_IF_BRIDGE``
+
+ - Video interface bridge. A video interface bridge entity must have at
+ least one sink pad and at least one source pad. It receives video
+ frames on its sink pad from an input video bus of one type (HDMI, eDP,
+ MIPI CSI-2, ...), and outputs them on its source pad to an output
+ video bus of another type (eDP, MIPI CSI-2, parallel, ...).
.. tabularcolumns:: |p{5.5cm}|p{12.0cm}|
diff --git a/Documentation/media/uapi/v4l/control.rst b/Documentation/media/uapi/v4l/control.rst
index 51112badb804..c1e6adbe83d7 100644
--- a/Documentation/media/uapi/v4l/control.rst
+++ b/Documentation/media/uapi/v4l/control.rst
@@ -137,6 +137,12 @@ Control IDs
``V4L2_CID_GAIN`` ``(integer)``
Gain control.
+ Primarily used to control gain on e.g. TV tuners but also on
+ webcams. Most devices control only digital gain with this control
+ but on some this could include analogue gain as well. Devices that
+ recognise the difference between digital and analogue gain use
+ controls ``V4L2_CID_DIGITAL_GAIN`` and ``V4L2_CID_ANALOGUE_GAIN``.
+
``V4L2_CID_HFLIP`` ``(boolean)``
Mirror the picture horizontally.
diff --git a/Documentation/media/uapi/v4l/extended-controls.rst b/Documentation/media/uapi/v4l/extended-controls.rst
index abb105724c05..9acc9cad49e2 100644
--- a/Documentation/media/uapi/v4l/extended-controls.rst
+++ b/Documentation/media/uapi/v4l/extended-controls.rst
@@ -2019,7 +2019,7 @@ enum v4l2_exposure_auto_type -
dynamically vary the frame rate. By default this feature is disabled
(0) and the frame rate must remain constant.
-``V4L2_CID_EXPOSURE_BIAS (integer menu)``
+``V4L2_CID_AUTO_EXPOSURE_BIAS (integer menu)``
Determines the automatic exposure compensation, it is effective only
when ``V4L2_CID_EXPOSURE_AUTO`` control is set to ``AUTO``,
``SHUTTER_PRIORITY`` or ``APERTURE_PRIORITY``. It is expressed in
@@ -3021,6 +3021,13 @@ Image Process Control IDs
The video deinterlacing mode (such as Bob, Weave, ...). The menu items are
driver specific and are documented in :ref:`v4l-drivers`.
+``V4L2_CID_DIGITAL_GAIN (integer)``
+ Digital gain is the value by which all colour components
+ are multiplied. Typically the digital gain applied is the
+ control value divided by e.g. 0x100, meaning that to get no
+ digital gain the control value needs to be 0x100. The no-gain
+ configuration is also typically the default.
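+ For example, with this typical scaling a control value of 0x180 would
+ correspond to a digital gain of 1.5.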
+
.. _dv-controls:
diff --git a/Documentation/media/uapi/v4l/pixfmt-sdr-pcu16be.rst b/Documentation/media/uapi/v4l/pixfmt-sdr-pcu16be.rst
new file mode 100644
index 000000000000..2de1b1a0f517
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-sdr-pcu16be.rst
@@ -0,0 +1,55 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _V4L2-SDR-FMT-PCU16BE:
+
+******************************
+V4L2_SDR_FMT_PCU16BE ('PC16')
+******************************
+
+Planar complex unsigned 16-bit big endian IQ sample
+
+Description
+===========
+
+This format contains a sequence of complex number samples. Each complex
+number consists of two parts, called In-phase and Quadrature (IQ). Both
+I and Q are represented as a 16-bit unsigned big endian number stored in
+a 32-bit space. The remaining unused bits within the 32-bit space are
+padded with 0. The I values are stored first and the Q values start at
+an offset equal to half of the buffer size (i.e. offset = buffersize/2).
+Out of the 16 bits, bits 15:2 (14 bits) carry data and bits 1:0 (2 bits)
+can be any value.
+
+**Byte Order.**
+Each cell is one byte.
+
+.. flat-table::
+ :header-rows: 1
+ :stub-columns: 0
+
+ * - Offset:
+ - Byte B0
+ - Byte B1
+ - Byte B2
+ - Byte B3
+ * - start + 0:
+ - I'\ :sub:`0[13:6]`
+ - I'\ :sub:`0[5:0]; B1[1:0]=pad`
+ - pad
+ - pad
+ * - start + 4:
+ - I'\ :sub:`1[13:6]`
+ - I'\ :sub:`1[5:0]; B1[1:0]=pad`
+ - pad
+ - pad
+ * - ...
+ * - start + offset:
+ - Q'\ :sub:`0[13:6]`
+ - Q'\ :sub:`0[5:0]; B1[1:0]=pad`
+ - pad
+ - pad
+ * - start + offset + 4:
+ - Q'\ :sub:`1[13:6]`
+ - Q'\ :sub:`1[5:0]; B1[1:0]=pad`
+ - pad
+ - pad
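+
+As an illustration of this layout (a sketch only, not part of the kernel
+sources; the function and variable names are arbitrary), an application
+could unpack the n'th I/Q pair of a buffer in this format as follows:
+
+.. code-block:: c
+
+    #include <stddef.h>
+    #include <stdint.h>
+
+    /* Extract the n'th I/Q pair from a PCU16BE buffer of 'buffersize' bytes. */
+    static void pcu16be_sample(const uint8_t *buf, size_t buffersize,
+                               size_t n, uint16_t *i, uint16_t *q)
+    {
+        const uint8_t *ib = buf + 4 * n;                  /* I plane */
+        const uint8_t *qb = buf + buffersize / 2 + 4 * n; /* Q plane */
+
+        /* The 16-bit big endian value sits in bytes B0/B1; bits 15:2 carry
+         * data, so shift right by 2 to obtain the 14-bit sample value. */
+        *i = ((((uint16_t)ib[0] << 8) | ib[1]) & 0xfffc) >> 2;
+        *q = ((((uint16_t)qb[0] << 8) | qb[1]) & 0xfffc) >> 2;
+    }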
diff --git a/Documentation/media/uapi/v4l/pixfmt-sdr-pcu18be.rst b/Documentation/media/uapi/v4l/pixfmt-sdr-pcu18be.rst
new file mode 100644
index 000000000000..da8b26bf6b95
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-sdr-pcu18be.rst
@@ -0,0 +1,55 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _V4L2-SDR-FMT-PCU18BE:
+
+******************************
+V4L2_SDR_FMT_PCU18BE ('PC18')
+******************************
+
+Planar complex unsigned 18-bit big endian IQ sample
+
+Description
+===========
+
+This format contains a sequence of complex number samples. Each complex
+number consists of two parts, called In-phase and Quadrature (IQ). Both
+I and Q are represented as an 18-bit unsigned big endian number stored
+in a 32-bit space. The remaining unused bits within the 32-bit space are
+padded with 0. The I values are stored first and the Q values start at
+an offset equal to half of the buffer size (i.e. offset = buffersize/2).
+Out of the 18 bits, bits 17:2 (16 bits) carry data and bits 1:0 (2 bits)
+can be any value.
+
+**Byte Order.**
+Each cell is one byte.
+
+.. flat-table::
+ :header-rows: 1
+ :stub-columns: 0
+
+ * - Offset:
+ - Byte B0
+ - Byte B1
+ - Byte B2
+ - Byte B3
+ * - start + 0:
+ - I'\ :sub:`0[17:10]`
+ - I'\ :sub:`0[9:2]`
+ - I'\ :sub:`0[1:0]; B2[5:0]=pad`
+ - pad
+ * - start + 4:
+ - I'\ :sub:`1[17:10]`
+ - I'\ :sub:`1[9:2]`
+ - I'\ :sub:`1[1:0]; B2[5:0]=pad`
+ - pad
+ * - ...
+ * - start + offset:
+ - Q'\ :sub:`0[17:10]`
+ - Q'\ :sub:`0[9:2]`
+ - Q'\ :sub:`0[1:0]; B2[5:0]=pad`
+ - pad
+ * - start + offset + 4:
+ - Q'\ :sub:`1[17:10]`
+ - Q'\ :sub:`1[9:2]`
+ - Q'\ :sub:`1[1:0]; B2[5:0]=pad`
+ - pad
diff --git a/Documentation/media/uapi/v4l/pixfmt-sdr-pcu20be.rst b/Documentation/media/uapi/v4l/pixfmt-sdr-pcu20be.rst
new file mode 100644
index 000000000000..5499eed39477
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-sdr-pcu20be.rst
@@ -0,0 +1,54 @@
+.. -*- coding: utf-8; mode: rst -*-
+.. _V4L2-SDR-FMT-PCU20BE:
+
+******************************
+V4L2_SDR_FMT_PCU20BE ('PC20')
+******************************
+
+Planar complex unsigned 20-bit big endian IQ sample
+
+Description
+===========
+
+This format contains a sequence of complex number samples. Each complex
+number consists of two parts, called In-phase and Quadrature (IQ). Both
+I and Q are represented as a 20-bit unsigned big endian number stored in
+a 32-bit space. The remaining unused bits within the 32-bit space are
+padded with 0. The I values are stored first and the Q values start at
+an offset equal to half of the buffer size (i.e. offset = buffersize/2).
+Out of the 20 bits, bits 19:2 (18 bits) carry data and bits 1:0 (2 bits)
+can be any value.
+
+**Byte Order.**
+Each cell is one byte.
+
+.. flat-table::
+ :header-rows: 1
+ :stub-columns: 0
+
+ * - Offset:
+ - Byte B0
+ - Byte B1
+ - Byte B2
+ - Byte B3
+ * - start + 0:
+ - I'\ :sub:`0[19:12]`
+ - I'\ :sub:`0[11:4]`
+ - I'\ :sub:`0[3:0]; B2[3:0]=pad`
+ - pad
+ * - start + 4:
+ - I'\ :sub:`1[19:12]`
+ - I'\ :sub:`1[11:4]`
+ - I'\ :sub:`1[3:0]; B2[3:0]=pad`
+ - pad
+ * - ...
+ * - start + offset:
+ - Q'\ :sub:`0[19:12]`
+ - Q'\ :sub:`0[11:4]`
+ - Q'\ :sub:`0[3:0]; B2[3:0]=pad`
+ - pad
+ * - start + offset + 4:
+ - Q'\ :sub:`1[19:12]`
+ - Q'\ :sub:`1[11:4]`
+ - Q'\ :sub:`1[3:0]; B2[3:0]=pad`
+ - pad
diff --git a/Documentation/media/uapi/v4l/sdr-formats.rst b/Documentation/media/uapi/v4l/sdr-formats.rst
index f863c08f1add..2037f5bad727 100644
--- a/Documentation/media/uapi/v4l/sdr-formats.rst
+++ b/Documentation/media/uapi/v4l/sdr-formats.rst
@@ -17,3 +17,6 @@ These formats are used for :ref:`SDR <sdr>` interface only.
pixfmt-sdr-cs08
pixfmt-sdr-cs14le
pixfmt-sdr-ru12le
+ pixfmt-sdr-pcu16be
+ pixfmt-sdr-pcu18be
+ pixfmt-sdr-pcu20be
diff --git a/Documentation/media/uapi/v4l/vidioc-cropcap.rst b/Documentation/media/uapi/v4l/vidioc-cropcap.rst
index f21a69b554e1..0f80d5ca2643 100644
--- a/Documentation/media/uapi/v4l/vidioc-cropcap.rst
+++ b/Documentation/media/uapi/v4l/vidioc-cropcap.rst
@@ -39,17 +39,10 @@ structure. Drivers fill the rest of the structure. The results are
constant except when switching the video standard. Remember this switch
can occur implicit when switching the video input or output.
-Do not use the multiplanar buffer types. Use
-``V4L2_BUF_TYPE_VIDEO_CAPTURE`` instead of
-``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE`` and use
-``V4L2_BUF_TYPE_VIDEO_OUTPUT`` instead of
-``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``.
-
This ioctl must be implemented for video capture or output devices that
support cropping and/or scaling and/or have non-square pixels, and for
overlay devices.
-
.. c:type:: v4l2_cropcap
.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
@@ -62,9 +55,9 @@ overlay devices.
* - __u32
- ``type``
- Type of the data stream, set by the application. Only these types
- are valid here: ``V4L2_BUF_TYPE_VIDEO_CAPTURE``,
- ``V4L2_BUF_TYPE_VIDEO_OUTPUT`` and
- ``V4L2_BUF_TYPE_VIDEO_OVERLAY``. See :c:type:`v4l2_buf_type`.
+ are valid here: ``V4L2_BUF_TYPE_VIDEO_CAPTURE``, ``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE``,
+ ``V4L2_BUF_TYPE_VIDEO_OUTPUT``, ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE`` and
+ ``V4L2_BUF_TYPE_VIDEO_OVERLAY``. See :c:type:`v4l2_buf_type` and the note above.
* - struct :ref:`v4l2_rect <v4l2-rect-crop>`
- ``bounds``
- Defines the window within capturing or output is possible, this
@@ -90,6 +83,16 @@ overlay devices.
``pixelaspect`` to 1/1. Other common values are 54/59 for PAL and
SECAM, 11/10 for NTSC sampled according to [:ref:`itu601`].
+.. note::
+ Unfortunately in the case of multiplanar buffer types
+ (``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE`` and ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``)
+ this API was inconsistent with regard to how the :c:type:`v4l2_cropcap` ``type`` field
+ should be filled in. Some drivers only accepted the ``_MPLANE`` buffer type while
+ other drivers only accepted a non-multiplanar buffer type (i.e. without the
+ ``_MPLANE`` at the end).
+
+ Starting with kernel 4.13 both variations are allowed.
+
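+An application that needs to work with both older and newer kernels can
+simply retry with the single-planar type if the multiplanar one is
+rejected. A minimal sketch (the helper name is arbitrary and error
+handling is simplified):
+
+.. code-block:: c
+
+    #include <errno.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    static int query_cropcap(int fd, struct v4l2_cropcap *cap)
+    {
+        memset(cap, 0, sizeof(*cap));
+        cap->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+        if (ioctl(fd, VIDIOC_CROPCAP, cap) == 0)
+            return 0;
+        if (errno != EINVAL)
+            return -1;
+        /* Driver only accepts the single-planar type; retry with it. */
+        cap->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+        return ioctl(fd, VIDIOC_CROPCAP, cap);
+    }
+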
.. _v4l2-rect-crop:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-crop.rst b/Documentation/media/uapi/v4l/vidioc-g-crop.rst
index 56a36340f565..13771ee3e94a 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-crop.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-crop.rst
@@ -45,12 +45,6 @@ and struct :c:type:`v4l2_rect` substructure named ``c`` of a
v4l2_crop structure and call the :ref:`VIDIOC_S_CROP <VIDIOC_G_CROP>` ioctl with a pointer
to this structure.
-Do not use the multiplanar buffer types. Use
-``V4L2_BUF_TYPE_VIDEO_CAPTURE`` instead of
-``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE`` and use
-``V4L2_BUF_TYPE_VIDEO_OUTPUT`` instead of
-``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``.
-
The driver first adjusts the requested dimensions against hardware
limits, i. e. the bounds given by the capture/output window, and it
rounds to the closest possible values of horizontal and vertical offset,
@@ -87,14 +81,24 @@ When cropping is not supported then no parameters are changed and
* - __u32
- ``type``
- Type of the data stream, set by the application. Only these types
- are valid here: ``V4L2_BUF_TYPE_VIDEO_CAPTURE``,
- ``V4L2_BUF_TYPE_VIDEO_OUTPUT`` and
- ``V4L2_BUF_TYPE_VIDEO_OVERLAY``. See :c:type:`v4l2_buf_type`.
+ are valid here: ``V4L2_BUF_TYPE_VIDEO_CAPTURE``, ``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE``,
+ ``V4L2_BUF_TYPE_VIDEO_OUTPUT``, ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE`` and
+ ``V4L2_BUF_TYPE_VIDEO_OVERLAY``. See :c:type:`v4l2_buf_type` and the note above.
* - struct :c:type:`v4l2_rect`
- ``c``
- Cropping rectangle. The same co-ordinate system as for struct
:c:type:`v4l2_cropcap` ``bounds`` is used.
+.. note::
+ Unfortunately in the case of multiplanar buffer types
+ (``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE`` and ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``)
+ this API was inconsistent with regard to how the :c:type:`v4l2_crop` ``type`` field
+ should be filled in. Some drivers only accepted the ``_MPLANE`` buffer type while
+ other drivers only accepted a non-multiplanar buffer type (i.e. without the
+ ``_MPLANE`` at the end).
+
+ Starting with kernel 4.13 both variations are allowed.
+
Return Value
============
diff --git a/Documentation/media/uapi/v4l/vidioc-g-selection.rst b/Documentation/media/uapi/v4l/vidioc-g-selection.rst
index b80d85cb8891..c1ee86472918 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-selection.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-selection.rst
@@ -42,11 +42,7 @@ The ioctls are used to query and configure selection rectangles.
To query the cropping (composing) rectangle set struct
:c:type:`v4l2_selection` ``type`` field to the
-respective buffer type. Do not use the multiplanar buffer types. Use
-``V4L2_BUF_TYPE_VIDEO_CAPTURE`` instead of
-``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE`` and use
-``V4L2_BUF_TYPE_VIDEO_OUTPUT`` instead of
-``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``. The next step is setting the
+respective buffer type. The next step is setting the
value of struct :c:type:`v4l2_selection` ``target``
field to ``V4L2_SEL_TGT_CROP`` (``V4L2_SEL_TGT_COMPOSE``). Please refer
to table :ref:`v4l2-selections-common` or :ref:`selection-api` for
@@ -64,11 +60,7 @@ pixels.
To change the cropping (composing) rectangle set the struct
:c:type:`v4l2_selection` ``type`` field to the
-respective buffer type. Do not use multiplanar buffers. Use
-``V4L2_BUF_TYPE_VIDEO_CAPTURE`` instead of
-``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE``. Use
-``V4L2_BUF_TYPE_VIDEO_OUTPUT`` instead of
-``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``. The next step is setting the
+respective buffer type. The next step is setting the
value of struct :c:type:`v4l2_selection` ``target`` to
``V4L2_SEL_TGT_CROP`` (``V4L2_SEL_TGT_COMPOSE``). Please refer to table
:ref:`v4l2-selections-common` or :ref:`selection-api` for additional
@@ -169,6 +161,16 @@ Selection targets and flags are documented in
- Reserved fields for future use. Drivers and applications must zero
this array.
+.. note::
+ Unfortunately in the case of multiplanar buffer types
+ (``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE`` and ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``)
+ this API was inconsistent with regard to how the :c:type:`v4l2_selection` ``type`` field
+ should be filled in. Some drivers only accepted the ``_MPLANE`` buffer type while
+ other drivers only accepted a non-multiplanar buffer type (i.e. without the
+ ``_MPLANE`` at the end).
+
+ Starting with kernel 4.13 both variations are allowed.
+
Return Value
============
diff --git a/Documentation/media/v4l-drivers/imx.rst b/Documentation/media/v4l-drivers/imx.rst
new file mode 100644
index 000000000000..e0ee0f1aeb05
--- /dev/null
+++ b/Documentation/media/v4l-drivers/imx.rst
@@ -0,0 +1,614 @@
+i.MX Video Capture Driver
+=========================
+
+Introduction
+------------
+
+The Freescale i.MX5/6 contains an Image Processing Unit (IPU), which
+handles the flow of image frames to and from capture devices and
+display devices.
+
+For image capture, the IPU contains the following internal subunits:
+
+- Image DMA Controller (IDMAC)
+- Camera Serial Interface (CSI)
+- Image Converter (IC)
+- Sensor Multi-FIFO Controller (SMFC)
+- Image Rotator (IRT)
+- Video De-Interlacing or Combining Block (VDIC)
+
+The IDMAC is the DMA controller for transfer of image frames to and from
+memory. Various dedicated DMA channels exist for both video capture and
+display paths. During transfer, the IDMAC is also capable of vertical
+image flip, 8x8 block transfer (see IRT description), pixel component
+re-ordering (for example UYVY to YUYV) within the same colorspace, and
+even packed <--> planar conversion. It can also perform a simple
+de-interlacing by interleaving even and odd lines during transfer
+(without motion compensation which requires the VDIC).
+
+The CSI is the backend capture unit that interfaces directly with
+camera sensors over Parallel, BT.656/1120, and MIPI CSI-2 busses.
+
+The IC handles color-space conversion, resizing (downscaling and
+upscaling), horizontal flip, and 90/270 degree rotation operations.
+
+There are three independent "tasks" within the IC that can carry out
+conversions concurrently: pre-process encoding, pre-process viewfinder,
+and post-processing. Within each task, conversions are split into three
+sections: downsizing section, main section (upsizing, flip, colorspace
+conversion, and graphics plane combining), and rotation section.
+
+The IPU time-shares the IC task operations. The time-slice granularity
+is one burst of eight pixels in the downsizing section, one image line
+in the main processing section, and one image frame in the rotation
+section.
+
+The SMFC is composed of four independent FIFOs that each can transfer
+captured frames from sensors directly to memory concurrently via four
+IDMAC channels.
+
+The IRT carries out 90 and 270 degree image rotation operations. The
+rotation operation is carried out on 8x8 pixel blocks at a time. This
+operation is supported by the IDMAC which handles the 8x8 block transfer
+along with block reordering, in coordination with vertical flip.
+
+The VDIC handles the conversion of interlaced video to progressive, with
+support for different motion compensation modes (low, medium, and high
+motion). The deinterlaced output frames from the VDIC can be sent to the
+IC pre-process viewfinder task for further conversions. The VDIC also
+contains a Combiner that combines two image planes, with alpha blending
+and color keying.
+
+In addition to the IPU internal subunits, there are also two units
+outside the IPU that are also involved in video capture on i.MX:
+
+- MIPI CSI-2 Receiver for camera sensors with the MIPI CSI-2 bus
+ interface. This is a Synopsys DesignWare core.
+- Two video multiplexers for selecting among multiple sensor inputs
+ to send to a CSI.
+
+For more info, refer to the latest versions of the i.MX5/6 reference
+manuals [#f1]_ and [#f2]_.
+
+
+Features
+--------
+
+Some of the features of this driver include:
+
+- Many different pipelines can be configured via the media controller
+  API that correspond to the hardware video capture pipelines supported
+  in the i.MX.
+
+- Supports parallel, BT.656, and MIPI CSI-2 interfaces.
+
+- Concurrent independent streams, by configuring pipelines to multiple
+ video capture interfaces using independent entities.
+
+- Scaling, color-space conversion, horizontal and vertical flip, and
+ image rotation via IC task subdevs.
+
+- Many pixel formats supported (RGB, packed and planar YUV, partial
+ planar YUV).
+
+- The VDIC subdev supports motion compensated de-interlacing, with three
+ motion compensation modes: low, medium, and high motion. Pipelines are
+ defined that allow sending frames to the VDIC subdev directly from the
+  CSI. Support for sending frames to the VDIC from memory buffers via an
+  output/mem2mem device is planned for the future.
+
+- Includes a Frame Interval Monitor (FIM) that can correct vertical sync
+ problems with the ADV718x video decoders.
+
+
+Entities
+--------
+
+imx6-mipi-csi2
+--------------
+
+This is the MIPI CSI-2 receiver entity. It has one sink pad to receive
+the MIPI CSI-2 stream (usually from a MIPI CSI-2 camera sensor). It has
+four source pads, corresponding to the four MIPI CSI-2 demuxed virtual
+channel outputs. Multiple source pads can be enabled to independently
+stream from multiple virtual channels.
+
+This entity actually consists of two sub-blocks. One is the MIPI CSI-2
+core. This is a Synopsys Designware MIPI CSI-2 core. The other sub-block
+is a "CSI-2 to IPU gasket". The gasket acts as a demultiplexer of the
+four virtual channels streams, providing four separate parallel buses
+containing each virtual channel that are routed to CSIs or video
+multiplexers as described below.
+
+On i.MX6 solo/dual-lite, all four virtual channel buses are routed to
+two video multiplexers. Both CSI0 and CSI1 can receive any virtual
+channel, as selected by the video multiplexers.
+
+On i.MX6 Quad, virtual channel 0 is routed to IPU1-CSI0 (after being
+selected by a video mux), virtual channels 1 and 2 are hard-wired to
+IPU1-CSI1 and IPU2-CSI0, respectively, and virtual channel 3 is routed to
+IPU2-CSI1 (again selected by a video mux).
+
+ipuX_csiY_mux
+-------------
+
+These are the video multiplexers. They have two or more sink pads to
+select from either camera sensors with a parallel interface, or from
+MIPI CSI-2 virtual channels from imx6-mipi-csi2 entity. They have a
+single source pad that routes to a CSI (ipuX_csiY entities).
+
+On i.MX6 solo/dual-lite, there are two video mux entities. One sits
+in front of IPU1-CSI0 to select between a parallel sensor and any of
+the four MIPI CSI-2 virtual channels (a total of five sink pads). The
+other mux sits in front of IPU1-CSI1, and again has five sink pads to
+select between a parallel sensor and any of the four MIPI CSI-2 virtual
+channels.
+
+On i.MX6 Quad, there are two video mux entities. One sits in front of
+IPU1-CSI0 to select between a parallel sensor and MIPI CSI-2 virtual
+channel 0 (two sink pads). The other mux sits in front of IPU2-CSI1 to
+select between a parallel sensor and MIPI CSI-2 virtual channel 3 (two
+sink pads).
+
+ipuX_csiY
+---------
+
+These are the CSI entities. They have a single sink pad receiving from
+either a video mux or from a MIPI CSI-2 virtual channel as described
+above.
+
+This entity has two source pads. The first source pad can link directly
+to the ipuX_vdic entity or the ipuX_ic_prp entity, using hardware links
+that require no IDMAC memory buffer transfer.
+
+When the direct source pad is routed to the ipuX_ic_prp entity, frames
+from the CSI can be processed by one or both of the IC pre-processing
+tasks.
+
+When the direct source pad is routed to the ipuX_vdic entity, the VDIC
+will carry out motion-compensated de-interlace using "high motion" mode
+(see description of ipuX_vdic entity).
+
+The second source pad sends video frames directly to memory buffers
+via the SMFC and an IDMAC channel, bypassing IC pre-processing. This
+source pad is routed to a capture device node, with a node name of the
+format "ipuX_csiY capture".
+
+Note that since the IDMAC source pad makes use of an IDMAC channel, it
+can do pixel reordering within the same colorspace. For example, the
+sink pad can take UYVY2X8, but the IDMAC source pad can output YUYV2X8.
+If the sink pad is receiving YUV, the output at the capture device can
+also be converted to a planar YUV format such as YUV420.
+
+It will also perform simple de-interlace without motion compensation,
+which is activated if the sink pad's field type is an interlaced type,
+and the IDMAC source pad field type is set to none.
+
+This subdev can generate the following event when enabling the second
+IDMAC source pad:
+
+- V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR
+
+The user application can subscribe to this event from the ipuX_csiY
+subdev node. This event is generated by the Frame Interval Monitor
+(see below for more on the FIM).
+
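+A minimal userspace sketch of subscribing to this event follows (the
+subdev node path is board-specific, error handling is omitted, and the
+header providing the event ID is assumed to be the imx-media uAPI header
+from the file list at the end of this document):
+
+.. code-block:: c
+
+    #include <fcntl.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+    #include <linux/imx-media.h>  /* V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR */
+
+    static int fim_subscribe(const char *subdev_path)
+    {
+        struct v4l2_event_subscription sub = {
+            .type = V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR,
+        };
+        int fd = open(subdev_path, O_RDWR);
+
+        if (fd < 0)
+            return -1;
+        /* Afterwards, poll() for exceptions and fetch the event with
+         * VIDIOC_DQEVENT, then restart streaming to recover. */
+        return ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
+    }
+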
+Cropping in ipuX_csiY
+---------------------
+
+The CSI supports cropping the incoming raw sensor frames. This is
+implemented in the ipuX_csiY entities at the sink pad, using the
+crop selection subdev API.
+
+The CSI also supports fixed divide-by-two downscaling independently in
+width and height. This is implemented in the ipuX_csiY entities at
+the sink pad, using the compose selection subdev API.
+
+The output rectangle at the ipuX_csiY source pad is the same as the
+compose rectangle at the sink pad, so the source pad rectangle cannot be
+negotiated directly. If /2 downscaling is desired it must be set using
+the compose selection API at the sink pad; otherwise the source pad
+rectangle is equal to the incoming rectangle.
+
+To give an example of crop and /2 downscale, this will crop a
+1280x960 input frame to 640x480, and then /2 downscale in both
+dimensions to 320x240 (assumes ipu1_csi0 is linked to ipu1_csi0_mux):
+
+.. code-block:: none
+
+   media-ctl -V "'ipu1_csi0_mux':2[fmt:UYVY2X8/1280x960]"
+   media-ctl -V "'ipu1_csi0':0[crop:(0,0)/640x480]"
+   media-ctl -V "'ipu1_csi0':0[compose:(0,0)/320x240]"
+
+Frame Skipping in ipuX_csiY
+---------------------------
+
+The CSI supports frame rate decimation, via frame skipping. Frame
+rate decimation is specified by setting the frame intervals at
+sink and source pads. The ipuX_csiY entity then applies the best
+frame skip setting to the CSI to achieve the desired frame rate
+at the source pad.
+
+The following example reduces an assumed incoming 60 Hz frame
+rate by half at the IDMAC output source pad:
+
+.. code-block:: none
+
+   media-ctl -V "'ipu1_csi0':0[fmt:UYVY2X8/640x480@1/60]"
+   media-ctl -V "'ipu1_csi0':2[fmt:UYVY2X8/640x480@1/30]"
+
+Frame Interval Monitor in ipuX_csiY
+-----------------------------------
+
+The adv718x decoders can occasionally send corrupt fields during
+NTSC/PAL signal re-sync (too few or too many video lines). When
+this happens, the IPU triggers a mechanism to re-establish vertical
+sync by adding 1 dummy line every frame, which causes a rolling effect
+from image to image, and can last a long time before a stable image is
+recovered. Or sometimes the mechanism doesn't work at all, causing a
+permanent split image (one frame contains lines from two consecutive
+captured images).
+
+From experiment it was found that during image rolling, the frame
+intervals (elapsed time between two EOF's) drop below the nominal
+value for the current standard, by about one frame time (60 usec),
+and remain at that value until rolling stops.
+
+While the reason for this observation isn't known (the IPU dummy
+line mechanism should show an increase in the intervals by 1 line
+time every frame, not a fixed value), we can use it to detect the
+corrupt fields using a frame interval monitor. If the FIM detects a
+bad frame interval, the ipuX_csiY subdev will send the event
+V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR. Userland can register with
+the FIM event notification on the ipuX_csiY subdev device node.
+Userland can issue a streaming restart when this event is received
+to correct the rolling/split image.
+
+The ipuX_csiY subdev includes custom controls to tweak some dials for
+FIM. If one of these controls is changed during streaming, the FIM will
+be reset and will continue at the new settings.
+
+- V4L2_CID_IMX_FIM_ENABLE
+
+Enable/disable the FIM.
+
+- V4L2_CID_IMX_FIM_NUM
+
+How many frame interval measurements to average before comparing against
+the nominal frame interval reported by the sensor. This can reduce noise
+caused by interrupt latency.
+
+- V4L2_CID_IMX_FIM_TOLERANCE_MIN
+
+If the averaged intervals fall outside nominal by this amount, in
+microseconds, the V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR event is sent.
+
+- V4L2_CID_IMX_FIM_TOLERANCE_MAX
+
+If any intervals are higher than this value, those samples are
+discarded and do not enter into the average. This can be used to
+discard really high interval errors that might be due to interrupt
+latency from high system load.
+
+- V4L2_CID_IMX_FIM_NUM_SKIP
+
+How many frames to skip after a FIM reset or stream restart before
+FIM begins to average intervals.
+
+- V4L2_CID_IMX_FIM_ICAP_CHANNEL
+- V4L2_CID_IMX_FIM_ICAP_EDGE
+
+These controls will configure an input capture channel as the method
+for measuring frame intervals. This is superior to the default method
+of measuring frame intervals via EOF interrupt, since it is not subject
+to uncertainty errors introduced by interrupt latency.
+
+Input capture requires hardware support. A VSYNC signal must be routed
+to one of the i.MX6 input capture channel pads.
+
+V4L2_CID_IMX_FIM_ICAP_CHANNEL configures which i.MX6 input capture
+channel to use. This must be 0 or 1.
+
+V4L2_CID_IMX_FIM_ICAP_EDGE configures which signal edge will trigger
+input capture events. By default the input capture method is disabled
+with a value of IRQ_TYPE_NONE. Set this control to IRQ_TYPE_EDGE_RISING,
+IRQ_TYPE_EDGE_FALLING, or IRQ_TYPE_EDGE_BOTH to enable input capture,
+triggered on the given signal edge(s).
+
+When input capture is disabled, frame intervals will be measured via
+EOF interrupt.
+
+
+ipuX_vdic
+---------
+
+The VDIC carries out motion compensated de-interlacing, with three
+motion compensation modes: low, medium, and high motion. The mode is
+specified with the menu control V4L2_CID_DEINTERLACING_MODE. It has
+two sink pads and a single source pad.
+
+The direct sink pad receives from an ipuX_csiY direct pad. With this
+link the VDIC can only operate in high motion mode.
+
+When the IDMAC sink pad is activated, it receives from an output
+or mem2mem device node. With this pipeline, it can also operate
+in low and medium modes, because these modes require receiving
+frames from memory buffers. Note that an output or mem2mem device
+is not implemented yet, so this sink pad currently has no links.
+
+The source pad routes to the IC pre-processing entity ipuX_ic_prp.
+
+ipuX_ic_prp
+-----------
+
+This is the IC pre-processing entity. It acts as a router, routing
+data from its sink pad to one or both of its source pads.
+
+It has a single sink pad. The sink pad can receive from the ipuX_csiY
+direct pad, or from ipuX_vdic.
+
+This entity has two source pads. One source pad routes to the
+pre-process encode task entity (ipuX_ic_prpenc), the other to the
+pre-process viewfinder task entity (ipuX_ic_prpvf). Both source pads
+can be activated at the same time if the sink pad is receiving from
+ipuX_csiY. Only the source pad to the pre-process viewfinder task entity
+can be activated if the sink pad is receiving from ipuX_vdic (frames
+from the VDIC can only be processed by the pre-process viewfinder task).
+
+ipuX_ic_prpenc
+--------------
+
+This is the IC pre-processing encode entity. It has a single sink
+pad from ipuX_ic_prp, and a single source pad. The source pad is
+routed to a capture device node, with a node name of the format
+"ipuX_ic_prpenc capture".
+
+This entity performs the IC pre-process encode task operations:
+color-space conversion, resizing (downscaling and upscaling),
+horizontal and vertical flip, and 90/270 degree rotation. Flip
+and rotation are provided via standard V4L2 controls.
+
+Like the ipuX_csiY IDMAC source, it can also perform simple de-interlace
+without motion compensation, and pixel reordering.
+
+ipuX_ic_prpvf
+-------------
+
+This is the IC pre-processing viewfinder entity. It has a single sink
+pad from ipuX_ic_prp, and a single source pad. The source pad is routed
+to a capture device node, with a node name of the format
+"ipuX_ic_prpvf capture".
+
+It is identical in operation to ipuX_ic_prpenc, with the same resizing
+and CSC operations and flip/rotation controls. It will receive and
+process de-interlaced frames from the ipuX_vdic if ipuX_ic_prp is
+receiving from ipuX_vdic.
+
+Like the ipuX_csiY IDMAC source, it can perform simple de-interlace
+without motion compensation. However, note that if the ipuX_vdic is
+included in the pipeline (ipuX_ic_prp is receiving from ipuX_vdic),
+it's not possible to use simple de-interlace in ipuX_ic_prpvf, since
+the ipuX_vdic has already carried out de-interlacing (with motion
+compensation) and therefore the field type output from ipuX_ic_prp can
+only be none.
+
+Capture Pipelines
+-----------------
+
+The following describe the various use-cases supported by the pipelines.
+
+The links shown do not include the backend sensor, video mux, or mipi
+csi-2 receiver links. This depends on the type of sensor interface
+(parallel or mipi csi-2). So these pipelines begin with:
+
+sensor -> ipuX_csiY_mux -> ...
+
+for parallel sensors, or:
+
+sensor -> imx6-mipi-csi2 -> (ipuX_csiY_mux) -> ...
+
+for mipi csi-2 sensors. The imx6-mipi-csi2 receiver may need to route
+to the video mux (ipuX_csiY_mux) before sending to the CSI, depending
+on the mipi csi-2 virtual channel, hence ipuX_csiY_mux is shown in
+parenthesis.
+
+Unprocessed Video Capture:
+--------------------------
+
+Send frames directly from sensor to camera device interface node, with
+no conversions, via ipuX_csiY IDMAC source pad:
+
+-> ipuX_csiY:2 -> ipuX_csiY capture
+
+IC Direct Conversions:
+----------------------
+
+This pipeline uses the preprocess encode entity to route frames directly
+from the CSI to the IC, to carry out scaling up to 1024x1024 resolution,
+CSC, flipping, and image rotation:
+
+-> ipuX_csiY:1 -> 0:ipuX_ic_prp:1 -> 0:ipuX_ic_prpenc:1 ->
+ ipuX_ic_prpenc capture
+
+Motion Compensated De-interlace:
+--------------------------------
+
+This pipeline routes frames from the CSI direct pad to the VDIC entity to
+support motion-compensated de-interlacing (high motion mode only),
+scaling up to 1024x1024, CSC, flip, and rotation:
+
+-> ipuX_csiY:1 -> 0:ipuX_vdic:2 -> 0:ipuX_ic_prp:2 ->
+ 0:ipuX_ic_prpvf:1 -> ipuX_ic_prpvf capture
+
+
+Usage Notes
+-----------
+
+To aid in configuration and for backward compatibility with V4L2
+applications that access controls only from video device nodes, the
+capture device interfaces inherit controls from the active entities
+in the current pipeline, so controls can be accessed either directly
+from the subdev or from the active capture device interface. For
+example, the FIM controls are available either from the ipuX_csiY
+subdevs or from the active capture device.
+
+The following are specific usage notes for the Sabre* reference
+boards:
+
+
+SabreLite with OV5642 and OV5640
+--------------------------------
+
+This platform requires the OmniVision OV5642 module with a parallel
+camera interface, and the OV5640 module with a MIPI CSI-2
+interface. Both modules are available from Boundary Devices:
+
+https://boundarydevices.com/product/nit6x_5mp
+https://boundarydevices.com/product/nit6x_5mp_mipi
+
+Note that if only one camera module is available, the other sensor
+node can be disabled in the device tree.
+
+The OV5642 module is connected to the parallel bus input on the i.MX
+internal video mux to IPU1 CSI0. Its i2c interface connects to i2c bus 2.
+
+The MIPI CSI-2 OV5640 module is connected to the i.MX internal MIPI CSI-2
+receiver, and the four virtual channel outputs from the receiver are
+routed as follows: vc0 to the IPU1 CSI0 mux, vc1 directly to IPU1 CSI1,
+vc2 directly to IPU2 CSI0, and vc3 to the IPU2 CSI1 mux. The OV5640 is
+also connected to i2c bus 2 on the SabreLite, therefore the OV5642 and
+OV5640 must not share the same i2c slave address.
+
+The following basic example configures unprocessed video capture
+pipelines for both sensors. The OV5642 is routed to ipu1_csi0, and
+the OV5640, transmitting on MIPI CSI-2 virtual channel 1 (which is
+imx6-mipi-csi2 pad 2), is routed to ipu1_csi1. Both sensors are
+configured to output 640x480, and the OV5642 outputs YUYV2X8, the
+OV5640 UYVY2X8:
+
+.. code-block:: none
+
+ # Setup links for OV5642
+ media-ctl -l "'ov5642 1-0042':0 -> 'ipu1_csi0_mux':1[1]"
+ media-ctl -l "'ipu1_csi0_mux':2 -> 'ipu1_csi0':0[1]"
+ media-ctl -l "'ipu1_csi0':2 -> 'ipu1_csi0 capture':0[1]"
+ # Setup links for OV5640
+ media-ctl -l "'ov5640 1-0040':0 -> 'imx6-mipi-csi2':0[1]"
+ media-ctl -l "'imx6-mipi-csi2':2 -> 'ipu1_csi1':0[1]"
+ media-ctl -l "'ipu1_csi1':2 -> 'ipu1_csi1 capture':0[1]"
+ # Configure pads for OV5642 pipeline
+ media-ctl -V "'ov5642 1-0042':0 [fmt:YUYV2X8/640x480 field:none]"
+ media-ctl -V "'ipu1_csi0_mux':2 [fmt:YUYV2X8/640x480 field:none]"
+ media-ctl -V "'ipu1_csi0':2 [fmt:AYUV32/640x480 field:none]"
+ # Configure pads for OV5640 pipeline
+ media-ctl -V "'ov5640 1-0040':0 [fmt:UYVY2X8/640x480 field:none]"
+ media-ctl -V "'imx6-mipi-csi2':2 [fmt:UYVY2X8/640x480 field:none]"
+ media-ctl -V "'ipu1_csi1':2 [fmt:AYUV32/640x480 field:none]"
+
+Streaming can then begin independently on the capture device nodes
+"ipu1_csi0 capture" and "ipu1_csi1 capture". The v4l2-ctl tool can
+be used to select any supported YUV pixelformat on the capture device
+nodes, including planar.
+
+SabreAuto with ADV7180 decoder
+------------------------------
+
+On the SabreAuto, an on-board ADV7180 SD decoder is connected to the
+parallel bus input on the internal video mux to IPU1 CSI0.
+
+The following example configures a pipeline to capture from the ADV7180
+video decoder, assuming NTSC 720x480 input signals, with Motion
+Compensated de-interlacing. Pad field types assume the adv7180 outputs
+"interlaced". $outputfmt can be any format supported by the ipu1_ic_prpvf
+entity at its output pad:
+
+.. code-block:: none
+
+ # Setup links
+ media-ctl -l "'adv7180 3-0021':0 -> 'ipu1_csi0_mux':1[1]"
+ media-ctl -l "'ipu1_csi0_mux':2 -> 'ipu1_csi0':0[1]"
+ media-ctl -l "'ipu1_csi0':1 -> 'ipu1_vdic':0[1]"
+ media-ctl -l "'ipu1_vdic':2 -> 'ipu1_ic_prp':0[1]"
+ media-ctl -l "'ipu1_ic_prp':2 -> 'ipu1_ic_prpvf':0[1]"
+ media-ctl -l "'ipu1_ic_prpvf':1 -> 'ipu1_ic_prpvf capture':0[1]"
+ # Configure pads
+ media-ctl -V "'adv7180 3-0021':0 [fmt:UYVY2X8/720x480]"
+ media-ctl -V "'ipu1_csi0_mux':2 [fmt:UYVY2X8/720x480 field:interlaced]"
+ media-ctl -V "'ipu1_csi0':1 [fmt:AYUV32/720x480 field:interlaced]"
+ media-ctl -V "'ipu1_vdic':2 [fmt:AYUV32/720x480 field:none]"
+ media-ctl -V "'ipu1_ic_prp':2 [fmt:AYUV32/720x480 field:none]"
+ media-ctl -V "'ipu1_ic_prpvf':1 [fmt:$outputfmt field:none]"
+
+Streaming can then begin on the capture device node at
+"ipu1_ic_prpvf capture". The v4l2-ctl tool can be used to select any
+supported YUV or RGB pixelformat on the capture device node.
+
+This platform accepts Composite Video analog inputs to the ADV7180 on
+Ain1 (connector J42).
+
+SabreSD with MIPI CSI-2 OV5640
+------------------------------
+
+Similarly to SabreLite, the SabreSD supports a parallel interface
+OV5642 module on IPU1 CSI0, and a MIPI CSI-2 OV5640 module. The OV5642
+connects to i2c bus 1 and the OV5640 to i2c bus 2.
+
+The device tree for SabreSD includes OF graphs for both the parallel
+OV5642 and the MIPI CSI-2 OV5640, but as of this writing only the MIPI
+CSI-2 OV5640 has been tested, so the OV5642 node is currently disabled.
+The OV5640 module connects to MIPI connector J5 (the compatible module
+part number and URL are not available).
+
+The following example configures a direct conversion pipeline to capture
+from the OV5640, transmitting on MIPI CSI-2 virtual channel 1. $sensorfmt
+can be any format supported by the OV5640. $sensordim is the frame
+dimension part of $sensorfmt (minus the mbus pixel code). $outputfmt can
+be any format supported by the ipu1_ic_prpenc entity at its output pad:
+
+.. code-block:: none
+
+ # Setup links
+ media-ctl -l "'ov5640 1-003c':0 -> 'imx6-mipi-csi2':0[1]"
+ media-ctl -l "'imx6-mipi-csi2':2 -> 'ipu1_csi1':0[1]"
+ media-ctl -l "'ipu1_csi1':1 -> 'ipu1_ic_prp':0[1]"
+ media-ctl -l "'ipu1_ic_prp':1 -> 'ipu1_ic_prpenc':0[1]"
+ media-ctl -l "'ipu1_ic_prpenc':1 -> 'ipu1_ic_prpenc capture':0[1]"
+ # Configure pads
+ media-ctl -V "'ov5640 1-003c':0 [fmt:$sensorfmt field:none]"
+ media-ctl -V "'imx6-mipi-csi2':2 [fmt:$sensorfmt field:none]"
+ media-ctl -V "'ipu1_csi1':1 [fmt:AYUV32/$sensordim field:none]"
+ media-ctl -V "'ipu1_ic_prp':1 [fmt:AYUV32/$sensordim field:none]"
+ media-ctl -V "'ipu1_ic_prpenc':1 [fmt:$outputfmt field:none]"
+
+Streaming can then begin on "ipu1_ic_prpenc capture" node. The v4l2-ctl
+tool can be used to select any supported YUV or RGB pixelformat on the
+capture device node.
+
+
+Known Issues
+------------
+
+1. When using 90 or 270 degree rotation control at capture resolutions
+ near the IC resizer limit of 1024x1024, and combined with planar
+ pixel formats (YUV420, YUV422p), frame capture will often fail with
+ no end-of-frame interrupts from the IDMAC channel. To work around
+ this, use lower resolution and/or packed formats (YUYV, RGB3, etc.)
+ when 90 or 270 rotations are needed.
+
+
+File list
+---------
+
+drivers/staging/media/imx/
+include/media/imx.h
+include/linux/imx-media.h
+
+References
+----------
+
+.. [#f1] http://www.nxp.com/assets/documents/data/en/reference-manuals/IMX6DQRM.pdf
+.. [#f2] http://www.nxp.com/assets/documents/data/en/reference-manuals/IMX6SDLRM.pdf
+
+
+Authors
+-------
+Steve Longerbeam <[email protected]>
+Philipp Zabel <[email protected]>
+Russell King <[email protected]>
+
+Copyright (C) 2012-2017 Mentor Graphics Inc.
diff --git a/Documentation/media/v4l-drivers/index.rst b/Documentation/media/v4l-drivers/index.rst
index 90fe22a6414a..2e24d6806052 100644
--- a/Documentation/media/v4l-drivers/index.rst
+++ b/Documentation/media/v4l-drivers/index.rst
@@ -42,6 +42,7 @@ For more details see the file COPYING in the source distribution of Linux.
davinci-vpbe
fimc
ivtv
+ max2175
meye
omap3isp
omap4_camera
diff --git a/Documentation/media/v4l-drivers/max2175.rst b/Documentation/media/v4l-drivers/max2175.rst
new file mode 100644
index 000000000000..04478c25d57a
--- /dev/null
+++ b/Documentation/media/v4l-drivers/max2175.rst
@@ -0,0 +1,62 @@
+Maxim Integrated MAX2175 RF to bits tuner driver
+================================================
+
+The MAX2175 driver implements the following driver-specific controls:
+
+``V4L2_CID_MAX2175_I2S_ENABLE``
+-------------------------------
+ Enable/Disable I2S output of the tuner. This is a private control
+ that can be accessed only using the subdev interface.
+ Refer to Documentation/media/kapi/v4l2-controls for more details.
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 4
+
+ * - ``(0)``
+ - I2S output is disabled.
+ * - ``(1)``
+ - I2S output is enabled.
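+
+As a rough illustration, such a private control can be set from user space
+through the tuner's sub-device node. In the sketch below, the sub-device node
+path and the header providing ``V4L2_CID_MAX2175_I2S_ENABLE`` are assumptions:
+
+.. code-block:: c
+
+    #include <fcntl.h>
+    #include <unistd.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+    #include <linux/max2175.h>   /* assumed location of the control IDs */
+
+    /* Enable the I2S output of the tuner via its v4l-subdev node. */
+    static int enable_i2s(const char *subdev)
+    {
+        struct v4l2_control ctrl = {
+            .id = V4L2_CID_MAX2175_I2S_ENABLE,
+            .value = 1,
+        };
+        int fd = open(subdev, O_RDWR);   /* e.g. "/dev/v4l-subdev0", assumed */
+        int ret;
+
+        if (fd < 0)
+            return -1;
+        ret = ioctl(fd, VIDIOC_S_CTRL, &ctrl);
+        close(fd);
+        return ret;
+    }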
+
+``V4L2_CID_MAX2175_HSLS``
+-------------------------
+ The high-side/low-side (HSLS) control of the tuner for a given band.
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 4
+
+ * - ``(0)``
+ - The LO frequency position is below the desired frequency.
+ * - ``(1)``
+ - The LO frequency position is above the desired frequency.
+
+``V4L2_CID_MAX2175_RX_MODE (menu)``
+-----------------------------------
+ The Rx mode controls a number of preset parameters of the tuner like
+ sample clock (sck), sampling rate etc. These multiple settings are
+ provided under one single label called Rx mode in the datasheet. The
+ list below shows the supported modes with a brief description.
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 1 4
+
+ * - ``"Europe modes"``
+ * - ``"FM 1.2" (0)``
+ - This configures FM band with a sample rate of 0.512 million
+ samples/sec with a 10.24 MHz sck.
+ * - ``"DAB 1.2" (1)``
+ - This configures VHF band with a sample rate of 2.048 million
+ samples/sec with a 32.768 MHz sck.
+
+ * - ``"North America modes"``
+ * - ``"FM 1.0" (0)``
+ - This configures FM band with a sample rate of 0.7441875 million
+ samples/sec with a 14.88375 MHz sck.
+ * - ``"DAB 1.2" (1)``
+ - This configures FM band with a sample rate of 0.372 million
+ samples/sec with a 7.441875 MHz sck.
diff --git a/Documentation/networking/segmentation-offloads.txt b/Documentation/networking/segmentation-offloads.txt
index f200467ade38..2f09455a993a 100644
--- a/Documentation/networking/segmentation-offloads.txt
+++ b/Documentation/networking/segmentation-offloads.txt
@@ -55,7 +55,7 @@ IPIP, SIT, GRE, UDP Tunnel, and Remote Checksum Offloads
In addition to the offloads described above it is possible for a frame to
contain additional headers such as an outer tunnel. In order to account
for such instances an additional set of segmentation offload types were
-introduced including SKB_GSO_IPIP, SKB_GSO_SIT, SKB_GSO_GRE, and
+introduced including SKB_GSO_IPXIP4, SKB_GSO_IPXIP6, SKB_GSO_GRE, and
SKB_GSO_UDP_TUNNEL. These extra segmentation types are used to identify
cases where there are more than just 1 set of headers. For example in the
case of IPIP and SIT we should have the network and transport headers moved
diff --git a/Documentation/powerpc/cxlflash.txt b/Documentation/powerpc/cxlflash.txt
index 66b4496d6619..a64bdaa0a1cf 100644
--- a/Documentation/powerpc/cxlflash.txt
+++ b/Documentation/powerpc/cxlflash.txt
@@ -124,8 +124,8 @@ Block library API
http://github.com/open-power/capiflash
-CXL Flash Driver IOCTLs
-=======================
+CXL Flash Driver LUN IOCTLs
+===========================
Users, such as the block library, that wish to interface with a flash
device (LUN) via user space access need to use the services provided
@@ -257,6 +257,12 @@ DK_CXLFLASH_VLUN_RESIZE
operating in the virtual mode and used to program a LUN translation
table that the AFU references when provided with a resource handle.
+ This ioctl can return -EAGAIN if an AFU sync operation takes too long.
+ In addition to returning a failure to user, cxlflash will also schedule
+ an asynchronous AFU reset. Should the user choose to retry the operation,
+ it is expected to succeed. If this ioctl fails with -EAGAIN, the user
+ can either retry the operation or treat it as a failure.
+
DK_CXLFLASH_RELEASE
-------------------
This ioctl is responsible for releasing a previously obtained
@@ -309,6 +315,12 @@ DK_CXLFLASH_VLUN_CLONE
clone. This is to avoid a stale entry in the file descriptor table of the
child process.
+ This ioctl can return -EAGAIN if an AFU sync operation takes too long.
+ In addition to returning a failure to user, cxlflash will also schedule
+ an asynchronous AFU reset. Should the user choose to retry the operation,
+ it is expected to succeed. If this ioctl fails with -EAGAIN, the user
+ can either retry the operation or treat it as a failure.
+
DK_CXLFLASH_VERIFY
------------------
This ioctl is used to detect various changes such as the capacity of
@@ -355,3 +367,63 @@ DK_CXLFLASH_MANAGE_LUN
exclusive user space access (superpipe). In case a LUN is visible
across multiple ports and adapters, this ioctl is used to uniquely
identify each LUN by its World Wide Node Name (WWNN).
+
+
+CXL Flash Driver Host IOCTLs
+============================
+
+ Each host adapter instance that is supported by the cxlflash driver
+ has a special character device associated with it to enable a set of
+ host management functions. These character devices are hosted in a
+ class dedicated for cxlflash and can be accessed via /dev/cxlflash/*.
+
+ Applications can be written to perform various functions using the
+ host ioctl APIs below.
+
+ The structure definitions for these IOCTLs are available in:
+ uapi/scsi/cxlflash_ioctl.h
+
+HT_CXLFLASH_LUN_PROVISION
+-------------------------
+ This ioctl is used to create and delete persistent LUNs on cxlflash
+ devices that lack an external LUN management interface. It is only
+ valid when used with AFUs that support the LUN provision capability.
+
+ When sufficient space is available, LUNs can be created by specifying
+ the target port to host the LUN and a desired size in 4K blocks. Upon
+ success, the LUN ID and WWID of the created LUN will be returned and
+ the SCSI bus can be scanned to detect the change in LUN topology. Note
+ that partial allocations are not supported. Should a creation fail due
+ to a space issue, the target port can be queried for its current LUN
+ geometry.
+
+ To remove a LUN, the device must first be disassociated from the Linux
+ SCSI subsystem. The LUN deletion can then be initiated by specifying a
+ target port and LUN ID. Upon success, the LUN geometry associated with
+ the port will be updated to reflect the new number of provisioned LUNs and
+ available capacity.
+
+ To query the LUN geometry of a port, the target port is specified and
+ upon success, the following information is presented:
+
+ - Maximum number of provisioned LUNs allowed for the port
+ - Current number of provisioned LUNs for the port
+ - Maximum total capacity of provisioned LUNs for the port (4K blocks)
+ - Current total capacity of provisioned LUNs for the port (4K blocks)
+
+ With this information, the number of available LUNs and capacity can be
+ calculated.
+
+HT_CXLFLASH_AFU_DEBUG
+---------------------
+ This ioctl is used to debug AFUs by supporting a command pass-through
+ interface. It is only valid when used with AFUs that support the AFU
+ debug capability.
+
+ With the exception of buffer management, AFU debug commands are opaque to
+ cxlflash and treated as pass-through. For debug commands that do require
+ data transfer, the user supplies an adequately sized data buffer and must
+ specify the data transfer direction with respect to the host. There is a
+ maximum transfer size of 256K imposed. Note that partial read completions
+ are not supported - when errors are experienced with a host read data
+ transfer, the data buffer is not copied back to the user.
diff --git a/Documentation/powerpc/firmware-assisted-dump.txt b/Documentation/powerpc/firmware-assisted-dump.txt
index 9cabaf8a207e..bdd344aa18d9 100644
--- a/Documentation/powerpc/firmware-assisted-dump.txt
+++ b/Documentation/powerpc/firmware-assisted-dump.txt
@@ -61,8 +61,8 @@ as follows:
boot successfully. For syntax of crashkernel= parameter,
refer to Documentation/kdump/kdump.txt. If any offset is
provided in crashkernel= parameter, it will be ignored
- as fadump reserves memory at end of RAM for boot memory
- dump preservation in case of a crash.
+ as fadump uses a predefined offset to reserve memory
+ for boot memory dump preservation in case of a crash.
-- After the low memory (boot memory) area has been saved, the
firmware will reset PCI and other hardware state. It will
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index 5962949944fd..619cdffa5d44 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -275,6 +275,42 @@ struct va_format:
Passed by reference.
+kobjects:
+ %pO
+
+ Base specifier for kobject based structs. Must be followed with
+ character for specific type of kobject as listed below:
+
+ Device tree nodes:
+
+ %pOF[fnpPcCF]
+
+ For printing device tree nodes. The optional arguments are:
+ f device node full_name
+ n device node name
+ p device node phandle
+ P device node path spec (name + @unit)
+ F device node flags
+ c major compatible string
+ C full compatible string
+	Without any arguments, prints full_name (same as %pOFf).
+	The separator when using multiple arguments is ':'.
+
+ Examples:
+
+ %pOF /foo/bar@0 - Node full name
+ %pOFf /foo/bar@0 - Same as above
+ %pOFfp /foo/bar@0:10 - Node full name + phandle
+ %pOFfcF /foo/bar@0:foo,device:--P- - Node full name +
+ major compatible string +
+ node flags
+ D - dynamic
+ d - detached
+ P - Populated
+ B - Populated bus
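+
+	A usage sketch, where np is assumed to be a valid
+	struct device_node pointer obtained by the caller:
+
+		pr_info("probing %pOF\n", np);    /* full name, same as %pOFf */
+		pr_info("node %pOFfp\n", np);     /* full name + phandle */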
+
+ Passed by reference.
+
struct clk:
%pC pll1
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index 3aed751e0cb5..adbb50ae5246 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -31,7 +31,7 @@ you probably needn't concern yourself with isdn4k-utils.
====================== =============== ========================================
GNU C 3.2 gcc --version
GNU make 3.81 make --version
-binutils 2.12 ld -v
+binutils 2.20 ld -v
util-linux 2.10o fdformat --version
module-init-tools 0.9.10 depmod -V
e2fsprogs 1.41.4 e2fsck -V
@@ -75,10 +75,9 @@ You will need GNU make 3.81 or later to build the kernel.
Binutils
--------
-Linux on IA-32 has recently switched from using ``as86`` to using ``gas`` for
-assembling the 16-bit boot code, removing the need for ``as86`` to compile
-your kernel. This change does, however, mean that you need a recent
-release of binutils.
+The build system has, as of 4.13, switched to using thin archives (`ar T`)
+rather than incremental linking (`ld -r`) for built-in.o intermediate steps.
+This requires binutils 2.20 or newer.
Perl
----
diff --git a/Documentation/sound/designs/index.rst b/Documentation/sound/designs/index.rst
index 04dcdae3e4f2..f0749943ccb2 100644
--- a/Documentation/sound/designs/index.rst
+++ b/Documentation/sound/designs/index.rst
@@ -9,6 +9,7 @@ Designs and Implementations
compress-offload
timestamping
jack-controls
+ tracepoints
procfile
powersave
oss-emulation
diff --git a/Documentation/sound/designs/tracepoints.rst b/Documentation/sound/designs/tracepoints.rst
new file mode 100644
index 000000000000..78bc5572f829
--- /dev/null
+++ b/Documentation/sound/designs/tracepoints.rst
@@ -0,0 +1,172 @@
+===================
+Tracepoints in ALSA
+===================
+
+2017/07/02
+Takashi Sakamoto
+
+Tracepoints in ALSA PCM core
+============================
+
+The ALSA PCM core registers the ``snd_pcm`` subsystem with the kernel
+tracepoint system. This subsystem includes two categories of tracepoints: for
+the state of the PCM buffer and for the processing of PCM hardware parameters.
+These tracepoints are available when the corresponding kernel configuration
+options are enabled. When ``CONFIG_SND_DEBUG`` is enabled, the latter
+tracepoints are available. When ``CONFIG_SND_PCM_XRUN_DEBUG`` is additionally
+enabled, the former tracepoints are enabled as well.
+
+Tracepoints for state of PCM buffer
+------------------------------------
+
+This category includes four tracepoints: ``hwptr``, ``applptr``, ``xrun`` and
+``hw_ptr_error``.
+
+Tracepoints for processing of PCM hardware parameters
+-----------------------------------------------------
+
+This category includes two tracepoints: ``hw_mask_param`` and
+``hw_interval_param``.
+
+In the design of the ALSA PCM core, data transmission is abstracted as a PCM
+substream. Applications manage a PCM substream to maintain data transmission
+of PCM frames. Before starting the data transmission, applications need to
+configure the PCM substream. In this procedure, the PCM hardware parameters
+are decided by an interaction between applications and the ALSA PCM core.
+Once decided, the runtime of the PCM substream keeps the parameters.
+
+The parameters are described in :c:type:`struct snd_pcm_hw_params`. This
+structure includes several types of parameters. Applications set preferred
+values for these parameters, then execute ioctl(2) with
+SNDRV_PCM_IOCTL_HW_REFINE or SNDRV_PCM_IOCTL_HW_PARAMS. The former is used
+just to refine the available set of parameters. The latter is used for the
+actual decision of the parameters.
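+
+In user space these ioctls are usually issued indirectly through alsa-lib
+rather than by hand. As a rough sketch (the device name and parameter values
+below are arbitrary assumptions), the usual configuration sequence, which ends
+in SNDRV_PCM_IOCTL_HW_PARAMS, could look like::
+
+    #include <alsa/asoundlib.h>
+
+    /* Configure a playback substream; alsa-lib refines and finally commits
+     * the hardware parameters via the ioctls described above. */
+    static int open_playback(snd_pcm_t **pcm)
+    {
+        snd_pcm_hw_params_t *hw;
+        unsigned int rate = 48000;
+        int err;
+
+        err = snd_pcm_open(pcm, "default", SND_PCM_STREAM_PLAYBACK, 0);
+        if (err < 0)
+            return err;
+
+        snd_pcm_hw_params_alloca(&hw);
+        snd_pcm_hw_params_any(*pcm, hw);        /* start from the full space */
+        snd_pcm_hw_params_set_access(*pcm, hw, SND_PCM_ACCESS_RW_INTERLEAVED);
+        snd_pcm_hw_params_set_format(*pcm, hw, SND_PCM_FORMAT_S16_LE);
+        snd_pcm_hw_params_set_channels(*pcm, hw, 2);
+        snd_pcm_hw_params_set_rate_near(*pcm, hw, &rate, NULL);
+
+        return snd_pcm_hw_params(*pcm, hw);     /* commit the decision */
+    }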
+
+The :c:type:`struct snd_pcm_hw_params` structure has the following members:
+
+``flags``
+ Configurable. ALSA PCM core and some drivers handle this flag to select
+ convenient parameters or change their behaviour.
+``masks``
+    Configurable. This type of parameter is described in
+    :c:type:`struct snd_mask` and represents mask values. As of PCM protocol
+    v2.0.13, three types are defined:
+
+ - SNDRV_PCM_HW_PARAM_ACCESS
+ - SNDRV_PCM_HW_PARAM_FORMAT
+ - SNDRV_PCM_HW_PARAM_SUBFORMAT
+``intervals``
+    Configurable. This type of parameter is described in
+    :c:type:`struct snd_interval` and represents values with a range. As of
+    PCM protocol v2.0.13, twelve types are defined:
+
+ - SNDRV_PCM_HW_PARAM_SAMPLE_BITS
+ - SNDRV_PCM_HW_PARAM_FRAME_BITS
+ - SNDRV_PCM_HW_PARAM_CHANNELS
+ - SNDRV_PCM_HW_PARAM_RATE
+ - SNDRV_PCM_HW_PARAM_PERIOD_TIME
+ - SNDRV_PCM_HW_PARAM_PERIOD_SIZE
+ - SNDRV_PCM_HW_PARAM_PERIOD_BYTES
+ - SNDRV_PCM_HW_PARAM_PERIODS
+ - SNDRV_PCM_HW_PARAM_BUFFER_TIME
+ - SNDRV_PCM_HW_PARAM_BUFFER_SIZE
+ - SNDRV_PCM_HW_PARAM_BUFFER_BYTES
+ - SNDRV_PCM_HW_PARAM_TICK_TIME
+``rmask``
+ Configurable. This is evaluated at ioctl(2) with
+ SNDRV_PCM_IOCTL_HW_REFINE only. Applications can select which
+    mask/interval parameters can be changed by the ALSA PCM core. For
+    SNDRV_PCM_IOCTL_HW_PARAMS, this mask is ignored and all of the
+    parameters are going to be changed.
+``cmask``
+    Read-only. After returning from ioctl(2), the buffer in user space for
+    :c:type:`struct snd_pcm_hw_params` includes the result of each operation.
+    This mask represents which mask/interval parameters were actually changed.
+``info``
+ Read-only. This represents hardware/driver capabilities as bit flags
+ with SNDRV_PCM_INFO_XXX. Typically, applications execute ioctl(2) with
+ SNDRV_PCM_IOCTL_HW_REFINE to retrieve this flag, then decide candidates
+ of parameters and execute ioctl(2) with SNDRV_PCM_IOCTL_HW_PARAMS to
+ configure PCM substream.
+``msbits``
+    Read-only. This value represents the available bit width in the MSB side
+    of a PCM sample. When the SNDRV_PCM_HW_PARAM_SAMPLE_BITS parameter is
+    decided as a fixed number, this value is calculated according to it;
+    otherwise it is zero. This behaviour depends on the driver
+    implementation.
+``rate_num``
+    Read-only. This value represents the numerator of the sampling rate in
+    fraction notation. Basically, when the SNDRV_PCM_HW_PARAM_RATE parameter
+    is decided as a single value, this value is calculated according to it;
+    otherwise it is zero. This behaviour depends on the driver
+    implementation.
+``rate_den``
+    Read-only. This value represents the denominator of the sampling rate in
+    fraction notation. Basically, when the SNDRV_PCM_HW_PARAM_RATE parameter
+    is decided as a single value, this value is calculated according to it;
+    otherwise it is zero. This behaviour depends on the driver
+    implementation.
+``fifo_size``
+    Read-only. This value represents the size of the FIFO in the serial
+    sound interface of the hardware. Basically, each driver can assign a
+    proper value to this parameter, but some drivers intentionally set it to
+    zero due to the hardware design or the data transmission protocol.
+
+The ALSA PCM core handles the buffer of :c:type:`struct snd_pcm_hw_params`
+when applications execute ioctl(2) with SNDRV_PCM_IOCTL_HW_REFINE or
+SNDRV_PCM_IOCTL_HW_PARAMS. Parameters in the buffer are changed according to
+:c:type:`struct snd_pcm_hardware` and the rules of constraints in the runtime.
+The structure describes the capabilities of the handled hardware. The rules
+describe the dependencies by which a parameter is decided according to several
+other parameters. A rule has a callback function, and drivers can register
+arbitrary functions to compute the target parameter. The ALSA PCM core
+registers some rules to the runtime as a default.
+
+Each driver can join in the interaction as long as it prepares the following
+two things in a callback of :c:type:`struct snd_pcm_ops.open`.
+
+1. In the callback, drivers are expected to change a member of
+ :c:type:`struct snd_pcm_hardware` type in the runtime, according to
+ capacities of corresponding hardware.
+2. In the same callback, drivers are also expected to register additional rules
+ of constraints into the runtime when several parameters have dependencies
+ due to hardware design.
+
+The driver can refer to the result of the interaction in a callback of
+:c:type:`struct snd_pcm_ops.hw_params`; however, it should not change the
+content.
+
+Tracepoints in this category are designed to trace changes of the
+mask/interval parameters. When ALSA PCM core changes them, ``hw_mask_param`` or
+``hw_interval_param`` event is probed according to the type of the changed
+parameter.
+
+ALSA PCM core also has a pretty print format for each of the tracepoints. Below
+is an example for ``hw_mask_param``.
+
+::
+
+ hw_mask_param: pcmC0D0p 001/023 FORMAT 00000000000000000000001000000044 00000000000000000000001000000044
+
+
+Below is an example for ``hw_interval_param``.
+
+::
+
+ hw_interval_param: pcmC0D0p 000/023 BUFFER_SIZE 0 0 [0 4294967295] 0 1 [0 4294967295]
+
+The first three fields are common. They represent the name of the ALSA PCM
+character device, the applied rule of constraint and the name of the changed
+parameter, in this order. The field for the rule of constraint consists of two
+sub-fields: the index of the applied rule and the total number of rules added
+to the runtime. As an exception, the index 000 means that the parameter is
+changed by the ALSA PCM core, regardless of the rules.
+
+The rest of the fields represent the state of the parameter before and after
+the change. These fields differ according to the type of the parameter. For
+parameters of mask type, the fields are a hexadecimal dump of the content of
+the parameter. For parameters of interval type, the fields are the values of
+the ``empty``, ``integer``, ``openmin``, ``min``, ``max`` and ``openmax``
+members of :c:type:`struct snd_interval`, in this order.
+
+Tracepoints in drivers
+======================
+
+Some drivers have tracepoints for developers' convenience. For them, please
+refer to each documentation or implementation.
diff --git a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
index 95c5443eff38..58ffa3f5bda7 100644
--- a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
+++ b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
@@ -2080,8 +2080,8 @@ sleeping poll threads, etc.
This callback is also atomic as default.
-copy and silence callbacks
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+copy_user, copy_kernel and fill_silence ops
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These callbacks are not mandatory, and can be omitted in most cases.
These callbacks are used when the hardware buffer cannot be in the
@@ -3532,8 +3532,9 @@ external hardware buffer in interrupts (or in tasklets, preferably).
The first case works fine if the external hardware buffer is large
enough. This method doesn't need any extra buffers and thus is more
-effective. You need to define the ``copy`` and ``silence`` callbacks
-for the data transfer. However, there is a drawback: it cannot be
+effective. You need to define the ``copy_user`` and ``copy_kernel``
+callbacks for the data transfer, in addition to ``fill_silence``
+callback for playback. However, there is a drawback: it cannot be
mmapped. The examples are GUS's GF1 PCM or emu8000's wavetable PCM.
The second case allows for mmap on the buffer, although you have to
@@ -3545,30 +3546,34 @@ Another case is when the chip uses a PCI memory-map region for the
buffer instead of the host memory. In this case, mmap is available only
on certain architectures like the Intel one. In non-mmap mode, the data
cannot be transferred as in the normal way. Thus you need to define the
-``copy`` and ``silence`` callbacks as well, as in the cases above. The
-examples are found in ``rme32.c`` and ``rme96.c``.
+``copy_user``, ``copy_kernel`` and ``fill_silence`` callbacks as well,
+as in the cases above. The examples are found in ``rme32.c`` and
+``rme96.c``.
-The implementation of the ``copy`` and ``silence`` callbacks depends
-upon whether the hardware supports interleaved or non-interleaved
-samples. The ``copy`` callback is defined like below, a bit
-differently depending whether the direction is playback or capture:
+The implementation of the ``copy_user``, ``copy_kernel`` and
+``fill_silence`` callbacks depends upon whether the hardware supports
+interleaved or non-interleaved samples. The ``copy_user`` callback is
+defined like below, a bit differently depending whether the direction
+is playback or capture:
::
- static int playback_copy(struct snd_pcm_substream *substream, int channel,
- snd_pcm_uframes_t pos, void *src, snd_pcm_uframes_t count);
- static int capture_copy(struct snd_pcm_substream *substream, int channel,
- snd_pcm_uframes_t pos, void *dst, snd_pcm_uframes_t count);
+ static int playback_copy_user(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void __user *src, unsigned long count);
+ static int capture_copy_user(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void __user *dst, unsigned long count);
In the case of interleaved samples, the second argument (``channel``) is
not used. The third argument (``pos``) points the current position
-offset in frames.
+offset in bytes.
The meaning of the fourth argument is different between playback and
capture. For playback, it holds the source data pointer, and for
capture, it's the destination data pointer.
-The last argument is the number of frames to be copied.
+The last argument is the number of bytes to be copied.
What you have to do in this callback is again different between playback
and capture directions. In the playback case, you copy the given amount
@@ -3578,8 +3583,7 @@ way, the copy would be like:
::
- my_memcpy(my_buffer + frames_to_bytes(runtime, pos), src,
- frames_to_bytes(runtime, count));
+ my_memcpy_from_user(my_buffer + pos, src, count);
For the capture direction, you copy the given amount of data (``count``)
at the specified offset (``pos``) on the hardware buffer to the
@@ -3587,31 +3591,68 @@ specified pointer (``dst``).
::
- my_memcpy(dst, my_buffer + frames_to_bytes(runtime, pos),
- frames_to_bytes(runtime, count));
+ my_memcpy_to_user(dst, my_buffer + pos, count);
+
+Here the functions are named as ``from_user`` and ``to_user`` because
+it's the user-space buffer that is passed to these callbacks. That
+is, the callback is supposed to copy from/to the user-space data
+directly to/from the hardware buffer.
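+
+Putting this together, an interleaved playback ``copy_user`` implementation
+could look roughly like the sketch below; ``struct my_chip`` and its
+``hw_buffer`` member are hypothetical driver-private names, not part of the
+ALSA API:
+
+::
+
+  static int playback_copy_user(struct snd_pcm_substream *substream,
+                                int channel, unsigned long pos,
+                                void __user *src, unsigned long count)
+  {
+          struct my_chip *chip = snd_pcm_substream_chip(substream);
+
+          /* pos and count are in bytes; channel is unused (interleaved) */
+          if (copy_from_user(chip->hw_buffer + pos, src, count))
+                  return -EFAULT;
+          return 0;
+  }
+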
-Note that both the position and the amount of data are given in frames.
+Careful readers might notice that these callbacks receive the
+arguments in bytes, not in frames like the other callbacks. This makes
+the coding easier, as in the examples above, and it also makes it easier
+to unify the interleaved and non-interleaved cases, as explained in the
+following.
In the case of non-interleaved samples, the implementation will be a bit
-more complicated.
+more complicated. The callback is called for each channel, passed by
+the second argument, so totally it's called for N-channels times per
+transfer.
+
+The meaning of the other arguments is almost the same as in the interleaved
+case. The callback is supposed to copy the data from/to the given
+user-space buffer, but only for the given channel. For the detailed
+implementations, please check ``isa/gus/gus_pcm.c`` or
+``pci/rme9652/rme9652.c`` as examples.
+
+The above callbacks are the copy from/to the user-space buffer. There
+are some cases where we want copy from/to the kernel-space buffer
+instead. In such a case, ``copy_kernel`` callback is called. It'd
+look like:
+
+::
+
+ static int playback_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *src, unsigned long count);
+ static int capture_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *dst, unsigned long count);
+
+As can be easily seen, the only difference is that the buffer pointer
+lacks the ``__user`` prefix; that is, a kernel-space buffer pointer is
+passed in the fourth argument. Correspondingly, the implementation would be
+a version without the user-copy, such as:
-You need to check the channel argument, and if it's -1, copy the whole
-channels. Otherwise, you have to copy only the specified channel. Please
-check ``isa/gus/gus_pcm.c`` as an example.
+::
+
+ my_memcpy(my_buffer + pos, src, count);
-The ``silence`` callback is also implemented in a similar way
+Usually for the playback, another callback ``fill_silence`` is
+defined. It's implemented in a similar way as the copy callbacks
+above:
::
static int silence(struct snd_pcm_substream *substream, int channel,
- snd_pcm_uframes_t pos, snd_pcm_uframes_t count);
+ unsigned long pos, unsigned long count);
-The meanings of arguments are the same as in the ``copy`` callback,
-although there is no ``src/dst`` argument. In the case of interleaved
-samples, the channel argument has no meaning, as well as on ``copy``
-callback.
+The meanings of arguments are the same as in the ``copy_user`` and
+``copy_kernel`` callbacks, although there is no buffer pointer
+argument. In the case of interleaved samples, the channel argument has
+no meaning, as well as on ``copy_*`` callbacks.
-The role of ``silence`` callback is to set the given amount
+The role of ``fill_silence`` callback is to set the given amount
(``count``) of silence data at the specified offset (``pos``) on the
hardware buffer. Suppose that the data format is signed (that is, the
silent-data is 0), and the implementation using a memset-like function
@@ -3619,11 +3660,11 @@ would be like:
::
- my_memcpy(my_buffer + frames_to_bytes(runtime, pos), 0,
- frames_to_bytes(runtime, count));
+ my_memset(my_buffer + pos, 0, count);
In the case of non-interleaved samples, again, the implementation
-becomes a bit more complicated. See, for example, ``isa/gus/gus_pcm.c``.
+becomes a bit more complicated, as it's called N-times per transfer
+for each channel. See, for example, ``isa/gus/gus_pcm.c``.
Non-Contiguous Buffers
----------------------
diff --git a/Documentation/sound/soc/dapm.rst b/Documentation/sound/soc/dapm.rst
index a27f42befa4d..8e44107933ab 100644
--- a/Documentation/sound/soc/dapm.rst
+++ b/Documentation/sound/soc/dapm.rst
@@ -105,6 +105,24 @@ Pre
Special PRE widget (exec before all others)
Post
Special POST widget (exec after all others)
+Buffer
+ Inter widget audio data buffer within a DSP.
+Scheduler
+ DSP internal scheduler that schedules component/pipeline processing
+ work.
+Effect
+ Widget that performs an audio processing effect.
+SRC
+ Sample Rate Converter within DSP or CODEC
+ASRC
+ Asynchronous Sample Rate Converter within DSP or CODEC
+Encoder
+ Widget that encodes audio data from one format (usually PCM) to another
+ usually more compressed format.
+Decoder
+ Widget that decodes audio data from a compressed format to an
+ uncompressed format like PCM.
+
(Widgets are defined in include/sound/soc-dapm.h)
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 4029943887a3..3a9831b72945 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -3255,6 +3255,141 @@ Otherwise, if the MCE is a corrected error, KVM will just
store it in the corresponding bank (provided this bank is
not holding a previously reported uncorrected error).
+4.107 KVM_S390_GET_CMMA_BITS
+
+Capability: KVM_CAP_S390_CMMA_MIGRATION
+Architectures: s390
+Type: vm ioctl
+Parameters: struct kvm_s390_cmma_log (in, out)
+Returns: 0 on success, a negative value on error
+
+This ioctl is used to get the values of the CMMA bits on the s390
+architecture. It is meant to be used in two scenarios:
+- During live migration to save the CMMA values. Live migration needs
+  to be enabled via the KVM_S390_VM_MIGRATION_START VM attribute.
+- To non-destructively peek at the CMMA values, with the flag
+ KVM_S390_CMMA_PEEK set.
+
+The ioctl takes parameters via the kvm_s390_cmma_log struct. The desired
+values are written to a buffer whose location is indicated via the "values"
+member in the kvm_s390_cmma_log struct. The values in the input struct are
+also updated as needed.
+Each CMMA value takes up one byte.
+
+struct kvm_s390_cmma_log {
+ __u64 start_gfn;
+ __u32 count;
+ __u32 flags;
+ union {
+ __u64 remaining;
+ __u64 mask;
+ };
+ __u64 values;
+};
+
+start_gfn is the number of the first guest frame whose CMMA values are
+to be retrieved,
+
+count is the length of the buffer in bytes,
+
+values points to the buffer where the result will be written to.
+
+If count is greater than KVM_S390_SKEYS_MAX, then it is considered to be
+KVM_S390_SKEYS_MAX. KVM_S390_SKEYS_MAX is re-used for consistency with
+other ioctls.
+
+The result is written in the buffer pointed to by the field values, and
+the values of the input parameter are updated as follows.
+
+Depending on the flags, different actions are performed. The only
+supported flag so far is KVM_S390_CMMA_PEEK.
+
+The default behaviour if KVM_S390_CMMA_PEEK is not set is:
+start_gfn will indicate the first page frame whose CMMA bits were dirty.
+It is not necessarily the same as the one passed as input, as clean pages
+are skipped.
+
+count will indicate the number of bytes actually written in the buffer.
+It can (and very often will) be smaller than the input value, since the
+buffer is only filled until 16 bytes of clean values are found (which
+are then not copied in the buffer). Since a CMMA migration block needs
+the base address and the length, for a total of 16 bytes, we will send
+back some clean data if there is some dirty data afterwards, as long as
+the size of the clean data does not exceed the size of the header. This
+makes it possible to minimize the amount of data to be saved or transferred
+over the network, at the expense of more roundtrips to userspace. The next
+invocation of the ioctl will skip over all the clean values, saving
+potentially more than just the 16 bytes we found.
+
+If KVM_S390_CMMA_PEEK is set:
+the existing storage attributes are read even when not in migration
+mode, and no other action is performed;
+
+the output start_gfn will be equal to the input start_gfn,
+
+the output count will be equal to the input count, except if the end of
+memory has been reached.
+
+In both cases:
+the field "remaining" will indicate the total number of dirty CMMA values
+still remaining, or 0 if KVM_S390_CMMA_PEEK is set and migration mode is
+not enabled.
+
+mask is unused.
+
+values points to the userspace buffer where the result will be stored.
+
+This ioctl can fail with -ENOMEM if not enough memory can be allocated to
+complete the task, with -ENXIO if CMMA is not enabled, with -EINVAL if
+KVM_S390_CMMA_PEEK is not set but migration mode was not enabled, with
+-EFAULT if the userspace address is invalid or if no page table is
+present for the addresses (e.g. when using hugepages).
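+
+As an illustration, a non-destructive peek at the CMMA values of the first
+4096 guest frames could look like the sketch below (error handling is
+omitted and vm_fd is assumed to be the VM file descriptor):
+
+  __u8 buf[4096];
+  struct kvm_s390_cmma_log log = {
+          .start_gfn = 0,
+          .count = sizeof(buf),          /* one byte per guest frame */
+          .flags = KVM_S390_CMMA_PEEK,   /* read without migration mode */
+          .values = (__u64)(unsigned long)buf,
+  };
+
+  if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log) == 0)
+          /* log.count CMMA values are now available in buf */;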
+
+4.108 KVM_S390_SET_CMMA_BITS
+
+Capability: KVM_CAP_S390_CMMA_MIGRATION
+Architectures: s390
+Type: vm ioctl
+Parameters: struct kvm_s390_cmma_log (in)
+Returns: 0 on success, a negative value on error
+
+This ioctl is used to set the values of the CMMA bits on the s390
+architecture. It is meant to be used during live migration to restore
+the CMMA values, but there are no restrictions on its use.
+The ioctl takes parameters via the kvm_s390_cmma_values struct.
+Each CMMA value takes up one byte.
+
+struct kvm_s390_cmma_log {
+ __u64 start_gfn;
+ __u32 count;
+ __u32 flags;
+ union {
+ __u64 remaining;
+ __u64 mask;
+ };
+ __u64 values;
+};
+
+start_gfn indicates the starting guest frame number,
+
+count indicates how many values are to be considered in the buffer,
+
+flags is not used and must be 0.
+
+mask indicates which PGSTE bits are to be considered.
+
+remaining is not used.
+
+values points to the buffer in userspace where to store the values.
+
+This ioctl can fail with -ENOMEM if not enough memory can be allocated to
+complete the task, with -ENXIO if CMMA is not enabled, with -EINVAL if
+the count field is too large (e.g. more than KVM_S390_CMMA_SIZE_MAX) or
+if the flags field was not 0, with -EFAULT if the userspace address is
+invalid, if invalid pages are written to (e.g. after the end of memory)
+or if no page table is present for the addresses (e.g. when using
+hugepages).
+
5. The kvm_run structure
------------------------
@@ -3996,6 +4131,34 @@ Parameters: none
Allow use of adapter-interruption suppression.
Returns: 0 on success; -EBUSY if a VCPU has already been created.
+7.11 KVM_CAP_PPC_SMT
+
+Architectures: ppc
+Parameters: vsmt_mode, flags
+
+Enabling this capability on a VM provides userspace with a way to set
+the desired virtual SMT mode (i.e. the number of virtual CPUs per
+virtual core). The virtual SMT mode, vsmt_mode, must be a power of 2
+between 1 and 8. On POWER8, vsmt_mode must also be no greater than
+the number of threads per subcore for the host. Currently flags must
+be 0. A successful call to enable this capability will result in
+vsmt_mode being returned when the KVM_CAP_PPC_SMT capability is
+subsequently queried for the VM. This capability is only supported by
+HV KVM, and can only be set before any VCPUs have been created.
+The KVM_CAP_PPC_SMT_POSSIBLE capability indicates which virtual SMT
+modes are available.
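+
+As a sketch, userspace would typically enable this capability right after
+creating the VM. The placement of vsmt_mode in args[0] and flags in args[1]
+below follows the Parameters line above and should be treated as an
+assumption:
+
+  struct kvm_enable_cap cap = {
+          .cap = KVM_CAP_PPC_SMT,
+          .args[0] = 4,                  /* desired virtual SMT mode */
+          .args[1] = 0,                  /* flags, currently must be 0 */
+  };
+
+  ioctl(vm_fd, KVM_ENABLE_CAP, &cap);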
+
+7.12 KVM_CAP_PPC_FWNMI
+
+Architectures: ppc
+Parameters: none
+
+With this capability a machine check exception in the guest address
+space will cause KVM to exit the guest with NMI exit reason. This
+enables QEMU to build an error log and branch to the machine check
+handling routine registered by the guest kernel. Without this capability
+KVM will branch to the guest's 0x200 interrupt vector.
+
8. Other capabilities.
----------------------
@@ -4157,3 +4320,12 @@ Currently the following bits are defined for the device_irq_level bitmap:
Future versions of kvm may implement additional events. These will get
indicated by returning a higher number from KVM_CHECK_EXTENSION and will be
listed above.
+
+8.10 KVM_CAP_PPC_SMT_POSSIBLE
+
+Architectures: ppc
+
+Querying this capability returns a bitmap indicating the possible
+virtual SMT modes that can be set using KVM_CAP_PPC_SMT. If bit N
+(counting from the right) is set, then a virtual SMT mode of 2^N is
+available.
diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virtual/kvm/devices/s390_flic.txt
index c2518cea8ab4..2f1cbf1301d2 100644
--- a/Documentation/virtual/kvm/devices/s390_flic.txt
+++ b/Documentation/virtual/kvm/devices/s390_flic.txt
@@ -16,6 +16,7 @@ FLIC provides support to
- register and modify adapter interrupt sources (KVM_DEV_FLIC_ADAPTER_*)
- modify AIS (adapter-interruption-suppression) mode state (KVM_DEV_FLIC_AISM)
- inject adapter interrupts on a specified adapter (KVM_DEV_FLIC_AIRQ_INJECT)
+- get/set all AIS mode states (KVM_DEV_FLIC_AISM_ALL)
Groups:
KVM_DEV_FLIC_ENQUEUE
@@ -136,6 +137,20 @@ struct kvm_s390_ais_req {
an isc according to the adapter-interruption-suppression mode on condition
that the AIS capability is enabled.
+ KVM_DEV_FLIC_AISM_ALL
+ Gets or sets the adapter-interruption-suppression mode for all ISCs. Takes
+ a kvm_s390_ais_all describing:
+
+struct kvm_s390_ais_all {
+ __u8 simm; /* Single-Interruption-Mode mask */
+	__u8 nimm;	/* No-Interruption-Mode mask */
+};
+
+ simm contains Single-Interruption-Mode mask for all ISCs, nimm contains
+ No-Interruption-Mode mask for all ISCs. Each bit in simm and nimm corresponds
+ to an ISC (MSB0 bit 0 to ISC 0 and so on). The combination of the simm and
+ nimm bits presents the AIS mode for an ISC.
+
Note: The KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR device ioctls executed on
FLIC with an unknown group or attribute gives the error code EINVAL (instead of
ENXIO, as specified in the API documentation). It is not possible to conclude
diff --git a/Documentation/virtual/kvm/devices/vcpu.txt b/Documentation/virtual/kvm/devices/vcpu.txt
index 02f50686c418..2b5dab16c4f2 100644
--- a/Documentation/virtual/kvm/devices/vcpu.txt
+++ b/Documentation/virtual/kvm/devices/vcpu.txt
@@ -16,7 +16,9 @@ Parameters: in kvm_device_attr.addr the address for PMU overflow interrupt is a
Returns: -EBUSY: The PMU overflow interrupt is already set
-ENXIO: The overflow interrupt not set when attempting to get it
-ENODEV: PMUv3 not supported
- -EINVAL: Invalid PMU overflow interrupt number supplied
+ -EINVAL: Invalid PMU overflow interrupt number supplied or
+ trying to set the IRQ number without using an in-kernel
+ irqchip.
A value describing the PMUv3 (Performance Monitor Unit v3) overflow interrupt
number for this vcpu. This interrupt could be a PPI or SPI, but the interrupt
@@ -25,11 +27,36 @@ all vcpus, while as an SPI it must be a separate number per vcpu.
1.2 ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_INIT
Parameters: no additional parameter in kvm_device_attr.addr
-Returns: -ENODEV: PMUv3 not supported
- -ENXIO: PMUv3 not properly configured as required prior to calling this
- attribute
+Returns: -ENODEV: PMUv3 not supported or GIC not initialized
+ -ENXIO: PMUv3 not properly configured or in-kernel irqchip not
+ configured as required prior to calling this attribute
-EBUSY: PMUv3 already initialized
-Request the initialization of the PMUv3. This must be done after creating the
-in-kernel irqchip. Creating a PMU with a userspace irqchip is currently not
-supported.
+Request the initialization of the PMUv3. If using the PMUv3 with an in-kernel
+virtual GIC implementation, this must be done after initializing the in-kernel
+irqchip.
+
+
+2. GROUP: KVM_ARM_VCPU_TIMER_CTRL
+Architectures: ARM,ARM64
+
+2.1. ATTRIBUTE: KVM_ARM_VCPU_TIMER_IRQ_VTIMER
+2.2. ATTRIBUTE: KVM_ARM_VCPU_TIMER_IRQ_PTIMER
+Parameters: in kvm_device_attr.addr the address for the timer interrupt is a
+ pointer to an int
+Returns: -EINVAL: Invalid timer interrupt number
+ -EBUSY: One or more VCPUs has already run
+
+A value describing the architected timer interrupt number when connected to an
+in-kernel virtual GIC. These must be a PPI (16 <= intid < 32). Setting the
+attribute overrides the default values (see below).
+
+KVM_ARM_VCPU_TIMER_IRQ_VTIMER: The EL1 virtual timer intid (default: 27)
+KVM_ARM_VCPU_TIMER_IRQ_PTIMER: The EL1 physical timer intid (default: 30)
+
+Setting the same PPI for different timers will prevent the VCPUs from running.
+Setting the interrupt number on a VCPU configures all VCPUs created at that
+time to use the number provided for a given timer, overwriting any previously
+configured values on other VCPUs. Userspace should configure the interrupt
+numbers on at least one VCPU after creating all VCPUs and before running any
+VCPUs.
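+
+A sketch of configuring the virtual timer interrupt through
+KVM_SET_DEVICE_ATTR on a VCPU file descriptor follows; the chosen intid
+equals the default and is only illustrative:
+
+  int intid = 27;   /* must be a PPI: 16 <= intid < 32 */
+  struct kvm_device_attr attr = {
+          .group = KVM_ARM_VCPU_TIMER_CTRL,
+          .attr  = KVM_ARM_VCPU_TIMER_IRQ_VTIMER,
+          .addr  = (__u64)(unsigned long)&intid,
+  };
+
+  ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);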
diff --git a/Documentation/virtual/kvm/devices/vm.txt b/Documentation/virtual/kvm/devices/vm.txt
index 575ccb022aac..903fc926860b 100644
--- a/Documentation/virtual/kvm/devices/vm.txt
+++ b/Documentation/virtual/kvm/devices/vm.txt
@@ -222,3 +222,36 @@ Allows user space to disable dea key wrapping, clearing the wrapping key.
Parameters: none
Returns: 0
+
+5. GROUP: KVM_S390_VM_MIGRATION
+Architectures: s390
+
+5.1. ATTRIBUTE: KVM_S390_VM_MIGRATION_STOP (w/o)
+
+Allows userspace to stop migration mode, needed for PGSTE migration.
+Setting this attribute when migration mode is not active will have no
+effects.
+
+Parameters: none
+Returns: 0
+
+5.2. ATTRIBUTE: KVM_S390_VM_MIGRATION_START (w/o)
+
+Allows userspace to start migration mode, needed for PGSTE migration.
+Setting this attribute when migration mode is already active will have
+no effects.
+
+Parameters: none
+Returns: -ENOMEM if there is not enough free memory to start migration mode
+ -EINVAL if the state of the VM is invalid (e.g. no memory defined)
+ 0 in case of success.
+
+5.3. ATTRIBUTE: KVM_S390_VM_MIGRATION_STATUS (r/o)
+
+Allows userspace to query the status of migration mode.
+
+Parameters: address of a buffer in user space to store the data (u64) to;
+ the data itself is either 0 if migration mode is disabled or 1
+ if it is enabled
+Returns: -EFAULT if the given address is not accessible from kernel space
+ 0 in case of success.
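+
+As a sketch, migration mode could be started with KVM_SET_DEVICE_ATTR,
+assuming vm_fd is the VM file descriptor:
+
+  struct kvm_device_attr attr = {
+          .group = KVM_S390_VM_MIGRATION,
+          .attr  = KVM_S390_VM_MIGRATION_START,
+  };
+
+  ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);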
diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
index 481b6a9c25d5..f50d45b1e967 100644
--- a/Documentation/virtual/kvm/mmu.txt
+++ b/Documentation/virtual/kvm/mmu.txt
@@ -179,6 +179,10 @@ Shadow pages contain the following information:
shadow page; it is also used to go back from a struct kvm_mmu_page
to a memslot, through the kvm_memslots_for_spte_role macro and
__gfn_to_memslot.
+ role.ad_disabled:
+ Is 1 if the MMU instance cannot use A/D bits. EPT did not have A/D
+ bits before Haswell; shadow EPT page tables also cannot use A/D bits
+ if the L1 hypervisor does not enable them.
gfn:
Either the guest page table containing the translations shadowed by this
page, or the base page frame for linear translations. See role.direct.
diff --git a/Documentation/virtual/kvm/vcpu-requests.rst b/Documentation/virtual/kvm/vcpu-requests.rst
new file mode 100644
index 000000000000..5feb3706a7ae
--- /dev/null
+++ b/Documentation/virtual/kvm/vcpu-requests.rst
@@ -0,0 +1,307 @@
+=================
+KVM VCPU Requests
+=================
+
+Overview
+========
+
+KVM supports an internal API enabling threads to request a VCPU thread to
+perform some activity. For example, a thread may request a VCPU to flush
+its TLB with a VCPU request. The API consists of the following functions::
+
+ /* Check if any requests are pending for VCPU @vcpu. */
+ bool kvm_request_pending(struct kvm_vcpu *vcpu);
+
+ /* Check if VCPU @vcpu has request @req pending. */
+ bool kvm_test_request(int req, struct kvm_vcpu *vcpu);
+
+ /* Clear request @req for VCPU @vcpu. */
+ void kvm_clear_request(int req, struct kvm_vcpu *vcpu);
+
+ /*
+ * Check if VCPU @vcpu has request @req pending. When the request is
+ * pending it will be cleared and a memory barrier, which pairs with
+ * another in kvm_make_request(), will be issued.
+ */
+ bool kvm_check_request(int req, struct kvm_vcpu *vcpu);
+
+ /*
+ * Make request @req of VCPU @vcpu. Issues a memory barrier, which pairs
+ * with another in kvm_check_request(), prior to setting the request.
+ */
+ void kvm_make_request(int req, struct kvm_vcpu *vcpu);
+
+ /* Make request @req of all VCPUs of the VM with struct kvm @kvm. */
+ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
+
+Typically a requester wants the VCPU to perform the activity as soon
+as possible after making the request. This means most requests
+(kvm_make_request() calls) are followed by a call to kvm_vcpu_kick(),
+and kvm_make_all_cpus_request() has the kicking of all VCPUs built
+into it.
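+
+For example, a request-and-kick sequence for a TLB flush (one of the
+architecture independent requests listed below) looks like::
+
+  kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+  kvm_vcpu_kick(vcpu);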
+
+VCPU Kicks
+----------
+
+The goal of a VCPU kick is to bring a VCPU thread out of guest mode in
+order to perform some KVM maintenance. To do so, an IPI is sent, forcing
+a guest mode exit. However, a VCPU thread may not be in guest mode at the
+time of the kick. Therefore, depending on the mode and state of the VCPU
+thread, there are two other actions a kick may take. All three actions
+are listed below:
+
+1) Send an IPI. This forces a guest mode exit.
+2) Wake a sleeping VCPU. Sleeping VCPUs are VCPU threads outside guest
+ mode that wait on waitqueues. Waking them removes the threads from
+ the waitqueues, allowing the threads to run again. This behavior
+ may be suppressed, see KVM_REQUEST_NO_WAKEUP below.
+3) Nothing. When the VCPU is not in guest mode and the VCPU thread is not
+ sleeping, then there is nothing to do.
+
+VCPU Mode
+---------
+
+VCPUs have a mode state, ``vcpu->mode``, that is used to track whether the
+guest is running in guest mode or not, as well as some specific
+outside guest mode states. The architecture may use ``vcpu->mode`` to
+ensure VCPU requests are seen by VCPUs (see "Ensuring Requests Are Seen"),
+as well as to avoid sending unnecessary IPIs (see "IPI Reduction"), and
+even to ensure IPI acknowledgements are waited upon (see "Waiting for
+Acknowledgements"). The following modes are defined:
+
+OUTSIDE_GUEST_MODE
+
+ The VCPU thread is outside guest mode.
+
+IN_GUEST_MODE
+
+ The VCPU thread is in guest mode.
+
+EXITING_GUEST_MODE
+
+ The VCPU thread is transitioning from IN_GUEST_MODE to
+ OUTSIDE_GUEST_MODE.
+
+READING_SHADOW_PAGE_TABLES
+
+ The VCPU thread is outside guest mode, but it wants the sender of
+ certain VCPU requests, namely KVM_REQ_TLB_FLUSH, to wait until the VCPU
+ thread is done reading the page tables.
+
+VCPU Request Internals
+======================
+
+VCPU requests are simply bit indices of the ``vcpu->requests`` bitmap.
+This means general bitops, like those documented in [atomic-ops]_ could
+also be used, e.g. ::
+
+ clear_bit(KVM_REQ_UNHALT & KVM_REQUEST_MASK, &vcpu->requests);
+
+However, VCPU request users should refrain from doing so, as it would
+break the abstraction. The first 8 bits are reserved for architecture
+independent requests, all additional bits are available for architecture
+dependent requests.
+
+Architecture Independent Requests
+---------------------------------
+
+KVM_REQ_TLB_FLUSH
+
+ KVM's common MMU notifier may need to flush all of a guest's TLB
+ entries, calling kvm_flush_remote_tlbs() to do so. Architectures that
+ choose to use the common kvm_flush_remote_tlbs() implementation will
+ need to handle this VCPU request.
+
+KVM_REQ_MMU_RELOAD
+
+ When shadow page tables are used and memory slots are removed it's
+ necessary to inform each VCPU to completely refresh the tables. This
+ request is used for that.
+
+KVM_REQ_PENDING_TIMER
+
+ This request may be made from a timer handler run on the host on behalf
+ of a VCPU. It informs the VCPU thread to inject a timer interrupt.
+
+KVM_REQ_UNHALT
+
+ This request may be made from the KVM common function kvm_vcpu_block(),
+ which is used to emulate an instruction that causes a CPU to halt until
+ one of an architectural specific set of events and/or interrupts is
+ received (determined by checking kvm_arch_vcpu_runnable()). When that
+ event or interrupt arrives kvm_vcpu_block() makes the request. This is
+ in contrast to when kvm_vcpu_block() returns due to any other reason,
+ such as a pending signal, which does not indicate the VCPU's halt
+ emulation should stop, and therefore does not make the request.
+
+KVM_REQUEST_MASK
+----------------
+
+VCPU requests should be masked by KVM_REQUEST_MASK before using them with
+bitops. This is because only the lower 8 bits are used to represent the
+request's number. The upper bits are used as flags. Currently only two
+flags are defined.
+
+VCPU Request Flags
+------------------
+
+KVM_REQUEST_NO_WAKEUP
+
+ This flag is applied to requests that only need immediate attention
+ from VCPUs running in guest mode. That is, sleeping VCPUs do not need
+ to be awakened for these requests. Sleeping VCPUs will handle the
+ requests when they are awakened later for some other reason.
+
+KVM_REQUEST_WAIT
+
+ When requests with this flag are made with kvm_make_all_cpus_request(),
+ then the caller will wait for each VCPU to acknowledge its IPI before
+ proceeding. This flag only applies to VCPUs that would receive IPIs.
+ If, for example, the VCPU is sleeping, so no IPI is necessary, then
+ the requesting thread does not wait. This means that this flag may be
+ safely combined with KVM_REQUEST_NO_WAKEUP. See "Waiting for
+ Acknowledgements" for more information about requests with
+ KVM_REQUEST_WAIT.
+
+VCPU Requests with Associated State
+===================================
+
+Requesters that want the receiving VCPU to handle new state need to ensure
+the newly written state is observable to the receiving VCPU thread's CPU
+by the time it observes the request. This means a write memory barrier
+must be inserted after writing the new state and before setting the VCPU
+request bit. Additionally, on the receiving VCPU thread's side, a
+corresponding read barrier must be inserted after reading the request bit
+and before proceeding to read the new state associated with it. See
+scenario 3, Message and Flag, of [lwn-mb]_ and the kernel documentation
+[memory-barriers]_.
+
+The pair of functions, kvm_check_request() and kvm_make_request(), provide
+the memory barriers, allowing this requirement to be handled internally by
+the API.
+
+Ensuring Requests Are Seen
+==========================
+
+When making requests to VCPUs, we want to avoid the receiving VCPU
+executing in guest mode for an arbitrary long time without handling the
+request. We can be sure this won't happen as long as we ensure the VCPU
+thread checks kvm_request_pending() before entering guest mode and that a
+kick will send an IPI to force an exit from guest mode when necessary.
+Extra care must be taken to cover the period after the VCPU thread's last
+kvm_request_pending() check and before it has entered guest mode, as kick
+IPIs will only trigger guest mode exits for VCPU threads that are in guest
+mode or at least have already disabled interrupts in order to prepare to
+enter guest mode. This means that an optimized implementation (see "IPI
+Reduction") must be certain when it's safe to not send the IPI. One
+solution, which all architectures except s390 apply, is to:
+
+- set ``vcpu->mode`` to IN_GUEST_MODE between disabling the interrupts and
+ the last kvm_request_pending() check;
+- enable interrupts atomically when entering the guest.
+
+This solution also requires memory barriers to be placed carefully in both
+the requesting thread and the receiving VCPU. With the memory barriers we
+can exclude the possibility of a VCPU thread observing
+!kvm_request_pending() on its last check and then not receiving an IPI for
+the next request made of it, even if the request is made immediately after
+the check. This is done by way of the Dekker memory barrier pattern
+(scenario 10 of [lwn-mb]_). As the Dekker pattern requires two variables,
+this solution pairs ``vcpu->mode`` with ``vcpu->requests``. Substituting
+them into the pattern gives::
+
+ CPU1 CPU2
+ ================= =================
+ local_irq_disable();
+ WRITE_ONCE(vcpu->mode, IN_GUEST_MODE); kvm_make_request(REQ, vcpu);
+ smp_mb(); smp_mb();
+ if (kvm_request_pending(vcpu)) { if (READ_ONCE(vcpu->mode) ==
+ IN_GUEST_MODE) {
+ ...abort guest entry... ...send IPI...
+ } }
+
+As stated above, the IPI is only useful for VCPU threads in guest mode or
+that have already disabled interrupts. This is why this specific case of
+the Dekker pattern has been extended to disable interrupts before setting
+``vcpu->mode`` to IN_GUEST_MODE. WRITE_ONCE() and READ_ONCE() are used to
+pedantically implement the memory barrier pattern, guaranteeing the
+compiler doesn't interfere with ``vcpu->mode``'s carefully planned
+accesses.
+
+IPI Reduction
+-------------
+
+As only one IPI is needed to get a VCPU to check for any/all requests,
+then they may be coalesced. This is easily done by having the first IPI
+sending kick also change the VCPU mode to something !IN_GUEST_MODE. The
+transitional state, EXITING_GUEST_MODE, is used for this purpose.
+
+Waiting for Acknowledgements
+----------------------------
+
+Some requests, those with the KVM_REQUEST_WAIT flag set, require IPIs to
+be sent, and the acknowledgements to be waited upon, even when the target
+VCPU threads are in modes other than IN_GUEST_MODE. For example, one case
+is when a target VCPU thread is in READING_SHADOW_PAGE_TABLES mode, which
+is set after disabling interrupts. To support these cases, the
+KVM_REQUEST_WAIT flag changes the condition for sending an IPI from
+checking that the VCPU is IN_GUEST_MODE to checking that it is not
+OUTSIDE_GUEST_MODE.
+
+Request-less VCPU Kicks
+-----------------------
+
+As the determination of whether or not to send an IPI depends on the
+two-variable Dekker memory barrier pattern, then it's clear that
+request-less VCPU kicks are almost never correct. Without the assurance
+that a non-IPI generating kick will still result in an action by the
+receiving VCPU, as the final kvm_request_pending() check does for
+request-accompanying kicks, then the kick may not do anything useful at
+all. If, for instance, a request-less kick was made to a VCPU that was
+just about to set its mode to IN_GUEST_MODE, meaning no IPI is sent, then
+the VCPU thread may continue its entry without actually having done
+whatever it was the kick was meant to initiate.
+
+One exception is x86's posted interrupt mechanism. In this case, however,
+even the request-less VCPU kick is coupled with the same
+local_irq_disable() + smp_mb() pattern described above; the ON bit
+(Outstanding Notification) in the posted interrupt descriptor takes the
+role of ``vcpu->requests``. When sending a posted interrupt, PIR.ON is
+set before reading ``vcpu->mode``; dually, in the VCPU thread,
+vmx_sync_pir_to_irr() reads PIR after setting ``vcpu->mode`` to
+IN_GUEST_MODE.
+
+Additional Considerations
+=========================
+
+Sleeping VCPUs
+--------------
+
+VCPU threads may need to consider requests before and/or after calling
+functions that may put them to sleep, e.g. kvm_vcpu_block(). Whether they
+do or not, and, if they do, which requests need consideration, is
+architecture dependent. kvm_vcpu_block() calls kvm_arch_vcpu_runnable()
+to check if it should awaken. One reason to do so is to provide
+architectures a function where requests may be checked if necessary.
+
+Clearing Requests
+-----------------
+
+Generally it only makes sense for the receiving VCPU thread to clear a
+request. However, in some circumstances, for example when the requesting
+thread and the receiving VCPU thread are executed serially, such as when
+they are the same thread, or when they are using some form of concurrency
+control to temporarily execute synchronously, it's possible to know
+that the request may be cleared immediately, rather than waiting for the
+receiving VCPU thread to handle the request in VCPU RUN. The only current
+examples of this are kvm_vcpu_block() calls made by VCPUs to block
+themselves. A possible side-effect of that call is to make the
+KVM_REQ_UNHALT request, which may then be cleared immediately when the
+VCPU returns from the call.
+
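+For example, the arm WFI handler updated elsewhere in this series blocks
+the VCPU and then immediately clears the request that the call may have
+made::
+
+  kvm_vcpu_block(vcpu);
+  kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+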
+References
+==========
+
+.. [atomic-ops] Documentation/core-api/atomic_ops.rst
+.. [memory-barriers] Documentation/memory-barriers.txt
+.. [lwn-mb] https://lwn.net/Articles/573436/
diff --git a/Documentation/vm/ksm.txt b/Documentation/vm/ksm.txt
index 6b0ca7feb135..6686bd267dc9 100644
--- a/Documentation/vm/ksm.txt
+++ b/Documentation/vm/ksm.txt
@@ -98,6 +98,50 @@ use_zero_pages - specifies whether empty pages (i.e. allocated pages
it is only effective for pages merged after the change.
Default: 0 (normal KSM behaviour as in earlier releases)
+max_page_sharing - Maximum sharing allowed for each KSM page. This
+                   enforces a deduplication limit to keep the virtual
+                   memory rmap lists from growing too large. The
+                   minimum value is 2, as a newly created KSM page
+                   will have at least two sharers. The rmap walk has
+                   O(N) complexity where N is the number of rmap_items
+                   (i.e. virtual mappings) that are sharing the page,
+                   which is in turn capped by max_page_sharing. So
+                   this effectively spreads the linear O(N)
+                   computational complexity in rmap walk context
+                   over different KSM pages. The ksmd walk over the
+                   stable_node "chains" is also O(N), but N is the
+                   number of stable_node "dups", not the number of
+                   rmap_items, so it does not have a significant
+                   impact on ksmd performance. In practice the best
+                   stable_node "dup" candidate will be kept and found
+                   at the head of the "dups" list. The higher this
+                   value, the faster KSM will merge the memory
+                   (because there will be fewer stable_node dups
+                   queued into the stable_node chain->hlist to check
+                   for pruning) and the higher the deduplication
+                   factor will be, but the slower the worst-case rmap
+                   walk can be for any given KSM page. Slowing down
+                   the rmap_walk means there will be higher latency
+                   for certain virtual memory operations happening
+                   during swapping, compaction, NUMA balancing and
+                   page migration, in turn decreasing responsiveness
+                   for the caller of those virtual memory operations.
+                   The scheduler latency of other tasks not involved
+                   with the VM operations doing the rmap walk is not
+                   affected by this parameter, as the rmap walks are
+                   always schedule-friendly themselves.
+
+stable_node_chains_prune_millisecs - How frequently to walk the whole
+                   list of stable_node "dups" linked in the
+                   stable_node "chains" in order to prune stale
+                   stable_nodes. Smaller millisecond values will free
+                   up the KSM metadata with lower latency, but they
+                   will make ksmd use more CPU during the scan. This
+                   only applies to the stable_node chains, so it is a
+                   noop if not a single KSM page has hit the
+                   max_page_sharing limit yet (there would be no
+                   stable_node chains in that case).
+
The effectiveness of KSM and MADV_MERGEABLE is shown in /sys/kernel/mm/ksm/:
pages_shared - how many shared pages are being used
@@ -106,10 +150,29 @@ pages_unshared - how many pages unique but repeatedly checked for merging
pages_volatile - how many pages changing too fast to be placed in a tree
full_scans - how many times all mergeable areas have been scanned
+stable_node_chains - number of stable node chains allocated, this is
+ effectively the number of KSM pages that hit the
+ max_page_sharing limit
+stable_node_dups - number of stable node dups queued into the
+ stable_node chains
+
A high ratio of pages_sharing to pages_shared indicates good sharing, but
a high ratio of pages_unshared to pages_sharing indicates wasted effort.
pages_volatile embraces several different kinds of activity, but a high
proportion there would also indicate poor use of madvise MADV_MERGEABLE.
+The maximum possible pages_sharing/pages_shared ratio is limited by the
+max_page_sharing tunable. To increase the ratio, max_page_sharing must
+be increased accordingly.
+
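+For example, the current ratio can be computed from the counters above
+with a small userspace helper; this is only an illustration, using the
+sysfs paths documented in this file:
+
+	/* ksm_ratio.c - print the pages_sharing/pages_shared ratio */
+	#include <stdio.h>
+
+	static long read_ksm_counter(const char *name)
+	{
+		char path[96];
+		long val = -1;
+		FILE *f;
+
+		snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
+		f = fopen(path, "r");
+		if (!f)
+			return -1;
+		if (fscanf(f, "%ld", &val) != 1)
+			val = -1;
+		fclose(f);
+		return val;
+	}
+
+	int main(void)
+	{
+		long sharing = read_ksm_counter("pages_sharing");
+		long shared  = read_ksm_counter("pages_shared");
+
+		if (sharing < 0 || shared <= 0)
+			return 1;
+		printf("pages_sharing/pages_shared = %.2f\n",
+		       (double)sharing / shared);
+		return 0;
+	}
+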
+The stable_node_dups/stable_node_chains ratio is also affected by the
+max_page_sharing tunable, and a high ratio may indicate fragmentation
+in the stable_node dups. Such fragmentation could be solved by
+introducing defragmentation algorithms in ksmd that would refile
+rmap_items from one stable_node dup to another, in order to free up
+stable_node "dups" with few rmap_items in them. That, however, may
+increase ksmd CPU usage and possibly slow down the read-only
+computations on the KSM pages of the applications.
+
Izik Eidus,
Hugh Dickins, 17 Nov 2009
diff --git a/Documentation/xtensa/mmu.txt b/Documentation/xtensa/mmu.txt
index 222a2c6748e6..5de8715d5bec 100644
--- a/Documentation/xtensa/mmu.txt
+++ b/Documentation/xtensa/mmu.txt
@@ -41,9 +41,9 @@ The scheme below assumes that the kernel is loaded below 0x40000000.
00..1F -> 00 -> 00 -> 00
The default location of IO peripherals is above 0xf0000000. This may be changed
-using a "ranges" property in a device tree simple-bus node. See ePAPR 1.1, §6.5
-for details on the syntax and semantic of simple-bus nodes. The following
-limitations apply:
+using a "ranges" property in a device tree simple-bus node. See the Devicetree
+Specification, section 4.5 for details on the syntax and semantics of
+simple-bus nodes. The following limitations apply:
1. Only top level simple-bus nodes are considered
diff --git a/MAINTAINERS b/MAINTAINERS
index b73dddde6455..4f4057c8ffca 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1515,6 +1515,7 @@ F: arch/arm64/boot/dts/marvell/armada*
F: drivers/cpufreq/mvebu-cpufreq.c
F: drivers/irqchip/irq-armada-370-xp.c
F: drivers/irqchip/irq-mvebu-*
+F: drivers/pinctrl/mvebu/
F: drivers/rtc/rtc-armada38x.c
ARM/Marvell Berlin SoC support
@@ -1681,7 +1682,6 @@ F: arch/arm/mach-qcom/
F: arch/arm64/boot/dts/qcom/*
F: drivers/i2c/busses/i2c-qup.c
F: drivers/clk/qcom/
-F: drivers/pinctrl/qcom/
F: drivers/dma/qcom/
F: drivers/soc/qcom/
F: drivers/spi/spi-qup.c
@@ -1802,11 +1802,12 @@ F: arch/arm/plat-samsung/s5p-dev-mfc.c
F: drivers/media/platform/s5p-mfc/
ARM/SAMSUNG S5P SERIES HDMI CEC SUBSYSTEM SUPPORT
-M: Kyungmin Park <[email protected]>
+M: Marek Szyprowski <[email protected]>
+L: [email protected] (moderated for non-subscribers)
S: Maintained
-F: drivers/staging/media/platform/s5p-cec/
+F: drivers/media/platform/s5p-cec/
+F: Documentation/devicetree/bindings/media/s5p-cec.txt
ARM/SAMSUNG S5P SERIES JPEG CODEC SUPPORT
M: Andrzej Pietrasiewicz <[email protected]>
@@ -2630,6 +2631,21 @@ S: Maintained
F: net/bluetooth/
F: include/net/bluetooth/
+DMA MAPPING HELPERS
+M: Christoph Hellwig <[email protected]>
+M: Marek Szyprowski <[email protected]>
+R: Robin Murphy <[email protected]>
+T: git git://git.infradead.org/users/hch/dma-mapping.git
+W: http://git.infradead.org/users/hch/dma-mapping.git
+S: Supported
+F: lib/dma-debug.c
+F: lib/dma-noop.c
+F: lib/dma-virt.c
+F: drivers/base/dma-mapping.c
+F: drivers/base/dma-coherent.c
+F: include/linux/dma-mapping.h
+
BONDING DRIVER
M: Jay Vosburgh <[email protected]>
M: Veaceslav Falico <[email protected]>
@@ -3174,6 +3190,7 @@ F: include/media/cec.h
F: include/media/cec-notifier.h
F: include/uapi/linux/cec.h
F: include/uapi/linux/cec-funcs.h
+F: Documentation/devicetree/bindings/media/cec.txt
CELL BROADBAND ENGINE ARCHITECTURE
M: Arnd Bergmann <[email protected]>
@@ -3764,8 +3781,8 @@ S: Supported
F: drivers/net/ethernet/chelsio/cxgb4vf/
CXL (IBM Coherent Accelerator Processor Interface CAPI) DRIVER
-M: Ian Munsie <[email protected]>
M: Frederic Barrat <[email protected]>
+M: Andrew Donnellan <[email protected]>
S: Supported
F: arch/powerpc/platforms/powernv/pci-cxl.c
@@ -3829,6 +3846,12 @@ S: Supported
F: drivers/input/touchscreen/cyttsp*
F: include/linux/input/cyttsp.h
+D-LINK DIR-685 TOUCHKEYS DRIVER
+M: Linus Walleij <[email protected]>
+S: Supported
+F: drivers/input/dlink-dir685-touchkeys.c
+
DALLAS/MAXIM DS1685-FAMILY REAL TIME CLOCK
M: Joshua Kinard <[email protected]>
S: Maintained
@@ -4031,7 +4054,10 @@ W: http://www.dialog-semiconductor.com/products
S: Supported
F: Documentation/hwmon/da90??
F: Documentation/devicetree/bindings/mfd/da90*.txt
+F: Documentation/devicetree/bindings/input/da90??-onkey.txt
+F: Documentation/devicetree/bindings/thermal/da90??-thermal.txt
F: Documentation/devicetree/bindings/regulator/da92*.txt
+F: Documentation/devicetree/bindings/watchdog/da92??-wdt.txt
F: Documentation/devicetree/bindings/sound/da[79]*.txt
F: drivers/gpio/gpio-da90??.c
F: drivers/hwmon/da90??-hwmon.c
@@ -4046,6 +4072,7 @@ F: drivers/power/supply/da9052-battery.c
F: drivers/power/supply/da91??-*.c
F: drivers/regulator/da903x.c
F: drivers/regulator/da9???-regulator.[ch]
+F: drivers/thermal/da90??-thermal.c
F: drivers/rtc/rtc-da90??.c
F: drivers/video/backlight/da90??_bl.c
F: drivers/watchdog/da90??_wdt.c
@@ -4734,6 +4761,13 @@ S: Maintained
F: drivers/media/usb/dvb-usb-v2/dvb_usb*
F: drivers/media/usb/dvb-usb-v2/usb_urb.c
+DONGWOON DW9714 LENS VOICE COIL DRIVER
+M: Sakari Ailus <[email protected]>
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/dw9714.c
+
DYNAMIC DEBUG
M: Jason Baron <[email protected]>
S: Maintained
@@ -5041,6 +5075,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
F: drivers/video/fbdev/s1d13xxxfb.c
F: include/video/s1d13xxxfb.h
+ERRSEQ ERROR TRACKING INFRASTRUCTURE
+M: Jeff Layton <[email protected]>
+S: Maintained
+F: lib/errseq.c
+F: include/linux/errseq.h
+
ET131X NETWORK DRIVER
M: Mark Einon <[email protected]>
S: Odd Fixes
@@ -5324,7 +5364,7 @@ S: Maintained
F: drivers/video/fbdev/fsl-diu-fb.*
FREESCALE DMA DRIVER
-M: Li Yang <[email protected]>
+M: Li Yang <[email protected]>
M: Zhang Wei <[email protected]>
S: Maintained
@@ -5389,11 +5429,11 @@ S: Maintained
F: drivers/net/ethernet/freescale/dpaa
FREESCALE SOC DRIVERS
-M: Scott Wood <[email protected]>
+M: Li Yang <[email protected]>
S: Maintained
-F: Documentation/devicetree/bindings/powerpc/fsl/
+F: Documentation/devicetree/bindings/soc/fsl/
F: drivers/soc/fsl/
F: include/linux/fsl/
@@ -5406,14 +5446,14 @@ F: include/soc/fsl/*qe*.h
F: include/soc/fsl/*ucc*.h
FREESCALE USB PERIPHERAL DRIVERS
-M: Li Yang <[email protected]>
+M: Li Yang <[email protected]>
S: Maintained
F: drivers/usb/gadget/udc/fsl*
FREESCALE QUICC ENGINE UCC ETHERNET DRIVER
-M: Li Yang <[email protected]>
+M: Li Yang <[email protected]>
S: Maintained
@@ -5723,6 +5763,15 @@ F: include/asm-generic/gpio.h
F: include/uapi/linux/gpio.h
F: tools/gpio/
+GPIO ACPI SUPPORT
+M: Mika Westerberg <[email protected]>
+M: Andy Shevchenko <[email protected]>
+S: Maintained
+F: Documentation/acpi/gpio-properties.txt
+F: drivers/gpio/gpiolib-acpi.c
+
GRE DEMULTIPLEXER DRIVER
M: Dmitry Kozlov <[email protected]>
@@ -6646,8 +6695,10 @@ S: Maintained
F: drivers/input/
F: include/linux/input.h
F: include/uapi/linux/input.h
+F: include/uapi/linux/input-event-codes.h
F: include/linux/input/
F: Documentation/devicetree/bindings/input/
+F: Documentation/input/
INPUT MULTITOUCH (MT) PROTOCOL
M: Henrik Rydberg <[email protected]>
@@ -7300,9 +7351,10 @@ KERNEL SELFTEST FRAMEWORK
M: Shuah Khan <[email protected]>
M: Shuah Khan <[email protected]>
-T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git
S: Maintained
-F: tools/testing/selftests
+F: tools/testing/selftests/
+F: Documentation/dev-tools/kselftest*
KERNEL VIRTUAL MACHINE (KVM)
M: Paolo Bonzini <[email protected]>
@@ -7341,7 +7393,7 @@ F: arch/powerpc/kvm/
KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
M: Christian Borntraeger <[email protected]>
-M: Cornelia Huck <[email protected]>
+M: Cornelia Huck <[email protected]>
W: http://www.ibm.com/developerworks/linux/linux390/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
@@ -7593,6 +7645,15 @@ S: Maintained
F: drivers/ata/pata_*.c
F: drivers/ata/ata_generic.c
+LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS
+M: Linus Walleij <[email protected]>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S: Maintained
+F: drivers/ata/pata_ftide010.c
+F: drivers/ata/sata_gemini.c
+F: drivers/ata/sata_gemini.h
+
LIBATA SATA AHCI PLATFORM devices support
M: Hans de Goede <[email protected]>
M: Tejun Heo <[email protected]>
@@ -7647,9 +7708,7 @@ M: Ross Zwisler <[email protected]>
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
S: Supported
-F: drivers/nvdimm/pmem.c
-F: include/linux/pmem.h
-F: arch/*/include/asm/pmem.h
+F: drivers/nvdimm/pmem*
LIGHTNVM PLATFORM SUPPORT
M: Matias Bjorling <[email protected]>
@@ -7740,6 +7799,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux.git
S: Maintained
F: arch/powerpc/platforms/83xx/
F: arch/powerpc/platforms/85xx/
+F: Documentation/devicetree/bindings/powerpc/fsl/
LINUX FOR POWERPC PA SEMI PWRFICIENT
@@ -8092,6 +8152,16 @@ S: Maintained
F: Documentation/hwmon/max20751
F: drivers/hwmon/max20751.c
+MAX2175 SDR TUNER DRIVER
+M: Ramesh Shanmugasundaram <[email protected]>
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: Documentation/devicetree/bindings/media/i2c/max2175.txt
+F: Documentation/media/v4l-drivers/max2175.rst
+F: drivers/media/i2c/max2175*
+F: include/uapi/linux/max2175.h
+
MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
S: Orphan
@@ -8172,6 +8242,27 @@ L: [email protected]
S: Maintained
F: drivers/iio/dac/cio-dac.c
+MEDIA DRIVERS FOR RENESAS - DRIF
+M: Ramesh Shanmugasundaram <[email protected]>
+T: git git://linuxtv.org/media_tree.git
+S: Supported
+F: Documentation/devicetree/bindings/media/renesas,drif.txt
+F: drivers/media/platform/rcar_drif.c
+
+MEDIA DRIVERS FOR FREESCALE IMX
+M: Steve Longerbeam <[email protected]>
+M: Philipp Zabel <[email protected]>
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: Documentation/devicetree/bindings/media/imx.txt
+F: Documentation/media/v4l-drivers/imx.rst
+F: drivers/staging/media/imx/
+F: include/linux/imx-media.h
+F: include/media/imx.h
+
MEDIA DRIVERS FOR RENESAS - FCP
M: Laurent Pinchart <[email protected]>
@@ -9046,9 +9137,7 @@ F: lib/random32.c
NETWORKING [IPv4/IPv6]
M: "David S. Miller" <[email protected]>
M: Alexey Kuznetsov <[email protected]>
-M: James Morris <[email protected]>
M: Hideaki YOSHIFUJI <[email protected]>
-M: Patrick McHardy <[email protected]>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
S: Maintained
@@ -9539,6 +9628,13 @@ M: Harald Welte <[email protected]>
S: Maintained
F: drivers/char/pcmcia/cm4040_cs.*
+OMNIVISION OV5640 SENSOR DRIVER
+M: Steve Longerbeam <[email protected]>
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/ov5640.c
+
OMNIVISION OV5647 SENSOR DRIVER
M: Ramiro Oliveira <[email protected]>
@@ -9554,6 +9650,13 @@ S: Maintained
F: drivers/media/i2c/ov7670.c
F: Documentation/devicetree/bindings/media/i2c/ov7670.txt
+OMNIVISION OV13858 SENSOR DRIVER
+M: Sakari Ailus <[email protected]>
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/ov13858.c
+
ONENAND FLASH DRIVER
M: Kyungmin Park <[email protected]>
@@ -9588,6 +9691,7 @@ S: Maintained
F: drivers/of/
F: include/linux/of*.h
F: scripts/dtc/
+F: Documentation/ABI/testing/sysfs-firmware-ofw
OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
M: Rob Herring <[email protected]>
@@ -10056,9 +10160,16 @@ S: Maintained
F: Documentation/devicetree/bindings/pci/hisilicon-pcie.txt
F: drivers/pci/dwc/pcie-hisi.c
+PCIE DRIVER FOR HISILICON KIRIN
+M: Xiaowei Song <[email protected]>
+M: Binghui Wang <[email protected]>
+S: Maintained
+F: Documentation/devicetree/bindings/pci/pcie-kirin.txt
+F: drivers/pci/dwc/pcie-kirin.c
+
PCIE DRIVER FOR ROCKCHIP
M: Shawn Lin <[email protected]>
-M: Wenrui Li <[email protected]>
S: Maintained
@@ -10080,6 +10191,14 @@ S: Supported
F: Documentation/devicetree/bindings/pci/pci-thunder-*
F: drivers/pci/host/pci-thunder-*
+PCIE DRIVER FOR MEDIATEK
+M: Ryder Lee <[email protected]>
+S: Supported
+F: Documentation/devicetree/bindings/pci/mediatek*
+F: drivers/pci/host/*mediatek*
+
PCMCIA SUBSYSTEM
P: Linux PCMCIA Team
@@ -10204,6 +10323,13 @@ M: Heikki Krogerus <[email protected]>
S: Maintained
F: drivers/pinctrl/intel/
+PIN CONTROLLER - QUALCOMM
+M: Bjorn Andersson <[email protected]>
+S: Maintained
+F: Documentation/devicetree/bindings/pinctrl/qcom,*.txt
+F: drivers/pinctrl/qcom/
+
PIN CONTROLLER - RENESAS
M: Laurent Pinchart <[email protected]>
M: Geert Uytterhoeven <[email protected]>
@@ -10705,6 +10831,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rkuo/linux-hexagon-kernel.g
S: Supported
F: arch/hexagon/
+QUALCOMM VENUS VIDEO ACCELERATOR DRIVER
+M: Stanimir Varbanov <[email protected]>
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/platform/qcom/venus/
+
QUALCOMM WCN36XX WIRELESS DRIVER
M: Eugene Krasnikov <[email protected]>
@@ -11190,7 +11324,7 @@ S: Supported
F: drivers/iommu/s390-iommu.c
S390 VFIO-CCW DRIVER
-M: Cornelia Huck <[email protected]>
+M: Cornelia Huck <[email protected]>
M: Dong Jia Shi <[email protected]>
@@ -11634,6 +11768,7 @@ F: kernel/seccomp.c
F: include/uapi/linux/seccomp.h
F: include/linux/seccomp.h
F: tools/testing/selftests/seccomp/*
+F: tools/testing/selftests/kselftest_harness.h
F: Documentation/userspace-api/seccomp_filter.rst
K: \bsecure_computing
K: \bTIF_SECCOMP\b
@@ -11934,6 +12069,13 @@ S: Maintained
F: drivers/media/platform/davinci/
F: include/media/davinci/
+TI DAVINCI SERIES GPIO DRIVER
+M: Keerthy <[email protected]>
+S: Maintained
+F: Documentation/devicetree/bindings/gpio/gpio-davinci.txt
+F: drivers/gpio/gpio-davinci.c
+
TI AM437X VPFE DRIVER
M: "Lad, Prabhakar" <[email protected]>
@@ -12109,8 +12251,9 @@ F: drivers/leds/leds-net48xx.c
SOFTLOGIC 6x10 MPEG CODEC
M: Bluecherry Maintainers <[email protected]>
+M: Anton Sviridenko <[email protected]>
M: Andrey Utkin <[email protected]>
-M: Andrey Utkin <[email protected]>
+M: Andrey Utkin <[email protected]>
M: Ismael Luceno <[email protected]>
S: Supported
@@ -12779,6 +12922,8 @@ F: Documentation/devicetree/bindings/soc/ti/sci-pm-domain.txt
F: include/dt-bindings/genpd/k2g.h
F: drivers/soc/ti/ti_sci_pm_domains.c
F: Documentation/devicetree/bindings/reset/ti,sci-reset.txt
+F: Documentation/devicetree/bindings/clock/ti,sci-clk.txt
+F: drivers/clk/keystone/sci-clk.c
F: drivers/reset/reset-ti-sci.c
THANKO'S RAREMONO AM/FM/SW RADIO RECEIVER USB DRIVER
@@ -13058,6 +13203,7 @@ F: Documentation/media/v4l-drivers/tm6000*
TW5864 VIDEO4LINUX DRIVER
M: Bluecherry Maintainers <[email protected]>
+M: Anton Sviridenko <[email protected]>
M: Andrey Utkin <[email protected]>
M: Andrey Utkin <[email protected]>
@@ -13683,6 +13829,12 @@ S: Maintained
F: drivers/media/v4l2-core/videobuf2-*
F: include/media/videobuf2-*
+VIDEO MULTIPLEXER DRIVER
+M: Philipp Zabel <[email protected]>
+S: Maintained
+F: drivers/media/platform/video-mux.c
+
VIRTIO AND VHOST VSOCK DRIVER
M: Stefan Hajnoczi <[email protected]>
@@ -13728,7 +13880,7 @@ F: include/uapi/linux/virtio_*.h
F: drivers/crypto/virtio/
VIRTIO DRIVERS FOR S390
-M: Cornelia Huck <[email protected]>
+M: Cornelia Huck <[email protected]>
M: Halil Pasic <[email protected]>
@@ -14142,6 +14294,8 @@ F: drivers/xen/
F: arch/x86/include/asm/xen/
F: include/xen/
F: include/uapi/xen/
+F: Documentation/ABI/stable/sysfs-hypervisor-xen
+F: Documentation/ABI/testing/sysfs-hypervisor-xen
XEN HYPERVISOR ARM
M: Stefano Stabellini <[email protected]>
@@ -14234,6 +14388,14 @@ L: [email protected]
S: Supported
F: drivers/char/xillybus/
+XRA1403 GPIO EXPANDER
+M: Nandor Han <[email protected]>
+M: Semi Malinen <[email protected]>
+S: Maintained
+F: drivers/gpio/gpio-xra1403.c
+F: Documentation/devicetree/bindings/gpio/gpio-xra1403.txt
+
XTENSA XTFPGA PLATFORM SUPPORT
M: Max Filippov <[email protected]>
diff --git a/Makefile b/Makefile
index d7cb0372bed9..06ef9947cf7c 100644
--- a/Makefile
+++ b/Makefile
@@ -84,17 +84,10 @@ endif
# If the user is running make -s (silent mode), suppress echoing of
# commands
-ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
-ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
+ifneq ($(findstring s,$(filter-out --%,$(MAKEFLAGS))),)
quiet=silent_
tools_silent=s
endif
-else # make-3.8x
-ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
- quiet=silent_
- tools_silent=-s
-endif
-endif
export quiet Q KBUILD_VERBOSE
@@ -113,8 +106,8 @@ export quiet Q KBUILD_VERBOSE
# The O= assignment takes precedence over the KBUILD_OUTPUT environment
# variable.
-# KBUILD_SRC is set on invocation of make in OBJ directory
-# KBUILD_SRC is not intended to be used by the regular user (for now)
+# KBUILD_SRC is not intended to be used by the regular user (for now),
+# it is set on invocation of make with KBUILD_OUTPUT or O= specified.
ifeq ($(KBUILD_SRC),)
# OK, Make called in directory where kernel src resides
@@ -135,7 +128,6 @@ ifneq ($(words $(subst :, ,$(CURDIR))), 1)
endif
ifneq ($(KBUILD_OUTPUT),)
-# Invoke a second make in the output directory, passing relevant variables
# check that the output directory actually exists
saved-output := $(KBUILD_OUTPUT)
KBUILD_OUTPUT := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) \
@@ -148,6 +140,7 @@ PHONY += $(MAKECMDGOALS) sub-make
$(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
@:
+# Invoke a second make in the output directory, passing relevant variables
sub-make:
$(Q)$(MAKE) -C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR) \
-f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS))
@@ -303,7 +296,7 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
HOSTCC = gcc
HOSTCXX = g++
-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
+HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
HOSTCXXFLAGS = -O2
ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
@@ -388,12 +381,10 @@ USERINCLUDE := \
# Needed to be compatible with the O= option
LINUXINCLUDE := \
-I$(srctree)/arch/$(hdr-arch)/include \
- -I$(objtree)/arch/$(hdr-arch)/include/generated/uapi \
-I$(objtree)/arch/$(hdr-arch)/include/generated \
$(if $(KBUILD_SRC), -I$(srctree)/include) \
- -I$(objtree)/include
-
-LINUXINCLUDE += $(filter-out $(LINUXINCLUDE),$(USERINCLUDE))
+ -I$(objtree)/include \
+ $(USERINCLUDE)
KBUILD_CPPFLAGS := -D__KERNEL__
@@ -707,6 +698,7 @@ KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
# Quiet clang warning: comparison of unsigned expression < 0 is always false
KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
@@ -952,19 +944,19 @@ core-y := $(patsubst %/, %/built-in.o, $(core-y))
drivers-y := $(patsubst %/, %/built-in.o, $(drivers-y))
net-y := $(patsubst %/, %/built-in.o, $(net-y))
libs-y1 := $(patsubst %/, %/lib.a, $(libs-y))
-libs-y2 := $(patsubst %/, %/built-in.o, $(libs-y))
-libs-y := $(libs-y1) $(libs-y2)
+libs-y2 := $(filter-out %.a, $(patsubst %/, %/built-in.o, $(libs-y)))
virt-y := $(patsubst %/, %/built-in.o, $(virt-y))
# Externally visible symbols (used by link-vmlinux.sh)
export KBUILD_VMLINUX_INIT := $(head-y) $(init-y)
-export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y) $(drivers-y) $(net-y) $(virt-y)
+export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y2) $(drivers-y) $(net-y) $(virt-y)
+export KBUILD_VMLINUX_LIBS := $(libs-y1)
export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds
export LDFLAGS_vmlinux
# used by scripts/pacmage/Makefile
export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch Documentation include samples scripts tools)
-vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN)
+vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN) $(KBUILD_VMLINUX_LIBS)
# Include targets which we want to execute sequentially if the rest of the
# kernel build went well. If CONFIG_TRIM_UNUSED_KSYMS is set, this might be
diff --git a/arch/Kconfig b/arch/Kconfig
index dc26b6d9175e..cae0958a2298 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -550,7 +550,7 @@ config CC_STACKPROTECTOR_STRONG
endchoice
config THIN_ARCHIVES
- bool
+ def_bool y
help
Select this if the architecture wants to use thin archives
instead of ld -r to create the built-in.o files.
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index 7b82dc9a8556..133a4884ed44 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -326,7 +326,6 @@ clear_user(void __user *to, long len)
(uaccess_kernel() ? ~0UL : TASK_SIZE)
extern long strncpy_from_user(char *dest, const char __user *src, long count);
-extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
#include <asm/extable.h>
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index a56e608db2f9..b37153ecf2ac 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -10,7 +10,6 @@
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_FORK
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index b23d6fbbb225..df0d0a5e9353 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -564,25 +564,20 @@ SYSCALL_DEFINE0(getdtablesize)
*/
SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
{
- unsigned len;
- int i;
+ int len, err = 0;
+ char *kname;
- if (!access_ok(VERIFY_WRITE, name, namelen))
- return -EFAULT;
-
- len = namelen;
- if (len > 32)
- len = 32;
+ if (namelen > 32)
+ namelen = 32;
down_read(&uts_sem);
- for (i = 0; i < len; ++i) {
- __put_user(utsname()->domainname[i], name + i);
- if (utsname()->domainname[i] == '\0')
- break;
- }
+ kname = utsname()->domainname;
+ len = strnlen(kname, namelen);
+ if (copy_to_user(name, kname, min(len + 1, namelen)))
+ err = -EFAULT;
up_read(&uts_sem);
- return 0;
+ return err;
}
/*
@@ -718,9 +713,8 @@ SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss,
if (uoss) {
error = -EFAULT;
- if (! access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))
- || __put_user(oss_sp, &uoss->ss_sp)
- || __put_user(oss_os, &uoss->ss_onstack))
+ if (put_user(oss_sp, &uoss->ss_sp) ||
+ put_user(oss_os, &uoss->ss_onstack))
goto out;
}
@@ -957,37 +951,45 @@ struct itimerval32
static inline long
get_tv32(struct timeval *o, struct timeval32 __user *i)
{
- return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
- (__get_user(o->tv_sec, &i->tv_sec) |
- __get_user(o->tv_usec, &i->tv_usec)));
+ struct timeval32 tv;
+ if (copy_from_user(&tv, i, sizeof(struct timeval32)))
+ return -EFAULT;
+ o->tv_sec = tv.tv_sec;
+ o->tv_usec = tv.tv_usec;
+ return 0;
}
static inline long
put_tv32(struct timeval32 __user *o, struct timeval *i)
{
- return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
- (__put_user(i->tv_sec, &o->tv_sec) |
- __put_user(i->tv_usec, &o->tv_usec)));
+ return copy_to_user(o, &(struct timeval32){
+				.tv_sec = i->tv_sec,
+				.tv_usec = i->tv_usec},
+ sizeof(struct timeval32));
}
static inline long
get_it32(struct itimerval *o, struct itimerval32 __user *i)
{
- return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
- (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
- __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
- __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
- __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
+ struct itimerval32 itv;
+ if (copy_from_user(&itv, i, sizeof(struct itimerval32)))
+ return -EFAULT;
+ o->it_interval.tv_sec = itv.it_interval.tv_sec;
+ o->it_interval.tv_usec = itv.it_interval.tv_usec;
+ o->it_value.tv_sec = itv.it_value.tv_sec;
+ o->it_value.tv_usec = itv.it_value.tv_usec;
+ return 0;
}
static inline long
put_it32(struct itimerval32 __user *o, struct itimerval *i)
{
- return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
- (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
- __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
- __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
- __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
+ return copy_to_user(o, &(struct itimerval32){
+				.it_interval.tv_sec = i->it_interval.tv_sec,
+				.it_interval.tv_usec = i->it_interval.tv_usec,
+				.it_value.tv_sec = i->it_value.tv_sec,
+				.it_value.tv_usec = i->it_value.tv_usec},
+ sizeof(struct itimerval32));
}
static inline void
@@ -1106,20 +1108,17 @@ SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp,
{
struct timespec end_time, *to = NULL;
if (tvp) {
- time_t sec, usec;
-
+ struct timeval tv;
to = &end_time;
- if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp))
- || __get_user(sec, &tvp->tv_sec)
- || __get_user(usec, &tvp->tv_usec)) {
+ if (get_tv32(&tv, tvp))
return -EFAULT;
- }
- if (sec < 0 || usec < 0)
+ if (tv.tv_sec < 0 || tv.tv_usec < 0)
return -EINVAL;
- if (poll_select_set_timeout(to, sec, usec * NSEC_PER_USEC))
+ if (poll_select_set_timeout(to, tv.tv_sec,
+ tv.tv_usec * NSEC_PER_USEC))
return -EINVAL;
}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index abd59fad1a34..a208bfe367b5 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -22,6 +22,7 @@ config ARM
select CLONE_BACKWARDS
select CPU_PM if (SUSPEND || CPU_IDLE)
select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
+ select DMA_NOOP_OPS if !MMU
select EDAC_SUPPORT
select EDAC_ATOMIC_SCRUB
select GENERIC_ALLOCATOR
@@ -57,6 +58,7 @@ config ARM
select HAVE_DMA_API_DEBUG
select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
select HAVE_EXIT_THREAD
select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 9b1b7be2ec0e..9a92de63426f 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -33,6 +33,7 @@
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
+#include <asm/dma-iommu.h>
#undef STATS
@@ -256,7 +257,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
if (buf == NULL) {
dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
__func__, ptr);
- return DMA_ERROR_CODE;
+ return ARM_MAPPING_ERROR;
}
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -326,7 +327,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
ret = needs_bounce(dev, dma_addr, size);
if (ret < 0)
- return DMA_ERROR_CODE;
+ return ARM_MAPPING_ERROR;
if (ret == 0) {
arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
@@ -335,7 +336,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
if (PageHighMem(page)) {
dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
- return DMA_ERROR_CODE;
+ return ARM_MAPPING_ERROR;
}
return map_single(dev, page_address(page) + offset, size, dir, attrs);
@@ -444,12 +445,17 @@ static void dmabounce_sync_for_device(struct device *dev,
arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
}
-static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+static int dmabounce_dma_supported(struct device *dev, u64 dma_mask)
{
if (dev->archdata.dmabounce)
return 0;
- return arm_dma_ops.set_dma_mask(dev, dma_mask);
+ return arm_dma_ops.dma_supported(dev, dma_mask);
+}
+
+static int dmabounce_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return arm_dma_ops.mapping_error(dev, dma_addr);
}
static const struct dma_map_ops dmabounce_ops = {
@@ -465,7 +471,8 @@ static const struct dma_map_ops dmabounce_ops = {
.unmap_sg = arm_dma_unmap_sg,
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
- .set_dma_mask = dmabounce_set_mask,
+ .dma_supported = dmabounce_dma_supported,
+ .mapping_error = dmabounce_mapping_error,
};
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
diff --git a/arch/arm/configs/lpc32xx_defconfig b/arch/arm/configs/lpc32xx_defconfig
index 6ba430d2b5b2..e15fa5f168bb 100644
--- a/arch/arm/configs/lpc32xx_defconfig
+++ b/arch/arm/configs/lpc32xx_defconfig
@@ -112,7 +112,7 @@ CONFIG_GPIO_SX150X=y
CONFIG_GPIO_74X164=y
CONFIG_GPIO_MAX7301=y
CONFIG_GPIO_MC33880=y
-CONFIG_GPIO_MCP23S08=y
+CONFIG_PINCTRL_MCP23S08=y
CONFIG_SENSORS_DS620=y
CONFIG_SENSORS_MAX6639=y
CONFIG_WATCHDOG=y
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index e943e6cee254..f308c8c40cb9 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -159,16 +159,16 @@ extern int _test_and_change_bit(int nr, volatile unsigned long * p);
/*
* Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
*/
-extern int _find_first_zero_bit_le(const void * p, unsigned size);
-extern int _find_next_zero_bit_le(const void * p, int size, int offset);
+extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size);
+extern int _find_next_zero_bit_le(const unsigned long *p, int size, int offset);
extern int _find_first_bit_le(const unsigned long *p, unsigned size);
extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
/*
* Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
*/
-extern int _find_first_zero_bit_be(const void * p, unsigned size);
-extern int _find_next_zero_bit_be(const void * p, int size, int offset);
+extern int _find_first_zero_bit_be(const unsigned long *p, unsigned size);
+extern int _find_next_zero_bit_be(const unsigned long *p, int size, int offset);
extern int _find_first_bit_be(const unsigned long *p, unsigned size);
extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 2ef282f96651..c090ec675eac 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -9,6 +9,8 @@
#include <linux/kmemcheck.h>
#include <linux/kref.h>
+#define ARM_MAPPING_ERROR (~(dma_addr_t)0x0)
+
struct dma_iommu_mapping {
/* iommu specific data */
struct iommu_domain *domain;
@@ -33,5 +35,7 @@ int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping);
void arm_iommu_detach_device(struct device *dev);
+int arm_dma_supported(struct device *dev, u64 mask);
+
#endif /* __KERNEL__ */
#endif
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 680d3f3889e7..4e0285a66ef8 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -12,18 +12,14 @@
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
extern const struct dma_map_ops arm_dma_ops;
extern const struct dma_map_ops arm_coherent_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- return &arm_dma_ops;
+ return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_noop_ops;
}
-#define HAVE_ARCH_DMA_SUPPORTED 1
-extern int dma_supported(struct device *dev, u64 mask);
-
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index 22b73112b75f..f379881d5cc3 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -1,6 +1,10 @@
#ifndef _ASM_ARM_FTRACE
#define _ASM_ARM_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
+
#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR ((unsigned long)(__gnu_mcount_nc))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index f0e66577ce05..127e2dd2e21c 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -44,7 +44,9 @@
#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
#endif
-#define KVM_REQ_VCPU_EXIT (8 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_SLEEP \
+ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
@@ -233,8 +235,6 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
-void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
-void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);
int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
@@ -291,20 +291,12 @@ static inline void kvm_arm_init_debug(void) {}
static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
-static inline int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr)
-{
- return -ENXIO;
-}
-static inline int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr)
-{
- return -ENXIO;
-}
-static inline int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
- struct kvm_device_attr *attr)
-{
- return -ENXIO;
-}
+
+int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr);
+int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr);
+int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr);
#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 2d88af5be45f..233b4b50eff3 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -16,6 +16,7 @@
struct pci_sys_data;
struct pci_ops;
struct pci_bus;
+struct pci_host_bridge;
struct device;
struct hw_pci {
@@ -25,7 +26,7 @@ struct hw_pci {
unsigned int io_optional:1;
void **private_data;
int (*setup)(int nr, struct pci_sys_data *);
- struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
+ int (*scan)(int nr, struct pci_host_bridge *);
void (*preinit)(void);
void (*postinit)(void);
u8 (*swizzle)(struct pci_dev *dev, u8 *pin);
diff --git a/arch/arm/include/asm/page-nommu.h b/arch/arm/include/asm/page-nommu.h
index 503f488053de..8f2c47bec375 100644
--- a/arch/arm/include/asm/page-nommu.h
+++ b/arch/arm/include/asm/page-nommu.h
@@ -11,12 +11,6 @@
#ifndef _ASMARM_PAGE_NOMMU_H
#define _ASMARM_PAGE_NOMMU_H
-#if !defined(CONFIG_SMALL_TASKS) && PAGE_SHIFT < 13
-#define KTHREAD_SIZE (8192)
-#else
-#define KTHREAD_SIZE PAGE_SIZE
-#endif
-
#define clear_page(page) memset((page), 0, PAGE_SIZE)
#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 2577405d082d..6838abc04279 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -526,7 +526,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);
-extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* _ASMARM_UACCESS_H */
diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
index 71e473d05fcc..620dc75362e5 100644
--- a/arch/arm/include/asm/xen/events.h
+++ b/arch/arm/include/asm/xen/events.h
@@ -16,7 +16,7 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
return raw_irqs_disabled_flags(regs->ARM_cpsr);
}
-#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr), \
+#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((long long*)(ptr),\
atomic64_t, \
counter), (val))
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index e9b098d6b766..5fb3368e70cb 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -1,8 +1,8 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-genhdr-y += unistd-common.h
-genhdr-y += unistd-oabi.h
-genhdr-y += unistd-eabi.h
+generated-y += unistd-common.h
+generated-y += unistd-oabi.h
+generated-y += unistd-eabi.h
generic-y += siginfo.h
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 5e3c673fa3f4..5db2d4c6a55f 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -203,6 +203,14 @@ struct kvm_arch_memory_slot {
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
#define VGIC_LEVEL_INFO_LINE_LEVEL 0
+/* Device Control API on vcpu fd */
+#define KVM_ARM_VCPU_PMU_V3_CTRL 0
+#define KVM_ARM_VCPU_PMU_V3_IRQ 0
+#define KVM_ARM_VCPU_PMU_V3_INIT 1
+#define KVM_ARM_VCPU_TIMER_CTRL 1
+#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
+#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
+
#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index b259956365a0..56dc1a3a33b4 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -458,10 +458,14 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
int nr, busnr;
for (nr = busnr = 0; nr < hw->nr_controllers; nr++) {
- sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
- if (WARN(!sys, "PCI: unable to allocate sys data!"))
+ struct pci_host_bridge *bridge;
+
+ bridge = pci_alloc_host_bridge(sizeof(struct pci_sys_data));
+ if (WARN(!bridge, "PCI: unable to allocate bridge!"))
break;
+ sys = pci_host_bridge_priv(bridge);
+
sys->busnr = busnr;
sys->swizzle = hw->swizzle;
sys->map_irq = hw->map_irq;
@@ -473,7 +477,6 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
ret = hw->setup(nr, sys);
if (ret > 0) {
- struct pci_host_bridge *host_bridge;
ret = pcibios_init_resource(nr, sys, hw->io_optional);
if (ret) {
@@ -481,26 +484,37 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
break;
}
+ bridge->map_irq = pcibios_map_irq;
+ bridge->swizzle_irq = pcibios_swizzle;
+
if (hw->scan)
- sys->bus = hw->scan(nr, sys);
- else
- sys->bus = pci_scan_root_bus_msi(parent,
- sys->busnr, hw->ops, sys,
- &sys->resources, hw->msi_ctrl);
+ ret = hw->scan(nr, bridge);
+ else {
+ list_splice_init(&sys->resources,
+ &bridge->windows);
+ bridge->dev.parent = parent;
+ bridge->sysdata = sys;
+ bridge->busnr = sys->busnr;
+ bridge->ops = hw->ops;
+ bridge->msi = hw->msi_ctrl;
+ bridge->align_resource =
+ hw->align_resource;
+
+ ret = pci_scan_root_bus_bridge(bridge);
+ }
- if (WARN(!sys->bus, "PCI: unable to scan bus!")) {
- kfree(sys);
+ if (WARN(ret < 0, "PCI: unable to scan bus!")) {
+ pci_free_host_bridge(bridge);
break;
}
+ sys->bus = bridge->bus;
+
busnr = sys->bus->busn_res.end + 1;
list_add(&sys->node, head);
-
- host_bridge = pci_find_host_bridge(sys->bus);
- host_bridge->align_resource = hw->align_resource;
} else {
- kfree(sys);
+ pci_free_host_bridge(bridge);
if (ret < 0)
break;
}
@@ -519,8 +533,6 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
if (hw->postinit)
hw->postinit();
- pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);
-
list_for_each_entry(sys, &head, node) {
struct pci_bus *bus = sys->bus;
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
index c73c4030ca5d..efcd9f25a14b 100644
--- a/arch/arm/kernel/entry-ftrace.S
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -92,12 +92,95 @@
2: mcount_exit
.endm
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+
+.macro __ftrace_regs_caller
+
+ sub sp, sp, #8 @ space for PC and CPSR OLD_R0,
+ @ OLD_R0 will overwrite previous LR
+
+ add ip, sp, #12 @ move in IP the value of SP as it was
+ @ before the push {lr} of the mcount mechanism
+
+ str lr, [sp, #0] @ store LR instead of PC
+
+ ldr lr, [sp, #8] @ get previous LR
+
+ str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR
+
+ stmdb sp!, {ip, lr}
+ stmdb sp!, {r0-r11, lr}
+
+ @ stack content at this point:
+ @ 0 4 48 52 56 60 64 68 72
+ @ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 |
+
+ mov r3, sp @ struct pt_regs*
+
+ ldr r2, =function_trace_op
+ ldr r2, [r2] @ pointer to the current
+ @ function tracing op
+
+ ldr r1, [sp, #S_LR] @ lr of instrumented func
+
+ ldr lr, [sp, #S_PC] @ get LR
+
+ mcount_adjust_addr r0, lr @ instrumented function
+
+ .globl ftrace_regs_call
+ftrace_regs_call:
+ bl ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .globl ftrace_graph_regs_call
+ftrace_graph_regs_call:
+ mov r0, r0
+#endif
+
+ @ pop saved regs
+ ldmia sp!, {r0-r12} @ restore r0 through r12
+ ldr ip, [sp, #8] @ restore PC
+ ldr lr, [sp, #4] @ restore LR
+ ldr sp, [sp, #0] @ restore SP
+ mov pc, ip @ return
+.endm
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.macro __ftrace_graph_regs_caller
+
+ sub r0, fp, #4 @ lr of instrumented routine (parent)
+
+ @ called from __ftrace_regs_caller
+ ldr r1, [sp, #S_PC] @ instrumented routine (func)
+ mcount_adjust_addr r1, r1
+
+ mov r2, fp @ frame pointer
+ bl prepare_ftrace_return
+
+ @ pop registers saved in ftrace_regs_caller
+ ldmia sp!, {r0-r12} @ restore r0 through r12
+ ldr ip, [sp, #8] @ restore PC
+ ldr lr, [sp, #4] @ restore LR
+ ldr sp, [sp, #0] @ restore SP
+ mov pc, ip @ return
+
+.endm
+#endif
+#endif
+
.macro __ftrace_caller suffix
mcount_enter
mcount_get_lr r1 @ lr of instrumented func
mcount_adjust_addr r0, lr @ instrumented function
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ ldr r2, =function_trace_op
+ ldr r2, [r2] @ pointer to the current
+ @ function tracing op
+ mov r3, #0 @ regs is NULL
+#endif
+
.globl ftrace_call\suffix
ftrace_call\suffix:
bl ftrace_stub
@@ -212,6 +295,15 @@ UNWIND(.fnstart)
__ftrace_caller
UNWIND(.fnend)
ENDPROC(ftrace_caller)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ENTRY(ftrace_regs_caller)
+UNWIND(.fnstart)
+ __ftrace_regs_caller
+UNWIND(.fnend)
+ENDPROC(ftrace_regs_caller)
+#endif
+
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -220,6 +312,14 @@ UNWIND(.fnstart)
__ftrace_graph_caller
UNWIND(.fnend)
ENDPROC(ftrace_graph_caller)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ENTRY(ftrace_graph_regs_caller)
+UNWIND(.fnstart)
+ __ftrace_graph_regs_caller
+UNWIND(.fnend)
+ENDPROC(ftrace_graph_regs_caller)
+#endif
#endif
.purgem mcount_enter
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 833c991075a1..5617932a83df 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -141,6 +141,15 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
ret = ftrace_modify_code(pc, 0, new, false);
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ if (!ret) {
+ pc = (unsigned long)&ftrace_regs_call;
+ new = ftrace_call_replace(pc, (unsigned long)func);
+
+ ret = ftrace_modify_code(pc, 0, new, false);
+ }
+#endif
+
#ifdef CONFIG_OLD_MCOUNT
if (!ret) {
pc = (unsigned long)&ftrace_call_old;
@@ -159,11 +168,29 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
unsigned long ip = rec->ip;
old = ftrace_nop_replace(rec);
+
+ new = ftrace_call_replace(ip, adjust_address(rec, addr));
+
+ return ftrace_modify_code(rec->ip, old, new, true);
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
+{
+ unsigned long new, old;
+ unsigned long ip = rec->ip;
+
+ old = ftrace_call_replace(ip, adjust_address(rec, old_addr));
+
new = ftrace_call_replace(ip, adjust_address(rec, addr));
return ftrace_modify_code(rec->ip, old, new, true);
}
+#endif
+
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
@@ -231,6 +258,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);
+extern unsigned long ftrace_graph_regs_call;
+extern void ftrace_graph_regs_caller(void);
static int __ftrace_modify_caller(unsigned long *callsite,
void (*func) (void), bool enable)
@@ -253,6 +282,14 @@ static int ftrace_modify_graph_caller(bool enable)
ftrace_graph_caller,
enable);
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ if (!ret)
+ ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
+ ftrace_graph_regs_caller,
+ enable);
+#endif
+
+
#ifdef CONFIG_OLD_MCOUNT
if (!ret)
ret = __ftrace_modify_caller(&ftrace_graph_call_old,
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 939e8b58c59d..d96714e1858c 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -123,10 +123,10 @@ void __show_regs(struct pt_regs *regs)
print_symbol("PC is at %s\n", instruction_pointer(regs));
print_symbol("LR is at %s\n", regs->ARM_lr);
- printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
- "sp : %08lx ip : %08lx fp : %08lx\n",
- regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
- regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
+ printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n",
+ regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr);
+ printk("sp : %08lx ip : %08lx fp : %08lx\n",
+ regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
regs->ARM_r10, regs->ARM_r9,
regs->ARM_r8);
@@ -404,9 +404,17 @@ static unsigned long sigpage_addr(const struct mm_struct *mm,
static struct page *signal_page;
extern struct page *get_signal_page(void);
+static int sigpage_mremap(const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma)
+{
+ current->mm->context.sigpage = new_vma->vm_start;
+ return 0;
+}
+
static const struct vm_special_mapping sigpage_mapping = {
.name = "[sigpage]",
.pages = &signal_page,
+ .mremap = sigpage_mremap,
};
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 53cf86cf2d1a..a4d6dc0f2427 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -54,8 +54,26 @@ static const struct vm_special_mapping vdso_data_mapping = {
.pages = &vdso_data_page,
};
+static int vdso_mremap(const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma)
+{
+ unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
+ unsigned long vdso_size;
+
+ /* without VVAR page */
+ vdso_size = (vdso_total_pages - 1) << PAGE_SHIFT;
+
+ if (vdso_size != new_size)
+ return -EINVAL;
+
+ current->mm->context.vdso = new_vma->vm_start;
+
+ return 0;
+}
+
static struct vm_special_mapping vdso_text_mapping __ro_after_init = {
.name = "[vdso]",
+ .mremap = vdso_mremap,
};
struct elfinfo {
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index fa6182a40941..1e0784ebbfd6 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -301,3 +301,54 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
{
return -EINVAL;
}
+
+int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+
+ switch (attr->group) {
+ case KVM_ARM_VCPU_TIMER_CTRL:
+ ret = kvm_arm_timer_set_attr(vcpu, attr);
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
+}
+
+int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+
+ switch (attr->group) {
+ case KVM_ARM_VCPU_TIMER_CTRL:
+ ret = kvm_arm_timer_get_attr(vcpu, attr);
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
+}
+
+int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+
+ switch (attr->group) {
+ case KVM_ARM_VCPU_TIMER_CTRL:
+ ret = kvm_arm_timer_has_attr(vcpu, attr);
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
+}
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index f86a9aaef462..54442e375354 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -72,6 +72,7 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
trace_kvm_wfx(*vcpu_pc(vcpu), false);
vcpu->stat.wfi_exit_stat++;
kvm_vcpu_block(vcpu);
+ kvm_clear_request(KVM_REQ_UNHALT, vcpu);
}
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index 624a510d31df..ebd2dd46adf7 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -237,8 +237,10 @@ void __hyp_text __noreturn __hyp_panic(int cause)
vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ __timer_save_state(vcpu);
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);
+ __banked_restore_state(host_ctxt);
__sysreg_restore_state(host_ctxt);
}
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index 1da8b2d14550..5ed0c3ee33d6 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -37,16 +37,6 @@ static struct kvm_regs cortexa_regs_reset = {
.usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
};
-static const struct kvm_irq_level cortexa_ptimer_irq = {
- { .irq = 30 },
- .level = 1,
-};
-
-static const struct kvm_irq_level cortexa_vtimer_irq = {
- { .irq = 27 },
- .level = 1,
-};
-
/*******************************************************************************
* Exported reset function
@@ -62,16 +52,12 @@ static const struct kvm_irq_level cortexa_vtimer_irq = {
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
struct kvm_regs *reset_regs;
- const struct kvm_irq_level *cpu_vtimer_irq;
- const struct kvm_irq_level *cpu_ptimer_irq;
switch (vcpu->arch.target) {
case KVM_ARM_TARGET_CORTEX_A7:
case KVM_ARM_TARGET_CORTEX_A15:
reset_regs = &cortexa_regs_reset;
vcpu->arch.midr = read_cpuid_id();
- cpu_vtimer_irq = &cortexa_vtimer_irq;
- cpu_ptimer_irq = &cortexa_ptimer_irq;
break;
default:
return -ENODEV;
@@ -84,5 +70,5 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
kvm_reset_coprocs(vcpu);
/* Reset arch_timer context */
- return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq, cpu_ptimer_irq);
+ return kvm_timer_vcpu_reset(vcpu);
}
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index 58075627c6df..f673cd7a6766 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -17,7 +17,7 @@
#include <linux/gpio/machine.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
-#include <linux/i2c/pcf857x.h>
+#include <linux/platform_data/pcf857x.h>
#include <linux/platform_data/at24.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 20f1874a5657..70e00dbeec96 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -14,7 +14,7 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
-#include <linux/i2c/pcf857x.h>
+#include <linux/platform_data/pcf857x.h>
#include <linux/platform_data/at24.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index 055e947a6a39..1d76e7480a42 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -23,7 +23,7 @@
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/platform_data/at24.h>
-#include <linux/i2c/pcf857x.h>
+#include <linux/platform_data/pcf857x.h>
#include <media/i2c/tvp514x.h>
#include <media/i2c/adv7343.h>
diff --git a/arch/arm/mach-dove/pcie.c b/arch/arm/mach-dove/pcie.c
index 91fe97144570..dfb62f3f5dcf 100644
--- a/arch/arm/mach-dove/pcie.c
+++ b/arch/arm/mach-dove/pcie.c
@@ -152,16 +152,23 @@ static void rc_pci_fixup(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL, PCI_ANY_ID, rc_pci_fixup);
-static struct pci_bus __init *
-dove_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+static int __init
+dove_pcie_scan_bus(int nr, struct pci_host_bridge *bridge)
{
+ struct pci_sys_data *sys = pci_host_bridge_priv(bridge);
+
if (nr >= num_pcie_ports) {
BUG();
- return NULL;
+ return -EINVAL;
}
- return pci_scan_root_bus(NULL, sys->busnr, &pcie_ops, sys,
- &sys->resources);
+ list_splice_init(&sys->resources, &bridge->windows);
+ bridge->dev.parent = NULL;
+ bridge->sysdata = sys;
+ bridge->busnr = sys->busnr;
+ bridge->ops = &pcie_ops;
+
+ return pci_scan_root_bus_bridge(bridge);
}
static int __init dove_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
diff --git a/arch/arm/mach-iop13xx/pci.c b/arch/arm/mach-iop13xx/pci.c
index 204eb4460271..070d92ae1b6f 100644
--- a/arch/arm/mach-iop13xx/pci.c
+++ b/arch/arm/mach-iop13xx/pci.c
@@ -504,10 +504,10 @@ iop13xx_pci_abort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
/* Scan an IOP13XX PCI bus. nr selects which ATU we use.
*/
-struct pci_bus *iop13xx_scan_bus(int nr, struct pci_sys_data *sys)
+int iop13xx_scan_bus(int nr, struct pci_host_bridge *bridge)
{
- int which_atu;
- struct pci_bus *bus = NULL;
+ int which_atu, ret;
+ struct pci_sys_data *sys = pci_host_bridge_priv(bridge);
switch (init_atu) {
case IOP13XX_INIT_ATU_ATUX:
@@ -525,9 +525,14 @@ struct pci_bus *iop13xx_scan_bus(int nr, struct pci_sys_data *sys)
if (!which_atu) {
BUG();
- return NULL;
+ return -ENODEV;
}
+ list_splice_init(&sys->resources, &bridge->windows);
+ bridge->dev.parent = NULL;
+ bridge->sysdata = sys;
+ bridge->busnr = sys->busnr;
+
switch (which_atu) {
case IOP13XX_INIT_ATU_ATUX:
if (time_after_eq(jiffies + msecs_to_jiffies(1000),
@@ -535,18 +540,22 @@ struct pci_bus *iop13xx_scan_bus(int nr, struct pci_sys_data *sys)
while(time_before(jiffies, atux_trhfa_timeout))
udelay(100);
- bus = pci_bus_atux = pci_scan_root_bus(NULL, sys->busnr,
- &iop13xx_atux_ops,
- sys, &sys->resources);
+ bridge->ops = &iop13xx_atux_ops;
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (!ret)
+ pci_bus_atux = bridge->bus;
break;
case IOP13XX_INIT_ATU_ATUE:
- bus = pci_bus_atue = pci_scan_root_bus(NULL, sys->busnr,
- &iop13xx_atue_ops,
- sys, &sys->resources);
+ bridge->ops = &iop13xx_atue_ops;
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (!ret)
+ pci_bus_atue = bridge->bus;
break;
+ default:
+ ret = -EINVAL;
}
- return bus;
+ return ret;
}
/* This function is called from iop13xx_pci_init() after assigning valid
diff --git a/arch/arm/mach-iop13xx/pci.h b/arch/arm/mach-iop13xx/pci.h
index 71b9c57e1fde..8dc343cb887a 100644
--- a/arch/arm/mach-iop13xx/pci.h
+++ b/arch/arm/mach-iop13xx/pci.h
@@ -11,9 +11,10 @@ extern size_t iop13xx_atue_mem_size;
extern size_t iop13xx_atux_mem_size;
struct pci_sys_data;
+struct pci_host_bridge;
struct hw_pci;
int iop13xx_pci_setup(int nr, struct pci_sys_data *sys);
-struct pci_bus *iop13xx_scan_bus(int nr, struct pci_sys_data *);
+int iop13xx_scan_bus(int nr, struct pci_host_bridge *bridge);
void iop13xx_atu_select(struct hw_pci *plat_pci);
void iop13xx_pci_init(void);
void iop13xx_map_pci_memory(void);
diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c
index 6c52bd32610e..e48cc06c2aec 100644
--- a/arch/arm/mach-lpc32xx/phy3250.c
+++ b/arch/arm/mach-lpc32xx/phy3250.c
@@ -137,6 +137,9 @@ static void pl08x_put_signal(const struct pl08x_channel_data *cd, int ch)
}
static struct pl08x_platform_data pl08x_pd = {
+ /* Some reasonable memcpy defaults */
+ .memcpy_burst_size = PL08X_BURST_SZ_256,
+ .memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
.slave_channels = &pl08x_slave_channels[0],
.num_slave_channels = ARRAY_SIZE(pl08x_slave_channels),
.get_xfer_signal = pl08x_get_signal,
diff --git a/arch/arm/mach-mv78xx0/pcie.c b/arch/arm/mach-mv78xx0/pcie.c
index 81ff4327a962..636d84b40466 100644
--- a/arch/arm/mach-mv78xx0/pcie.c
+++ b/arch/arm/mach-mv78xx0/pcie.c
@@ -194,16 +194,22 @@ static void rc_pci_fixup(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL, PCI_ANY_ID, rc_pci_fixup);
-static struct pci_bus __init *
-mv78xx0_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+static int __init mv78xx0_pcie_scan_bus(int nr, struct pci_host_bridge *bridge)
{
+ struct pci_sys_data *sys = pci_host_bridge_priv(bridge);
+
if (nr >= num_pcie_ports) {
BUG();
- return NULL;
+ return -EINVAL;
}
- return pci_scan_root_bus(NULL, sys->busnr, &pcie_ops, sys,
- &sys->resources);
+ list_splice_init(&sys->resources, &bridge->windows);
+ bridge->dev.parent = NULL;
+ bridge->sysdata = sys;
+ bridge->busnr = sys->busnr;
+ bridge->ops = &pcie_ops;
+
+ return pci_scan_root_bus_bridge(bridge);
}
static int __init mv78xx0_pcie_map_irq(const struct pci_dev *dev, u8 slot,
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index efeffc6b4ebb..4c0c7de665c3 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -54,6 +54,7 @@ void orion5x_restart(enum reboot_mode, const char *);
* PCIe/PCI functions.
*/
struct pci_bus;
+struct pci_host_bridge;
struct pci_sys_data;
struct pci_dev;
@@ -61,7 +62,7 @@ void orion5x_pcie_id(u32 *dev, u32 *rev);
void orion5x_pci_disable(void);
void orion5x_pci_set_cardbus_mode(void);
int orion5x_pci_sys_setup(int nr, struct pci_sys_data *sys);
-struct pci_bus *orion5x_pci_sys_scan_bus(int nr, struct pci_sys_data *sys);
+int orion5x_pci_sys_scan_bus(int nr, struct pci_host_bridge *bridge);
int orion5x_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
struct tag;
diff --git a/arch/arm/mach-orion5x/pci.c b/arch/arm/mach-orion5x/pci.c
index ecb998e7f8dc..76951bfbacf5 100644
--- a/arch/arm/mach-orion5x/pci.c
+++ b/arch/arm/mach-orion5x/pci.c
@@ -555,18 +555,27 @@ int __init orion5x_pci_sys_setup(int nr, struct pci_sys_data *sys)
return 0;
}
-struct pci_bus __init *orion5x_pci_sys_scan_bus(int nr, struct pci_sys_data *sys)
+int __init orion5x_pci_sys_scan_bus(int nr, struct pci_host_bridge *bridge)
{
- if (nr == 0)
- return pci_scan_root_bus(NULL, sys->busnr, &pcie_ops, sys,
- &sys->resources);
+ struct pci_sys_data *sys = pci_host_bridge_priv(bridge);
- if (nr == 1 && !orion5x_pci_disabled)
- return pci_scan_root_bus(NULL, sys->busnr, &pci_ops, sys,
- &sys->resources);
+ list_splice_init(&sys->resources, &bridge->windows);
+ bridge->dev.parent = NULL;
+ bridge->sysdata = sys;
+ bridge->busnr = sys->busnr;
+
+ if (nr == 0) {
+ bridge->ops = &pcie_ops;
+ return pci_scan_root_bus_bridge(bridge);
+ }
+
+ if (nr == 1 && !orion5x_pci_disabled) {
+ bridge->ops = &pci_ops;
+ return pci_scan_root_bus_bridge(bridge);
+ }
BUG();
- return NULL;
+ return -ENODEV;
}
int __init orion5x_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index d452a49c0396..1467c1d1e541 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -27,7 +27,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/types.h>
-#include <linux/i2c/pcf857x.h>
+#include <linux/platform_data/pcf857x.h>
#include <linux/i2c/pxa-i2c.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/physmap.h>
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index 051c554776a6..fae38fdc8d8e 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -27,7 +27,7 @@
#include <linux/i2c.h>
#include <linux/leds.h>
#include <linux/mfd/da903x.h>
-#include <linux/i2c/max732x.h>
+#include <linux/platform_data/max732x.h>
#include <linux/i2c/pxa-i2c.h>
#include <asm/types.h>
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index 7b6610e9dae4..2d45d18b1a5e 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -26,7 +26,7 @@
#include <linux/mtd/partitions.h>
#include <linux/i2c/pxa-i2c.h>
-#include <linux/i2c/pcf857x.h>
+#include <linux/platform_data/pcf857x.h>
#include <linux/platform_data/at24.h>
#include <linux/smc91x.h>
#include <linux/gpio.h>
diff --git a/arch/arm/mach-s3c64xx/pl080.c b/arch/arm/mach-s3c64xx/pl080.c
index 261820a855ec..66fc774b70ec 100644
--- a/arch/arm/mach-s3c64xx/pl080.c
+++ b/arch/arm/mach-s3c64xx/pl080.c
@@ -137,16 +137,10 @@ static const struct dma_slave_map s3c64xx_dma0_slave_map[] = {
};
struct pl08x_platform_data s3c64xx_dma0_plat_data = {
- .memcpy_channel = {
- .bus_id = "memcpy",
- .cctl_memcpy =
- (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
- PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT |
- PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
- PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT |
- PL080_CONTROL_PROT_BUFF | PL080_CONTROL_PROT_CACHE |
- PL080_CONTROL_PROT_SYS),
- },
+ .memcpy_burst_size = PL08X_BURST_SZ_4,
+ .memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
+ .memcpy_prot_buff = true,
+ .memcpy_prot_cache = true,
.lli_buses = PL08X_AHB1,
.mem_buses = PL08X_AHB1,
.get_xfer_signal = pl08x_get_xfer_signal,
@@ -238,16 +232,10 @@ static const struct dma_slave_map s3c64xx_dma1_slave_map[] = {
};
struct pl08x_platform_data s3c64xx_dma1_plat_data = {
- .memcpy_channel = {
- .bus_id = "memcpy",
- .cctl_memcpy =
- (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
- PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT |
- PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
- PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT |
- PL080_CONTROL_PROT_BUFF | PL080_CONTROL_PROT_CACHE |
- PL080_CONTROL_PROT_SYS),
- },
+ .memcpy_burst_size = PL08X_BURST_SZ_4,
+ .memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
+ .memcpy_prot_buff = true,
+ .memcpy_prot_cache = true,
.lli_buses = PL08X_AHB1,
.mem_buses = PL08X_AHB1,
.get_xfer_signal = pl08x_get_xfer_signal,
diff --git a/arch/arm/mach-spear/spear3xx.c b/arch/arm/mach-spear/spear3xx.c
index 23394ac76cf2..8537fcffe5a8 100644
--- a/arch/arm/mach-spear/spear3xx.c
+++ b/arch/arm/mach-spear/spear3xx.c
@@ -44,16 +44,10 @@ struct pl022_ssp_controller pl022_plat_data = {
/* dmac device registration */
struct pl08x_platform_data pl080_plat_data = {
- .memcpy_channel = {
- .bus_id = "memcpy",
- .cctl_memcpy =
- (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
- PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \
- PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \
- PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \
- PL080_CONTROL_PROT_BUFF | PL080_CONTROL_PROT_CACHE | \
- PL080_CONTROL_PROT_SYS),
- },
+ .memcpy_burst_size = PL08X_BURST_SZ_16,
+ .memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
+ .memcpy_prot_buff = true,
+ .memcpy_prot_cache = true,
.lli_buses = PL08X_AHB1,
.mem_buses = PL08X_AHB1,
.get_xfer_signal = pl080_get_signal,
diff --git a/arch/arm/mach-spear/spear6xx.c b/arch/arm/mach-spear/spear6xx.c
index ccf3573b831c..c5fc110134ba 100644
--- a/arch/arm/mach-spear/spear6xx.c
+++ b/arch/arm/mach-spear/spear6xx.c
@@ -322,16 +322,10 @@ static struct pl08x_channel_data spear600_dma_info[] = {
};
static struct pl08x_platform_data spear6xx_pl080_plat_data = {
- .memcpy_channel = {
- .bus_id = "memcpy",
- .cctl_memcpy =
- (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
- PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \
- PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \
- PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \
- PL080_CONTROL_PROT_BUFF | PL080_CONTROL_PROT_CACHE | \
- PL080_CONTROL_PROT_SYS),
- },
+ .memcpy_burst_size = PL08X_BURST_SZ_16,
+ .memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
+ .memcpy_prot_buff = true,
+ .memcpy_prot_cache = true,
.lli_buses = PL08X_AHB1,
.mem_buses = PL08X_AHB1,
.get_xfer_signal = pl080_get_signal,
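
The four board conversions above trade the bit-packed cctl_memcpy word for discrete pl08x platform-data fields. A sketch of the equivalence for the 16-beat, 32-bit case, using only constants that appear in these hunks; example_pd is a placeholder, and the driver is assumed to rebuild the CCTL word from the new fields:

/* Sketch only: old and new descriptions of the same memcpy channel
 * defaults (16-beat bursts, 32-bit bus width, bufferable and cacheable
 * protection).
 */

/* Old style: everything packed into one PL080 CCTL word. */
u32 old_cctl_memcpy =
	PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT |
	PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT |
	PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
	PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT |
	PL080_CONTROL_PROT_BUFF | PL080_CONTROL_PROT_CACHE |
	PL080_CONTROL_PROT_SYS;

/* New style: discrete fields; note the new fields carry no per-board
 * PROT_SYS flag.
 */
struct pl08x_platform_data example_pd = {
	.memcpy_burst_size = PL08X_BURST_SZ_16,
	.memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS,
	.memcpy_prot_buff = true,
	.memcpy_prot_cache = true,
};
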
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c6c4c9c8824b..60cdfdc151aa 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -679,7 +679,7 @@ config ARCH_DMA_ADDR_T_64BIT
bool
config ARM_THUMB
- bool "Support Thumb user binaries" if !CPU_THUMBONLY
+ bool "Support Thumb user binaries" if !CPU_THUMBONLY && EXPERT
depends on CPU_THUMB_CAPABLE
default y
help
@@ -690,6 +690,10 @@ config ARM_THUMB
instruction set resulting in smaller binaries at the expense of
slightly less efficient code.
+ If this option is disabled, and you run userspace that switches to
+ Thumb mode, signal handling will not work correctly, resulting in
+ segmentation faults or illegal instruction aborts.
+
If you don't know what this all is, saying Y is a safe choice.
config ARM_THUMBEE
@@ -1045,8 +1049,8 @@ config ARM_L1_CACHE_SHIFT
default 5
config ARM_DMA_MEM_BUFFERABLE
- bool "Use non-cacheable memory for DMA" if (CPU_V6 || CPU_V6K) && !CPU_V7
- default y if CPU_V6 || CPU_V6K || CPU_V7
+ bool "Use non-cacheable memory for DMA" if (CPU_V6 || CPU_V6K || CPU_V7M) && !CPU_V7
+ default y if CPU_V6 || CPU_V6K || CPU_V7 || CPU_V7M
help
Historically, the kernel has used strongly ordered mappings to
provide DMA coherent memory. With the advent of ARMv7, mapping
@@ -1061,6 +1065,10 @@ config ARM_DMA_MEM_BUFFERABLE
and therefore turning this on may result in unpredictable driver
behaviour. Therefore, we offer this as an option.
+ On some of the beefier ARMv7-M machines (with DMA and write
+ buffers) you likely want this enabled, while those that
+ didn't need it until now also won't need it in the future.
+
You are recommended say 'Y' here and debug any affected drivers.
config ARM_HEAVY_MB
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index b3dea80715b4..950d19babb5f 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -2,9 +2,8 @@
# Makefile for the linux arm-specific parts of the memory manager.
#
-obj-y := dma-mapping.o extable.o fault.o init.o \
- iomap.o
-
+obj-y := extable.o fault.o init.o iomap.o
+obj-y += dma-mapping$(MMUEXT).o
obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
mmap.o pgd.o mmu.o pageattr.o
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
new file mode 100644
index 000000000000..90ee354d803e
--- /dev/null
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -0,0 +1,228 @@
+/*
+ * Based on linux/arch/arm/mm/dma-mapping.c
+ *
+ * Copyright (C) 2000-2004 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+
+#include <asm/cachetype.h>
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+#include <asm/cp15.h>
+
+#include "dma.h"
+
+/*
+ * dma_noop_ops is used if
+ * - MMU/MPU is off
+ * - cpu is v7m w/o cache support
+ * - device is coherent
+ * otherwise arm_nommu_dma_ops is used.
+ *
+ * arm_nommu_dma_ops relies on consistent DMA memory (please refer to
+ * [1] on how to declare such memory).
+ *
+ * [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
+ */
+
+static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs)
+
+{
+ const struct dma_map_ops *ops = &dma_noop_ops;
+
+ /*
+ * We are here because:
+ * - no consistent DMA region has been defined, so we can't
+ * continue.
+ * - there is no space left in the consistent DMA region, so we
+ * can only fall back to the generic allocator if we are
+ * told that consistency is not required.
+ */
+
+ if (attrs & DMA_ATTR_NON_CONSISTENT)
+ return ops->alloc(dev, size, dma_handle, gfp, attrs);
+
+ WARN_ON_ONCE(1);
+ return NULL;
+}
+
+static void arm_nommu_dma_free(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_addr,
+ unsigned long attrs)
+{
+ const struct dma_map_ops *ops = &dma_noop_ops;
+
+ if (attrs & DMA_ATTR_NON_CONSISTENT)
+ ops->free(dev, size, cpu_addr, dma_addr, attrs);
+ else
+ WARN_ON_ONCE(1);
+
+ return;
+}
+
+static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ dmac_map_area(__va(paddr), size, dir);
+
+ if (dir == DMA_FROM_DEVICE)
+ outer_inv_range(paddr, paddr + size);
+ else
+ outer_clean_range(paddr, paddr + size);
+}
+
+static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dir != DMA_TO_DEVICE) {
+ outer_inv_range(paddr, paddr + size);
+ dmac_unmap_area(__va(paddr), size, dir);
+ }
+}
+
+static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ dma_addr_t handle = page_to_phys(page) + offset;
+
+ __dma_page_cpu_to_dev(handle, size, dir);
+
+ return handle;
+}
+
+static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ __dma_page_dev_to_cpu(handle, size, dir);
+}
+
+
+static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sgl, sg, nents, i) {
+ sg_dma_address(sg) = sg_phys(sg);
+ sg_dma_len(sg) = sg->length;
+ __dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
+ }
+
+ return nents;
+}
+
+static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ __dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
+}
+
+static void arm_nommu_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ __dma_page_cpu_to_dev(handle, size, dir);
+}
+
+static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ __dma_page_cpu_to_dev(handle, size, dir);
+}
+
+static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ __dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
+}
+
+static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ __dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
+}
+
+const struct dma_map_ops arm_nommu_dma_ops = {
+ .alloc = arm_nommu_dma_alloc,
+ .free = arm_nommu_dma_free,
+ .map_page = arm_nommu_dma_map_page,
+ .unmap_page = arm_nommu_dma_unmap_page,
+ .map_sg = arm_nommu_dma_map_sg,
+ .unmap_sg = arm_nommu_dma_unmap_sg,
+ .sync_single_for_device = arm_nommu_dma_sync_single_for_device,
+ .sync_single_for_cpu = arm_nommu_dma_sync_single_for_cpu,
+ .sync_sg_for_device = arm_nommu_dma_sync_sg_for_device,
+ .sync_sg_for_cpu = arm_nommu_dma_sync_sg_for_cpu,
+};
+EXPORT_SYMBOL(arm_nommu_dma_ops);
+
+static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
+{
+ return coherent ? &dma_noop_ops : &arm_nommu_dma_ops;
+}
+
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ const struct iommu_ops *iommu, bool coherent)
+{
+ const struct dma_map_ops *dma_ops;
+
+ if (IS_ENABLED(CONFIG_CPU_V7M)) {
+ /*
+ * Cache support for v7m is optional, so it can be treated as
+ * coherent if no cache has been detected. Note that it is not
+ * enough to check whether the MPU is in use, since in the
+ * absence of an MPU the system memory map is used.
+ */
+ dev->archdata.dma_coherent = (cacheid) ? coherent : true;
+ } else {
+ /*
+ * Assume coherent DMA in case MMU/MPU has not been set up.
+ */
+ dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
+ }
+
+ dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent);
+
+ set_dma_ops(dev, dma_ops);
+}
+
+void arch_teardown_dma_ops(struct device *dev)
+{
+}
+
+#define PREALLOC_DMA_DEBUG_ENTRIES 4096
+
+static int __init dma_debug_do_init(void)
+{
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+ return 0;
+}
+core_initcall(dma_debug_do_init);
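
The comments in arm_nommu_dma_alloc() above describe two allocation paths. A minimal consumer-side sketch of how each path is reached, assuming a hypothetical driver of this kernel era; example_alloc is not part of this patch:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Sketch only: hypothetical driver code exercising both paths. */
static int example_alloc(struct device *dev)
{
	dma_addr_t dma;
	void *cpu;

	/*
	 * Default path: needs a consistent DMA region (e.g. a
	 * "shared-dma-pool" reserved-memory region); without one the
	 * WARN_ON_ONCE() above fires and the allocation returns NULL.
	 */
	cpu = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;
	dma_free_coherent(dev, SZ_4K, cpu, dma);

	/*
	 * Non-consistent path: falls back to the generic allocator, so
	 * the driver must handle cache maintenance itself.
	 */
	cpu = dma_alloc_attrs(dev, SZ_4K, &dma, GFP_KERNEL,
			      DMA_ATTR_NON_CONSISTENT);
	if (!cpu)
		return -ENOMEM;
	dma_free_attrs(dev, SZ_4K, cpu, dma, DMA_ATTR_NON_CONSISTENT);

	return 0;
}
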
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index bd83c531828a..e7380bafbfa6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -180,6 +180,11 @@ static void arm_dma_sync_single_for_device(struct device *dev,
__dma_page_cpu_to_dev(page, offset, size, dir);
}
+static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == ARM_MAPPING_ERROR;
+}
+
const struct dma_map_ops arm_dma_ops = {
.alloc = arm_dma_alloc,
.free = arm_dma_free,
@@ -193,6 +198,8 @@ const struct dma_map_ops arm_dma_ops = {
.sync_single_for_device = arm_dma_sync_single_for_device,
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
+ .mapping_error = arm_dma_mapping_error,
+ .dma_supported = arm_dma_supported,
};
EXPORT_SYMBOL(arm_dma_ops);
@@ -211,6 +218,8 @@ const struct dma_map_ops arm_coherent_dma_ops = {
.get_sgtable = arm_dma_get_sgtable,
.map_page = arm_coherent_dma_map_page,
.map_sg = arm_dma_map_sg,
+ .mapping_error = arm_dma_mapping_error,
+ .dma_supported = arm_dma_supported,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
@@ -344,8 +353,6 @@ static void __dma_free_buffer(struct page *page, size_t size)
}
}
-#ifdef CONFIG_MMU
-
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
const void *caller, bool want_vaddr,
@@ -647,22 +654,6 @@ static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
return prot;
}
-#define nommu() 0
-
-#else /* !CONFIG_MMU */
-
-#define nommu() 1
-
-#define __get_dma_pgprot(attrs, prot) __pgprot(0)
-#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv) NULL
-#define __alloc_from_pool(size, ret_page) NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag, gfp) NULL
-#define __free_from_pool(cpu_addr, size) do { } while (0)
-#define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0)
-#define __dma_free_remap(cpu_addr, size) do { } while (0)
-
-#endif /* CONFIG_MMU */
-
static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
struct page **ret_page)
{
@@ -799,13 +790,13 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp &= ~(__GFP_COMP);
args.gfp = gfp;
- *handle = DMA_ERROR_CODE;
+ *handle = ARM_MAPPING_ERROR;
allowblock = gfpflags_allow_blocking(gfp);
cma = allowblock ? dev_get_cma_area(dev) : false;
if (cma)
buf->allocator = &cma_allocator;
- else if (nommu() || is_coherent)
+ else if (is_coherent)
buf->allocator = &simple_allocator;
else if (allowblock)
buf->allocator = &remap_allocator;
@@ -854,8 +845,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
- int ret = -ENXIO;
-#ifdef CONFIG_MMU
+ int ret;
unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long pfn = dma_to_pfn(dev, dma_addr);
@@ -870,10 +860,6 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
-#else
- ret = vm_iomap_memory(vma, vma->vm_start,
- (vma->vm_end - vma->vm_start));
-#endif /* CONFIG_MMU */
return ret;
}
@@ -892,9 +878,7 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
-#ifdef CONFIG_MMU
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
-#endif /* CONFIG_MMU */
return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
@@ -1177,11 +1161,10 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
* during bus mastering, then you would pass 0x00ffffff as the mask
* to this function.
*/
-int dma_supported(struct device *dev, u64 mask)
+int arm_dma_supported(struct device *dev, u64 mask)
{
return __dma_supported(dev, mask, false);
}
-EXPORT_SYMBOL(dma_supported);
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
@@ -1254,7 +1237,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
if (i == mapping->nr_bitmaps) {
if (extend_iommu_mapping(mapping)) {
spin_unlock_irqrestore(&mapping->lock, flags);
- return DMA_ERROR_CODE;
+ return ARM_MAPPING_ERROR;
}
start = bitmap_find_next_zero_area(mapping->bitmaps[i],
@@ -1262,7 +1245,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
if (start > mapping->bits) {
spin_unlock_irqrestore(&mapping->lock, flags);
- return DMA_ERROR_CODE;
+ return ARM_MAPPING_ERROR;
}
bitmap_set(mapping->bitmaps[i], start, count);
@@ -1445,7 +1428,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
int i;
dma_addr = __alloc_iova(mapping, size);
- if (dma_addr == DMA_ERROR_CODE)
+ if (dma_addr == ARM_MAPPING_ERROR)
return dma_addr;
iova = dma_addr;
@@ -1472,7 +1455,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
fail:
iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
__free_iova(mapping, dma_addr, size);
- return DMA_ERROR_CODE;
+ return ARM_MAPPING_ERROR;
}
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
@@ -1533,7 +1516,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
return NULL;
*handle = __iommu_create_mapping(dev, &page, size, attrs);
- if (*handle == DMA_ERROR_CODE)
+ if (*handle == ARM_MAPPING_ERROR)
goto err_mapping;
return addr;
@@ -1561,7 +1544,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
struct page **pages;
void *addr = NULL;
- *handle = DMA_ERROR_CODE;
+ *handle = ARM_MAPPING_ERROR;
size = PAGE_ALIGN(size);
if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
@@ -1582,7 +1565,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
return NULL;
*handle = __iommu_create_mapping(dev, pages, size, attrs);
- if (*handle == DMA_ERROR_CODE)
+ if (*handle == ARM_MAPPING_ERROR)
goto err_buffer;
if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
@@ -1732,10 +1715,10 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
int prot;
size = PAGE_ALIGN(size);
- *handle = DMA_ERROR_CODE;
+ *handle = ARM_MAPPING_ERROR;
iova_base = iova = __alloc_iova(mapping, size);
- if (iova == DMA_ERROR_CODE)
+ if (iova == ARM_MAPPING_ERROR)
return -ENOMEM;
for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
@@ -1775,7 +1758,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
for (i = 1; i < nents; i++) {
s = sg_next(s);
- s->dma_address = DMA_ERROR_CODE;
+ s->dma_address = ARM_MAPPING_ERROR;
s->dma_length = 0;
if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
@@ -1950,7 +1933,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
int ret, prot, len = PAGE_ALIGN(size + offset);
dma_addr = __alloc_iova(mapping, len);
- if (dma_addr == DMA_ERROR_CODE)
+ if (dma_addr == ARM_MAPPING_ERROR)
return dma_addr;
prot = __dma_info_to_prot(dir, attrs);
@@ -1962,7 +1945,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
return dma_addr + offset;
fail:
__free_iova(mapping, dma_addr, len);
- return DMA_ERROR_CODE;
+ return ARM_MAPPING_ERROR;
}
/**
@@ -2056,7 +2039,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
size_t len = PAGE_ALIGN(size + offset);
dma_addr = __alloc_iova(mapping, len);
- if (dma_addr == DMA_ERROR_CODE)
+ if (dma_addr == ARM_MAPPING_ERROR)
return dma_addr;
prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
@@ -2068,7 +2051,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
return dma_addr + offset;
fail:
__free_iova(mapping, dma_addr, len);
- return DMA_ERROR_CODE;
+ return ARM_MAPPING_ERROR;
}
/**
@@ -2140,6 +2123,9 @@ const struct dma_map_ops iommu_ops = {
.map_resource = arm_iommu_map_resource,
.unmap_resource = arm_iommu_unmap_resource,
+
+ .mapping_error = arm_dma_mapping_error,
+ .dma_supported = arm_dma_supported,
};
const struct dma_map_ops iommu_coherent_ops = {
@@ -2156,6 +2142,9 @@ const struct dma_map_ops iommu_coherent_ops = {
.map_resource = arm_iommu_map_resource,
.unmap_resource = arm_iommu_unmap_resource,
+
+ .mapping_error = arm_dma_mapping_error,
+ .dma_supported = arm_dma_supported,
};
/**
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index f0325d96b97a..785d2a562a23 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -185,23 +185,6 @@ EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
const struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);
-static const struct dma_map_ops xen_swiotlb_dma_ops = {
- .alloc = xen_swiotlb_alloc_coherent,
- .free = xen_swiotlb_free_coherent,
- .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
- .sync_single_for_device = xen_swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
- .map_sg = xen_swiotlb_map_sg_attrs,
- .unmap_sg = xen_swiotlb_unmap_sg_attrs,
- .map_page = xen_swiotlb_map_page,
- .unmap_page = xen_swiotlb_unmap_page,
- .dma_supported = xen_swiotlb_dma_supported,
- .set_dma_mask = xen_swiotlb_set_dma_mask,
- .mmap = xen_swiotlb_dma_mmap,
- .get_sgtable = xen_swiotlb_get_sgtable,
-};
-
int __init xen_mm_init(void)
{
struct gnttab_cache_flush cflush;
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index 0ed01f2d5ee4..e71eefa2e427 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -144,17 +144,17 @@ bool __set_phys_to_machine_multi(unsigned long pfn,
return true;
}
- p2m_entry = kzalloc(sizeof(struct xen_p2m_entry), GFP_NOWAIT);
- if (!p2m_entry) {
- pr_warn("cannot allocate xen_p2m_entry\n");
+ p2m_entry = kzalloc(sizeof(*p2m_entry), GFP_NOWAIT);
+ if (!p2m_entry)
return false;
- }
+
p2m_entry->pfn = pfn;
p2m_entry->nr_pages = nr_pages;
p2m_entry->mfn = mfn;
write_lock_irqsave(&p2m_lock, irqflags);
- if ((rc = xen_add_phys_to_mach_entry(p2m_entry)) < 0) {
+ rc = xen_add_phys_to_mach_entry(p2m_entry);
+ if (rc < 0) {
write_unlock_irqrestore(&p2m_lock, irqflags);
return false;
}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9f7a934ff707..8addb851ab5e 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -13,7 +13,7 @@ config ARM64
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_GCOV_PROFILE_ALL
- select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
select ARCH_HAS_KCOV
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN
@@ -488,6 +488,17 @@ config CAVIUM_ERRATUM_27456
If unsure, say Y.
+config CAVIUM_ERRATUM_30115
+ bool "Cavium erratum 30115: Guest may disable interrupts in host"
+ default y
+ help
+ On ThunderX T88 pass 1.x through 2.2, T81 pass 1.0 through
+ 1.2, and T83 pass 1.0, KVM guest execution may disable
+ interrupts in the host. Trapping both GICv3 group-0 and group-1
+ accesses sidesteps the issue.
+
+ If unsure, say Y.
+
config QCOM_FALKOR_ERRATUM_1003
bool "Falkor E1003: Incorrect translation due to ASID change"
default y
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 1a98bc8602a2..8cef47fa2218 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -89,7 +89,7 @@ static inline void gic_write_ctlr(u32 val)
static inline void gic_write_grpen1(u32 val)
{
- write_sysreg_s(val, SYS_ICC_GRPEN1_EL1);
+ write_sysreg_s(val, SYS_ICC_IGRPEN1_EL1);
isb();
}
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index b3aab8a17868..8d2272c6822c 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -38,7 +38,8 @@
#define ARM64_WORKAROUND_REPEAT_TLBI 17
#define ARM64_WORKAROUND_QCOM_FALKOR_E1003 18
#define ARM64_WORKAROUND_858921 19
+#define ARM64_WORKAROUND_CAVIUM_30115 20
-#define ARM64_NCAPS 20
+#define ARM64_NCAPS 21
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 0984d1b3a8f2..235e77d98261 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -86,6 +86,7 @@
#define CAVIUM_CPU_PART_THUNDERX 0x0A1
#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
+#define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3
#define BRCM_CPU_PART_VULCAN 0x516
@@ -96,6 +97,7 @@
#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index f72779aad276..0df756b24863 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -24,7 +24,6 @@
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
-#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern const struct dma_map_ops dummy_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 28bf02efce76..8cabd57b6348 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -19,6 +19,7 @@
#define __ASM_ESR_H
#include <asm/memory.h>
+#include <asm/sysreg.h>
#define ESR_ELx_EC_UNKNOWN (0x00)
#define ESR_ELx_EC_WFx (0x01)
@@ -182,6 +183,29 @@
#define ESR_ELx_SYS64_ISS_SYS_CNTFRQ (ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 0, 14, 0) | \
ESR_ELx_SYS64_ISS_DIR_READ)
+#define esr_sys64_to_sysreg(e) \
+ sys_reg((((e) & ESR_ELx_SYS64_ISS_OP0_MASK) >> \
+ ESR_ELx_SYS64_ISS_OP0_SHIFT), \
+ (((e) & ESR_ELx_SYS64_ISS_OP1_MASK) >> \
+ ESR_ELx_SYS64_ISS_OP1_SHIFT), \
+ (((e) & ESR_ELx_SYS64_ISS_CRN_MASK) >> \
+ ESR_ELx_SYS64_ISS_CRN_SHIFT), \
+ (((e) & ESR_ELx_SYS64_ISS_CRM_MASK) >> \
+ ESR_ELx_SYS64_ISS_CRM_SHIFT), \
+ (((e) & ESR_ELx_SYS64_ISS_OP2_MASK) >> \
+ ESR_ELx_SYS64_ISS_OP2_SHIFT))
+
+#define esr_cp15_to_sysreg(e) \
+ sys_reg(3, \
+ (((e) & ESR_ELx_SYS64_ISS_OP1_MASK) >> \
+ ESR_ELx_SYS64_ISS_OP1_SHIFT), \
+ (((e) & ESR_ELx_SYS64_ISS_CRN_MASK) >> \
+ ESR_ELx_SYS64_ISS_CRN_SHIFT), \
+ (((e) & ESR_ELx_SYS64_ISS_CRM_MASK) >> \
+ ESR_ELx_SYS64_ISS_CRM_SHIFT), \
+ (((e) & ESR_ELx_SYS64_ISS_OP2_MASK) >> \
+ ESR_ELx_SYS64_ISS_OP2_SHIFT))
+
#ifndef __ASSEMBLY__
#include <asm/types.h>
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index bbc1e35aa601..793bd73b0d07 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -83,4 +83,8 @@ extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
extern void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep);
+#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
+static inline bool gigantic_page_supported(void) { return true; }
+#endif
+
#endif /* __ASM_HUGETLB_H */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 1f252a95bc02..d68630007b14 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -42,7 +42,9 @@
#define KVM_VCPU_MAX_FEATURES 4
-#define KVM_REQ_VCPU_EXIT (8 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_SLEEP \
+ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -334,8 +336,6 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
-void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
-void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);
u64 __kvm_call_hyp(void *hypfn, ...);
#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index b18e852d27e8..4572a9b560fa 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -127,6 +127,7 @@ int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
void __timer_save_state(struct kvm_vcpu *vcpu);
void __timer_restore_state(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index b4d13d9267ff..16e44fa9b3b6 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -180,14 +180,31 @@
#define SYS_VBAR_EL1 sys_reg(3, 0, 12, 0, 0)
+#define SYS_ICC_IAR0_EL1 sys_reg(3, 0, 12, 8, 0)
+#define SYS_ICC_EOIR0_EL1 sys_reg(3, 0, 12, 8, 1)
+#define SYS_ICC_HPPIR0_EL1 sys_reg(3, 0, 12, 8, 2)
+#define SYS_ICC_BPR0_EL1 sys_reg(3, 0, 12, 8, 3)
+#define SYS_ICC_AP0Rn_EL1(n) sys_reg(3, 0, 12, 8, 4 | n)
+#define SYS_ICC_AP0R0_EL1 SYS_ICC_AP0Rn_EL1(0)
+#define SYS_ICC_AP0R1_EL1 SYS_ICC_AP0Rn_EL1(1)
+#define SYS_ICC_AP0R2_EL1 SYS_ICC_AP0Rn_EL1(2)
+#define SYS_ICC_AP0R3_EL1 SYS_ICC_AP0Rn_EL1(3)
+#define SYS_ICC_AP1Rn_EL1(n) sys_reg(3, 0, 12, 9, n)
+#define SYS_ICC_AP1R0_EL1 SYS_ICC_AP1Rn_EL1(0)
+#define SYS_ICC_AP1R1_EL1 SYS_ICC_AP1Rn_EL1(1)
+#define SYS_ICC_AP1R2_EL1 SYS_ICC_AP1Rn_EL1(2)
+#define SYS_ICC_AP1R3_EL1 SYS_ICC_AP1Rn_EL1(3)
#define SYS_ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
+#define SYS_ICC_RPR_EL1 sys_reg(3, 0, 12, 11, 3)
#define SYS_ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
#define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
#define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
+#define SYS_ICC_HPPIR1_EL1 sys_reg(3, 0, 12, 12, 2)
#define SYS_ICC_BPR1_EL1 sys_reg(3, 0, 12, 12, 3)
#define SYS_ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
#define SYS_ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
-#define SYS_ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
+#define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6)
+#define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
#define SYS_CONTEXTIDR_EL1 sys_reg(3, 0, 13, 0, 1)
#define SYS_TPIDR_EL1 sys_reg(3, 0, 13, 0, 4)
@@ -287,8 +304,8 @@
#define SCTLR_ELx_M 1
#define SCTLR_EL2_RES1 ((1 << 4) | (1 << 5) | (1 << 11) | (1 << 16) | \
- (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \
- (1 << 28) | (1 << 29))
+ (1 << 18) | (1 << 22) | (1 << 23) | (1 << 28) | \
+ (1 << 29))
#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
SCTLR_ELx_SA | SCTLR_ELx_I)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 7b8a04789cef..59f09e6a6cb8 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -349,7 +349,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
extern long strncpy_from_user(char *dest, const char __user *src, long count);
-extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASM_UACCESS_H */
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 70eea2ecc663..9f3ca24bbcc6 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -232,6 +232,9 @@ struct kvm_arch_memory_slot {
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
#define KVM_ARM_VCPU_PMU_V3_IRQ 0
#define KVM_ARM_VCPU_PMU_V3_INIT 1
+#define KVM_ARM_VCPU_TIMER_CTRL 1
+#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
+#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 2ed2a7657711..0e27f86ee709 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -133,6 +133,27 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
},
#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_30115
+ {
+ /* Cavium ThunderX, T88 pass 1.x - 2.2 */
+ .desc = "Cavium erratum 30115",
+ .capability = ARM64_WORKAROUND_CAVIUM_30115,
+ MIDR_RANGE(MIDR_THUNDERX, 0x00,
+ (1 << MIDR_VARIANT_SHIFT) | 2),
+ },
+ {
+ /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
+ .desc = "Cavium erratum 30115",
+ .capability = ARM64_WORKAROUND_CAVIUM_30115,
+ MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02),
+ },
+ {
+ /* Cavium ThunderX, T83 pass 1.0 */
+ .desc = "Cavium erratum 30115",
+ .capability = ARM64_WORKAROUND_CAVIUM_30115,
+ MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00),
+ },
+#endif
{
.desc = "Mismatched cache line size",
.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index a7f6c01c13b9..e2b7e4f9cc31 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -39,20 +39,18 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
return res->start;
}
+#ifdef CONFIG_ACPI
/*
* Try to assign the IRQ number when probing a new device
*/
int pcibios_alloc_irq(struct pci_dev *dev)
{
- if (acpi_disabled)
- dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
-#ifdef CONFIG_ACPI
- else
- return acpi_pci_irq_enable(dev);
-#endif
+ if (!acpi_disabled)
+ acpi_pci_irq_enable(dev);
return 0;
}
+#endif
/*
* raw_pci_read/write - Platform-specific PCI config space access.
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index b37446a8ffdb..5c7f657dd207 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -390,6 +390,9 @@ int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
case KVM_ARM_VCPU_PMU_V3_CTRL:
ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
break;
+ case KVM_ARM_VCPU_TIMER_CTRL:
+ ret = kvm_arm_timer_set_attr(vcpu, attr);
+ break;
default:
ret = -ENXIO;
break;
@@ -407,6 +410,9 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
case KVM_ARM_VCPU_PMU_V3_CTRL:
ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
break;
+ case KVM_ARM_VCPU_TIMER_CTRL:
+ ret = kvm_arm_timer_get_attr(vcpu, attr);
+ break;
default:
ret = -ENXIO;
break;
@@ -424,6 +430,9 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
case KVM_ARM_VCPU_PMU_V3_CTRL:
ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
break;
+ case KVM_ARM_VCPU_TIMER_CTRL:
+ ret = kvm_arm_timer_has_attr(vcpu, attr);
+ break;
default:
ret = -ENXIO;
break;
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index fa1b18e364fc..17d8a1677a0b 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -89,6 +89,7 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
vcpu->stat.wfi_exit_stat++;
kvm_vcpu_block(vcpu);
+ kvm_clear_request(KVM_REQ_UNHALT, vcpu);
}
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index aede1658aeda..945e79c641c4 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -350,6 +350,20 @@ again:
}
}
+ if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
+ exit_code == ARM_EXCEPTION_TRAP &&
+ (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
+ kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
+ int ret = __vgic_v3_perform_cpuif_access(vcpu);
+
+ if (ret == 1) {
+ __skip_instr(vcpu);
+ goto again;
+ }
+
+ /* 0 falls through to be handled out of EL2 */
+ }
+
fp_enabled = __fpsimd_enabled();
__sysreg_save_guest_state(guest_ctxt);
@@ -422,6 +436,7 @@ void __hyp_text __noreturn __hyp_panic(void)
vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ __timer_save_state(vcpu);
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);
__sysreg_restore_host_state(host_ctxt);
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 561badf93de8..3256b9228e75 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -46,16 +46,6 @@ static const struct kvm_regs default_regs_reset32 = {
COMPAT_PSR_I_BIT | COMPAT_PSR_F_BIT),
};
-static const struct kvm_irq_level default_ptimer_irq = {
- .irq = 30,
- .level = 1,
-};
-
-static const struct kvm_irq_level default_vtimer_irq = {
- .irq = 27,
- .level = 1,
-};
-
static bool cpu_has_32bit_el1(void)
{
u64 pfr0;
@@ -108,8 +98,6 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
*/
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
- const struct kvm_irq_level *cpu_vtimer_irq;
- const struct kvm_irq_level *cpu_ptimer_irq;
const struct kvm_regs *cpu_reset;
switch (vcpu->arch.target) {
@@ -122,8 +110,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
cpu_reset = &default_regs_reset;
}
- cpu_vtimer_irq = &default_vtimer_irq;
- cpu_ptimer_irq = &default_ptimer_irq;
break;
}
@@ -137,5 +123,5 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
kvm_pmu_vcpu_reset(vcpu);
/* Reset timer */
- return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq, cpu_ptimer_irq);
+ return kvm_timer_vcpu_reset(vcpu);
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 0fe27024a2e1..77862881ae86 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -56,7 +56,8 @@
*/
static bool read_from_write_only(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *params)
+ struct sys_reg_params *params,
+ const struct sys_reg_desc *r)
{
WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
print_sys_reg_instr(params);
@@ -64,6 +65,16 @@ static bool read_from_write_only(struct kvm_vcpu *vcpu,
return false;
}
+static bool write_to_read_only(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *params,
+ const struct sys_reg_desc *r)
+{
+ WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
+ print_sys_reg_instr(params);
+ kvm_inject_undefined(vcpu);
+ return false;
+}
+
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;
@@ -93,7 +104,7 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
if (!p->is_write)
- return read_from_write_only(vcpu, p);
+ return read_from_write_only(vcpu, p, r);
kvm_set_way_flush(vcpu);
return true;
@@ -135,7 +146,7 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
if (!p->is_write)
- return read_from_write_only(vcpu, p);
+ return read_from_write_only(vcpu, p, r);
vgic_v3_dispatch_sgi(vcpu, p->regval);
@@ -773,7 +784,7 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return trap_raz_wi(vcpu, p, r);
if (!p->is_write)
- return read_from_write_only(vcpu, p);
+ return read_from_write_only(vcpu, p, r);
if (pmu_write_swinc_el0_disabled(vcpu))
return false;
@@ -953,7 +964,15 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
+ { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
+ { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
+ { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
+ { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
+ { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
+ { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
+ { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
+ { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
diff --git a/arch/arm64/kvm/trace.h b/arch/arm64/kvm/trace.h
index 7fb0008c4fa3..5188c7007169 100644
--- a/arch/arm64/kvm/trace.h
+++ b/arch/arm64/kvm/trace.h
@@ -93,6 +93,8 @@ TRACE_EVENT(kvm_arm_set_dreg32,
TP_printk("%s: 0x%08x", __entry->name, __entry->value)
);
+TRACE_DEFINE_SIZEOF(__u64);
+
TRACE_EVENT(kvm_arm_set_regset,
TP_PROTO(const char *type, int len, __u64 *control, __u64 *value),
TP_ARGS(type, len, control, value),
diff --git a/arch/arm64/kvm/vgic-sys-reg-v3.c b/arch/arm64/kvm/vgic-sys-reg-v3.c
index 6260b69e5622..116786d2e8e8 100644
--- a/arch/arm64/kvm/vgic-sys-reg-v3.c
+++ b/arch/arm64/kvm/vgic-sys-reg-v3.c
@@ -268,36 +268,21 @@ static bool access_gic_sre(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
- /* ICC_PMR_EL1 */
- { Op0(3), Op1(0), CRn(4), CRm(6), Op2(0), access_gic_pmr },
- /* ICC_BPR0_EL1 */
- { Op0(3), Op1(0), CRn(12), CRm(8), Op2(3), access_gic_bpr0 },
- /* ICC_AP0R0_EL1 */
- { Op0(3), Op1(0), CRn(12), CRm(8), Op2(4), access_gic_ap0r },
- /* ICC_AP0R1_EL1 */
- { Op0(3), Op1(0), CRn(12), CRm(8), Op2(5), access_gic_ap0r },
- /* ICC_AP0R2_EL1 */
- { Op0(3), Op1(0), CRn(12), CRm(8), Op2(6), access_gic_ap0r },
- /* ICC_AP0R3_EL1 */
- { Op0(3), Op1(0), CRn(12), CRm(8), Op2(7), access_gic_ap0r },
- /* ICC_AP1R0_EL1 */
- { Op0(3), Op1(0), CRn(12), CRm(9), Op2(0), access_gic_ap1r },
- /* ICC_AP1R1_EL1 */
- { Op0(3), Op1(0), CRn(12), CRm(9), Op2(1), access_gic_ap1r },
- /* ICC_AP1R2_EL1 */
- { Op0(3), Op1(0), CRn(12), CRm(9), Op2(2), access_gic_ap1r },
- /* ICC_AP1R3_EL1 */
- { Op0(3), Op1(0), CRn(12), CRm(9), Op2(3), access_gic_ap1r },
- /* ICC_BPR1_EL1 */
- { Op0(3), Op1(0), CRn(12), CRm(12), Op2(3), access_gic_bpr1 },
- /* ICC_CTLR_EL1 */
- { Op0(3), Op1(0), CRn(12), CRm(12), Op2(4), access_gic_ctlr },
- /* ICC_SRE_EL1 */
- { Op0(3), Op1(0), CRn(12), CRm(12), Op2(5), access_gic_sre },
- /* ICC_IGRPEN0_EL1 */
- { Op0(3), Op1(0), CRn(12), CRm(12), Op2(6), access_gic_grpen0 },
- /* ICC_GRPEN1_EL1 */
- { Op0(3), Op1(0), CRn(12), CRm(12), Op2(7), access_gic_grpen1 },
+ { SYS_DESC(SYS_ICC_PMR_EL1), access_gic_pmr },
+ { SYS_DESC(SYS_ICC_BPR0_EL1), access_gic_bpr0 },
+ { SYS_DESC(SYS_ICC_AP0R0_EL1), access_gic_ap0r },
+ { SYS_DESC(SYS_ICC_AP0R1_EL1), access_gic_ap0r },
+ { SYS_DESC(SYS_ICC_AP0R2_EL1), access_gic_ap0r },
+ { SYS_DESC(SYS_ICC_AP0R3_EL1), access_gic_ap0r },
+ { SYS_DESC(SYS_ICC_AP1R0_EL1), access_gic_ap1r },
+ { SYS_DESC(SYS_ICC_AP1R1_EL1), access_gic_ap1r },
+ { SYS_DESC(SYS_ICC_AP1R2_EL1), access_gic_ap1r },
+ { SYS_DESC(SYS_ICC_AP1R3_EL1), access_gic_ap1r },
+ { SYS_DESC(SYS_ICC_BPR1_EL1), access_gic_bpr1 },
+ { SYS_DESC(SYS_ICC_CTLR_EL1), access_gic_ctlr },
+ { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
+ { SYS_DESC(SYS_ICC_IGRPEN0_EL1), access_gic_grpen0 },
+ { SYS_DESC(SYS_ICC_IGRPEN1_EL1), access_gic_grpen1 },
};
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 3e340b625436..e90cd1db42a8 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -175,7 +175,6 @@ static void *__dma_alloc(struct device *dev, size_t size,
no_map:
__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
- *dma_handle = DMA_ERROR_CODE;
return NULL;
}
@@ -478,7 +477,7 @@ static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- return DMA_ERROR_CODE;
+ return 0;
}
static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 69b8200b1cfd..656e0ece2289 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -42,15 +42,13 @@ int pud_huge(pud_t pud)
}
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte, size_t *pgsize)
+ pte_t *ptep, size_t *pgsize)
{
pgd_t *pgd = pgd_offset(mm, addr);
pud_t *pud;
pmd_t *pmd;
*pgsize = PAGE_SIZE;
- if (!pte_cont(pte))
- return 1;
pud = pud_offset(pgd, addr);
pmd = pmd_offset(pud, addr);
if ((pte_t *)pmd == ptep) {
@@ -65,15 +63,16 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
{
size_t pgsize;
int i;
- int ncontig = find_num_contig(mm, addr, ptep, pte, &pgsize);
+ int ncontig;
unsigned long pfn;
pgprot_t hugeprot;
- if (ncontig == 1) {
+ if (!pte_cont(pte)) {
set_pte_at(mm, addr, ptep, pte);
return;
}
+ ncontig = find_num_contig(mm, addr, ptep, &pgsize);
pfn = pte_pfn(pte);
hugeprot = __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
for (i = 0; i < ncontig; i++) {
@@ -132,7 +131,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
return pte;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
@@ -184,21 +184,19 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
if (pte_cont(*ptep)) {
int ncontig, i;
size_t pgsize;
- pte_t *cpte;
bool is_dirty = false;
- cpte = huge_pte_offset(mm, addr);
- ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
+ ncontig = find_num_contig(mm, addr, ptep, &pgsize);
/* save the 1st pte to return */
- pte = ptep_get_and_clear(mm, addr, cpte);
+ pte = ptep_get_and_clear(mm, addr, ptep);
for (i = 1, addr += pgsize; i < ncontig; ++i, addr += pgsize) {
/*
* If HW_AFDBM is enabled, then the HW could
* turn on the dirty bit for any of the page
* in the set, so check them all.
*/
- ++cpte;
- if (pte_dirty(ptep_get_and_clear(mm, addr, cpte)))
+ ++ptep;
+ if (pte_dirty(ptep_get_and_clear(mm, addr, ptep)))
is_dirty = true;
}
if (is_dirty)
@@ -214,8 +212,6 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
- pte_t *cpte;
-
if (pte_cont(pte)) {
int ncontig, i, changed = 0;
size_t pgsize = 0;
@@ -225,12 +221,11 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
__pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^
pte_val(pte));
- cpte = huge_pte_offset(vma->vm_mm, addr);
- pfn = pte_pfn(*cpte);
- ncontig = find_num_contig(vma->vm_mm, addr, cpte,
- *cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
- changed |= ptep_set_access_flags(vma, addr, cpte,
+ pfn = pte_pfn(pte);
+ ncontig = find_num_contig(vma->vm_mm, addr, ptep,
+ &pgsize);
+ for (i = 0; i < ncontig; ++i, ++ptep, addr += pgsize) {
+ changed |= ptep_set_access_flags(vma, addr, ptep,
pfn_pte(pfn,
hugeprot),
dirty);
@@ -247,13 +242,11 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
{
if (pte_cont(*ptep)) {
int ncontig, i;
- pte_t *cpte;
size_t pgsize = 0;
- cpte = huge_pte_offset(mm, addr);
- ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
- ptep_set_wrprotect(mm, addr, cpte);
+ ncontig = find_num_contig(mm, addr, ptep, &pgsize);
+ for (i = 0; i < ncontig; ++i, ++ptep, addr += pgsize)
+ ptep_set_wrprotect(mm, addr, ptep);
} else {
ptep_set_wrprotect(mm, addr, ptep);
}
@@ -264,14 +257,12 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
{
if (pte_cont(*ptep)) {
int ncontig, i;
- pte_t *cpte;
size_t pgsize = 0;
- cpte = huge_pte_offset(vma->vm_mm, addr);
- ncontig = find_num_contig(vma->vm_mm, addr, cpte,
- *cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
- ptep_clear_flush(vma, addr, cpte);
+ ncontig = find_num_contig(vma->vm_mm, addr, ptep,
+ &pgsize);
+ for (i = 0; i < ncontig; ++i, ++ptep, addr += pgsize)
+ ptep_clear_flush(vma, addr, ptep);
} else {
ptep_clear_flush(vma, addr, ptep);
}
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 3c1bd640042a..89bdb8264305 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -41,6 +41,7 @@ config BLACKFIN
select MODULES_USE_ELF_RELA
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_NMI
+ select ARCH_NO_COHERENT_DMA_MMAP
config GENERIC_CSUM
def_bool y
diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig
index ba4267f658af..3ce77f07208a 100644
--- a/arch/blackfin/configs/BF609-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF609-EZKIT_defconfig
@@ -105,7 +105,7 @@ CONFIG_SPI=y
CONFIG_SPI_ADI_V3=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_MCP23S08=y
+CONFIG_PINCTRL_MCP23S08=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_BFIN_WDT=y
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index f54a34f31cea..45da4bcb050e 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -194,13 +194,6 @@ static inline long __must_check strnlen_user(const char __user *src, long n)
return strnlen((const char __force *)src, n) + 1;
}
-static inline long __must_check strlen_user(const char __user *src)
-{
- if (!access_ok(VERIFY_READ, src, 1))
- return 0;
- return strlen((const char __force *)src) + 1;
-}
-
/*
* Zero Userspace
*/
diff --git a/arch/blackfin/mach-bf527/boards/tll6527m.c b/arch/blackfin/mach-bf527/boards/tll6527m.c
index c1acce4c2e45..ce5488e8226b 100644
--- a/arch/blackfin/mach-bf527/boards/tll6527m.c
+++ b/arch/blackfin/mach-bf527/boards/tll6527m.c
@@ -348,14 +348,14 @@ static struct platform_device bfin_i2s = {
};
#endif
-#if IS_ENABLED(CONFIG_GPIO_MCP23S08)
+#if IS_ENABLED(CONFIG_PINCTRL_MCP23S08)
#include <linux/spi/mcp23s08.h>
static const struct mcp23s08_platform_data bfin_mcp23s08_sys_gpio_info = {
- .chip[0].is_present = true,
+ .spi_present_mask = BIT(0),
.base = 0x30,
};
static const struct mcp23s08_platform_data bfin_mcp23s08_usr_gpio_info = {
- .chip[2].is_present = true,
+ .spi_present_mask = BIT(2),
.base = 0x38,
};
#endif
@@ -423,7 +423,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.mode = SPI_CPHA | SPI_CPOL,
},
#endif
-#if IS_ENABLED(CONFIG_GPIO_MCP23S08)
+#if IS_ENABLED(CONFIG_PINCTRL_MCP23S08)
{
.modalias = "mcp23s08",
.platform_data = &bfin_mcp23s08_sys_gpio_info,
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index eaec7b4832a2..7528148dc492 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -22,7 +22,7 @@
#include <linux/usb/isp1362.h>
#endif
#include <linux/i2c.h>
-#include <linux/i2c/adp5588.h>
+#include <linux/platform_data/adp5588.h>
#include <linux/etherdevice.h>
#include <linux/ata_platform.h>
#include <linux/irq.h>
@@ -1995,7 +1995,7 @@ static struct adp5588_gpio_platform_data adp5588_gpio_data = {
#endif
#if IS_ENABLED(CONFIG_BACKLIGHT_ADP8870)
-#include <linux/i2c/adp8870.h>
+#include <linux/platform_data/adp8870.h>
static struct led_info adp8870_leds[] = {
{
.name = "adp8870-led7",
@@ -2047,7 +2047,7 @@ static struct adp8870_backlight_platform_data adp8870_pdata = {
#endif
#if IS_ENABLED(CONFIG_BACKLIGHT_ADP8860)
-#include <linux/i2c/adp8860.h>
+#include <linux/platform_data/adp8860.h>
static struct led_info adp8860_leds[] = {
{
.name = "adp8860-led7",
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
index 9231e5a72b93..51157a255824 100644
--- a/arch/blackfin/mach-bf609/boards/ezkit.c
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -1887,7 +1887,7 @@ static struct platform_device i2c_bfin_twi1_device = {
};
#endif
-#if IS_ENABLED(CONFIG_GPIO_MCP23S08)
+#if IS_ENABLED(CONFIG_PINCTRL_MCP23S08)
#include <linux/spi/mcp23s08.h>
static const struct mcp23s08_platform_data bfin_mcp23s08_soft_switch0 = {
.base = 120,
@@ -1929,7 +1929,7 @@ static struct i2c_board_info __initdata bfin_i2c_board_info0[] = {
I2C_BOARD_INFO("ssm2602", 0x1b),
},
#endif
-#if IS_ENABLED(CONFIG_GPIO_MCP23S08)
+#if IS_ENABLED(CONFIG_PINCTRL_MCP23S08)
{
I2C_BOARD_INFO("mcp23017", 0x21),
.platform_data = (void *)&bfin_mcp23s08_soft_switch0
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
index aca9f755e4f8..05daf1038111 100644
--- a/arch/c6x/include/asm/dma-mapping.h
+++ b/arch/c6x/include/asm/dma-mapping.h
@@ -12,11 +12,6 @@
#ifndef _ASM_C6X_DMA_MAPPING_H
#define _ASM_C6X_DMA_MAPPING_H
-/*
- * DMA errors are defined by all-bits-set in the DMA address.
- */
-#define DMA_ERROR_CODE ~0
-
extern const struct dma_map_ops c6x_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h
index 0d473aec3066..b0c6b077b632 100644
--- a/arch/cris/include/asm/uaccess.h
+++ b/arch/cris/include/asm/uaccess.h
@@ -173,12 +173,6 @@ extern unsigned long __copy_user_in(void *to, const void __user *from, unsigned
extern unsigned long __do_clear_user(void __user *to, unsigned long n);
static inline long
-__strncpy_from_user(char *dst, const char __user *src, long count)
-{
- return __do_strncpy_from_user(dst, src, count);
-}
-
-static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
long res = -EFAULT;
@@ -363,6 +357,4 @@ __clear_user(void __user *to, unsigned long n)
return __do_clear_user(to, n);
}
-#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
-
#endif /* _CRIS_UACCESS_H */
diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h
index e4e33b4cd3ae..ff9562dc6825 100644
--- a/arch/frv/include/asm/uaccess.h
+++ b/arch/frv/include/asm/uaccess.h
@@ -282,6 +282,4 @@ clear_user(void __user *to, unsigned long n)
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *src, long count);
-#define strlen_user(str) strnlen_user(str, 32767)
-
#endif /* _ASM_UACCESS_H */
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
index d3a87bd9b686..463dbc18f853 100644
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ b/arch/hexagon/include/asm/dma-mapping.h
@@ -29,8 +29,6 @@
#include <asm/io.h>
struct device;
-extern int bad_dma_address;
-#define DMA_ERROR_CODE bad_dma_address
extern const struct dma_map_ops *dma_ops;
@@ -39,9 +37,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return dma_ops;
}
-#define HAVE_ARCH_DMA_SUPPORTED 1
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index 24a9177fb897..aef02f7ca8aa 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -24,7 +24,6 @@
/*
* Page table definitions for Qualcomm Hexagon processor.
*/
-#include <linux/swap.h>
#include <asm/page.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
diff --git a/arch/hexagon/kernel/asm-offsets.c b/arch/hexagon/kernel/asm-offsets.c
index 308be68d4fb3..3980c0407aa1 100644
--- a/arch/hexagon/kernel/asm-offsets.c
+++ b/arch/hexagon/kernel/asm-offsets.c
@@ -25,7 +25,6 @@
#include <linux/compat.h>
#include <linux/types.h>
#include <linux/sched.h>
-#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/kbuild.h>
#include <asm/ptrace.h>
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index e74b65009587..546792d176a4 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -25,25 +25,16 @@
#include <linux/module.h>
#include <asm/page.h>
+#define HEXAGON_MAPPING_ERROR 0
+
const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
-int bad_dma_address; /* globals are automatically initialized to zero */
-
static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
{
return phys_to_virt((unsigned long) dma_addr);
}
-int dma_supported(struct device *dev, u64 mask)
-{
- if (mask == DMA_BIT_MASK(32))
- return 1;
- else
- return 0;
-}
-EXPORT_SYMBOL(dma_supported);
-
static struct gen_pool *coherent_pool;
@@ -181,7 +172,7 @@ static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
WARN_ON(size == 0);
if (!check_addr("map_single", dev, bus, size))
- return bad_dma_address;
+ return HEXAGON_MAPPING_ERROR;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
dma_sync(dma_addr_to_virt(bus), size, dir);
@@ -203,6 +194,11 @@ static void hexagon_sync_single_for_device(struct device *dev,
dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}
+static int hexagon_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == HEXAGON_MAPPING_ERROR;
+}
+
const struct dma_map_ops hexagon_dma_ops = {
.alloc = hexagon_dma_alloc_coherent,
.free = hexagon_free_coherent,
@@ -210,6 +206,7 @@ const struct dma_map_ops hexagon_dma_ops = {
.map_page = hexagon_map_page,
.sync_single_for_cpu = hexagon_sync_single_for_cpu,
.sync_single_for_device = hexagon_sync_single_for_device,
+ .mapping_error = hexagon_mapping_error,
.is_phys = 1,
};
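The hexagon change above replaces the global bad_dma_address / DMA_ERROR_CODE convention with a per-ops .mapping_error callback. A minimal sketch of how a consumer checks the result, assuming dev, page and size already exist in the caller:

	/* dma_mapping_error() now dispatches to hexagon_mapping_error() via dma_map_ops. */
	dma_addr_t bus = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, bus))
		return -ENOMEM;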
diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c
index 00bcad9cbd8f..aa248f595431 100644
--- a/arch/hexagon/kernel/hexagon_ksyms.c
+++ b/arch/hexagon/kernel/hexagon_ksyms.c
@@ -40,7 +40,6 @@ EXPORT_SYMBOL(memset);
/* Additional variables */
EXPORT_SYMBOL(__phys_offset);
EXPORT_SYMBOL(_dflt_cache_att);
-EXPORT_SYMBOL(bad_dma_address);
#define DECLARE_EXPORT(name) \
extern void name(void); EXPORT_SYMBOL(name)
diff --git a/arch/hexagon/mm/vm_tlb.c b/arch/hexagon/mm/vm_tlb.c
index 9647d00cb761..b474065533ce 100644
--- a/arch/hexagon/mm/vm_tlb.c
+++ b/arch/hexagon/mm/vm_tlb.c
@@ -24,6 +24,7 @@
* be instantiated for it, differently from a native build.
*/
#include <linux/mm.h>
+#include <linux/sched.h>
#include <asm/page.h>
#include <asm/hexagon_vm.h>
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 73ec3c6f4cfe..3ce5ab4339f3 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -12,8 +12,6 @@
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-#define DMA_ERROR_CODE 0
-
extern const struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 82a7646c4416..b2106b01e84f 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -277,18 +277,6 @@ extern long __must_check __strncpy_from_user (char *to, const char __user *from,
__sfu_ret; \
})
-/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
-extern unsigned long __strlen_user (const char __user *);
-
-#define strlen_user(str) \
-({ \
- const char __user *__su_str = (str); \
- unsigned long __su_ret = 0; \
- if (__access_ok(__su_str, 0)) \
- __su_ret = __strlen_user(__su_str); \
- __su_ret; \
-})
-
/*
* Returns: 0 if exception before NUL or reaching the supplied limit
* (N), a value greater than N if the limit would be exceeded, else
diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate
index a32903ada016..7da7c65a9bb1 100644
--- a/arch/ia64/kernel/Makefile.gate
+++ b/arch/ia64/kernel/Makefile.gate
@@ -1,8 +1,8 @@
# The gate DSO image is built using a special linker script.
-targets += gate.so gate-syms.o
+targets += gate.so gate.lds gate.o gate-dummy.o
-extra-y += gate.so gate-syms.o gate.lds gate.o
+obj-y += gate-syms.o
CPPFLAGS_gate.lds := -P -C -U$(ARCH)
@@ -14,13 +14,14 @@ GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
$(call if_changed,gate)
-$(obj)/built-in.o: $(obj)/gate-syms.o
-$(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o
-
-GATECFLAGS_gate-syms.o = -r
-$(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE
+GATECFLAGS_gate-dummy.o = -r
+$(obj)/gate-dummy.o: $(obj)/gate.lds $(obj)/gate.o FORCE
$(call if_changed,gate)
+LDFLAGS_gate-syms.o := -r -R
+$(obj)/gate-syms.o: $(obj)/gate-dummy.o FORCE
+ $(call if_changed,ld)
+
# gate-data.o contains the gate DSO image as data in section .data..gate.
# We must build gate.so before we can assemble it.
# Note: kbuild does not track this dependency due to usage of .incbin
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index 0a40b14407b1..1a36a3a39624 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -5,7 +5,7 @@
lib-y := io.o __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \
checksum.o clear_page.o csum_partial_copy.o \
- clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \
+ clear_user.o strncpy_from_user.o strnlen_user.o \
flush.o ip_fast_csum.o do_csum.o \
memset.o strlen.o xor.o
diff --git a/arch/ia64/lib/strlen_user.S b/arch/ia64/lib/strlen_user.S
deleted file mode 100644
index 9d257684e733..000000000000
--- a/arch/ia64/lib/strlen_user.S
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Optimized version of the strlen_user() function
- *
- * Inputs:
- * in0 address of buffer
- *
- * Outputs:
- * ret0 0 in case of fault, strlen(buffer)+1 otherwise
- *
- * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co
- * David Mosberger-Tang <[email protected]>
- * Stephane Eranian <[email protected]>
- *
- * 01/19/99 S.Eranian heavily enhanced version (see details below)
- * 09/24/99 S.Eranian added speculation recovery code
- */
-
-#include <asm/asmmacro.h>
-#include <asm/export.h>
-
-//
-// int strlen_user(char *)
-// ------------------------
-// Returns:
-// - length of string + 1
-// - 0 in case an exception is raised
-//
-// This is an enhanced version of the basic strlen_user. it includes a
-// combination of compute zero index (czx), parallel comparisons, speculative
-// loads and loop unroll using rotating registers.
-//
-// General Ideas about the algorithm:
-// The goal is to look at the string in chunks of 8 bytes.
-// so we need to do a few extra checks at the beginning because the
-// string may not be 8-byte aligned. In this case we load the 8byte
-// quantity which includes the start of the string and mask the unused
-// bytes with 0xff to avoid confusing czx.
-// We use speculative loads and software pipelining to hide memory
-// latency and do read ahead safely. This way we defer any exception.
-//
-// Because we don't want the kernel to be relying on particular
-// settings of the DCR register, we provide recovery code in case
-// speculation fails. The recovery code is going to "redo" the work using
-// only normal loads. If we still get a fault then we return an
-// error (ret0=0). Otherwise we return the strlen+1 as usual.
-// The fact that speculation may fail can be caused, for instance, by
-// the DCR.dm bit being set. In this case TLB misses are deferred, i.e.,
-// a NaT bit will be set if the translation is not present. The normal
-// load, on the other hand, will cause the translation to be inserted
-// if the mapping exists.
-//
-// It should be noted that we execute recovery code only when we need
-// to use the data that has been speculatively loaded: we don't execute
-// recovery code on pure read ahead data.
-//
-// Remarks:
-// - the cmp r0,r0 is used as a fast way to initialize a predicate
-// register to 1. This is required to make sure that we get the parallel
-// compare correct.
-//
-// - we don't use the epilogue counter to exit the loop but we need to set
-// it to zero beforehand.
-//
-// - after the loop we must test for Nat values because neither the
-// czx nor cmp instruction raise a NaT consumption fault. We must be
-// careful not to look too far for a Nat for which we don't care.
-// For instance we don't need to look at a NaT in val2 if the zero byte
-// was in val1.
-//
-// - Clearly performance tuning is required.
-//
-
-#define saved_pfs r11
-#define tmp r10
-#define base r16
-#define orig r17
-#define saved_pr r18
-#define src r19
-#define mask r20
-#define val r21
-#define val1 r22
-#define val2 r23
-
-GLOBAL_ENTRY(__strlen_user)
- .prologue
- .save ar.pfs, saved_pfs
- alloc saved_pfs=ar.pfs,11,0,0,8
-
- .rotr v[2], w[2] // declares our 4 aliases
-
- extr.u tmp=in0,0,3 // tmp=least significant 3 bits
- mov orig=in0 // keep trackof initial byte address
- dep src=0,in0,0,3 // src=8byte-aligned in0 address
- .save pr, saved_pr
- mov saved_pr=pr // preserve predicates (rotation)
- ;;
-
- .body
-
- ld8.s v[1]=[src],8 // load the initial 8bytes (must speculate)
- shl tmp=tmp,3 // multiply by 8bits/byte
- mov mask=-1 // our mask
- ;;
- ld8.s w[1]=[src],8 // load next 8 bytes in 2nd pipeline
- cmp.eq p6,p0=r0,r0 // sets p6 (required because of // cmp.and)
- sub tmp=64,tmp // how many bits to shift our mask on the right
- ;;
- shr.u mask=mask,tmp // zero enough bits to hold v[1] valuable part
- mov ar.ec=r0 // clear epilogue counter (saved in ar.pfs)
- ;;
- add base=-16,src // keep track of aligned base
- chk.s v[1], .recover // if already NaT, then directly skip to recover
- or v[1]=v[1],mask // now we have a safe initial byte pattern
- ;;
-1:
- ld8.s v[0]=[src],8 // speculatively load next
- czx1.r val1=v[1] // search 0 byte from right
- czx1.r val2=w[1] // search 0 byte from right following 8bytes
- ;;
- ld8.s w[0]=[src],8 // speculatively load next to next
- cmp.eq.and p6,p0=8,val1 // p6 = p6 and val1==8
- cmp.eq.and p6,p0=8,val2 // p6 = p6 and mask==8
-(p6) br.wtop.dptk.few 1b // loop until p6 == 0
- ;;
- //
- // We must return try the recovery code iff
- // val1_is_nat || (val1==8 && val2_is_nat)
- //
- // XXX Fixme
- // - there must be a better way of doing the test
- //
- cmp.eq p8,p9=8,val1 // p6 = val1 had zero (disambiguate)
- tnat.nz p6,p7=val1 // test NaT on val1
-(p6) br.cond.spnt .recover // jump to recovery if val1 is NaT
- ;;
- //
- // if we come here p7 is true, i.e., initialized for // cmp
- //
- cmp.eq.and p7,p0=8,val1// val1==8?
- tnat.nz.and p7,p0=val2 // test NaT if val2
-(p7) br.cond.spnt .recover // jump to recovery if val2 is NaT
- ;;
-(p8) mov val1=val2 // val2 contains the value
-(p8) adds src=-16,src // correct position when 3 ahead
-(p9) adds src=-24,src // correct position when 4 ahead
- ;;
- sub ret0=src,orig // distance from origin
- sub tmp=7,val1 // 7=8-1 because this strlen returns strlen+1
- mov pr=saved_pr,0xffffffffffff0000
- ;;
- sub ret0=ret0,tmp // length=now - back -1
- mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
- br.ret.sptk.many rp // end of normal execution
-
- //
- // Outlined recovery code when speculation failed
- //
- // This time we don't use speculation and rely on the normal exception
- // mechanism. that's why the loop is not as good as the previous one
- // because read ahead is not possible
- //
- // XXX Fixme
- // - today we restart from the beginning of the string instead
- // of trying to continue where we left off.
- //
-.recover:
- EX(.Lexit1, ld8 val=[base],8) // load the initial bytes
- ;;
- or val=val,mask // remask first bytes
- cmp.eq p0,p6=r0,r0 // nullify first ld8 in loop
- ;;
- //
- // ar.ec is still zero here
- //
-2:
- EX(.Lexit1, (p6) ld8 val=[base],8)
- ;;
- czx1.r val1=val // search 0 byte from right
- ;;
- cmp.eq p6,p0=8,val1 // val1==8 ?
-(p6) br.wtop.dptk.few 2b // loop until p6 == 0
- ;;
- sub ret0=base,orig // distance from base
- sub tmp=7,val1 // 7=8-1 because this strlen returns strlen+1
- mov pr=saved_pr,0xffffffffffff0000
- ;;
- sub ret0=ret0,tmp // length=now - back -1
- mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
- br.ret.sptk.many rp // end of successful recovery code
-
- //
- // We failed even on the normal load (called from exception handler)
- //
-.Lexit1:
- mov ret0=0
- mov pr=saved_pr,0xffffffffffff0000
- mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
- br.ret.sptk.many rp
-END(__strlen_user)
-EXPORT_SYMBOL(__strlen_user)
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 85de86d36fdf..ae35140332f7 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -44,7 +44,7 @@ huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
}
pte_t *
-huge_pte_offset (struct mm_struct *mm, unsigned long addr)
+huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
unsigned long taddr = htlbpage_to_page(addr);
pgd_t *pgd;
@@ -92,7 +92,7 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int writ
if (REGION_NUMBER(addr) != RGN_HPAGE)
return ERR_PTR(-EINVAL);
- ptep = huge_pte_offset(mm, addr);
+ ptep = huge_pte_offset(mm, addr, HPAGE_SIZE);
if (!ptep || pte_none(*ptep))
return NULL;
page = pte_page(*ptep);
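huge_pte_offset() now takes the huge page size as a third argument so each architecture can walk to the correct page-table level. A hedged example of the updated caller pattern, assuming a hugetlb VMA is at hand:

	/* The size argument is typically derived from the VMA's hstate. */
	pte_t *ptep = huge_pte_offset(vma->vm_mm, addr,
				      huge_page_size(hstate_vma(vma)));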
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 8f3efa682ee8..a4e8d6bd9cfa 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -646,20 +646,13 @@ mem_init (void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
+int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
- pg_data_t *pgdat;
- struct zone *zone;
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
- pgdat = NODE_DATA(nid);
-
- zone = pgdat->node_zones +
- zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
- ret = __add_pages(nid, zone, start_pfn, nr_pages);
-
+ ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
if (ret)
printk("%s: Problem encountered in __add_pages() as ret=%d\n",
__func__, ret);
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 95474460b367..87cde1e4b38c 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -19,6 +19,7 @@ config M32R
select HAVE_DEBUG_STACKOVERFLOW
select CPU_NO_EFFICIENT_FFS
select DMA_NOOP_OPS
+ select ARCH_NO_COHERENT_DMA_MMAP if !MMU
config SBUS
bool
diff --git a/arch/m32r/include/asm/dma-mapping.h b/arch/m32r/include/asm/dma-mapping.h
index c01d9f52d228..aff3ae8b62f7 100644
--- a/arch/m32r/include/asm/dma-mapping.h
+++ b/arch/m32r/include/asm/dma-mapping.h
@@ -8,8 +8,6 @@
#include <linux/dma-debug.h>
#include <linux/io.h>
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
return &dma_noop_ops;
diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
index 07be349c00ad..496c4716dbc8 100644
--- a/arch/m32r/include/asm/uaccess.h
+++ b/arch/m32r/include/asm/uaccess.h
@@ -482,8 +482,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
long __must_check strncpy_from_user(char *dst, const char __user *src,
long count);
-long __must_check __strncpy_from_user(char *dst,
- const char __user *src, long count);
/**
* __clear_user: - Zero a block of memory in user space, with less checking.
@@ -511,22 +509,6 @@ unsigned long __clear_user(void __user *mem, unsigned long len);
*/
unsigned long clear_user(void __user *mem, unsigned long len);
-/**
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
long strnlen_user(const char __user *str, long n);
#endif /* _ASM_M32R_UACCESS_H */
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h
index 59db80193454..de602533a3bd 100644
--- a/arch/m32r/include/asm/unistd.h
+++ b/arch/m32r/include/asm/unistd.h
@@ -18,7 +18,6 @@
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
#define __ARCH_WANT_SYS_LLSEEK
-#define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_FORK
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index a4d43b5cc102..68da6b800453 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -23,7 +23,6 @@ EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(strncpy_from_user);
-EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(clear_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(strnlen_user);
diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
index b3ef2c899f96..b723b11107c7 100644
--- a/arch/m32r/lib/usercopy.c
+++ b/arch/m32r/lib/usercopy.c
@@ -89,14 +89,6 @@ do { \
#endif /* CONFIG_ISA_DUAL_ISSUE */
long
-__strncpy_from_user(char *dst, const char __user *src, long count)
-{
- long res;
- __do_strncpy_from_user(dst, src, count, res);
- return res;
-}
-
-long
strncpy_from_user(char *dst, const char __user *src, long count)
{
long res = -EFAULT;
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index d140206d5d29..5abb548f0e70 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -2,6 +2,7 @@ config M68K
bool
default y
select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
+ select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select HAVE_IDE
select HAVE_AOUT if MMU
select HAVE_DEBUG_BUGVERBOSE
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h
index ef856ffeffdf..1da1e23de74c 100644
--- a/arch/m68k/include/asm/uaccess_mm.h
+++ b/arch/m68k/include/asm/uaccess_mm.h
@@ -378,7 +378,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
(uaccess_kernel() ? ~0UL : TASK_SIZE)
extern long strncpy_from_user(char *dst, const char __user *src, long count);
-extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
unsigned long __clear_user(void __user *to, unsigned long n);
diff --git a/arch/m68k/include/asm/uaccess_no.h b/arch/m68k/include/asm/uaccess_no.h
index e482c3899ff1..53d7b792a43d 100644
--- a/arch/m68k/include/asm/uaccess_no.h
+++ b/arch/m68k/include/asm/uaccess_no.h
@@ -141,8 +141,6 @@ static inline long strnlen_user(const char *src, long n)
return(strlen(src) + 1); /* DAVIDM make safer */
}
-#define strlen_user(str) strnlen_user(str, 32767)
-
/*
* Zero Userspace
*/
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 9c8fbf8fb5aa..47469e26641a 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -188,8 +188,6 @@ strncpy_from_user(char *dst, const char __user *src, long count)
*/
extern long __must_check strnlen_user(const char __user *src, long count);
-#define strlen_user(str) strnlen_user(str, 32767)
-
extern unsigned long raw_copy_from_user(void *to, const void __user *from,
unsigned long n);
extern unsigned long raw_copy_to_user(void __user *to, const void *from,
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
index db1b7da91e4f..67fd53e2935a 100644
--- a/arch/metag/mm/hugetlbpage.c
+++ b/arch/metag/mm/hugetlbpage.c
@@ -74,7 +74,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
return pte;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 8e47121b8b8b..4ed8ebf33509 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -2,6 +2,7 @@ config MICROBLAZE
def_bool y
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
select TIMER_OF
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 3fad5e722a66..e15cd2f76e23 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -28,8 +28,6 @@
#include <asm/io.h>
#include <asm/cacheflush.h>
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
#define __dma_alloc_coherent(dev, gfp, size, handle) NULL
#define __dma_free_coherent(size, addr) ((void)0)
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 38f2c9ccef10..81f16aadbf9e 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -355,14 +355,12 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
*/
extern int __strncpy_user(char *to, const char __user *from, int len);
-#define __strncpy_from_user __strncpy_user
-
static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
if (!access_ok(VERIFY_READ, src, 1))
return -EFAULT;
- return __strncpy_from_user(dst, src, count);
+ return __strncpy_user(dst, src, count);
}
/*
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2828ecde133d..45bcd1cfcec0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -364,6 +364,7 @@ config MACH_INGENIC
select SYS_SUPPORTS_ZBOOT_UART16550
select DMA_NONCOHERENT
select IRQ_MIPS_CPU
+ select PINCTRL
select GPIOLIB
select COMMON_CLK
select GENERIC_IRQ_CHIP
diff --git a/arch/mips/ath79/mach-pb44.c b/arch/mips/ath79/mach-pb44.c
index 67b980d94fb7..be78298dffb4 100644
--- a/arch/mips/ath79/mach-pb44.c
+++ b/arch/mips/ath79/mach-pb44.c
@@ -12,7 +12,7 @@
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
-#include <linux/i2c/pcf857x.h>
+#include <linux/platform_data/pcf857x.h>
#include "machtypes.h"
#include "dev-gpio-buttons.h"
diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
index 1652d8d60b1e..fd138d9978c1 100644
--- a/arch/mips/boot/dts/ingenic/ci20.dts
+++ b/arch/mips/boot/dts/ingenic/ci20.dts
@@ -29,18 +29,30 @@
&uart0 {
status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_uart0>;
};
&uart1 {
status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_uart1>;
};
&uart3 {
status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_uart2>;
};
&uart4 {
status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_uart4>;
};
&nemc {
@@ -61,6 +73,13 @@
ingenic,nemc-tAW = <15>;
ingenic,nemc-tSTRV = <100>;
+ /*
+ * Only CLE/ALE are needed for the devices that are connected, rather
+ * than the full address line set.
+ */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_nemc>;
+
nand@1 {
reg = <1>;
@@ -69,6 +88,9 @@
nand-ecc-mode = "hw";
nand-on-flash-bbt;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_nemc_cs1>;
+
partitions {
compatible = "fixed-partitions";
#address-cells = <2>;
@@ -106,3 +128,41 @@
&bch {
status = "okay";
};
+
+&pinctrl {
+ pins_uart0: uart0 {
+ function = "uart0";
+ groups = "uart0-data";
+ bias-disable;
+ };
+
+ pins_uart1: uart1 {
+ function = "uart1";
+ groups = "uart1-data";
+ bias-disable;
+ };
+
+ pins_uart2: uart2 {
+ function = "uart2";
+ groups = "uart2-data", "uart2-hwflow";
+ bias-disable;
+ };
+
+ pins_uart4: uart4 {
+ function = "uart4";
+ groups = "uart4-data";
+ bias-disable;
+ };
+
+ pins_nemc: nemc {
+ function = "nemc";
+ groups = "nemc-data", "nemc-cle-ale", "nemc-rd-we", "nemc-frd-fwe";
+ bias-disable;
+ };
+
+ pins_nemc_cs1: nemc-cs1 {
+ function = "nemc-cs1";
+ groups = "nemc-cs1";
+ bias-disable;
+ };
+};
diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
index 3e1587f1f77a..2ca7ce7481f1 100644
--- a/arch/mips/boot/dts/ingenic/jz4740.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
@@ -55,6 +55,74 @@
clock-names = "rtc";
};
+ pinctrl: pin-controller@10010000 {
+ compatible = "ingenic,jz4740-pinctrl";
+ reg = <0x10010000 0x400>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpa: gpio@0 {
+ compatible = "ingenic,jz4740-gpio";
+ reg = <0>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <28>;
+ };
+
+ gpb: gpio@1 {
+ compatible = "ingenic,jz4740-gpio";
+ reg = <1>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 32 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <27>;
+ };
+
+ gpc: gpio@2 {
+ compatible = "ingenic,jz4740-gpio";
+ reg = <2>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 64 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <26>;
+ };
+
+ gpd: gpio@3 {
+ compatible = "ingenic,jz4740-gpio";
+ reg = <3>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 96 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <25>;
+ };
+ };
+
uart0: serial@10030000 {
compatible = "ingenic,jz4740-uart";
reg = <0x10030000 0x100>;
diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi
index b868b429add2..4853ef67b3ab 100644
--- a/arch/mips/boot/dts/ingenic/jz4780.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi
@@ -44,6 +44,104 @@
#clock-cells = <1>;
};
+ pinctrl: pin-controller@10010000 {
+ compatible = "ingenic,jz4780-pinctrl";
+ reg = <0x10010000 0x600>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpa: gpio@0 {
+ compatible = "ingenic,jz4780-gpio";
+ reg = <0>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 0 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <17>;
+ };
+
+ gpb: gpio@1 {
+ compatible = "ingenic,jz4780-gpio";
+ reg = <1>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 32 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <16>;
+ };
+
+ gpc: gpio@2 {
+ compatible = "ingenic,jz4780-gpio";
+ reg = <2>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 64 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <15>;
+ };
+
+ gpd: gpio@3 {
+ compatible = "ingenic,jz4780-gpio";
+ reg = <3>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 96 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <14>;
+ };
+
+ gpe: gpio@4 {
+ compatible = "ingenic,jz4780-gpio";
+ reg = <4>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 128 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <13>;
+ };
+
+ gpf: gpio@5 {
+ compatible = "ingenic,jz4780-gpio";
+ reg = <5>;
+
+ gpio-controller;
+ gpio-ranges = <&pinctrl 0 160 32>;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <12>;
+ };
+ };
+
uart0: serial@10030000 {
compatible = "ingenic,jz4780-uart";
reg = <0x10030000 0x100>;
diff --git a/arch/mips/boot/dts/ingenic/qi_lb60.dts b/arch/mips/boot/dts/ingenic/qi_lb60.dts
index be1a7d3a3e1b..b715ee2ac2ee 100644
--- a/arch/mips/boot/dts/ingenic/qi_lb60.dts
+++ b/arch/mips/boot/dts/ingenic/qi_lb60.dts
@@ -17,3 +17,16 @@
&rtc_dev {
system-power-controller;
};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_uart0>;
+};
+
+&pinctrl {
+ pins_uart0: uart0 {
+ function = "uart0";
+ groups = "uart0-data";
+ bias-disable;
+ };
+};
diff --git a/arch/mips/include/asm/mach-jz4740/gpio.h b/arch/mips/include/asm/mach-jz4740/gpio.h
index 7c7708a23baa..fd847c984701 100644
--- a/arch/mips/include/asm/mach-jz4740/gpio.h
+++ b/arch/mips/include/asm/mach-jz4740/gpio.h
@@ -16,380 +16,9 @@
#ifndef _JZ_GPIO_H
#define _JZ_GPIO_H
-#include <linux/types.h>
-
-enum jz_gpio_function {
- JZ_GPIO_FUNC_NONE,
- JZ_GPIO_FUNC1,
- JZ_GPIO_FUNC2,
- JZ_GPIO_FUNC3,
-};
-
-/*
- Usually a driver for a SoC component has to request several gpio pins and
- configure them as function pins.
- jz_gpio_bulk_request can be used to ease this process.
- Usually one would do something like:
-
- static const struct jz_gpio_bulk_request i2c_pins[] = {
- JZ_GPIO_BULK_PIN(I2C_SDA),
- JZ_GPIO_BULK_PIN(I2C_SCK),
- };
-
- inside the probe function:
-
- ret = jz_gpio_bulk_request(i2c_pins, ARRAY_SIZE(i2c_pins));
- if (ret) {
- ...
-
- inside the remove function:
-
- jz_gpio_bulk_free(i2c_pins, ARRAY_SIZE(i2c_pins));
-
-*/
-
-struct jz_gpio_bulk_request {
- int gpio;
- const char *name;
- enum jz_gpio_function function;
-};
-
-#define JZ_GPIO_BULK_PIN(pin) { \
- .gpio = JZ_GPIO_ ## pin, \
- .name = #pin, \
- .function = JZ_GPIO_FUNC_ ## pin \
-}
-
-int jz_gpio_bulk_request(const struct jz_gpio_bulk_request *request, size_t num);
-void jz_gpio_bulk_free(const struct jz_gpio_bulk_request *request, size_t num);
-void jz_gpio_bulk_suspend(const struct jz_gpio_bulk_request *request, size_t num);
-void jz_gpio_bulk_resume(const struct jz_gpio_bulk_request *request, size_t num);
-void jz_gpio_enable_pullup(unsigned gpio);
-void jz_gpio_disable_pullup(unsigned gpio);
-int jz_gpio_set_function(int gpio, enum jz_gpio_function function);
-
-int jz_gpio_port_direction_input(int port, uint32_t mask);
-int jz_gpio_port_direction_output(int port, uint32_t mask);
-void jz_gpio_port_set_value(int port, uint32_t value, uint32_t mask);
-uint32_t jz_gpio_port_get_value(int port, uint32_t mask);
-
#define JZ_GPIO_PORTA(x) ((x) + 32 * 0)
#define JZ_GPIO_PORTB(x) ((x) + 32 * 1)
#define JZ_GPIO_PORTC(x) ((x) + 32 * 2)
#define JZ_GPIO_PORTD(x) ((x) + 32 * 3)
-/* Port A function pins */
-#define JZ_GPIO_MEM_DATA0 JZ_GPIO_PORTA(0)
-#define JZ_GPIO_MEM_DATA1 JZ_GPIO_PORTA(1)
-#define JZ_GPIO_MEM_DATA2 JZ_GPIO_PORTA(2)
-#define JZ_GPIO_MEM_DATA3 JZ_GPIO_PORTA(3)
-#define JZ_GPIO_MEM_DATA4 JZ_GPIO_PORTA(4)
-#define JZ_GPIO_MEM_DATA5 JZ_GPIO_PORTA(5)
-#define JZ_GPIO_MEM_DATA6 JZ_GPIO_PORTA(6)
-#define JZ_GPIO_MEM_DATA7 JZ_GPIO_PORTA(7)
-#define JZ_GPIO_MEM_DATA8 JZ_GPIO_PORTA(8)
-#define JZ_GPIO_MEM_DATA9 JZ_GPIO_PORTA(9)
-#define JZ_GPIO_MEM_DATA10 JZ_GPIO_PORTA(10)
-#define JZ_GPIO_MEM_DATA11 JZ_GPIO_PORTA(11)
-#define JZ_GPIO_MEM_DATA12 JZ_GPIO_PORTA(12)
-#define JZ_GPIO_MEM_DATA13 JZ_GPIO_PORTA(13)
-#define JZ_GPIO_MEM_DATA14 JZ_GPIO_PORTA(14)
-#define JZ_GPIO_MEM_DATA15 JZ_GPIO_PORTA(15)
-#define JZ_GPIO_MEM_DATA16 JZ_GPIO_PORTA(16)
-#define JZ_GPIO_MEM_DATA17 JZ_GPIO_PORTA(17)
-#define JZ_GPIO_MEM_DATA18 JZ_GPIO_PORTA(18)
-#define JZ_GPIO_MEM_DATA19 JZ_GPIO_PORTA(19)
-#define JZ_GPIO_MEM_DATA20 JZ_GPIO_PORTA(20)
-#define JZ_GPIO_MEM_DATA21 JZ_GPIO_PORTA(21)
-#define JZ_GPIO_MEM_DATA22 JZ_GPIO_PORTA(22)
-#define JZ_GPIO_MEM_DATA23 JZ_GPIO_PORTA(23)
-#define JZ_GPIO_MEM_DATA24 JZ_GPIO_PORTA(24)
-#define JZ_GPIO_MEM_DATA25 JZ_GPIO_PORTA(25)
-#define JZ_GPIO_MEM_DATA26 JZ_GPIO_PORTA(26)
-#define JZ_GPIO_MEM_DATA27 JZ_GPIO_PORTA(27)
-#define JZ_GPIO_MEM_DATA28 JZ_GPIO_PORTA(28)
-#define JZ_GPIO_MEM_DATA29 JZ_GPIO_PORTA(29)
-#define JZ_GPIO_MEM_DATA30 JZ_GPIO_PORTA(30)
-#define JZ_GPIO_MEM_DATA31 JZ_GPIO_PORTA(31)
-
-#define JZ_GPIO_FUNC_MEM_DATA0 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA1 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA2 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA3 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA4 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA5 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA6 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA7 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA8 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA9 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA10 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA11 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA12 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA13 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA14 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA15 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA16 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA17 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA18 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA19 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA20 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA21 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA22 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA23 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA24 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA25 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA26 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA27 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA28 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA29 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA30 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DATA31 JZ_GPIO_FUNC1
-
-/* Port B function pins */
-#define JZ_GPIO_MEM_ADDR0 JZ_GPIO_PORTB(0)
-#define JZ_GPIO_MEM_ADDR1 JZ_GPIO_PORTB(1)
-#define JZ_GPIO_MEM_ADDR2 JZ_GPIO_PORTB(2)
-#define JZ_GPIO_MEM_ADDR3 JZ_GPIO_PORTB(3)
-#define JZ_GPIO_MEM_ADDR4 JZ_GPIO_PORTB(4)
-#define JZ_GPIO_MEM_ADDR5 JZ_GPIO_PORTB(5)
-#define JZ_GPIO_MEM_ADDR6 JZ_GPIO_PORTB(6)
-#define JZ_GPIO_MEM_ADDR7 JZ_GPIO_PORTB(7)
-#define JZ_GPIO_MEM_ADDR8 JZ_GPIO_PORTB(8)
-#define JZ_GPIO_MEM_ADDR9 JZ_GPIO_PORTB(9)
-#define JZ_GPIO_MEM_ADDR10 JZ_GPIO_PORTB(10)
-#define JZ_GPIO_MEM_ADDR11 JZ_GPIO_PORTB(11)
-#define JZ_GPIO_MEM_ADDR12 JZ_GPIO_PORTB(12)
-#define JZ_GPIO_MEM_ADDR13 JZ_GPIO_PORTB(13)
-#define JZ_GPIO_MEM_ADDR14 JZ_GPIO_PORTB(14)
-#define JZ_GPIO_MEM_ADDR15 JZ_GPIO_PORTB(15)
-#define JZ_GPIO_MEM_ADDR16 JZ_GPIO_PORTB(16)
-#define JZ_GPIO_LCD_CLS JZ_GPIO_PORTB(17)
-#define JZ_GPIO_LCD_SPL JZ_GPIO_PORTB(18)
-#define JZ_GPIO_MEM_DCS JZ_GPIO_PORTB(19)
-#define JZ_GPIO_MEM_RAS JZ_GPIO_PORTB(20)
-#define JZ_GPIO_MEM_CAS JZ_GPIO_PORTB(21)
-#define JZ_GPIO_MEM_SDWE JZ_GPIO_PORTB(22)
-#define JZ_GPIO_MEM_CKE JZ_GPIO_PORTB(23)
-#define JZ_GPIO_MEM_CKO JZ_GPIO_PORTB(24)
-#define JZ_GPIO_MEM_CS0 JZ_GPIO_PORTB(25)
-#define JZ_GPIO_MEM_CS1 JZ_GPIO_PORTB(26)
-#define JZ_GPIO_MEM_CS2 JZ_GPIO_PORTB(27)
-#define JZ_GPIO_MEM_CS3 JZ_GPIO_PORTB(28)
-#define JZ_GPIO_MEM_RD JZ_GPIO_PORTB(29)
-#define JZ_GPIO_MEM_WR JZ_GPIO_PORTB(30)
-#define JZ_GPIO_MEM_WE0 JZ_GPIO_PORTB(31)
-
-#define JZ_GPIO_FUNC_MEM_ADDR0 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_ADDR1 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_ADDR2 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_ADDR3 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_ADDR4 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_ADDR5 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_ADDR6 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_ADDR7 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_ADDR8 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_ADDR9 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_ADDR10 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_ADDR11 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_ADDR12 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_ADDR13 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_ADDR14 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_ADDR15 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_ADDR16 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_CLS JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_SPL JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_DCS JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_RAS JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_CAS JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_SDWE JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_CKE JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_CKO JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_CS0 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_CS1 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_CS2 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_CS3 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_RD JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_WR JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_WE0 JZ_GPIO_FUNC1
-
-
-#define JZ_GPIO_MEM_ADDR21 JZ_GPIO_PORTB(17)
-#define JZ_GPIO_MEM_ADDR22 JZ_GPIO_PORTB(18)
-
-#define JZ_GPIO_FUNC_MEM_ADDR21 JZ_GPIO_FUNC2
-#define JZ_GPIO_FUNC_MEM_ADDR22 JZ_GPIO_FUNC2
-
-/* Port C function pins */
-#define JZ_GPIO_LCD_DATA0 JZ_GPIO_PORTC(0)
-#define JZ_GPIO_LCD_DATA1 JZ_GPIO_PORTC(1)
-#define JZ_GPIO_LCD_DATA2 JZ_GPIO_PORTC(2)
-#define JZ_GPIO_LCD_DATA3 JZ_GPIO_PORTC(3)
-#define JZ_GPIO_LCD_DATA4 JZ_GPIO_PORTC(4)
-#define JZ_GPIO_LCD_DATA5 JZ_GPIO_PORTC(5)
-#define JZ_GPIO_LCD_DATA6 JZ_GPIO_PORTC(6)
-#define JZ_GPIO_LCD_DATA7 JZ_GPIO_PORTC(7)
-#define JZ_GPIO_LCD_DATA8 JZ_GPIO_PORTC(8)
-#define JZ_GPIO_LCD_DATA9 JZ_GPIO_PORTC(9)
-#define JZ_GPIO_LCD_DATA10 JZ_GPIO_PORTC(10)
-#define JZ_GPIO_LCD_DATA11 JZ_GPIO_PORTC(11)
-#define JZ_GPIO_LCD_DATA12 JZ_GPIO_PORTC(12)
-#define JZ_GPIO_LCD_DATA13 JZ_GPIO_PORTC(13)
-#define JZ_GPIO_LCD_DATA14 JZ_GPIO_PORTC(14)
-#define JZ_GPIO_LCD_DATA15 JZ_GPIO_PORTC(15)
-#define JZ_GPIO_LCD_DATA16 JZ_GPIO_PORTC(16)
-#define JZ_GPIO_LCD_DATA17 JZ_GPIO_PORTC(17)
-#define JZ_GPIO_LCD_PCLK JZ_GPIO_PORTC(18)
-#define JZ_GPIO_LCD_HSYNC JZ_GPIO_PORTC(19)
-#define JZ_GPIO_LCD_VSYNC JZ_GPIO_PORTC(20)
-#define JZ_GPIO_LCD_DE JZ_GPIO_PORTC(21)
-#define JZ_GPIO_LCD_PS JZ_GPIO_PORTC(22)
-#define JZ_GPIO_LCD_REV JZ_GPIO_PORTC(23)
-#define JZ_GPIO_MEM_WE1 JZ_GPIO_PORTC(24)
-#define JZ_GPIO_MEM_WE2 JZ_GPIO_PORTC(25)
-#define JZ_GPIO_MEM_WE3 JZ_GPIO_PORTC(26)
-#define JZ_GPIO_MEM_WAIT JZ_GPIO_PORTC(27)
-#define JZ_GPIO_MEM_FRE JZ_GPIO_PORTC(28)
-#define JZ_GPIO_MEM_FWE JZ_GPIO_PORTC(29)
-
-#define JZ_GPIO_FUNC_LCD_DATA0 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_DATA1 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_DATA2 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_DATA3 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_DATA4 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_DATA5 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_DATA6 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_DATA7 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_DATA8 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_DATA9 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_DATA10 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_DATA11 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_DATA12 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_DATA13 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_DATA14 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_DATA15 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_DATA16 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_DATA17 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_PCLK JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_VSYNC JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_HSYNC JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_DE JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_PS JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_REV JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_WE1 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_WE2 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_WE3 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_WAIT JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_FRE JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MEM_FWE JZ_GPIO_FUNC1
-
-
-#define JZ_GPIO_MEM_ADDR19 JZ_GPIO_PORTB(22)
-#define JZ_GPIO_MEM_ADDR20 JZ_GPIO_PORTB(23)
-
-#define JZ_GPIO_FUNC_MEM_ADDR19 JZ_GPIO_FUNC2
-#define JZ_GPIO_FUNC_MEM_ADDR20 JZ_GPIO_FUNC2
-
-/* Port D function pins */
-#define JZ_GPIO_CIM_DATA0 JZ_GPIO_PORTD(0)
-#define JZ_GPIO_CIM_DATA1 JZ_GPIO_PORTD(1)
-#define JZ_GPIO_CIM_DATA2 JZ_GPIO_PORTD(2)
-#define JZ_GPIO_CIM_DATA3 JZ_GPIO_PORTD(3)
-#define JZ_GPIO_CIM_DATA4 JZ_GPIO_PORTD(4)
-#define JZ_GPIO_CIM_DATA5 JZ_GPIO_PORTD(5)
-#define JZ_GPIO_CIM_DATA6 JZ_GPIO_PORTD(6)
-#define JZ_GPIO_CIM_DATA7 JZ_GPIO_PORTD(7)
-#define JZ_GPIO_MSC_CMD JZ_GPIO_PORTD(8)
-#define JZ_GPIO_MSC_CLK JZ_GPIO_PORTD(9)
-#define JZ_GPIO_MSC_DATA0 JZ_GPIO_PORTD(10)
-#define JZ_GPIO_MSC_DATA1 JZ_GPIO_PORTD(11)
-#define JZ_GPIO_MSC_DATA2 JZ_GPIO_PORTD(12)
-#define JZ_GPIO_MSC_DATA3 JZ_GPIO_PORTD(13)
-#define JZ_GPIO_CIM_MCLK JZ_GPIO_PORTD(14)
-#define JZ_GPIO_CIM_PCLK JZ_GPIO_PORTD(15)
-#define JZ_GPIO_CIM_VSYNC JZ_GPIO_PORTD(16)
-#define JZ_GPIO_CIM_HSYNC JZ_GPIO_PORTD(17)
-#define JZ_GPIO_SPI_CLK JZ_GPIO_PORTD(18)
-#define JZ_GPIO_SPI_CE0 JZ_GPIO_PORTD(19)
-#define JZ_GPIO_SPI_DT JZ_GPIO_PORTD(20)
-#define JZ_GPIO_SPI_DR JZ_GPIO_PORTD(21)
-#define JZ_GPIO_SPI_CE1 JZ_GPIO_PORTD(22)
-#define JZ_GPIO_PWM0 JZ_GPIO_PORTD(23)
-#define JZ_GPIO_PWM1 JZ_GPIO_PORTD(24)
-#define JZ_GPIO_PWM2 JZ_GPIO_PORTD(25)
-#define JZ_GPIO_PWM3 JZ_GPIO_PORTD(26)
-#define JZ_GPIO_PWM4 JZ_GPIO_PORTD(27)
-#define JZ_GPIO_PWM5 JZ_GPIO_PORTD(28)
-#define JZ_GPIO_PWM6 JZ_GPIO_PORTD(30)
-#define JZ_GPIO_PWM7 JZ_GPIO_PORTD(31)
-
-#define JZ_GPIO_FUNC_CIM_DATA JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_CIM_DATA0 JZ_GPIO_FUNC_CIM_DATA
-#define JZ_GPIO_FUNC_CIM_DATA1 JZ_GPIO_FUNC_CIM_DATA
-#define JZ_GPIO_FUNC_CIM_DATA2 JZ_GPIO_FUNC_CIM_DATA
-#define JZ_GPIO_FUNC_CIM_DATA3 JZ_GPIO_FUNC_CIM_DATA
-#define JZ_GPIO_FUNC_CIM_DATA4 JZ_GPIO_FUNC_CIM_DATA
-#define JZ_GPIO_FUNC_CIM_DATA5 JZ_GPIO_FUNC_CIM_DATA
-#define JZ_GPIO_FUNC_CIM_DATA6 JZ_GPIO_FUNC_CIM_DATA
-#define JZ_GPIO_FUNC_CIM_DATA7 JZ_GPIO_FUNC_CIM_DATA
-#define JZ_GPIO_FUNC_MSC_CMD JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MSC_CLK JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MSC_DATA JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_MSC_DATA0 JZ_GPIO_FUNC_MSC_DATA
-#define JZ_GPIO_FUNC_MSC_DATA1 JZ_GPIO_FUNC_MSC_DATA
-#define JZ_GPIO_FUNC_MSC_DATA2 JZ_GPIO_FUNC_MSC_DATA
-#define JZ_GPIO_FUNC_MSC_DATA3 JZ_GPIO_FUNC_MSC_DATA
-#define JZ_GPIO_FUNC_CIM_MCLK JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_CIM_PCLK JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_CIM_VSYNC JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_CIM_HSYNC JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_SPI_CLK JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_SPI_CE0 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_SPI_DT JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_SPI_DR JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_SPI_CE1 JZ_GPIO_FUNC1
-
-#define JZ_GPIO_FUNC_PWM JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_PWM0 JZ_GPIO_FUNC_PWM
-#define JZ_GPIO_FUNC_PWM1 JZ_GPIO_FUNC_PWM
-#define JZ_GPIO_FUNC_PWM2 JZ_GPIO_FUNC_PWM
-#define JZ_GPIO_FUNC_PWM3 JZ_GPIO_FUNC_PWM
-#define JZ_GPIO_FUNC_PWM4 JZ_GPIO_FUNC_PWM
-#define JZ_GPIO_FUNC_PWM5 JZ_GPIO_FUNC_PWM
-#define JZ_GPIO_FUNC_PWM6 JZ_GPIO_FUNC_PWM
-#define JZ_GPIO_FUNC_PWM7 JZ_GPIO_FUNC_PWM
-
-#define JZ_GPIO_MEM_SCLK_RSTN JZ_GPIO_PORTD(18)
-#define JZ_GPIO_MEM_BCLK JZ_GPIO_PORTD(19)
-#define JZ_GPIO_MEM_SDATO JZ_GPIO_PORTD(20)
-#define JZ_GPIO_MEM_SDATI JZ_GPIO_PORTD(21)
-#define JZ_GPIO_MEM_SYNC JZ_GPIO_PORTD(22)
-#define JZ_GPIO_I2C_SDA JZ_GPIO_PORTD(23)
-#define JZ_GPIO_I2C_SCK JZ_GPIO_PORTD(24)
-#define JZ_GPIO_UART0_TXD JZ_GPIO_PORTD(25)
-#define JZ_GPIO_UART0_RXD JZ_GPIO_PORTD(26)
-#define JZ_GPIO_MEM_ADDR17 JZ_GPIO_PORTD(27)
-#define JZ_GPIO_MEM_ADDR18 JZ_GPIO_PORTD(28)
-#define JZ_GPIO_UART0_CTS JZ_GPIO_PORTD(30)
-#define JZ_GPIO_UART0_RTS JZ_GPIO_PORTD(31)
-
-#define JZ_GPIO_FUNC_MEM_SCLK_RSTN JZ_GPIO_FUNC2
-#define JZ_GPIO_FUNC_MEM_BCLK JZ_GPIO_FUNC2
-#define JZ_GPIO_FUNC_MEM_SDATO JZ_GPIO_FUNC2
-#define JZ_GPIO_FUNC_MEM_SDATI JZ_GPIO_FUNC2
-#define JZ_GPIO_FUNC_MEM_SYNC JZ_GPIO_FUNC2
-#define JZ_GPIO_FUNC_I2C_SDA JZ_GPIO_FUNC2
-#define JZ_GPIO_FUNC_I2C_SCK JZ_GPIO_FUNC2
-#define JZ_GPIO_FUNC_UART0_TXD JZ_GPIO_FUNC2
-#define JZ_GPIO_FUNC_UART0_RXD JZ_GPIO_FUNC2
-#define JZ_GPIO_FUNC_MEM_ADDR17 JZ_GPIO_FUNC2
-#define JZ_GPIO_FUNC_MEM_ADDR18 JZ_GPIO_FUNC2
-#define JZ_GPIO_FUNC_UART0_CTS JZ_GPIO_FUNC2
-#define JZ_GPIO_FUNC_UART0_RTS JZ_GPIO_FUNC2
-
-#define JZ_GPIO_UART1_RXD JZ_GPIO_PORTD(30)
-#define JZ_GPIO_UART1_TXD JZ_GPIO_PORTD(31)
-
-#define JZ_GPIO_FUNC_UART1_RXD JZ_GPIO_FUNC3
-#define JZ_GPIO_FUNC_UART1_TXD JZ_GPIO_FUNC3
-
#endif
diff --git a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h b/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h
index 8a7ecb4d5c64..bf9dd9eb4ceb 100644
--- a/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h
+++ b/arch/mips/include/asm/mach-loongson64/cs5536/cs5536_pci.h
@@ -80,7 +80,6 @@ extern u32 cs5536_pci_conf_read4(int function, int reg);
#define PCI_BAR3_REG 0x1c
#define PCI_BAR4_REG 0x20
#define PCI_BAR5_REG 0x24
-#define PCI_BAR_COUNT 6
#define PCI_BAR_RANGE_MASK 0xFFFFFFFF
/* CARDBUS CIS POINTER */
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 1000c1b4c875..52f551ee492d 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -39,7 +39,6 @@ struct pci_controller {
unsigned long io_offset;
unsigned long io_map_base;
struct resource *busn_resource;
- unsigned long busn_offset;
#ifndef CONFIG_PCI_DOMAINS_GENERIC
unsigned int index;
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 99e629a590a5..9700251159b1 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -967,60 +967,6 @@ __clear_user(void __user *addr, __kernel_size_t size)
__cl_size; \
})
-extern long __strncpy_from_kernel_nocheck_asm(char *__to, const char __user *__from, long __len);
-extern long __strncpy_from_user_nocheck_asm(char *__to, const char __user *__from, long __len);
-
-/*
- * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
- * @dst: Destination address, in kernel space. This buffer must be at
- * least @count bytes long.
- * @src: Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- * Caller must check the specified block with access_ok() before calling
- * this function.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-static inline long
-__strncpy_from_user(char *__to, const char __user *__from, long __len)
-{
- long res;
-
- if (eva_kernel_access()) {
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, %2\n\t"
- "move\t$6, %3\n\t"
- __MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (__to), "r" (__from), "r" (__len)
- : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
- } else {
- might_fault();
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, %2\n\t"
- "move\t$6, %3\n\t"
- __MODULE_JAL(__strncpy_from_user_nocheck_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (__to), "r" (__from), "r" (__len)
- : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
- }
-
- return res;
-}
-
extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len);
extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);
@@ -1073,82 +1019,6 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
return res;
}
-extern long __strlen_kernel_asm(const char __user *s);
-extern long __strlen_user_asm(const char __user *s);
-
-/*
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-static inline long strlen_user(const char __user *s)
-{
- long res;
-
- if (eva_kernel_access()) {
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- __MODULE_JAL(__strlen_kernel_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (s)
- : "$2", "$4", __UA_t0, "$31");
- } else {
- might_fault();
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- __MODULE_JAL(__strlen_user_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (s)
- : "$2", "$4", __UA_t0, "$31");
- }
-
- return res;
-}
-
-extern long __strnlen_kernel_nocheck_asm(const char __user *s, long n);
-extern long __strnlen_user_nocheck_asm(const char __user *s, long n);
-
-/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
-static inline long __strnlen_user(const char __user *s, long n)
-{
- long res;
-
- if (eva_kernel_access()) {
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, %2\n\t"
- __MODULE_JAL(__strnlen_kernel_nocheck_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (s), "r" (n)
- : "$2", "$4", "$5", __UA_t0, "$31");
- } else {
- might_fault();
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, %2\n\t"
- __MODULE_JAL(__strnlen_user_nocheck_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (s), "r" (n)
- : "$2", "$4", "$5", __UA_t0, "$31");
- }
-
- return res;
-}
-
extern long __strnlen_kernel_asm(const char __user *s, long n);
extern long __strnlen_user_asm(const char __user *s, long n);
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index e55813029d5a..3c09450908aa 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -35,7 +35,6 @@
#define __ARCH_WANT_SYS_GETPGRP
#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
-#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_SYS_OLD_UNAME
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
diff --git a/arch/mips/jz4740/Makefile b/arch/mips/jz4740/Makefile
index 39d70bde8cfe..6b9c1f7c31c9 100644
--- a/arch/mips/jz4740/Makefile
+++ b/arch/mips/jz4740/Makefile
@@ -7,8 +7,6 @@
obj-y += prom.o time.o reset.o setup.o \
platform.o timer.o
-obj-$(CONFIG_MACH_JZ4740) += gpio.o
-
CFLAGS_setup.o = -I$(src)/../../../scripts/dtc/libfdt
# board specific support
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index a5bd94b95263..6d7f97552200 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -22,6 +22,8 @@
#include <linux/input/matrix_keypad.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_gpio.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf-generic.h>
#include <linux/power_supply.h>
#include <linux/power/jz4740-battery.h>
#include <linux/power/gpio-charger.h>
@@ -159,7 +161,7 @@ static struct jz_nand_platform_data qi_lb60_nand_pdata = {
static struct gpiod_lookup_table qi_lb60_nand_gpio_table = {
.dev_id = "jz4740-nand.0",
.table = {
- GPIO_LOOKUP("Bank C", 30, "busy", 0),
+ GPIO_LOOKUP("GPIOC", 30, "busy", 0),
{ },
},
};
@@ -421,8 +423,8 @@ static struct platform_device qi_lb60_audio_device = {
static struct gpiod_lookup_table qi_lb60_audio_gpio_table = {
.dev_id = "qi-lb60-audio",
.table = {
- GPIO_LOOKUP("Bank B", 29, "snd", 0),
- GPIO_LOOKUP("Bank D", 4, "amp", 0),
+ GPIO_LOOKUP("GPIOB", 29, "snd", 0),
+ GPIO_LOOKUP("GPIOD", 4, "amp", 0),
{ },
},
};
@@ -447,13 +449,36 @@ static struct platform_device *jz_platform_devices[] __initdata = {
&qi_lb60_audio_device,
};
-static void __init board_gpio_setup(void)
-{
- /* We only need to enable/disable pullup here for pins used in generic
- * drivers. Everything else is done by the drivers themselves. */
- jz_gpio_disable_pullup(QI_LB60_GPIO_SD_VCC_EN_N);
- jz_gpio_disable_pullup(QI_LB60_GPIO_SD_CD);
-}
+static unsigned long pin_cfg_bias_disable[] = {
+ PIN_CONFIG_BIAS_DISABLE,
+};
+
+static struct pinctrl_map pin_map[] __initdata = {
+ /* NAND pin configuration */
+ PIN_MAP_MUX_GROUP_DEFAULT("jz4740-nand",
+ "10010000.jz4740-pinctrl", "nand", "nand-cs1"),
+
+ /* fbdev pin configuration */
+ PIN_MAP_MUX_GROUP("jz4740-fb", PINCTRL_STATE_DEFAULT,
+ "10010000.jz4740-pinctrl", "lcd", "lcd-8bit"),
+ PIN_MAP_MUX_GROUP("jz4740-fb", PINCTRL_STATE_SLEEP,
+ "10010000.jz4740-pinctrl", "lcd", "lcd-no-pins"),
+
+ /* MMC pin configuration */
+ PIN_MAP_MUX_GROUP_DEFAULT("jz4740-mmc.0",
+ "10010000.jz4740-pinctrl", "mmc", "mmc-1bit"),
+ PIN_MAP_MUX_GROUP_DEFAULT("jz4740-mmc.0",
+ "10010000.jz4740-pinctrl", "mmc", "mmc-4bit"),
+ PIN_MAP_CONFIGS_PIN_DEFAULT("jz4740-mmc.0",
+ "10010000.jz4740-pinctrl", "PD0", pin_cfg_bias_disable),
+ PIN_MAP_CONFIGS_PIN_DEFAULT("jz4740-mmc.0",
+ "10010000.jz4740-pinctrl", "PD2", pin_cfg_bias_disable),
+
+ /* PWM pin configuration */
+ PIN_MAP_MUX_GROUP_DEFAULT("jz4740-pwm",
+ "10010000.jz4740-pinctrl", "pwm4", "pwm4"),
+};
+
static int __init qi_lb60_init_platform_devices(void)
{
@@ -469,6 +494,7 @@ static int __init qi_lb60_init_platform_devices(void)
ARRAY_SIZE(qi_lb60_spi_board_info));
pwm_add_table(qi_lb60_pwm_lookup, ARRAY_SIZE(qi_lb60_pwm_lookup));
+ pinctrl_register_mappings(pin_map, ARRAY_SIZE(pin_map));
return platform_add_devices(jz_platform_devices,
ARRAY_SIZE(jz_platform_devices));
@@ -479,8 +505,6 @@ static int __init qi_lb60_board_setup(void)
{
printk(KERN_INFO "Qi Hardware JZ4740 QI LB60 setup\n");
- board_gpio_setup();
-
if (qi_lb60_init_platform_devices())
panic("Failed to initialize platform devices");
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
deleted file mode 100644
index cac1ccde2214..000000000000
--- a/arch/mips/jz4740/gpio.c
+++ /dev/null
@@ -1,519 +0,0 @@
-/*
- * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
- * JZ4740 platform GPIO support
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/init.h>
-
-#include <linux/io.h>
-#include <linux/gpio/driver.h>
-/* FIXME: needed for gpio_request(), try to remove consumer API from driver */
-#include <linux/gpio.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/irqchip/ingenic.h>
-#include <linux/bitops.h>
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-#include <asm/mach-jz4740/base.h>
-#include <asm/mach-jz4740/gpio.h>
-
-#define JZ4740_GPIO_BASE_A (32*0)
-#define JZ4740_GPIO_BASE_B (32*1)
-#define JZ4740_GPIO_BASE_C (32*2)
-#define JZ4740_GPIO_BASE_D (32*3)
-
-#define JZ4740_GPIO_NUM_A 32
-#define JZ4740_GPIO_NUM_B 32
-#define JZ4740_GPIO_NUM_C 31
-#define JZ4740_GPIO_NUM_D 32
-
-#define JZ4740_IRQ_GPIO_BASE_A (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_A)
-#define JZ4740_IRQ_GPIO_BASE_B (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_B)
-#define JZ4740_IRQ_GPIO_BASE_C (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_C)
-#define JZ4740_IRQ_GPIO_BASE_D (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_D)
-
-#define JZ_REG_GPIO_PIN 0x00
-#define JZ_REG_GPIO_DATA 0x10
-#define JZ_REG_GPIO_DATA_SET 0x14
-#define JZ_REG_GPIO_DATA_CLEAR 0x18
-#define JZ_REG_GPIO_MASK 0x20
-#define JZ_REG_GPIO_MASK_SET 0x24
-#define JZ_REG_GPIO_MASK_CLEAR 0x28
-#define JZ_REG_GPIO_PULL 0x30
-#define JZ_REG_GPIO_PULL_SET 0x34
-#define JZ_REG_GPIO_PULL_CLEAR 0x38
-#define JZ_REG_GPIO_FUNC 0x40
-#define JZ_REG_GPIO_FUNC_SET 0x44
-#define JZ_REG_GPIO_FUNC_CLEAR 0x48
-#define JZ_REG_GPIO_SELECT 0x50
-#define JZ_REG_GPIO_SELECT_SET 0x54
-#define JZ_REG_GPIO_SELECT_CLEAR 0x58
-#define JZ_REG_GPIO_DIRECTION 0x60
-#define JZ_REG_GPIO_DIRECTION_SET 0x64
-#define JZ_REG_GPIO_DIRECTION_CLEAR 0x68
-#define JZ_REG_GPIO_TRIGGER 0x70
-#define JZ_REG_GPIO_TRIGGER_SET 0x74
-#define JZ_REG_GPIO_TRIGGER_CLEAR 0x78
-#define JZ_REG_GPIO_FLAG 0x80
-#define JZ_REG_GPIO_FLAG_CLEAR 0x14
-
-#define GPIO_TO_BIT(gpio) BIT(gpio & 0x1f)
-#define GPIO_TO_REG(gpio, reg) (gpio_to_jz_gpio_chip(gpio)->base + (reg))
-#define CHIP_TO_REG(chip, reg) (gpio_chip_to_jz_gpio_chip(chip)->base + (reg))
-
-struct jz_gpio_chip {
- unsigned int irq;
- unsigned int irq_base;
- uint32_t edge_trigger_both;
-
- void __iomem *base;
-
- struct gpio_chip gpio_chip;
-};
-
-static struct jz_gpio_chip jz4740_gpio_chips[];
-
-static inline struct jz_gpio_chip *gpio_to_jz_gpio_chip(unsigned int gpio)
-{
- return &jz4740_gpio_chips[gpio >> 5];
-}
-
-static inline struct jz_gpio_chip *gpio_chip_to_jz_gpio_chip(struct gpio_chip *gc)
-{
- return gpiochip_get_data(gc);
-}
-
-static inline struct jz_gpio_chip *irq_to_jz_gpio_chip(struct irq_data *data)
-{
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
- return gc->private;
-}
-
-static inline void jz_gpio_write_bit(unsigned int gpio, unsigned int reg)
-{
- writel(GPIO_TO_BIT(gpio), GPIO_TO_REG(gpio, reg));
-}
-
-int jz_gpio_set_function(int gpio, enum jz_gpio_function function)
-{
- if (function == JZ_GPIO_FUNC_NONE) {
- jz_gpio_write_bit(gpio, JZ_REG_GPIO_FUNC_CLEAR);
- jz_gpio_write_bit(gpio, JZ_REG_GPIO_SELECT_CLEAR);
- jz_gpio_write_bit(gpio, JZ_REG_GPIO_TRIGGER_CLEAR);
- } else {
- jz_gpio_write_bit(gpio, JZ_REG_GPIO_FUNC_SET);
- jz_gpio_write_bit(gpio, JZ_REG_GPIO_TRIGGER_CLEAR);
- switch (function) {
- case JZ_GPIO_FUNC1:
- jz_gpio_write_bit(gpio, JZ_REG_GPIO_SELECT_CLEAR);
- break;
- case JZ_GPIO_FUNC3:
- jz_gpio_write_bit(gpio, JZ_REG_GPIO_TRIGGER_SET);
- case JZ_GPIO_FUNC2: /* Fallthrough */
- jz_gpio_write_bit(gpio, JZ_REG_GPIO_SELECT_SET);
- break;
- default:
- BUG();
- break;
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(jz_gpio_set_function);
-
-int jz_gpio_bulk_request(const struct jz_gpio_bulk_request *request, size_t num)
-{
- size_t i;
- int ret;
-
- for (i = 0; i < num; ++i, ++request) {
- ret = gpio_request(request->gpio, request->name);
- if (ret)
- goto err;
- jz_gpio_set_function(request->gpio, request->function);
- }
-
- return 0;
-
-err:
- for (--request; i > 0; --i, --request) {
- gpio_free(request->gpio);
- jz_gpio_set_function(request->gpio, JZ_GPIO_FUNC_NONE);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(jz_gpio_bulk_request);
-
-void jz_gpio_bulk_free(const struct jz_gpio_bulk_request *request, size_t num)
-{
- size_t i;
-
- for (i = 0; i < num; ++i, ++request) {
- gpio_free(request->gpio);
- jz_gpio_set_function(request->gpio, JZ_GPIO_FUNC_NONE);
- }
-
-}
-EXPORT_SYMBOL_GPL(jz_gpio_bulk_free);
-
-void jz_gpio_bulk_suspend(const struct jz_gpio_bulk_request *request, size_t num)
-{
- size_t i;
-
- for (i = 0; i < num; ++i, ++request) {
- jz_gpio_set_function(request->gpio, JZ_GPIO_FUNC_NONE);
- jz_gpio_write_bit(request->gpio, JZ_REG_GPIO_DIRECTION_CLEAR);
- jz_gpio_write_bit(request->gpio, JZ_REG_GPIO_PULL_SET);
- }
-}
-EXPORT_SYMBOL_GPL(jz_gpio_bulk_suspend);
-
-void jz_gpio_bulk_resume(const struct jz_gpio_bulk_request *request, size_t num)
-{
- size_t i;
-
- for (i = 0; i < num; ++i, ++request)
- jz_gpio_set_function(request->gpio, request->function);
-}
-EXPORT_SYMBOL_GPL(jz_gpio_bulk_resume);
-
-void jz_gpio_enable_pullup(unsigned gpio)
-{
- jz_gpio_write_bit(gpio, JZ_REG_GPIO_PULL_CLEAR);
-}
-EXPORT_SYMBOL_GPL(jz_gpio_enable_pullup);
-
-void jz_gpio_disable_pullup(unsigned gpio)
-{
- jz_gpio_write_bit(gpio, JZ_REG_GPIO_PULL_SET);
-}
-EXPORT_SYMBOL_GPL(jz_gpio_disable_pullup);
-
-static int jz_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
-{
- return !!(readl(CHIP_TO_REG(chip, JZ_REG_GPIO_PIN)) & BIT(gpio));
-}
-
-static void jz_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value)
-{
- uint32_t __iomem *reg = CHIP_TO_REG(chip, JZ_REG_GPIO_DATA_SET);
- reg += !value;
- writel(BIT(gpio), reg);
-}
-
-static int jz_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
- int value)
-{
- writel(BIT(gpio), CHIP_TO_REG(chip, JZ_REG_GPIO_DIRECTION_SET));
- jz_gpio_set_value(chip, gpio, value);
-
- return 0;
-}
-
-static int jz_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
-{
- writel(BIT(gpio), CHIP_TO_REG(chip, JZ_REG_GPIO_DIRECTION_CLEAR));
-
- return 0;
-}
-
-static int jz_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
-{
- struct jz_gpio_chip *jz_gpio = gpiochip_get_data(chip);
-
- return jz_gpio->irq_base + gpio;
-}
-
-int jz_gpio_port_direction_input(int port, uint32_t mask)
-{
- writel(mask, GPIO_TO_REG(port, JZ_REG_GPIO_DIRECTION_CLEAR));
-
- return 0;
-}
-EXPORT_SYMBOL(jz_gpio_port_direction_input);
-
-int jz_gpio_port_direction_output(int port, uint32_t mask)
-{
- writel(mask, GPIO_TO_REG(port, JZ_REG_GPIO_DIRECTION_SET));
-
- return 0;
-}
-EXPORT_SYMBOL(jz_gpio_port_direction_output);
-
-void jz_gpio_port_set_value(int port, uint32_t value, uint32_t mask)
-{
- writel(~value & mask, GPIO_TO_REG(port, JZ_REG_GPIO_DATA_CLEAR));
- writel(value & mask, GPIO_TO_REG(port, JZ_REG_GPIO_DATA_SET));
-}
-EXPORT_SYMBOL(jz_gpio_port_set_value);
-
-uint32_t jz_gpio_port_get_value(int port, uint32_t mask)
-{
- uint32_t value = readl(GPIO_TO_REG(port, JZ_REG_GPIO_PIN));
-
- return value & mask;
-}
-EXPORT_SYMBOL(jz_gpio_port_get_value);
-
-#define IRQ_TO_BIT(irq) BIT((irq - JZ4740_IRQ_GPIO(0)) & 0x1f)
-
-static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int irq)
-{
- uint32_t value;
- void __iomem *reg;
- uint32_t mask = IRQ_TO_BIT(irq);
-
- if (!(chip->edge_trigger_both & mask))
- return;
-
- reg = chip->base;
-
- value = readl(chip->base + JZ_REG_GPIO_PIN);
- if (value & mask)
- reg += JZ_REG_GPIO_DIRECTION_CLEAR;
- else
- reg += JZ_REG_GPIO_DIRECTION_SET;
-
- writel(mask, reg);
-}
-
-static void jz_gpio_irq_demux_handler(struct irq_desc *desc)
-{
- uint32_t flag;
- unsigned int gpio_irq;
- struct jz_gpio_chip *chip = irq_desc_get_handler_data(desc);
-
- flag = readl(chip->base + JZ_REG_GPIO_FLAG);
- if (!flag)
- return;
-
- gpio_irq = chip->irq_base + __fls(flag);
-
- jz_gpio_check_trigger_both(chip, gpio_irq);
-
- generic_handle_irq(gpio_irq);
-};
-
-static inline void jz_gpio_set_irq_bit(struct irq_data *data, unsigned int reg)
-{
- struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
- writel(IRQ_TO_BIT(data->irq), chip->base + reg);
-}
-
-static void jz_gpio_irq_unmask(struct irq_data *data)
-{
- struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
-
- jz_gpio_check_trigger_both(chip, data->irq);
- irq_gc_unmask_enable_reg(data);
-};
-
-/* TODO: Check if function is gpio */
-static unsigned int jz_gpio_irq_startup(struct irq_data *data)
-{
- jz_gpio_set_irq_bit(data, JZ_REG_GPIO_SELECT_SET);
- jz_gpio_irq_unmask(data);
- return 0;
-}
-
-static void jz_gpio_irq_shutdown(struct irq_data *data)
-{
- irq_gc_mask_disable_reg(data);
-
- /* Set direction to input */
- jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR);
- jz_gpio_set_irq_bit(data, JZ_REG_GPIO_SELECT_CLEAR);
-}
-
-static int jz_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
-{
- struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
- unsigned int irq = data->irq;
-
- if (flow_type == IRQ_TYPE_EDGE_BOTH) {
- uint32_t value = readl(chip->base + JZ_REG_GPIO_PIN);
- if (value & IRQ_TO_BIT(irq))
- flow_type = IRQ_TYPE_EDGE_FALLING;
- else
- flow_type = IRQ_TYPE_EDGE_RISING;
- chip->edge_trigger_both |= IRQ_TO_BIT(irq);
- } else {
- chip->edge_trigger_both &= ~IRQ_TO_BIT(irq);
- }
-
- switch (flow_type) {
- case IRQ_TYPE_EDGE_RISING:
- jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_SET);
- jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_SET);
- break;
- case IRQ_TYPE_EDGE_FALLING:
- jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR);
- jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_SET);
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_SET);
- jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_CLEAR);
- break;
- case IRQ_TYPE_LEVEL_LOW:
- jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR);
- jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_CLEAR);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int jz_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
-{
- struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
-
- irq_gc_set_wake(data, on);
- irq_set_irq_wake(chip->irq, on);
-
- return 0;
-}
-
-#define JZ4740_GPIO_CHIP(_bank) { \
- .irq_base = JZ4740_IRQ_GPIO_BASE_ ## _bank, \
- .gpio_chip = { \
- .label = "Bank " # _bank, \
- .owner = THIS_MODULE, \
- .set = jz_gpio_set_value, \
- .get = jz_gpio_get_value, \
- .direction_output = jz_gpio_direction_output, \
- .direction_input = jz_gpio_direction_input, \
- .to_irq = jz_gpio_to_irq, \
- .base = JZ4740_GPIO_BASE_ ## _bank, \
- .ngpio = JZ4740_GPIO_NUM_ ## _bank, \
- }, \
-}
-
-static struct jz_gpio_chip jz4740_gpio_chips[] = {
- JZ4740_GPIO_CHIP(A),
- JZ4740_GPIO_CHIP(B),
- JZ4740_GPIO_CHIP(C),
- JZ4740_GPIO_CHIP(D),
-};
-
-static void jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
-{
- struct irq_chip_generic *gc;
- struct irq_chip_type *ct;
-
- chip->base = ioremap(JZ4740_GPIO_BASE_ADDR + (id * 0x100), 0x100);
-
- chip->irq = JZ4740_IRQ_INTC_GPIO(id);
- irq_set_chained_handler_and_data(chip->irq,
- jz_gpio_irq_demux_handler, chip);
-
- gc = irq_alloc_generic_chip(chip->gpio_chip.label, 1, chip->irq_base,
- chip->base, handle_level_irq);
-
- gc->wake_enabled = IRQ_MSK(chip->gpio_chip.ngpio);
- gc->private = chip;
-
- ct = gc->chip_types;
- ct->regs.enable = JZ_REG_GPIO_MASK_CLEAR;
- ct->regs.disable = JZ_REG_GPIO_MASK_SET;
- ct->regs.ack = JZ_REG_GPIO_FLAG_CLEAR;
-
- ct->chip.name = "GPIO";
- ct->chip.irq_mask = irq_gc_mask_disable_reg;
- ct->chip.irq_unmask = jz_gpio_irq_unmask;
- ct->chip.irq_ack = irq_gc_ack_set_bit;
- ct->chip.irq_suspend = ingenic_intc_irq_suspend;
- ct->chip.irq_resume = ingenic_intc_irq_resume;
- ct->chip.irq_startup = jz_gpio_irq_startup;
- ct->chip.irq_shutdown = jz_gpio_irq_shutdown;
- ct->chip.irq_set_type = jz_gpio_irq_set_type;
- ct->chip.irq_set_wake = jz_gpio_irq_set_wake;
- ct->chip.flags = IRQCHIP_SET_TYPE_MASKED;
-
- irq_setup_generic_chip(gc, IRQ_MSK(chip->gpio_chip.ngpio),
- IRQ_GC_INIT_NESTED_LOCK, 0, IRQ_NOPROBE | IRQ_LEVEL);
-
- gpiochip_add_data(&chip->gpio_chip, chip);
-}
-
-static int __init jz4740_gpio_init(void)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i)
- jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i);
-
- printk(KERN_INFO "JZ4740 GPIO initialized\n");
-
- return 0;
-}
-arch_initcall(jz4740_gpio_init);
-
-#ifdef CONFIG_DEBUG_FS
-
-static inline void gpio_seq_reg(struct seq_file *s, struct jz_gpio_chip *chip,
- const char *name, unsigned int reg)
-{
- seq_printf(s, "\t%s: %08x\n", name, readl(chip->base + reg));
-}
-
-static int gpio_regs_show(struct seq_file *s, void *unused)
-{
- struct jz_gpio_chip *chip = jz4740_gpio_chips;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i, ++chip) {
- seq_printf(s, "==GPIO %d==\n", i);
- gpio_seq_reg(s, chip, "Pin", JZ_REG_GPIO_PIN);
- gpio_seq_reg(s, chip, "Data", JZ_REG_GPIO_DATA);
- gpio_seq_reg(s, chip, "Mask", JZ_REG_GPIO_MASK);
- gpio_seq_reg(s, chip, "Pull", JZ_REG_GPIO_PULL);
- gpio_seq_reg(s, chip, "Func", JZ_REG_GPIO_FUNC);
- gpio_seq_reg(s, chip, "Select", JZ_REG_GPIO_SELECT);
- gpio_seq_reg(s, chip, "Direction", JZ_REG_GPIO_DIRECTION);
- gpio_seq_reg(s, chip, "Trigger", JZ_REG_GPIO_TRIGGER);
- gpio_seq_reg(s, chip, "Flag", JZ_REG_GPIO_FLAG);
- }
-
- return 0;
-}
-
-static int gpio_regs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, gpio_regs_show, NULL);
-}
-
-static const struct file_operations gpio_regs_operations = {
- .open = gpio_regs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init gpio_debugfs_init(void)
-{
- (void) debugfs_create_file("jz_regs_gpio", S_IFREG | S_IRUGO,
- NULL, NULL, &gpio_regs_operations);
- return 0;
-}
-subsys_initcall(gpio_debugfs_init);
-
-#endif
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index a563759fd142..6a0d7040d882 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -1094,7 +1094,7 @@ static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
struct mm_struct *mm;
int i;
- if (likely(!vcpu->requests))
+ if (likely(!kvm_request_pending(vcpu)))
return;
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 71d8856ade64..74805035edc8 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -2337,7 +2337,7 @@ static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
int ret = 0;
int i;
- if (!vcpu->requests)
+ if (!kvm_request_pending(vcpu))
return 0;
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
index 149f0513c4f5..a263d1b751ff 100644
--- a/arch/mips/lantiq/clk.c
+++ b/arch/mips/lantiq/clk.c
@@ -160,11 +160,6 @@ void clk_deactivate(struct clk *clk)
}
EXPORT_SYMBOL(clk_deactivate);
-struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
-{
- return NULL;
-}
-
static inline u32 get_counter_resolution(void)
{
u32 res;
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 0344e575f522..a37fe3d1ee2f 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -3,7 +3,7 @@
#
lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
- mips-atomic.o strlen_user.o strncpy_user.o \
+ mips-atomic.o strncpy_user.o \
strnlen_user.o uncached.o
obj-y += iomap.o
diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S
deleted file mode 100644
index 40be22625bc5..000000000000
--- a/arch/mips/lib/strlen_user.S
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1996, 1998, 1999, 2004 by Ralf Baechle
- * Copyright (C) 1999 Silicon Graphics, Inc.
- * Copyright (C) 2011 MIPS Technologies, Inc.
- */
-#include <asm/asm.h>
-#include <asm/asm-offsets.h>
-#include <asm/export.h>
-#include <asm/regdef.h>
-
-#define EX(insn,reg,addr,handler) \
-9: insn reg, addr; \
- .section __ex_table,"a"; \
- PTR 9b, handler; \
- .previous
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 for error
- */
- .macro __BUILD_STRLEN_ASM func
-LEAF(__strlen_\func\()_asm)
- LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
- and v0, a0
- bnez v0, .Lfault\@
-
- move v0, a0
-.ifeqs "\func", "kernel"
-1: EX(lbu, v1, (v0), .Lfault\@)
-.else
-1: EX(lbue, v1, (v0), .Lfault\@)
-.endif
- PTR_ADDIU v0, 1
- bnez v1, 1b
- PTR_SUBU v0, a0
- jr ra
- END(__strlen_\func\()_asm)
-
-.Lfault\@: move v0, zero
- jr ra
- .endm
-
-#ifndef CONFIG_EVA
- /* Set aliases */
- .global __strlen_user_asm
- .set __strlen_user_asm, __strlen_kernel_asm
-EXPORT_SYMBOL(__strlen_user_asm)
-#endif
-
-__BUILD_STRLEN_ASM kernel
-EXPORT_SYMBOL(__strlen_kernel_asm)
-
-#ifdef CONFIG_EVA
-
- .set push
- .set eva
-__BUILD_STRLEN_ASM user
- .set pop
-EXPORT_SYMBOL(__strlen_user_asm)
-#endif
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index 5267ca800b84..acdff66bd5d2 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -35,7 +35,6 @@ LEAF(__strncpy_from_\func\()_asm)
and v0, a1
bnez v0, .Lfault\@
-FEXPORT(__strncpy_from_\func\()_nocheck_asm)
move t0, zero
move v1, a1
.ifeqs "\func","kernel"
@@ -70,16 +69,12 @@ FEXPORT(__strncpy_from_\func\()_nocheck_asm)
#ifndef CONFIG_EVA
/* Set aliases */
.global __strncpy_from_user_asm
- .global __strncpy_from_user_nocheck_asm
.set __strncpy_from_user_asm, __strncpy_from_kernel_asm
- .set __strncpy_from_user_nocheck_asm, __strncpy_from_kernel_nocheck_asm
EXPORT_SYMBOL(__strncpy_from_user_asm)
-EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm)
#endif
__BUILD_STRNCPY_ASM kernel
EXPORT_SYMBOL(__strncpy_from_kernel_asm)
-EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm)
#ifdef CONFIG_EVA
.set push
@@ -87,5 +82,4 @@ EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm)
__BUILD_STRNCPY_ASM user
.set pop
EXPORT_SYMBOL(__strncpy_from_user_asm)
-EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm)
#endif
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index 860ea99fd70c..e1bacf5a3abe 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -32,7 +32,6 @@ LEAF(__strnlen_\func\()_asm)
and v0, a0
bnez v0, .Lfault\@
-FEXPORT(__strnlen_\func\()_nocheck_asm)
move v0, a0
PTR_ADDU a1, a0 # stop pointer
1:
@@ -68,16 +67,12 @@ FEXPORT(__strnlen_\func\()_nocheck_asm)
#ifndef CONFIG_EVA
/* Set aliases */
.global __strnlen_user_asm
- .global __strnlen_user_nocheck_asm
.set __strnlen_user_asm, __strnlen_kernel_asm
- .set __strnlen_user_nocheck_asm, __strnlen_kernel_nocheck_asm
EXPORT_SYMBOL(__strnlen_user_asm)
-EXPORT_SYMBOL(__strnlen_user_nocheck_asm)
#endif
__BUILD_STRNLEN_ASM kernel
EXPORT_SYMBOL(__strnlen_kernel_asm)
-EXPORT_SYMBOL(__strnlen_kernel_nocheck_asm)
#ifdef CONFIG_EVA
@@ -86,5 +81,4 @@ EXPORT_SYMBOL(__strnlen_kernel_nocheck_asm)
__BUILD_STRNLEN_ASM user
.set pop
EXPORT_SYMBOL(__strnlen_user_asm)
-EXPORT_SYMBOL(__strnlen_user_nocheck_asm)
#endif
diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c
index 178ca17a5667..34486c138206 100644
--- a/arch/mips/loongson64/common/dma-swiotlb.c
+++ b/arch/mips/loongson64/common/dma-swiotlb.c
@@ -75,19 +75,11 @@ static void loongson_dma_sync_sg_for_device(struct device *dev,
mb();
}
-static int loongson_dma_set_mask(struct device *dev, u64 mask)
+static int loongson_dma_supported(struct device *dev, u64 mask)
{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits)) {
- *dev->dma_mask = DMA_BIT_MASK(loongson_sysconf.dma_mask_bits);
- return -EIO;
- }
-
- *dev->dma_mask = mask;
-
- return 0;
+ if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits))
+ return 0;
+ return swiotlb_dma_supported(dev, mask);
}
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
@@ -126,8 +118,7 @@ static const struct dma_map_ops loongson_dma_map_ops = {
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = loongson_dma_sync_sg_for_device,
.mapping_error = swiotlb_dma_mapping_error,
- .dma_supported = swiotlb_dma_supported,
- .set_dma_mask = loongson_dma_set_mask
+ .dma_supported = loongson_dma_supported,
};
void __init plat_swiotlb_setup(void)
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index 74aa6f62468f..cef152234312 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -36,7 +36,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
return pte;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
+ unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
index 3a84f6c0c840..174575a9a112 100644
--- a/arch/mips/pci/pci-legacy.c
+++ b/arch/mips/pci/pci-legacy.c
@@ -86,8 +86,7 @@ static void pcibios_scanbus(struct pci_controller *hose)
hose->mem_resource, hose->mem_offset);
pci_add_resource_offset(&resources,
hose->io_resource, hose->io_offset);
- pci_add_resource_offset(&resources,
- hose->busn_resource, hose->busn_offset);
+ pci_add_resource(&resources, hose->busn_resource);
bus = pci_scan_root_bus(NULL, next_busno, hose->pci_ops, hose,
&resources);
hose->bus = bus;
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index ed810e7206e8..db5b57829a81 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -1,8 +1,10 @@
generic-y += barrier.h
generic-y += clkdev.h
+generic-y += device.h
generic-y += exec.h
generic-y += extable.h
+generic-y += fb.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
diff --git a/arch/mn10300/include/asm/device.h b/arch/mn10300/include/asm/device.h
deleted file mode 100644
index f0a4c256403b..000000000000
--- a/arch/mn10300/include/asm/device.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/device.h>
diff --git a/arch/mn10300/include/asm/fb.h b/arch/mn10300/include/asm/fb.h
deleted file mode 100644
index 697b24a91e1a..000000000000
--- a/arch/mn10300/include/asm/fb.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* MN10300 Frame buffer stuff
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#ifndef _ASM_FB_H
-#define _ASM_FB_H
-
-#include <linux/fb.h>
-
-#define fb_pgprotect(...) do {} while (0)
-
-static inline int fb_is_primary_device(struct fb_info *info)
-{
- return 0;
-}
-
-#endif /* _ASM_FB_H */
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index c6966474827f..5af468fd1359 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -290,9 +290,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
}
extern long strncpy_from_user(char *dst, const char __user *src, long count);
-extern long __strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *str, long n);
-#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
extern unsigned long clear_user(void __user *mem, unsigned long len);
extern unsigned long __clear_user(void __user *mem, unsigned long len);
diff --git a/arch/mn10300/kernel/mn10300_ksyms.c b/arch/mn10300/kernel/mn10300_ksyms.c
index 5e9f919635f0..66fb68d0ca8a 100644
--- a/arch/mn10300/kernel/mn10300_ksyms.c
+++ b/arch/mn10300/kernel/mn10300_ksyms.c
@@ -23,7 +23,6 @@ EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(strncpy_from_user);
-EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(clear_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(strnlen_user);
diff --git a/arch/mn10300/lib/usercopy.c b/arch/mn10300/lib/usercopy.c
index cece1799cc32..39626912de98 100644
--- a/arch/mn10300/lib/usercopy.c
+++ b/arch/mn10300/lib/usercopy.c
@@ -50,14 +50,6 @@ do { \
} while (0)
long
-__strncpy_from_user(char *dst, const char *src, long count)
-{
- long res;
- __do_strncpy_from_user(dst, src, count, res);
- return res;
-}
-
-long
strncpy_from_user(char *dst, const char *src, long count)
{
long res = -EFAULT;
diff --git a/arch/openrisc/configs/or1ksim_defconfig b/arch/openrisc/configs/or1ksim_defconfig
index 42fe5303a370..a73aa90501be 100644
--- a/arch/openrisc/configs/or1ksim_defconfig
+++ b/arch/openrisc/configs/or1ksim_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="or32-linux-"
+CONFIG_CROSS_COMPILE="or1k-linux-"
CONFIG_NO_HZ=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
@@ -23,7 +23,6 @@ CONFIG_INET=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h
index 0c0075f17145..f41bd3cb76d9 100644
--- a/arch/openrisc/include/asm/dma-mapping.h
+++ b/arch/openrisc/include/asm/dma-mapping.h
@@ -26,8 +26,6 @@
#include <linux/kmemcheck.h>
#include <linux/dma-mapping.h>
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
extern const struct dma_map_ops or1k_dma_map_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
@@ -35,11 +33,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return &or1k_dma_map_ops;
}
-#define HAVE_ARCH_DMA_SUPPORTED 1
-static inline int dma_supported(struct device *dev, u64 dma_mask)
-{
- /* Support 32 bit DMA mask exclusively */
- return dma_mask == DMA_BIT_MASK(32);
-}
-
#endif /* __ASM_OPENRISC_DMA_MAPPING_H */
diff --git a/arch/openrisc/include/asm/fixmap.h b/arch/openrisc/include/asm/fixmap.h
index 52733416c1f3..5a0159546f9e 100644
--- a/arch/openrisc/include/asm/fixmap.h
+++ b/arch/openrisc/include/asm/fixmap.h
@@ -27,6 +27,7 @@
#define FIXADDR_TOP ((unsigned long) (-2*PAGE_SIZE))
#include <linux/kernel.h>
+#include <linux/bug.h>
#include <asm/page.h>
/*
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index a557a7cd0232..bbf5c79cce7a 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -264,7 +264,6 @@ clear_user(void *addr, unsigned long size)
extern long strncpy_from_user(char *dest, const char __user *src, long count);
-extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASM_OPENRISC_UACCESS_H */
diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c
index ee3e604959e1..d7260fdb0351 100644
--- a/arch/openrisc/kernel/or32_ksyms.c
+++ b/arch/openrisc/kernel/or32_ksyms.c
@@ -15,7 +15,7 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/elfcore.h>
#include <linux/sched.h>
#include <linux/in6.h>
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index f9b77003f113..8739b6d75005 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -26,7 +26,7 @@
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
diff --git a/arch/openrisc/lib/delay.c b/arch/openrisc/lib/delay.c
index c82b09f4a106..8b13fdf43ec6 100644
--- a/arch/openrisc/lib/delay.c
+++ b/arch/openrisc/lib/delay.c
@@ -16,8 +16,9 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
+#include <asm/param.h>
#include <asm/delay.h>
#include <asm/timex.h>
#include <asm/processor.h>
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index c3e114f67485..1fd962a07f52 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -209,7 +209,6 @@ extern long lstrnlen_user(const char __user *, long);
#define user_addr_max() (~0UL)
#define strnlen_user lstrnlen_user
-#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index 5f4c68daa261..7dc31c84dd37 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -156,7 +156,6 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
#define __ARCH_WANT_SYS_GETPGRP
#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
-#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index aa50ac090e9b..5eb8f633b282 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -69,7 +69,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
return pte;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 6189238e69f8..7177a3f4f418 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -109,14 +109,6 @@ config GENERIC_LOCKBREAK
default y
depends on SMP && PREEMPT
-config ARCH_HAS_ILOG2_U32
- bool
- default y
-
-config ARCH_HAS_ILOG2_U64
- bool
- default y if 64BIT
-
config GENERIC_HWEIGHT
bool
default y
@@ -138,6 +130,7 @@ config PPC
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_HAS_ZONE_DEVICE if PPC_BOOK3S_64
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
@@ -163,7 +156,7 @@ config PPC
select GENERIC_SMP_IDLE_THREAD
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
- select GENERIC_TIME_VSYSCALL_OLD
+ select GENERIC_TIME_VSYSCALL
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KGDB
@@ -171,6 +164,8 @@ config PPC
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
+ select ARCH_HAS_STRICT_KERNEL_RWX if (PPC_BOOK3S_64 && !RELOCATABLE && !HIBERNATION)
+ select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select HAVE_CBPF_JIT if !PPC64
select HAVE_CONTEXT_TRACKING if PPC64
select HAVE_DEBUG_KMEMLEAK
@@ -208,6 +203,7 @@ config PPC
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
+ select HAVE_IRQ_TIME_ACCOUNTING
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
select MODULES_USE_ELF_RELA
@@ -438,6 +434,17 @@ config PPC_TRANSACTIONAL_MEM
---help---
Support user-mode Transactional Memory on POWERPC.
+config LD_HEAD_STUB_CATCH
+ bool "Reserve 256 bytes to cope with linker stubs in HEAD text" if EXPERT
+ depends on PPC64
+ default n
+ help
+ Very large kernels can cause linker branch stubs to be generated by
+ code in head_64.S, which moves the head text sections out of their
+ specified location. This option can work around the problem.
+
+ If unsure, say "N".
+
config DISABLE_MPROFILE_KERNEL
bool "Disable use of mprofile-kernel for kernel tracing"
depends on PPC64 && CPU_LITTLE_ENDIAN
@@ -456,14 +463,6 @@ config MPROFILE_KERNEL
depends on PPC64 && CPU_LITTLE_ENDIAN
def_bool !DISABLE_MPROFILE_KERNEL
-config USE_THIN_ARCHIVES
- bool "Build the kernel using thin archives"
- default n
- select THIN_ARCHIVES
- help
- Build the kernel using thin archives.
- If you're unsure say N.
-
config IOMMU_HELPER
def_bool PPC64
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 3e0f0e1fadef..8d4ed73d5490 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -98,6 +98,7 @@ endif
LDFLAGS_vmlinux-y := -Bstatic
LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie
LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y)
+LDFLAGS_vmlinux += $(call ld-option,--orphan-handling=warn)
ifeq ($(CONFIG_PPC64),y)
ifeq ($(call cc-option-yn,-mcmodel=medium),y)
@@ -189,7 +190,17 @@ else
CHECKFLAGS += -D__LITTLE_ENDIAN__
endif
+ifdef CONFIG_PPC32
KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
+else
+ifeq ($(call ld-ifversion, -ge, 225000000, y),y)
+# Have the linker provide sfpr if possible.
+# There is a corresponding test in arch/powerpc/lib/Makefile
+KBUILD_LDFLAGS_MODULE += --save-restore-funcs
+else
+KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
+endif
+endif
ifeq ($(CONFIG_476FPE_ERR46),y)
KBUILD_LDFLAGS_MODULE += --ppc476-workaround \
diff --git a/arch/powerpc/Makefile.postlink b/arch/powerpc/Makefile.postlink
index eccfcc88afae..5db43ebbe2df 100644
--- a/arch/powerpc/Makefile.postlink
+++ b/arch/powerpc/Makefile.postlink
@@ -10,13 +10,26 @@ __archpost:
-include include/config/auto.conf
include scripts/Kbuild.include
+quiet_cmd_head_check = CHKHEAD $@
+ cmd_head_check = $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/head_check.sh "$(NM)" "$@"
+
quiet_cmd_relocs_check = CHKREL $@
- cmd_relocs_check = $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$@"
+ifdef CONFIG_PPC_BOOK3S_64
+ cmd_relocs_check = \
+ $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$@" ; \
+ $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/unrel_branch_check.sh "$(OBJDUMP)" "$@"
+else
+ cmd_relocs_check = \
+ $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$@"
+endif
# `@true` prevents complaint when there is nothing to be done
vmlinux: FORCE
@true
+ifdef CONFIG_PPC64
+ $(call cmd,head_check)
+endif
ifdef CONFIG_RELOCATABLE
$(call if_changed,relocs_check)
endif
@@ -25,7 +38,7 @@ endif
@true
clean:
- @true
+ rm -f .tmp_symbols.txt
PHONY += FORCE clean
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index e82f333cc84a..a7814a7b1523 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -95,13 +95,16 @@ libfdtheader := fdt.h libfdt.h libfdt_internal.h
$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o opal.o): \
$(addprefix $(obj)/,$(libfdtheader))
-src-wlib-y := string.S crt0.S crtsavres.S stdio.c decompress.c main.c \
+src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \
$(libfdt) libfdt-wrapper.c \
ns16550.c serial.c simple_alloc.c div64.S util.S \
elf_util.c $(zlib-y) devtree.c stdlib.c \
oflib.c ofconsole.c cuboot.c mpsc.c cpm-serial.c \
uartlite.c mpc52xx-psc.c opal.c
src-wlib-$(CONFIG_PPC64_BOOT_WRAPPER) += opal-calls.S
+ifndef CONFIG_PPC64_BOOT_WRAPPER
+src-wlib-y += crtsavres.S
+endif
src-wlib-$(CONFIG_40x) += 4xx.c planetcore.c
src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c
src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c fsl-soc.c
diff --git a/arch/powerpc/boot/crtsavres.S b/arch/powerpc/boot/crtsavres.S
index f3d9b35c07d4..085fb2b9a8b8 100644
--- a/arch/powerpc/boot/crtsavres.S
+++ b/arch/powerpc/boot/crtsavres.S
@@ -37,12 +37,13 @@
* the executable file might be covered by the GNU General Public License.
*/
+#ifdef __powerpc64__
+#error "On PPC64, FPR save/restore functions are provided by the linker."
+#endif
+
.file "crtsavres.S"
.section ".text"
-/* On PowerPC64 Linux, these functions are provided by the linker. */
-#ifndef __powerpc64__
-
#define _GLOBAL(name) \
.type name,@function; \
.globl name; \
@@ -230,4 +231,3 @@ _GLOBAL(_rest32gpr_31_x)
mtlr 0
mr 1,11
blr
-#endif
diff --git a/arch/powerpc/boot/dts/ac14xx.dts b/arch/powerpc/boot/dts/ac14xx.dts
index 27fcabc2f857..83bcfd865167 100644
--- a/arch/powerpc/boot/dts/ac14xx.dts
+++ b/arch/powerpc/boot/dts/ac14xx.dts
@@ -10,7 +10,7 @@
*/
-#include <mpc5121.dtsi>
+#include "mpc5121.dtsi"
/ {
model = "ac14xx";
diff --git a/arch/powerpc/boot/dts/digsy_mtc.dts b/arch/powerpc/boot/dts/digsy_mtc.dts
index 955bff629df3..c280e75c86bf 100644
--- a/arch/powerpc/boot/dts/digsy_mtc.dts
+++ b/arch/powerpc/boot/dts/digsy_mtc.dts
@@ -73,7 +73,7 @@
i2c@3d00 {
eeprom@50 {
- compatible = "at,24c08";
+ compatible = "atmel,24c08";
reg = <0x50>;
};
diff --git a/arch/powerpc/boot/dts/fsl/b4qds.dtsi b/arch/powerpc/boot/dts/fsl/b4qds.dtsi
index 3785ef826d07..999efd3bc167 100644
--- a/arch/powerpc/boot/dts/fsl/b4qds.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4qds.dtsi
@@ -166,19 +166,19 @@
reg = <0>;
eeprom@50 {
- compatible = "at24,24c64";
+ compatible = "atmel,24c64";
reg = <0x50>;
};
eeprom@51 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x51>;
};
eeprom@53 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x53>;
};
eeprom@57 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x57>;
};
rtc@68 {
diff --git a/arch/powerpc/boot/dts/fsl/c293pcie.dts b/arch/powerpc/boot/dts/fsl/c293pcie.dts
index 66709788429d..5e905e0857cf 100644
--- a/arch/powerpc/boot/dts/fsl/c293pcie.dts
+++ b/arch/powerpc/boot/dts/fsl/c293pcie.dts
@@ -153,7 +153,7 @@
&soc {
i2c@3000 {
eeprom@50 {
- compatible = "st,24c1024";
+ compatible = "st,24c1024", "atmel,24c1024";
reg = <0x50>;
};
diff --git a/arch/powerpc/boot/dts/fsl/p1010rdb.dtsi b/arch/powerpc/boot/dts/fsl/p1010rdb.dtsi
index a8e4ba070104..2ca9cee2ddeb 100644
--- a/arch/powerpc/boot/dts/fsl/p1010rdb.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1010rdb.dtsi
@@ -89,7 +89,7 @@
&board_soc {
i2c@3000 {
eeprom@50 {
- compatible = "st,24c256";
+ compatible = "st,24c256", "atmel,24c256";
reg = <0x50>;
};
diff --git a/arch/powerpc/boot/dts/fsl/p1023rdb.dts b/arch/powerpc/boot/dts/fsl/p1023rdb.dts
index 9716ca64651c..ead928364beb 100644
--- a/arch/powerpc/boot/dts/fsl/p1023rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/p1023rdb.dts
@@ -79,7 +79,7 @@
i2c@3000 {
eeprom@53 {
- compatible = "at24,24c04";
+ compatible = "atmel,24c04";
reg = <0x53>;
};
diff --git a/arch/powerpc/boot/dts/fsl/p2041rdb.dts b/arch/powerpc/boot/dts/fsl/p2041rdb.dts
index e50fea95a853..950816b9d6e1 100644
--- a/arch/powerpc/boot/dts/fsl/p2041rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/p2041rdb.dts
@@ -127,7 +127,7 @@
reg = <0x48>;
};
eeprom@50 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x50>;
};
rtc@68 {
@@ -142,7 +142,7 @@
i2c@118100 {
eeprom@50 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x50>;
};
};
diff --git a/arch/powerpc/boot/dts/fsl/p3041ds.dts b/arch/powerpc/boot/dts/fsl/p3041ds.dts
index 40748e415adb..6f5f7283c533 100644
--- a/arch/powerpc/boot/dts/fsl/p3041ds.dts
+++ b/arch/powerpc/boot/dts/fsl/p3041ds.dts
@@ -124,11 +124,11 @@
i2c@118100 {
eeprom@51 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x51>;
};
eeprom@52 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x52>;
};
};
diff --git a/arch/powerpc/boot/dts/fsl/p4080ds.dts b/arch/powerpc/boot/dts/fsl/p4080ds.dts
index 816b9788d5f6..65e20152e22f 100644
--- a/arch/powerpc/boot/dts/fsl/p4080ds.dts
+++ b/arch/powerpc/boot/dts/fsl/p4080ds.dts
@@ -125,11 +125,11 @@
i2c@118100 {
eeprom@51 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x51>;
};
eeprom@52 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x52>;
};
rtc@68 {
diff --git a/arch/powerpc/boot/dts/fsl/p5020ds.dts b/arch/powerpc/boot/dts/fsl/p5020ds.dts
index cd6f37386111..b24adf902d8d 100644
--- a/arch/powerpc/boot/dts/fsl/p5020ds.dts
+++ b/arch/powerpc/boot/dts/fsl/p5020ds.dts
@@ -124,11 +124,11 @@
i2c@118100 {
eeprom@51 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x51>;
};
eeprom@52 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x52>;
};
};
diff --git a/arch/powerpc/boot/dts/fsl/p5040ds.dts b/arch/powerpc/boot/dts/fsl/p5040ds.dts
index 45084738cf4e..30850b3228e0 100644
--- a/arch/powerpc/boot/dts/fsl/p5040ds.dts
+++ b/arch/powerpc/boot/dts/fsl/p5040ds.dts
@@ -133,11 +133,11 @@
i2c@118100 {
eeprom@51 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x51>;
};
eeprom@52 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x52>;
};
};
diff --git a/arch/powerpc/boot/dts/fsl/t208xqds.dtsi b/arch/powerpc/boot/dts/fsl/t208xqds.dtsi
index ec080bd01b09..db4139999b28 100644
--- a/arch/powerpc/boot/dts/fsl/t208xqds.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t208xqds.dtsi
@@ -147,17 +147,17 @@
reg = <0x0>;
eeprom@50 {
- compatible = "at24,24c512";
+ compatible = "atmel,24c512";
reg = <0x50>;
};
eeprom@51 {
- compatible = "at24,24c02";
+ compatible = "atmel,24c02";
reg = <0x51>;
};
eeprom@57 {
- compatible = "at24,24c02";
+ compatible = "atmel,24c02";
reg = <0x57>;
};
@@ -174,7 +174,7 @@
reg = <0x1>;
eeprom@55 {
- compatible = "at24,24c02";
+ compatible = "atmel,24c02";
reg = <0x55>;
};
};
diff --git a/arch/powerpc/boot/dts/fsl/t4240qds.dts b/arch/powerpc/boot/dts/fsl/t4240qds.dts
index 9573ceada07c..c0913ac5aaad 100644
--- a/arch/powerpc/boot/dts/fsl/t4240qds.dts
+++ b/arch/powerpc/boot/dts/fsl/t4240qds.dts
@@ -377,27 +377,27 @@
reg = <0>;
eeprom@51 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x51>;
};
eeprom@52 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x52>;
};
eeprom@53 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x53>;
};
eeprom@54 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x54>;
};
eeprom@55 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x55>;
};
eeprom@56 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x56>;
};
rtc@68 {
diff --git a/arch/powerpc/boot/dts/fsl/t4240rdb.dts b/arch/powerpc/boot/dts/fsl/t4240rdb.dts
index 8166c660712a..15eb0a3f7290 100644
--- a/arch/powerpc/boot/dts/fsl/t4240rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t4240rdb.dts
@@ -130,15 +130,15 @@
reg = <0x2f>;
};
eeprom@52 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x52>;
};
eeprom@54 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x54>;
};
eeprom@56 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x56>;
};
rtc@68 {
diff --git a/arch/powerpc/boot/dts/fsp2.dts b/arch/powerpc/boot/dts/fsp2.dts
new file mode 100644
index 000000000000..475953ada707
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsp2.dts
@@ -0,0 +1,608 @@
+/*
+ * Device Tree Source for FSP2
+ *
+ * Copyright 2010,2012 IBM Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without
+ * any warranty of any kind, whether express or implied.
+ */
+
+
+/dts-v1/;
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ model = "ibm,fsp2";
+ compatible = "ibm,fsp2";
+ dcr-parent = <&{/cpus/cpu@0}>;
+
+ aliases {
+ ethernet0 = &EMAC0;
+ ethernet1 = &EMAC1;
+ serial0 = &UART0;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ model = "PowerPC, 476FSP2";
+ reg = <0x0>;
+ clock-frequency = <0>; /* Filled in by cuboot */
+ timebase-frequency = <0>; /* Filled in by cuboot */
+ i-cache-line-size = <32>;
+ d-cache-line-size = <32>;
+ d-cache-size = <32768>;
+ i-cache-size = <32768>;
+ dcr-controller;
+ dcr-access-method = "native";
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x00000000 0x00000000>; /* Filled in by
+ cuboot */
+ };
+
+ clocks {
+ mmc_clk: mmc_clk {
+ compatible = "fixed-clock";
+ clock-frequency = <50000000>;
+ clock-output-names = "mmc_clk";
+ };
+ };
+
+ UIC0: uic0 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <0>;
+ dcr-reg = <0x2c0 0x8>;
+ };
+
+ /* "interrupts" field is <bit level bit level>
+ first pair is non-critical, second is critical */
+ UIC1_0: uic1_0 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <1>;
+ dcr-reg = <0x2c8 0x8>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <21 0x4 4 0x84>;
+ };
+
+ /* PSI and DMA */
+ UIC1_1: uic1_1 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <2>;
+ dcr-reg = <0x350 0x8>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <22 0x4 5 0x84>;
+ };
+
+ /* Ethernet and USB */
+ UIC1_2: uic1_2 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <3>;
+ dcr-reg = <0x358 0x8>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <23 0x4 6 0x84>;
+ };
+
+ /* PLB Errors */
+ UIC1_3: uic1_3 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <4>;
+ dcr-reg = <0x360 0x8>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <24 0x4 7 0x84>;
+ };
+
+ UIC1_4: uic1_4 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <5>;
+ dcr-reg = <0x368 0x8>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <25 0x4 8 0x84>;
+ };
+
+ UIC1_5: uic1_5 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <6>;
+ dcr-reg = <0x370 0x8>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <26 0x4 9 0x84>;
+ };
+
+ /* 2nd level UICs for FSI */
+ UIC2_0: uic2_0 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <7>;
+ dcr-reg = <0x2d0 0x8>;
+ interrupt-parent = <&UIC1_0>;
+ interrupts = <16 0x4 0 0x84>;
+ };
+
+ UIC2_1: uic2_1 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <8>;
+ dcr-reg = <0x2d8 0x8>;
+ interrupt-parent = <&UIC1_0>;
+ interrupts = <17 0x4 1 0x84>;
+ };
+
+ UIC2_2: uic2_2 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <9>;
+ dcr-reg = <0x2e0 0x8>;
+ interrupt-parent = <&UIC1_0>;
+ interrupts = <18 0x4 2 0x84>;
+ };
+
+ UIC2_3: uic2_3 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <10>;
+ dcr-reg = <0x2e8 0x8>;
+ interrupt-parent = <&UIC1_0>;
+ interrupts = <19 0x4 3 0x84>;
+ };
+
+ UIC2_4: uic2_4 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <11>;
+ dcr-reg = <0x2f0 0x8>;
+ interrupt-parent = <&UIC1_0>;
+ interrupts = <20 0x4 4 0x84>;
+ };
+
+ UIC2_5: uic2_5 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <12>;
+ dcr-reg = <0x2f8 0x8>;
+ interrupt-parent = <&UIC1_0>;
+ interrupts = <21 0x4 5 0x84>;
+ };
+
+ UIC2_6: uic2_6 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <13>;
+ dcr-reg = <0x300 0x8>;
+ interrupt-parent = <&UIC1_0>;
+ interrupts = <22 0x4 6 0x84>;
+ };
+
+ UIC2_7: uic2_7 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <14>;
+ dcr-reg = <0x308 0x8>;
+ interrupt-parent = <&UIC1_0>;
+ interrupts = <23 0x4 7 0x84>;
+ };
+
+ UIC2_8: uic2_8 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <15>;
+ dcr-reg = <0x310 0x8>;
+ interrupt-parent = <&UIC1_0>;
+ interrupts = <24 0x4 8 0x84>;
+ };
+
+ UIC2_9: uic2_9 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <16>;
+ dcr-reg = <0x318 0x8>;
+ interrupt-parent = <&UIC1_0>;
+ interrupts = <25 0x4 9 0x84>;
+ };
+
+ UIC2_10: uic2_10 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <17>;
+ dcr-reg = <0x320 0x8>;
+ interrupt-parent = <&UIC1_0>;
+ interrupts = <26 0x4 10 0x84>;
+ };
+
+ UIC2_11: uic2_11 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <18>;
+ dcr-reg = <0x328 0x8>;
+ interrupt-parent = <&UIC1_0>;
+ interrupts = <27 0x4 11 0x84>;
+ };
+
+ UIC2_12: uic2_12 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <19>;
+ dcr-reg = <0x330 0x8>;
+ interrupt-parent = <&UIC1_0>;
+ interrupts = <28 0x4 12 0x84>;
+ };
+
+ UIC2_13: uic2_13 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <20>;
+ dcr-reg = <0x338 0x8>;
+ interrupt-parent = <&UIC1_0>;
+ interrupts = <29 0x4 13 0x84>;
+ };
+
+ UIC2_14: uic2_14 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <21>;
+ dcr-reg = <0x340 0x8>;
+ interrupt-parent = <&UIC1_0>;
+ interrupts = <30 0x4 14 0x84>;
+ };
+
+ UIC2_15: uic2_15 {
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+
+ compatible = "ibm,uic";
+ interrupt-controller;
+ cell-index = <22>;
+ dcr-reg = <0x348 0x8>;
+ interrupt-parent = <&UIC1_0>;
+ interrupts = <31 0x4 15 0x84>;
+ };
+
+ mmc0: sdhci@020c0000 {
+ compatible = "st,sdhci-stih407", "st,sdhci";
+ status = "disabled";
+ reg = <0x020c0000 0x20000>;
+ reg-names = "mmc";
+ interrupt-parent = <&UIC1_3>;
+ interrupts = <21 0x4 22 0x4>;
+ interrupt-names = "mmcirq";
+ pinctrl-names = "default";
+ pinctrl-0 = <>;
+ clock-names = "mmc";
+ clocks = <&mmc_clk>;
+ };
+
+ plb6 {
+ compatible = "ibm,plb6";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges;
+
+ MCW0: memory-controller-wrapper {
+ compatible = "ibm,cw-476fsp2";
+ dcr-reg = <0x11111800 0x40>;
+ };
+
+ MCIF0: memory-controller {
+ compatible = "ibm,sdram-476fsp2", "ibm,sdram-4xx-ddr3";
+ dcr-reg = <0x11120000 0x10000>;
+ mcer-device = <&MCW0>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <10 0x84 /* ECC UE */
+ 11 0x84>; /* ECC CE */
+ };
+ };
+
+ plb4 {
+ compatible = "ibm,plb4";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00000000 0x00000010 0x00000000 0x80000000
+ 0x80000000 0x00000010 0x80000000 0x80000000>;
+ clock-frequency = <333333334>;
+
+ plb6-system-hung-irq {
+ compatible = "ibm,bus-error-irq";
+ #interrupt-cells = <2>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <0 0x84>;
+ };
+
+ l2-error-irq {
+ compatible = "ibm,bus-error-irq";
+ #interrupt-cells = <2>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <20 0x84>;
+ };
+
+ plb6-plb4-irq {
+ compatible = "ibm,bus-error-irq";
+ #interrupt-cells = <2>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <1 0x84>;
+ };
+
+ plb4-ahb-irq {
+ compatible = "ibm,bus-error-irq";
+ #interrupt-cells = <2>;
+ interrupt-parent = <&UIC1_3>;
+ interrupts = <20 0x84>;
+ };
+
+ opbd-error-irq {
+ compatible = "ibm,opbd-error-irq";
+ #interrupt-cells = <2>;
+ interrupt-parent = <&UIC1_4>;
+ interrupts = <5 0x84>;
+ };
+
+ cmu-error-irq {
+ compatible = "ibm,cmu-error-irq";
+ #interrupt-cells = <2>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <28 0x84>;
+ };
+
+ conf-error-irq {
+ compatible = "ibm,conf-error-irq";
+ #interrupt-cells = <2>;
+ interrupt-parent = <&UIC1_4>;
+ interrupts = <11 0x84>;
+ };
+
+ mc-ue-irq {
+ compatible = "ibm,mc-ue-irq";
+ #interrupt-cells = <2>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <10 0x84>;
+ };
+
+ reset-warning-irq {
+ compatible = "ibm,reset-warning-irq";
+ #interrupt-cells = <2>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <17 0x84>;
+ };
+
+ MAL0: mcmal0 {
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ compatible = "ibm,mcmal";
+ dcr-reg = <0x80 0x80>;
+ num-tx-chans = <1>;
+ num-rx-chans = <1>;
+ interrupt-parent = <&MAL0>;
+ interrupts = <0 1 2 3 4>;
+ /* index interrupt-parent interrupt# type */
+ interrupt-map = </*TXEOB*/ 0 &UIC1_2 4 0x4
+ /*RXEOB*/ 1 &UIC1_2 3 0x4
+ /*SERR*/ 2 &UIC1_2 7 0x4
+ /*TXDE*/ 3 &UIC1_2 6 0x4
+ /*RXDE*/ 4 &UIC1_2 5 0x4>;
+ };
+
+ MAL1: mcmal1 {
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ compatible = "ibm,mcmal";
+ dcr-reg = <0x100 0x80>;
+ num-tx-chans = <1>;
+ num-rx-chans = <1>;
+ interrupt-parent = <&MAL1>;
+ interrupts = <0 1 2 3 4>;
+ /* index interrupt-parent interrupt# type */
+ interrupt-map = </*TXEOB*/ 0 &UIC1_2 12 0x4
+ /*RXEOB*/ 1 &UIC1_2 11 0x4
+ /*SERR*/ 2 &UIC1_2 15 0x4
+ /*TXDE*/ 3 &UIC1_2 14 0x4
+ /*RXDE*/ 4 &UIC1_2 13 0x4>;
+ };
+
+ opb {
+ compatible = "ibm,opb";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges; // pass-thru to parent bus
+ clock-frequency = <83333334>;
+
+ EMAC0: ethernet@b0000000 {
+ linux,network-index = <0>;
+ device_type = "network";
+ compatible = "ibm,emac4sync";
+ has-inverted-stacr-oc;
+ interrupt-parent = <&UIC1_2>;
+ interrupts = <1 0x4 0 0x4>;
+ reg = <0xb0000000 0x100>;
+ local-mac-address = [000000000000]; /* Filled in by
+ cuboot */
+ mal-device = <&MAL0>;
+ mal-tx-channel = <0>;
+ mal-rx-channel = <0>;
+ cell-index = <0>;
+ max-frame-size = <1500>;
+ rx-fifo-size = <4096>;
+ tx-fifo-size = <4096>;
+ rx-fifo-size-gige = <16384>;
+ tx-fifo-size-gige = <8192>;
+ phy-address = <1>;
+ phy-mode = "rgmii";
+ phy-map = <00000003>;
+ rgmii-device = <&RGMII>;
+ rgmii-channel = <0>;
+ };
+
+ EMAC1: ethernet@b0000100 {
+ linux,network-index = <1>;
+ device_type = "network";
+ compatible = "ibm,emac4sync";
+ has-inverted-stacr-oc;
+ interrupt-parent = <&UIC1_2>;
+ interrupts = <9 0x4 8 0x4>;
+ reg = <0xb0000100 0x100>;
+ local-mac-address = [000000000000]; /* Filled in by
+ cuboot */
+ mal-device = <&MAL1>;
+ mal-tx-channel = <0>;
+ mal-rx-channel = <0>;
+ cell-index = <1>;
+ max-frame-size = <1500>;
+ rx-fifo-size = <4096>;
+ tx-fifo-size = <4096>;
+ rx-fifo-size-gige = <16384>;
+ tx-fifo-size-gige = <8192>;
+ phy-address = <2>;
+ phy-mode = "rgmii";
+ phy-map = <00000003>;
+ rgmii-device = <&RGMII>;
+ rgmii-channel = <1>;
+ };
+
+ RGMII: rgmii@b0000600 {
+ compatible = "ibm,rgmii";
+ has-mdio;
+ reg = <0xb0000600 0x8>;
+ };
+
+ UART0: serial@b0020000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0xb0020000 0x8>;
+ virtual-reg = <0xb0020000>;
+ clock-frequency = <20833333>;
+ current-speed = <115200>;
+ interrupt-parent = <&UIC0>;
+ interrupts = <31 0x4>;
+ };
+ };
+
+ OHCI1: ohci@02040000 {
+ compatible = "ohci-le";
+ reg = <0x02040000 0xa0>;
+ interrupt-parent = <&UIC1_3>;
+ interrupts = <28 0x8 29 0x8>;
+ };
+
+ OHCI2: ohci@02080000 {
+ compatible = "ohci-le";
+ reg = <0x02080000 0xa0>;
+ interrupt-parent = <&UIC1_3>;
+ interrupts = <30 0x8 31 0x8>;
+ };
+
+ EHCI: ehci@02000000 {
+ compatible = "usb-ehci";
+ reg = <0x02000000 0xa4>;
+ interrupt-parent = <&UIC1_3>;
+ interrupts = <23 0x4>;
+ };
+
+ };
+
+ chosen {
+ linux,stdout-path = "/plb/opb/serial@b0020000";
+ bootargs = "console=ttyS0,115200 rw log_buf_len=32768 debug";
+ };
+};
diff --git a/arch/powerpc/boot/dts/mpc5121ads.dts b/arch/powerpc/boot/dts/mpc5121ads.dts
index 75888ce2c792..1e81a7e32d18 100644
--- a/arch/powerpc/boot/dts/mpc5121ads.dts
+++ b/arch/powerpc/boot/dts/mpc5121ads.dts
@@ -9,7 +9,7 @@
* option) any later version.
*/
-#include <mpc5121.dtsi>
+#include "mpc5121.dtsi"
/ {
model = "mpc5121ads";
@@ -94,7 +94,7 @@
};
eeprom@50 {
- compatible = "at,24c32";
+ compatible = "atmel,24c32";
reg = <0x50>;
};
diff --git a/arch/powerpc/boot/dts/mpc8308_p1m.dts b/arch/powerpc/boot/dts/mpc8308_p1m.dts
index 57f86cdf9f36..cab933b3957a 100644
--- a/arch/powerpc/boot/dts/mpc8308_p1m.dts
+++ b/arch/powerpc/boot/dts/mpc8308_p1m.dts
@@ -123,7 +123,7 @@
interrupt-parent = <&ipic>;
dfsrr;
fram@50 {
- compatible = "ramtron,24c64";
+ compatible = "ramtron,24c64", "atmel,24c64";
reg = <0x50>;
};
};
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
index 90aed3ac2f69..648a85858eb5 100644
--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
@@ -92,7 +92,7 @@
dfsrr;
eeprom: at24@50 {
- compatible = "st,24c256";
+ compatible = "st,24c256", "atmel,24c256";
reg = <0x50>;
};
@@ -130,7 +130,7 @@
};
spd: at24@51 {
- compatible = "at24,spd";
+ compatible = "atmel,spd";
reg = <0x51>;
};
diff --git a/arch/powerpc/boot/dts/mpc8377_rdb.dts b/arch/powerpc/boot/dts/mpc8377_rdb.dts
index e32613963ab0..5e85d8c93bca 100644
--- a/arch/powerpc/boot/dts/mpc8377_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8377_rdb.dts
@@ -150,7 +150,7 @@
};
at24@50 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x50>;
};
diff --git a/arch/powerpc/boot/dts/mpc8377_wlan.dts b/arch/powerpc/boot/dts/mpc8377_wlan.dts
index c0c790168b96..fee15fcbb46f 100644
--- a/arch/powerpc/boot/dts/mpc8377_wlan.dts
+++ b/arch/powerpc/boot/dts/mpc8377_wlan.dts
@@ -135,7 +135,7 @@
dfsrr;
at24@50 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x50>;
};
diff --git a/arch/powerpc/boot/dts/mpc8378_rdb.dts b/arch/powerpc/boot/dts/mpc8378_rdb.dts
index 71842fcd621f..e973d61956b9 100644
--- a/arch/powerpc/boot/dts/mpc8378_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8378_rdb.dts
@@ -150,7 +150,7 @@
};
at24@50 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x50>;
};
diff --git a/arch/powerpc/boot/dts/mpc8379_rdb.dts b/arch/powerpc/boot/dts/mpc8379_rdb.dts
index e442a29b2fe0..ed5d12ff2ee0 100644
--- a/arch/powerpc/boot/dts/mpc8379_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8379_rdb.dts
@@ -148,7 +148,7 @@
};
at24@50 {
- compatible = "at24,24c256";
+ compatible = "atmel,24c256";
reg = <0x50>;
};
diff --git a/arch/powerpc/boot/dts/pcm030.dts b/arch/powerpc/boot/dts/pcm030.dts
index 192e66af0001..836e47cc4bed 100644
--- a/arch/powerpc/boot/dts/pcm030.dts
+++ b/arch/powerpc/boot/dts/pcm030.dts
@@ -71,7 +71,7 @@
reg = <0x51>;
};
eeprom@52 {
- compatible = "catalyst,24c32";
+ compatible = "catalyst,24c32", "atmel,24c32";
reg = <0x52>;
pagesize = <32>;
};
diff --git a/arch/powerpc/boot/dts/pcm032.dts b/arch/powerpc/boot/dts/pcm032.dts
index 96b139bf50e9..576249bf2fb9 100644
--- a/arch/powerpc/boot/dts/pcm032.dts
+++ b/arch/powerpc/boot/dts/pcm032.dts
@@ -75,7 +75,7 @@
reg = <0x51>;
};
eeprom@52 {
- compatible = "catalyst,24c32";
+ compatible = "catalyst,24c32", "atmel,24c32";
reg = <0x52>;
pagesize = <32>;
};
diff --git a/arch/powerpc/boot/dts/pdm360ng.dts b/arch/powerpc/boot/dts/pdm360ng.dts
index 0cec7244abe7..445b88114009 100644
--- a/arch/powerpc/boot/dts/pdm360ng.dts
+++ b/arch/powerpc/boot/dts/pdm360ng.dts
@@ -13,7 +13,7 @@
* option) any later version.
*/
-#include <mpc5121.dtsi>
+#include "mpc5121.dtsi"
/ {
model = "pdm360ng";
diff --git a/arch/powerpc/boot/dts/sequoia.dts b/arch/powerpc/boot/dts/sequoia.dts
index b1d329246b08..e41b88a5eaee 100644
--- a/arch/powerpc/boot/dts/sequoia.dts
+++ b/arch/powerpc/boot/dts/sequoia.dts
@@ -229,7 +229,7 @@
};
partition@84000 {
label = "user";
- reg = <0x00000000 0x01f7c000>;
+ reg = <0x00084000 0x01f7c000>;
};
};
};
diff --git a/arch/powerpc/boot/dts/warp.dts b/arch/powerpc/boot/dts/warp.dts
index e576ee85c42f..ea9053ef4819 100644
--- a/arch/powerpc/boot/dts/warp.dts
+++ b/arch/powerpc/boot/dts/warp.dts
@@ -238,7 +238,7 @@
/* This will create 52 and 53 */
at24@52 {
- compatible = "at,24c04";
+ compatible = "atmel,24c04";
reg = <0x52>;
};
};
diff --git a/arch/powerpc/boot/ppc_asm.h b/arch/powerpc/boot/ppc_asm.h
index b03373d8b386..68e388ee94fe 100644
--- a/arch/powerpc/boot/ppc_asm.h
+++ b/arch/powerpc/boot/ppc_asm.h
@@ -67,13 +67,15 @@
#define MSR_LE 0x0000000000000001
#define FIXUP_ENDIAN \
- tdi 0, 0, 0x48; /* Reverse endian of b . + 8 */ \
- b $+36; /* Skip trampoline if endian is good */ \
- .long 0x05009f42; /* bcl 20,31,$+4 */ \
- .long 0xa602487d; /* mflr r10 */ \
- .long 0x1c004a39; /* addi r10,r10,28 */ \
+ tdi 0,0,0x48; /* Reverse endian of b . + 8 */ \
+ b $+44; /* Skip trampoline if endian is good */ \
.long 0xa600607d; /* mfmsr r11 */ \
.long 0x01006b69; /* xori r11,r11,1 */ \
+ .long 0x00004039; /* li r10,0 */ \
+ .long 0x6401417d; /* mtmsrd r10,1 */ \
+ .long 0x05009f42; /* bcl 20,31,$+4 */ \
+ .long 0xa602487d; /* mflr r10 */ \
+ .long 0x14004a39; /* addi r10,r10,20 */ \
.long 0xa6035a7d; /* mtsrr0 r10 */ \
.long 0xa6037b7d; /* mtsrr1 r11 */ \
.long 0x2400004c /* rfid */
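
The reworked FIXUP_ENDIAN trampoline above (and its twin in the later arch/powerpc/include/asm/ppc_asm.h hunk) stores each trampoline word byte-reversed so it decodes correctly when the CPU starts in the opposite endianness; the skip branch grows from $+36 to $+44 because two instructions (li r10,0; mtmsrd r10,1) are added, and the bcl-relative addi drops from 28 to 20 because the mfmsr/xori pair now precedes the bcl. A minimal standalone sketch, not part of the patch, checking one of the encodings: mfmsr r11 assembles to 0x7d6000a6, which byte-reversed is the .long 0xa600607d emitted above.

#include <stdint.h>
#include <stdio.h>

/* Reverse the byte order of a 32-bit instruction word. */
static uint32_t byterev32(uint32_t insn)
{
	return ((insn & 0x000000ffu) << 24) |
	       ((insn & 0x0000ff00u) << 8)  |
	       ((insn & 0x00ff0000u) >> 8)  |
	       ((insn & 0xff000000u) >> 24);
}

int main(void)
{
	/* mfmsr r11 is 0x7d6000a6; byte-reversed it matches .long 0xa600607d. */
	printf("0x%08x\n", byterev32(0x7d6000a6));
	return 0;
}
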
diff --git a/arch/powerpc/configs/44x/fsp2_defconfig b/arch/powerpc/configs/44x/fsp2_defconfig
new file mode 100644
index 000000000000..e8e6a6999852
--- /dev/null
+++ b/arch/powerpc/configs/44x/fsp2_defconfig
@@ -0,0 +1,126 @@
+CONFIG_44x=y
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+# CONFIG_FHANDLE is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PPC_47x=y
+# CONFIG_EBONY is not set
+CONFIG_FSP2=y
+CONFIG_476FPE_ERR46=y
+CONFIG_SWIOTLB=y
+CONFIG_KEXEC=y
+CONFIG_CRASH_DUMP=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="ip=on rw"
+# CONFIG_SUSPEND is not set
+# CONFIG_PCI is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_IPV6 is not set
+CONFIG_VLAN_8021Q=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=35000
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+# CONFIG_SATA_PMP is not set
+# CONFIG_ATA_SFF is not set
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_IBM_EMAC=m
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=32
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_IBM_IIC=y
+CONFIG_PTP_1588_CLOCK=y
+# CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_BOOKE_WDT=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_MMC=y
+CONFIG_MMC_DEBUG=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_M41T80=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_WBUF_VERIFY=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_DEFAULT="n"
+CONFIG_XZ_DEC=y
+CONFIG_PRINTK_TIME=y
+CONFIG_MESSAGE_LOGLEVEL_DEFAULT=3
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_PCBC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index c0deafc212b8..25d42bd3f114 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -74,6 +74,11 @@ do { \
___p1; \
})
+/*
+ * This must resolve to hwsync on SMP for the context switch path.
+ * See _switch, and core scheduler context switch memory ordering
+ * comments.
+ */
#define smp_mb__before_spinlock() smp_mb()
#include <asm-generic/barrier.h>
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 33a24fdd7958..b750ffef83c7 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -206,68 +206,13 @@ static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
* Return the zero-based bit position (LE, not IBM bit numbering) of
* the most significant 1-bit in a double word.
*/
-static __inline__ __attribute__((const))
-int __ilog2(unsigned long x)
-{
- int lz;
+#define __ilog2(x) ilog2(x)
- asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x));
- return BITS_PER_LONG - 1 - lz;
-}
+#include <asm-generic/bitops/ffz.h>
-static inline __attribute__((const))
-int __ilog2_u32(u32 n)
-{
- int bit;
- asm ("cntlzw %0,%1" : "=r" (bit) : "r" (n));
- return 31 - bit;
-}
+#include <asm-generic/bitops/builtin-__ffs.h>
-#ifdef __powerpc64__
-static inline __attribute__((const))
-int __ilog2_u64(u64 n)
-{
- int bit;
- asm ("cntlzd %0,%1" : "=r" (bit) : "r" (n));
- return 63 - bit;
-}
-#endif
-
-/*
- * Determines the bit position of the least significant 0 bit in the
- * specified double word. The returned bit position will be
- * zero-based, starting from the right side (63/31 - 0).
- */
-static __inline__ unsigned long ffz(unsigned long x)
-{
- /* no zero exists anywhere in the 8 byte area. */
- if ((x = ~x) == 0)
- return BITS_PER_LONG;
-
- /*
- * Calculate the bit position of the least significant '1' bit in x
- * (since x has been changed this will actually be the least significant
- * '0' bit in * the original x). Note: (x & -x) gives us a mask that
- * is the least significant * (RIGHT-most) 1-bit of the value in x.
- */
- return __ilog2(x & -x);
-}
-
-static __inline__ unsigned long __ffs(unsigned long x)
-{
- return __ilog2(x & -x);
-}
-
-/*
- * ffs: find first bit set. This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-static __inline__ int ffs(int x)
-{
- unsigned long i = (unsigned long)x;
- return __ilog2(i & -i) + 1;
-}
+#include <asm-generic/bitops/builtin-ffs.h>
/*
* fls: find last (most-significant) bit set.
@@ -275,33 +220,15 @@ static __inline__ int ffs(int x)
*/
static __inline__ int fls(unsigned int x)
{
- int lz;
-
- asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
- return 32 - lz;
+ return 32 - __builtin_clz(x);
}
-static __inline__ unsigned long __fls(unsigned long x)
-{
- return __ilog2(x);
-}
+#include <asm-generic/bitops/builtin-__fls.h>
-/*
- * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
- * instruction; for 32-bit we use the generic version, which does two
- * 32-bit fls calls.
- */
-#ifdef __powerpc64__
static __inline__ int fls64(__u64 x)
{
- int lz;
-
- asm ("cntlzd %0,%1" : "=r" (lz) : "r" (x));
- return 64 - lz;
+ return 64 - __builtin_clzll(x);
}
-#else
-#include <asm-generic/bitops/fls64.h>
-#endif /* __powerpc64__ */
#ifdef CONFIG_PPC64
unsigned int __arch_hweight8(unsigned int w);
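
The bitops.h rework above replaces the hand-coded cntlzw/cntlzd inline asm with compiler builtins and asm-generic helpers. A standalone sketch, not from the patch, of the resulting fls() behaviour; the explicit zero guard is needed here because portable C leaves __builtin_clz(0) undefined, whereas on powerpc GCC expands the builtin to cntlzw, which is why the kernel version can omit it.

#include <stdio.h>

/* fls() as "32 - count-leading-zeros", mirroring the new kernel definition. */
static int fls_sketch(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	printf("%d %d %d\n", fls_sketch(1), fls_sketch(0x10), fls_sketch(0x80000000u));
	/* expected output: 1 5 32 */
	return 0;
}
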
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index d310546e5d9d..a120e7f8d535 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -31,7 +31,8 @@ extern struct kmem_cache *pgtable_cache[];
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+ return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 26ed228d4dc6..7fb755880409 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -297,6 +297,8 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
pmd_t **pmdp);
+int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);
+
/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);}
static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); }
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 4e957b027fe0..0ce513f2926f 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -89,6 +89,9 @@ static inline int hash__pgd_bad(pgd_t pgd)
{
return (pgd_val(pgd) == 0);
}
+#ifdef CONFIG_STRICT_KERNEL_RWX
+extern void hash__mark_rodata_ro(void);
+#endif
extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long pte, int huge);
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index 6666cd366596..5c28bd6f2ae1 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -50,4 +50,14 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
else
return entry;
}
+
+#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
+static inline bool gigantic_page_supported(void)
+{
+ if (radix_enabled())
+ return true;
+ return false;
+}
+#endif
+
#endif
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index cd5e7aa8cc34..20b1485ff1e8 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -53,10 +53,11 @@ extern void __tlb_remove_table(void *_table);
static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
- return (pgd_t *)__get_free_page(PGALLOC_GFP);
+ return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
struct page *page;
- page = alloc_pages(PGALLOC_GFP | __GFP_REPEAT, 4);
+ page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_REPEAT),
+ 4);
if (!page)
return NULL;
return (pgd_t *) page_address(page);
@@ -76,7 +77,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
if (radix_enabled())
return radix__pgd_alloc(mm);
- return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+ return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -93,7 +95,8 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
+ return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -119,7 +122,8 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
+ return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -168,7 +172,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
struct page *page;
pte_t *pte;
- pte = pte_alloc_one_kernel(mm, address);
+ pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
if (!pte)
return NULL;
page = virt_to_page(pte);
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 85bc9875c3be..c0737c86a362 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -5,6 +5,7 @@
#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
+#include <linux/bug.h>
#endif
/*
@@ -79,6 +80,9 @@
#define _PAGE_SOFT_DIRTY _RPAGE_SW3 /* software: software dirty tracking */
#define _PAGE_SPECIAL _RPAGE_SW2 /* software: special page */
+#define _PAGE_DEVMAP _RPAGE_SW1 /* software: ZONE_DEVICE page */
+#define __HAVE_ARCH_PTE_DEVMAP
+
/*
* Drivers request for cache inhibited pte mapping using _PAGE_NO_CACHE
* Instead of fixing all of them, add an alternate define which
@@ -599,6 +603,16 @@ static inline pte_t pte_mkhuge(pte_t pte)
return pte;
}
+static inline pte_t pte_mkdevmap(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_SPECIAL|_PAGE_DEVMAP);
+}
+
+static inline int pte_devmap(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DEVMAP));
+}
+
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
/* FIXME!! check whether this need to be a conditional */
@@ -1146,6 +1160,37 @@ static inline bool arch_needs_pgtable_deposit(void)
return true;
}
+
+static inline pmd_t pmd_mkdevmap(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
+}
+
+static inline int pmd_devmap(pmd_t pmd)
+{
+ return pte_devmap(pmd_pte(pmd));
+}
+
+static inline int pud_devmap(pud_t pud)
+{
+ return 0;
+}
+
+static inline int pgd_devmap(pgd_t pgd)
+{
+ return 0;
+}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static inline const int pud_pfn(pud_t pud)
+{
+ /*
+ * Currently all calls to pud_pfn() are gated around a pud_devmap()
+ * check so this should never be used. If it grows another user we
+ * want to know about it.
+ */
+ BUILD_BUG();
+ return 0;
+}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index ac16d1943022..487709ff6875 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -116,6 +116,10 @@
#define RADIX_PUD_TABLE_SIZE (sizeof(pud_t) << RADIX_PUD_INDEX_SIZE)
#define RADIX_PGD_TABLE_SIZE (sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE)
+#ifdef CONFIG_STRICT_KERNEL_RWX
+extern void radix__mark_rodata_ro(void);
+#endif
+
static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
unsigned long set)
{
@@ -252,7 +256,7 @@ static inline int radix__pgd_bad(pgd_t pgd)
static inline int radix__pmd_trans_huge(pmd_t pmd)
{
- return !!(pmd_val(pmd) & _PAGE_PTE);
+ return (pmd_val(pmd) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
}
static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
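
The radix__pmd_trans_huge() change above excludes device-map PMDs, which carry both _PAGE_PTE and _PAGE_DEVMAP, from being treated as transparent huge pages. A standalone sketch of the bit test using placeholder bit positions (the real _RPAGE_* encodings are not reproduced here):

#include <stdbool.h>
#include <stdio.h>

/* Placeholder bit values for illustration only. */
#define PAGE_PTE	(1ul << 62)
#define PAGE_DEVMAP	(1ul << 60)

static bool pmd_trans_huge_sketch(unsigned long pmd_val)
{
	/* THP: _PAGE_PTE set and _PAGE_DEVMAP clear. */
	return (pmd_val & (PAGE_PTE | PAGE_DEVMAP)) == PAGE_PTE;
}

int main(void)
{
	printf("%d %d\n",
	       pmd_trans_huge_sketch(PAGE_PTE),               /* 1: huge */
	       pmd_trans_huge_sketch(PAGE_PTE | PAGE_DEVMAP)); /* 0: devmap */
	return 0;
}
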
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index abef812de7f8..5482928eea1b 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -83,8 +83,16 @@ static inline unsigned long ppc_function_entry(void *func)
* On PPC64 ABIv1 the function pointer actually points to the
* function's descriptor. The first entry in the descriptor is the
* address of the function text.
+ *
+ * However, we may also receive a pointer to an assembly symbol. To

+ * detect that, we first check if the function pointer we receive
+ * already points to kernel/module text and we only dereference it
+ * if it doesn't.
*/
- return ((func_descr_t *)func)->entry;
+ if (kernel_text_address((unsigned long)func))
+ return (unsigned long)func;
+ else
+ return ((func_descr_t *)func)->entry;
#else
return (unsigned long)func;
#endif
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 4f2df589ec1d..f256e1d14a14 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -109,7 +109,6 @@ struct compat_statfs {
int f_spare[4];
};
-#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
#define COMPAT_RLIM_INFINITY 0xffffffff
typedef u32 compat_old_sigset_t;
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index f70cbfe0ec04..9f2ae0d25e15 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -56,6 +56,19 @@ static inline void ppc_msgsync(void)
: : "i" (CPU_FTR_HVMODE|CPU_FTR_ARCH_300));
}
+static inline void _ppc_msgclr(u32 msg)
+{
+ __asm__ __volatile__ (ASM_FTR_IFSET(PPC_MSGCLR(%1), PPC_MSGCLRP(%1), %0)
+ : : "i" (CPU_FTR_HVMODE), "r" (msg));
+}
+
+static inline void ppc_msgclr(enum ppc_dbell type)
+{
+ u32 msg = PPC_DBELL_TYPE(type);
+
+ _ppc_msgclr(msg);
+}
+
#else /* CONFIG_PPC_BOOK3S */
#define PPC_DBELL_MSGTYPE PPC_DBELL
diff --git a/arch/powerpc/include/asm/delay.h b/arch/powerpc/include/asm/delay.h
index 52e4d54da2a9..3df4417dd9c8 100644
--- a/arch/powerpc/include/asm/delay.h
+++ b/arch/powerpc/include/asm/delay.h
@@ -2,6 +2,7 @@
#define _ASM_POWERPC_DELAY_H
#ifdef __KERNEL__
+#include <linux/processor.h>
#include <asm/time.h>
/*
@@ -58,11 +59,18 @@ extern void udelay(unsigned long usecs);
typeof(condition) __ret; \
unsigned long __loops = tb_ticks_per_usec * timeout; \
unsigned long __start = get_tbl(); \
- while (!(__ret = (condition)) && (tb_ticks_since(__start) <= __loops)) \
- if (delay) \
+ \
+ if (delay) { \
+ while (!(__ret = (condition)) && \
+ (tb_ticks_since(__start) <= __loops)) \
udelay(delay); \
- else \
- cpu_relax(); \
+ } else { \
+ spin_begin(); \
+ while (!(__ret = (condition)) && \
+ (tb_ticks_since(__start) <= __loops)) \
+ spin_cpu_relax(); \
+ spin_end(); \
+ } \
if (!__ret) \
__ret = (condition); \
__ret; \
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 181a095468e4..eaece3d3e225 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -17,10 +17,6 @@
#include <asm/io.h>
#include <asm/swiotlb.h>
-#ifdef CONFIG_PPC64
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-#endif
-
/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
@@ -116,7 +112,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
#define HAVE_ARCH_DMA_SET_MASK 1
extern int dma_set_mask(struct device *dev, u64 dma_mask);
-extern int __dma_set_mask(struct device *dev, u64 dma_mask);
extern u64 __dma_get_required_mask(struct device *dev);
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 183d73b6ed99..9a318973af05 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -36,20 +36,38 @@
*/
#include <asm/head-64.h>
+/* PACA save area offsets (exgen, exmc, etc) */
#define EX_R9 0
#define EX_R10 8
#define EX_R11 16
#define EX_R12 24
#define EX_R13 32
-#define EX_SRR0 40
-#define EX_DAR 48
-#define EX_DSISR 56
-#define EX_CCR 60
-#define EX_R3 64
-#define EX_LR 72
-#define EX_CFAR 80
-#define EX_PPR 88 /* SMT thread status register (priority) */
-#define EX_CTR 96
+#define EX_DAR 40
+#define EX_DSISR 48
+#define EX_CCR 52
+#define EX_CFAR 56
+#define EX_PPR 64
+#if defined(CONFIG_RELOCATABLE)
+#define EX_CTR 72
+#define EX_SIZE 10 /* size in u64 units */
+#else
+#define EX_SIZE 9 /* size in u64 units */
+#endif
+
+/*
+ * EX_LR is only used in EXSLB and where it does not overlap with EX_DAR
+ * EX_CCR similarly with DSISR, but being 4 byte registers there is a hole
+ * in the save area so it's not necessary to overlap them. Could be used
+ * for future savings though if another 4 byte register was to be saved.
+ */
+#define EX_LR EX_DAR
+
+/*
+ * EX_R3 is only used by the bad_stack handler. bad_stack reloads and
+ * saves DAR from SPRN_DAR, and EX_DAR is not used. So EX_R3 can overlap
+ * with EX_DAR.
+ */
+#define EX_R3 EX_DAR
#ifdef CONFIG_RELOCATABLE
#define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \
@@ -236,6 +254,19 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define kvmppc_interrupt kvmppc_interrupt_pr
#endif
+/*
+ * Branch to label using its 0xC000 address. This results in an instruction
+ * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
+ * on using mtmsr rather than rfid.
+ *
+ * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
+ * load KBASE for a slight optimisation.
+ */
+#define BRANCH_TO_C000(reg, label) \
+ __LOAD_HANDLER(reg, label); \
+ mtctr reg; \
+ bctr
+
#ifdef CONFIG_RELOCATABLE
#define BRANCH_TO_COMMON(reg, label) \
__LOAD_HANDLER(reg, label); \
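
The reshuffled PACA exception save area above packs DSISR/CCR into 4-byte slots, overlays EX_LR and EX_R3 on EX_DAR, and exports EX_SIZE so paca_struct (see the later paca.h hunk) can size exgen/exslb/exnmi/exmc from it. A hedged compile-time sketch of the arithmetic, not part of the patch, assuming the offsets shown in the hunk:

/* With CONFIG_RELOCATABLE: EX_CTR = 72, so the area spans 72 + 8 = 80 bytes,
 * i.e. 10 u64 slots (EX_SIZE = 10). Without it, EX_PPR = 64 is the last slot,
 * 64 + 8 = 72 bytes, i.e. 9 u64 slots (EX_SIZE = 9). */
#ifdef CONFIG_RELOCATABLE
_Static_assert(EX_CTR + 8 == EX_SIZE * 8, "EX_SIZE covers the EX_CTR slot");
#else
_Static_assert(EX_PPR + 8 == EX_SIZE * 8, "EX_SIZE covers the EX_PPR slot");
#endif
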
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index 60b91084f33c..ce88bbe1d809 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -43,6 +43,9 @@
#define MIN_BOOT_MEM (((RMA_END < (0x1UL << 28)) ? (0x1UL << 28) : RMA_END) \
+ (0x1UL << 26))
+/* The upper limit percentage for user specified boot memory size (25%) */
+#define MAX_BOOT_MEM_RATIO 4
+
#define memblock_num_regions(memblock_type) (memblock.memblock_type.cnt)
/* Firmware provided dump sections */
@@ -200,6 +203,7 @@ struct fad_crash_memory_ranges {
unsigned long long size;
};
+extern int is_fadump_boot_memory_area(u64 addr, ulong size);
extern int early_init_dt_scan_fw_dump(unsigned long node,
const char *uname, int depth, void *data);
extern int fadump_reserve_mem(void);
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
index 86eb87382031..d81eac5b509f 100644
--- a/arch/powerpc/include/asm/head-64.h
+++ b/arch/powerpc/include/asm/head-64.h
@@ -3,6 +3,7 @@
#include <asm/cache.h>
+#ifdef __ASSEMBLY__
/*
* We can't do CPP stringification and concatenation directly into the section
* name for some reason, so these macros can do it for us.
@@ -49,8 +50,8 @@
* CLOSE_FIXED_SECTION() or elsewhere, there may be something
* unexpected being added there. Remove the '. = x_len' line, rebuild, and
* check what is pushing the section down.
- * - If the build dies in linking, check arch/powerpc/kernel/vmlinux.lds.S
- * for instructions.
+ * - If the build dies in linking, check arch/powerpc/tools/head_check.sh
+ * comments.
* - If the kernel crashes or hangs in very early boot, it could be linker
* stubs at the start of the main text.
*/
@@ -63,11 +64,29 @@
. = 0x0; \
start_##sname:
+/*
+ * .linker_stub_catch section is used to catch linker stubs from being
+ * inserted in our .text section, above the start_text label (which breaks
+ * the ABS_ADDR calculation). See kernel/vmlinux.lds.S and tools/head_check.sh
+ * for more details. We would prefer to just keep a cacheline (0x80), but
+ * 0x100 seems to be how the linker aligns branch stub groups.
+ */
+#ifdef CONFIG_LD_HEAD_STUB_CATCH
+#define OPEN_TEXT_SECTION(start) \
+ .section ".linker_stub_catch","ax",@progbits; \
+linker_stub_catch: \
+ . = 0x4; \
+ text_start = (start) + 0x100; \
+ .section ".text","ax",@progbits; \
+ .balign 0x100; \
+start_text:
+#else
#define OPEN_TEXT_SECTION(start) \
text_start = (start); \
.section ".text","ax",@progbits; \
. = 0x0; \
start_text:
+#endif
#define ZERO_FIXED_SECTION(sname, start, end) \
sname##_start = (start); \
@@ -397,4 +416,6 @@ name:
EXC_COMMON_BEGIN(name); \
STD_EXCEPTION_COMMON(realvec + 0x2, name, hdlr); \
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_POWERPC_HEAD_64_H */
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index eba60416536e..c1dd1929342d 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -129,6 +129,10 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
}
extern bool prep_irq_for_idle(void);
+extern bool prep_irq_for_idle_irqsoff(void);
+extern void irq_set_pending_from_srr1(unsigned long srr1);
+
+#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();
extern void force_external_irq_replay(void);
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 8a8ce220d7d0..20febe0b7f32 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -139,6 +139,8 @@ struct scatterlist;
#ifdef CONFIG_PPC64
+#define IOMMU_MAPPING_ERROR (~(dma_addr_t)0x0)
+
static inline void set_iommu_table_base(struct device *dev,
struct iommu_table *base)
{
@@ -238,6 +240,8 @@ static inline int __init tce_iommu_bus_notifier_init(void)
}
#endif /* !CONFIG_IOMMU_API */
+int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr);
+
#else
static inline void *get_iommu_table_base(struct device *dev)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 2bf35017ffc0..b8d5b8e35244 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -86,7 +86,6 @@ struct kvmppc_vcore {
u16 last_cpu;
u8 vcore_state;
u8 in_guest;
- struct kvmppc_vcore *master_vcore;
struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
struct list_head preempt_list;
spinlock_t lock;
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index b148496ffe36..7cea76f11c26 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -81,7 +81,7 @@ struct kvm_split_mode {
u8 subcore_size;
u8 do_nap;
u8 napped[MAX_SMT_THREADS];
- struct kvmppc_vcore *master_vcs[MAX_SUBCORES];
+ struct kvmppc_vcore *vc[MAX_SUBCORES];
};
/*
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 9c51ac4b8f36..8b3f1238d07f 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -35,6 +35,7 @@
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/hvcall.h>
+#include <asm/mce.h>
#define KVM_MAX_VCPUS NR_CPUS
#define KVM_MAX_VCORES NR_CPUS
@@ -52,8 +53,8 @@
#define KVM_IRQCHIP_NUM_PINS 256
/* PPC-specific vcpu->requests bit members */
-#define KVM_REQ_WATCHDOG 8
-#define KVM_REQ_EPR_EXIT 9
+#define KVM_REQ_WATCHDOG KVM_ARCH_REQ(0)
+#define KVM_REQ_EPR_EXIT KVM_ARCH_REQ(1)
#include <linux/mmu_notifier.h>
@@ -267,6 +268,8 @@ struct kvm_resize_hpt;
struct kvm_arch {
unsigned int lpid;
+ unsigned int smt_mode; /* # vcpus per virtual core */
+ unsigned int emul_smt_mode; /* emulated SMT mode, on P9 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
unsigned int tlb_sets;
struct kvm_hpt_info hpt;
@@ -285,6 +288,7 @@ struct kvm_arch {
cpumask_t need_tlb_flush;
cpumask_t cpu_in_guest;
u8 radix;
+ u8 fwnmi_enabled;
pgd_t *pgtable;
u64 process_table;
struct dentry *debugfs_dir;
@@ -566,6 +570,7 @@ struct kvm_vcpu_arch {
ulong wort;
ulong tid;
ulong psscr;
+ ulong hfscr;
ulong shadow_srr1;
#endif
u32 vrsave; /* also USPRG0 */
@@ -579,7 +584,7 @@ struct kvm_vcpu_arch {
ulong mcsrr0;
ulong mcsrr1;
ulong mcsr;
- u32 dec;
+ ulong dec;
#ifdef CONFIG_BOOKE
u32 decar;
#endif
@@ -710,6 +715,7 @@ struct kvm_vcpu_arch {
unsigned long pending_exceptions;
u8 ceded;
u8 prodded;
+ u8 doorbell_request;
u32 last_inst;
struct swait_queue_head *wqp;
@@ -722,6 +728,7 @@ struct kvm_vcpu_arch {
int prev_cpu;
bool timer_running;
wait_queue_head_t cpu_run;
+ struct machine_check_event mce_evt; /* Valid if trap == 0x200 */
struct kvm_vcpu_arch_shared *shared;
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index e0d88c38602b..ba5fadd6f3c9 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -315,6 +315,8 @@ struct kvmppc_ops {
struct irq_bypass_producer *);
int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
+ int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
+ unsigned long flags);
};
extern struct kvmppc_ops *kvmppc_hv_ops;
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index f90b22c722e1..cd2fc1cc1cc7 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -226,6 +226,7 @@ struct machdep_calls {
extern void e500_idle(void);
extern void power4_idle(void);
extern void power7_idle(void);
+extern void power9_idle(void);
extern void ppc6xx_idle(void);
extern void book3e_idle(void);
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index 81eff8631434..190d69a7f701 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -90,13 +90,14 @@ enum MCE_UserErrorType {
enum MCE_RaErrorType {
MCE_RA_ERROR_INDETERMINATE = 0,
MCE_RA_ERROR_IFETCH = 1,
- MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
- MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 3,
- MCE_RA_ERROR_LOAD = 4,
- MCE_RA_ERROR_STORE = 5,
- MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 6,
- MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 7,
- MCE_RA_ERROR_LOAD_STORE_FOREIGN = 8,
+ MCE_RA_ERROR_IFETCH_FOREIGN = 2,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 3,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 4,
+ MCE_RA_ERROR_LOAD = 5,
+ MCE_RA_ERROR_STORE = 6,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 7,
+ MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 8,
+ MCE_RA_ERROR_LOAD_STORE_FOREIGN = 9,
};
enum MCE_LinkErrorType {
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
index 633139291a48..cc369a70f2bb 100644
--- a/arch/powerpc/include/asm/nohash/32/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -31,7 +31,8 @@ extern struct kmem_cache *pgtable_cache[];
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+ return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 5134ade2e850..91314268f04f 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -340,6 +340,8 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
pmd_t **pmdp);
+int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_POWERPC_NOHASH_32_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
index 897d2e1c8a9b..9721c7867b9c 100644
--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -43,7 +43,8 @@ extern struct kmem_cache *pgtable_cache[];
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
- return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
+ return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
}
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -57,7 +58,8 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
+ return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
}
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -96,7 +98,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
struct page *page;
pte_t *pte;
- pte = pte_alloc_one_kernel(mm, address);
+ pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
if (!pte)
return NULL;
page = virt_to_page(pte);
@@ -189,7 +191,8 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
+ return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
+ pgtable_gfp_flags(mm, GFP_KERNEL));
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index cb3e6242a78c..ef930ba500f9 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -667,12 +667,14 @@ enum {
enum {
OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
- OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2
+ OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2,
+ OPAL_PHB_ERROR_DATA_TYPE_PHB4 = 3
};
enum {
OPAL_P7IOC_NUM_PEST_REGS = 128,
- OPAL_PHB3_NUM_PEST_REGS = 256
+ OPAL_PHB3_NUM_PEST_REGS = 256,
+ OPAL_PHB4_NUM_PEST_REGS = 512
};
struct OpalIoPhbErrorCommon {
@@ -802,6 +804,75 @@ struct OpalIoPhb3ErrorData {
__be64 pestB[OPAL_PHB3_NUM_PEST_REGS];
};
+struct OpalIoPhb4ErrorData {
+ struct OpalIoPhbErrorCommon common;
+
+ __be32 brdgCtl;
+
+ /* PHB4 cfg regs */
+ __be32 deviceStatus;
+ __be32 slotStatus;
+ __be32 linkStatus;
+ __be32 devCmdStatus;
+ __be32 devSecStatus;
+
+ /* cfg AER regs */
+ __be32 rootErrorStatus;
+ __be32 uncorrErrorStatus;
+ __be32 corrErrorStatus;
+ __be32 tlpHdr1;
+ __be32 tlpHdr2;
+ __be32 tlpHdr3;
+ __be32 tlpHdr4;
+ __be32 sourceId;
+
+ /* PHB4 ETU Error Regs */
+ __be64 nFir; /* 000 */
+ __be64 nFirMask; /* 003 */
+ __be64 nFirWOF; /* 008 */
+ __be64 phbPlssr; /* 120 */
+ __be64 phbCsr; /* 110 */
+ __be64 lemFir; /* C00 */
+ __be64 lemErrorMask; /* C18 */
+ __be64 lemWOF; /* C40 */
+ __be64 phbErrorStatus; /* C80 */
+ __be64 phbFirstErrorStatus; /* C88 */
+ __be64 phbErrorLog0; /* CC0 */
+ __be64 phbErrorLog1; /* CC8 */
+ __be64 phbTxeErrorStatus; /* D00 */
+ __be64 phbTxeFirstErrorStatus; /* D08 */
+ __be64 phbTxeErrorLog0; /* D40 */
+ __be64 phbTxeErrorLog1; /* D48 */
+ __be64 phbRxeArbErrorStatus; /* D80 */
+ __be64 phbRxeArbFirstErrorStatus; /* D88 */
+ __be64 phbRxeArbErrorLog0; /* DC0 */
+ __be64 phbRxeArbErrorLog1; /* DC8 */
+ __be64 phbRxeMrgErrorStatus; /* E00 */
+ __be64 phbRxeMrgFirstErrorStatus; /* E08 */
+ __be64 phbRxeMrgErrorLog0; /* E40 */
+ __be64 phbRxeMrgErrorLog1; /* E48 */
+ __be64 phbRxeTceErrorStatus; /* E80 */
+ __be64 phbRxeTceFirstErrorStatus; /* E88 */
+ __be64 phbRxeTceErrorLog0; /* EC0 */
+ __be64 phbRxeTceErrorLog1; /* EC8 */
+
+ /* PHB4 REGB Error Regs */
+ __be64 phbPblErrorStatus; /* 1900 */
+ __be64 phbPblFirstErrorStatus; /* 1908 */
+ __be64 phbPblErrorLog0; /* 1940 */
+ __be64 phbPblErrorLog1; /* 1948 */
+ __be64 phbPcieDlpErrorLog1; /* 1AA0 */
+ __be64 phbPcieDlpErrorLog2; /* 1AA8 */
+ __be64 phbPcieDlpErrorStatus; /* 1AB0 */
+ __be64 phbRegbErrorStatus; /* 1C00 */
+ __be64 phbRegbFirstErrorStatus; /* 1C08 */
+ __be64 phbRegbErrorLog0; /* 1C40 */
+ __be64 phbRegbErrorLog1; /* 1C48 */
+
+ __be64 pestA[OPAL_PHB4_NUM_PEST_REGS];
+ __be64 pestB[OPAL_PHB4_NUM_PEST_REGS];
+};
+
enum {
OPAL_REINIT_CPUS_HILE_BE = (1 << 0),
OPAL_REINIT_CPUS_HILE_LE = (1 << 1),
@@ -877,6 +948,7 @@ enum {
OPAL_PHB_CAPI_MODE_SNOOP_OFF = 2,
OPAL_PHB_CAPI_MODE_SNOOP_ON = 3,
OPAL_PHB_CAPI_MODE_DMA = 4,
+ OPAL_PHB_CAPI_MODE_DMA_TVT1 = 5,
};
/* OPAL I2C request */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 1c09f8fe2ee8..dc88a31cc79a 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -21,7 +21,11 @@
#include <asm/lppaca.h>
#include <asm/mmu.h>
#include <asm/page.h>
+#ifdef CONFIG_PPC_BOOK3E
#include <asm/exception-64e.h>
+#else
+#include <asm/exception-64s.h>
+#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_asm.h>
#endif
@@ -98,8 +102,8 @@ struct paca_struct {
* Now, starting in cacheline 2, the exception save areas
*/
/* used for most interrupts/exceptions */
- u64 exgen[13] __attribute__((aligned(0x80)));
- u64 exslb[13]; /* used for SLB/segment table misses
+ u64 exgen[EX_SIZE] __attribute__((aligned(0x80)));
+ u64 exslb[EX_SIZE]; /* used for SLB/segment table misses
* on the linear mapping */
/* SLB related definitions */
u16 vmalloc_sllp;
@@ -177,12 +181,14 @@ struct paca_struct {
* to the sibling threads' paca.
*/
struct paca_struct **thread_sibling_pacas;
+ /* The PSSCR value that the kernel requested before going to stop */
+ u64 requested_psscr;
#endif
#ifdef CONFIG_PPC_STD_MMU_64
/* Non-maskable exceptions that are not performance critical */
- u64 exnmi[13]; /* used for system reset (nmi) */
- u64 exmc[13]; /* used for machine checks */
+ u64 exnmi[EX_SIZE]; /* used for system reset (nmi) */
+ u64 exmc[EX_SIZE]; /* used for machine checks */
#endif
#ifdef CONFIG_PPC_BOOK3S_64
/* Exclusive stacks for system reset and machine check exception. */
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index 0413457ba11d..d795c5d5789c 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -3,6 +3,20 @@
#include <linux/mm.h>
+#ifndef MODULE
+static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
+{
+ if (unlikely(mm == &init_mm))
+ return gfp;
+ return gfp | __GFP_ACCOUNT;
+}
+#else /* !MODULE */
+static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
+{
+ return gfp | __GFP_ACCOUNT;
+}
+#endif /* MODULE */
+
#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgalloc.h>
#else
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 3a8d278e7421..fa9ebaead91e 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -103,6 +103,8 @@
#define OP_31_XOP_STBUX 247
#define OP_31_XOP_LHZX 279
#define OP_31_XOP_LHZUX 311
+#define OP_31_XOP_MSGSNDP 142
+#define OP_31_XOP_MSGCLRP 174
#define OP_31_XOP_MFSPR 339
#define OP_31_XOP_LWAX 341
#define OP_31_XOP_LHAX 343
@@ -189,8 +191,7 @@
/* sorted alphabetically */
#define PPC_INST_BHRBE 0x7c00025c
#define PPC_INST_CLRBHRB 0x7c00035c
-#define PPC_INST_COPY 0x7c00060c
-#define PPC_INST_COPY_FIRST 0x7c20060c
+#define PPC_INST_COPY 0x7c20060c
#define PPC_INST_CP_ABORT 0x7c00068c
#define PPC_INST_DCBA 0x7c0005ec
#define PPC_INST_DCBA_MASK 0xfc0007fe
@@ -221,10 +222,10 @@
#define PPC_INST_MSGCLR 0x7c0001dc
#define PPC_INST_MSGSYNC 0x7c0006ec
#define PPC_INST_MSGSNDP 0x7c00011c
+#define PPC_INST_MSGCLRP 0x7c00015c
#define PPC_INST_MTTMR 0x7c0003dc
#define PPC_INST_NOP 0x60000000
-#define PPC_INST_PASTE 0x7c00070c
-#define PPC_INST_PASTE_LAST 0x7c20070d
+#define PPC_INST_PASTE 0x7c20070d
#define PPC_INST_POPCNTB 0x7c0000f4
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
#define PPC_INST_POPCNTD 0x7c0003f4
@@ -392,6 +393,8 @@
/* Deal with instructions that older assemblers aren't aware of */
#define PPC_CP_ABORT stringify_in_c(.long PPC_INST_CP_ABORT)
+#define PPC_COPY(a, b) stringify_in_c(.long PPC_INST_COPY | \
+ ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \
__PPC_RA(a) | __PPC_RB(b))
#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_INST_DCBZL | \
@@ -409,6 +412,8 @@
___PPC_RB(b))
#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \
___PPC_RB(b))
+#define PPC_MSGCLRP(b) stringify_in_c(.long PPC_INST_MSGCLRP | \
+ ___PPC_RB(b))
#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \
__PPC_RA(a) | __PPC_RS(s))
#define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_INST_POPCNTD | \
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 359c44341761..6baeeb9acd0d 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -770,15 +770,18 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#else
#define FIXUP_ENDIAN \
tdi 0,0,0x48; /* Reverse endian of b . + 8 */ \
- b $+36; /* Skip trampoline if endian is good */ \
- .long 0x05009f42; /* bcl 20,31,$+4 */ \
- .long 0xa602487d; /* mflr r10 */ \
- .long 0x1c004a39; /* addi r10,r10,28 */ \
+ b $+44; /* Skip trampoline if endian is good */ \
.long 0xa600607d; /* mfmsr r11 */ \
.long 0x01006b69; /* xori r11,r11,1 */ \
+ .long 0x00004039; /* li r10,0 */ \
+ .long 0x6401417d; /* mtmsrd r10,1 */ \
+ .long 0x05009f42; /* bcl 20,31,$+4 */ \
+ .long 0xa602487d; /* mflr r10 */ \
+ .long 0x14004a39; /* addi r10,r10,20 */ \
.long 0xa6035a7d; /* mtsrr0 r10 */ \
.long 0xa6037b7d; /* mtsrr1 r11 */ \
.long 0x2400004c /* rfid */
+
#endif /* !CONFIG_PPC_BOOK3E */
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 1189d04f3bd1..fab7ff877304 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -421,6 +421,26 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
#ifdef CONFIG_PPC64
#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0)
+
+#define spin_begin() HMT_low()
+
+#define spin_cpu_relax() barrier()
+
+#define spin_cpu_yield() spin_cpu_relax()
+
+#define spin_end() HMT_medium()
+
+#define spin_until_cond(cond) \
+do { \
+ if (unlikely(!(cond))) { \
+ spin_begin(); \
+ do { \
+ spin_cpu_relax(); \
+ } while (!(cond)); \
+ spin_end(); \
+ } \
+} while (0)
+
#else
#define cpu_relax() barrier()
#endif
@@ -474,11 +494,11 @@ extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
extern int powersave_nap; /* set if nap mode can be used in idle loop */
-extern unsigned long power7_nap(int check_irq);
-extern unsigned long power7_sleep(void);
-extern unsigned long power7_winkle(void);
-extern unsigned long power9_idle_stop(unsigned long stop_psscr_val,
- unsigned long stop_psscr_mask);
+extern unsigned long power7_idle_insn(unsigned long type); /* PNV_THREAD_NAP/etc*/
+extern void power7_idle_type(unsigned long type);
+extern unsigned long power9_idle_stop(unsigned long psscr_val);
+extern void power9_idle_type(unsigned long stop_psscr_val,
+ unsigned long stop_psscr_mask);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
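
spin_begin()/spin_cpu_relax()/spin_end() above drop and restore SMT thread priority around a polling loop, and spin_until_cond() wraps the common pattern (the reworked delay.h macro earlier in this patch uses the same helpers). A hedged usage sketch; the caller and the polled flag are hypothetical, not from the patch:

#include <linux/compiler.h>
#include <linux/processor.h>
#include <linux/types.h>

/* Poll a flag at low SMT priority until another thread sets it. */
static void wait_for_ack(volatile u32 *ack)
{
	spin_until_cond(READ_ONCE(*ack) != 0);
}
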
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
index c05cef6ee06c..18f168aebae3 100644
--- a/arch/powerpc/include/asm/trace.h
+++ b/arch/powerpc/include/asm/trace.h
@@ -168,6 +168,39 @@ TRACE_EVENT(hash_fault,
__entry->addr, __entry->access, __entry->trap)
);
+
+TRACE_EVENT(tlbie,
+
+ TP_PROTO(unsigned long lpid, unsigned long local, unsigned long rb,
+ unsigned long rs, unsigned long ric, unsigned long prs,
+ unsigned long r),
+ TP_ARGS(lpid, local, rb, rs, ric, prs, r),
+ TP_STRUCT__entry(
+ __field(unsigned long, lpid)
+ __field(unsigned long, local)
+ __field(unsigned long, rb)
+ __field(unsigned long, rs)
+ __field(unsigned long, ric)
+ __field(unsigned long, prs)
+ __field(unsigned long, r)
+ ),
+
+ TP_fast_assign(
+ __entry->lpid = lpid;
+ __entry->local = local;
+ __entry->rb = rb;
+ __entry->rs = rs;
+ __entry->ric = ric;
+ __entry->prs = prs;
+ __entry->r = r;
+ ),
+
+ TP_printk("lpid=%ld, local=%ld, rb=0x%lx, rs=0x%lx, ric=0x%lx, "
+ "prs=0x%lx, r=0x%lx", __entry->lpid, __entry->local,
+ __entry->rb, __entry->rs, __entry->ric, __entry->prs,
+ __entry->r)
+);
+
#endif /* _TRACE_POWERPC_H */
#undef TRACE_INCLUDE_PATH
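
TRACE_EVENT(tlbie) above generates a trace_tlbie() hook with the TP_PROTO signature shown. A hedged sketch of a call site recording a global invalidation; the wrapper function and argument names are hypothetical, not from the patch:

#include <asm/trace.h>

/* Hypothetical instrumentation point: record a global (local = 0) tlbie. */
static inline void trace_global_tlbie(unsigned long lpid, unsigned long rb,
				      unsigned long rs, unsigned long ric,
				      unsigned long prs, unsigned long r)
{
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
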
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 41e88d3ce36b..4cf57f2126e6 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -340,7 +340,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
}
extern long strncpy_from_user(char *dst, const char __user *src, long count);
-extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* _ARCH_POWERPC_UACCESS_H */
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index b15bf6bc0e94..0d960ef78a9a 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -1,2 +1,8 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+
+generic-y += param.h
+generic-y += poll.h
+generic-y += resource.h
+generic-y += sockios.h
+generic-y += statfs.h
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 07fbeb927834..8cf8f0c96906 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -60,6 +60,12 @@ struct kvm_regs {
#define KVM_SREGS_E_FSL_PIDn (1 << 0) /* PID1/PID2 */
+/* flags for kvm_run.flags */
+#define KVM_RUN_PPC_NMI_DISP_MASK (3 << 0)
+#define KVM_RUN_PPC_NMI_DISP_FULLY_RECOV (1 << 0)
+#define KVM_RUN_PPC_NMI_DISP_LIMITED_RECOV (2 << 0)
+#define KVM_RUN_PPC_NMI_DISP_NOT_RECOV (3 << 0)
+
/*
* Feature bits indicate which sections of the sregs struct are valid,
* both in KVM_GET_SREGS and KVM_SET_SREGS. On KVM_SET_SREGS, registers
diff --git a/arch/powerpc/include/uapi/asm/param.h b/arch/powerpc/include/uapi/asm/param.h
deleted file mode 100644
index 965d45427975..000000000000
--- a/arch/powerpc/include/uapi/asm/param.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/param.h>
diff --git a/arch/powerpc/include/uapi/asm/poll.h b/arch/powerpc/include/uapi/asm/poll.h
deleted file mode 100644
index c98509d3149e..000000000000
--- a/arch/powerpc/include/uapi/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
diff --git a/arch/powerpc/include/uapi/asm/resource.h b/arch/powerpc/include/uapi/asm/resource.h
deleted file mode 100644
index 04bc4db8921b..000000000000
--- a/arch/powerpc/include/uapi/asm/resource.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/resource.h>
diff --git a/arch/powerpc/include/uapi/asm/sockios.h b/arch/powerpc/include/uapi/asm/sockios.h
deleted file mode 100644
index 55cef7675a31..000000000000
--- a/arch/powerpc/include/uapi/asm/sockios.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef _ASM_POWERPC_SOCKIOS_H
-#define _ASM_POWERPC_SOCKIOS_H
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/* Socket-level I/O control calls. */
-#define FIOSETOWN 0x8901
-#define SIOCSPGRP 0x8902
-#define FIOGETOWN 0x8903
-#define SIOCGPGRP 0x8904
-#define SIOCATMARK 0x8905
-#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
-#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
-
-#endif /* _ASM_POWERPC_SOCKIOS_H */
diff --git a/arch/powerpc/include/uapi/asm/statfs.h b/arch/powerpc/include/uapi/asm/statfs.h
deleted file mode 100644
index 5244834583a4..000000000000
--- a/arch/powerpc/include/uapi/asm/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_POWERPC_STATFS_H
-#define _ASM_POWERPC_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index e132902e1f14..0845eebc5af3 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -25,8 +25,6 @@ CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_prom_init.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_btext.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_prom.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
-# timers used by tracing
-CFLAGS_REMOVE_time.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
endif
obj-y := cputable.o ptrace.o syscalls.o \
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 709e23425317..6e95c2c19a7e 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -100,12 +100,12 @@ int main(void)
OFFSET(THREAD_NORMSAVES, thread_struct, normsave[0]);
#endif
OFFSET(THREAD_FPEXC_MODE, thread_struct, fpexc_mode);
- OFFSET(THREAD_FPSTATE, thread_struct, fp_state);
+ OFFSET(THREAD_FPSTATE, thread_struct, fp_state.fpr);
OFFSET(THREAD_FPSAVEAREA, thread_struct, fp_save_area);
OFFSET(FPSTATE_FPSCR, thread_fp_state, fpscr);
OFFSET(THREAD_LOAD_FP, thread_struct, load_fp);
#ifdef CONFIG_ALTIVEC
- OFFSET(THREAD_VRSTATE, thread_struct, vr_state);
+ OFFSET(THREAD_VRSTATE, thread_struct, vr_state.vr);
OFFSET(THREAD_VRSAVEAREA, thread_struct, vr_save_area);
OFFSET(THREAD_VRSAVE, thread_struct, vrsave);
OFFSET(THREAD_USED_VR, thread_struct, used_vr);
@@ -145,9 +145,9 @@ int main(void)
OFFSET(THREAD_TM_PPR, thread_struct, tm_ppr);
OFFSET(THREAD_TM_DSCR, thread_struct, tm_dscr);
OFFSET(PT_CKPT_REGS, thread_struct, ckpt_regs);
- OFFSET(THREAD_CKVRSTATE, thread_struct, ckvr_state);
+ OFFSET(THREAD_CKVRSTATE, thread_struct, ckvr_state.vr);
OFFSET(THREAD_CKVRSAVE, thread_struct, ckvrsave);
- OFFSET(THREAD_CKFPSTATE, thread_struct, ckfp_state);
+ OFFSET(THREAD_CKFPSTATE, thread_struct, ckfp_state.fpr);
/* Local pt_regs on stack for Transactional Memory funcs. */
DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD +
sizeof(struct pt_regs) + 16);
@@ -485,6 +485,7 @@ int main(void)
OFFSET(KVM_ENABLED_HCALLS, kvm, arch.enabled_hcalls);
OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v);
OFFSET(KVM_RADIX, kvm, arch.radix);
+ OFFSET(KVM_FWNMI, kvm, arch.fwnmi_enabled);
OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr);
OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar);
OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr);
@@ -513,6 +514,7 @@ int main(void)
OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions);
OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded);
OFFSET(VCPU_PRODDED, kvm_vcpu, arch.prodded);
+ OFFSET(VCPU_DBELL_REQ, kvm_vcpu, arch.doorbell_request);
OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr);
OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc);
OFFSET(VCPU_SPMC, kvm_vcpu, arch.spmc);
@@ -542,6 +544,7 @@ int main(void)
OFFSET(VCPU_WORT, kvm_vcpu, arch.wort);
OFFSET(VCPU_TID, kvm_vcpu, arch.tid);
OFFSET(VCPU_PSSCR, kvm_vcpu, arch.psscr);
+ OFFSET(VCPU_HFSCR, kvm_vcpu, arch.hfscr);
OFFSET(VCORE_ENTRY_EXIT, kvmppc_vcore, entry_exit_map);
OFFSET(VCORE_IN_GUEST, kvmppc_vcore, in_guest);
OFFSET(VCORE_NAPPING_THREADS, kvmppc_vcore, napping_threads);
@@ -742,9 +745,11 @@ int main(void)
OFFSET(PACA_THREAD_MASK, paca_struct, thread_mask);
OFFSET(PACA_SUBCORE_SIBLING_MASK, paca_struct, subcore_sibling_mask);
OFFSET(PACA_SIBLING_PACA_PTRS, paca_struct, thread_sibling_pacas);
+ OFFSET(PACA_REQ_PSSCR, paca_struct, requested_psscr);
#endif
DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
+ DEFINE(PPC_DBELL_MSGTYPE, PPC_DBELL_MSGTYPE);
#ifdef CONFIG_PPC_8xx
DEFINE(VIRT_IMMR_BASE, (u64)__fix_to_virt(FIX_IMMR_BASE));
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index fb7cbaa37658..8f7abf9baa63 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -105,6 +105,11 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
return mask;
}
+int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == IOMMU_MAPPING_ERROR;
+}
+
struct dma_map_ops dma_iommu_ops = {
.alloc = dma_iommu_alloc_coherent,
.free = dma_iommu_free_coherent,
@@ -115,5 +120,6 @@ struct dma_map_ops dma_iommu_ops = {
.map_page = dma_iommu_map_page,
.unmap_page = dma_iommu_unmap_page,
.get_required_mask = dma_iommu_get_required_mask,
+ .mapping_error = dma_iommu_mapping_error,
};
EXPORT_SYMBOL(dma_iommu_ops);
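
With DMA_ERROR_CODE removed (see the dma-mapping.h hunk earlier) and IOMMU_MAPPING_ERROR reported through the new .mapping_error callback, drivers keep using the generic helper. A hedged sketch of a caller; the device, page and direction are placeholders, not from the patch:

#include <linux/dma-mapping.h>

/* dma_mapping_error() now ends up in dma_iommu_mapping_error() for devices
 * using dma_iommu_ops, comparing against IOMMU_MAPPING_ERROR. */
static int map_one_page(struct device *dev, struct page *page, dma_addr_t *out)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;
	*out = addr;
	return 0;
}
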
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 41c749586bd2..4194bbbbdb10 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -314,18 +314,6 @@ EXPORT_SYMBOL(dma_set_coherent_mask);
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
-int __dma_set_mask(struct device *dev, u64 dma_mask)
-{
- const struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
- if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
- return dma_ops->set_dma_mask(dev, dma_mask);
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
- *dev->dma_mask = dma_mask;
- return 0;
-}
-
int dma_set_mask(struct device *dev, u64 dma_mask)
{
if (ppc_md.dma_set_mask)
@@ -338,7 +326,10 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
return phb->controller_ops.dma_set_mask(pdev, dma_mask);
}
- return __dma_set_mask(dev, dma_mask);
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+ *dev->dma_mask = dma_mask;
+ return 0;
}
EXPORT_SYMBOL(dma_set_mask);
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index bfbad08a1207..49d8422767b4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -57,7 +57,7 @@ system_call_common:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
- bne tabort_syscall
+ bne .Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
andi. r10,r12,MSR_PR
@@ -143,6 +143,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
mtmsrd r11,1
#endif /* CONFIG_PPC_BOOK3E */
+system_call: /* label this so stack traces look sane */
/* We do need to set SOFTE in the stack frame or the return
* from interrupt will be painful
*/
@@ -152,11 +153,11 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
CURRENT_THREAD_INFO(r11, r1)
ld r10,TI_FLAGS(r11)
andi. r11,r10,_TIF_SYSCALL_DOTRACE
- bne syscall_dotrace /* does not return */
+ bne .Lsyscall_dotrace /* does not return */
cmpldi 0,r0,NR_syscalls
- bge- syscall_enosys
+ bge- .Lsyscall_enosys
-system_call: /* label this so stack traces look sane */
+.Lsyscall:
/*
* Need to vector to 32 Bit or default sys_call_table here,
* based on caller's run-mode / personality.
@@ -185,8 +186,20 @@ system_call: /* label this so stack traces look sane */
#ifdef CONFIG_PPC_BOOK3S
/* No MSR:RI on BookE */
andi. r10,r8,MSR_RI
- beq- unrecov_restore
+ beq- .Lunrecov_restore
#endif
+
+/*
+ * This is a few instructions into the actual syscall exit path (which actually
+ * starts at .Lsyscall_exit) to cater to kprobe blacklisting and to reduce the
+ * number of visible symbols for profiling purposes.
+ *
+ * We can probe from system_call until this point as MSR_RI is set. But once it
+ * is cleared below, we won't be able to take a trap.
+ *
+ * This is blacklisted from kprobes further below with _ASM_NOKPROBE_SYMBOL().
+ */
+system_call_exit:
/*
* Disable interrupts so current_thread_info()->flags can't change,
* and so that we don't get interrupted after loading SRR0/1.
@@ -208,31 +221,21 @@ system_call: /* label this so stack traces look sane */
ld r9,TI_FLAGS(r12)
li r11,-MAX_ERRNO
andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
- bne- syscall_exit_work
+ bne- .Lsyscall_exit_work
- andi. r0,r8,MSR_FP
- beq 2f
+ /* If MSR_FP and MSR_VEC are set in user msr, then no need to restore */
+ li r7,MSR_FP
#ifdef CONFIG_ALTIVEC
- andis. r0,r8,MSR_VEC@h
- bne 3f
-#endif
-2: addi r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_PPC_BOOK3S
- li r10,MSR_RI
- mtmsrd r10,1 /* Restore RI */
-#endif
- bl restore_math
-#ifdef CONFIG_PPC_BOOK3S
- li r11,0
- mtmsrd r11,1
+ oris r7,r7,MSR_VEC@h
#endif
- ld r8,_MSR(r1)
- ld r3,RESULT(r1)
- li r11,-MAX_ERRNO
+ and r0,r8,r7
+ cmpd r0,r7
+ bne .Lsyscall_restore_math
+.Lsyscall_restore_math_cont:
-3: cmpld r3,r11
+ cmpld r3,r11
ld r5,_CCR(r1)
- bge- syscall_error
+ bge- .Lsyscall_error
.Lsyscall_error_cont:
ld r7,_NIP(r1)
BEGIN_FTR_SECTION
@@ -258,14 +261,48 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
RFI
b . /* prevent speculative execution */
-syscall_error:
+.Lsyscall_error:
oris r5,r5,0x1000 /* Set SO bit in CR */
neg r3,r3
std r5,_CCR(r1)
b .Lsyscall_error_cont
-
+
+.Lsyscall_restore_math:
+ /*
+ * Some initial tests from restore_math to avoid the heavyweight
+ * C code entry and MSR manipulations.
+ */
+ LOAD_REG_IMMEDIATE(r0, MSR_TS_MASK)
+ and. r0,r0,r8
+ bne 1f
+
+ ld r7,PACACURRENT(r13)
+ lbz r0,THREAD+THREAD_LOAD_FP(r7)
+#ifdef CONFIG_ALTIVEC
+ lbz r6,THREAD+THREAD_LOAD_VEC(r7)
+ add r0,r0,r6
+#endif
+ cmpdi r0,0
+ beq .Lsyscall_restore_math_cont
+
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_PPC_BOOK3S
+ li r10,MSR_RI
+ mtmsrd r10,1 /* Restore RI */
+#endif
+ bl restore_math
+#ifdef CONFIG_PPC_BOOK3S
+ li r11,0
+ mtmsrd r11,1
+#endif
+ /* Restore volatiles, reload MSR from updated one */
+ ld r8,_MSR(r1)
+ ld r3,RESULT(r1)
+ li r11,-MAX_ERRNO
+ b .Lsyscall_restore_math_cont
+
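The fast-path check above (li r7,MSR_FP; oris r7,r7,MSR_VEC@h; and r0,r8,r7; cmpd r0,r7) skips the call into restore_math() when the user MSR already has FP (and, on CONFIG_ALTIVEC kernels, VEC) enabled. A minimal C sketch of the same predicate, with a hypothetical helper name, for illustration only:

	static inline bool syscall_needs_restore_math(unsigned long user_msr)
	{
		unsigned long mask = MSR_FP;		/* r7 in the assembly above */
	#ifdef CONFIG_ALTIVEC
		mask |= MSR_VEC;
	#endif
		/* true corresponds to "bne .Lsyscall_restore_math" */
		return (user_msr & mask) != mask;
	}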
/* Traced system call support */
-syscall_dotrace:
+.Lsyscall_dotrace:
bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_syscall_trace_enter
@@ -286,23 +323,23 @@ syscall_dotrace:
ld r7,GPR7(r1)
ld r8,GPR8(r1)
- /* Repopulate r9 and r10 for the system_call path */
+ /* Repopulate r9 and r10 for the syscall path */
addi r9,r1,STACK_FRAME_OVERHEAD
CURRENT_THREAD_INFO(r10, r1)
ld r10,TI_FLAGS(r10)
cmpldi r0,NR_syscalls
- blt+ system_call
+ blt+ .Lsyscall
/* Return code is already in r3 thanks to do_syscall_trace_enter() */
b .Lsyscall_exit
-syscall_enosys:
+.Lsyscall_enosys:
li r3,-ENOSYS
b .Lsyscall_exit
-syscall_exit_work:
+.Lsyscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
li r10,MSR_RI
mtmsrd r10,1 /* Restore RI */
@@ -362,7 +399,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
b ret_from_except
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-tabort_syscall:
+.Ltabort_syscall:
/* Firstly we need to enable TM in the kernel */
mfmsr r10
li r9, 1
@@ -388,6 +425,8 @@ tabort_syscall:
rfid
b . /* prevent speculative execution */
#endif
+_ASM_NOKPROBE_SYMBOL(system_call_common);
+_ASM_NOKPROBE_SYMBOL(system_call_exit);
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
@@ -398,6 +437,7 @@ _GLOBAL(save_nvgprs)
clrrdi r0,r11,1
std r0,_TRAP(r1)
blr
+_ASM_NOKPROBE_SYMBOL(save_nvgprs);
/*
@@ -488,33 +528,30 @@ _GLOBAL(_switch)
std r23,_CCR(r1)
std r1,KSP(r3) /* Set old stack pointer */
-#ifdef CONFIG_SMP
- /* We need a sync somewhere here to make sure that if the
- * previous task gets rescheduled on another CPU, it sees all
- * stores it has performed on this one.
+ /*
+ * On SMP kernels, care must be taken because a task may be
+ * scheduled off CPUx and on to CPUy. Memory ordering must be
+ * considered.
+ *
+ * Cacheable stores on CPUx will be visible when the task is
+ * scheduled on CPUy by virtue of the core scheduler barriers
+ * (see "Notes on Program-Order guarantees on SMP systems." in
+ * kernel/sched/core.c).
+ *
+ * Uncacheable stores in the case of involuntary preemption must
+ * be taken care of. The smp_mb__before_spin_lock() in __schedule()
+ * is implemented as hwsync on powerpc, which orders MMIO too. So
+ * long as there is an hwsync in the context switch path, it will
+ * be executed on the source CPU after the task has performed
+ * all MMIO ops on that CPU, and on the destination CPU before the
+ * task performs any MMIO ops there.
*/
- sync
-#endif /* CONFIG_SMP */
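The removed sync relies on the claim above that smp_mb__before_spin_lock() in __schedule() is a hwsync. As a rough sketch (an assumption about the barrier mapping, not a quote of the kernel headers), powerpc's full barrier is:

	/* roughly what smp_mb() expands to on powerpc (sketch) */
	#define smp_mb()	__asm__ __volatile__ ("sync" : : : "memory")

and "sync" (hwsync) orders both cacheable and caching-inhibited (MMIO) accesses, which is why a second explicit sync in _switch() is redundant.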
/*
- * If we optimise away the clear of the reservation in system
- * calls because we know the CPU tracks the address of the
- * reservation, then we need to clear it here to cover the
- * case that the kernel context switch path has no larx
- * instructions.
+ * The kernel context switch path must contain a spin_lock,
+ * which contains larx/stcx, which will clear any reservation
+ * of the task being switched.
*/
-BEGIN_FTR_SECTION
- ldarx r6,0,r1
-END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
-
-BEGIN_FTR_SECTION
-/*
- * A cp_abort (copy paste abort) here ensures that when context switching, a
- * copy from one process can't leak into the paste of another.
- */
- PPC_CP_ABORT
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-
#ifdef CONFIG_PPC_BOOK3S
	/* Cancel all explicit user streams as they will have no use after context
* switch and will stop the HW from creating streams itself
@@ -583,6 +620,14 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
top of the kernel stack. */
addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
+ /*
+ * PMU interrupts in radix may come in here. They will use r1, not
+ * PACAKSAVE, so this stack switch will not cause a problem. They
+ * will store to the process stack, which may then be migrated to
+ * another CPU. However the rq lock release on this CPU paired with
+ * the rq lock acquire on the new CPU before the stack becomes
+ * active on the new CPU, will order those stores.
+ */
mr r1,r8 /* start using new stack pointer */
std r7,PACAKSAVE(r13)
@@ -763,11 +808,11 @@ restore:
ld r5,SOFTE(r1)
lbz r6,PACASOFTIRQEN(r13)
cmpwi cr0,r5,0
- beq restore_irq_off
+ beq .Lrestore_irq_off
/* We are enabling, were we already enabled ? Yes, just return */
cmpwi cr0,r6,1
- beq cr0,do_restore
+ beq cr0,.Ldo_restore
/*
* We are about to soft-enable interrupts (we are hard disabled
@@ -776,14 +821,14 @@ restore:
*/
lbz r0,PACAIRQHAPPENED(r13)
cmpwi cr0,r0,0
- bne- restore_check_irq_replay
+ bne- .Lrestore_check_irq_replay
/*
* Get here when nothing happened while soft-disabled, just
* soft-enable and move-on. We will hard-enable as a side
* effect of rfi
*/
-restore_no_replay:
+.Lrestore_no_replay:
TRACE_ENABLE_INTS
li r0,1
stb r0,PACASOFTIRQEN(r13);
@@ -791,7 +836,7 @@ restore_no_replay:
/*
* Final return path. BookE is handled in a different file
*/
-do_restore:
+.Ldo_restore:
#ifdef CONFIG_PPC_BOOK3E
b exception_return_book3e
#else
@@ -825,7 +870,7 @@ fast_exception_return:
REST_8GPRS(5, r1)
andi. r0,r3,MSR_RI
- beq- unrecov_restore
+ beq- .Lunrecov_restore
/* Load PPR from thread struct before we clear MSR:RI */
BEGIN_FTR_SECTION
@@ -883,7 +928,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
* make sure that in this case, we also clear PACA_IRQ_HARD_DIS
* or that bit can get out of sync and bad things will happen
*/
-restore_irq_off:
+.Lrestore_irq_off:
ld r3,_MSR(r1)
lbz r7,PACAIRQHAPPENED(r13)
andi. r0,r3,MSR_EE
@@ -893,13 +938,13 @@ restore_irq_off:
1: li r0,0
stb r0,PACASOFTIRQEN(r13);
TRACE_DISABLE_INTS
- b do_restore
+ b .Ldo_restore
/*
* Something did happen, check if a re-emit is needed
* (this also clears paca->irq_happened)
*/
-restore_check_irq_replay:
+.Lrestore_check_irq_replay:
/* XXX: We could implement a fast path here where we check
* for irq_happened being just 0x01, in which case we can
* clear it and return. That means that we would potentially
@@ -909,7 +954,7 @@ restore_check_irq_replay:
*/
bl __check_irq_replay
cmpwi cr0,r3,0
- beq restore_no_replay
+ beq .Lrestore_no_replay
/*
* We need to re-emit an interrupt. We do so by re-using our
@@ -958,10 +1003,18 @@ restore_check_irq_replay:
#endif /* CONFIG_PPC_DOORBELL */
1: b ret_from_except /* What else to do here ? */
-unrecov_restore:
+.Lunrecov_restore:
addi r3,r1,STACK_FRAME_OVERHEAD
bl unrecoverable_exception
- b unrecov_restore
+ b .Lunrecov_restore
+
+_ASM_NOKPROBE_SYMBOL(ret_from_except);
+_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
+_ASM_NOKPROBE_SYMBOL(resume_kernel);
+_ASM_NOKPROBE_SYMBOL(fast_exc_return_irq);
+_ASM_NOKPROBE_SYMBOL(restore);
+_ASM_NOKPROBE_SYMBOL(fast_exception_return);
+
#ifdef CONFIG_PPC_RTAS
/*
@@ -1038,6 +1091,8 @@ _GLOBAL(enter_rtas)
rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
andc r6,r0,r9
+
+__enter_rtas:
sync /* disable interrupts so SRR0/1 */
mtmsrd r0 /* don't get trashed */
@@ -1074,6 +1129,8 @@ rtas_return_loc:
mtspr SPRN_SRR1,r4
rfid
b . /* prevent speculative execution */
+_ASM_NOKPROBE_SYMBOL(__enter_rtas)
+_ASM_NOKPROBE_SYMBOL(rtas_return_loc)
.align 3
1: .llong rtas_restore_regs
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index b886795060fd..4c18a5fbb4bb 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -99,7 +99,11 @@ EXC_VIRT_NONE(0x4000, 0x100)
#ifdef CONFIG_PPC_P7_NAP
/*
* If running native on arch 2.06 or later, check if we are waking up
- * from nap/sleep/winkle, and branch to idle handler.
+ * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
+ * bits 46:47. A non-0 value indicates that we are coming from a power
+ * saving state. The idle wakeup handler initially runs in real mode,
+ * but we branch to the 0xc000... address so we can turn on relocation
+ * with mtmsr.
*/
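An equivalent C sketch of the SRR1 test described in the comment above (the IDLETEST macro below does this with rlwinm; the helper name here is hypothetical):

	static inline unsigned int srr1_idle_wake_state(unsigned long srr1)
	{
		/* SRR1 bits 46:47 (IBM numbering) sit 63 - 47 = 16 bits up from the LSB */
		return (srr1 >> 16) & 0x3;	/* non-zero: waking from a power-saving state */
	}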
#define IDLETEST(n) \
BEGIN_FTR_SECTION ; \
@@ -107,7 +111,7 @@ EXC_VIRT_NONE(0x4000, 0x100)
rlwinm. r10,r10,47-31,30,31 ; \
beq- 1f ; \
cmpwi cr3,r10,2 ; \
- BRANCH_TO_COMMON(r10, system_reset_idle_common) ; \
+ BRANCH_TO_C000(r10, system_reset_idle_common) ; \
1: \
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#else
@@ -128,6 +132,7 @@ EXC_VIRT_NONE(0x4100, 0x100)
#ifdef CONFIG_PPC_P7_NAP
EXC_COMMON_BEGIN(system_reset_idle_common)
+ mfspr r12,SPRN_SRR1
b pnv_powersave_wakeup
#endif
@@ -507,46 +512,22 @@ EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_0(PACA_EXSLB)
EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
- std r3,PACA_EXSLB+EX_R3(r13)
+ mr r12,r3 /* save r3 */
mfspr r3,SPRN_DAR
- mfspr r12,SPRN_SRR1
+ mfspr r11,SPRN_SRR1
crset 4*cr6+eq
-#ifndef CONFIG_RELOCATABLE
- b slb_miss_realmode
-#else
- /*
- * We can't just use a direct branch to slb_miss_realmode
- * because the distance from here to there depends on where
- * the kernel ends up being put.
- */
- mfctr r11
- LOAD_HANDLER(r10, slb_miss_realmode)
- mtctr r10
- bctr
-#endif
+ BRANCH_TO_COMMON(r10, slb_miss_common)
EXC_REAL_END(data_access_slb, 0x380, 0x80)
EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_0(PACA_EXSLB)
EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
- std r3,PACA_EXSLB+EX_R3(r13)
+ mr r12,r3 /* save r3 */
mfspr r3,SPRN_DAR
- mfspr r12,SPRN_SRR1
+ mfspr r11,SPRN_SRR1
crset 4*cr6+eq
-#ifndef CONFIG_RELOCATABLE
- b slb_miss_realmode
-#else
- /*
- * We can't just use a direct branch to slb_miss_realmode
- * because the distance from here to there depends on where
- * the kernel ends up being put.
- */
- mfctr r11
- LOAD_HANDLER(r10, slb_miss_realmode)
- mtctr r10
- bctr
-#endif
+ BRANCH_TO_COMMON(r10, slb_miss_common)
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)
@@ -575,88 +556,82 @@ EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_0(PACA_EXSLB)
EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
- std r3,PACA_EXSLB+EX_R3(r13)
+ mr r12,r3 /* save r3 */
mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
- mfspr r12,SPRN_SRR1
+ mfspr r11,SPRN_SRR1
crclr 4*cr6+eq
-#ifndef CONFIG_RELOCATABLE
- b slb_miss_realmode
-#else
- mfctr r11
- LOAD_HANDLER(r10, slb_miss_realmode)
- mtctr r10
- bctr
-#endif
+ BRANCH_TO_COMMON(r10, slb_miss_common)
EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_0(PACA_EXSLB)
EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
- std r3,PACA_EXSLB+EX_R3(r13)
+ mr r12,r3 /* save r3 */
mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
- mfspr r12,SPRN_SRR1
+ mfspr r11,SPRN_SRR1
crclr 4*cr6+eq
-#ifndef CONFIG_RELOCATABLE
- b slb_miss_realmode
-#else
- mfctr r11
- LOAD_HANDLER(r10, slb_miss_realmode)
- mtctr r10
- bctr
-#endif
+ BRANCH_TO_COMMON(r10, slb_miss_common)
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
TRAMP_KVM(PACA_EXSLB, 0x480)
-/* This handler is used by both 0x380 and 0x480 slb miss interrupts */
-EXC_COMMON_BEGIN(slb_miss_realmode)
+/*
+ * This handler is used by the 0x380 and 0x480 SLB miss interrupts, as well as
+ * the virtual mode 0x4380 and 0x4480 interrupts if AIL is enabled.
+ */
+EXC_COMMON_BEGIN(slb_miss_common)
/*
* r13 points to the PACA, r9 contains the saved CR,
- * r12 contain the saved SRR1, SRR0 is still ready for return
+ * r12 contains the saved r3,
+ * r11 contains the saved SRR1, SRR0 is still ready for return
* r3 has the faulting address
* r9 - r13 are saved in paca->exslb.
- * r3 is saved in paca->slb_r3
* cr6.eq is set for a D-SLB miss, clear for a I-SLB miss
* We assume we aren't going to take any exceptions during this
* procedure.
*/
mflr r10
-#ifdef CONFIG_RELOCATABLE
- mtctr r11
-#endif
-
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
- std r3,PACA_EXSLB+EX_DAR(r13)
+
+ /*
+ * Test MSR_RI before calling slb_allocate_realmode, because the
+ * MSR in r11 gets clobbered. However we still want to allocate
+ * SLB in case MSR_RI=0, to minimise the risk of getting stuck in
+ * recursive SLB faults. So use cr5 for this, which is preserved.
+ */
+ andi. r11,r11,MSR_RI /* check for unrecoverable exception */
+ cmpdi cr5,r11,MSR_RI
crset 4*cr0+eq
#ifdef CONFIG_PPC_STD_MMU_64
BEGIN_MMU_FTR_SECTION
- bl slb_allocate_realmode
+ bl slb_allocate
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
#endif
ld r10,PACA_EXSLB+EX_LR(r13)
- ld r3,PACA_EXSLB+EX_R3(r13)
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
mtlr r10
- beq 8f /* if bad address, make full stack frame */
+ beq- 8f /* if bad address, make full stack frame */
- andi. r10,r12,MSR_RI /* check for unrecoverable exception */
- beq- 2f
+ bne- cr5,2f /* if unrecoverable exception, oops */
/* All done -- return from exception. */
.machine push
.machine "power4"
mtcrf 0x80,r9
+ mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
mtcrf 0x02,r9 /* I/D indication is in cr6 */
mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
.machine pop
+ RESTORE_CTR(r9, PACA_EXSLB)
RESTORE_PPR_PACA(PACA_EXSLB, r9)
+ mr r3,r12
ld r9,PACA_EXSLB+EX_R9(r13)
ld r10,PACA_EXSLB+EX_R10(r13)
ld r11,PACA_EXSLB+EX_R11(r13)
@@ -665,7 +640,10 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
rfid
b . /* prevent speculative execution */
-2: mfspr r11,SPRN_SRR0
+2: std r3,PACA_EXSLB+EX_DAR(r13)
+ mr r3,r12
+ mfspr r11,SPRN_SRR0
+ mfspr r12,SPRN_SRR1
LOAD_HANDLER(r10,unrecov_slb)
mtspr SPRN_SRR0,r10
ld r10,PACAKMSR(r13)
@@ -673,7 +651,10 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
rfid
b .
-8: mfspr r11,SPRN_SRR0
+8: std r3,PACA_EXSLB+EX_DAR(r13)
+ mr r3,r12
+ mfspr r11,SPRN_SRR0
+ mfspr r12,SPRN_SRR1
LOAD_HANDLER(r10,bad_addr_slb)
mtspr SPRN_SRR0,r10
ld r10,PACAKMSR(r13)
@@ -821,46 +802,80 @@ EXC_VIRT(trap_0b, 0x4b00, 0x100, 0xb00)
TRAMP_KVM(PACA_EXGEN, 0xb00)
EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
+/*
+ * system call / hypercall (0xc00, 0x4c00)
+ *
+ * The system call exception is invoked with "sc 0" and does not alter HV bit.
+ * There is support for kernel code to invoke system calls but there are no
+ * in-tree users.
+ *
+ * The hypercall is invoked with "sc 1" and sets HV=1.
+ *
+ * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
+ * 0x4c00 virtual mode.
+ *
+ * Call convention:
+ *
+ * syscall register convention is in Documentation/powerpc/syscall64-abi.txt
+ *
+ * For hypercalls, the register convention is as follows:
+ * r0 volatile
+ * r1-2 nonvolatile
+ * r3 volatile parameter and return value for status
+ * r4-r10 volatile input and output value
+ * r11 volatile hypercall number and output value
+ * r12 volatile
+ * r13-r31 nonvolatile
+ * LR nonvolatile
+ * CTR volatile
+ * XER volatile
+ * CR0-1 CR5-7 volatile
+ * CR2-4 nonvolatile
+ * Other registers nonvolatile
+ *
+ * The intersection of volatile registers that don't contain possible
+ * inputs is: r12, cr0, xer, ctr. We may use these as scratch regs
+ * upon entry without saving.
+ */
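For illustration, a guest-side wrapper that follows the register convention listed above could look like the sketch below. The function name, the single r4 argument and the simplified clobber list are assumptions made for the example; real interfaces (e.g. plpar_hcall()) define their own ABI on top of this and should be used instead.

	static inline long example_hcall(unsigned long nr, unsigned long arg)
	{
		register unsigned long r3 asm("r3");
		register unsigned long r4 asm("r4") = arg;
		register unsigned long r11 asm("r11") = nr;

		asm volatile("sc 1"		/* hypercall: sets HV=1 */
			     : "=r" (r3), "+r" (r4), "+r" (r11)
			     :
			     : "r0", "r5", "r6", "r7", "r8", "r9", "r10",
			       "r12", "ctr", "cc", "memory");
		return r3;			/* status */
	}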
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
- /*
- * If CONFIG_KVM_BOOK3S_64_HANDLER is set, save the PPR (on systems
- * that support it) before changing to HMT_MEDIUM. That allows the KVM
- * code to save that value into the guest state (it is the guest's PPR
- * value). Otherwise just change to HMT_MEDIUM as userspace has
- * already saved the PPR.
- */
+ /*
+ * There is a little bit of juggling to get syscall and hcall
+ * working well. Save r10 in ctr to be restored in case it is a
+ * hcall.
+ *
+ * Userspace syscalls have already saved the PPR, hcalls must save
+ * it before setting HMT_MEDIUM.
+ */
#define SYSCALL_KVMTEST \
- SET_SCRATCH0(r13); \
+ mr r12,r13; \
GET_PACA(r13); \
- std r9,PACA_EXGEN+EX_R9(r13); \
- OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); \
+ mtctr r10; \
+ KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
HMT_MEDIUM; \
- std r10,PACA_EXGEN+EX_R10(r13); \
- OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r9, CPU_FTR_HAS_PPR); \
- mfcr r9; \
- KVMTEST_PR(0xc00); \
- GET_SCRATCH0(r13)
+ mr r9,r12; \
#else
#define SYSCALL_KVMTEST \
- HMT_MEDIUM
+ HMT_MEDIUM; \
+ mr r9,r13; \
+ GET_PACA(r13);
#endif
#define LOAD_SYSCALL_HANDLER(reg) \
__LOAD_HANDLER(reg, system_call_common)
-/* Syscall routine is used twice, in reloc-off and reloc-on paths */
-#define SYSCALL_PSERIES_1 \
+#define SYSCALL_FASTENDIAN_TEST \
BEGIN_FTR_SECTION \
cmpdi r0,0x1ebe ; \
beq- 1f ; \
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
- mr r9,r13 ; \
- GET_PACA(r13) ; \
- mfspr r11,SPRN_SRR0 ; \
-0:
-#define SYSCALL_PSERIES_2_RFID \
+/*
+ * After SYSCALL_KVMTEST, we reach here with PACA in r13, r13 in r9,
+ * and HMT_MEDIUM.
+ */
+#define SYSCALL_REAL \
+ mfspr r11,SPRN_SRR0 ; \
mfspr r12,SPRN_SRR1 ; \
LOAD_SYSCALL_HANDLER(r10) ; \
mtspr SPRN_SRR0,r10 ; \
@@ -869,11 +884,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
rfid ; \
b . ; /* prevent speculative execution */
-#define SYSCALL_PSERIES_3 \
+#define SYSCALL_FASTENDIAN \
/* Fast LE/BE switch system call */ \
1: mfspr r12,SPRN_SRR1 ; \
xori r12,r12,MSR_LE ; \
mtspr SPRN_SRR1,r12 ; \
+ mr r13,r9 ; \
rfid ; /* return to userspace */ \
b . ; /* prevent speculative execution */
@@ -882,16 +898,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
* We can't branch directly so we do it via the CTR which
* is volatile across system calls.
*/
-#define SYSCALL_PSERIES_2_DIRECT \
- LOAD_SYSCALL_HANDLER(r12) ; \
- mtctr r12 ; \
+#define SYSCALL_VIRT \
+ LOAD_SYSCALL_HANDLER(r10) ; \
+ mtctr r10 ; \
+ mfspr r11,SPRN_SRR0 ; \
mfspr r12,SPRN_SRR1 ; \
li r10,MSR_RI ; \
mtmsrd r10,1 ; \
bctr ;
#else
/* We can branch directly */
-#define SYSCALL_PSERIES_2_DIRECT \
+#define SYSCALL_VIRT \
+ mfspr r11,SPRN_SRR0 ; \
mfspr r12,SPRN_SRR1 ; \
li r10,MSR_RI ; \
mtmsrd r10,1 ; /* Set RI (EE=0) */ \
@@ -899,20 +917,43 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
#endif
EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
- SYSCALL_KVMTEST
- SYSCALL_PSERIES_1
- SYSCALL_PSERIES_2_RFID
- SYSCALL_PSERIES_3
+ SYSCALL_KVMTEST /* loads PACA into r13, and saves r13 to r9 */
+ SYSCALL_FASTENDIAN_TEST
+ SYSCALL_REAL
+ SYSCALL_FASTENDIAN
EXC_REAL_END(system_call, 0xc00, 0x100)
EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
- SYSCALL_KVMTEST
- SYSCALL_PSERIES_1
- SYSCALL_PSERIES_2_DIRECT
- SYSCALL_PSERIES_3
+ SYSCALL_KVMTEST /* loads PACA into r13, and saves r13 to r9 */
+ SYSCALL_FASTENDIAN_TEST
+ SYSCALL_VIRT
+ SYSCALL_FASTENDIAN
EXC_VIRT_END(system_call, 0x4c00, 0x100)
-TRAMP_KVM(PACA_EXGEN, 0xc00)
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+ /*
+ * This is a hcall, so register convention is as above, with these
+ * differences:
+ * r13 = PACA
+ * r12 = orig r13
+ * ctr = orig r10
+ */
+TRAMP_KVM_BEGIN(do_kvm_0xc00)
+ /*
+ * Save the PPR (on systems that support it) before changing to
+ * HMT_MEDIUM. That allows the KVM code to save that value into the
+ * guest state (it is the guest's PPR value).
+ */
+ OPT_GET_SPR(r0, SPRN_PPR, CPU_FTR_HAS_PPR)
+ HMT_MEDIUM
+ OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r0, CPU_FTR_HAS_PPR)
+ mfctr r10
+ SET_SCRATCH0(r12)
+ std r9,PACA_EXGEN+EX_R9(r13)
+ mfcr r9
+ std r10,PACA_EXGEN+EX_R10(r13)
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
+#endif
EXC_REAL(single_step, 0xd00, 0x100)
@@ -1553,6 +1594,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl kernel_bad_stack
b 1b
+_ASM_NOKPROBE_SYMBOL(bad_stack);
+
+/*
+ * When doorbell is triggered from system reset wakeup, the message is
+ * not cleared, so it would fire again when EE is enabled.
+ *
+ * When coming from local_irq_enable, there may be the same problem if
+ * we were hard disabled.
+ *
+ * Execute msgclr to clear pending exceptions before handling it.
+ */
+h_doorbell_common_msgclr:
+ LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
+ PPC_MSGCLR(3)
+ b h_doorbell_common
+
+doorbell_super_common_msgclr:
+ LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
+ PPC_MSGCLRP(3)
+ b doorbell_super_common
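The (63 - 36) shift in the two stubs above places the doorbell message type where msgclr/msgclrp expect it: assuming the IBM bit numbering used elsewhere in this file, the type field ends at bit 36 of the 64-bit operand, so its low bit must be shifted 63 - 36 = 27 positions up from the register's LSB. A one-line sketch with a hypothetical macro name:

	/* position a doorbell message type in an msgclr/msgsnd operand (sketch) */
	#define DBELL_TYPE_TO_RB(type)	((unsigned long)(type) << (63 - 36))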
/*
* Called from arch_local_irq_enable when an interrupt needs
@@ -1563,6 +1624,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
* Note: While MSR:EE is off, we need to make sure that _MSR
* in the generated frame has EE set to 1 or the exception
* handler will not properly re-enable them.
+ *
+ * Note that we don't specify LR as the NIP (return address) for
+ * the interrupt because that would unbalance the return branch
+ * predictor.
*/
_GLOBAL(__replay_interrupt)
/* We are going to jump to the exception common code which
@@ -1570,7 +1635,7 @@ _GLOBAL(__replay_interrupt)
* we don't give a damn about, so we don't bother storing them.
*/
mfmsr r12
- mflr r11
+ LOAD_REG_ADDR(r11, 1f)
mfcr r9
ori r12,r12,MSR_EE
cmpwi r3,0x900
@@ -1579,13 +1644,16 @@ _GLOBAL(__replay_interrupt)
beq hardware_interrupt_common
BEGIN_FTR_SECTION
cmpwi r3,0xe80
- beq h_doorbell_common
+ beq h_doorbell_common_msgclr
cmpwi r3,0xea0
beq h_virt_irq_common
cmpwi r3,0xe60
beq hmi_exception_common
FTR_SECTION_ELSE
cmpwi r3,0xa00
- beq doorbell_super_common
+ beq doorbell_super_common_msgclr
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
+1:
blr
+
+_ASM_NOKPROBE_SYMBOL(__replay_interrupt)
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 466569e26278..3079518f2245 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -113,11 +113,55 @@ int __init early_init_dt_scan_fw_dump(unsigned long node,
return 1;
}
+/*
+ * If fadump is registered, check if the memory provided
+ * falls within boot memory area.
+ */
+int is_fadump_boot_memory_area(u64 addr, ulong size)
+{
+ if (!fw_dump.dump_registered)
+ return 0;
+
+ return (addr + size) > RMA_START && addr <= fw_dump.boot_memory_size;
+}
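A hypothetical caller (illustrative, not part of this hunk) would use the helper to refuse operations on memory that a registered dump still needs to preserve:

	/* e.g. before releasing or offlining the range [addr, addr + size) */
	if (is_fadump_boot_memory_area(addr, size))
		return -EBUSY;	/* overlaps the fadump boot memory area */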
+
int is_fadump_active(void)
{
return fw_dump.dump_active;
}
+/*
+ * Returns 1 if there are no holes in the boot memory area,
+ * 0 otherwise.
+ */
+static int is_boot_memory_area_contiguous(void)
+{
+ struct memblock_region *reg;
+ unsigned long tstart, tend;
+ unsigned long start_pfn = PHYS_PFN(RMA_START);
+ unsigned long end_pfn = PHYS_PFN(RMA_START + fw_dump.boot_memory_size);
+ unsigned int ret = 0;
+
+ for_each_memblock(memory, reg) {
+ tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
+ tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
+ if (tstart < tend) {
+ /* Memory hole from start_pfn to tstart */
+ if (tstart > start_pfn)
+ break;
+
+ if (tend == end_pfn) {
+ ret = 1;
+ break;
+ }
+
+ start_pfn = tend + 1;
+ }
+ }
+
+ return ret;
+}
+
/* Print firmware assisted dump configurations for debugging purpose. */
static void fadump_show_config(void)
{
@@ -212,20 +256,46 @@ static inline unsigned long fadump_calculate_reserve_size(void)
int ret;
unsigned long long base, size;
+ if (fw_dump.reserve_bootvar)
+ pr_warn("'fadump_reserve_mem=' parameter is deprecated in favor of 'crashkernel=' parameter.\n");
+
/*
* Check if the size is specified through crashkernel= cmdline
- * option. If yes, then use that but ignore base as fadump
- * reserves memory at end of RAM.
+ * option. If yes, then use that but ignore base as fadump reserves
+ * memory at a predefined offset.
*/
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&size, &base);
if (ret == 0 && size > 0) {
+ unsigned long max_size;
+
+ if (fw_dump.reserve_bootvar)
+ pr_info("Using 'crashkernel=' parameter for memory reservation.\n");
+
fw_dump.reserve_bootvar = (unsigned long)size;
+
+ /*
+ * Adjust if the boot memory size specified is above
+ * the upper limit.
+ */
+ max_size = memblock_phys_mem_size() / MAX_BOOT_MEM_RATIO;
+ if (fw_dump.reserve_bootvar > max_size) {
+ fw_dump.reserve_bootvar = max_size;
+ pr_info("Adjusted boot memory size to %luMB\n",
+ (fw_dump.reserve_bootvar >> 20));
+ }
+
+ return fw_dump.reserve_bootvar;
+ } else if (fw_dump.reserve_bootvar) {
+ /*
+ * 'fadump_reserve_mem=' is being used to reserve memory
+ * for firmware-assisted dump.
+ */
return fw_dump.reserve_bootvar;
}
/* divide by 20 to get 5% of value */
- size = memblock_end_of_DRAM() / 20;
+ size = memblock_phys_mem_size() / 20;
/* round it down in multiples of 256 */
size = size & ~0x0FFFFFFFUL;
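A worked example of the default sizing above, with illustrative numbers: on a machine with 64 GiB of RAM,

	memblock_phys_mem_size() / 20  = 0x1000000000 / 20 ≈ 0xCCCCCCCC  (~3.2 GiB)
	size & ~0x0FFFFFFFUL           = 0xC0000000                      (3 GiB)

so "multiples of 256" in the comment means multiples of 256 MiB (0x10000000).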
@@ -377,9 +447,22 @@ static int __init early_fadump_param(char *p)
}
early_param("fadump", early_fadump_param);
-static void register_fw_dump(struct fadump_mem_struct *fdm)
+/*
+ * Look for fadump_reserve_mem= cmdline option
+ * TODO: Remove references to the 'fadump_reserve_mem=' parameter
+ * once the 'crashkernel=' parameter is fully adopted.
+ */
+static int __init early_fadump_reserve_mem(char *p)
+{
+ if (p)
+ fw_dump.reserve_bootvar = memparse(p, &p);
+ return 0;
+}
+early_param("fadump_reserve_mem", early_fadump_reserve_mem);
+
+static int register_fw_dump(struct fadump_mem_struct *fdm)
{
- int rc;
+ int rc, err;
unsigned int wait_time;
pr_debug("Registering for firmware-assisted kernel dump...\n");
@@ -396,26 +479,38 @@ static void register_fw_dump(struct fadump_mem_struct *fdm)
} while (wait_time);
+ err = -EIO;
switch (rc) {
+ default:
+ pr_err("Failed to register. Unknown Error(%d).\n", rc);
+ break;
case -1:
printk(KERN_ERR "Failed to register firmware-assisted kernel"
" dump. Hardware Error(%d).\n", rc);
break;
case -3:
+ if (!is_boot_memory_area_contiguous())
+ pr_err("Can't have holes in boot memory area while "
+ "registering fadump\n");
+
printk(KERN_ERR "Failed to register firmware-assisted kernel"
" dump. Parameter Error(%d).\n", rc);
+ err = -EINVAL;
break;
case -9:
printk(KERN_ERR "firmware-assisted kernel dump is already "
" registered.");
fw_dump.dump_registered = 1;
+ err = -EEXIST;
break;
case 0:
printk(KERN_INFO "firmware-assisted kernel dump registration"
" is successful\n");
fw_dump.dump_registered = 1;
+ err = 0;
break;
}
+ return err;
}
void crash_fadump(struct pt_regs *regs, const char *str)
@@ -831,8 +926,19 @@ static void fadump_setup_crash_memory_ranges(void)
for_each_memblock(memory, reg) {
start = (unsigned long long)reg->base;
end = start + (unsigned long long)reg->size;
- if (start == RMA_START && end >= fw_dump.boot_memory_size)
- start = fw_dump.boot_memory_size;
+
+ /*
+ * skip the first memory chunk that is already added (RMA_START
+ * through boot_memory_size). This logic needs a relook if and
+ * when RMA_START changes to a non-zero value.
+ */
+ BUILD_BUG_ON(RMA_START != 0);
+ if (start < fw_dump.boot_memory_size) {
+ if (end > fw_dump.boot_memory_size)
+ start = fw_dump.boot_memory_size;
+ else
+ continue;
+ }
/* add this range excluding the reserved dump area. */
fadump_exclude_reserved_area(start, end);
@@ -956,7 +1062,7 @@ static unsigned long init_fadump_header(unsigned long addr)
return addr;
}
-static void register_fadump(void)
+static int register_fadump(void)
{
unsigned long addr;
void *vaddr;
@@ -966,7 +1072,7 @@ static void register_fadump(void)
* assisted dump.
*/
if (!fw_dump.reserve_dump_area_size)
- return;
+ return -ENODEV;
fadump_setup_crash_memory_ranges();
@@ -979,7 +1085,7 @@ static void register_fadump(void)
fadump_create_elfcore_headers(vaddr);
/* register the future kernel dump with firmware. */
- register_fw_dump(&fdm);
+ return register_fw_dump(&fdm);
}
static int fadump_unregister_dump(struct fadump_mem_struct *fdm)
@@ -1046,28 +1152,71 @@ void fadump_cleanup(void)
}
}
+static void fadump_free_reserved_memory(unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ unsigned long pfn;
+ unsigned long time_limit = jiffies + HZ;
+
+ pr_info("freeing reserved memory (0x%llx - 0x%llx)\n",
+ PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
+
+ for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+ free_reserved_page(pfn_to_page(pfn));
+
+ if (time_after(jiffies, time_limit)) {
+ cond_resched();
+ time_limit = jiffies + HZ;
+ }
+ }
+}
+
+/*
+ * Skip memory holes and free memory that was actually reserved.
+ */
+static void fadump_release_reserved_area(unsigned long start, unsigned long end)
+{
+ struct memblock_region *reg;
+ unsigned long tstart, tend;
+ unsigned long start_pfn = PHYS_PFN(start);
+ unsigned long end_pfn = PHYS_PFN(end);
+
+ for_each_memblock(memory, reg) {
+ tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
+ tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
+ if (tstart < tend) {
+ fadump_free_reserved_memory(tstart, tend);
+
+ if (tend == end_pfn)
+ break;
+
+ start_pfn = tend + 1;
+ }
+ }
+}
+
/*
* Release the memory that was reserved in early boot to preserve the memory
* contents. The released memory will be available for general use.
*/
static void fadump_release_memory(unsigned long begin, unsigned long end)
{
- unsigned long addr;
unsigned long ra_start, ra_end;
ra_start = fw_dump.reserve_dump_area_start;
ra_end = ra_start + fw_dump.reserve_dump_area_size;
- for (addr = begin; addr < end; addr += PAGE_SIZE) {
- /*
- * exclude the dump reserve area. Will reuse it for next
- * fadump registration.
- */
- if (addr <= ra_end && ((addr + PAGE_SIZE) > ra_start))
- continue;
-
- free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
- }
+ /*
+ * exclude the dump reserve area. Will reuse it for next
+ * fadump registration.
+ */
+ if (begin < ra_end && end > ra_start) {
+ if (begin < ra_start)
+ fadump_release_reserved_area(begin, ra_start);
+ if (end > ra_end)
+ fadump_release_reserved_area(ra_end, end);
+ } else
+ fadump_release_reserved_area(begin, end);
}
static void fadump_invalidate_release_mem(void)
@@ -1161,7 +1310,6 @@ static ssize_t fadump_register_store(struct kobject *kobj,
switch (buf[0]) {
case '0':
if (fw_dump.dump_registered == 0) {
- ret = -EINVAL;
goto unlock_out;
}
/* Un-register Firmware-assisted dump */
@@ -1169,11 +1317,11 @@ static ssize_t fadump_register_store(struct kobject *kobj,
break;
case '1':
if (fw_dump.dump_registered == 1) {
- ret = -EINVAL;
+ ret = -EEXIST;
goto unlock_out;
}
/* Register Firmware-assisted dump */
- register_fadump();
+ ret = register_fadump();
break;
default:
ret = -EINVAL;
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 4898d676dcae..5adb390e773b 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -31,6 +31,7 @@
* registers for winkle support.
*/
#define _SDR1 GPR3
+#define _PTCR GPR3
#define _RPR GPR4
#define _SPURR GPR5
#define _PURR GPR6
@@ -39,7 +40,7 @@
#define _AMOR GPR9
#define _WORT GPR10
#define _WORC GPR11
-#define _PTCR GPR12
+#define _LPCR GPR12
#define PSSCR_EC_ESL_MASK_SHIFTED (PSSCR_EC | PSSCR_ESL) >> 16
@@ -55,12 +56,14 @@ save_sprs_to_stack:
* here since any thread in the core might wake up first
*/
BEGIN_FTR_SECTION
- mfspr r3,SPRN_PTCR
- std r3,_PTCR(r1)
/*
* Note - SDR1 is dropped in Power ISA v3. Hence not restoring
* SDR1 here
*/
+ mfspr r3,SPRN_PTCR
+ std r3,_PTCR(r1)
+ mfspr r3,SPRN_LPCR
+ std r3,_LPCR(r1)
FTR_SECTION_ELSE
mfspr r3,SPRN_SDR1
std r3,_SDR1(r1)
@@ -106,13 +109,9 @@ core_idle_lock_held:
/*
* Pass requested state in r3:
* r3 - PNV_THREAD_NAP/SLEEP/WINKLE in POWER8
- * - Requested STOP state in POWER9
- *
- * To check IRQ_HAPPENED in r4
- * 0 - don't check
- * 1 - check
+ * - Requested PSSCR value in POWER9
*
- * Address to 'rfid' to in r5
+ * Address of idle handler to branch to in realmode in r4
*/
pnv_powersave_common:
/* Use r3 to pass state nap/sleep/winkle */
@@ -122,37 +121,14 @@ pnv_powersave_common:
* need to save PC, some CR bits and the NV GPRs,
* but for now an interrupt frame will do.
*/
+ mtctr r4
+
mflr r0
std r0,16(r1)
stdu r1,-INT_FRAME_SIZE(r1)
std r0,_LINK(r1)
std r0,_NIP(r1)
- /* Hard disable interrupts */
- mfmsr r9
- rldicl r9,r9,48,1
- rotldi r9,r9,16
- mtmsrd r9,1 /* hard-disable interrupts */
-
- /* Check if something happened while soft-disabled */
- lbz r0,PACAIRQHAPPENED(r13)
- andi. r0,r0,~PACA_IRQ_HARD_DIS@l
- beq 1f
- cmpwi cr0,r4,0
- beq 1f
- addi r1,r1,INT_FRAME_SIZE
- ld r0,16(r1)
- li r3,0 /* Return 0 (no nap) */
- mtlr r0
- blr
-
-1: /* We mark irqs hard disabled as this is the state we'll
- * be in when returning and we need to tell arch_local_irq_restore()
- * about it
- */
- li r0,PACA_IRQ_HARD_DIS
- stb r0,PACAIRQHAPPENED(r13)
-
/* We haven't lost state ... yet */
li r0,0
stb r0,PACA_NAPSTATELOST(r13)
@@ -160,9 +136,8 @@ pnv_powersave_common:
/* Continue saving state */
SAVE_GPR(2, r1)
SAVE_NVGPRS(r1)
- mfcr r4
- std r4,_CCR(r1)
- std r9,_MSR(r1)
+ mfcr r5
+ std r5,_CCR(r1)
std r1,PACAR1(r13)
/*
@@ -172,12 +147,8 @@ pnv_powersave_common:
* the MMU context to the guest.
*/
LOAD_REG_IMMEDIATE(r7, MSR_IDLE)
- li r6, MSR_RI
- andc r6, r9, r6
- mtmsrd r6, 1 /* clear RI before setting SRR0/1 */
- mtspr SPRN_SRR0, r5
- mtspr SPRN_SRR1, r7
- rfid
+ mtmsrd r7,0
+ bctr
.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
@@ -285,6 +256,19 @@ power_enter_stop:
bne .Lhandle_esl_ec_set
IDLE_STATE_ENTER_SEQ(PPC_STOP)
li r3,0 /* Since we didn't lose state, return 0 */
+
+ /*
+ * pnv_wakeup_noloss() expects r12 to contain the SRR1 value so
+ * it can determine if the wakeup reason is an HMI in
+ * CHECK_HMI_INTERRUPT.
+ *
+ * However, when we wakeup with ESL=0, SRR1 will not contain the wakeup
+ * reason, so there is no point setting r12 to SRR1.
+ *
+ * Further, we clear r12 here, so that we don't accidentally enter the
+ * HMI in pnv_wakeup_noloss() if the value of r12[42:45] == WAKE_HMI.
+ */
+ li r12, 0
b pnv_wakeup_noloss
.Lhandle_esl_ec_set:
@@ -319,45 +303,23 @@ lwarx_loop_stop:
IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
-_GLOBAL(power7_idle)
+/*
+ * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
+ * r3 contains desired idle state (PNV_THREAD_NAP/SLEEP/WINKLE).
+ */
+_GLOBAL(power7_idle_insn)
/* Now check if user or arch enabled NAP mode */
- LOAD_REG_ADDRBASE(r3,powersave_nap)
- lwz r4,ADDROFF(powersave_nap)(r3)
- cmpwi 0,r4,0
- beqlr
- li r3, 1
- /* fall through */
-
-_GLOBAL(power7_nap)
- mr r4,r3
- li r3,PNV_THREAD_NAP
- LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
- b pnv_powersave_common
- /* No return */
-
-_GLOBAL(power7_sleep)
- li r3,PNV_THREAD_SLEEP
- li r4,1
- LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
- b pnv_powersave_common
- /* No return */
-
-_GLOBAL(power7_winkle)
- li r3,PNV_THREAD_WINKLE
- li r4,1
- LOAD_REG_ADDR(r5, pnv_enter_arch207_idle_mode)
+ LOAD_REG_ADDR(r4, pnv_enter_arch207_idle_mode)
b pnv_powersave_common
- /* No return */
#define CHECK_HMI_INTERRUPT \
- mfspr r0,SPRN_SRR1; \
BEGIN_FTR_SECTION_NESTED(66); \
- rlwinm r0,r0,45-31,0xf; /* extract wake reason field (P8) */ \
+ rlwinm r0,r12,45-31,0xf; /* extract wake reason field (P8) */ \
FTR_SECTION_ELSE_NESTED(66); \
- rlwinm r0,r0,45-31,0xe; /* P7 wake reason field is 3 bits */ \
+ rlwinm r0,r12,45-31,0xe; /* P7 wake reason field is 3 bits */ \
ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66); \
cmpwi r0,0xa; /* Hypervisor maintenance ? */ \
- bne 20f; \
+ bne+ 20f; \
/* Invoke opal call to handle hmi */ \
ld r2,PACATOC(r13); \
ld r1,PACAR1(r13); \
@@ -369,16 +331,13 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66); \
20: nop;
/*
- * r3 - The PSSCR value corresponding to the stop state.
- * r4 - The PSSCR mask corrresonding to the stop state.
+ * Entered with MSR[EE]=0 and no soft-masked interrupts pending.
+ * r3 contains desired PSSCR register value.
*/
_GLOBAL(power9_idle_stop)
- mfspr r5,SPRN_PSSCR
- andc r5,r5,r4
- or r3,r3,r5
+ std r3, PACA_REQ_PSSCR(r13)
mtspr SPRN_PSSCR,r3
- LOAD_REG_ADDR(r5,power_enter_stop)
- li r4,1
+ LOAD_REG_ADDR(r4,power_enter_stop)
b pnv_powersave_common
/* No return */
@@ -436,17 +395,17 @@ pnv_powersave_wakeup_mce:
/*
* Now put the original SRR1 with SRR1_WAKEMCE_RESVD as the wake
- * reason into SRR1, which allows reuse of the system reset wakeup
+ * reason into r12, which allows reuse of the system reset wakeup
* code without being mistaken for another type of wakeup.
*/
- oris r3,r3,SRR1_WAKEMCE_RESVD@h
- mtspr SPRN_SRR1,r3
+ oris r12,r3,SRR1_WAKEMCE_RESVD@h
b pnv_powersave_wakeup
/*
* Called from reset vector for powersave wakeups.
* cr3 - set to gt if waking up with partial/complete hypervisor state loss
+ * r12 - SRR1
*/
.global pnv_powersave_wakeup
pnv_powersave_wakeup:
@@ -464,6 +423,8 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
li r0,PNV_THREAD_RUNNING
stb r0,PACA_THREAD_IDLE_STATE(r13) /* Clear thread state */
+ mr r3,r12
+
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
li r0,KVM_HWTHREAD_IN_KERNEL
stb r0,HSTATE_HWTHREAD_STATE(r13)
@@ -477,7 +438,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
#endif
/* Return SRR1 from power7_nap() */
- mfspr r3,SPRN_SRR1
blt cr3,pnv_wakeup_noloss
b pnv_wakeup_loss
@@ -489,18 +449,35 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
*/
pnv_restore_hyp_resource_arch300:
/*
+ * Workaround for POWER9, if we lost resources, the ERAT
+ * might have been mixed up and needs flushing.
+ */
+ blt cr3,1f
+ PPC_INVALIDATE_ERAT
+1:
+ /*
* POWER ISA 3. Use PSSCR to determine if we
* are waking up from deep idle state
*/
LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
ld r4,ADDROFF(pnv_first_deep_stop_state)(r5)
- mfspr r5,SPRN_PSSCR
+BEGIN_FTR_SECTION_NESTED(71)
+ /*
+ * Assume that we are waking up from the state
+ * with the same Requested Level (RL) as in the
+ * PSSCR, which is in bits 60-63
+ */
+ ld r5,PACA_REQ_PSSCR(r13)
+ rldicl r5,r5,0,60
+FTR_SECTION_ELSE_NESTED(71)
/*
* 0-3 bits correspond to Power-Saving Level Status
* which indicates the idle state we are waking up from
*/
+ mfspr r5, SPRN_PSSCR
rldicl r5,r5,4,60
+ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_POWER9_DD1, 71)
cmpd cr4,r5,r4
bge cr4,pnv_wakeup_tb_loss /* returns to caller */
@@ -567,9 +544,9 @@ pnv_wakeup_tb_loss:
* is required to return back to reset vector after hypervisor state
* restore is complete.
*/
+ mr r19,r12
mr r18,r4
mflr r17
- mfspr r16,SPRN_SRR1
BEGIN_FTR_SECTION
CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
@@ -731,13 +708,14 @@ timebase_resync:
 * Use cr3 which indicates that we are waking up with at least partial
* hypervisor state loss to determine if TIMEBASE RESYNC is needed.
*/
- ble cr3,clear_lock
+ ble cr3,.Ltb_resynced
/* Time base re-sync */
bl opal_resync_timebase;
/*
- * If waking up from sleep, per core state is not lost, skip to
- * clear_lock.
+ * If waking up from sleep (POWER8), per core state
+ * is not lost, skip to clear_lock.
*/
+.Ltb_resynced:
blt cr4,clear_lock
/*
@@ -812,9 +790,13 @@ no_segments:
mtctr r12
bctrl
+BEGIN_FTR_SECTION
+ ld r4,_LPCR(r1)
+ mtspr SPRN_LPCR,r4
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
hypervisor_state_restored:
- mtspr SPRN_SRR1,r16
+ mr r12,r19
mtlr r17
blr /* return to pnv_powersave_wakeup */
@@ -827,6 +809,7 @@ fastsleep_workaround_at_exit:
/*
* R3 here contains the value that will be returned to the caller
* of power7_nap.
+ * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
*/
.global pnv_wakeup_loss
pnv_wakeup_loss:
@@ -836,32 +819,33 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
REST_NVGPRS(r1)
REST_GPR(2, r1)
+ ld r4,PACAKMSR(r13)
+ ld r5,_LINK(r1)
ld r6,_CCR(r1)
- ld r4,_MSR(r1)
- ld r5,_NIP(r1)
addi r1,r1,INT_FRAME_SIZE
+ mtlr r5
mtcr r6
- mtspr SPRN_SRR1,r4
- mtspr SPRN_SRR0,r5
- rfid
+ mtmsrd r4
+ blr
/*
* R3 here contains the value that will be returned to the caller
* of power7_nap.
+ * R12 contains SRR1 for CHECK_HMI_INTERRUPT.
*/
pnv_wakeup_noloss:
lbz r0,PACA_NAPSTATELOST(r13)
cmpwi r0,0
bne pnv_wakeup_loss
+ ld r1,PACAR1(r13)
BEGIN_FTR_SECTION
CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
- ld r1,PACAR1(r13)
- ld r6,_CCR(r1)
- ld r4,_MSR(r1)
+ ld r4,PACAKMSR(r13)
ld r5,_NIP(r1)
+ ld r6,_CCR(r1)
addi r1,r1,INT_FRAME_SIZE
+ mtlr r5
mtcr r6
- mtspr SPRN_SRR1,r4
- mtspr SPRN_SRR0,r5
- rfid
+ mtmsrd r4
+ blr
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index f2b724cd9e64..233ca3fe4754 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -198,11 +198,11 @@ static unsigned long iommu_range_alloc(struct device *dev,
if (unlikely(npages == 0)) {
if (printk_ratelimit())
WARN_ON(1);
- return DMA_ERROR_CODE;
+ return IOMMU_MAPPING_ERROR;
}
if (should_fail_iommu(dev))
- return DMA_ERROR_CODE;
+ return IOMMU_MAPPING_ERROR;
/*
* We don't need to disable preemption here because any CPU can
@@ -278,7 +278,7 @@ again:
} else {
/* Give up */
spin_unlock_irqrestore(&(pool->lock), flags);
- return DMA_ERROR_CODE;
+ return IOMMU_MAPPING_ERROR;
}
}
@@ -310,13 +310,13 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
unsigned long attrs)
{
unsigned long entry;
- dma_addr_t ret = DMA_ERROR_CODE;
+ dma_addr_t ret = IOMMU_MAPPING_ERROR;
int build_fail;
entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
- if (unlikely(entry == DMA_ERROR_CODE))
- return DMA_ERROR_CODE;
+ if (unlikely(entry == IOMMU_MAPPING_ERROR))
+ return IOMMU_MAPPING_ERROR;
entry += tbl->it_offset; /* Offset into real TCE table */
ret = entry << tbl->it_page_shift; /* Set the return dma address */
@@ -328,12 +328,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
/* tbl->it_ops->set() only returns non-zero for transient errors.
* Clean up the table bitmap in this case and return
- * DMA_ERROR_CODE. For all other errors the functionality is
+ * IOMMU_MAPPING_ERROR. For all other errors the functionality is
* not altered.
*/
if (unlikely(build_fail)) {
__iommu_free(tbl, ret, npages);
- return DMA_ERROR_CODE;
+ return IOMMU_MAPPING_ERROR;
}
/* Flush/invalidate TLB caches if necessary */
@@ -478,7 +478,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
/* Handle failure */
- if (unlikely(entry == DMA_ERROR_CODE)) {
+ if (unlikely(entry == IOMMU_MAPPING_ERROR)) {
if (!(attrs & DMA_ATTR_NO_WARN) &&
printk_ratelimit())
dev_info(dev, "iommu_alloc failed, tbl %p "
@@ -545,7 +545,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
*/
if (outcount < incount) {
outs = sg_next(outs);
- outs->dma_address = DMA_ERROR_CODE;
+ outs->dma_address = IOMMU_MAPPING_ERROR;
outs->dma_length = 0;
}
@@ -563,7 +563,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
npages = iommu_num_pages(s->dma_address, s->dma_length,
IOMMU_PAGE_SIZE(tbl));
__iommu_free(tbl, vaddr, npages);
- s->dma_address = DMA_ERROR_CODE;
+ s->dma_address = IOMMU_MAPPING_ERROR;
s->dma_length = 0;
}
if (s == outs)
@@ -777,7 +777,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
unsigned long mask, enum dma_data_direction direction,
unsigned long attrs)
{
- dma_addr_t dma_handle = DMA_ERROR_CODE;
+ dma_addr_t dma_handle = IOMMU_MAPPING_ERROR;
void *vaddr;
unsigned long uaddr;
unsigned int npages, align;
@@ -797,7 +797,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
mask >> tbl->it_page_shift, align,
attrs);
- if (dma_handle == DMA_ERROR_CODE) {
+ if (dma_handle == IOMMU_MAPPING_ERROR) {
if (!(attrs & DMA_ATTR_NO_WARN) &&
printk_ratelimit()) {
dev_info(dev, "iommu_alloc failed, tbl %p "
@@ -869,7 +869,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
io_order = get_iommu_order(size, tbl);
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
mask >> tbl->it_page_shift, io_order, 0);
- if (mapping == DMA_ERROR_CODE) {
+ if (mapping == IOMMU_MAPPING_ERROR) {
free_pages((unsigned long)ret, order);
return NULL;
}
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5c291df30fe3..0bcec745a672 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -322,7 +322,8 @@ bool prep_irq_for_idle(void)
* First we need to hard disable to ensure no interrupt
* occurs before we effectively enter the low power state
*/
- hard_irq_disable();
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
/*
* If anything happened while we were soft-disabled,
@@ -347,6 +348,65 @@ bool prep_irq_for_idle(void)
return true;
}
+#ifdef CONFIG_PPC_BOOK3S
+/*
+ * This is for idle sequences that return with IRQs off, but the
+ * idle state itself wakes on interrupt. Tell the irq tracer that
+ * IRQs are enabled for the duration of idle so it does not report long
+ * IRQs-off times. Must be paired with fini_irq_for_idle_irqsoff.
+ */
+bool prep_irq_for_idle_irqsoff(void)
+{
+ WARN_ON(!irqs_disabled());
+
+ /*
+ * First we need to hard disable to ensure no interrupt
+ * occurs before we effectively enter the low power state
+ */
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+ /*
+ * If anything happened while we were soft-disabled,
+ * we return now and do not enter the low power state.
+ */
+ if (lazy_irq_pending())
+ return false;
+
+ /* Tell lockdep we are about to re-enable */
+ trace_hardirqs_on();
+
+ return true;
+}
+
+/*
+ * Take the SRR1 wakeup reason, index into this table to find the
+ * appropriate irq_happened bit.
+ */
+static const u8 srr1_to_lazyirq[0x10] = {
+ 0, 0, 0,
+ PACA_IRQ_DBELL,
+ 0,
+ PACA_IRQ_DBELL,
+ PACA_IRQ_DEC,
+ 0,
+ PACA_IRQ_EE,
+ PACA_IRQ_EE,
+ PACA_IRQ_HMI,
+ 0, 0, 0, 0, 0 };
+
+void irq_set_pending_from_srr1(unsigned long srr1)
+{
+ unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
+
+ /*
+ * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
+ * so this can be called unconditionally with srr1 wake reason.
+ */
+ local_paca->irq_happened |= srr1_to_lazyirq[idx];
+}
+#endif /* CONFIG_PPC_BOOK3S */
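The table above is indexed with SRR1[42:45] (IBM numbering, per the comment in irq_set_pending_from_srr1()): bit 45 is 63 - 45 = 18 bits up from the LSB, which is where the ">> 18" comes from. A standalone sketch of the index computation, with a hypothetical helper name:

	static inline unsigned int srr1_wake_idx(unsigned long srr1)
	{
		return (srr1 >> 18) & 0xf;	/* SRR1[42:45], the wake reason */
	}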
+
/*
* Force a replay of the external interrupt handler on this CPU.
*/
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 01addfb0ed0a..45f1ff721c32 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -164,17 +164,13 @@ NOKPROBE_SYMBOL(arch_prepare_kprobe);
void arch_arm_kprobe(struct kprobe *p)
{
- *p->addr = BREAKPOINT_INSTRUCTION;
- flush_icache_range((unsigned long) p->addr,
- (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+ patch_instruction(p->addr, BREAKPOINT_INSTRUCTION);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
void arch_disarm_kprobe(struct kprobe *p)
{
- *p->addr = p->opcode;
- flush_icache_range((unsigned long) p->addr,
- (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+ patch_instruction(p->addr, p->opcode);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 5f9eada3519b..e0e131e662ed 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -268,6 +268,7 @@ void machine_check_print_event_info(struct machine_check_event *evt,
static const char *mc_ra_types[] = {
"Indeterminate",
"Instruction fetch (bad)",
+ "Instruction fetch (foreign)",
"Page table walk ifetch (bad)",
"Page table walk ifetch (foreign)",
"Load (bad)",
@@ -405,6 +406,7 @@ void machine_check_print_event_info(struct machine_check_event *evt,
break;
}
}
+EXPORT_SYMBOL_GPL(machine_check_print_event_info);
uint64_t get_mce_fault_addr(struct machine_check_event *evt)
{
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index f913139bb0c2..d24e689e893f 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -236,6 +236,9 @@ static const struct mce_ierror_table mce_p9_ierror_table[] = {
{ 0x00000000081c0000, 0x0000000000180000, true,
MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+{ 0x00000000081c0000, 0x00000000001c0000, true,
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_IFETCH_FOREIGN,
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008000000, true,
MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_IFETCH_TIMEOUT,
MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 84db14e435f5..3f7a9a2d2435 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -244,8 +244,7 @@ _GLOBAL(_nmask_and_or_msr)
*/
_GLOBAL(real_readb)
mfmsr r7
- ori r0,r7,MSR_DR
- xori r0,r0,MSR_DR
+ rlwinm r0,r7,0,~MSR_DR
sync
mtmsr r0
sync
@@ -262,8 +261,7 @@ _GLOBAL(real_readb)
*/
_GLOBAL(real_writeb)
mfmsr r7
- ori r0,r7,MSR_DR
- xori r0,r0,MSR_DR
+ rlwinm r0,r7,0,~MSR_DR
sync
mtmsr r0
sync
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index ec60ed0d4aad..6f8273f5e988 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -158,12 +158,13 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
/* addis r4,0,(insn)@h */
- *addr++ = PPC_INST_ADDIS | ___PPC_RT(4) |
- ((val >> 16) & 0xffff);
+ patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
+ ((val >> 16) & 0xffff));
+ addr++;
/* ori r4,r4,(insn)@l */
- *addr = PPC_INST_ORI | ___PPC_RA(4) | ___PPC_RS(4) |
- (val & 0xffff);
+ patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
+ ___PPC_RS(4) | (val & 0xffff));
}
/*
@@ -173,24 +174,28 @@ void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
{
/* lis r3,(op)@highest */
- *addr++ = PPC_INST_ADDIS | ___PPC_RT(3) |
- ((val >> 48) & 0xffff);
+ patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
+ ((val >> 48) & 0xffff));
+ addr++;
/* ori r3,r3,(op)@higher */
- *addr++ = PPC_INST_ORI | ___PPC_RA(3) | ___PPC_RS(3) |
- ((val >> 32) & 0xffff);
+ patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
+ ___PPC_RS(3) | ((val >> 32) & 0xffff));
+ addr++;
/* rldicr r3,r3,32,31 */
- *addr++ = PPC_INST_RLDICR | ___PPC_RA(3) | ___PPC_RS(3) |
- __PPC_SH64(32) | __PPC_ME64(31);
+ patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
+ ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
+ addr++;
/* oris r3,r3,(op)@h */
- *addr++ = PPC_INST_ORIS | ___PPC_RA(3) | ___PPC_RS(3) |
- ((val >> 16) & 0xffff);
+ patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
+ ___PPC_RS(3) | ((val >> 16) & 0xffff));
+ addr++;
/* ori r3,r3,(op)@l */
- *addr = PPC_INST_ORI | ___PPC_RA(3) | ___PPC_RS(3) |
- (val & 0xffff);
+ patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
+ ___PPC_RS(3) | (val & 0xffff));
}
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
@@ -198,7 +203,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
long b_offset;
- unsigned long nip;
+ unsigned long nip, size;
+ int rc, i;
kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
@@ -231,8 +237,14 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
goto error;
/* Setup template */
- memcpy(buff, optprobe_template_entry,
- TMPL_END_IDX * sizeof(kprobe_opcode_t));
+ /* We can optimize this via patch_instruction_window later */
+ size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
+ pr_devel("Copying template to %p, size %lu\n", buff, size);
+ for (i = 0; i < size; i++) {
+ rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
+ if (rc < 0)
+ goto error;
+ }
/*
* Fixup the template with instructions to:
@@ -261,8 +273,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
if (!branch_op_callback || !branch_emulate_step)
goto error;
- buff[TMPL_CALL_HDLR_IDX] = branch_op_callback;
- buff[TMPL_EMULATE_IDX] = branch_emulate_step;
+ patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
+ patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);
/*
* 3. load instruction to be emulated into relevant register, and
@@ -272,8 +284,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
/*
* 4. branch back from trampoline
*/
- buff[TMPL_RET_IDX] = create_branch((unsigned int *)buff + TMPL_RET_IDX,
- (unsigned long)nip, 0);
+ patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0);
flush_icache_range((unsigned long)buff,
(unsigned long)(&buff[TMPL_END_IDX]));
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 2ad725ef4368..9f3e2c932dcc 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -511,6 +511,10 @@ void restore_math(struct pt_regs *regs)
{
unsigned long msr;
+ /*
+ * Syscall exit makes a similar initial check before branching
+ * to restore_math. Keep them in sync.
+ */
if (!msr_tm_active(regs->msr) &&
!current->thread.load_fp && !loadvec(current->thread))
return;
@@ -1133,6 +1137,11 @@ static inline void restore_sprs(struct thread_struct *old_thread,
#endif
}
+#ifdef CONFIG_PPC_BOOK3S_64
+#define CP_SIZE 128
+static const u8 dummy_copy_buffer[CP_SIZE] __attribute__((aligned(CP_SIZE)));
+#endif
+
struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *new)
{
@@ -1195,12 +1204,14 @@ struct task_struct *__switch_to(struct task_struct *prev,
__switch_to_tm(prev, new);
- /*
- * We can't take a PMU exception inside _switch() since there is a
- * window where the kernel stack SLB and the kernel stack are out
- * of sync. Hard disable here.
- */
- hard_irq_disable();
+ if (!radix_enabled()) {
+ /*
+ * We can't take a PMU exception inside _switch() since there
+ * is a window where the kernel stack SLB and the kernel stack
+ * are out of sync. Hard disable here.
+ */
+ hard_irq_disable();
+ }
/*
* Call restore_sprs() before calling _switch(). If we move it after
@@ -1220,8 +1231,28 @@ struct task_struct *__switch_to(struct task_struct *prev,
batch->active = 1;
}
- if (current_thread_info()->task->thread.regs)
+ if (current_thread_info()->task->thread.regs) {
restore_math(current_thread_info()->task->thread.regs);
+
+ /*
+ * The copy-paste buffer can only store into foreign real
+ * addresses, so unprivileged processes can not see the
+ * data or use it in any way unless they have foreign real
+ * mappings. We don't have a VAS driver that allocates those
+ * yet, so no cpabort is required.
+ */
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+ /*
+ * DD1 allows paste into normal system memory, so we
+ * do an unpaired copy here to clear the buffer and
+ * prevent a covert channel being set up.
+ *
+ * cpabort is not used because it is quite expensive.
+ */
+ asm volatile(PPC_COPY(%0, %1)
+ : : "r"(dummy_copy_buffer), "r"(0));
+ }
+ }
#endif /* CONFIG_PPC_STD_MMU_64 */
return last;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 857129acf960..94a948207cd2 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -335,6 +335,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
maj = ((pvr >> 8) & 0xFF) - 1;
min = pvr & 0xFF;
break;
+ case 0x004e: /* POWER9 bits 12-15 give chip type */
+ maj = (pvr >> 8) & 0x0F;
+ min = pvr & 0xFF;
+ break;
default:
maj = (pvr >> 8) & 0xFF;
min = pvr & 0xFF;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 1069f74fca47..c6b8bace1766 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -33,6 +33,7 @@
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
+#include <linux/processor.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
@@ -112,7 +113,8 @@ int smp_generic_cpu_bootable(unsigned int nr)
#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
- BUG_ON(nr < 0 || nr >= NR_CPUS);
+ if (nr < 0 || nr >= nr_cpu_ids)
+ return -EINVAL;
/*
* The processor is currently spinning, waiting for the
@@ -766,8 +768,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
smp_ops->give_timebase();
/* Wait until cpu puts itself in the online & active maps */
- while (!cpu_online(cpu))
- cpu_relax();
+ spin_until_cond(cpu_online(cpu));
return 0;
}
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 2b33cfaac7b8..fe6f3a285455 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -59,10 +59,10 @@
#include <linux/suspend.h>
#include <linux/rtc.h>
#include <linux/sched/cputime.h>
+#include <linux/processor.h>
#include <asm/trace.h>
#include <asm/io.h>
-#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
@@ -442,6 +442,7 @@ void __delay(unsigned long loops)
unsigned long start;
int diff;
+ spin_begin();
if (__USE_RTC()) {
start = get_rtcl();
do {
@@ -449,13 +450,14 @@ void __delay(unsigned long loops)
diff = get_rtcl() - start;
if (diff < 0)
diff += 1000000000;
+ spin_cpu_relax();
} while (diff < loops);
} else {
start = get_tbl();
while (get_tbl() - start < loops)
- HMT_low();
- HMT_medium();
+ spin_cpu_relax();
}
+ spin_end();
}
EXPORT_SYMBOL(__delay);
@@ -675,7 +677,7 @@ EXPORT_SYMBOL_GPL(tb_to_ns);
* the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
* are 64-bit unsigned numbers.
*/
-unsigned long long sched_clock(void)
+notrace unsigned long long sched_clock(void)
{
if (__USE_RTC())
return get_rtc();
@@ -739,12 +741,20 @@ static int __init get_freq(char *name, int cells, unsigned long *val)
static void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+ unsigned int tcr;
+
/* Clear any pending timer interrupts */
mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
- /* Enable decrementer interrupt */
- mtspr(SPRN_TCR, TCR_DIE);
-#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
+ tcr = mfspr(SPRN_TCR);
+ /*
+ * The watchdog may have already been enabled by U-Boot, so leave
+ * TCR[WP] (Watchdog Period) alone.
+ */
+ tcr &= TCR_WP_MASK; /* Clear all bits except for TCR[WP] */
+ tcr |= TCR_DIE; /* Enable decrementer */
+ mtspr(SPRN_TCR, tcr);
+#endif
}
void __init generic_calibrate_decr(void)
@@ -823,38 +833,76 @@ void read_persistent_clock(struct timespec *ts)
}
/* clocksource code */
-static u64 rtc_read(struct clocksource *cs)
+static notrace u64 rtc_read(struct clocksource *cs)
{
return (u64)get_rtc();
}
-static u64 timebase_read(struct clocksource *cs)
+static notrace u64 timebase_read(struct clocksource *cs)
{
return (u64)get_tb();
}
-void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
- struct clocksource *clock, u32 mult, u64 cycle_last)
+
+void update_vsyscall(struct timekeeper *tk)
{
+ struct timespec xt;
+ struct clocksource *clock = tk->tkr_mono.clock;
+ u32 mult = tk->tkr_mono.mult;
+ u32 shift = tk->tkr_mono.shift;
+ u64 cycle_last = tk->tkr_mono.cycle_last;
u64 new_tb_to_xs, new_stamp_xsec;
- u32 frac_sec;
+ u64 frac_sec;
if (clock != &clocksource_timebase)
return;
+ xt.tv_sec = tk->xtime_sec;
+ xt.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
+
/* Make userspace gettimeofday spin until we're done. */
++vdso_data->tb_update_count;
smp_mb();
- /* 19342813113834067 ~= 2^(20+64) / 1e9 */
- new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
- new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
- do_div(new_stamp_xsec, 1000000000);
- new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;
+ /*
+ * This computes ((2^20 / 1e9) * mult) >> shift as a
+ * 0.64 fixed-point fraction.
+ * The computation in the else clause below won't overflow
+ * (as long as the timebase frequency is >= 1.049 MHz)
+ * but loses precision because we lose the low bits of the constant
+ * in the shift. Note that 19342813113834067 ~= 2^(20+64) / 1e9.
+ * For a shift of 24 the error is about 0.5e-9, or about 0.5ns
+ * over a second. (Shift values are usually 22, 23 or 24.)
+ * For high frequency clocks such as the 512MHz timebase clock
+ * on POWER[6789], the mult value is small (e.g. 32768000)
+ * and so we can shift the constant by 16 initially
+ * (295147905179 ~= 2^(20+64-16) / 1e9) and then do the
+ * remaining shifts after the multiplication, which gives a
+ * more accurate result (e.g. with mult = 32768000, shift = 24,
+ * the error is only about 1.2e-12, or 0.7ns over 10 minutes).
+ */
+ if (mult <= 62500000 && clock->shift >= 16)
+ new_tb_to_xs = ((u64) mult * 295147905179ULL) >> (clock->shift - 16);
+ else
+ new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
+
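As a quick sanity check on the two constants used above (worked arithmetic, not part of the patch): 2^(20+64) / 10^9 = 19342813113834066.8, which rounds to 19342813113834067, and 2^(20+64-16) / 10^9 = 295147905179.35, which truncates to 295147905179. For the 512 MHz timebase example in the comment (mult = 32768000, shift = 24), the more precise branch computes

	(32768000 * 295147905179) >> (24 - 16) ~= 3.778e16

which agrees with the exact tb_to_xs value 2^84 / (512 * 10^6) ~= 3.778e16 to within the quoted error.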
+ /*
+ * Compute the fractional second in units of 2^-32 seconds.
+ * The fractional second is tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift
+ * in nanoseconds, so multiplying that by 2^32 / 1e9 gives
+ * it in units of 2^-32 seconds.
+ * We assume shift <= 32 because clocks_calc_mult_shift()
+ * generates shift values in the range 0 - 32.
+ */
+ frac_sec = tk->tkr_mono.xtime_nsec << (32 - shift);
+ do_div(frac_sec, NSEC_PER_SEC);
- BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
- /* this is tv_nsec / 1e9 as a 0.32 fraction */
- frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;
+ /*
+ * Work out new stamp_xsec value for any legacy users of systemcfg.
+ * stamp_xsec is in units of 2^-20 seconds.
+ */
+ new_stamp_xsec = frac_sec >> 12;
+ new_stamp_xsec += tk->xtime_sec * XSEC_PER_SEC;
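To make the unit conversion concrete: xtime_nsec holds nanoseconds scaled by 2^shift, so

	frac_sec = (xtime_nsec << (32 - shift)) / NSEC_PER_SEC
	         = (nanoseconds * 2^32) / 10^9        -> fractional second in units of 2^-32 s

and shifting that right by 12 converts it to xsec (2^-20 s) units, since 2^-32 * 2^12 = 2^-20; hence new_stamp_xsec = (frac_sec >> 12) + xtime_sec * XSEC_PER_SEC.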
/*
* tb_update_count is used to allow the userspace gettimeofday code
@@ -864,15 +912,13 @@ void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
* the two values of tb_update_count match and are even then the
* tb_to_xs and stamp_xsec values are consistent. If not, then it
* loops back and reads them again until this criteria is met.
- * We expect the caller to have done the first increment of
- * vdso_data->tb_update_count already.
*/
vdso_data->tb_orig_stamp = cycle_last;
vdso_data->stamp_xsec = new_stamp_xsec;
vdso_data->tb_to_xs = new_tb_to_xs;
- vdso_data->wtom_clock_sec = wtm->tv_sec;
- vdso_data->wtom_clock_nsec = wtm->tv_nsec;
- vdso_data->stamp_xtime = *wall_time;
+ vdso_data->wtom_clock_sec = tk->wall_to_monotonic.tv_sec;
+ vdso_data->wtom_clock_nsec = tk->wall_to_monotonic.tv_nsec;
+ vdso_data->stamp_xtime = xt;
vdso_data->stamp_sec_fraction = frac_sec;
smp_wmb();
++(vdso_data->tb_update_count);
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 3a2d04134da9..c4ba37822ba0 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -313,8 +313,8 @@ dont_backup_fp:
blr
- /* void tm_recheckpoint(struct thread_struct *thread,
- * unsigned long orig_msr)
+ /* void __tm_recheckpoint(struct thread_struct *thread,
+ * unsigned long orig_msr)
* - Restore the checkpointed register state saved by tm_reclaim
* when we switch_to a process.
*
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index d4e545d27ef9..bfcfd9ef09f2 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -237,6 +237,7 @@ void die(const char *str, struct pt_regs *regs, long err)
err = 0;
oops_end(flags, regs, err);
}
+NOKPROBE_SYMBOL(die);
void user_single_step_siginfo(struct task_struct *tsk,
struct pt_regs *regs, siginfo_t *info)
@@ -1968,6 +1969,7 @@ void unrecoverable_exception(struct pt_regs *regs)
regs->trap, regs->nip);
die("Unrecoverable exception", regs, SIGABRT);
}
+NOKPROBE_SYMBOL(unrecoverable_exception);
#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
@@ -1998,6 +2000,7 @@ void kernel_bad_stack(struct pt_regs *regs)
regs->gpr[1], regs->nip);
die("Bad kernel stack pointer", regs, SIGABRT);
}
+NOKPROBE_SYMBOL(kernel_bad_stack);
void __init trap_init(void)
{
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 2f793be3d2b1..b1a250560198 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -8,6 +8,12 @@
#include <asm/cache.h>
#include <asm/thread_info.h>
+#ifdef CONFIG_STRICT_KERNEL_RWX
+#define STRICT_ALIGN_SIZE (1 << 24)
+#else
+#define STRICT_ALIGN_SIZE PAGE_SIZE
+#endif
+
ENTRY(_stext)
PHDRS {
@@ -58,7 +64,6 @@ SECTIONS
#ifdef CONFIG_PPC64
KEEP(*(.head.text.first_256B));
#ifdef CONFIG_PPC_BOOK3E
-# define END_FIXED 0x100
#else
KEEP(*(.head.text.real_vectors));
*(.head.text.real_trampolines);
@@ -66,12 +71,8 @@ SECTIONS
*(.head.text.virt_trampolines);
# if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
KEEP(*(.head.data.fwnmi_page));
-# define END_FIXED 0x8000
-# else
-# define END_FIXED 0x7000
# endif
#endif
- ASSERT((. == END_FIXED), "vmlinux.lds.S: fixed section overflow error");
#else /* !CONFIG_PPC64 */
HEAD_TEXT
#endif
@@ -79,23 +80,6 @@ SECTIONS
__head_end = .;
- /*
- * If the build dies here, it's likely code in head_64.S is referencing
- * labels it can't reach, and the linker inserting stubs without the
- * assembler's knowledge. To debug, remove the above assert and
- * rebuild. Look for branch stubs in the fixed section region.
- *
- * Linker stub generation could be allowed in "trampoline"
- * sections if absolutely necessary, but this would require
- * some rework of the fixed sections. Before resorting to this,
- * consider references that have sufficient addressing range,
- * (e.g., hand coded trampolines) so the linker does not have
- * to add stubs.
- *
- * Linker stubs at the top of the main text section are currently not
- * detected, and will result in a crash at boot due to offsets being
- * wrong.
- */
#ifdef CONFIG_PPC64
/*
* BLOCK(0) overrides the default output section alignment because
@@ -103,18 +87,31 @@ SECTIONS
* section placement to work.
*/
.text BLOCK(0) : AT(ADDR(.text) - LOAD_OFFSET) {
+#ifdef CONFIG_LD_HEAD_STUB_CATCH
+ *(.linker_stub_catch);
+ . = . ;
+#endif
+
#else
.text : AT(ADDR(.text) - LOAD_OFFSET) {
ALIGN_FUNCTION();
#endif
/* careful! __ftr_alt_* sections need to be close to .text */
- *(.text .fixup __ftr_alt_* .ref.text)
+ *(.text.hot .text .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text);
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
+ /*
+ * -Os builds call FP save/restore functions. The powerpc64
+ * linker generates those on demand in the .sfpr section.
+ * .sfpr gets placed at the beginning of a group of input
+ * sections, which can break start-of-text offset if it is
+ * included with the main text sections, so put it by itself.
+ */
+ *(.sfpr);
MEM_KEEP(init.text)
MEM_KEEP(exit.text)
@@ -132,7 +129,7 @@ SECTIONS
PROVIDE32 (etext = .);
/* Read-only data */
- RODATA
+ RO_DATA(PAGE_SIZE)
EXCEPTION_TABLE(0)
@@ -149,7 +146,7 @@ SECTIONS
/*
* Init sections discarded at runtime
*/
- . = ALIGN(PAGE_SIZE);
+ . = ALIGN(STRICT_ALIGN_SIZE);
__init_begin = .;
INIT_TEXT_SECTION(PAGE_SIZE) :kernel
@@ -267,7 +264,9 @@ SECTIONS
.data : AT(ADDR(.data) - LOAD_OFFSET) {
DATA_DATA
*(.sdata)
+ *(.sdata2)
*(.got.plt) *(.got)
+ *(.plt)
}
#else
.data : AT(ADDR(.data) - LOAD_OFFSET) {
@@ -330,6 +329,16 @@ SECTIONS
_end = . ;
PROVIDE32 (end = .);
- /* Sections to be discarded. */
+ STABS_DEBUG
+
+ DWARF_DEBUG
+
DISCARDS
+ /DISCARD/ : {
+ *(*.EMB.apuinfo)
+ *(.glink .iplt .plt .rela* .comment)
+ *(.gnu.version*)
+ *(.gnu.attributes)
+ *(.eh_frame)
+ }
}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 773b35d16a0b..0b436df746fc 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -46,6 +46,8 @@
#include <linux/of.h>
#include <asm/reg.h>
+#include <asm/ppc-opcode.h>
+#include <asm/disassemble.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
@@ -645,6 +647,7 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
unsigned long stolen;
unsigned long core_stolen;
u64 now;
+ unsigned long flags;
dt = vcpu->arch.dtl_ptr;
vpa = vcpu->arch.vpa.pinned_addr;
@@ -652,10 +655,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
core_stolen = vcore_stolen_time(vc, now);
stolen = core_stolen - vcpu->arch.stolen_logged;
vcpu->arch.stolen_logged = core_stolen;
- spin_lock_irq(&vcpu->arch.tbacct_lock);
+ spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
stolen += vcpu->arch.busy_stolen;
vcpu->arch.busy_stolen = 0;
- spin_unlock_irq(&vcpu->arch.tbacct_lock);
+ spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
if (!dt || !vpa)
return;
memset(dt, 0, sizeof(struct dtl_entry));
@@ -675,6 +678,26 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
vcpu->arch.dtl.dirty = true;
}
+/* See if there is a doorbell interrupt pending for a vcpu */
+static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
+{
+ int thr;
+ struct kvmppc_vcore *vc;
+
+ if (vcpu->arch.doorbell_request)
+ return true;
+ /*
+ * Ensure that the read of vcore->dpdes comes after the read
+ * of vcpu->doorbell_request. This barrier matches the
+ * lwsync in book3s_hv_rmhandlers.S just before the
+ * fast_guest_return label.
+ */
+ smp_rmb();
+ vc = vcpu->arch.vcore;
+ thr = vcpu->vcpu_id - vc->first_vcpuid;
+ return !!(vc->dpdes & (1 << thr));
+}
+
static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
@@ -926,6 +949,101 @@ static int kvmppc_emulate_debug_inst(struct kvm_run *run,
}
}
+static void do_nothing(void *x)
+{
+}
+
+static unsigned long kvmppc_read_dpdes(struct kvm_vcpu *vcpu)
+{
+ int thr, cpu, pcpu, nthreads;
+ struct kvm_vcpu *v;
+ unsigned long dpdes;
+
+ nthreads = vcpu->kvm->arch.emul_smt_mode;
+ dpdes = 0;
+ cpu = vcpu->vcpu_id & ~(nthreads - 1);
+ for (thr = 0; thr < nthreads; ++thr, ++cpu) {
+ v = kvmppc_find_vcpu(vcpu->kvm, cpu);
+ if (!v)
+ continue;
+ /*
+ * If the vcpu is currently running on a physical cpu thread,
+ * interrupt it in order to pull it out of the guest briefly,
+ * which will update its vcore->dpdes value.
+ */
+ pcpu = READ_ONCE(v->cpu);
+ if (pcpu >= 0)
+ smp_call_function_single(pcpu, do_nothing, NULL, 1);
+ if (kvmppc_doorbell_pending(v))
+ dpdes |= 1 << thr;
+ }
+ return dpdes;
+}
+
+/*
+ * On POWER9, emulate doorbell-related instructions in order to
+ * give the guest the illusion of running on a multi-threaded core.
+ * The instructions emulated are msgsndp, msgclrp, mfspr TIR,
+ * and mfspr DPDES.
+ */
+static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
+{
+ u32 inst, rb, thr;
+ unsigned long arg;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_vcpu *tvcpu;
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return EMULATE_FAIL;
+ if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
+ return RESUME_GUEST;
+ if (get_op(inst) != 31)
+ return EMULATE_FAIL;
+ rb = get_rb(inst);
+ thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1);
+ switch (get_xop(inst)) {
+ case OP_31_XOP_MSGSNDP:
+ arg = kvmppc_get_gpr(vcpu, rb);
+ if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER)
+ break;
+ arg &= 0x3f;
+ if (arg >= kvm->arch.emul_smt_mode)
+ break;
+ tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg);
+ if (!tvcpu)
+ break;
+ if (!tvcpu->arch.doorbell_request) {
+ tvcpu->arch.doorbell_request = 1;
+ kvmppc_fast_vcpu_kick_hv(tvcpu);
+ }
+ break;
+ case OP_31_XOP_MSGCLRP:
+ arg = kvmppc_get_gpr(vcpu, rb);
+ if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER)
+ break;
+ vcpu->arch.vcore->dpdes = 0;
+ vcpu->arch.doorbell_request = 0;
+ break;
+ case OP_31_XOP_MFSPR:
+ switch (get_sprn(inst)) {
+ case SPRN_TIR:
+ arg = thr;
+ break;
+ case SPRN_DPDES:
+ arg = kvmppc_read_dpdes(vcpu);
+ break;
+ default:
+ return EMULATE_FAIL;
+ }
+ kvmppc_set_gpr(vcpu, get_rt(inst), arg);
+ break;
+ default:
+ return EMULATE_FAIL;
+ }
+ kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
+ return RESUME_GUEST;
+}
+
static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
struct task_struct *tsk)
{
@@ -971,15 +1089,20 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_MACHINE_CHECK:
- /*
- * Deliver a machine check interrupt to the guest.
- * We have to do this, even if the host has handled the
- * machine check, because machine checks use SRR0/1 and
- * the interrupt might have trashed guest state in them.
- */
- kvmppc_book3s_queue_irqprio(vcpu,
- BOOK3S_INTERRUPT_MACHINE_CHECK);
- r = RESUME_GUEST;
+ /* Exit to guest with KVM_EXIT_NMI as exit reason */
+ run->exit_reason = KVM_EXIT_NMI;
+ run->hw.hardware_exit_reason = vcpu->arch.trap;
+ /* Clear out the old NMI status from run->flags */
+ run->flags &= ~KVM_RUN_PPC_NMI_DISP_MASK;
+ /* Now set the NMI status */
+ if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED)
+ run->flags |= KVM_RUN_PPC_NMI_DISP_FULLY_RECOV;
+ else
+ run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV;
+
+ r = RESUME_HOST;
+ /* Print the MCE event to host console. */
+ machine_check_print_event_info(&vcpu->arch.mce_evt, false);
break;
case BOOK3S_INTERRUPT_PROGRAM:
{
@@ -1048,12 +1171,19 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
/*
* This occurs if the guest (kernel or userspace) does something that
- * is prohibited by HFSCR. We just generate a program interrupt to
- * the guest.
+ * is prohibited by HFSCR.
+ * On POWER9, this could be a doorbell instruction that we need
+ * to emulate.
+ * Otherwise, we just generate a program interrupt to the guest.
*/
case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
- kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
- r = RESUME_GUEST;
+ r = EMULATE_FAIL;
+ if ((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG)
+ r = kvmppc_emulate_doorbell_instr(vcpu);
+ if (r == EMULATE_FAIL) {
+ kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+ r = RESUME_GUEST;
+ }
break;
case BOOK3S_INTERRUPT_HV_RM_HARD:
r = RESUME_PASSTHROUGH;
@@ -1143,6 +1273,12 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
if (cpu_has_feature(CPU_FTR_ARCH_207S))
mask |= LPCR_AIL;
+ /*
+ * On POWER9, allow userspace to enable large decrementer for the
+ * guest, whether or not the host has it enabled.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ mask |= LPCR_LD;
/* Broken 32-bit version of LPCR must not clear top bits */
if (preserve_top32)
@@ -1611,7 +1747,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
init_swait_queue_head(&vcore->wq);
vcore->preempt_tb = TB_NIL;
vcore->lpcr = kvm->arch.lpcr;
- vcore->first_vcpuid = core * threads_per_vcore();
+ vcore->first_vcpuid = core * kvm->arch.smt_mode;
vcore->kvm = kvm;
INIT_LIST_HEAD(&vcore->preempt_list);
@@ -1770,14 +1906,10 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
unsigned int id)
{
struct kvm_vcpu *vcpu;
- int err = -EINVAL;
+ int err;
int core;
struct kvmppc_vcore *vcore;
- core = id / threads_per_vcore();
- if (core >= KVM_MAX_VCORES)
- goto out;
-
err = -ENOMEM;
vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!vcpu)
@@ -1808,6 +1940,20 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
vcpu->arch.busy_preempt = TB_NIL;
vcpu->arch.intr_msr = MSR_SF | MSR_ME;
+ /*
+ * Set the default HFSCR for the guest from the host value.
+ * This value is only used on POWER9.
+ * On POWER9 DD1, TM doesn't work, so we make sure to
+ * prevent the guest from using it.
+ * On POWER9, we want to virtualize the doorbell facility, so we
+ * turn off the HFSCR bit, which causes those instructions to trap.
+ */
+ vcpu->arch.hfscr = mfspr(SPRN_HFSCR);
+ if (!cpu_has_feature(CPU_FTR_TM))
+ vcpu->arch.hfscr &= ~HFSCR_TM;
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ vcpu->arch.hfscr &= ~HFSCR_MSGP;
+
kvmppc_mmu_book3s_hv_init(vcpu);
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
@@ -1815,11 +1961,17 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
init_waitqueue_head(&vcpu->arch.cpu_run);
mutex_lock(&kvm->lock);
- vcore = kvm->arch.vcores[core];
- if (!vcore) {
- vcore = kvmppc_vcore_create(kvm, core);
- kvm->arch.vcores[core] = vcore;
- kvm->arch.online_vcores++;
+ vcore = NULL;
+ err = -EINVAL;
+ core = id / kvm->arch.smt_mode;
+ if (core < KVM_MAX_VCORES) {
+ vcore = kvm->arch.vcores[core];
+ if (!vcore) {
+ err = -ENOMEM;
+ vcore = kvmppc_vcore_create(kvm, core);
+ kvm->arch.vcores[core] = vcore;
+ kvm->arch.online_vcores++;
+ }
}
mutex_unlock(&kvm->lock);
@@ -1847,6 +1999,43 @@ out:
return ERR_PTR(err);
}
+static int kvmhv_set_smt_mode(struct kvm *kvm, unsigned long smt_mode,
+ unsigned long flags)
+{
+ int err;
+ int esmt = 0;
+
+ if (flags)
+ return -EINVAL;
+ if (smt_mode > MAX_SMT_THREADS || !is_power_of_2(smt_mode))
+ return -EINVAL;
+ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /*
+ * On POWER8 (or POWER7), the threading mode is "strict",
+ * so we pack smt_mode vcpus per vcore.
+ */
+ if (smt_mode > threads_per_subcore)
+ return -EINVAL;
+ } else {
+ /*
+ * On POWER9, the threading mode is "loose",
+ * so each vcpu gets its own vcore.
+ */
+ esmt = smt_mode;
+ smt_mode = 1;
+ }
+ mutex_lock(&kvm->lock);
+ err = -EBUSY;
+ if (!kvm->arch.online_vcores) {
+ kvm->arch.smt_mode = smt_mode;
+ kvm->arch.emul_smt_mode = esmt;
+ err = 0;
+ }
+ mutex_unlock(&kvm->lock);
+
+ return err;
+}
+
static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
{
if (vpa->pinned_addr)
@@ -1897,7 +2086,7 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
}
}
-extern void __kvmppc_vcore_entry(void);
+extern int __kvmppc_vcore_entry(void);
static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
struct kvm_vcpu *vcpu)
@@ -1962,10 +2151,6 @@ static void kvmppc_release_hwthread(int cpu)
tpaca->kvm_hstate.kvm_split_mode = NULL;
}
-static void do_nothing(void *x)
-{
-}
-
static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
{
int i;
@@ -1983,11 +2168,35 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
smp_call_function_single(cpu + i, do_nothing, NULL, 1);
}
+static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ /*
+ * With radix, the guest can do TLB invalidations itself,
+ * and it could choose to use the local form (tlbiel) if
+ * it is invalidating a translation that has only ever been
+ * used on one vcpu. However, that doesn't mean it has
+ * only ever been used on one physical cpu, since vcpus
+ * can move around between pcpus. To cope with this, when
+ * a vcpu moves from one pcpu to another, we need to tell
+ * any vcpus running on the same core as this vcpu previously
+ * ran to flush the TLB. The TLB is shared between threads,
+ * so we use a single bit in .need_tlb_flush for all 4 threads.
+ */
+ if (vcpu->arch.prev_cpu != pcpu) {
+ if (vcpu->arch.prev_cpu >= 0 &&
+ cpu_first_thread_sibling(vcpu->arch.prev_cpu) !=
+ cpu_first_thread_sibling(pcpu))
+ radix_flush_cpu(kvm, vcpu->arch.prev_cpu, vcpu);
+ vcpu->arch.prev_cpu = pcpu;
+ }
+}
+
static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
{
int cpu;
struct paca_struct *tpaca;
- struct kvmppc_vcore *mvc = vc->master_vcore;
struct kvm *kvm = vc->kvm;
cpu = vc->pcpu;
@@ -1997,36 +2206,16 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
vcpu->arch.timer_running = 0;
}
cpu += vcpu->arch.ptid;
- vcpu->cpu = mvc->pcpu;
+ vcpu->cpu = vc->pcpu;
vcpu->arch.thread_cpu = cpu;
-
- /*
- * With radix, the guest can do TLB invalidations itself,
- * and it could choose to use the local form (tlbiel) if
- * it is invalidating a translation that has only ever been
- * used on one vcpu. However, that doesn't mean it has
- * only ever been used on one physical cpu, since vcpus
- * can move around between pcpus. To cope with this, when
- * a vcpu moves from one pcpu to another, we need to tell
- * any vcpus running on the same core as this vcpu previously
- * ran to flush the TLB. The TLB is shared between threads,
- * so we use a single bit in .need_tlb_flush for all 4 threads.
- */
- if (kvm_is_radix(kvm) && vcpu->arch.prev_cpu != cpu) {
- if (vcpu->arch.prev_cpu >= 0 &&
- cpu_first_thread_sibling(vcpu->arch.prev_cpu) !=
- cpu_first_thread_sibling(cpu))
- radix_flush_cpu(kvm, vcpu->arch.prev_cpu, vcpu);
- vcpu->arch.prev_cpu = cpu;
- }
cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest);
}
tpaca = &paca[cpu];
tpaca->kvm_hstate.kvm_vcpu = vcpu;
- tpaca->kvm_hstate.ptid = cpu - mvc->pcpu;
+ tpaca->kvm_hstate.ptid = cpu - vc->pcpu;
/* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
smp_wmb();
- tpaca->kvm_hstate.kvm_vcore = mvc;
+ tpaca->kvm_hstate.kvm_vcore = vc;
if (cpu != smp_processor_id())
kvmppc_ipi_thread(cpu);
}
@@ -2155,8 +2344,7 @@ struct core_info {
int max_subcore_threads;
int total_threads;
int subcore_threads[MAX_SUBCORES];
- struct kvm *subcore_vm[MAX_SUBCORES];
- struct list_head vcs[MAX_SUBCORES];
+ struct kvmppc_vcore *vc[MAX_SUBCORES];
};
/*
@@ -2167,17 +2355,12 @@ static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 };
static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
{
- int sub;
-
memset(cip, 0, sizeof(*cip));
cip->n_subcores = 1;
cip->max_subcore_threads = vc->num_threads;
cip->total_threads = vc->num_threads;
cip->subcore_threads[0] = vc->num_threads;
- cip->subcore_vm[0] = vc->kvm;
- for (sub = 0; sub < MAX_SUBCORES; ++sub)
- INIT_LIST_HEAD(&cip->vcs[sub]);
- list_add_tail(&vc->preempt_list, &cip->vcs[0]);
+ cip->vc[0] = vc;
}
static bool subcore_config_ok(int n_subcores, int n_threads)
@@ -2197,9 +2380,8 @@ static bool subcore_config_ok(int n_subcores, int n_threads)
return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS;
}
-static void init_master_vcore(struct kvmppc_vcore *vc)
+static void init_vcore_to_run(struct kvmppc_vcore *vc)
{
- vc->master_vcore = vc;
vc->entry_exit_map = 0;
vc->in_guest = 0;
vc->napping_threads = 0;
@@ -2224,9 +2406,9 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
++cip->n_subcores;
cip->total_threads += vc->num_threads;
cip->subcore_threads[sub] = vc->num_threads;
- cip->subcore_vm[sub] = vc->kvm;
- init_master_vcore(vc);
- list_move_tail(&vc->preempt_list, &cip->vcs[sub]);
+ cip->vc[sub] = vc;
+ init_vcore_to_run(vc);
+ list_del_init(&vc->preempt_list);
return true;
}
@@ -2294,6 +2476,18 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
spin_unlock(&lp->lock);
}
+static bool recheck_signals(struct core_info *cip)
+{
+ int sub, i;
+ struct kvm_vcpu *vcpu;
+
+ for (sub = 0; sub < cip->n_subcores; ++sub)
+ for_each_runnable_thread(i, vcpu, cip->vc[sub])
+ if (signal_pending(vcpu->arch.run_task))
+ return true;
+ return false;
+}
+
static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
{
int still_running = 0, i;
@@ -2331,7 +2525,6 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
wake_up(&vcpu->arch.cpu_run);
}
}
- list_del_init(&vc->preempt_list);
if (!is_master) {
if (still_running > 0) {
kvmppc_vcore_preempt(vc);
@@ -2393,6 +2586,21 @@ static inline int kvmppc_set_host_core(unsigned int cpu)
return 0;
}
+static void set_irq_happened(int trap)
+{
+ switch (trap) {
+ case BOOK3S_INTERRUPT_EXTERNAL:
+ local_paca->irq_happened |= PACA_IRQ_EE;
+ break;
+ case BOOK3S_INTERRUPT_H_DOORBELL:
+ local_paca->irq_happened |= PACA_IRQ_DBELL;
+ break;
+ case BOOK3S_INTERRUPT_HMI:
+ local_paca->irq_happened |= PACA_IRQ_HMI;
+ break;
+ }
+}
+
/*
* Run a set of guest threads on a physical core.
* Called with vc->lock held.
@@ -2403,7 +2611,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
int i;
int srcu_idx;
struct core_info core_info;
- struct kvmppc_vcore *pvc, *vcnext;
+ struct kvmppc_vcore *pvc;
struct kvm_split_mode split_info, *sip;
int split, subcore_size, active;
int sub;
@@ -2412,6 +2620,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
int pcpu, thr;
int target_threads;
int controlled_threads;
+ int trap;
/*
* Remove from the list any threads that have a signal pending
@@ -2426,7 +2635,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
/*
* Initialize *vc.
*/
- init_master_vcore(vc);
+ init_vcore_to_run(vc);
vc->preempt_tb = TB_NIL;
/*
@@ -2463,6 +2672,43 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
if (vc->num_threads < target_threads)
collect_piggybacks(&core_info, target_threads);
+ /*
+ * On radix, arrange for TLB flushing if necessary.
+ * This has to be done before disabling interrupts since
+ * it uses smp_call_function().
+ */
+ pcpu = smp_processor_id();
+ if (kvm_is_radix(vc->kvm)) {
+ for (sub = 0; sub < core_info.n_subcores; ++sub)
+ for_each_runnable_thread(i, vcpu, core_info.vc[sub])
+ kvmppc_prepare_radix_vcpu(vcpu, pcpu);
+ }
+
+ /*
+ * Hard-disable interrupts, and check resched flag and signals.
+ * If we need to reschedule or deliver a signal, clean up
+ * and return without going into the guest(s).
+ */
+ local_irq_disable();
+ hard_irq_disable();
+ if (lazy_irq_pending() || need_resched() ||
+ recheck_signals(&core_info)) {
+ local_irq_enable();
+ vc->vcore_state = VCORE_INACTIVE;
+ /* Unlock all except the primary vcore */
+ for (sub = 1; sub < core_info.n_subcores; ++sub) {
+ pvc = core_info.vc[sub];
+ /* Put back on to the preempted vcores list */
+ kvmppc_vcore_preempt(pvc);
+ spin_unlock(&pvc->lock);
+ }
+ for (i = 0; i < controlled_threads; ++i)
+ kvmppc_release_hwthread(pcpu + i);
+ return;
+ }
+
+ kvmppc_clear_host_core(pcpu);
+
/* Decide on micro-threading (split-core) mode */
subcore_size = threads_per_subcore;
cmd_bit = stat_bit = 0;
@@ -2486,13 +2732,10 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
split_info.ldbar = mfspr(SPRN_LDBAR);
split_info.subcore_size = subcore_size;
for (sub = 0; sub < core_info.n_subcores; ++sub)
- split_info.master_vcs[sub] =
- list_first_entry(&core_info.vcs[sub],
- struct kvmppc_vcore, preempt_list);
+ split_info.vc[sub] = core_info.vc[sub];
/* order writes to split_info before kvm_split_mode pointer */
smp_wmb();
}
- pcpu = smp_processor_id();
for (thr = 0; thr < controlled_threads; ++thr)
paca[pcpu + thr].kvm_hstate.kvm_split_mode = sip;
@@ -2512,32 +2755,29 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
}
}
- kvmppc_clear_host_core(pcpu);
-
/* Start all the threads */
active = 0;
for (sub = 0; sub < core_info.n_subcores; ++sub) {
thr = subcore_thread_map[sub];
thr0_done = false;
active |= 1 << thr;
- list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list) {
- pvc->pcpu = pcpu + thr;
- for_each_runnable_thread(i, vcpu, pvc) {
- kvmppc_start_thread(vcpu, pvc);
- kvmppc_create_dtl_entry(vcpu, pvc);
- trace_kvm_guest_enter(vcpu);
- if (!vcpu->arch.ptid)
- thr0_done = true;
- active |= 1 << (thr + vcpu->arch.ptid);
- }
- /*
- * We need to start the first thread of each subcore
- * even if it doesn't have a vcpu.
- */
- if (pvc->master_vcore == pvc && !thr0_done)
- kvmppc_start_thread(NULL, pvc);
- thr += pvc->num_threads;
+ pvc = core_info.vc[sub];
+ pvc->pcpu = pcpu + thr;
+ for_each_runnable_thread(i, vcpu, pvc) {
+ kvmppc_start_thread(vcpu, pvc);
+ kvmppc_create_dtl_entry(vcpu, pvc);
+ trace_kvm_guest_enter(vcpu);
+ if (!vcpu->arch.ptid)
+ thr0_done = true;
+ active |= 1 << (thr + vcpu->arch.ptid);
}
+ /*
+ * We need to start the first thread of each subcore
+ * even if it doesn't have a vcpu.
+ */
+ if (!thr0_done)
+ kvmppc_start_thread(NULL, pvc);
+ thr += pvc->num_threads;
}
/*
@@ -2564,17 +2804,27 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
trace_kvmppc_run_core(vc, 0);
for (sub = 0; sub < core_info.n_subcores; ++sub)
- list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list)
- spin_unlock(&pvc->lock);
+ spin_unlock(&core_info.vc[sub]->lock);
+
+ /*
+ * Interrupts will be enabled once we get into the guest,
+ * so tell lockdep that we're about to enable interrupts.
+ */
+ trace_hardirqs_on();
guest_enter();
srcu_idx = srcu_read_lock(&vc->kvm->srcu);
- __kvmppc_vcore_entry();
+ trap = __kvmppc_vcore_entry();
srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
+ guest_exit();
+
+ trace_hardirqs_off();
+ set_irq_happened(trap);
+
spin_lock(&vc->lock);
/* prevent other vcpu threads from doing kvmppc_start_thread() now */
vc->vcore_state = VCORE_EXITING;
@@ -2602,6 +2852,10 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
split_info.do_nap = 0;
}
+ kvmppc_set_host_core(pcpu);
+
+ local_irq_enable();
+
/* Let secondaries go back to the offline loop */
for (i = 0; i < controlled_threads; ++i) {
kvmppc_release_hwthread(pcpu + i);
@@ -2610,18 +2864,15 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest);
}
- kvmppc_set_host_core(pcpu);
-
spin_unlock(&vc->lock);
/* make sure updates to secondary vcpu structs are visible now */
smp_mb();
- guest_exit();
- for (sub = 0; sub < core_info.n_subcores; ++sub)
- list_for_each_entry_safe(pvc, vcnext, &core_info.vcs[sub],
- preempt_list)
- post_guest_process(pvc, pvc == vc);
+ for (sub = 0; sub < core_info.n_subcores; ++sub) {
+ pvc = core_info.vc[sub];
+ post_guest_process(pvc, pvc == vc);
+ }
spin_lock(&vc->lock);
preempt_enable();
@@ -2666,6 +2917,30 @@ static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
vc->halt_poll_ns /= halt_poll_ns_shrink;
}
+#ifdef CONFIG_KVM_XICS
+static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
+{
+ if (!xive_enabled())
+ return false;
+ return vcpu->arch.xive_saved_state.pipr <
+ vcpu->arch.xive_saved_state.cppr;
+}
+#else
+static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+#endif /* CONFIG_KVM_XICS */
+
+static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.pending_exceptions || vcpu->arch.prodded ||
+ kvmppc_doorbell_pending(vcpu) || xive_interrupt_pending(vcpu))
+ return true;
+
+ return false;
+}
+
/*
* Check to see if any of the runnable vcpus on the vcore have pending
* exceptions or are no longer ceded
@@ -2676,8 +2951,7 @@ static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
int i;
for_each_runnable_thread(i, vcpu, vc) {
- if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded ||
- vcpu->arch.prodded)
+ if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
return 1;
}
@@ -2819,15 +3093,14 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
*/
if (!signal_pending(current)) {
if (vc->vcore_state == VCORE_PIGGYBACK) {
- struct kvmppc_vcore *mvc = vc->master_vcore;
- if (spin_trylock(&mvc->lock)) {
- if (mvc->vcore_state == VCORE_RUNNING &&
- !VCORE_IS_EXITING(mvc)) {
+ if (spin_trylock(&vc->lock)) {
+ if (vc->vcore_state == VCORE_RUNNING &&
+ !VCORE_IS_EXITING(vc)) {
kvmppc_create_dtl_entry(vcpu, vc);
kvmppc_start_thread(vcpu, vc);
trace_kvm_guest_enter(vcpu);
}
- spin_unlock(&mvc->lock);
+ spin_unlock(&vc->lock);
}
} else if (vc->vcore_state == VCORE_RUNNING &&
!VCORE_IS_EXITING(vc)) {
@@ -2863,7 +3136,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
break;
n_ceded = 0;
for_each_runnable_thread(i, v, vc) {
- if (!v->arch.pending_exceptions && !v->arch.prodded)
+ if (!kvmppc_vcpu_woken(v))
n_ceded += v->arch.ceded;
else
v->arch.ceded = 0;
@@ -3519,6 +3792,19 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
kvm_hv_vm_activated();
/*
+ * Initialize smt_mode depending on processor.
+ * POWER8 and earlier have to use "strict" threading, where
+ * all vCPUs in a vcore have to run on the same (sub)core,
+ * whereas on POWER9 the threads can each run a different
+ * guest.
+ */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ kvm->arch.smt_mode = threads_per_subcore;
+ else
+ kvm->arch.smt_mode = 1;
+ kvm->arch.emul_smt_mode = 1;
+
+ /*
* Create a debugfs directory for the VM
*/
snprintf(buf, sizeof(buf), "vm%d", current->pid);
@@ -3947,6 +4233,7 @@ static struct kvmppc_ops kvm_ops_hv = {
#endif
.configure_mmu = kvmhv_configure_mmu,
.get_rmmu_info = kvmhv_get_rmmu_info,
+ .set_smt_mode = kvmhv_set_smt_mode,
};
static int kvm_init_subcore_bitmap(void)
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index ee4c2558c305..90644db9d38e 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -307,7 +307,7 @@ void kvmhv_commence_exit(int trap)
return;
for (i = 0; i < MAX_SUBCORES; ++i) {
- vc = sip->master_vcs[i];
+ vc = sip->vc[i];
if (!vc)
break;
do {
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 404deb512844..dc54373c8780 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -61,13 +61,6 @@ BEGIN_FTR_SECTION
std r3, HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
- /* Hard-disable interrupts */
- mfmsr r10
- std r10, HSTATE_HOST_MSR(r13)
- rldicl r10,r10,48,1
- rotldi r10,r10,16
- mtmsrd r10,1
-
/* Save host PMU registers */
BEGIN_FTR_SECTION
/* Work around P8 PMAE bug */
@@ -153,6 +146,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
*
* R1 = host R1
* R2 = host R2
+ * R3 = trap number on this thread
* R12 = exit handler id
* R13 = PACA
*/
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 7ef0993214f3..c356f9a40b24 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -130,12 +130,28 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
out:
/*
+ * For a guest that supports the FWNMI capability, hook the MCE event
+ * into the vcpu structure. We are going to exit the guest with the
+ * KVM_EXIT_NMI exit reason; on the way out we pull this event from
+ * the vcpu structure and print it from thread 0 of the core/subcore.
+ *
+ * For a guest that does not support the FWNMI capability (old QEMU):
* We are now going to enter the guest either through the machine check
* interrupt (for unhandled errors) or continue from the current
* HSRR0 (for handled errors) in the guest. Hence
* queue up the event so that we can log it from host console later.
*/
- machine_check_queue_event();
+ if (vcpu->kvm->arch.fwnmi_enabled) {
+ /*
+ * Hook up the mce event on to vcpu structure.
+ * First clear the old event.
+ */
+ memset(&vcpu->arch.mce_evt, 0, sizeof(vcpu->arch.mce_evt));
+ if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) {
+ vcpu->arch.mce_evt = mce_evt;
+ }
+ } else
+ machine_check_queue_event();
return handled;
}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index ce6f2121fffe..584c74c8119f 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -15,6 +15,7 @@
#include <linux/log2.h>
#include <asm/tlbflush.h>
+#include <asm/trace.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
@@ -443,17 +444,23 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
cpu_relax();
if (need_sync)
asm volatile("ptesync" : : : "memory");
- for (i = 0; i < npages; ++i)
+ for (i = 0; i < npages; ++i) {
asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
"r" (rbvalues[i]), "r" (kvm->arch.lpid));
+ trace_tlbie(kvm->arch.lpid, 0, rbvalues[i],
+ kvm->arch.lpid, 0, 0, 0);
+ }
asm volatile("eieio; tlbsync; ptesync" : : : "memory");
kvm->arch.tlbie_lock = 0;
} else {
if (need_sync)
asm volatile("ptesync" : : : "memory");
- for (i = 0; i < npages; ++i)
+ for (i = 0; i < npages; ++i) {
asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
"r" (rbvalues[i]), "r" (0));
+ trace_tlbie(kvm->arch.lpid, 1, rbvalues[i],
+ 0, 0, 0, 0);
+ }
asm volatile("ptesync" : : : "memory");
}
}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 4888dd494604..cb44065e2946 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -45,7 +45,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define NAPPING_NOVCPU 2
/* Stack frame offsets for kvmppc_hv_entry */
-#define SFS 144
+#define SFS 160
#define STACK_SLOT_TRAP (SFS-4)
#define STACK_SLOT_TID (SFS-16)
#define STACK_SLOT_PSSCR (SFS-24)
@@ -54,6 +54,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define STACK_SLOT_CIABR (SFS-48)
#define STACK_SLOT_DAWR (SFS-56)
#define STACK_SLOT_DAWRX (SFS-64)
+#define STACK_SLOT_HFSCR (SFS-72)
/*
* Call kvmppc_hv_entry in real mode.
@@ -68,6 +69,7 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
std r0, PPC_LR_STKOFF(r1)
stdu r1, -112(r1)
mfmsr r10
+ std r10, HSTATE_HOST_MSR(r13)
LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
li r0,MSR_RI
andc r0,r10,r0
@@ -152,20 +154,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
stb r0, HSTATE_HWTHREAD_REQ(r13)
/*
- * For external and machine check interrupts, we need
- * to call the Linux handler to process the interrupt.
- * We do that by jumping to absolute address 0x500 for
- * external interrupts, or the machine_check_fwnmi label
- * for machine checks (since firmware might have patched
- * the vector area at 0x200). The [h]rfid at the end of the
- * handler will return to the book3s_hv_interrupts.S code.
- * For other interrupts we do the rfid to get back
- * to the book3s_hv_interrupts.S code here.
+ * For external interrupts we need to call the Linux
+ * handler to process the interrupt. We do that by jumping
+ * to absolute address 0x500 for external interrupts.
+ * The [h]rfid at the end of the handler will return to
+ * the book3s_hv_interrupts.S code. For other interrupts
+ * we do the rfid to get back to the book3s_hv_interrupts.S
+ * code here.
*/
ld r8, 112+PPC_LR_STKOFF(r1)
addi r1, r1, 112
ld r7, HSTATE_HOST_MSR(r13)
+ /* Return the trap number on this thread as the return value */
+ mr r3, r12
+
/*
* If we came back from the guest via a relocation-on interrupt,
* we will be in virtual mode at this point, which makes it a
@@ -175,59 +178,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
andi. r0, r0, MSR_IR /* in real mode? */
bne .Lvirt_return
- cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
- cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
- beq 11f
- cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
- beq 15f /* Invoke the H_DOORBELL handler */
- cmpwi cr2, r12, BOOK3S_INTERRUPT_HMI
- beq cr2, 14f /* HMI check */
-
- /* RFI into the highmem handler, or branch to interrupt handler */
+ /* RFI into the highmem handler */
mfmsr r6
li r0, MSR_RI
andc r6, r6, r0
mtmsrd r6, 1 /* Clear RI in MSR */
mtsrr0 r8
mtsrr1 r7
- beq cr1, 13f /* machine check */
RFI
- /* On POWER7, we have external interrupts set to use HSRR0/1 */
-11: mtspr SPRN_HSRR0, r8
- mtspr SPRN_HSRR1, r7
- ba 0x500
-
-13: b machine_check_fwnmi
-
-14: mtspr SPRN_HSRR0, r8
- mtspr SPRN_HSRR1, r7
- b hmi_exception_after_realmode
-
-15: mtspr SPRN_HSRR0, r8
- mtspr SPRN_HSRR1, r7
- ba 0xe80
-
- /* Virtual-mode return - can't get here for HMI or machine check */
+ /* Virtual-mode return */
.Lvirt_return:
- cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
- beq 16f
- cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
- beq 17f
- andi. r0, r7, MSR_EE /* were interrupts hard-enabled? */
- beq 18f
- mtmsrd r7, 1 /* if so then re-enable them */
-18: mtlr r8
+ mtlr r8
blr
-16: mtspr SPRN_HSRR0, r8 /* jump to reloc-on external vector */
- mtspr SPRN_HSRR1, r7
- b exc_virt_0x4500_hardware_interrupt
-
-17: mtspr SPRN_HSRR0, r8
- mtspr SPRN_HSRR1, r7
- b exc_virt_0x4e80_h_doorbell
-
kvmppc_primary_no_guest:
/* We handle this much like a ceded vcpu */
/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
@@ -349,15 +313,21 @@ kvm_novcpu_exit:
* We come in here when wakened from nap mode.
* Relocation is off and most register values are lost.
* r13 points to the PACA.
+ * r3 contains the SRR1 wakeup value, SRR1 is trashed.
*/
.globl kvm_start_guest
kvm_start_guest:
-
/* Set runlatch bit the minute you wake up from nap */
mfspr r0, SPRN_CTRLF
ori r0, r0, 1
mtspr SPRN_CTRLT, r0
+ /*
+ * Could avoid this and pass it through in r3. For now,
+ * code expects it to be in SRR1.
+ */
+ mtspr SPRN_SRR1,r3
+
ld r2,PACATOC(r13)
li r0,KVM_HWTHREAD_IN_KVM
@@ -476,13 +446,15 @@ kvm_no_guest:
/*
* We jump to pnv_wakeup_loss, which will return to the caller
* of power7_nap in the powernv cpu offline loop. The value we
- * put in r3 becomes the return value for power7_nap.
+ * put in r3 becomes the return value for power7_nap. pnv_wakeup_loss
+ * requires SRR1 in r12.
*/
li r3, LPCR_PECE0
mfspr r4, SPRN_LPCR
rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
mtspr SPRN_LPCR, r4
li r3, 0
+ mfspr r12,SPRN_SRR1
b pnv_wakeup_loss
53: HMT_LOW
@@ -769,6 +741,8 @@ BEGIN_FTR_SECTION
std r6, STACK_SLOT_PSSCR(r1)
std r7, STACK_SLOT_PID(r1)
std r8, STACK_SLOT_IAMR(r1)
+ mfspr r5, SPRN_HFSCR
+ std r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
mfspr r5, SPRN_CIABR
@@ -920,8 +894,10 @@ FTR_SECTION_ELSE
ld r5, VCPU_TID(r4)
ld r6, VCPU_PSSCR(r4)
oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */
+ ld r7, VCPU_HFSCR(r4)
mtspr SPRN_TIDR, r5
mtspr SPRN_PSSCR, r6
+ mtspr SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:
@@ -936,7 +912,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
mftb r7
subf r3,r7,r8
mtspr SPRN_DEC,r3
- stw r3,VCPU_DEC(r4)
+ std r3,VCPU_DEC(r4)
ld r5, VCPU_SPRG0(r4)
ld r6, VCPU_SPRG1(r4)
@@ -1048,7 +1024,13 @@ kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
li r0, BOOK3S_INTERRUPT_EXTERNAL
bne cr1, 12f
mfspr r0, SPRN_DEC
- cmpwi r0, 0
+BEGIN_FTR_SECTION
+ /* On POWER9 check whether the guest has large decrementer enabled */
+ andis. r8, r8, LPCR_LD@h
+ bne 15f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+ extsw r0, r0
+15: cmpdi r0, 0
li r0, BOOK3S_INTERRUPT_DECREMENTER
bge 5f
@@ -1058,6 +1040,23 @@ kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
mr r9, r4
bl kvmppc_msr_interrupt
5:
+BEGIN_FTR_SECTION
+ b fast_guest_return
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+ /* On POWER9, check for pending doorbell requests */
+ lbz r0, VCPU_DBELL_REQ(r4)
+ cmpwi r0, 0
+ beq fast_guest_return
+ ld r5, HSTATE_KVM_VCORE(r13)
+ /* Set DPDES register so the CPU will take a doorbell interrupt */
+ li r0, 1
+ mtspr SPRN_DPDES, r0
+ std r0, VCORE_DPDES(r5)
+ /* Make sure other cpus see vcore->dpdes set before dbell req clear */
+ lwsync
+ /* Clear the pending doorbell request */
+ li r0, 0
+ stb r0, VCPU_DBELL_REQ(r4)
/*
* Required state:
@@ -1232,6 +1231,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
stw r12,VCPU_TRAP(r9)
+ /*
+ * Now that we have saved away SRR0/1 and HSRR0/1,
+ * interrupts are recoverable in principle, so set MSR_RI.
+ * This becomes important for relocation-on interrupts from
+ * the guest, which we can get in radix mode on POWER9.
+ */
+ li r0, MSR_RI
+ mtmsrd r0, 1
+
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
addi r3, r9, VCPU_TB_RMINTR
mr r4, r9
@@ -1288,6 +1296,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
beq 4f
b guest_exit_cont
3:
+ /* If it's a hypervisor facility unavailable interrupt, save HFSCR */
+ cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
+ bne 14f
+ mfspr r3, SPRN_HFSCR
+ std r3, VCPU_HFSCR(r9)
+ b guest_exit_cont
+14:
/* External interrupt ? */
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
bne+ guest_exit_cont
@@ -1475,12 +1490,18 @@ mc_cont:
mtspr SPRN_SPURR,r4
/* Save DEC */
+ ld r3, HSTATE_KVM_VCORE(r13)
mfspr r5,SPRN_DEC
mftb r6
+ /* On P9, if the guest has large decr enabled, don't sign extend */
+BEGIN_FTR_SECTION
+ ld r4, VCORE_LPCR(r3)
+ andis. r4, r4, LPCR_LD@h
+ bne 16f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r5,r5
- add r5,r5,r6
+16: add r5,r5,r6
/* r5 is a guest timebase value here, convert to host TB */
- ld r3,HSTATE_KVM_VCORE(r13)
ld r4,VCORE_TB_OFFSET(r3)
subf r5,r4,r5
std r5,VCPU_DEC_EXPIRES(r9)
@@ -1525,6 +1546,9 @@ FTR_SECTION_ELSE
rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
rotldi r6, r6, 60
std r6, VCPU_PSSCR(r9)
+ /* Restore host HFSCR value */
+ ld r7, STACK_SLOT_HFSCR(r1)
+ mtspr SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
/*
* Restore various registers to 0, where non-zero values
@@ -2402,8 +2426,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
mfspr r3, SPRN_DEC
mfspr r4, SPRN_HDEC
mftb r5
+BEGIN_FTR_SECTION
+ /* On P9 check whether the guest has large decrementer mode enabled */
+ ld r6, HSTATE_KVM_VCORE(r13)
+ ld r6, VCORE_LPCR(r6)
+ andis. r6, r6, LPCR_LD@h
+ bne 68f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r3, r3
- EXTEND_HDEC(r4)
+68: EXTEND_HDEC(r4)
cmpd r3, r4
ble 67f
mtspr SPRN_DEC, r4
@@ -2589,22 +2620,32 @@ machine_check_realmode:
ld r9, HSTATE_KVM_VCPU(r13)
li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
/*
- * Deliver unhandled/fatal (e.g. UE) MCE errors to guest through
- * machine check interrupt (set HSRR0 to 0x200). And for handled
- * errors (no-fatal), just go back to guest execution with current
- * HSRR0 instead of exiting guest. This new approach will inject
- * machine check to guest for fatal error causing guest to crash.
- *
- * The old code used to return to host for unhandled errors which
- * was causing guest to hang with soft lockups inside guest and
- * makes it difficult to recover guest instance.
+ * For the guest that is FWNMI capable, deliver all the MCE errors
+ * (handled/unhandled) by exiting the guest with KVM_EXIT_NMI exit
+ * reason. This new approach injects machine check errors into the
+ * guest's address space, with additional information in the form of
+ * an RTAS event, so that the guest kernel can handle such errors
+ * suitably.
*
+ * For a guest that is not FWNMI capable (old QEMU), fall back to the
+ * old behaviour for backward compatibility:
+ * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest through
+ * a machine check interrupt (set HSRR0 to 0x200).
+ * For handled (non-fatal) errors, just go back to guest execution
+ * with the current HSRR0.
* if we receive machine check with MSR(RI=0) then deliver it to
* guest as machine check causing guest to crash.
*/
ld r11, VCPU_MSR(r9)
rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
bne mc_cont /* if so, exit to host */
+ /* Check if guest is capable of handling NMI exit */
+ ld r10, VCPU_KVM(r9)
+ lbz r10, KVM_FWNMI(r10)
+ cmpdi r10, 1 /* FWNMI capable? */
+ beq mc_cont /* if so, exit with KVM_EXIT_NMI. */
+
+ /* if not, fall through for backward compatibility. */
andi. r10, r11, MSR_RI /* check for unrecoverable exception */
beq 1f /* Deliver a machine check to guest */
ld r10, VCPU_PC(r9)
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index ffe1da95033a..08b200a0bbce 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -1257,8 +1257,8 @@ static void xive_pre_save_scan(struct kvmppc_xive *xive)
if (!xc)
continue;
for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
- if (xc->queues[i].qpage)
- xive_pre_save_queue(xive, &xc->queues[i]);
+ if (xc->queues[j].qpage)
+ xive_pre_save_queue(xive, &xc->queues[j]);
}
}
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 3eaac3809977..071b87ee682f 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -687,7 +687,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
kvmppc_core_check_exceptions(vcpu);
- if (vcpu->requests) {
+ if (kvm_request_pending(vcpu)) {
/* Exception delivery raised request; start over */
return 1;
}
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index c873ffe55362..4d8b4d6cebff 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -39,7 +39,7 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
unsigned long dec_nsec;
unsigned long long dec_time;
- pr_debug("mtDEC: %x\n", vcpu->arch.dec);
+ pr_debug("mtDEC: %lx\n", vcpu->arch.dec);
hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
#ifdef CONFIG_PPC_BOOK3S
@@ -109,7 +109,7 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
case SPRN_TBWU: break;
case SPRN_DEC:
- vcpu->arch.dec = spr_val;
+ vcpu->arch.dec = (u32) spr_val;
kvmppc_emulate_dec(vcpu);
break;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 7f71ab5fcad1..1a75c0b5f4ca 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -55,8 +55,7 @@ EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
- return !!(v->arch.pending_exceptions) ||
- v->requests;
+ return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
@@ -108,7 +107,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
*/
smp_mb();
- if (vcpu->requests) {
+ if (kvm_request_pending(vcpu)) {
/* Make sure we process requests preemptable */
local_irq_enable();
trace_kvm_check_requests(vcpu);
@@ -554,13 +553,28 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
case KVM_CAP_PPC_SMT:
r = 0;
- if (hv_enabled) {
+ if (kvm) {
+ if (kvm->arch.emul_smt_mode > 1)
+ r = kvm->arch.emul_smt_mode;
+ else
+ r = kvm->arch.smt_mode;
+ } else if (hv_enabled) {
if (cpu_has_feature(CPU_FTR_ARCH_300))
r = 1;
else
r = threads_per_subcore;
}
break;
+ case KVM_CAP_PPC_SMT_POSSIBLE:
+ r = 1;
+ if (hv_enabled) {
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ r = ((threads_per_subcore << 1) - 1);
+ else
+ /* P9 can emulate dbells, so allow any mode */
+ r = 8 | 4 | 2 | 1;
+ }
+ break;
case KVM_CAP_PPC_RMA:
r = 0;
break;
@@ -619,6 +633,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = !!hv_enabled && !cpu_has_feature(CPU_FTR_ARCH_300);
break;
#endif
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ case KVM_CAP_PPC_FWNMI:
+ r = hv_enabled;
+ break;
+#endif
case KVM_CAP_PPC_HTM:
r = cpu_has_feature(CPU_FTR_TM_COMP) &&
is_kvmppc_hv_enabled(kvm);
@@ -1538,6 +1557,15 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
break;
}
#endif /* CONFIG_KVM_XICS */
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ case KVM_CAP_PPC_FWNMI:
+ r = -EINVAL;
+ if (!is_kvmppc_hv_enabled(vcpu->kvm))
+ break;
+ r = 0;
+ vcpu->kvm->arch.fwnmi_enabled = true;
+ break;
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
default:
r = -EINVAL;
break;
@@ -1712,6 +1740,15 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
r = 0;
break;
}
+ case KVM_CAP_PPC_SMT: {
+ unsigned long mode = cap->args[0];
+ unsigned long flags = cap->args[1];
+
+ r = -EINVAL;
+ if (kvm->arch.kvm_ops->set_smt_mode)
+ r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
+ break;
+ }
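For reference, a userspace VMM drives the new set_smt_mode path through the KVM_ENABLE_CAP vm ioctl handled above, with the desired mode in args[0] and the flags word (currently required to be zero) in args[1]. A minimal sketch, assuming a vm_fd obtained from KVM_CREATE_VM and that no vcpus have been created yet; the helper name is illustrative only:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Illustrative helper: select the (emulated) SMT mode for a guest. */
	static int set_guest_smt_mode(int vm_fd, unsigned long smt_mode)
	{
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_PPC_SMT,
			.args = { smt_mode, 0 /* flags, must be zero */ },
		};

		/* The kernel rejects this with EBUSY once any vcore is online,
		 * so it has to be called before the first vcpu is created. */
		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}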
#endif
default:
r = -EINVAL;
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index ed7dfce331e0..3c3146ba62da 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -9,10 +9,17 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
-obj-y += string.o alloc.o crtsavres.o code-patching.o \
- feature-fixups.o
+obj-y += string.o alloc.o code-patching.o feature-fixups.o
-obj-$(CONFIG_PPC32) += div64.o copy_32.o
+obj-$(CONFIG_PPC32) += div64.o copy_32.o crtsavres.o
+
+# See corresponding test in arch/powerpc/Makefile
+# 64-bit linker creates .sfpr on demand for final link (vmlinux),
+# so it is only needed for modules, and only for older linkers which
+# do not support --save-restore-funcs
+ifeq ($(call ld-ifversion, -lt, 225000000, y),y)
+extra-$(CONFIG_PPC64) += crtsavres.o
+endif
obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
copyuser_power7.o string_64.o copypage_power7.o memcpy_power7.o \
@@ -30,7 +37,7 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o
-obj-$(CONFIG_ALTIVEC) += xor_vmx.o
+obj-$(CONFIG_ALTIVEC) += xor_vmx.o xor_vmx_glue.o
CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec)
obj-$(CONFIG_PPC64) += $(obj64-y)
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 500b0f6a0b64..c9de03e0c1f1 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -12,23 +12,186 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mm.h>
-#include <asm/page.h>
-#include <asm/code-patching.h>
+#include <linux/cpuhotplug.h>
+#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+#include <asm/code-patching.h>
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int __patch_instruction(unsigned int *addr, unsigned int instr)
{
int err;
__put_user_size(instr, addr, 4, err);
if (err)
return err;
- asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr));
+
+ asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" :: "r" (addr));
+
+ return 0;
+}
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
+
+static int text_area_cpu_up(unsigned int cpu)
+{
+ struct vm_struct *area;
+
+ area = get_vm_area(PAGE_SIZE, VM_ALLOC);
+ if (!area) {
+ WARN_ONCE(1, "Failed to create text area for cpu %d\n",
+ cpu);
+ return -1;
+ }
+ this_cpu_write(text_poke_area, area);
+
+ return 0;
+}
+
+static int text_area_cpu_down(unsigned int cpu)
+{
+ free_vm_area(this_cpu_read(text_poke_area));
+ return 0;
+}
+
+/*
+ * Run as a late init call. This allows all the boot time patching to be done
+ * simply by patching the code, and then we're called here prior to
+ * mark_rodata_ro(), which happens after all init calls are run. Although
+ * BUG_ON() is rude, in this case it should only happen if ENOMEM, and we judge
+ * it as being preferable to a kernel that will crash later when someone tries
+ * to use patch_instruction().
+ */
+static int __init setup_text_poke_area(void)
+{
+ BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "powerpc/text_poke:online", text_area_cpu_up,
+ text_area_cpu_down));
+
+ return 0;
+}
+late_initcall(setup_text_poke_area);
+
+/*
+ * This can be called for kernel text or a module.
+ */
+static int map_patch_area(void *addr, unsigned long text_poke_addr)
+{
+ unsigned long pfn;
+ int err;
+
+ if (is_vmalloc_addr(addr))
+ pfn = vmalloc_to_pfn(addr);
+ else
+ pfn = __pa_symbol(addr) >> PAGE_SHIFT;
+
+ err = map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT),
+ pgprot_val(PAGE_KERNEL));
+
+ pr_devel("Mapped addr %lx with pfn %lx:%d\n", text_poke_addr, pfn, err);
+ if (err)
+ return -1;
+
return 0;
}
+static inline int unmap_patch_area(unsigned long addr)
+{
+ pte_t *ptep;
+ pmd_t *pmdp;
+ pud_t *pudp;
+ pgd_t *pgdp;
+
+ pgdp = pgd_offset_k(addr);
+ if (unlikely(!pgdp))
+ return -EINVAL;
+
+ pudp = pud_offset(pgdp, addr);
+ if (unlikely(!pudp))
+ return -EINVAL;
+
+ pmdp = pmd_offset(pudp, addr);
+ if (unlikely(!pmdp))
+ return -EINVAL;
+
+ ptep = pte_offset_kernel(pmdp, addr);
+ if (unlikely(!ptep))
+ return -EINVAL;
+
+ pr_devel("clearing mm %p, pte %p, addr %lx\n", &init_mm, ptep, addr);
+
+ /*
+ * In hash, pte_clear() flushes the TLB; in radix, we have to
+ * flush it explicitly, hence the flush_tlb_kernel_range() below.
+ */
+ pte_clear(&init_mm, addr, ptep);
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+ return 0;
+}
+
+int patch_instruction(unsigned int *addr, unsigned int instr)
+{
+ int err;
+ unsigned int *dest = NULL;
+ unsigned long flags;
+ unsigned long text_poke_addr;
+ unsigned long kaddr = (unsigned long)addr;
+
+ /*
+ * During early boot, patch_instruction() is called before
+ * text_poke_area is ready, but we still need to allow patching,
+ * so fall back to plain patching in that case. We check
+ * slab_is_available() and read text_poke_area via this_cpu_read();
+ * per-CPU areas might not be up that early, which makes a bare
+ * this_cpu_read() problematic.
+ */
+ if (!slab_is_available() || !this_cpu_read(text_poke_area))
+ return __patch_instruction(addr, instr);
+
+ local_irq_save(flags);
+
+ text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr;
+ if (map_patch_area(addr, text_poke_addr)) {
+ err = -1;
+ goto out;
+ }
+
+ dest = (unsigned int *)(text_poke_addr) +
+ ((kaddr & ~PAGE_MASK) / sizeof(unsigned int));
+
+ /*
+ * We use __put_user_size() so that faults while writing to dest
+ * are caught and reported back through err instead of oopsing.
+ */
+ __put_user_size(instr, dest, 4, err);
+ if (!err)
+ asm ("dcbst 0, %0; sync; icbi 0,%0; icbi 0,%1; sync; isync"
+ ::"r" (dest), "r"(addr));
+
+ err = unmap_patch_area(text_poke_addr);
+ if (err)
+ pr_warn("failed to unmap %lx\n", text_poke_addr);
+
+out:
+ local_irq_restore(flags);
+
+ return err;
+}
+#else /* !CONFIG_STRICT_KERNEL_RWX */
+
+int patch_instruction(unsigned int *addr, unsigned int instr)
+{
+ return __patch_instruction(addr, instr);
+}
+
+#endif /* CONFIG_STRICT_KERNEL_RWX */
+NOKPROBE_SYMBOL(patch_instruction);
+
int patch_branch(unsigned int *addr, unsigned long target, int flags)
{
return patch_instruction(addr, create_branch(addr, target, flags));
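
A quick note on the STRICT_KERNEL_RWX path above: it only changes how patch_instruction() writes the word (via a temporary per-CPU mapping); callers keep using the same API. A minimal, hypothetical sketch of a caller — patch_site, the function names and the nop encoding are illustrative, not part of this patch:

#include <asm/code-patching.h>

static unsigned int *patch_site;	/* hypothetical instruction to patch */

static int enable_my_hook(unsigned long target)
{
	/* Turn the instruction at patch_site into a branch to target. */
	return patch_branch(patch_site, target, 0);
}

static int disable_my_hook(void)
{
	/* Put back a nop (0x60000000 == "ori r0,r0,0", the PPC nop). */
	return patch_instruction(patch_site, 0x60000000);
}
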
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index a24b4039352c..706b7cc19846 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -82,14 +82,14 @@
_GLOBAL(__copy_tofrom_user_power7)
#ifdef CONFIG_ALTIVEC
cmpldi r5,16
- cmpldi cr1,r5,4096
+ cmpldi cr1,r5,3328
std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
blt .Lshort_copy
- bgt cr1,.Lvmx_copy
+ bge cr1,.Lvmx_copy
#else
cmpldi r5,16
diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
index 18af0b3d3eb2..7e5e1c28e56a 100644
--- a/arch/powerpc/lib/crtsavres.S
+++ b/arch/powerpc/lib/crtsavres.S
@@ -44,10 +44,10 @@
#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-#ifndef CONFIG_PPC64
-
.section ".text"
+#ifndef CONFIG_PPC64
+
/* Routines for saving integer registers, called by the compiler. */
/* Called with r11 pointing to the stack header word of the caller of the */
/* function, just beyond the end of the integer save area. */
@@ -314,8 +314,6 @@ _GLOBAL(_restvr_31)
#else /* CONFIG_PPC64 */
- .section ".text.save.restore","ax",@progbits
-
.globl _savegpr0_14
_savegpr0_14:
std r14,-144(r1)
diff --git a/arch/powerpc/lib/xor_vmx.c b/arch/powerpc/lib/xor_vmx.c
index f9de69a04e88..4df240aa5f81 100644
--- a/arch/powerpc/lib/xor_vmx.c
+++ b/arch/powerpc/lib/xor_vmx.c
@@ -29,10 +29,7 @@
#define vector __attribute__((vector_size(16)))
#endif
-#include <linux/preempt.h>
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <asm/switch_to.h>
+#include "xor_vmx.h"
typedef vector signed char unative_t;
@@ -64,16 +61,13 @@ typedef vector signed char unative_t;
V1##_3 = vec_xor(V1##_3, V2##_3); \
} while (0)
-void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in)
+void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in)
{
DEFINE(v1);
DEFINE(v2);
unsigned long lines = bytes / (sizeof(unative_t)) / 4;
- preempt_disable();
- enable_kernel_altivec();
-
do {
LOAD(v1);
LOAD(v2);
@@ -83,23 +77,16 @@ void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
v1 += 4;
v2 += 4;
} while (--lines > 0);
-
- disable_kernel_altivec();
- preempt_enable();
}
-EXPORT_SYMBOL(xor_altivec_2);
-void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in)
+void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in)
{
DEFINE(v1);
DEFINE(v2);
DEFINE(v3);
unsigned long lines = bytes / (sizeof(unative_t)) / 4;
- preempt_disable();
- enable_kernel_altivec();
-
do {
LOAD(v1);
LOAD(v2);
@@ -112,15 +99,11 @@ void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
v2 += 4;
v3 += 4;
} while (--lines > 0);
-
- disable_kernel_altivec();
- preempt_enable();
}
-EXPORT_SYMBOL(xor_altivec_3);
-void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in)
+void __xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in,
+ unsigned long *v4_in)
{
DEFINE(v1);
DEFINE(v2);
@@ -128,9 +111,6 @@ void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
DEFINE(v4);
unsigned long lines = bytes / (sizeof(unative_t)) / 4;
- preempt_disable();
- enable_kernel_altivec();
-
do {
LOAD(v1);
LOAD(v2);
@@ -146,15 +126,11 @@ void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
v3 += 4;
v4 += 4;
} while (--lines > 0);
-
- disable_kernel_altivec();
- preempt_enable();
}
-EXPORT_SYMBOL(xor_altivec_4);
-void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in, unsigned long *v5_in)
+void __xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in,
+ unsigned long *v4_in, unsigned long *v5_in)
{
DEFINE(v1);
DEFINE(v2);
@@ -163,9 +139,6 @@ void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
DEFINE(v5);
unsigned long lines = bytes / (sizeof(unative_t)) / 4;
- preempt_disable();
- enable_kernel_altivec();
-
do {
LOAD(v1);
LOAD(v2);
@@ -184,8 +157,4 @@ void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
v4 += 4;
v5 += 4;
} while (--lines > 0);
-
- disable_kernel_altivec();
- preempt_enable();
}
-EXPORT_SYMBOL(xor_altivec_5);
diff --git a/arch/powerpc/lib/xor_vmx.h b/arch/powerpc/lib/xor_vmx.h
new file mode 100644
index 000000000000..4746708451ae
--- /dev/null
+++ b/arch/powerpc/lib/xor_vmx.h
@@ -0,0 +1,20 @@
+/*
+ * Simple interface to link xor_vmx.c and xor_vmx_glue.c
+ *
+ * Separating these files ensures that no altivec instructions are run
+ * outside of the enable/disable altivec block.
+ */
+
+void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in);
+
+void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in);
+
+void __xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in,
+ unsigned long *v4_in);
+
+void __xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in,
+ unsigned long *v4_in, unsigned long *v5_in);
diff --git a/arch/powerpc/lib/xor_vmx_glue.c b/arch/powerpc/lib/xor_vmx_glue.c
new file mode 100644
index 000000000000..6521fe5e8cef
--- /dev/null
+++ b/arch/powerpc/lib/xor_vmx_glue.c
@@ -0,0 +1,62 @@
+/*
+ * Altivec XOR operations
+ *
+ * Copyright 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/preempt.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <asm/switch_to.h>
+#include "xor_vmx.h"
+
+void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in)
+{
+ preempt_disable();
+ enable_kernel_altivec();
+ __xor_altivec_2(bytes, v1_in, v2_in);
+ disable_kernel_altivec();
+ preempt_enable();
+}
+EXPORT_SYMBOL(xor_altivec_2);
+
+void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in)
+{
+ preempt_disable();
+ enable_kernel_altivec();
+ __xor_altivec_3(bytes, v1_in, v2_in, v3_in);
+ disable_kernel_altivec();
+ preempt_enable();
+}
+EXPORT_SYMBOL(xor_altivec_3);
+
+void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in,
+ unsigned long *v4_in)
+{
+ preempt_disable();
+ enable_kernel_altivec();
+ __xor_altivec_4(bytes, v1_in, v2_in, v3_in, v4_in);
+ disable_kernel_altivec();
+ preempt_enable();
+}
+EXPORT_SYMBOL(xor_altivec_4);
+
+void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
+ unsigned long *v2_in, unsigned long *v3_in,
+ unsigned long *v4_in, unsigned long *v5_in)
+{
+ preempt_disable();
+ enable_kernel_altivec();
+ __xor_altivec_5(bytes, v1_in, v2_in, v3_in, v4_in, v5_in);
+ disable_kernel_altivec();
+ preempt_enable();
+}
+EXPORT_SYMBOL(xor_altivec_5);
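
A small usage sketch (not part of the patch) to show what the split buys callers: all VMX enable/disable and preemption handling now lives in the glue wrappers, so a caller simply passes buffers. The example function and buffer setup are illustrative, and it assumes the xor_altivec_*() declarations in asm/xor.h:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/xor.h>		/* declares xor_altivec_*() */

static void example_xor_two_pages(void)
{
	unsigned long *a = (unsigned long *)__get_free_page(GFP_KERNEL);
	unsigned long *b = (unsigned long *)__get_free_page(GFP_KERNEL);

	if (a && b) {
		memset(a, 0xaa, PAGE_SIZE);
		memset(b, 0x55, PAGE_SIZE);

		/* a[i] ^= b[i] over the whole page; the VMX state is
		 * handled entirely inside the glue wrapper. */
		xor_altivec_2(PAGE_SIZE, a, b);
	}

	free_page((unsigned long)a);
	free_page((unsigned long)b);
}
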
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index 6c5025e81236..f4c6472f2fc4 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -88,7 +88,7 @@ static void mmu_mapin_immr(void)
int offset;
for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
- map_page(v + offset, p + offset, f);
+ map_kernel_page(v + offset, p + offset, f);
}
/* Address of instructions to patch */
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 2dc74e5c6458..382528475433 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -227,7 +227,7 @@ __dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t
do {
SetPageReserved(page);
- map_page(vaddr, page_to_phys(page),
+ map_kernel_page(vaddr, page_to_phys(page),
pgprot_val(pgprot_noncached(PAGE_KERNEL)));
page++;
vaddr += PAGE_SIZE;
diff --git a/arch/powerpc/mm/dump_hashpagetable.c b/arch/powerpc/mm/dump_hashpagetable.c
index c6b900f54c07..b1c144b03fcf 100644
--- a/arch/powerpc/mm/dump_hashpagetable.c
+++ b/arch/powerpc/mm/dump_hashpagetable.c
@@ -335,7 +335,7 @@ static unsigned long hpte_find(struct pg_state *st, unsigned long ea, int psize)
unsigned long rpn, lp_bits;
int base_psize = 0, actual_psize = 0;
- if (ea <= PAGE_OFFSET)
+ if (ea < PAGE_OFFSET)
return -1;
/* Look in primary table */
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 3a7d580fdc59..4c422632047b 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -206,6 +206,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
int is_write = 0;
int trap = TRAP(regs);
int is_exec = trap == 0x400;
+ int is_user = user_mode(regs);
int fault;
int rc = 0, store_update_sp = 0;
@@ -216,7 +217,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
* bits we are interested in. But there are some bits which
* indicate errors in DSISR but can validly be set in SRR1.
*/
- if (trap == 0x400)
+ if (is_exec)
error_code &= 0x48200000;
else
is_write = error_code & DSISR_ISSTORE;
@@ -247,13 +248,13 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
* The kernel should never take an execute fault nor should it
* take a page fault to a kernel address.
*/
- if (!user_mode(regs) && (is_exec || (address >= TASK_SIZE))) {
+ if (!is_user && (is_exec || (address >= TASK_SIZE))) {
rc = SIGSEGV;
goto bail;
}
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
- defined(CONFIG_PPC_BOOK3S_64))
+ defined(CONFIG_PPC_BOOK3S_64) || defined(CONFIG_PPC_8xx))
if (error_code & DSISR_DABRMATCH) {
/* breakpoint match */
do_break(regs, address, error_code);
@@ -266,7 +267,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
local_irq_enable();
if (faulthandler_disabled() || mm == NULL) {
- if (!user_mode(regs)) {
+ if (!is_user) {
rc = SIGSEGV;
goto bail;
}
@@ -287,10 +288,10 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
* can result in fault, which will cause a deadlock when called with
* mmap_sem held
*/
- if (!is_exec && user_mode(regs))
+ if (is_write && is_user)
store_update_sp = store_updates_sp(regs);
- if (user_mode(regs))
+ if (is_user)
flags |= FAULT_FLAG_USER;
/* When running in the kernel we expect faults to occur only to
@@ -309,7 +310,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
* thus avoiding the deadlock.
*/
if (!down_read_trylock(&mm->mmap_sem)) {
- if (!user_mode(regs) && !search_exception_tables(regs->nip))
+ if (!is_user && !search_exception_tables(regs->nip))
goto bad_area_nosemaphore;
retry:
@@ -509,7 +510,7 @@ bad_area:
bad_area_nosemaphore:
/* User mode accesses cause a SIGSEGV */
- if (user_mode(regs)) {
+ if (is_user) {
_exception(SIGSEGV, regs, code, address);
goto bail;
}
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 65bb8f33b399..3848af167df9 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -15,6 +15,7 @@
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
+#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/smp.h>
@@ -23,6 +24,7 @@
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
+#include <asm/trace.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
@@ -98,6 +100,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
: "memory");
break;
}
+ trace_tlbie(0, 0, va, 0, 0, 0, 0);
}
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
@@ -147,6 +150,7 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
: "memory");
break;
}
+ trace_tlbie(0, 1, va, 0, 0, 0, 0);
}
@@ -181,8 +185,10 @@ static inline void native_lock_hpte(struct hash_pte *hptep)
while (1) {
if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
break;
+ spin_begin();
while(test_bit(HPTE_LOCK_BIT, word))
- cpu_relax();
+ spin_cpu_relax();
+ spin_end();
}
}
@@ -407,6 +413,38 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
tlbie(vpn, psize, psize, ssize, 0);
}
+/*
+ * Remove a bolted kernel entry. Memory hotplug uses this.
+ *
+ * No need to lock here because we should be the only user.
+ */
+static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
+{
+ unsigned long vpn;
+ unsigned long vsid;
+ long slot;
+ struct hash_pte *hptep;
+
+ vsid = get_kernel_vsid(ea, ssize);
+ vpn = hpt_vpn(ea, vsid, ssize);
+
+ slot = native_hpte_find(vpn, psize, ssize);
+ if (slot == -1)
+ return -ENOENT;
+
+ hptep = htab_address + slot;
+
+ VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED));
+
+ /* Invalidate the hpte */
+ hptep->v = 0;
+
+ /* Invalidate the TLB */
+ tlbie(vpn, psize, psize, ssize, 0);
+ return 0;
+}
+
+
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
int bpsize, int apsize, int ssize, int local)
{
@@ -725,6 +763,7 @@ void __init hpte_init_native(void)
mmu_hash_ops.hpte_invalidate = native_hpte_invalidate;
mmu_hash_ops.hpte_updatepp = native_hpte_updatepp;
mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
+ mmu_hash_ops.hpte_removebolted = native_hpte_removebolted;
mmu_hash_ops.hpte_insert = native_hpte_insert;
mmu_hash_ops.hpte_remove = native_hpte_remove;
mmu_hash_ops.hpte_clear_all = native_hpte_clear;
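
The native_lock_hpte() change above is an instance of the spin_begin()/spin_cpu_relax()/spin_end() busy-wait idiom (hence the new <linux/processor.h> include). A minimal sketch of the same pattern, with a made-up flag purely for illustration:

#include <linux/compiler.h>
#include <linux/processor.h>

/* Spin until *flag becomes non-zero. spin_begin()/spin_end() bracket the
 * busy-wait so SMT siblings / the hypervisor can deprioritise the spinning
 * thread, just as in native_lock_hpte() above. */
static void wait_for_flag(int *flag)
{
	if (READ_ONCE(*flag))
		return;

	spin_begin();
	while (!READ_ONCE(*flag))
		spin_cpu_relax();
	spin_end();
}
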
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index f2095ce9d4b0..7a20669c19e7 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -810,6 +810,8 @@ static void update_hid_for_hash(void)
asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(0), "i"(0), "i"(2), "r"(0) : "memory");
asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
+ trace_tlbie(0, 0, rb, 0, 2, 0, 0);
+
/*
* now switch the HID
*/
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a4f33de4008e..e1bf5ca397fe 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -17,6 +17,8 @@
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
@@ -32,6 +34,7 @@
#define PAGE_SHIFT_16G 34
unsigned int HPAGE_SHIFT;
+EXPORT_SYMBOL(HPAGE_SHIFT);
/*
* Tracks gpages after the device tree is scanned and before the
@@ -55,7 +58,7 @@ static unsigned nr_gpages;
#define hugepd_none(hpd) (hpd_val(hpd) == 0)
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
/* Only called for hugetlbfs pages, hence can ignore THP */
return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL, NULL);
@@ -77,7 +80,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
num_hugepd = 1;
}
- new = kmem_cache_zalloc(cachep, GFP_KERNEL);
+ new = kmem_cache_zalloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
BUG_ON(pshift > HUGEPD_SHIFT_MASK);
BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
@@ -617,62 +620,39 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
} while (addr = next, addr != end);
}
-/*
- * We are holding mmap_sem, so a parallel huge page collapse cannot run.
- * To prevent hugepage split, disable irq.
- */
-struct page *
-follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
+struct page *follow_huge_pd(struct vm_area_struct *vma,
+ unsigned long address, hugepd_t hpd,
+ int flags, int pdshift)
{
- bool is_thp;
- pte_t *ptep, pte;
- unsigned shift;
- unsigned long mask, flags;
- struct page *page = ERR_PTR(-EINVAL);
-
- local_irq_save(flags);
- ptep = find_linux_pte_or_hugepte(mm->pgd, address, &is_thp, &shift);
- if (!ptep)
- goto no_page;
- pte = READ_ONCE(*ptep);
- /*
- * Verify it is a huge page else bail.
- * Transparent hugepages are handled by generic code. We can skip them
- * here.
- */
- if (!shift || is_thp)
- goto no_page;
-
- if (!pte_present(pte)) {
- page = NULL;
- goto no_page;
+ pte_t *ptep;
+ spinlock_t *ptl;
+ struct page *page = NULL;
+ unsigned long mask;
+ int shift = hugepd_shift(hpd);
+ struct mm_struct *mm = vma->vm_mm;
+
+retry:
+ ptl = &mm->page_table_lock;
+ spin_lock(ptl);
+
+ ptep = hugepte_offset(hpd, address, pdshift);
+ if (pte_present(*ptep)) {
+ mask = (1UL << shift) - 1;
+ page = pte_page(*ptep);
+ page += ((address & mask) >> PAGE_SHIFT);
+ if (flags & FOLL_GET)
+ get_page(page);
+ } else {
+ if (is_hugetlb_entry_migration(*ptep)) {
+ spin_unlock(ptl);
+ __migration_entry_wait(mm, ptep, ptl);
+ goto retry;
+ }
}
- mask = (1UL << shift) - 1;
- page = pte_page(pte);
- if (page)
- page += (address & mask) / PAGE_SIZE;
-
-no_page:
- local_irq_restore(flags);
+ spin_unlock(ptl);
return page;
}
-struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write)
-{
- BUG();
- return NULL;
-}
-
-struct page *
-follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int write)
-{
- BUG();
- return NULL;
-}
-
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
unsigned long sz)
{
@@ -763,8 +743,11 @@ static int __init add_huge_page_size(unsigned long long size)
* Hash: 16M and 16G
*/
if (radix_enabled()) {
- if (mmu_psize != MMU_PAGE_2M)
- return -EINVAL;
+ if (mmu_psize != MMU_PAGE_2M) {
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
+ (mmu_psize != MMU_PAGE_1G))
+ return -EINVAL;
+ }
} else {
if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
return -EINVAL;
@@ -963,7 +946,7 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
if (pmd_none(pmd))
return NULL;
- if (pmd_trans_huge(pmd)) {
+ if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
if (is_thp)
*is_thp = true;
ret_pte = (pte_t *) pmdp;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index ec84b31c6c86..5b4c25d12ff3 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -44,6 +44,7 @@
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
+#include <linux/memremap.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
@@ -110,8 +111,29 @@ static int __meminit vmemmap_populated(unsigned long start, int page_size)
return 0;
}
+/*
+ * vmemmap virtual address space management does not have a traditional page
+ * table to track which virtual struct pages are backed by a physical mapping.
+ * The virtual to physical mappings are tracked in a simple linked list
+ * format. 'vmemmap_list' maintains the entire vmemmap physical mapping at
+ * all times, whereas the 'next' list maintains the available
+ * vmemmap_backing structures which have been deleted from the
+ * 'vmemmap_list' during system runtime (memory hotplug remove
+ * operation). The freed 'vmemmap_backing' structures are reused later when
+ * new requests come in, without allocating fresh memory. This pointer also
+ * tracks the allocated 'vmemmap_backing' structures as we allocate one
+ * full page of memory at a time when we don't have any.
+ */
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
+
+/*
+ * The same pointer 'next' tracks individual chunks inside the allocated
+ * full page during boot time and again tracks the freed nodes during
+ * runtime. This would be racy, but the two uses never overlap because
+ * they are separated by the boot process. It would become a problem if
+ * we somehow had a memory hotplug operation during boot.
+ */
static int num_left;
static int num_freed;
@@ -171,13 +193,17 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
for (; start < end; start += page_size) {
+ struct vmem_altmap *altmap;
void *p;
int rc;
if (vmemmap_populated(start, page_size))
continue;
- p = vmemmap_alloc_block(page_size, node);
+ /* altmap lookups only work at section boundaries */
+ altmap = to_vmem_altmap(SECTION_ALIGN_DOWN(start));
+
+ p = __vmemmap_alloc_block_buf(page_size, node, altmap);
if (!p)
return -ENOMEM;
@@ -234,13 +260,17 @@ static unsigned long vmemmap_list_free(unsigned long start)
void __ref vmemmap_free(unsigned long start, unsigned long end)
{
unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
+ unsigned long page_order = get_order(page_size);
start = _ALIGN_DOWN(start, page_size);
pr_debug("vmemmap_free %lx...%lx\n", start, end);
for (; start < end; start += page_size) {
- unsigned long addr;
+ unsigned long nr_pages, addr;
+ struct vmem_altmap *altmap;
+ struct page *section_base;
+ struct page *page;
/*
* the section has already be marked as invalid, so
@@ -251,29 +281,33 @@ void __ref vmemmap_free(unsigned long start, unsigned long end)
continue;
addr = vmemmap_list_free(start);
- if (addr) {
- struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
-
- if (PageReserved(page)) {
- /* allocated from bootmem */
- if (page_size < PAGE_SIZE) {
- /*
- * this shouldn't happen, but if it is
- * the case, leave the memory there
- */
- WARN_ON_ONCE(1);
- } else {
- unsigned int nr_pages =
- 1 << get_order(page_size);
- while (nr_pages--)
- free_reserved_page(page++);
- }
- } else
- free_pages((unsigned long)(__va(addr)),
- get_order(page_size));
-
- vmemmap_remove_mapping(start, page_size);
+ if (!addr)
+ continue;
+
+ page = pfn_to_page(addr >> PAGE_SHIFT);
+ section_base = pfn_to_page(vmemmap_section_start(start));
+ nr_pages = 1 << page_order;
+
+ altmap = to_vmem_altmap((unsigned long) section_base);
+ if (altmap) {
+ vmem_altmap_free(altmap, nr_pages);
+ } else if (PageReserved(page)) {
+ /* allocated from bootmem */
+ if (page_size < PAGE_SIZE) {
+ /*
+ * this shouldn't happen, but if it is
+ * the case, leave the memory there
+ */
+ WARN_ON_ONCE(1);
+ } else {
+ while (nr_pages--)
+ free_reserved_page(page++);
+ }
+ } else {
+ free_pages((unsigned long)(__va(addr)), page_order);
}
+
+ vmemmap_remove_mapping(start, page_size);
}
}
#endif
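
The vmemmap_list comments added above describe what amounts to a small bump allocator with a freelist for vmemmap_backing nodes. As a plain, standalone sketch of that scheme (not the kernel code; the names and the 4096-byte chunk size are illustrative):

#include <stdlib.h>

struct backing {
	struct backing *list;		/* link on the free list */
	unsigned long phys;
	unsigned long virt_addr;
};

static struct backing *freed;		/* nodes recycled at hot-remove time */
static struct backing *fresh;		/* current page-sized chunk of new nodes */
static int num_left;

static struct backing *backing_get(void)
{
	struct backing *b;

	if (freed) {			/* reuse a recycled node first */
		b = freed;
		freed = freed->list;
		return b;
	}
	if (!num_left) {		/* otherwise carve out a new full page */
		fresh = calloc(4096 / sizeof(*fresh), sizeof(*fresh));
		if (!fresh)
			return NULL;
		num_left = 4096 / sizeof(*fresh);
	}
	num_left--;
	return fresh++;
}

static void backing_put(struct backing *b)
{
	b->list = freed;		/* push onto the free list on hot-remove */
	freed = b;
}
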
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 9ee536ec0739..8541f18694a4 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -36,6 +36,7 @@
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/memremap.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
@@ -126,18 +127,14 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end)
return -ENODEV;
}
-int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
+int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
- struct pglist_data *pgdata;
- struct zone *zone;
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
int rc;
resize_hpt_for_hotplug(memblock_phys_mem_size());
- pgdata = NODE_DATA(nid);
-
start = (unsigned long)__va(start);
rc = create_section_mapping(start, start + size);
if (rc) {
@@ -147,11 +144,7 @@ int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
return -EFAULT;
}
- /* this should work for most non-highmem platforms */
- zone = pgdata->node_zones +
- zone_for_memory(nid, start, size, 0, for_device);
-
- return __add_pages(nid, zone, start_pfn, nr_pages);
+ return __add_pages(nid, start_pfn, nr_pages, want_memblock);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
@@ -159,11 +152,20 @@ int arch_remove_memory(u64 start, u64 size)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- struct zone *zone;
+ struct vmem_altmap *altmap;
+ struct page *page;
int ret;
- zone = page_zone(pfn_to_page(start_pfn));
- ret = __remove_pages(zone, start_pfn, nr_pages);
+ /*
+ * If we have an altmap then we need to skip over any reserved PFNs
+ * when querying the zone.
+ */
+ page = pfn_to_page(start_pfn);
+ altmap = to_vmem_altmap((unsigned long) page);
+ if (altmap)
+ page += vmem_altmap_offset(altmap);
+
+ ret = __remove_pages(page_zone(page), start_pfn, nr_pages);
if (ret)
return ret;
@@ -313,11 +315,11 @@ void __init paging_init(void)
unsigned long end = __fix_to_virt(FIX_HOLE);
for (; v < end; v += PAGE_SIZE)
- map_page(v, 0, 0); /* XXX gross */
+ map_kernel_page(v, 0, 0); /* XXX gross */
#endif
#ifdef CONFIG_HIGHMEM
- map_page(PKMAP_BASE, 0, 0); /* XXX gross */
+ map_kernel_page(PKMAP_BASE, 0, 0); /* XXX gross */
pkmap_page_table = virt_to_kpte(PKMAP_BASE);
kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index a3edf813d455..71de2c6d88f3 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -235,10 +235,15 @@ void destroy_context(struct mm_struct *mm)
#ifdef CONFIG_PPC_RADIX_MMU
void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
- asm volatile("isync": : :"memory");
- mtspr(SPRN_PID, next->context.id);
- asm volatile("isync \n"
- PPC_SLBIA(0x7)
- : : :"memory");
+
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+ isync();
+ mtspr(SPRN_PID, next->context.id);
+ isync();
+ asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
+ } else {
+ mtspr(SPRN_PID, next->context.id);
+ isync();
+ }
}
#endif
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index f988db655e5b..d46128b22150 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -94,7 +94,6 @@ extern void _tlbia(void);
#ifdef CONFIG_PPC32
extern void mapin_ram(void);
-extern int map_page(unsigned long va, phys_addr_t pa, int flags);
extern void setbat(int index, unsigned long virt, phys_addr_t phys,
unsigned int size, pgprot_t prot);
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 5fcb3dd74c13..31eed8fa8e99 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -32,7 +32,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
{
int changed;
#ifdef CONFIG_DEBUG_VM
- WARN_ON(!pmd_trans_huge(*pmdp));
+ WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
changed = !pmd_same(*(pmdp), entry);
@@ -59,7 +59,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
#ifdef CONFIG_DEBUG_VM
WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
assert_spin_locked(&mm->page_table_lock);
- WARN_ON(!pmd_trans_huge(pmd));
+ WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif
trace_hugepage_set_pmd(addr, pmd_val(pmd));
return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c
index 8b85a14b08ea..188b4107584d 100644
--- a/arch/powerpc/mm/pgtable-hash64.c
+++ b/arch/powerpc/mm/pgtable-hash64.c
@@ -11,8 +11,12 @@
#include <linux/sched.h>
#include <linux/mm_types.h>
+#include <linux/mm.h>
#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+#include <asm/mmu.h>
#include <asm/tlb.h>
#include "mmu_decl.h"
@@ -22,6 +26,81 @@
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
+ * vmemmap is the starting address of the virtual address space where
+ * struct pages are allocated for all possible PFNs present on the system,
+ * including holes and bad memory (hence sparse). These virtual struct
+ * pages are stored in sequence in this virtual address space irrespective
+ * of whether the corresponding PFN is valid or not. This gives a constant
+ * relationship between the address of a struct page and its PFN.
+ *
+ * During boot or memory hotplug operation when a new memory section is
+ * added, physical memory allocation (including hash table bolting) will
+ * be performed for the set of struct pages which are part of the memory
+ * section. This saves memory by not allocating struct pages for PFNs
+ * which are not valid.
+ *
+ * ----------------------------------------------
+ * | PHYSICAL ALLOCATION OF VIRTUAL STRUCT PAGES|
+ * ----------------------------------------------
+ *
+ * f000000000000000 c000000000000000
+ * vmemmap +--------------+ +--------------+
+ * + | page struct | +--------------> | page struct |
+ * | +--------------+ +--------------+
+ * | | page struct | +--------------> | page struct |
+ * | +--------------+ | +--------------+
+ * | | page struct | + +------> | page struct |
+ * | +--------------+ | +--------------+
+ * | | page struct | | +--> | page struct |
+ * | +--------------+ | | +--------------+
+ * | | page struct | | |
+ * | +--------------+ | |
+ * | | page struct | | |
+ * | +--------------+ | |
+ * | | page struct | | |
+ * | +--------------+ | |
+ * | | page struct | | |
+ * | +--------------+ | |
+ * | | page struct | +-------+ |
+ * | +--------------+ |
+ * | | page struct | +-----------+
+ * | +--------------+
+ * | | page struct | No mapping
+ * | +--------------+
+ * | | page struct | No mapping
+ * v +--------------+
+ *
+ * -----------------------------------------
+ * | RELATION BETWEEN STRUCT PAGES AND PFNS|
+ * -----------------------------------------
+ *
+ * vmemmap +--------------+ +---------------+
+ * + | page struct | +-------------> | PFN |
+ * | +--------------+ +---------------+
+ * | | page struct | +-------------> | PFN |
+ * | +--------------+ +---------------+
+ * | | page struct | +-------------> | PFN |
+ * | +--------------+ +---------------+
+ * | | page struct | +-------------> | PFN |
+ * | +--------------+ +---------------+
+ * | | |
+ * | +--------------+
+ * | | |
+ * | +--------------+
+ * | | |
+ * | +--------------+ +---------------+
+ * | | page struct | +-------------> | PFN |
+ * | +--------------+ +---------------+
+ * | | |
+ * | +--------------+
+ * | | |
+ * | +--------------+ +---------------+
+ * | | page struct | +-------------> | PFN |
+ * | +--------------+ +---------------+
+ * | | page struct | +-------------> | PFN |
+ * v +--------------+ +---------------+
+ */
+/*
* On hash-based CPUs, the vmemmap is bolted in the hash table.
*
*/
@@ -109,7 +188,7 @@ unsigned long hash__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr
unsigned long old;
#ifdef CONFIG_DEBUG_VM
- WARN_ON(!pmd_trans_huge(*pmdp));
+ WARN_ON(!hash__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
assert_spin_locked(&mm->page_table_lock);
#endif
@@ -141,6 +220,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(pmd_trans_huge(*pmdp));
+ VM_BUG_ON(pmd_devmap(*pmdp));
pmd = *pmdp;
pmd_clear(pmdp);
@@ -221,6 +301,7 @@ void hash__pmdp_huge_split_prepare(struct vm_area_struct *vma,
{
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);
+ VM_BUG_ON(pmd_devmap(*pmdp));
/*
* We can't mark the pmd none here, because that will cause a race
@@ -342,3 +423,35 @@ int hash__has_transparent_hugepage(void)
return 1;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+void hash__mark_rodata_ro(void)
+{
+ unsigned long start = (unsigned long)_stext;
+ unsigned long end = (unsigned long)__init_begin;
+ unsigned long idx;
+ unsigned int step, shift;
+ unsigned long newpp = PP_RXXX;
+
+ shift = mmu_psize_defs[mmu_linear_psize].shift;
+ step = 1 << shift;
+
+ start = ((start + step - 1) >> shift) << shift;
+ end = (end >> shift) << shift;
+
+ pr_devel("marking ro start %lx, end %lx, step %x\n",
+ start, end, step);
+
+ if (start == end) {
+ pr_warn("could not set rodata ro, relocate the start"
+ " of the kernel to a 0x%x boundary\n", step);
+ return;
+ }
+
+ for (idx = start; idx < end; idx += step)
+ /* Not sure if we can do much with the return value */
+ mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
+ mmu_kernel_ssize);
+
+}
+#endif
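
hash__mark_rodata_ro() above can only change protection on whole linear-mapping pages, so it rounds start up and end down to the step size and bails out if no whole page remains. A worked sketch of just that rounding, with plain helpers that are purely illustrative (SZ_16M stands in for a 16M linear page size):

#include <linux/sizes.h>

/* Round inward to whole `step`-sized blocks, as hash__mark_rodata_ro()
 * does with shifts; only fully covered blocks may be marked read-only. */
static unsigned long ro_start(unsigned long start, unsigned long step)
{
	return ((start + step - 1) / step) * step;	/* round up */
}

static unsigned long ro_end(unsigned long end, unsigned long step)
{
	return (end / step) * step;			/* round down */
}

/*
 * Example with a 16M linear page size:
 *   ro_start(0x1234000, SZ_16M) == 0x2000000
 *   ro_end(0x5678000, SZ_16M)   == 0x5000000
 * If the rounded start reaches the rounded end, nothing can be protected,
 * which is the "relocate the start of the kernel" warning above.
 */
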
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index c28165d8970b..8c13e4282308 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -11,6 +11,7 @@
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
+#include <linux/mm.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -19,6 +20,8 @@
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
+#include <asm/sections.h>
+#include <asm/trace.h>
#include <trace/events/thp.h>
@@ -108,6 +111,49 @@ set_the_pte:
return 0;
}
+#ifdef CONFIG_STRICT_KERNEL_RWX
+void radix__mark_rodata_ro(void)
+{
+ unsigned long start = (unsigned long)_stext;
+ unsigned long end = (unsigned long)__init_begin;
+ unsigned long idx;
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ start = ALIGN_DOWN(start, PAGE_SIZE);
+ end = PAGE_ALIGN(end); // aligns up
+
+ pr_devel("marking ro start %lx, end %lx\n", start, end);
+
+ for (idx = start; idx < end; idx += PAGE_SIZE) {
+ pgdp = pgd_offset_k(idx);
+ pudp = pud_alloc(&init_mm, pgdp, idx);
+ if (!pudp)
+ continue;
+ if (pud_huge(*pudp)) {
+ ptep = (pte_t *)pudp;
+ goto update_the_pte;
+ }
+ pmdp = pmd_alloc(&init_mm, pudp, idx);
+ if (!pmdp)
+ continue;
+ if (pmd_huge(*pmdp)) {
+ ptep = pmdp_ptep(pmdp);
+ goto update_the_pte;
+ }
+ ptep = pte_alloc_kernel(pmdp, idx);
+ if (!ptep)
+ continue;
+update_the_pte:
+ radix__pte_update(&init_mm, idx, ptep, _PAGE_WRITE, 0, 0);
+ }
+
+ radix__flush_tlb_kernel_range(start, end);
+}
+#endif /* CONFIG_STRICT_KERNEL_RWX */
+
static inline void __meminit print_mapping(unsigned long start,
unsigned long end,
unsigned long size)
@@ -121,7 +167,14 @@ static inline void __meminit print_mapping(unsigned long start,
static int __meminit create_physical_mapping(unsigned long start,
unsigned long end)
{
- unsigned long addr, mapping_size = 0;
+ unsigned long vaddr, addr, mapping_size = 0;
+ pgprot_t prot;
+ unsigned long max_mapping_size;
+#ifdef CONFIG_STRICT_KERNEL_RWX
+ int split_text_mapping = 1;
+#else
+ int split_text_mapping = 0;
+#endif
start = _ALIGN_UP(start, PAGE_SIZE);
for (addr = start; addr < end; addr += mapping_size) {
@@ -130,9 +183,12 @@ static int __meminit create_physical_mapping(unsigned long start,
gap = end - addr;
previous_size = mapping_size;
+ max_mapping_size = PUD_SIZE;
+retry:
if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
- mmu_psize_defs[MMU_PAGE_1G].shift)
+ mmu_psize_defs[MMU_PAGE_1G].shift &&
+ PUD_SIZE <= max_mapping_size)
mapping_size = PUD_SIZE;
else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
mmu_psize_defs[MMU_PAGE_2M].shift)
@@ -140,13 +196,32 @@ static int __meminit create_physical_mapping(unsigned long start,
else
mapping_size = PAGE_SIZE;
+ if (split_text_mapping && (mapping_size == PUD_SIZE) &&
+ (addr <= __pa_symbol(__init_begin)) &&
+ (addr + mapping_size) >= __pa_symbol(_stext)) {
+ max_mapping_size = PMD_SIZE;
+ goto retry;
+ }
+
+ if (split_text_mapping && (mapping_size == PMD_SIZE) &&
+ (addr <= __pa_symbol(__init_begin)) &&
+ (addr + mapping_size) >= __pa_symbol(_stext))
+ mapping_size = PAGE_SIZE;
+
if (mapping_size != previous_size) {
print_mapping(start, addr, previous_size);
start = addr;
}
- rc = radix__map_kernel_page((unsigned long)__va(addr), addr,
- PAGE_KERNEL_X, mapping_size);
+ vaddr = (unsigned long)__va(addr);
+
+ if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
+ overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size))
+ prot = PAGE_KERNEL_X;
+ else
+ prot = PAGE_KERNEL;
+
+ rc = radix__map_kernel_page(vaddr, addr, prot, mapping_size);
if (rc)
return rc;
}
@@ -190,6 +265,7 @@ static void __init radix_init_pgtable(void)
asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
"r" (TLBIEL_INVAL_SET_LPID), "r" (0));
asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+ trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
}
static void __init radix_init_partition_table(void)
@@ -316,6 +392,9 @@ static void update_hid_for_radix(void)
asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
+ trace_tlbie(0, 0, rb, 0, 2, 0, 1);
+ trace_tlbie(0, 0, rb, 0, 2, 1, 1);
+
/*
* now switch the HID
*/
@@ -683,7 +762,7 @@ unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long add
unsigned long old;
#ifdef CONFIG_DEBUG_VM
- WARN_ON(!radix__pmd_trans_huge(*pmdp));
+ WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
assert_spin_locked(&mm->page_table_lock);
#endif
@@ -701,6 +780,7 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
+ VM_BUG_ON(pmd_devmap(*pmdp));
/*
* khugepaged calls this for normal pmd
*/
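
The retry logic in create_physical_mapping() above exists so that, with STRICT_KERNEL_RWX, no 1G or 2M block straddles the kernel text region (text must stay PAGE_KERNEL_X while everything else drops execute). A reduced sketch of just that size-selection decision, with parameters standing in for the __pa_symbol(_stext)/__pa_symbol(__init_begin) bounds used above:

#include <linux/sizes.h>
#include <linux/types.h>

/* Pick the largest naturally aligned mapping size that fits in `gap`, but
 * shrink it if the block would overlap [text_start, text_end] and text
 * splitting is requested — mirroring the retry/fallback above. */
static unsigned long pick_mapping_size(unsigned long addr, unsigned long gap,
				       unsigned long text_start,
				       unsigned long text_end,
				       bool split_text_mapping)
{
	unsigned long size;

	if (!(addr & (SZ_1G - 1)) && gap >= SZ_1G)
		size = SZ_1G;
	else if (!(addr & (SZ_2M - 1)) && gap >= SZ_2M)
		size = SZ_2M;
	else
		size = SZ_4K;

	while (split_text_mapping && size > SZ_4K &&
	       addr <= text_end && addr + size >= text_start)
		size = (size == SZ_1G) ? SZ_2M : SZ_4K;

	return size;
}
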
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index a65c0b4c0669..a9e4bfc025bc 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -60,7 +60,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
struct page *ptepage;
- gfp_t flags = GFP_KERNEL | __GFP_ZERO;
+ gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT;
ptepage = alloc_pages(flags, 0);
if (!ptepage)
@@ -189,7 +189,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
err = 0;
for (i = 0; i < size && err == 0; i += PAGE_SIZE)
- err = map_page(v+i, p+i, flags);
+ err = map_kernel_page(v+i, p+i, flags);
if (err) {
if (slab_is_available())
vunmap((void *)v);
@@ -215,7 +215,7 @@ void iounmap(volatile void __iomem *addr)
}
EXPORT_SYMBOL(iounmap);
-int map_page(unsigned long va, phys_addr_t pa, int flags)
+int map_kernel_page(unsigned long va, phys_addr_t pa, int flags)
{
pmd_t *pd;
pte_t *pg;
@@ -255,7 +255,7 @@ void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
ktext = ((char *)v >= _stext && (char *)v < etext) ||
((char *)v >= _sinittext && (char *)v < _einittext);
f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL);
- map_page(v, p, f);
+ map_kernel_page(v, p, f);
#ifdef CONFIG_PPC_STD_MMU_32
if (ktext)
hash_preload(&init_mm, v, 0, 0x300);
@@ -387,11 +387,6 @@ void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
return;
}
- map_page(address, phys, pgprot_val(flags));
+ map_kernel_page(address, phys, pgprot_val(flags));
fixmaps++;
}
-
-void __this_fixmap_does_not_exist(void)
-{
- WARN_ON(1);
-}
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index db93cf747a03..5c0b795d656c 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -47,6 +47,7 @@
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
+#include <asm/trace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
@@ -323,7 +324,7 @@ struct page *pud_page(pud_t pud)
*/
struct page *pmd_page(pmd_t pmd)
{
- if (pmd_trans_huge(pmd) || pmd_huge(pmd))
+ if (pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
return pte_page(pmd_pte(pmd));
return virt_to_page(pmd_page_vaddr(pmd));
}
@@ -351,12 +352,20 @@ static pte_t *get_from_cache(struct mm_struct *mm)
static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
void *ret = NULL;
- struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
- if (!page)
- return NULL;
- if (!kernel && !pgtable_page_ctor(page)) {
- __free_page(page);
- return NULL;
+ struct page *page;
+
+ if (!kernel) {
+ page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
+ if (!page)
+ return NULL;
+ if (!pgtable_page_ctor(page)) {
+ __free_page(page);
+ return NULL;
+ }
+ } else {
+ page = alloc_page(PGALLOC_GFP);
+ if (!page)
+ return NULL;
}
ret = page_address(page);
@@ -469,13 +478,31 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
* use of this partition ID was, not the new use.
*/
asm volatile("ptesync" : : : "memory");
- if (old & PATB_HR)
+ if (old & PATB_HR) {
asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
"r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
- else
+ trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
+ } else {
asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
"r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
+ trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
+ }
asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
#endif /* CONFIG_PPC_BOOK3S_64 */
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+void mark_rodata_ro(void)
+{
+ if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
+ pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
+ return;
+ }
+
+ if (radix_enabled())
+ radix__mark_rodata_ro();
+ else
+ hash__mark_rodata_ro();
+}
+#endif
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 654a0d7ba0e7..13cfe413b40d 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -33,15 +33,7 @@ enum slb_index {
KSTACK_INDEX = 2, /* Kernel stack map */
};
-extern void slb_allocate_realmode(unsigned long ea);
-
-static void slb_allocate(unsigned long ea)
-{
- /* Currently, we do real mode for all SLBs including user, but
- * that will change if we bring back dynamic VSIDs
- */
- slb_allocate_realmode(ea);
-}
+extern void slb_allocate(unsigned long ea);
#define slb_esid_mask(ssize) \
(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 1519617aab36..bde378559d01 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -65,14 +65,15 @@ MMU_FTR_SECTION_ELSE \
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_68_BIT_VA)
-/* void slb_allocate_realmode(unsigned long ea);
+/* void slb_allocate(unsigned long ea);
*
* Create an SLB entry for the given EA (user or kernel).
* r3 = faulting address, r13 = PACA
* r9, r10, r11 are clobbered by this function
+ * r3 is preserved.
* No other registers are examined or changed.
*/
-_GLOBAL(slb_allocate_realmode)
+_GLOBAL(slb_allocate)
/*
* check for bad kernel/user address
* (ea & ~REGION_MASK) >= PGTABLE_RANGE
@@ -235,6 +236,9 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
* dont have any LRU information to help us choose a slot.
*/
+ mr r9,r3
+
+ /* slb_finish_load_1T continues here. r9=EA with non-ESID bits clear */
7: ld r10,PACASTABRR(r13)
addi r10,r10,1
/* This gets soft patched on boot. */
@@ -249,10 +253,10 @@ slb_compare_rr_to_size:
std r10,PACASTABRR(r13)
3:
- rldimi r3,r10,0,36 /* r3= EA[0:35] | entry */
- oris r10,r3,SLB_ESID_V@h /* r3 |= SLB_ESID_V */
+ rldimi r9,r10,0,36 /* r9 = EA[0:35] | entry */
+ oris r10,r9,SLB_ESID_V@h /* r10 = r9 | SLB_ESID_V */
- /* r3 = ESID data, r11 = VSID data */
+ /* r9 = ESID data, r11 = VSID data */
/*
* No need for an isync before or after this slbmte. The exception
@@ -265,21 +269,21 @@ slb_compare_rr_to_size:
bgelr cr7
/* Update the slb cache */
- lhz r3,PACASLBCACHEPTR(r13) /* offset = paca->slb_cache_ptr */
- cmpldi r3,SLB_CACHE_ENTRIES
+ lhz r9,PACASLBCACHEPTR(r13) /* offset = paca->slb_cache_ptr */
+ cmpldi r9,SLB_CACHE_ENTRIES
bge 1f
/* still room in the slb cache */
- sldi r11,r3,2 /* r11 = offset * sizeof(u32) */
+ sldi r11,r9,2 /* r11 = offset * sizeof(u32) */
srdi r10,r10,28 /* get the 36 bits of the ESID */
add r11,r11,r13 /* r11 = (u32 *)paca + offset */
stw r10,PACASLBCACHE(r11) /* paca->slb_cache[offset] = esid */
- addi r3,r3,1 /* offset++ */
+ addi r9,r9,1 /* offset++ */
b 2f
1: /* offset >= SLB_CACHE_ENTRIES */
- li r3,SLB_CACHE_ENTRIES+1
+ li r9,SLB_CACHE_ENTRIES+1
2:
- sth r3,PACASLBCACHEPTR(r13) /* paca->slb_cache_ptr = offset */
+ sth r9,PACASLBCACHEPTR(r13) /* paca->slb_cache_ptr = offset */
crclr 4*cr0+eq /* set result to "success" */
blr
@@ -301,11 +305,11 @@ slb_compare_rr_to_size:
rldimi r11,r10,SLB_VSID_SSIZE_SHIFT,0 /* insert segment size */
/* r3 = EA, r11 = VSID data */
- clrrdi r3,r3,SID_SHIFT_1T /* clear out non-ESID bits */
+ clrrdi r9,r3,SID_SHIFT_1T /* clear out non-ESID bits */
b 7b
-_ASM_NOKPROBE_SYMBOL(slb_allocate_realmode)
+_ASM_NOKPROBE_SYMBOL(slb_allocate)
_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_linear)
_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_io)
_ASM_NOKPROBE_SYMBOL(slb_compare_rr_to_size)
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 02e71402fdd3..744e0164ecf5 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -16,6 +16,7 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>
+#include <asm/trace.h>
#define RIC_FLUSH_TLB 0
@@ -35,6 +36,7 @@ static inline void __tlbiel_pid(unsigned long pid, int set,
asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+ trace_tlbie(0, 1, rb, rs, ric, prs, r);
}
/*
@@ -87,6 +89,7 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("eieio; tlbsync; ptesync": : :"memory");
+ trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
static inline void _tlbiel_va(unsigned long va, unsigned long pid,
@@ -104,6 +107,7 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("ptesync": : :"memory");
+ trace_tlbie(0, 1, rb, rs, ric, prs, r);
}
static inline void _tlbie_va(unsigned long va, unsigned long pid,
@@ -121,6 +125,7 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("eieio; tlbsync; ptesync": : :"memory");
+ trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
/*
@@ -377,6 +382,7 @@ void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("eieio; tlbsync; ptesync": : :"memory");
+ trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);
@@ -394,6 +400,7 @@ void radix__flush_tlb_lpid(unsigned long lpid)
asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("eieio; tlbsync; ptesync": : :"memory");
+ trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);
@@ -420,12 +427,14 @@ void radix__flush_tlb_all(void)
*/
asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
+ trace_tlbie(0, 0, rb, rs, ric, prs, r);
/*
* now flush host entries by passing PRS = 0 and LPID == 0
*/
asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
asm volatile("eieio; tlbsync; ptesync": : :"memory");
+ trace_tlbie(0, 0, rb, 0, ric, prs, r);
}
void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 4517aa43a8b1..b5b0fb97b9c0 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -93,12 +93,10 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
/*
* Check if we have an active batch on this CPU. If not, just
- * flush now and return. For now, we don global invalidates
- * in that case, might be worth testing the mm cpu mask though
- * and decide to use local invalidates instead...
+ * flush now and return.
*/
if (!batch->active) {
- flush_hash_page(vpn, rpte, psize, ssize, 0);
+ flush_hash_page(vpn, rpte, psize, ssize, mm_is_thread_local(mm));
put_cpu_var(ppc64_tlb_batch);
return;
}
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 7b2ca16b1eb4..9c88b82f6229 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <asm/cputhreads.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/io.h>
@@ -27,6 +28,12 @@
#include "hv-24x7-catalog.h"
#include "hv-common.h"
+/* Version of the 24x7 hypervisor API that we should use on this machine. */
+static int interface_version;
+
+/* Whether we have to aggregate result data for some domains. */
+static bool aggregate_result_elements;
+
static bool domain_is_valid(unsigned domain)
{
switch (domain) {
@@ -54,6 +61,15 @@ static bool is_physical_domain(unsigned domain)
}
}
+/* Domains for which more than one result element is returned for each event. */
+static bool domain_needs_aggregation(unsigned int domain)
+{
+ return aggregate_result_elements &&
+ (domain == HV_PERF_DOMAIN_PHYS_CORE ||
+ (domain >= HV_PERF_DOMAIN_VCPU_HOME_CORE &&
+ domain <= HV_PERF_DOMAIN_VCPU_REMOTE_NODE));
+}
+
static const char *domain_name(unsigned domain)
{
if (!domain_is_valid(domain))
@@ -74,7 +90,11 @@ static const char *domain_name(unsigned domain)
static bool catalog_entry_domain_is_valid(unsigned domain)
{
- return is_physical_domain(domain);
+ /* POWER8 doesn't support virtual domains. */
+ if (interface_version == 1)
+ return is_physical_domain(domain);
+ else
+ return domain_is_valid(domain);
}
/*
@@ -166,6 +186,12 @@ DEFINE_PER_CPU(struct hv_24x7_hw, hv_24x7_hw);
DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
+static unsigned int max_num_requests(int interface_version)
+{
+ return (H24x7_DATA_BUFFER_SIZE - sizeof(struct hv_24x7_request_buffer))
+ / H24x7_REQUEST_SIZE(interface_version);
+}
+
static char *event_name(struct hv_24x7_event_data *ev, int *len)
{
*len = be16_to_cpu(ev->event_name_len) - 2;
@@ -260,9 +286,8 @@ static void *event_end(struct hv_24x7_event_data *ev, void *end)
return start + nl + dl + ldl;
}
-static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
- unsigned long version,
- unsigned long index)
+static long h_get_24x7_catalog_page_(unsigned long phys_4096,
+ unsigned long version, unsigned long index)
{
pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
phys_4096, version, index);
@@ -273,8 +298,7 @@ static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
phys_4096, version, index);
}
-static unsigned long h_get_24x7_catalog_page(char page[],
- u64 version, u32 index)
+static long h_get_24x7_catalog_page(char page[], u64 version, u32 index)
{
return h_get_24x7_catalog_page_(virt_to_phys(page),
version, index);
@@ -664,13 +688,13 @@ static int create_events_from_catalog(struct attribute ***events_,
struct attribute ***event_descs_,
struct attribute ***event_long_descs_)
{
- unsigned long hret;
+ long hret;
size_t catalog_len, catalog_page_len, event_entry_count,
event_data_len, event_data_offs,
event_data_bytes, junk_events, event_idx, event_attr_ct, i,
attr_max, event_idx_last, desc_ct, long_desc_ct;
ssize_t ct, ev_len;
- uint32_t catalog_version_num;
+ uint64_t catalog_version_num;
struct attribute **events, **event_descs, **event_long_descs;
struct hv_24x7_catalog_page_0 *page_0 =
kmem_cache_alloc(hv_page_cache, GFP_KERNEL);
@@ -706,8 +730,8 @@ static int create_events_from_catalog(struct attribute ***events_,
event_data_offs = be16_to_cpu(page_0->event_data_offs);
event_data_len = be16_to_cpu(page_0->event_data_len);
- pr_devel("cv %zu cl %zu eec %zu edo %zu edl %zu\n",
- (size_t)catalog_version_num, catalog_len,
+ pr_devel("cv %llu cl %zu eec %zu edo %zu edl %zu\n",
+ catalog_version_num, catalog_len,
event_entry_count, event_data_offs, event_data_len);
if ((MAX_4K < event_data_len)
@@ -761,8 +785,8 @@ static int create_events_from_catalog(struct attribute ***events_,
catalog_version_num,
i + event_data_offs);
if (hret) {
- pr_err("failed to get event data in page %zu\n",
- i + event_data_offs);
+ pr_err("Failed to get event data in page %zu: rc=%ld\n",
+ i + event_data_offs, hret);
ret = -EIO;
goto e_event_data;
}
@@ -903,7 +927,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
{
- unsigned long hret;
+ long hret;
ssize_t ret = 0;
size_t catalog_len = 0, catalog_page_len = 0;
loff_t page_offset = 0;
@@ -988,7 +1012,7 @@ static ssize_t _name##_show(struct device *dev, \
struct device_attribute *dev_attr, \
char *buf) \
{ \
- unsigned long hret; \
+ long hret; \
ssize_t ret = 0; \
void *page = kmem_cache_alloc(hv_page_cache, GFP_USER); \
struct hv_24x7_catalog_page_0 *page_0 = page; \
@@ -1040,21 +1064,6 @@ static const struct attribute_group *attr_groups[] = {
NULL,
};
-static void log_24x7_hcall(struct hv_24x7_request_buffer *request_buffer,
- struct hv_24x7_data_result_buffer *result_buffer,
- unsigned long ret)
-{
- struct hv_24x7_request *req;
-
- req = &request_buffer->requests[0];
- pr_notice_ratelimited("hcall failed: [%d %#x %#x %d] => "
- "ret 0x%lx (%ld) detail=0x%x failing ix=%x\n",
- req->performance_domain, req->data_offset,
- req->starting_ix, req->starting_lpar_ix, ret, ret,
- result_buffer->detailed_rc,
- result_buffer->failing_request_ix);
-}
-
/*
* Start the process for a new H_GET_24x7_DATA hcall.
*/
@@ -1062,10 +1071,10 @@ static void init_24x7_request(struct hv_24x7_request_buffer *request_buffer,
struct hv_24x7_data_result_buffer *result_buffer)
{
- memset(request_buffer, 0, 4096);
- memset(result_buffer, 0, 4096);
+ memset(request_buffer, 0, H24x7_DATA_BUFFER_SIZE);
+ memset(result_buffer, 0, H24x7_DATA_BUFFER_SIZE);
- request_buffer->interface_version = HV_24X7_IF_VERSION_CURRENT;
+ request_buffer->interface_version = interface_version;
/* memset above set request_buffer->num_requests to 0 */
}
@@ -1076,7 +1085,7 @@ static void init_24x7_request(struct hv_24x7_request_buffer *request_buffer,
static int make_24x7_request(struct hv_24x7_request_buffer *request_buffer,
struct hv_24x7_data_result_buffer *result_buffer)
{
- unsigned long ret;
+ long ret;
/*
* NOTE: Due to variable number of array elements in request and
@@ -1087,10 +1096,19 @@ static int make_24x7_request(struct hv_24x7_request_buffer *request_buffer,
virt_to_phys(request_buffer), H24x7_DATA_BUFFER_SIZE,
virt_to_phys(result_buffer), H24x7_DATA_BUFFER_SIZE);
- if (ret)
- log_24x7_hcall(request_buffer, result_buffer, ret);
+ if (ret) {
+ struct hv_24x7_request *req;
+
+ req = request_buffer->requests;
+ pr_notice_ratelimited("hcall failed: [%d %#x %#x %d] => ret 0x%lx (%ld) detail=0x%x failing ix=%x\n",
+ req->performance_domain, req->data_offset,
+ req->starting_ix, req->starting_lpar_ix,
+ ret, ret, result_buffer->detailed_rc,
+ result_buffer->failing_request_ix);
+ return -EIO;
+ }
- return ret;
+ return 0;
}
/*
@@ -1105,9 +1123,11 @@ static int add_event_to_24x7_request(struct perf_event *event,
{
u16 idx;
int i;
+ size_t req_size;
struct hv_24x7_request *req;
- if (request_buffer->num_requests > 254) {
+ if (request_buffer->num_requests >=
+ max_num_requests(request_buffer->interface_version)) {
pr_devel("Too many requests for 24x7 HCALL %d\n",
request_buffer->num_requests);
return -EINVAL;
@@ -1124,23 +1144,113 @@ static int add_event_to_24x7_request(struct perf_event *event,
idx = event_get_vcpu(event);
}
+ req_size = H24x7_REQUEST_SIZE(request_buffer->interface_version);
+
i = request_buffer->num_requests++;
- req = &request_buffer->requests[i];
+ req = (void *) request_buffer->requests + i * req_size;
req->performance_domain = event_get_domain(event);
req->data_size = cpu_to_be16(8);
req->data_offset = cpu_to_be32(event_get_offset(event));
- req->starting_lpar_ix = cpu_to_be16(event_get_lpar(event)),
+ req->starting_lpar_ix = cpu_to_be16(event_get_lpar(event));
req->max_num_lpars = cpu_to_be16(1);
req->starting_ix = cpu_to_be16(idx);
req->max_ix = cpu_to_be16(1);
+ if (request_buffer->interface_version > 1) {
+ if (domain_needs_aggregation(req->performance_domain))
+ req->max_num_thread_groups = -1;
+ else if (req->performance_domain != HV_PERF_DOMAIN_PHYS_CHIP) {
+ req->starting_thread_group_ix = idx % 2;
+ req->max_num_thread_groups = 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * get_count_from_result - get event count from all result elements in result
+ *
+ * If the event corresponding to this result needs aggregation of the result
+ * element values, then this function does that.
+ *
+ * @event: Event associated with @res.
+ * @resb: Result buffer containing @res.
+ * @res: Result to work on.
+ * @countp: Output variable containing the event count.
+ * @next: Optional output variable pointing to the next result in @resb.
+ */
+static int get_count_from_result(struct perf_event *event,
+ struct hv_24x7_data_result_buffer *resb,
+ struct hv_24x7_result *res, u64 *countp,
+ struct hv_24x7_result **next)
+{
+ u16 num_elements = be16_to_cpu(res->num_elements_returned);
+ u16 data_size = be16_to_cpu(res->result_element_data_size);
+ unsigned int data_offset;
+ void *element_data;
+ int i;
+ u64 count;
+
+ /*
+ * We can bail out early if the result is empty.
+ */
+ if (!num_elements) {
+ pr_debug("Result of request %hhu is empty, nothing to do\n",
+ res->result_ix);
+
+ if (next)
+ *next = (struct hv_24x7_result *) res->elements;
+
+ return -ENODATA;
+ }
+
+ /*
+ * Since we always specify 1 as the maximum for the smallest resource
+	 * we're requesting, there should be only one element per result.
+ * Except when an event needs aggregation, in which case there are more.
+ */
+ if (num_elements != 1 &&
+ !domain_needs_aggregation(event_get_domain(event))) {
+ pr_err("Error: result of request %hhu has %hu elements\n",
+ res->result_ix, num_elements);
+
+ return -EIO;
+ }
+
+ if (data_size != sizeof(u64)) {
+ pr_debug("Error: result of request %hhu has data of %hu bytes\n",
+ res->result_ix, data_size);
+
+ return -ENOTSUPP;
+ }
+
+ if (resb->interface_version == 1)
+ data_offset = offsetof(struct hv_24x7_result_element_v1,
+ element_data);
+ else
+ data_offset = offsetof(struct hv_24x7_result_element_v2,
+ element_data);
+
+ /* Go through the result elements in the result. */
+ for (i = count = 0, element_data = res->elements + data_offset;
+ i < num_elements;
+ i++, element_data += data_size + data_offset)
+ count += be64_to_cpu(*((u64 *) element_data));
+
+ *countp = count;
+
+ /* The next result is after the last result element. */
+ if (next)
+ *next = element_data - data_offset;
+
return 0;
}
-static unsigned long single_24x7_request(struct perf_event *event, u64 *count)
+static int single_24x7_request(struct perf_event *event, u64 *count)
{
- unsigned long ret;
+ int ret;
struct hv_24x7_request_buffer *request_buffer;
struct hv_24x7_data_result_buffer *result_buffer;
@@ -1157,13 +1267,12 @@ static unsigned long single_24x7_request(struct perf_event *event, u64 *count)
goto out;
ret = make_24x7_request(request_buffer, result_buffer);
- if (ret) {
- log_24x7_hcall(request_buffer, result_buffer, ret);
+ if (ret)
goto out;
- }
/* process result from hcall */
- *count = be64_to_cpu(result_buffer->results[0].elements[0].element_data[0]);
+ ret = get_count_from_result(event, result_buffer,
+ result_buffer->results, count, NULL);
out:
put_cpu_var(hv_24x7_reqb);
@@ -1216,9 +1325,8 @@ static int h_24x7_event_init(struct perf_event *event)
return -EINVAL;
}
- /* Domains above 6 are invalid */
domain = event_get_domain(event);
- if (domain > 6) {
+ if (domain >= HV_PERF_DOMAIN_MAX) {
pr_devel("invalid domain %d\n", domain);
return -EINVAL;
}
@@ -1250,10 +1358,9 @@ static int h_24x7_event_init(struct perf_event *event)
static u64 h_24x7_get_value(struct perf_event *event)
{
- unsigned long ret;
u64 ct;
- ret = single_24x7_request(event, &ct);
- if (ret)
+
+ if (single_24x7_request(event, &ct))
/* We checked this in event init, shouldn't fail here... */
return 0;
@@ -1396,8 +1503,7 @@ static int h_24x7_event_commit_txn(struct pmu *pmu)
{
struct hv_24x7_request_buffer *request_buffer;
struct hv_24x7_data_result_buffer *result_buffer;
- struct hv_24x7_result *resb;
- struct perf_event *event;
+ struct hv_24x7_result *res, *next_res;
u64 count;
int i, ret, txn_flags;
struct hv_24x7_hw *h24x7hw;
@@ -1417,19 +1523,21 @@ static int h_24x7_event_commit_txn(struct pmu *pmu)
result_buffer = (void *)get_cpu_var(hv_24x7_resb);
ret = make_24x7_request(request_buffer, result_buffer);
- if (ret) {
- log_24x7_hcall(request_buffer, result_buffer, ret);
+ if (ret)
goto put_reqb;
- }
h24x7hw = &get_cpu_var(hv_24x7_hw);
- /* Update event counts from hcall */
- for (i = 0; i < request_buffer->num_requests; i++) {
- resb = &result_buffer->results[i];
- count = be64_to_cpu(resb->elements[0].element_data[0]);
- event = h24x7hw->events[i];
- h24x7hw->events[i] = NULL;
+ /* Go through results in the result buffer to update event counts. */
+ for (i = 0, res = result_buffer->results;
+ i < result_buffer->num_results; i++, res = next_res) {
+ struct perf_event *event = h24x7hw->events[res->result_ix];
+
+ ret = get_count_from_result(event, result_buffer, res, &count,
+ &next_res);
+ if (ret)
+ break;
+
update_event_count(event, count);
}
@@ -1480,6 +1588,18 @@ static int hv_24x7_init(void)
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
pr_debug("not a virtualized system, not enabling\n");
return -ENODEV;
+ } else if (!cur_cpu_spec->oprofile_cpu_type)
+ return -ENODEV;
+
+ /* POWER8 only supports v1, while POWER9 only supports v2. */
+ if (!strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
+ interface_version = 1;
+ else {
+ interface_version = 2;
+
+ /* SMT8 in POWER9 needs to aggregate result elements. */
+ if (threads_per_core == 8)
+ aggregate_result_elements = true;
}
hret = hv_perf_caps_get(&caps);
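The hv-24x7.c changes above stop indexing request_buffer->requests[] directly because a v2 (POWER9) request is twice the size of a v1 (POWER8) request; slot i therefore lives at byte offset i * H24x7_REQUEST_SIZE(version). A minimal user-space sketch of that indexing follows — only the 16/32-byte sizes are taken from the header change further down; the names and main() driver are illustrative, not kernel code.

    /* Standalone sketch: locate request slot i when the per-request size
     * depends on the negotiated interface version. The 16/32-byte sizes
     * mirror H24x7_REQUEST_SIZE() in hv-24x7.h below.
     */
    #include <stddef.h>
    #include <stdio.h>

    #define H24X7_REQUEST_SIZE(iface_version) ((iface_version) == 1 ? 16 : 32)

    static void *request_slot(void *requests, unsigned int iface_version,
                              unsigned int i)
    {
            /* Byte-offset arithmetic, as in add_event_to_24x7_request() above. */
            return (char *)requests + (size_t)i * H24X7_REQUEST_SIZE(iface_version);
    }

    int main(void)
    {
            unsigned char buffer[4096] = { 0 };

            /* Slot 3 starts at byte 48 on a v1 (POWER8) buffer, byte 96 on v2. */
            printf("v1 slot 3 offset: %td\n",
                   (char *)request_slot(buffer, 1, 3) - (char *)buffer);
            printf("v2 slot 3 offset: %td\n",
                   (char *)request_slot(buffer, 2, 3) - (char *)buffer);
            return 0;
    }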
diff --git a/arch/powerpc/perf/hv-24x7.h b/arch/powerpc/perf/hv-24x7.h
index 634ef4082cdc..5092c4a222a6 100644
--- a/arch/powerpc/perf/hv-24x7.h
+++ b/arch/powerpc/perf/hv-24x7.h
@@ -10,6 +10,8 @@ enum hv_perf_domains {
HV_PERF_DOMAIN_MAX,
};
+#define H24x7_REQUEST_SIZE(iface_version) (iface_version == 1 ? 16 : 32)
+
struct hv_24x7_request {
/* PHYSICAL domains require enabling via phyp/hmc. */
__u8 performance_domain;
@@ -42,19 +44,27 @@ struct hv_24x7_request {
/* chip, core, or virtual processor based on @performance_domain */
__be16 starting_ix;
__be16 max_ix;
+
+ /* The following fields were added in v2 of the 24x7 interface. */
+
+ __u8 starting_thread_group_ix;
+
+ /* -1 means all thread groups starting at @starting_thread_group_ix */
+ __u8 max_num_thread_groups;
+
+ __u8 reserved2[0xE];
} __packed;
struct hv_24x7_request_buffer {
/* 0 - ? */
/* 1 - ? */
-#define HV_24X7_IF_VERSION_CURRENT 0x01
__u8 interface_version;
__u8 num_requests;
__u8 reserved[0xE];
- struct hv_24x7_request requests[1];
+ struct hv_24x7_request requests[];
} __packed;
-struct hv_24x7_result_element {
+struct hv_24x7_result_element_v1 {
__be16 lpar_ix;
/*
@@ -67,10 +77,38 @@ struct hv_24x7_result_element {
__be32 lpar_cfg_instance_id;
/* size = @result_element_data_size of containing result. */
- __u64 element_data[1];
+ __u64 element_data[];
+} __packed;
+
+/*
+ * We need a separate struct for v2 because the offset of @element_data changed
+ * between versions.
+ */
+struct hv_24x7_result_element_v2 {
+ __be16 lpar_ix;
+
+ /*
+ * represents the core, chip, or virtual processor based on the
+ * request's @performance_domain
+ */
+ __be16 domain_ix;
+
+ /* -1 if @performance_domain does not refer to a virtual processor */
+ __be32 lpar_cfg_instance_id;
+
+ __u8 thread_group_ix;
+
+ __u8 reserved[7];
+
+ /* size = @result_element_data_size of containing result. */
+ __u64 element_data[];
} __packed;
struct hv_24x7_result {
+ /*
+ * The index of the 24x7 Request Structure in the 24x7 Request Buffer
+ * used to request this result.
+ */
__u8 result_ix;
/*
@@ -81,14 +119,25 @@ struct hv_24x7_result {
__u8 results_complete;
__be16 num_elements_returned;
- /* This is a copy of @data_size from the corresponding hv_24x7_request */
+ /*
+ * This is a copy of @data_size from the corresponding hv_24x7_request
+ *
+ * Warning: to obtain the size of each element in @elements you have
+ * to add the size of the other members of the result_element struct.
+ */
__be16 result_element_data_size;
__u8 reserved[0x2];
- /* WARNING: only valid for first result element due to variable sizes
- * of result elements */
- /* struct hv_24x7_result_element[@num_elements_returned] */
- struct hv_24x7_result_element elements[1];
+ /*
+ * Either
+ * struct hv_24x7_result_element_v1[@num_elements_returned]
+ * or
+ * struct hv_24x7_result_element_v2[@num_elements_returned]
+ *
+ * depending on the interface_version field of the
+ * struct hv_24x7_data_result_buffer containing this result.
+ */
+ char elements[];
} __packed;
struct hv_24x7_data_result_buffer {
@@ -104,7 +153,7 @@ struct hv_24x7_data_result_buffer {
__u8 reserved2[0x8];
/* WARNING: only valid for the first result due to variable sizes of
* results */
- struct hv_24x7_result results[1]; /* [@num_results] */
+ struct hv_24x7_result results[]; /* [@num_results] */
} __packed;
#endif
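The warning added to struct hv_24x7_result above is the key to walking @elements: each entry occupies offsetof(<element struct>, element_data) + result_element_data_size bytes, and the offset differs between v1 and v2 — which is exactly the stride get_count_from_result() uses in the .c hunk earlier. A standalone sketch of that computation, with the field layout copied from this header and plain user-space types standing in for the kernel's __be16/__be32/__u8/__u64:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct result_element_v1 {
            uint16_t lpar_ix;
            uint16_t domain_ix;
            uint32_t lpar_cfg_instance_id;
            uint64_t element_data[];
    } __attribute__((packed));

    struct result_element_v2 {
            uint16_t lpar_ix;
            uint16_t domain_ix;
            uint32_t lpar_cfg_instance_id;
            uint8_t  thread_group_ix;
            uint8_t  reserved[7];
            uint64_t element_data[];
    } __attribute__((packed));

    int main(void)
    {
            size_t data_size = sizeof(uint64_t); /* result_element_data_size */

            /* 16 bytes per element on v1, 24 bytes on v2. */
            printf("v1 stride: %zu bytes\n",
                   offsetof(struct result_element_v1, element_data) + data_size);
            printf("v2 stride: %zu bytes\n",
                   offsetof(struct result_element_v2, element_data) + data_size);
            return 0;
    }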
diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h
index 71a6bfee5c02..80204e064362 100644
--- a/arch/powerpc/perf/power9-events-list.h
+++ b/arch/powerpc/perf/power9-events-list.h
@@ -16,7 +16,7 @@ EVENT(PM_CYC, 0x0001e)
EVENT(PM_ICT_NOSLOT_CYC, 0x100f8)
EVENT(PM_CMPLU_STALL, 0x1e054)
EVENT(PM_INST_CMPL, 0x00002)
-EVENT(PM_BRU_CMPL, 0x10012)
+EVENT(PM_BRU_CMPL, 0x4d05e)
EVENT(PM_BR_MPRED_CMPL, 0x400f6)
/* All L1 D cache load references counted at finish, gated by reject */
@@ -56,3 +56,5 @@ EVENT(PM_RUN_CYC, 0x600f4)
/* Instruction Dispatched */
EVENT(PM_INST_DISP, 0x200f2)
EVENT(PM_INST_DISP_ALT, 0x300f2)
+/* Alternate Branch event code */
+EVENT(PM_BR_CMPL_ALT, 0x10012)
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index bb28e1a41257..f17435e4a489 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -231,7 +231,7 @@ static int power9_generic_events_dd1[] = {
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
[PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_DISP,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_CMPL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_CMPL_ALT,
[PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
[PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
[PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1_FIN,
@@ -453,6 +453,12 @@ static int __init init_power9_pmu(void)
* sampling scenarios in power9 DD1, instead use PM_INST_DISP.
*/
EVENT_VAR(PM_INST_CMPL, _g).id = PM_INST_DISP;
+ /*
+ * Power9 DD1 should use PM_BR_CMPL_ALT event code for
+ * "branches" to provide correct counter value.
+ */
+ EVENT_VAR(PM_BRU_CMPL, _g).id = PM_BR_CMPL_ALT;
+ EVENT_VAR(PM_BRU_CMPL, _c).id = PM_BR_CMPL_ALT;
rc = register_power_pmu(&power9_isa207_pmu);
} else {
rc = register_power_pmu(&power9_pmu);
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 9b0afe935cc1..01cb109ebf17 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -199,6 +199,18 @@ config CURRITUCK
help
This option enables support for the IBM Currituck (476fpe) evaluation board
+config FSP2
+ bool "IBM FSP2 (476fpe) Support"
+ depends on PPC_47x
+ default n
+ select 476FPE
+ select IBM_EMAC_EMAC4 if IBM_EMAC
+ select IBM_EMAC_RGMII if IBM_EMAC
+ select COMMON_CLK
+ select DEFAULT_UIMAGE
+ help
+ This option enables support for the IBM FSP2 (476fpe) board
+
config AKEBONO
bool "IBM Akebono (476gtr) Support"
depends on PPC_47x
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index 26d35b5941f7..72b824160660 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_ISS4xx) += iss4xx.o
obj-$(CONFIG_CANYONLANDS)+= canyonlands.o
obj-$(CONFIG_CURRITUCK) += ppc476.o
obj-$(CONFIG_AKEBONO) += ppc476.o
+obj-$(CONFIG_FSP2) += fsp2.o
diff --git a/arch/powerpc/platforms/44x/fsp2.c b/arch/powerpc/platforms/44x/fsp2.c
new file mode 100644
index 000000000000..92e98048404f
--- /dev/null
+++ b/arch/powerpc/platforms/44x/fsp2.c
@@ -0,0 +1,62 @@
+/*
+ * FSP-2 board specific routines
+ *
+ * Based on earlier code:
+ * Matt Porter <[email protected]>
+ * Copyright 2002-2005 MontaVista Software Inc.
+ *
+ * Eugene Surovegin <[email protected]> or <[email protected]>
+ * Copyright (c) 2003-2005 Zultys Technologies
+ *
+ * Rewritten and ported to the merged powerpc tree:
+ * Copyright 2007 David Gibson <[email protected]>, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/rtc.h>
+
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/time.h>
+#include <asm/uic.h>
+#include <asm/ppc4xx.h>
+
+static __initdata struct of_device_id fsp2_of_bus[] = {
+ { .compatible = "ibm,plb4", },
+ { .compatible = "ibm,plb6", },
+ { .compatible = "ibm,opb", },
+ {},
+};
+
+static int __init fsp2_device_probe(void)
+{
+ of_platform_bus_probe(NULL, fsp2_of_bus, NULL);
+ return 0;
+}
+machine_device_initcall(fsp2, fsp2_device_probe);
+
+static int __init fsp2_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (!of_flat_dt_is_compatible(root, "ibm,fsp2"))
+ return 0;
+ return 1;
+}
+
+define_machine(fsp2) {
+ .name = "FSP-2",
+ .probe = fsp2_probe,
+ .progress = udbg_progress,
+ .init_IRQ = uic_init_tree,
+ .get_irq = uic_get_irq,
+ .restart = ppc4xx_reset_system,
+ .calibrate_decr = generic_calibrate_decr,
+};
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 684e886eaae4..2f629e0551e9 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -344,12 +344,18 @@ config PPC_STD_MMU_64
config PPC_RADIX_MMU
bool "Radix MMU Support"
depends on PPC_BOOK3S_64
+ select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
default y
help
Enable support for the Power ISA 3.0 Radix style MMU. Currently this
is only implemented by IBM Power9 CPUs, if you don't have one of them
you can probably disable this.
+config ARCH_ENABLE_HUGEPAGE_MIGRATION
+ def_bool y
+ depends on PPC_BOOK3S_64 && HUGETLB_PAGE && MIGRATION
+
+
config PPC_MMU_NOHASH
def_bool y
depends on !PPC_STD_MMU
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 71b995bbcae0..29d4f96ed33e 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -644,32 +644,22 @@ static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
direction, attrs);
}
-static int dma_fixed_dma_supported(struct device *dev, u64 mask)
-{
- return mask == DMA_BIT_MASK(64);
-}
-
-static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
+static int dma_suported_and_switch(struct device *dev, u64 dma_mask);
static const struct dma_map_ops dma_iommu_fixed_ops = {
.alloc = dma_fixed_alloc_coherent,
.free = dma_fixed_free_coherent,
.map_sg = dma_fixed_map_sg,
.unmap_sg = dma_fixed_unmap_sg,
- .dma_supported = dma_fixed_dma_supported,
- .set_dma_mask = dma_set_mask_and_switch,
+ .dma_supported = dma_suported_and_switch,
.map_page = dma_fixed_map_page,
.unmap_page = dma_fixed_unmap_page,
+ .mapping_error = dma_iommu_mapping_error,
};
-static void cell_dma_dev_setup_fixed(struct device *dev);
-
static void cell_dma_dev_setup(struct device *dev)
{
- /* Order is important here, these are not mutually exclusive */
- if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
- cell_dma_dev_setup_fixed(dev);
- else if (get_pci_dma_ops() == &dma_iommu_ops)
+ if (get_pci_dma_ops() == &dma_iommu_ops)
set_iommu_table_base(dev, cell_get_iommu_table(dev));
else if (get_pci_dma_ops() == &dma_direct_ops)
set_dma_offset(dev, cell_dma_direct_offset);
@@ -956,38 +946,29 @@ out:
return dev_addr;
}
-static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
+static int dma_suported_and_switch(struct device *dev, u64 dma_mask)
{
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
-
if (dma_mask == DMA_BIT_MASK(64) &&
- cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
- {
+ cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) {
+ u64 addr = cell_iommu_get_fixed_address(dev) +
+ dma_iommu_fixed_base;
dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
+ dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
set_dma_ops(dev, &dma_iommu_fixed_ops);
- } else {
+ set_dma_offset(dev, addr);
+ return 1;
+ }
+
+ if (dma_iommu_dma_supported(dev, dma_mask)) {
dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
set_dma_ops(dev, get_pci_dma_ops());
+ cell_dma_dev_setup(dev);
+ return 1;
}
- cell_dma_dev_setup(dev);
-
- *dev->dma_mask = dma_mask;
-
return 0;
}
-static void cell_dma_dev_setup_fixed(struct device *dev)
-{
- u64 addr;
-
- addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
- set_dma_offset(dev, addr);
-
- dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
-}
-
static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
unsigned long base_pte)
{
@@ -1139,7 +1120,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
}
- dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
+ dma_iommu_ops.dma_supported = dma_suported_and_switch;
set_pci_dma_ops(&dma_iommu_ops);
return 0;
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index 895560f4be69..f84d52a2db40 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -115,7 +115,8 @@ static void smp_cell_setup_cpu(int cpu)
static int smp_cell_kick_cpu(int nr)
{
- BUG_ON(nr < 0 || nr >= NR_CPUS);
+ if (nr < 0 || nr >= nr_cpu_ids)
+ return -EINVAL;
if (!smp_startup_cpu(nr))
return -ENOENT;
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index d12ea7b9fd47..3f48f6df1cf3 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -48,6 +48,7 @@ static int pnv_eeh_init(void)
{
struct pci_controller *hose;
struct pnv_phb *phb;
+ int max_diag_size = PNV_PCI_DIAG_BUF_SIZE;
if (!firmware_has_feature(FW_FEATURE_OPAL)) {
pr_warn("%s: OPAL is required !\n",
@@ -69,6 +70,9 @@ static int pnv_eeh_init(void)
if (phb->model == PNV_PHB_MODEL_P7IOC)
eeh_add_flag(EEH_ENABLE_IO_FOR_LOG);
+ if (phb->diag_data_size > max_diag_size)
+ max_diag_size = phb->diag_data_size;
+
/*
* PE#0 should be regarded as valid by EEH core
* if it's not the reserved one. Currently, we
@@ -82,6 +86,8 @@ static int pnv_eeh_init(void)
break;
}
+ eeh_set_pe_aux_size(max_diag_size);
+
return 0;
}
@@ -540,7 +546,7 @@ static void pnv_eeh_get_phb_diag(struct eeh_pe *pe)
s64 rc;
rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data,
- PNV_PCI_DIAG_BUF_SIZE);
+ phb->diag_data_size);
if (rc != OPAL_SUCCESS)
pr_warn("%s: Failure %lld getting PHB#%x diag-data\n",
__func__, rc, pe->phb->global_number);
@@ -1314,7 +1320,8 @@ static void pnv_eeh_dump_hub_diag_common(struct OpalIoP7IOCErrorData *data)
static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
{
struct pnv_phb *phb = hose->private_data;
- struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
+ struct OpalIoP7IOCErrorData *data =
+ (struct OpalIoP7IOCErrorData*)phb->diag_data;
long rc;
rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
@@ -1549,10 +1556,10 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
/* Dump PHB diag-data */
rc = opal_pci_get_phb_diag_data2(phb->opal_id,
- phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
+ phb->diag_data, phb->diag_data_size);
if (rc == OPAL_SUCCESS)
pnv_pci_dump_phb_diag_data(hose,
- phb->diag.blob);
+ phb->diag_data);
/* Try best to clear it */
opal_pci_eeh_freeze_clear(phb->opal_id,
@@ -1795,7 +1802,6 @@ static int __init eeh_powernv_init(void)
{
int ret = -EINVAL;
- eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE);
ret = eeh_ops_register(&pnv_eeh_ops);
if (!ret)
pr_info("EEH: PowerNV platform initialized\n");
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 445f30a2c5ef..2abee070373f 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -23,6 +23,7 @@
#include <asm/cpuidle.h>
#include <asm/code-patching.h>
#include <asm/smp.h>
+#include <asm/runlatch.h>
#include "powernv.h"
#include "subcore.h"
@@ -30,8 +31,33 @@
/* Power ISA 3.0 allows for stop states 0x0 - 0xF */
#define MAX_STOP_STATE 0xF
+#define P9_STOP_SPR_MSR 2000
+#define P9_STOP_SPR_PSSCR 855
+
static u32 supported_cpuidle_states;
+/*
+ * The default stop state that will be used by ppc_md.power_save
+ * function on platforms that support stop instruction.
+ */
+static u64 pnv_default_stop_val;
+static u64 pnv_default_stop_mask;
+static bool default_stop_found;
+
+/*
+ * First deep stop state. Used to figure out when to save/restore
+ * hypervisor context.
+ */
+u64 pnv_first_deep_stop_state = MAX_STOP_STATE;
+
+/*
+ * psscr value and mask of the deepest stop idle state.
+ * Used when a cpu is offlined.
+ */
+static u64 pnv_deepest_stop_psscr_val;
+static u64 pnv_deepest_stop_psscr_mask;
+static bool deepest_stop_found;
+
static int pnv_save_sprs_for_deep_states(void)
{
int cpu;
@@ -48,6 +74,8 @@ static int pnv_save_sprs_for_deep_states(void)
uint64_t hid4_val = mfspr(SPRN_HID4);
uint64_t hid5_val = mfspr(SPRN_HID5);
uint64_t hmeer_val = mfspr(SPRN_HMEER);
+ uint64_t msr_val = MSR_IDLE;
+ uint64_t psscr_val = pnv_deepest_stop_psscr_val;
for_each_possible_cpu(cpu) {
uint64_t pir = get_hard_smp_processor_id(cpu);
@@ -61,6 +89,18 @@ static int pnv_save_sprs_for_deep_states(void)
if (rc != 0)
return rc;
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ rc = opal_slw_set_reg(pir, P9_STOP_SPR_MSR, msr_val);
+ if (rc)
+ return rc;
+
+ rc = opal_slw_set_reg(pir,
+ P9_STOP_SPR_PSSCR, psscr_val);
+
+ if (rc)
+ return rc;
+ }
+
/* HIDs are per core registers */
if (cpu_thread_in_core(cpu) == 0) {
@@ -72,17 +112,21 @@ static int pnv_save_sprs_for_deep_states(void)
if (rc != 0)
return rc;
- rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
- if (rc != 0)
- return rc;
+		/* Only p8 needs to set extra HID registers */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
- rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
- if (rc != 0)
- return rc;
+ rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
+ if (rc != 0)
+ return rc;
- rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
- if (rc != 0)
- return rc;
+ rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
+ if (rc != 0)
+ return rc;
+
+ rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
+ if (rc != 0)
+ return rc;
+ }
}
}
@@ -96,15 +140,24 @@ static void pnv_alloc_idle_core_states(void)
u32 *core_idle_state;
/*
- * core_idle_state - First 8 bits track the idle state of each thread
- * of the core. The 8th bit is the lock bit. Initially all thread bits
- * are set. They are cleared when the thread enters deep idle state
- * like sleep and winkle. Initially the lock bit is cleared.
- * The lock bit has 2 purposes
- * a. While the first thread is restoring core state, it prevents
- * other threads in the core from switching to process context.
- * b. While the last thread in the core is saving the core state, it
- * prevents a different thread from waking up.
+ * core_idle_state - The lower 8 bits track the idle state of
+ * each thread of the core.
+ *
+ * The most significant bit is the lock bit.
+ *
+ * Initially all the bits corresponding to threads_per_core
+	 * are set. They are cleared when the thread enters a deep idle
+	 * state such as sleep or winkle/stop.
+ *
+ * Initially the lock bit is cleared. The lock bit has 2
+ * purposes:
+ * a. While the first thread in the core waking up from
+ * idle is restoring core state, it prevents other
+ * threads in the core from switching to process
+ * context.
+ * b. While the last thread in the core is saving the
+ * core state, it prevents a different thread from
+ * waking up.
*/
for (i = 0; i < nr_cores; i++) {
int first_cpu = i * threads_per_core;
@@ -112,7 +165,7 @@ static void pnv_alloc_idle_core_states(void)
size_t paca_ptr_array_size;
core_idle_state = kmalloc_node(sizeof(u32), GFP_KERNEL, node);
- *core_idle_state = PNV_CORE_IDLE_THREAD_BITS;
+ *core_idle_state = (1 << threads_per_core) - 1;
paca_ptr_array_size = (threads_per_core *
sizeof(struct paca_struct *));
@@ -231,56 +284,104 @@ static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
show_fastsleep_workaround_applyonce,
store_fastsleep_workaround_applyonce);
-/*
- * The default stop state that will be used by ppc_md.power_save
- * function on platforms that support stop instruction.
- */
-static u64 pnv_default_stop_val;
-static u64 pnv_default_stop_mask;
-static bool default_stop_found;
+static unsigned long __power7_idle_type(unsigned long type)
+{
+ unsigned long srr1;
-/*
- * Used for ppc_md.power_save which needs a function with no parameters
- */
-static void power9_idle(void)
+ if (!prep_irq_for_idle_irqsoff())
+ return 0;
+
+ __ppc64_runlatch_off();
+ srr1 = power7_idle_insn(type);
+ __ppc64_runlatch_on();
+
+ fini_irq_for_idle_irqsoff();
+
+ return srr1;
+}
+
+void power7_idle_type(unsigned long type)
+{
+ unsigned long srr1;
+
+ srr1 = __power7_idle_type(type);
+ irq_set_pending_from_srr1(srr1);
+}
+
+void power7_idle(void)
{
- power9_idle_stop(pnv_default_stop_val, pnv_default_stop_mask);
+ if (!powersave_nap)
+ return;
+
+ power7_idle_type(PNV_THREAD_NAP);
}
-/*
- * First deep stop state. Used to figure out when to save/restore
- * hypervisor context.
- */
-u64 pnv_first_deep_stop_state = MAX_STOP_STATE;
+static unsigned long __power9_idle_type(unsigned long stop_psscr_val,
+ unsigned long stop_psscr_mask)
+{
+ unsigned long psscr;
+ unsigned long srr1;
+
+ if (!prep_irq_for_idle_irqsoff())
+ return 0;
+
+ psscr = mfspr(SPRN_PSSCR);
+ psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;
+
+ __ppc64_runlatch_off();
+ srr1 = power9_idle_stop(psscr);
+ __ppc64_runlatch_on();
+
+ fini_irq_for_idle_irqsoff();
+
+ return srr1;
+}
+
+void power9_idle_type(unsigned long stop_psscr_val,
+ unsigned long stop_psscr_mask)
+{
+ unsigned long srr1;
+
+ srr1 = __power9_idle_type(stop_psscr_val, stop_psscr_mask);
+ irq_set_pending_from_srr1(srr1);
+}
/*
- * psscr value and mask of the deepest stop idle state.
- * Used when a cpu is offlined.
+ * Used for ppc_md.power_save which needs a function with no parameters
*/
-static u64 pnv_deepest_stop_psscr_val;
-static u64 pnv_deepest_stop_psscr_mask;
-static bool deepest_stop_found;
+void power9_idle(void)
+{
+ power9_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
+}
+#ifdef CONFIG_HOTPLUG_CPU
/*
* pnv_cpu_offline: A function that puts the CPU into the deepest
* available platform idle state on a CPU-Offline.
+ * Called with interrupts hard disabled and no lazy irq pending.
*/
unsigned long pnv_cpu_offline(unsigned int cpu)
{
unsigned long srr1;
-
u32 idle_states = pnv_get_supported_cpuidle_states();
+ __ppc64_runlatch_off();
+
if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) {
- srr1 = power9_idle_stop(pnv_deepest_stop_psscr_val,
- pnv_deepest_stop_psscr_mask);
+ unsigned long psscr;
+
+ psscr = mfspr(SPRN_PSSCR);
+ psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
+ pnv_deepest_stop_psscr_val;
+ srr1 = power9_idle_stop(psscr);
+
} else if (idle_states & OPAL_PM_WINKLE_ENABLED) {
- srr1 = power7_winkle();
+ srr1 = power7_idle_insn(PNV_THREAD_WINKLE);
} else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
(idle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
- srr1 = power7_sleep();
+ srr1 = power7_idle_insn(PNV_THREAD_SLEEP);
} else if (idle_states & OPAL_PM_NAP_ENABLED) {
- srr1 = power7_nap(1);
+ srr1 = power7_idle_insn(PNV_THREAD_NAP);
} else {
/* This is the fallback method. We emulate snooze */
while (!generic_check_cpu_restart(cpu)) {
@@ -291,8 +392,11 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
HMT_medium();
}
+ __ppc64_runlatch_on();
+
return srr1;
}
+#endif
/*
* Power ISA 3.0 idle initialization.
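The rewritten core_idle_state comment above describes a per-core bitmask with one bit per hardware thread plus a lock bit, and power9_idle_type() composes the PSSCR as (psscr & ~stop_psscr_mask) | stop_psscr_val. A small user-space sketch of both bit patterns; the values below are made-up examples, and the real constants live in asm/cpuidle.h and the PSSCR definition.

    #include <stdint.h>
    #include <stdio.h>

    /* Initial core_idle_state: one "thread is not in deep idle" bit per HW thread. */
    static uint32_t init_core_idle_state(unsigned int threads_per_core)
    {
            return (1u << threads_per_core) - 1;
    }

    /* power9_idle_type(): keep PSSCR fields outside the mask, set the requested
     * stop level in the fields the mask covers.
     */
    static uint64_t compose_psscr(uint64_t cur, uint64_t stop_val, uint64_t stop_mask)
    {
            return (cur & ~stop_mask) | stop_val;
    }

    int main(void)
    {
            printf("SMT4 initial state: 0x%x\n", init_core_idle_state(4)); /* 0xf  */
            printf("SMT8 initial state: 0x%x\n", init_core_idle_state(8)); /* 0xff */
            printf("psscr: 0x%llx\n", (unsigned long long)
                   compose_psscr(0xabc00007ULL, 0x3, 0xf));               /* 0xabc00003 */
            return 0;
    }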
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index f620572f891f..4ca6c26a56d5 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -99,10 +99,10 @@ opal_return:
lwz r4,8(r1);
ld r5,PPC_LR_STKOFF(r1);
ld r6,PACASAVEDMSR(r13);
- mtspr SPRN_SRR0,r5;
- mtspr SPRN_SRR1,r6;
mtcr r4;
- rfid
+ mtspr SPRN_HSRR0,r5;
+ mtspr SPRN_HSRR1,r6;
+ hrfid
opal_real_call:
mfcr r11
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 283caf1070c9..437613588df1 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1718,6 +1718,100 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
*/
}
+static bool pnv_pci_ioda_pe_single_vendor(struct pnv_ioda_pe *pe)
+{
+ unsigned short vendor = 0;
+ struct pci_dev *pdev;
+
+ if (pe->device_count == 1)
+ return true;
+
+ /* pe->pdev should be set if it's a single device, pe->pbus if not */
+ if (!pe->pbus)
+ return true;
+
+ list_for_each_entry(pdev, &pe->pbus->devices, bus_list) {
+ if (!vendor) {
+ vendor = pdev->vendor;
+ continue;
+ }
+
+ if (pdev->vendor != vendor)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Reconfigure TVE#0 to be usable as 64-bit DMA space.
+ *
+ * The first 4GB of virtual memory for a PE is reserved for 32-bit accesses.
+ * Devices can only access more than that if bit 59 of the PCI address is set
+ * by hardware, which indicates TVE#1 should be used instead of TVE#0.
+ * Many PCI devices are not capable of addressing that many bits, and as a
+ * result are limited to the 4GB of virtual memory made available to 32-bit
+ * devices in TVE#0.
+ *
+ * In order to work around this, reconfigure TVE#0 to be suitable for 64-bit
+ * devices by making the virtual memory past the first 4GB accessible to
+ * 64-bit DMAs. This should only be used by devices that want more than 4GB,
+ * and only on PEs that have no 32-bit devices.
+ *
+ * Currently this will only work on PHB3 (POWER8).
+ */
+static int pnv_pci_ioda_dma_64bit_bypass(struct pnv_ioda_pe *pe)
+{
+ u64 window_size, table_size, tce_count, addr;
+ struct page *table_pages;
+ u64 tce_order = 28; /* 256MB TCEs */
+ __be64 *tces;
+ s64 rc;
+
+ /*
+ * Window size needs to be a power of two, but needs to account for
+	 * shifting memory by the 4GB offset required to skip the 32-bit space.
+ */
+ window_size = roundup_pow_of_two(memory_hotplug_max() + (1ULL << 32));
+ tce_count = window_size >> tce_order;
+ table_size = tce_count << 3;
+
+ if (table_size < PAGE_SIZE)
+ table_size = PAGE_SIZE;
+
+ table_pages = alloc_pages_node(pe->phb->hose->node, GFP_KERNEL,
+ get_order(table_size));
+ if (!table_pages)
+ goto err;
+
+ tces = page_address(table_pages);
+ if (!tces)
+ goto err;
+
+ memset(tces, 0, table_size);
+
+ for (addr = 0; addr < memory_hotplug_max(); addr += (1 << tce_order)) {
+ tces[(addr + (1ULL << 32)) >> tce_order] =
+ cpu_to_be64(addr | TCE_PCI_READ | TCE_PCI_WRITE);
+ }
+
+ rc = opal_pci_map_pe_dma_window(pe->phb->opal_id,
+ pe->pe_number,
+ /* reconfigure window 0 */
+ (pe->pe_number << 1) + 0,
+ 1,
+ __pa(tces),
+ table_size,
+ 1 << tce_order);
+ if (rc == OPAL_SUCCESS) {
+ pe_info(pe, "Using 64-bit DMA iommu bypass (through TVE#0)\n");
+ return 0;
+ }
+err:
+ pe_err(pe, "Error configuring 64-bit DMA bypass\n");
+ return -EIO;
+}
+
static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
@@ -1726,6 +1820,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
struct pnv_ioda_pe *pe;
uint64_t top;
bool bypass = false;
+ s64 rc;
if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
return -ENODEV;;
@@ -1740,8 +1835,27 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
set_dma_ops(&pdev->dev, &dma_direct_ops);
} else {
- dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
- set_dma_ops(&pdev->dev, &dma_iommu_ops);
+ /*
+ * If the device can't set the TCE bypass bit but still wants
+ * to access 4GB or more, on PHB3 we can reconfigure TVE#0 to
+ * bypass the 32-bit region and be usable for 64-bit DMAs.
+ * The device needs to be able to address all of this space.
+ */
+ if (dma_mask >> 32 &&
+ dma_mask > (memory_hotplug_max() + (1ULL << 32)) &&
+ pnv_pci_ioda_pe_single_vendor(pe) &&
+ phb->model == PNV_PHB_MODEL_PHB3) {
+ /* Configure the bypass mode */
+ rc = pnv_pci_ioda_dma_64bit_bypass(pe);
+ if (rc)
+ return rc;
+ /* 4GB offset bypasses 32-bit space */
+ set_dma_offset(&pdev->dev, (1ULL << 32));
+ set_dma_ops(&pdev->dev, &dma_direct_ops);
+ } else {
+ dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
+ set_dma_ops(&pdev->dev, &dma_iommu_ops);
+ }
}
*pdev->dev.dma_mask = dma_mask;
@@ -3123,13 +3237,13 @@ static int pnv_pci_diag_data_set(void *data, u64 val)
phb = hose->private_data;
/* Retrieve the diag data from firmware */
- ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
- PNV_PCI_DIAG_BUF_SIZE);
+ ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
+ phb->diag_data_size);
if (ret != OPAL_SUCCESS)
return -EIO;
/* Print the diag data to the kernel log */
- pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);
+ pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);
return 0;
}
@@ -3725,6 +3839,15 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
else
phb->model = PNV_PHB_MODEL_UNKNOWN;
+ /* Initialize diagnostic data buffer */
+ prop32 = of_get_property(np, "ibm,phb-diag-data-size", NULL);
+ if (prop32)
+ phb->diag_data_size = be32_to_cpup(prop32);
+ else
+ phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE;
+
+ phb->diag_data = memblock_virt_alloc(phb->diag_data_size, 0);
+
/* Parse 32-bit and IO ranges (if any) */
pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
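The comment for pnv_pci_ioda_dma_64bit_bypass() above spells out the sizing rule: the TCE window must be a power of two and must cover maximum memory plus the 4GB offset used to skip the 32-bit region, using 256MB TCEs of 8 bytes each. A standalone sketch of just that arithmetic; the 96GB memory size is a made-up example.

    #include <stdint.h>
    #include <stdio.h>

    /* Round up to the next power of two, like the kernel's roundup_pow_of_two(). */
    static uint64_t roundup_pow2(uint64_t x)
    {
            uint64_t p = 1;

            while (p < x)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            const uint64_t tce_order = 28;          /* 256MB TCEs */
            const uint64_t max_mem = 96ULL << 30;   /* example: 96GB of RAM */
            uint64_t window_size, tce_count, table_size;

            /* Cover memory shifted up by the 4GB offset, rounded to a power of two. */
            window_size = roundup_pow2(max_mem + (1ULL << 32));
            tce_count = window_size >> tce_order;
            table_size = tce_count << 3;            /* 8 bytes per TCE */

            printf("window %llu GB, %llu TCEs, table %llu KB\n",
                   (unsigned long long)(window_size >> 30),
                   (unsigned long long)tce_count,
                   (unsigned long long)(table_size >> 10)); /* 128 GB, 512, 4 KB */
            return 0;
    }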
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 935ccb249a8a..7905d179d036 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -227,11 +227,39 @@ void pnv_teardown_msi_irqs(struct pci_dev *pdev)
}
#endif /* CONFIG_PCI_MSI */
+/* Nicely print the contents of the PE State Tables (PEST). */
+static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size)
+{
+ __be64 prevA = ULONG_MAX, prevB = ULONG_MAX;
+ bool dup = false;
+ int i;
+
+ for (i = 0; i < pest_size; i++) {
+ __be64 peA = be64_to_cpu(pestA[i]);
+ __be64 peB = be64_to_cpu(pestB[i]);
+
+ if (peA != prevA || peB != prevB) {
+ if (dup) {
+ pr_info("PE[..%03x] A/B: as above\n", i-1);
+ dup = false;
+ }
+ prevA = peA;
+ prevB = peB;
+ if (peA & PNV_IODA_STOPPED_STATE ||
+ peB & PNV_IODA_STOPPED_STATE)
+ pr_info("PE[%03x] A/B: %016llx %016llx\n",
+ i, peA, peB);
+ } else if (!dup && (peA & PNV_IODA_STOPPED_STATE ||
+ peB & PNV_IODA_STOPPED_STATE)) {
+ dup = true;
+ }
+ }
+}
+
static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
struct OpalIoPhbErrorCommon *common)
{
struct OpalIoP7IOCPhbErrorData *data;
- int i;
data = (struct OpalIoP7IOCPhbErrorData *)common;
pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
@@ -308,22 +336,13 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
be64_to_cpu(data->dma1ErrorLog0),
be64_to_cpu(data->dma1ErrorLog1));
- for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
- if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
- (be64_to_cpu(data->pestB[i]) >> 63) == 0)
- continue;
-
- pr_info("PE[%3d] A/B: %016llx %016llx\n",
- i, be64_to_cpu(data->pestA[i]),
- be64_to_cpu(data->pestB[i]));
- }
+ pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS);
}
static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
struct OpalIoPhbErrorCommon *common)
{
struct OpalIoPhb3ErrorData *data;
- int i;
data = (struct OpalIoPhb3ErrorData*)common;
pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
@@ -404,15 +423,109 @@ static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
be64_to_cpu(data->dma1ErrorLog0),
be64_to_cpu(data->dma1ErrorLog1));
- for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
- if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
- (be64_to_cpu(data->pestB[i]) >> 63) == 0)
- continue;
+ pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS);
+}
- pr_info("PE[%3d] A/B: %016llx %016llx\n",
- i, be64_to_cpu(data->pestA[i]),
- be64_to_cpu(data->pestB[i]));
- }
+static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose,
+ struct OpalIoPhbErrorCommon *common)
+{
+ struct OpalIoPhb4ErrorData *data;
+
+ data = (struct OpalIoPhb4ErrorData*)common;
+ pr_info("PHB4 PHB#%d Diag-data (Version: %d)\n",
+ hose->global_number, be32_to_cpu(common->version));
+ if (data->brdgCtl)
+ pr_info("brdgCtl: %08x\n",
+ be32_to_cpu(data->brdgCtl));
+ if (data->deviceStatus || data->slotStatus ||
+ data->linkStatus || data->devCmdStatus ||
+ data->devSecStatus)
+ pr_info("RootSts: %08x %08x %08x %08x %08x\n",
+ be32_to_cpu(data->deviceStatus),
+ be32_to_cpu(data->slotStatus),
+ be32_to_cpu(data->linkStatus),
+ be32_to_cpu(data->devCmdStatus),
+ be32_to_cpu(data->devSecStatus));
+ if (data->rootErrorStatus || data->uncorrErrorStatus ||
+ data->corrErrorStatus)
+ pr_info("RootErrSts: %08x %08x %08x\n",
+ be32_to_cpu(data->rootErrorStatus),
+ be32_to_cpu(data->uncorrErrorStatus),
+ be32_to_cpu(data->corrErrorStatus));
+ if (data->tlpHdr1 || data->tlpHdr2 ||
+ data->tlpHdr3 || data->tlpHdr4)
+ pr_info("RootErrLog: %08x %08x %08x %08x\n",
+ be32_to_cpu(data->tlpHdr1),
+ be32_to_cpu(data->tlpHdr2),
+ be32_to_cpu(data->tlpHdr3),
+ be32_to_cpu(data->tlpHdr4));
+ if (data->sourceId)
+ pr_info("sourceId: %08x\n", be32_to_cpu(data->sourceId));
+ if (data->nFir)
+ pr_info("nFir: %016llx %016llx %016llx\n",
+ be64_to_cpu(data->nFir),
+ be64_to_cpu(data->nFirMask),
+ be64_to_cpu(data->nFirWOF));
+ if (data->phbPlssr || data->phbCsr)
+ pr_info("PhbSts: %016llx %016llx\n",
+ be64_to_cpu(data->phbPlssr),
+ be64_to_cpu(data->phbCsr));
+ if (data->lemFir)
+ pr_info("Lem: %016llx %016llx %016llx\n",
+ be64_to_cpu(data->lemFir),
+ be64_to_cpu(data->lemErrorMask),
+ be64_to_cpu(data->lemWOF));
+ if (data->phbErrorStatus)
+ pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(data->phbErrorStatus),
+ be64_to_cpu(data->phbFirstErrorStatus),
+ be64_to_cpu(data->phbErrorLog0),
+ be64_to_cpu(data->phbErrorLog1));
+ if (data->phbTxeErrorStatus)
+ pr_info("PhbTxeErr: %016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(data->phbTxeErrorStatus),
+ be64_to_cpu(data->phbTxeFirstErrorStatus),
+ be64_to_cpu(data->phbTxeErrorLog0),
+ be64_to_cpu(data->phbTxeErrorLog1));
+ if (data->phbRxeArbErrorStatus)
+ pr_info("RxeArbErr: %016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(data->phbRxeArbErrorStatus),
+ be64_to_cpu(data->phbRxeArbFirstErrorStatus),
+ be64_to_cpu(data->phbRxeArbErrorLog0),
+ be64_to_cpu(data->phbRxeArbErrorLog1));
+ if (data->phbRxeMrgErrorStatus)
+ pr_info("RxeMrgErr: %016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(data->phbRxeMrgErrorStatus),
+ be64_to_cpu(data->phbRxeMrgFirstErrorStatus),
+ be64_to_cpu(data->phbRxeMrgErrorLog0),
+ be64_to_cpu(data->phbRxeMrgErrorLog1));
+ if (data->phbRxeTceErrorStatus)
+ pr_info("RxeTceErr: %016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(data->phbRxeTceErrorStatus),
+ be64_to_cpu(data->phbRxeTceFirstErrorStatus),
+ be64_to_cpu(data->phbRxeTceErrorLog0),
+ be64_to_cpu(data->phbRxeTceErrorLog1));
+
+ if (data->phbPblErrorStatus)
+ pr_info("PblErr: %016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(data->phbPblErrorStatus),
+ be64_to_cpu(data->phbPblFirstErrorStatus),
+ be64_to_cpu(data->phbPblErrorLog0),
+ be64_to_cpu(data->phbPblErrorLog1));
+ if (data->phbPcieDlpErrorStatus)
+ pr_info("PcieDlp: %016llx %016llx %016llx\n",
+ be64_to_cpu(data->phbPcieDlpErrorLog1),
+ be64_to_cpu(data->phbPcieDlpErrorLog2),
+ be64_to_cpu(data->phbPcieDlpErrorStatus));
+ if (data->phbRegbErrorStatus)
+ pr_info("RegbErr: %016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(data->phbRegbErrorStatus),
+ be64_to_cpu(data->phbRegbFirstErrorStatus),
+ be64_to_cpu(data->phbRegbErrorLog0),
+ be64_to_cpu(data->phbRegbErrorLog1));
+
+
+ pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS);
}
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
@@ -431,6 +544,9 @@ void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
pnv_pci_dump_phb3_diag_data(hose, common);
break;
+ case OPAL_PHB_ERROR_DATA_TYPE_PHB4:
+ pnv_pci_dump_phb4_diag_data(hose, common);
+ break;
default:
pr_warn("%s: Unrecognized ioType %d\n",
__func__, be32_to_cpu(common->ioType));
@@ -445,8 +561,8 @@ static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
spin_lock_irqsave(&phb->lock, flags);
/* Fetch PHB diag-data */
- rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
- PNV_PCI_DIAG_BUF_SIZE);
+ rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
+ phb->diag_data_size);
has_diag = (rc == OPAL_SUCCESS);
/* If PHB supports compound PE, to handle it */
@@ -474,7 +590,7 @@ static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
* with the normal errors generated when probing empty slots
*/
if (has_diag && ret)
- pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);
+ pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);
spin_unlock_irqrestore(&phb->lock, flags);
}
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 18c8a2fa03b8..f16bc403ec03 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -33,6 +33,9 @@ enum pnv_phb_model {
#define PNV_IODA_PE_SLAVE (1 << 4) /* Slave PE in compound case */
#define PNV_IODA_PE_VF (1 << 5) /* PE for one VF */
+/* Indicates operations are frozen for a PE: MMIO in PESTA & DMA in PESTB. */
+#define PNV_IODA_STOPPED_STATE 0x8000000000000000
+
/* Data associated with a PE, including IOMMU tracking etc.. */
struct pnv_phb;
struct pnv_ioda_pe {
@@ -169,13 +172,9 @@ struct pnv_phb {
unsigned int pe_rmap[0x10000];
} ioda;
- /* PHB and hub status structure */
- union {
- unsigned char blob[PNV_PCI_DIAG_BUF_SIZE];
- struct OpalIoP7IOCPhbErrorData p7ioc;
- struct OpalIoPhb3ErrorData phb3;
- struct OpalIoP7IOCErrorData hub_diag;
- } diag;
+ /* PHB and hub diagnostics */
+ unsigned int diag_data_size;
+ u8 *diag_data;
/* Nvlink2 data */
struct npu {
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 4aff754b6f2c..40dae96f7e20 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -63,7 +63,8 @@ static int pnv_smp_kick_cpu(int nr)
long rc;
uint8_t status;
- BUG_ON(nr < 0 || nr >= NR_CPUS);
+ if (nr < 0 || nr >= nr_cpu_ids)
+ return -EINVAL;
/*
* If we already started or OPAL is not supported, we just
@@ -144,7 +145,14 @@ static void pnv_smp_cpu_kill_self(void)
unsigned long srr1, wmask;
/* Standard hot unplug procedure */
- local_irq_disable();
+ /*
+	 * This hard disables local interrupts, ensuring we have no lazy
+ * irqs pending.
+ */
+ WARN_ON(irqs_disabled());
+ hard_irq_disable();
+ WARN_ON(lazy_irq_pending());
+
idle_task_exit();
current->active_mm = NULL; /* for sanity */
cpu = smp_processor_id();
@@ -162,16 +170,6 @@ static void pnv_smp_cpu_kill_self(void)
*/
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
- /*
- * Hard-disable interrupts, and then clear irq_happened flags
- * that we can safely ignore while off-line, since they
- * are for things for which we do no processing when off-line
- * (or in the case of HMI, all the processing we need to do
- * is done in lower-level real-mode code).
- */
- hard_irq_disable();
- local_paca->irq_happened &= ~(PACA_IRQ_DEC | PACA_IRQ_HMI);
-
while (!generic_check_cpu_restart(cpu)) {
/*
* Clear IPI flag, since we don't handle IPIs while
@@ -182,9 +180,9 @@ static void pnv_smp_cpu_kill_self(void)
*/
kvmppc_set_host_ipi(cpu, 0);
- ppc64_runlatch_off();
srr1 = pnv_cpu_offline(cpu);
- ppc64_runlatch_on();
+
+ WARN_ON(lazy_irq_pending());
/*
* If the SRR1 value indicates that we woke up due to
@@ -198,8 +196,7 @@ static void pnv_smp_cpu_kill_self(void)
* contains 0.
*/
if (((srr1 & wmask) == SRR1_WAKEEE) ||
- ((srr1 & wmask) == SRR1_WAKEHVI) ||
- (local_paca->irq_happened & PACA_IRQ_EE)) {
+ ((srr1 & wmask) == SRR1_WAKEHVI)) {
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
if (xive_enabled())
xive_flush_interrupt();
@@ -211,14 +208,15 @@ static void pnv_smp_cpu_kill_self(void)
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
}
- local_paca->irq_happened &= ~(PACA_IRQ_EE | PACA_IRQ_DBELL);
smp_mb();
if (cpu_core_split_required())
continue;
if (srr1 && !generic_check_cpu_restart(cpu))
- DBG("CPU%d Unexpected exit while offline !\n", cpu);
+ DBG("CPU%d Unexpected exit while offline srr1=%lx!\n",
+ cpu, srr1);
+
}
/* Re-enable decrementer interrupts */
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 309876d699e9..596ae2e98040 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -18,6 +18,7 @@
#include <linux/stop_machine.h>
#include <asm/cputhreads.h>
+#include <asm/cpuidle.h>
#include <asm/kvm_ppc.h>
#include <asm/machdep.h>
#include <asm/opal.h>
@@ -182,7 +183,7 @@ static void unsplit_core(void)
cpu = smp_processor_id();
if (cpu_thread_in_core(cpu) != 0) {
while (mfspr(SPRN_HID0) & mask)
- power7_nap(0);
+ power7_idle_insn(PNV_THREAD_NAP);
per_cpu(split_state, cpu).step = SYNC_STEP_UNSPLIT;
return;
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 913c54e23eea..3a6dfd14f64b 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -124,7 +124,7 @@ config HV_PERF_CTRS
Enable access to hypervisor supplied counters in perf. Currently,
this enables code that uses the hcall GetPerfCounterInfo and 24x7
interfaces to retrieve counters. GPCI exists on Power 6 and later
- systems. 24x7 is available on Power 8 systems.
+ systems. 24x7 is available on Power 8 and later systems.
If unsure, select Y.
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 7bc0e91f8715..6afd1efd3633 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -554,7 +554,7 @@ static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
{
int rc;
- pr_debug("Attemping to remove CPU %s, drc index: %x\n",
+ pr_debug("Attempting to remove CPU %s, drc index: %x\n",
dn->name, drc_index);
rc = dlpar_offline_cpu(dn);
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 1fb162ba9d1c..ca9b2f4aaa22 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -22,6 +22,7 @@
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/sparsemem.h>
+#include <asm/fadump.h>
#include "pseries.h"
static bool rtas_hp_event;
@@ -408,6 +409,12 @@ static bool lmb_is_removable(struct of_drconf_cell *lmb)
scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
phys_addr = lmb->base_addr;
+#ifdef CONFIG_FA_DUMP
+	/* Don't hot-remove memory that falls in the fadump boot memory area */
+ if (is_fadump_boot_memory_area(phys_addr, block_sz))
+ return false;
+#endif
+
for (i = 0; i < scns_per_block; i++) {
pfn = PFN_DOWN(phys_addr);
if (!pfn_present(pfn))
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 6541d0b03e4c..495ba4e7336d 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -301,7 +301,7 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
int ssize, unsigned long inv_flags)
{
unsigned long lpar_rc;
- unsigned long flags = (newpp & 7) | H_AVPN;
+ unsigned long flags;
unsigned long want_v;
want_v = hpte_encode_avpn(vpn, psize, ssize);
@@ -309,6 +309,11 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
want_v, slot, flags, psize);
+ flags = (newpp & 7) | H_AVPN;
+ if (mmu_has_feature(MMU_FTR_KERNEL_RO))
+ /* Move pp0 into bit 8 (IBM 55) */
+ flags |= (newpp & HPTE_R_PP0) >> 55;
+
lpar_rc = plpar_pte_protect(flags, slot, want_v);
if (lpar_rc == H_NOT_FOUND) {
@@ -380,6 +385,10 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
BUG_ON(slot == -1);
flags = newpp & 7;
+ if (mmu_has_feature(MMU_FTR_KERNEL_RO))
+ /* Move pp0 into bit 8 (IBM 55) */
+ flags |= (newpp & HPTE_R_PP0) >> 55;
+
lpar_rc = plpar_pte_protect(flags, slot, 0);
BUG_ON(lpar_rc != H_SUCCESS);
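Both hunks above fold the pp0 permission bit into the H_PROTECT flags with (newpp & HPTE_R_PP0) >> 55. In IBM bit numbering, bit 0 is the most significant bit of a 64-bit doubleword, so IBM bit n corresponds to mask 1 << (63 - n); pp0 sits at IBM bit 0 of the HPTE and must land at IBM bit 55 of the flags, hence the shift. A quick sketch of that conversion, assuming HPTE_R_PP0 is the most-significant bit as in the hash-MMU headers (H_AVPN and the rest of the real flags are omitted):

    #include <stdint.h>
    #include <stdio.h>

    #define HPTE_R_PP0      0x8000000000000000ULL   /* assumed MSB, per mmu-hash.h */

    /* IBM bit n of a 64-bit register corresponds to mask 1 << (63 - n). */
    static uint64_t ibm_bit(unsigned int n)
    {
            return 1ULL << (63 - n);
    }

    int main(void)
    {
            uint64_t newpp = HPTE_R_PP0 | 0x2;      /* pp0 set, low pp bits = 2 */
            uint64_t flags;

            /* Same composition as pSeries_lpar_hpte_updatepp() above. */
            flags = (newpp & 7) | ((newpp & HPTE_R_PP0) >> 55);

            printf("flags = 0x%llx, pp0 at IBM bit 55: %s\n",
                   (unsigned long long)flags,
                   (flags & ibm_bit(55)) ? "yes" : "no"); /* 0x102, yes */
            return 0;
    }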
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 52ca6b311d44..24785f63fb40 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -151,7 +151,8 @@ static void smp_setup_cpu(int cpu)
static int smp_pSeries_kick_cpu(int nr)
{
- BUG_ON(nr < 0 || nr >= NR_CPUS);
+ if (nr < 0 || nr >= nr_cpu_ids)
+ return -EINVAL;
if (!smp_startup_cpu(nr))
return -ENOENT;
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index 117beb9e8786..8a47f168476b 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -519,7 +519,7 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
{
struct vio_dev *viodev = to_vio_dev(dev);
struct iommu_table *tbl;
- dma_addr_t ret = DMA_ERROR_CODE;
+ dma_addr_t ret = IOMMU_MAPPING_ERROR;
tbl = get_iommu_table_base(dev);
if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
@@ -625,6 +625,7 @@ static const struct dma_map_ops vio_dma_mapping_ops = {
.unmap_page = vio_dma_iommu_unmap_page,
.dma_supported = vio_dma_iommu_dma_supported,
.get_required_mask = vio_dma_get_required_mask,
+ .mapping_error = dma_iommu_mapping_error,
};
/**
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index a7fe5fee744f..2799706106c6 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -45,6 +45,7 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/pfn_t.h>
+#include <linux/uio.h>
#include <asm/page.h>
#include <asm/prom.h>
@@ -163,8 +164,15 @@ axon_ram_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pa
return __axon_ram_direct_access(bank, pgoff, nr_pages, kaddr, pfn);
}
+static size_t axon_ram_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+{
+ return copy_from_iter(addr, bytes, i);
+}
+
static const struct dax_operations axon_ram_dax_ops = {
.direct_access = axon_ram_dax_direct_access,
+ .copy_from_iter = axon_ram_copy_from_iter,
};
/**
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index 3e828b20c21e..2842f9d63d21 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -79,7 +79,7 @@ unsigned int mpc8xx_get_irq(void)
irq = in_be32(&siu_reg->sc_sivec) >> 26;
if (irq == PIC_VEC_SPURRIOUS)
- irq = 0;
+ return 0;
return irq_linear_revmap(mpc8xx_pic_host, irq);
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 8f5e3035483b..6595462b1fc8 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -1417,7 +1417,7 @@ bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
/* Get ready for interrupts */
xive_setup_cpu();
- pr_info("Interrupt handling intialized with %s backend\n",
+ pr_info("Interrupt handling initialized with %s backend\n",
xive_ops->name);
pr_info("Using priority %d for all interrupts\n", max_prio);
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index ab9ecce61ee5..0f95476b01f6 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -633,8 +633,8 @@ u32 xive_native_alloc_vp_block(u32 max_vcpus)
if (max_vcpus > (1 << order))
order++;
- pr_info("VP block alloc, for max VCPUs %d use order %d\n",
- max_vcpus, order);
+ pr_debug("VP block alloc, for max VCPUs %d use order %d\n",
+ max_vcpus, order);
for (;;) {
rc = opal_xive_alloc_vp_block(order);
diff --git a/arch/powerpc/tools/head_check.sh b/arch/powerpc/tools/head_check.sh
new file mode 100644
index 000000000000..ad9e57209aa4
--- /dev/null
+++ b/arch/powerpc/tools/head_check.sh
@@ -0,0 +1,78 @@
+# Copyright © 2016 IBM Corporation
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version
+# 2 of the License, or (at your option) any later version.
+
+# This script checks the head of a vmlinux for linker stubs that
+# break our placement of fixed-location code for 64-bit.
+
+# based on relocs_check.pl
+# Copyright © 2009 IBM Corporation
+
+# NOTE!
+#
+# If the build dies here, it's likely that code in head_64.S/exception-64*.S
+# or nearby is branching to labels it can't reach directly, which results in
+# the linker inserting branch stubs. This can move code around in ways that
+# break the fixed section calculations (head-64.h). To debug this, disassemble
+# the vmlinux and look for branch stubs (long_branch, plt_branch, etc.) in the
+# fixed section region (0 - 0x8000ish). Check what code is calling those stubs
+# and, if possible, change it so a direct branch can reach the target.
+#
+# A ".linker_stub_catch" section is used to catch some stubs generated by
+# early .text code, which tend to get placed at the start of the section.
+# If there are too many such stubs, they can overflow this section. Expanding
+# it may help (or reducing the number of stub branches).
+#
+# Linker stubs use the TOC pointer, so even if fixed section code could
+# tolerate them being inserted into head code, they can't be allowed in low
+# level entry code (boot, interrupt vectors, etc) until r2 is set up. This
+# could cause the kernel to die in early boot.
+
+# Turn this on if you want more debug output:
+# set -x
+
+if [ $# -lt 2 ]; then
+ echo "$0 [path to nm] [path to vmlinux]" 1>&2
+ exit 1
+fi
+
+# Have Kbuild supply the path to nm so we handle cross compilation.
+nm="$1"
+vmlinux="$2"
+
+# gcc-4.6-era toolchains make _stext an A (absolute) symbol rather than T
+$nm "$vmlinux" | grep -e " [TA] _stext$" -e " t start_first_256B$" -e " a text_start$" -e " t start_text$" -m4 > .tmp_symbols.txt
+
+
+vma=$(cat .tmp_symbols.txt | grep -e " [TA] _stext$" | cut -d' ' -f1)
+
+expected_start_head_addr=$vma
+
+start_head_addr=$(cat .tmp_symbols.txt | grep " t start_first_256B$" | cut -d' ' -f1)
+
+if [ "$start_head_addr" != "$expected_start_head_addr" ]; then
+ echo "ERROR: head code starts at $start_head_addr, should be $expected_start_head_addr"
+ echo "ERROR: try to enable LD_HEAD_STUB_CATCH config option"
+ echo "ERROR: see comments in arch/powerpc/tools/head_check.sh"
+
+ exit 1
+fi
+
+top_vma=$(echo $vma | cut -d'0' -f1)
+
+expected_start_text_addr=$(cat .tmp_symbols.txt | grep " a text_start$" | cut -d' ' -f1 | sed "s/^0/$top_vma/")
+
+start_text_addr=$(cat .tmp_symbols.txt | grep " t start_text$" | cut -d' ' -f1)
+
+if [ "$start_text_addr" != "$expected_start_text_addr" ]; then
+ echo "ERROR: start_text address is $start_text_addr, should be $expected_start_text_addr"
+ echo "ERROR: try to enable LD_HEAD_STUB_CATCH config option"
+ echo "ERROR: see comments in arch/powerpc/tools/head_check.sh"
+
+ exit 1
+fi
+
+rm -f .tmp_symbols.txt
diff --git a/arch/powerpc/tools/unrel_branch_check.sh b/arch/powerpc/tools/unrel_branch_check.sh
new file mode 100755
index 000000000000..1e972df3107e
--- /dev/null
+++ b/arch/powerpc/tools/unrel_branch_check.sh
@@ -0,0 +1,57 @@
+# Copyright © 2016 IBM Corporation
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version
+# 2 of the License, or (at your option) any later version.
+#
+# This script checks the relocations of a vmlinux for "suspicious"
+# branches from unrelocated code (head_64.S code).
+
+# Turn this on if you want more debug output:
+# set -x
+
+# Have Kbuild supply the path to objdump so we handle cross compilation.
+objdump="$1"
+vmlinux="$2"
+
+#__end_interrupts should be located within the first 64K
+
+end_intr=0x$(
+"$objdump" -R "$vmlinux" -d --start-address=0xc000000000000000 \
+ --stop-address=0xc000000000010000 |
+grep '\<__end_interrupts>:' |
+awk '{print $1}'
+)
+
+BRANCHES=$(
+"$objdump" -R "$vmlinux" -D --start-address=0xc000000000000000 \
+ --stop-address=${end_intr} |
+grep -e "^c[0-9a-f]*:[[:space:]]*\([0-9a-f][0-9a-f][[:space:]]\)\{4\}[[:space:]]*b" |
+grep -v '\<__start_initialization_multiplatform>' |
+grep -v -e 'b.\?.\?ctr' |
+grep -v -e 'b.\?.\?lr' |
+sed 's/://' |
+awk '{ print $1 ":" $6 ":0x" $7 ":" $8 " "}'
+)
+
+for tuple in $BRANCHES
+do
+ from=`echo $tuple | cut -d':' -f1`
+ branch=`echo $tuple | cut -d':' -f2`
+ to=`echo $tuple | cut -d':' -f3 | sed 's/cr[0-7],//'`
+ sym=`echo $tuple | cut -d':' -f4`
+
+ if (( $to > $end_intr ))
+ then
+ if [ -z "$bad_branches" ]; then
+ echo "WARNING: Unrelocated relative branches"
+ bad_branches="yes"
+ fi
+ echo "$from $branch-> $to $sym"
+ fi
+done
+
+if [ -z "$bad_branches" ]; then
+ exit 0
+fi
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index f11f65634aab..08e367e3e8c3 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -53,6 +53,7 @@
#include <asm/xive.h>
#include <asm/opal.h>
#include <asm/firmware.h>
+#include <asm/code-patching.h>
#ifdef CONFIG_PPC64
#include <asm/hvcall.h>
@@ -837,7 +838,8 @@ static void insert_bpts(void)
store_inst(&bp->instr[0]);
if (bp->enabled & BP_CIABR)
continue;
- if (mwrite(bp->address, &bpinstr, 4) != 4) {
+ if (patch_instruction((unsigned int *)bp->address,
+ bpinstr) != 0) {
printf("Couldn't write instruction at %lx, "
"disabling breakpoint there\n", bp->address);
bp->enabled &= ~BP_TRAP;
@@ -874,7 +876,8 @@ static void remove_bpts(void)
continue;
if (mread(bp->address, &instr, 4) == 4
&& instr == bpinstr
- && mwrite(bp->address, &bp->instr, 4) != 4)
+ && patch_instruction(
+ (unsigned int *)bp->address, bp->instr[0]) != 0)
printf("Couldn't remove breakpoint at %lx\n",
bp->address);
else
@@ -1242,14 +1245,14 @@ bpt_cmds(void)
{
int cmd;
unsigned long a;
- int mode, i;
+ int i;
struct bpt *bp;
- const char badaddr[] = "Only kernel addresses are permitted "
- "for breakpoints\n";
cmd = inchar();
switch (cmd) {
-#ifndef CONFIG_8xx
+#ifndef CONFIG_PPC_8xx
+ static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n";
+ int mode;
case 'd': /* bd - hardware data breakpoint */
mode = 7;
cmd = inchar();
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 37abe86e5bc9..7eeb75d758c1 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -68,7 +68,7 @@ config S390
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_GCOV_PROFILE_ALL
- select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
select ARCH_HAS_KCOV
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 0ddd37e6c29d..b9300f8aee10 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -178,7 +178,6 @@ struct compat_statfs64 {
u32 f_spare[4];
};
-#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
#define COMPAT_RLIM_INFINITY 0xffffffff
typedef u32 compat_old_sigset_t; /* at least 32 bits */
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index d0441ad2a990..e508dff92535 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -59,7 +59,9 @@ union ctlreg0 {
unsigned long lap : 1; /* Low-address-protection control */
unsigned long : 4;
unsigned long edat : 1; /* Enhanced-DAT-enablement control */
- unsigned long : 4;
+ unsigned long : 2;
+ unsigned long iep : 1; /* Instruction-Execution-Protection */
+ unsigned long : 1;
unsigned long afp : 1; /* AFP-register control */
unsigned long vx : 1; /* Vector enablement control */
unsigned long : 7;
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 3108b8dbe266..512ad0eaa11a 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -8,8 +8,6 @@
#include <linux/dma-debug.h>
#include <linux/io.h>
-#define DMA_ERROR_CODE (~(dma_addr_t) 0x0)
-
extern const struct dma_map_ops s390_pci_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index cd546a245c68..d95869ce3ca2 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -39,7 +39,7 @@ static inline int prepare_hugepage_range(struct file *file,
#define arch_clear_hugepage_flags(page) do { } while (0)
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
+ pte_t *ptep, unsigned long sz)
{
if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
pte_val(*ptep) = _REGION3_ENTRY_EMPTY;
@@ -112,4 +112,7 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
return pte_modify(pte, newprot);
}
+#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
+static inline bool gigantic_page_supported(void) { return true; }
+#endif
#endif /* _ASM_S390_HUGETLB_H */
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index 2f924bc30e35..dccf24ee26d3 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -41,24 +41,6 @@
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_S390
-/*
- * Size for s390x ELF notes per CPU
- *
- * Seven notes plus zero note at the end: prstatus, fpregset, timer,
- * tod_cmp, tod_reg, control regs, and prefix
- */
-#define KEXEC_NOTE_BYTES \
- (ALIGN(sizeof(struct elf_note), 4) * 8 + \
- ALIGN(sizeof("CORE"), 4) * 7 + \
- ALIGN(sizeof(struct elf_prstatus), 4) + \
- ALIGN(sizeof(elf_fpregset_t), 4) + \
- ALIGN(sizeof(u64), 4) + \
- ALIGN(sizeof(u64), 4) + \
- ALIGN(sizeof(u32), 4) + \
- ALIGN(sizeof(u64) * 16, 4) + \
- ALIGN(sizeof(u32), 4) \
- )
-
/* Provide a dummy definition to avoid build failures. */
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs) { }
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 6baae236f461..a409d5991934 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -42,9 +42,11 @@
#define KVM_HALT_POLL_NS_DEFAULT 80000
/* s390-specific vcpu->requests bit members */
-#define KVM_REQ_ENABLE_IBS 8
-#define KVM_REQ_DISABLE_IBS 9
-#define KVM_REQ_ICPT_OPEREXC 10
+#define KVM_REQ_ENABLE_IBS KVM_ARCH_REQ(0)
+#define KVM_REQ_DISABLE_IBS KVM_ARCH_REQ(1)
+#define KVM_REQ_ICPT_OPEREXC KVM_ARCH_REQ(2)
+#define KVM_REQ_START_MIGRATION KVM_ARCH_REQ(3)
+#define KVM_REQ_STOP_MIGRATION KVM_ARCH_REQ(4)
#define SIGP_CTRL_C 0x80
#define SIGP_CTRL_SCN_MASK 0x3f
@@ -56,7 +58,7 @@ union bsca_sigp_ctrl {
__u8 r : 1;
__u8 scn : 6;
};
-} __packed;
+};
union esca_sigp_ctrl {
__u16 value;
@@ -65,14 +67,14 @@ union esca_sigp_ctrl {
__u8 reserved: 7;
__u8 scn;
};
-} __packed;
+};
struct esca_entry {
union esca_sigp_ctrl sigp_ctrl;
__u16 reserved1[3];
__u64 sda;
__u64 reserved2[6];
-} __packed;
+};
struct bsca_entry {
__u8 reserved0;
@@ -80,7 +82,7 @@ struct bsca_entry {
__u16 reserved[3];
__u64 sda;
__u64 reserved2[2];
-} __attribute__((packed));
+};
union ipte_control {
unsigned long val;
@@ -97,7 +99,7 @@ struct bsca_block {
__u64 mcn;
__u64 reserved2;
struct bsca_entry cpu[KVM_S390_BSCA_CPU_SLOTS];
-} __attribute__((packed));
+};
struct esca_block {
union ipte_control ipte_control;
@@ -105,7 +107,7 @@ struct esca_block {
__u64 mcn[4];
__u64 reserved2[20];
struct esca_entry cpu[KVM_S390_ESCA_CPU_SLOTS];
-} __packed;
+};
/*
* This struct is used to store some machine check info from lowcore
@@ -274,7 +276,7 @@ struct kvm_s390_sie_block {
struct kvm_s390_itdb {
__u8 data[256];
-} __packed;
+};
struct sie_page {
struct kvm_s390_sie_block sie_block;
@@ -282,7 +284,7 @@ struct sie_page {
__u8 reserved218[1000]; /* 0x0218 */
struct kvm_s390_itdb itdb; /* 0x0600 */
__u8 reserved700[2304]; /* 0x0700 */
-} __packed;
+};
struct kvm_vcpu_stat {
u64 exit_userspace;
@@ -695,7 +697,7 @@ struct sie_page2 {
__u64 fac_list[S390_ARCH_FAC_LIST_SIZE_U64]; /* 0x0000 */
struct kvm_s390_crypto_cb crycb; /* 0x0800 */
u8 reserved900[0x1000 - 0x900]; /* 0x0900 */
-} __packed;
+};
struct kvm_s390_vsie {
struct mutex mutex;
@@ -705,6 +707,12 @@ struct kvm_s390_vsie {
struct page *pages[KVM_MAX_VCPUS];
};
+struct kvm_s390_migration_state {
+ unsigned long bitmap_size; /* in bits (number of guest pages) */
+ atomic64_t dirty_pages; /* number of dirty pages */
+ unsigned long *pgste_bitmap;
+};
+
struct kvm_arch{
void *sca;
int use_esca;
@@ -732,6 +740,7 @@ struct kvm_arch{
struct kvm_s390_crypto crypto;
struct kvm_s390_vsie vsie;
u64 epoch;
+ struct kvm_s390_migration_state *migration_state;
/* subset of available cpu features enabled by user space */
DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
};
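
The new kvm_s390_migration_state pairs a pgste bitmap with an atomic count of the bits that are set, so vcpus can flag dirty guest pages concurrently while the query side only reads a single counter. A minimal userspace sketch of that bookkeeping, with invented names and no claim to match the kernel helpers exactly:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct migration_state_demo {
            unsigned long bitmap_size;      /* in bits (number of guest pages) */
            atomic_long dirty_pages;        /* number of bits currently set */
            unsigned long *pgste_bitmap;
    };

    /* Atomically set one bit; report whether it was already set. */
    static bool test_and_set_page(struct migration_state_demo *ms, unsigned long gfn)
    {
            unsigned long *word = &ms->pgste_bitmap[gfn / (CHAR_BIT * sizeof(long))];
            unsigned long mask = 1UL << (gfn % (CHAR_BIT * sizeof(long)));
            unsigned long old = __atomic_fetch_or(word, mask, __ATOMIC_SEQ_CST);

            return old & mask;
    }

    /* Bump the dirty counter only when the bit really flips from 0 to 1. */
    static void mark_page_dirty(struct migration_state_demo *ms, unsigned long gfn)
    {
            if (gfn < ms->bitmap_size && !test_and_set_page(ms, gfn))
                    atomic_fetch_add(&ms->dirty_pages, 1);
    }

The same flip-before-count rule shows up later in do_essa() and in kvm_s390_get_cmma_bits(), which only adjusts the counter when test_and_set_bit()/test_and_clear_bit() reports a real transition.
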
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index 13623b9991d4..9d91cf3e427f 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -26,6 +26,12 @@
#define MCCK_CODE_PSW_MWP_VALID _BITUL(63 - 20)
#define MCCK_CODE_PSW_IA_VALID _BITUL(63 - 23)
+#define MCCK_CR14_CR_PENDING_SUB_MASK (1 << 28)
+#define MCCK_CR14_RECOVERY_SUB_MASK (1 << 27)
+#define MCCK_CR14_DEGRAD_SUB_MASK (1 << 26)
+#define MCCK_CR14_EXT_DAMAGE_SUB_MASK (1 << 25)
+#define MCCK_CR14_WARN_SUB_MASK (1 << 24)
+
#ifndef __ASSEMBLY__
union mci {
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 6ba0bf928909..6bc941be6921 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -64,6 +64,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
{
unsigned long mask = -1UL;
+ /*
+ * No arguments for this syscall, there's nothing to do.
+ */
+ if (!n)
+ return;
+
BUG_ON(i + n > 6);
#ifdef CONFIG_COMPAT
if (test_tsk_thread_flag(task, TIF_31BIT))
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 78f3f093d143..28b528197cf5 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -276,23 +276,6 @@ static inline unsigned long strnlen_user(const char __user *src, unsigned long n
return __strnlen_user(src, n);
}
-/**
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-#define strlen_user(str) strnlen_user(str, ~0UL)
-
/*
* Zero Userspace
*/
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 3dd2a1d308dd..69d09c39bbcd 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -28,6 +28,7 @@
#define KVM_DEV_FLIC_CLEAR_IO_IRQ 8
#define KVM_DEV_FLIC_AISM 9
#define KVM_DEV_FLIC_AIRQ_INJECT 10
+#define KVM_DEV_FLIC_AISM_ALL 11
/*
* We can have up to 4*64k pending subchannels + 8 adapter interrupts,
* as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts.
@@ -53,6 +54,11 @@ struct kvm_s390_ais_req {
__u16 mode;
};
+struct kvm_s390_ais_all {
+ __u8 simm;
+ __u8 nimm;
+};
+
#define KVM_S390_IO_ADAPTER_MASK 1
#define KVM_S390_IO_ADAPTER_MAP 2
#define KVM_S390_IO_ADAPTER_UNMAP 3
@@ -70,6 +76,7 @@ struct kvm_s390_io_adapter_req {
#define KVM_S390_VM_TOD 1
#define KVM_S390_VM_CRYPTO 2
#define KVM_S390_VM_CPU_MODEL 3
+#define KVM_S390_VM_MIGRATION 4
/* kvm attributes for mem_ctrl */
#define KVM_S390_VM_MEM_ENABLE_CMMA 0
@@ -151,6 +158,11 @@ struct kvm_s390_vm_cpu_subfunc {
#define KVM_S390_VM_CRYPTO_DISABLE_AES_KW 2
#define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW 3
+/* kvm attributes for migration mode */
+#define KVM_S390_VM_MIGRATION_STOP 0
+#define KVM_S390_VM_MIGRATION_START 1
+#define KVM_S390_VM_MIGRATION_STATUS 2
+
/* for KVM_GET_REGS and KVM_SET_REGS */
struct kvm_regs {
/* general purpose regs for s390 */
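
From userspace, the new KVM_S390_VM_MIGRATION group is reachable through the existing VM device-attribute ioctls. A rough sketch, assuming a VM file descriptor vm_fd and a uapi kvm.h that already carries the constants added above (error handling trimmed):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Turn migration mode on, then read back its status (0 = off, 1 = on). */
    static int start_and_query_migration(int vm_fd)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_S390_VM_MIGRATION,
                    .attr  = KVM_S390_VM_MIGRATION_START,
            };
            uint64_t status = 0;

            if (ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr))
                    return -1;      /* e.g. ENXIO when the attribute is missing */

            attr.attr = KVM_S390_VM_MIGRATION_STATUS;
            attr.addr = (uint64_t)(uintptr_t)&status;   /* kernel writes a u64 here */
            if (ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr))
                    return -1;

            printf("migration mode: %s\n", status ? "on" : "off");
            return 0;
    }
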
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 875f8bea8c67..653cae5e1ee1 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -89,7 +89,7 @@ struct region3_table_entry_fc1 {
unsigned long f : 1; /* Fetch-Protection Bit */
unsigned long fc : 1; /* Format-Control */
unsigned long p : 1; /* DAT-Protection Bit */
- unsigned long co : 1; /* Change-Recording Override */
+ unsigned long iep: 1; /* Instruction-Execution-Protection */
unsigned long : 2;
unsigned long i : 1; /* Region-Invalid Bit */
unsigned long cr : 1; /* Common-Region Bit */
@@ -131,7 +131,7 @@ struct segment_entry_fc1 {
unsigned long f : 1; /* Fetch-Protection Bit */
unsigned long fc : 1; /* Format-Control */
unsigned long p : 1; /* DAT-Protection Bit */
- unsigned long co : 1; /* Change-Recording Override */
+ unsigned long iep: 1; /* Instruction-Execution-Protection */
unsigned long : 2;
unsigned long i : 1; /* Segment-Invalid Bit */
unsigned long cs : 1; /* Common-Segment Bit */
@@ -168,7 +168,8 @@ union page_table_entry {
unsigned long z : 1; /* Zero Bit */
unsigned long i : 1; /* Page-Invalid Bit */
unsigned long p : 1; /* DAT-Protection Bit */
- unsigned long : 9;
+ unsigned long iep: 1; /* Instruction-Execution-Protection */
+ unsigned long : 8;
};
};
@@ -241,7 +242,7 @@ struct ale {
unsigned long asteo : 25; /* ASN-Second-Table-Entry Origin */
unsigned long : 6;
unsigned long astesn : 32; /* ASTE Sequence Number */
-} __packed;
+};
struct aste {
unsigned long i : 1; /* ASX-Invalid Bit */
@@ -257,7 +258,7 @@ struct aste {
unsigned long ald : 32;
unsigned long astesn : 32;
/* .. more fields there */
-} __packed;
+};
int ipte_lock_held(struct kvm_vcpu *vcpu)
{
@@ -485,6 +486,7 @@ enum prot_type {
PROT_TYPE_KEYC = 1,
PROT_TYPE_ALC = 2,
PROT_TYPE_DAT = 3,
+ PROT_TYPE_IEP = 4,
};
static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
@@ -500,6 +502,9 @@ static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,
switch (code) {
case PGM_PROTECTION:
switch (prot) {
+ case PROT_TYPE_IEP:
+ tec->b61 = 1;
+ /* FALL THROUGH */
case PROT_TYPE_LA:
tec->b56 = 1;
break;
@@ -591,6 +596,7 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
* @gpa: points to where guest physical (absolute) address should be stored
* @asce: effective asce
* @mode: indicates the access mode to be used
+ * @prot: returns the type for protection exceptions
*
* Translate a guest virtual address into a guest absolute address by means
* of dynamic address translation as specified by the architecture.
@@ -606,19 +612,21 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
*/
static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
unsigned long *gpa, const union asce asce,
- enum gacc_mode mode)
+ enum gacc_mode mode, enum prot_type *prot)
{
union vaddress vaddr = {.addr = gva};
union raddress raddr = {.addr = gva};
union page_table_entry pte;
int dat_protection = 0;
+ int iep_protection = 0;
union ctlreg0 ctlreg0;
unsigned long ptr;
- int edat1, edat2;
+ int edat1, edat2, iep;
ctlreg0.val = vcpu->arch.sie_block->gcr[0];
edat1 = ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8);
edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78);
+ iep = ctlreg0.iep && test_kvm_facility(vcpu->kvm, 130);
if (asce.r)
goto real_address;
ptr = asce.origin * 4096;
@@ -702,6 +710,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
return PGM_TRANSLATION_SPEC;
if (rtte.fc && edat2) {
dat_protection |= rtte.fc1.p;
+ iep_protection = rtte.fc1.iep;
raddr.rfaa = rtte.fc1.rfaa;
goto absolute_address;
}
@@ -729,6 +738,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
return PGM_TRANSLATION_SPEC;
if (ste.fc && edat1) {
dat_protection |= ste.fc1.p;
+ iep_protection = ste.fc1.iep;
raddr.sfaa = ste.fc1.sfaa;
goto absolute_address;
}
@@ -745,12 +755,19 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
if (pte.z)
return PGM_TRANSLATION_SPEC;
dat_protection |= pte.p;
+ iep_protection = pte.iep;
raddr.pfra = pte.pfra;
real_address:
raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
absolute_address:
- if (mode == GACC_STORE && dat_protection)
+ if (mode == GACC_STORE && dat_protection) {
+ *prot = PROT_TYPE_DAT;
return PGM_PROTECTION;
+ }
+ if (mode == GACC_IFETCH && iep_protection && iep) {
+ *prot = PROT_TYPE_IEP;
+ return PGM_PROTECTION;
+ }
if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
return PGM_ADDRESSING;
*gpa = raddr.addr;
@@ -782,6 +799,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
int lap_enabled, rc = 0;
+ enum prot_type prot;
lap_enabled = low_address_protection_enabled(vcpu, asce);
while (nr_pages) {
@@ -791,7 +809,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
PROT_TYPE_LA);
ga &= PAGE_MASK;
if (psw_bits(*psw).dat) {
- rc = guest_translate(vcpu, ga, pages, asce, mode);
+ rc = guest_translate(vcpu, ga, pages, asce, mode, &prot);
if (rc < 0)
return rc;
} else {
@@ -800,7 +818,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
rc = PGM_ADDRESSING;
}
if (rc)
- return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_DAT);
+ return trans_exc(vcpu, rc, ga, ar, mode, prot);
ga += PAGE_SIZE;
pages++;
nr_pages--;
@@ -886,6 +904,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
unsigned long *gpa, enum gacc_mode mode)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
+ enum prot_type prot;
union asce asce;
int rc;
@@ -900,9 +919,9 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
}
if (psw_bits(*psw).dat && !asce.r) { /* Use DAT? */
- rc = guest_translate(vcpu, gva, gpa, asce, mode);
+ rc = guest_translate(vcpu, gva, gpa, asce, mode, &prot);
if (rc > 0)
- return trans_exc(vcpu, rc, gva, 0, mode, PROT_TYPE_DAT);
+ return trans_exc(vcpu, rc, gva, 0, mode, prot);
} else {
*gpa = kvm_s390_real_to_abs(vcpu, gva);
if (kvm_is_error_gpa(vcpu->kvm, *gpa))
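
The point of threading the new prot out-parameter through guest_translate() is that a protection exception now has to say why it fired: DAT write protection sets one TEID bit, instruction-execution protection another. A stripped-down sketch of that decision, using invented stand-in types:

    #include <stdbool.h>

    enum prot_type_demo { PROT_NONE, PROT_DAT, PROT_IEP };
    enum gacc_mode_demo { GACC_FETCH, GACC_STORE, GACC_IFETCH };

    struct pte_flags_demo {
            bool p;         /* DAT-protection (write protect) */
            bool iep;       /* instruction-execution protection */
    };

    /*
     * Mirror of the tail of guest_translate(): stores trip on DAT protection,
     * instruction fetches trip on IEP, but only if the guest enabled IEP in
     * CR0 and the facility is offered to it.
     */
    static int check_protection(struct pte_flags_demo pte, enum gacc_mode_demo mode,
                                bool iep_enabled, enum prot_type_demo *prot)
    {
            *prot = PROT_NONE;
            if (mode == GACC_STORE && pte.p) {
                    *prot = PROT_DAT;
                    return -1;      /* stands in for PGM_PROTECTION */
            }
            if (mode == GACC_IFETCH && pte.iep && iep_enabled) {
                    *prot = PROT_IEP;
                    return -1;
            }
            return 0;
    }
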
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 2d120fef7d90..a619ddae610d 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -251,8 +251,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
if (psw_mchk_disabled(vcpu))
active_mask &= ~IRQ_PEND_MCHK_MASK;
+ /*
+ * Check both floating and local interrupt's cr14 because
+ * bit IRQ_PEND_MCHK_REP could be set in both cases.
+ */
if (!(vcpu->arch.sie_block->gcr[14] &
- vcpu->kvm->arch.float_int.mchk.cr14))
+ (vcpu->kvm->arch.float_int.mchk.cr14 |
+ vcpu->arch.local_int.irq.mchk.cr14)))
__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);
/*
@@ -1876,6 +1881,28 @@ out:
return ret < 0 ? ret : n;
}
+static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+ struct kvm_s390_ais_all ais;
+
+ if (attr->attr < sizeof(ais))
+ return -EINVAL;
+
+ if (!test_kvm_facility(kvm, 72))
+ return -ENOTSUPP;
+
+ mutex_lock(&fi->ais_lock);
+ ais.simm = fi->simm;
+ ais.nimm = fi->nimm;
+ mutex_unlock(&fi->ais_lock);
+
+ if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
+ return -EFAULT;
+
+ return 0;
+}
+
static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
int r;
@@ -1885,6 +1912,9 @@ static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
attr->attr);
break;
+ case KVM_DEV_FLIC_AISM_ALL:
+ r = flic_ais_mode_get_all(dev->kvm, attr);
+ break;
default:
r = -EINVAL;
}
@@ -2235,6 +2265,25 @@ static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
return kvm_s390_inject_airq(kvm, adapter);
}
+static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
+ struct kvm_s390_ais_all ais;
+
+ if (!test_kvm_facility(kvm, 72))
+ return -ENOTSUPP;
+
+ if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
+ return -EFAULT;
+
+ mutex_lock(&fi->ais_lock);
+ fi->simm = ais.simm;
+ fi->nimm = ais.nimm;
+ mutex_unlock(&fi->ais_lock);
+
+ return 0;
+}
+
static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
int r = 0;
@@ -2277,6 +2326,9 @@ static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
case KVM_DEV_FLIC_AIRQ_INJECT:
r = flic_inject_airq(dev->kvm, attr);
break;
+ case KVM_DEV_FLIC_AISM_ALL:
+ r = flic_ais_mode_set_all(dev->kvm, attr);
+ break;
default:
r = -EINVAL;
}
@@ -2298,6 +2350,7 @@ static int flic_has_attr(struct kvm_device *dev,
case KVM_DEV_FLIC_CLEAR_IO_IRQ:
case KVM_DEV_FLIC_AISM:
case KVM_DEV_FLIC_AIRQ_INJECT:
+ case KVM_DEV_FLIC_AISM_ALL:
return 0;
}
return -ENXIO;
@@ -2415,6 +2468,42 @@ static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
return ret;
}
+/*
+ * Inject the machine check to the guest.
+ */
+void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
+ struct mcck_volatile_info *mcck_info)
+{
+ struct kvm_s390_interrupt_info inti;
+ struct kvm_s390_irq irq;
+ struct kvm_s390_mchk_info *mchk;
+ union mci mci;
+ __u64 cr14 = 0; /* upper bits are not used */
+
+ mci.val = mcck_info->mcic;
+ if (mci.sr)
+ cr14 |= MCCK_CR14_RECOVERY_SUB_MASK;
+ if (mci.dg)
+ cr14 |= MCCK_CR14_DEGRAD_SUB_MASK;
+ if (mci.w)
+ cr14 |= MCCK_CR14_WARN_SUB_MASK;
+
+ mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
+ mchk->cr14 = cr14;
+ mchk->mcic = mcck_info->mcic;
+ mchk->ext_damage_code = mcck_info->ext_damage_code;
+ mchk->failing_storage_address = mcck_info->failing_storage_address;
+ if (mci.ck) {
+ /* Inject the floating machine check */
+ inti.type = KVM_S390_MCHK;
+ WARN_ON_ONCE(__inject_vm(vcpu->kvm, &inti));
+ } else {
+ /* Inject the machine check to specified vcpu */
+ irq.type = KVM_S390_MCHK;
+ WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
+ }
+}
+
int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
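
kvm_s390_reinject_machine_check() condenses three bits of the machine-check interruption code into the CR14 subclass mask the guest expects. That mapping, pulled out as a self-contained function (mask values copied from the asm/nmi.h additions earlier in this series):

    #include <stdbool.h>
    #include <stdint.h>

    #define MCCK_CR14_RECOVERY_SUB_MASK     (1 << 27)
    #define MCCK_CR14_DEGRAD_SUB_MASK       (1 << 26)
    #define MCCK_CR14_WARN_SUB_MASK         (1 << 24)

    /* The three MCIC bits consulted when reinjecting a machine check. */
    struct mci_bits_demo {
            bool sr;        /* system recovery */
            bool dg;        /* degradation */
            bool w;         /* warning */
    };

    static uint64_t mci_to_cr14(struct mci_bits_demo mci)
    {
            uint64_t cr14 = 0;      /* upper bits are not used */

            if (mci.sr)
                    cr14 |= MCCK_CR14_RECOVERY_SUB_MASK;
            if (mci.dg)
                    cr14 |= MCCK_CR14_DEGRAD_SUB_MASK;
            if (mci.w)
                    cr14 |= MCCK_CR14_WARN_SUB_MASK;
            return cr14;
    }

A channel-report machine check (mci.ck set) is then injected as a floating interrupt for the whole VM; anything else goes to the vcpu that took the exit.
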
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index b0d7de5a533d..3f2884e99ed4 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -30,6 +30,7 @@
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
+#include <linux/string.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
@@ -386,6 +387,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_SKEYS:
case KVM_CAP_S390_IRQ_STATE:
case KVM_CAP_S390_USER_INSTR0:
+ case KVM_CAP_S390_CMMA_MIGRATION:
case KVM_CAP_S390_AIS:
r = 1;
break;
@@ -749,6 +751,129 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
return 0;
}
+static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
+{
+ int cx;
+ struct kvm_vcpu *vcpu;
+
+ kvm_for_each_vcpu(cx, vcpu, kvm)
+ kvm_s390_sync_request(req, vcpu);
+}
+
+/*
+ * Must be called with kvm->srcu held to avoid races on memslots, and with
+ * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
+ */
+static int kvm_s390_vm_start_migration(struct kvm *kvm)
+{
+ struct kvm_s390_migration_state *mgs;
+ struct kvm_memory_slot *ms;
+ /* should be the only one */
+ struct kvm_memslots *slots;
+ unsigned long ram_pages;
+ int slotnr;
+
+ /* migration mode already enabled */
+ if (kvm->arch.migration_state)
+ return 0;
+
+ slots = kvm_memslots(kvm);
+ if (!slots || !slots->used_slots)
+ return -EINVAL;
+
+ mgs = kzalloc(sizeof(*mgs), GFP_KERNEL);
+ if (!mgs)
+ return -ENOMEM;
+ kvm->arch.migration_state = mgs;
+
+ if (kvm->arch.use_cmma) {
+ /*
+ * Get the last slot. They should be sorted by base_gfn, so the
+ * last slot is also the one at the end of the address space.
+ * We have verified above that at least one slot is present.
+ */
+ ms = slots->memslots + slots->used_slots - 1;
+ /* round up so we only use full longs */
+ ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
+ /* allocate enough bytes to store all the bits */
+ mgs->pgste_bitmap = vmalloc(ram_pages / 8);
+ if (!mgs->pgste_bitmap) {
+ kfree(mgs);
+ kvm->arch.migration_state = NULL;
+ return -ENOMEM;
+ }
+
+ mgs->bitmap_size = ram_pages;
+ atomic64_set(&mgs->dirty_pages, ram_pages);
+ /* mark all the pages in active slots as dirty */
+ for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
+ ms = slots->memslots + slotnr;
+ bitmap_set(mgs->pgste_bitmap, ms->base_gfn, ms->npages);
+ }
+
+ kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
+ }
+ return 0;
+}
+
+/*
+ * Must be called with kvm->lock to avoid races with ourselves and
+ * kvm_s390_vm_start_migration.
+ */
+static int kvm_s390_vm_stop_migration(struct kvm *kvm)
+{
+ struct kvm_s390_migration_state *mgs;
+
+ /* migration mode already disabled */
+ if (!kvm->arch.migration_state)
+ return 0;
+ mgs = kvm->arch.migration_state;
+ kvm->arch.migration_state = NULL;
+
+ if (kvm->arch.use_cmma) {
+ kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
+ vfree(mgs->pgste_bitmap);
+ }
+ kfree(mgs);
+ return 0;
+}
+
+static int kvm_s390_vm_set_migration(struct kvm *kvm,
+ struct kvm_device_attr *attr)
+{
+ int idx, res = -ENXIO;
+
+ mutex_lock(&kvm->lock);
+ switch (attr->attr) {
+ case KVM_S390_VM_MIGRATION_START:
+ idx = srcu_read_lock(&kvm->srcu);
+ res = kvm_s390_vm_start_migration(kvm);
+ srcu_read_unlock(&kvm->srcu, idx);
+ break;
+ case KVM_S390_VM_MIGRATION_STOP:
+ res = kvm_s390_vm_stop_migration(kvm);
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&kvm->lock);
+
+ return res;
+}
+
+static int kvm_s390_vm_get_migration(struct kvm *kvm,
+ struct kvm_device_attr *attr)
+{
+ u64 mig = (kvm->arch.migration_state != NULL);
+
+ if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
+ return -ENXIO;
+
+ if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
+ return -EFAULT;
+ return 0;
+}
+
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
u8 gtod_high;
@@ -1089,6 +1214,9 @@ static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
case KVM_S390_VM_CRYPTO:
ret = kvm_s390_vm_set_crypto(kvm, attr);
break;
+ case KVM_S390_VM_MIGRATION:
+ ret = kvm_s390_vm_set_migration(kvm, attr);
+ break;
default:
ret = -ENXIO;
break;
@@ -1111,6 +1239,9 @@ static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
case KVM_S390_VM_CPU_MODEL:
ret = kvm_s390_get_cpu_model(kvm, attr);
break;
+ case KVM_S390_VM_MIGRATION:
+ ret = kvm_s390_vm_get_migration(kvm, attr);
+ break;
default:
ret = -ENXIO;
break;
@@ -1178,6 +1309,9 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
break;
}
break;
+ case KVM_S390_VM_MIGRATION:
+ ret = 0;
+ break;
default:
ret = -ENXIO;
break;
@@ -1285,6 +1419,182 @@ out:
return r;
}
+/*
+ * Base address and length must be sent at the start of each block, therefore
+ * it's cheaper to send some clean data, as long as it's less than the size of
+ * two longs.
+ */
+#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
+/* for consistency */
+#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
+
+/*
+ * This function searches for the next page with dirty CMMA attributes, and
+ * saves the attributes in the buffer up to either the end of the buffer or
+ * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
+ * no trailing clean bytes are saved.
+ * In case no dirty bits were found, or if CMMA was not enabled or used, the
+ * output buffer will indicate 0 as length.
+ */
+static int kvm_s390_get_cmma_bits(struct kvm *kvm,
+ struct kvm_s390_cmma_log *args)
+{
+ struct kvm_s390_migration_state *s = kvm->arch.migration_state;
+ unsigned long bufsize, hva, pgstev, i, next, cur;
+ int srcu_idx, peek, r = 0, rr;
+ u8 *res;
+
+ cur = args->start_gfn;
+ i = next = pgstev = 0;
+
+ if (unlikely(!kvm->arch.use_cmma))
+ return -ENXIO;
+ /* Invalid/unsupported flags were specified */
+ if (args->flags & ~KVM_S390_CMMA_PEEK)
+ return -EINVAL;
+ /* Migration mode query, and we are not doing a migration */
+ peek = !!(args->flags & KVM_S390_CMMA_PEEK);
+ if (!peek && !s)
+ return -EINVAL;
+ /* CMMA is disabled or was not used, or the buffer has length zero */
+ bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
+ if (!bufsize || !kvm->mm->context.use_cmma) {
+ memset(args, 0, sizeof(*args));
+ return 0;
+ }
+
+ if (!peek) {
+ /* We are not peeking, and there are no dirty pages */
+ if (!atomic64_read(&s->dirty_pages)) {
+ memset(args, 0, sizeof(*args));
+ return 0;
+ }
+ cur = find_next_bit(s->pgste_bitmap, s->bitmap_size,
+ args->start_gfn);
+ if (cur >= s->bitmap_size) /* nothing found, loop back */
+ cur = find_next_bit(s->pgste_bitmap, s->bitmap_size, 0);
+ if (cur >= s->bitmap_size) { /* again! (very unlikely) */
+ memset(args, 0, sizeof(*args));
+ return 0;
+ }
+ next = find_next_bit(s->pgste_bitmap, s->bitmap_size, cur + 1);
+ }
+
+ res = vmalloc(bufsize);
+ if (!res)
+ return -ENOMEM;
+
+ args->start_gfn = cur;
+
+ down_read(&kvm->mm->mmap_sem);
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ while (i < bufsize) {
+ hva = gfn_to_hva(kvm, cur);
+ if (kvm_is_error_hva(hva)) {
+ r = -EFAULT;
+ break;
+ }
+ /* decrement only if we actually flipped the bit to 0 */
+ if (!peek && test_and_clear_bit(cur, s->pgste_bitmap))
+ atomic64_dec(&s->dirty_pages);
+ r = get_pgste(kvm->mm, hva, &pgstev);
+ if (r < 0)
+ pgstev = 0;
+ /* save the value */
+ res[i++] = (pgstev >> 24) & 0x3;
+ /*
+ * if the next bit is too far away, stop.
+ * if we reached the previous "next", find the next one
+ */
+ if (!peek) {
+ if (next > cur + KVM_S390_MAX_BIT_DISTANCE)
+ break;
+ if (cur == next)
+ next = find_next_bit(s->pgste_bitmap,
+ s->bitmap_size, cur + 1);
+ /* reached the end of the bitmap or of the buffer, stop */
+ if ((next >= s->bitmap_size) ||
+ (next >= args->start_gfn + bufsize))
+ break;
+ }
+ cur++;
+ }
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+ up_read(&kvm->mm->mmap_sem);
+ args->count = i;
+ args->remaining = s ? atomic64_read(&s->dirty_pages) : 0;
+
+ rr = copy_to_user((void __user *)args->values, res, args->count);
+ if (rr)
+ r = -EFAULT;
+
+ vfree(res);
+ return r;
+}
+
+/*
+ * This function sets the CMMA attributes for the given pages. If the input
+ * buffer has zero length, no action is taken, otherwise the attributes are
+ * set and the mm->context.use_cmma flag is set.
+ */
+static int kvm_s390_set_cmma_bits(struct kvm *kvm,
+ const struct kvm_s390_cmma_log *args)
+{
+ unsigned long hva, mask, pgstev, i;
+ uint8_t *bits;
+ int srcu_idx, r = 0;
+
+ mask = args->mask;
+
+ if (!kvm->arch.use_cmma)
+ return -ENXIO;
+ /* invalid/unsupported flags */
+ if (args->flags != 0)
+ return -EINVAL;
+ /* Enforce sane limit on memory allocation */
+ if (args->count > KVM_S390_CMMA_SIZE_MAX)
+ return -EINVAL;
+ /* Nothing to do */
+ if (args->count == 0)
+ return 0;
+
+ bits = vmalloc(sizeof(*bits) * args->count);
+ if (!bits)
+ return -ENOMEM;
+
+ r = copy_from_user(bits, (void __user *)args->values, args->count);
+ if (r) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ down_read(&kvm->mm->mmap_sem);
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ for (i = 0; i < args->count; i++) {
+ hva = gfn_to_hva(kvm, args->start_gfn + i);
+ if (kvm_is_error_hva(hva)) {
+ r = -EFAULT;
+ break;
+ }
+
+ pgstev = bits[i];
+ pgstev = pgstev << 24;
+ mask &= _PGSTE_GPS_USAGE_MASK;
+ set_pgste_bits(kvm->mm, hva, mask, pgstev);
+ }
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+ up_read(&kvm->mm->mmap_sem);
+
+ if (!kvm->mm->context.use_cmma) {
+ down_write(&kvm->mm->mmap_sem);
+ kvm->mm->context.use_cmma = 1;
+ up_write(&kvm->mm->mmap_sem);
+ }
+out:
+ vfree(bits);
+ return r;
+}
+
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -1363,6 +1673,29 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_s390_set_skeys(kvm, &args);
break;
}
+ case KVM_S390_GET_CMMA_BITS: {
+ struct kvm_s390_cmma_log args;
+
+ r = -EFAULT;
+ if (copy_from_user(&args, argp, sizeof(args)))
+ break;
+ r = kvm_s390_get_cmma_bits(kvm, &args);
+ if (!r) {
+ r = copy_to_user(argp, &args, sizeof(args));
+ if (r)
+ r = -EFAULT;
+ }
+ break;
+ }
+ case KVM_S390_SET_CMMA_BITS: {
+ struct kvm_s390_cmma_log args;
+
+ r = -EFAULT;
+ if (copy_from_user(&args, argp, sizeof(args)))
+ break;
+ r = kvm_s390_set_cmma_bits(kvm, &args);
+ break;
+ }
default:
r = -ENOTTY;
}
@@ -1631,6 +1964,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_s390_destroy_adapters(kvm);
kvm_s390_clear_float_irqs(kvm);
kvm_s390_vsie_destroy(kvm);
+ if (kvm->arch.migration_state) {
+ vfree(kvm->arch.migration_state->pgste_bitmap);
+ kfree(kvm->arch.migration_state);
+ }
KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}
@@ -1975,7 +2312,6 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
if (!vcpu->arch.sie_block->cbrlo)
return -ENOMEM;
- vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI;
return 0;
}
@@ -2439,7 +2775,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
kvm_s390_vcpu_request_handled(vcpu);
- if (!vcpu->requests)
+ if (!kvm_request_pending(vcpu))
return 0;
/*
* We use MMU_RELOAD just to re-arm the ipte notifier for the
@@ -2488,6 +2824,27 @@ retry:
goto retry;
}
+ if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
+ /*
+ * Disable CMMA virtualization; we will emulate the ESSA
+ * instruction manually, in order to provide additional
+ * functionalities needed for live migration.
+ */
+ vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
+ goto retry;
+ }
+
+ if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
+ /*
+ * Re-enable CMMA virtualization if CMMA is available and
+ * was used.
+ */
+ if ((vcpu->kvm->arch.use_cmma) &&
+ (vcpu->kvm->mm->context.use_cmma))
+ vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
+ goto retry;
+ }
+
/* nothing to do, just clear the request */
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
@@ -2682,6 +3039,9 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
+ struct mcck_volatile_info *mcck_info;
+ struct sie_page *sie_page;
+
VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
vcpu->arch.sie_block->icptcode);
trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
@@ -2692,6 +3052,15 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
+ if (exit_reason == -EINTR) {
+ VCPU_EVENT(vcpu, 3, "%s", "machine check");
+ sie_page = container_of(vcpu->arch.sie_block,
+ struct sie_page, sie_block);
+ mcck_info = &sie_page->mcck_info;
+ kvm_s390_reinject_machine_check(vcpu, mcck_info);
+ return 0;
+ }
+
if (vcpu->arch.sie_block->icptcode > 0) {
int rc = kvm_handle_sie_intercept(vcpu);
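
kvm_s390_get_cmma_bits() walks the dirty bitmap and cuts a block short once the next dirty page is more than KVM_S390_MAX_BIT_DISTANCE away, since restarting a block costs two longs of header anyway. A compact userspace model of that scan (naive find_next_bit, invented names, no locking):

    #include <stddef.h>
    #include <stdint.h>

    #define MAX_BIT_DISTANCE (2 * sizeof(void *))   /* as KVM_S390_MAX_BIT_DISTANCE */

    /* First set bit at or after 'start', or 'size' when there is none. */
    static size_t find_next_bit_demo(const unsigned long *map, size_t size, size_t start)
    {
            for (size_t i = start; i < size; i++)
                    if (map[i / (8 * sizeof(unsigned long))] &
                        (1UL << (i % (8 * sizeof(unsigned long)))))
                            return i;
            return size;
    }

    /*
     * Copy page states into buf starting at the first dirty page >= *start,
     * stopping at the end of the buffer, the end of the bitmap, or as soon
     * as the next dirty bit is farther than MAX_BIT_DISTANCE away.
     */
    static size_t collect_block(const unsigned long *dirty, size_t nbits,
                                const uint8_t *page_state, size_t *start,
                                uint8_t *buf, size_t bufsize)
    {
            size_t cur = find_next_bit_demo(dirty, nbits, *start);
            size_t i = 0;

            if (cur >= nbits || !bufsize)
                    return 0;                       /* nothing dirty to report */
            *start = cur;
            size_t next = find_next_bit_demo(dirty, nbits, cur + 1);

            while (i < bufsize) {
                    buf[i++] = page_state[cur];     /* the CMMA state of page 'cur' */
                    if (next > cur + MAX_BIT_DISTANCE)
                            break;                  /* clean gap too large */
                    if (cur == next)
                            next = find_next_bit_demo(dirty, nbits, cur + 1);
                    if (next >= nbits || next >= *start + bufsize)
                            break;                  /* end of bitmap or buffer */
                    cur++;
            }
            return i;
    }
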
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 55f5c8457d6d..6fedc8bc7a37 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -397,4 +397,6 @@ static inline int kvm_s390_use_sca_entries(void)
*/
return sclp.has_sigpif;
}
+void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
+ struct mcck_volatile_info *mcck_info);
#endif
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index e53292a89257..8a1dac793d6b 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -24,6 +24,7 @@
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
+#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
@@ -949,13 +950,72 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
return 0;
}
+static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
+{
+ struct kvm_s390_migration_state *ms = vcpu->kvm->arch.migration_state;
+ int r1, r2, nappended, entries;
+ unsigned long gfn, hva, res, pgstev, ptev;
+ unsigned long *cbrlo;
+
+ /*
+ * We don't need to set SD.FPF.SK to 1 here, because if we have a
+ * machine check here we either handle it or crash
+ */
+
+ kvm_s390_get_regs_rre(vcpu, &r1, &r2);
+ gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
+ hva = gfn_to_hva(vcpu->kvm, gfn);
+ entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
+
+ if (kvm_is_error_hva(hva))
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+
+ nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
+ if (nappended < 0) {
+ res = orc ? 0x10 : 0;
+ vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */
+ return 0;
+ }
+ res = (pgstev & _PGSTE_GPS_USAGE_MASK) >> 22;
+ /*
+ * Set the block-content state part of the result. 0 means resident, so
+ * nothing to do if the page is valid. 2 is for preserved pages
+ * (non-present and non-zero), and 3 for zero pages (non-present and
+ * zero).
+ */
+ if (ptev & _PAGE_INVALID) {
+ res |= 2;
+ if (pgstev & _PGSTE_GPS_ZERO)
+ res |= 1;
+ }
+ vcpu->run->s.regs.gprs[r1] = res;
+ /*
+ * It is possible that all the normal 511 slots were full, in which case
+ * we will now write in the 512th slot, which is reserved for host use.
+ * In both cases we let the normal essa handling code process all the
+ * slots, including the reserved one, if needed.
+ */
+ if (nappended > 0) {
+ cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
+ cbrlo[entries] = gfn << PAGE_SHIFT;
+ }
+
+ if (orc) {
+ /* increment only if we are really flipping the bit to 1 */
+ if (!test_and_set_bit(gfn, ms->pgste_bitmap))
+ atomic64_inc(&ms->dirty_pages);
+ }
+
+ return nappended;
+}
+
static int handle_essa(struct kvm_vcpu *vcpu)
{
/* entries expected to be 1FF */
int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
unsigned long *cbrlo;
struct gmap *gmap;
- int i;
+ int i, orc;
VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
gmap = vcpu->arch.gmap;
@@ -965,12 +1025,45 @@ static int handle_essa(struct kvm_vcpu *vcpu)
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
-
- if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
+ /* Check for invalid operation request code */
+ orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
+ if (orc > ESSA_MAX)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- /* Retry the ESSA instruction */
- kvm_s390_retry_instr(vcpu);
+ if (likely(!vcpu->kvm->arch.migration_state)) {
+ /*
+ * CMMA is enabled in the KVM settings, but is disabled in
+ * the SIE block and in the mm_context, and we are not doing
+ * a migration. Enable CMMA in the mm_context.
+ * Since we need to take a write lock to write to the context
+ * to avoid races with storage keys handling, we check if the
+ * value really needs to be written to; if the value is
+ * already correct, we do nothing and avoid the lock.
+ */
+ if (vcpu->kvm->mm->context.use_cmma == 0) {
+ down_write(&vcpu->kvm->mm->mmap_sem);
+ vcpu->kvm->mm->context.use_cmma = 1;
+ up_write(&vcpu->kvm->mm->mmap_sem);
+ }
+ /*
+ * If we are here, we are supposed to have CMMA enabled in
+ * the SIE block. Enabling CMMA works on a per-CPU basis,
+ * while the context use_cmma flag is per process.
+ * It's possible that the context flag is enabled and the
+ * SIE flag is not, so we set the flag always; if it was
+ * already set, nothing changes, otherwise we enable it
+ * on this CPU too.
+ */
+ vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
+ /* Retry the ESSA instruction */
+ kvm_s390_retry_instr(vcpu);
+ } else {
+ /* Account for the possible extra cbrl entry */
+ i = do_essa(vcpu, orc);
+ if (i < 0)
+ return i;
+ entries += i;
+ }
vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */
cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
down_read(&gmap->mm->mmap_sem);
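
do_essa() hands the guest back a result register built from the PGSTE usage state plus a block-content code: 0 for resident pages, 2 for preserved (invalid, non-zero) pages, 3 for logically zero pages. The encoding, spelled out on its own (a sketch; the exact bit positions of the usage field are left out):

    #include <stdbool.h>

    /* Operation request code: the top nibble of the instruction's IPB word. */
    static int essa_orc(unsigned int ipb)
    {
            return (ipb & 0xf0000000) >> 28;
    }

    /* Combine the (already shifted) usage state with the block-content state. */
    static unsigned long essa_result(unsigned long usage_state,
                                     bool page_invalid, bool page_zero)
    {
            unsigned long res = usage_state;

            if (page_invalid) {
                    res |= 2;               /* preserved ... */
                    if (page_zero)
                            res |= 1;       /* ... or zero: 2 | 1 == 3 */
            }
            return res;
    }
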
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 4719ecb9ab42..715c19c45d9a 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -26,16 +26,21 @@
struct vsie_page {
struct kvm_s390_sie_block scb_s; /* 0x0000 */
+ /*
+ * the backup info for machine check. ensure it's at
+ * the same offset as that in struct sie_page!
+ */
+ struct mcck_volatile_info mcck_info; /* 0x0200 */
/* the pinned originial scb */
- struct kvm_s390_sie_block *scb_o; /* 0x0200 */
+ struct kvm_s390_sie_block *scb_o; /* 0x0218 */
/* the shadow gmap in use by the vsie_page */
- struct gmap *gmap; /* 0x0208 */
+ struct gmap *gmap; /* 0x0220 */
/* address of the last reported fault to guest2 */
- unsigned long fault_addr; /* 0x0210 */
- __u8 reserved[0x0700 - 0x0218]; /* 0x0218 */
+ unsigned long fault_addr; /* 0x0228 */
+ __u8 reserved[0x0700 - 0x0230]; /* 0x0230 */
struct kvm_s390_crypto_cb crycb; /* 0x0700 */
__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE]; /* 0x0800 */
-} __packed;
+};
/* trigger a validity icpt for the given scb */
static int set_validity_icpt(struct kvm_s390_sie_block *scb,
@@ -801,6 +806,8 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+ struct mcck_volatile_info *mcck_info;
+ struct sie_page *sie_page;
int rc;
handle_last_fault(vcpu, vsie_page);
@@ -822,6 +829,14 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
local_irq_enable();
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ if (rc == -EINTR) {
+ VCPU_EVENT(vcpu, 3, "%s", "machine check");
+ sie_page = container_of(scb_s, struct sie_page, sie_block);
+ mcck_info = &sie_page->mcck_info;
+ kvm_s390_reinject_machine_check(vcpu, mcck_info);
+ return 0;
+ }
+
if (rc > 0)
rc = 0; /* we could still have an icpt */
else if (rc == -EFAULT)
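
The reshuffled vsie_page relies on mcck_info sitting at the same offset as in struct sie_page, as the new comment warns. In a standalone sketch that constraint can be made a compile-time check with offsetof and a static assertion (toy struct sizes chosen only to reproduce the 0x0200/0x0218 offsets documented above):

    #include <assert.h>
    #include <stddef.h>

    struct sie_block_demo  { char data[0x200]; };
    struct mcck_info_demo  { char data[0x18];  };

    struct sie_page_demo {
            struct sie_block_demo sie_block;    /* 0x0000 */
            struct mcck_info_demo mcck_info;    /* 0x0200 */
    };

    struct vsie_page_demo {
            struct sie_block_demo scb_s;        /* 0x0000 */
            struct mcck_info_demo mcck_info;    /* 0x0200, must match sie_page */
            void *scb_o;                        /* 0x0218 */
    };

    /* The "same offset" comment, turned into something the compiler enforces. */
    static_assert(offsetof(struct sie_page_demo, mcck_info) ==
                  offsetof(struct vsie_page_demo, mcck_info),
                  "mcck_info must be at the same offset in both layouts");

In kernel code the equivalent check would typically be a BUILD_BUG_ON; the sketch only illustrates the idea.
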
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index d3a5e39756f6..44a8e6f0391e 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -180,7 +180,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
return (pte_t *) pmdp;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgdp;
p4d_t *p4dp;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 3348e60dd8ad..8111694ce55a 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -166,43 +166,17 @@ unsigned long memory_block_size_bytes(void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
+int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
- unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
unsigned long start_pfn = PFN_DOWN(start);
unsigned long size_pages = PFN_DOWN(size);
- pg_data_t *pgdat = NODE_DATA(nid);
- struct zone *zone;
- int rc, i;
+ int rc;
rc = vmem_add_mapping(start, size);
if (rc)
return rc;
- for (i = 0; i < MAX_NR_ZONES; i++) {
- zone = pgdat->node_zones + i;
- if (zone_idx(zone) != ZONE_MOVABLE) {
- /* Add range within existing zone limits, if possible */
- zone_start_pfn = zone->zone_start_pfn;
- zone_end_pfn = zone->zone_start_pfn +
- zone->spanned_pages;
- } else {
- /* Add remaining range to ZONE_MOVABLE */
- zone_start_pfn = start_pfn;
- zone_end_pfn = start_pfn + size_pages;
- }
- if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
- continue;
- nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
- zone_end_pfn - start_pfn : size_pages;
- rc = __add_pages(nid, zone, start_pfn, nr_pages);
- if (rc)
- break;
- start_pfn += nr_pages;
- size_pages -= nr_pages;
- if (!size_pages)
- break;
- }
+ rc = __add_pages(nid, start_pfn, size_pages, want_memblock);
if (rc)
vmem_remove_mapping(start, size);
return rc;
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 8eb1cc341dab..0d300ee00f4e 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -14,6 +14,8 @@
#include <linux/pci.h>
#include <asm/pci_dma.h>
+#define S390_MAPPING_ERROR (~(dma_addr_t) 0x0)
+
static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;
@@ -281,7 +283,7 @@ static dma_addr_t dma_alloc_address(struct device *dev, int size)
out_error:
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
- return DMA_ERROR_CODE;
+ return S390_MAPPING_ERROR;
}
static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
@@ -329,7 +331,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
/* This rounds up number of pages based on size and offset */
nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
dma_addr = dma_alloc_address(dev, nr_pages);
- if (dma_addr == DMA_ERROR_CODE) {
+ if (dma_addr == S390_MAPPING_ERROR) {
ret = -ENOSPC;
goto out_err;
}
@@ -352,7 +354,7 @@ out_free:
out_err:
zpci_err("map error:\n");
zpci_err_dma(ret, pa);
- return DMA_ERROR_CODE;
+ return S390_MAPPING_ERROR;
}
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
@@ -429,7 +431,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
int ret;
dma_addr_base = dma_alloc_address(dev, nr_pages);
- if (dma_addr_base == DMA_ERROR_CODE)
+ if (dma_addr_base == S390_MAPPING_ERROR)
return -ENOMEM;
dma_addr = dma_addr_base;
@@ -476,7 +478,7 @@ static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
for (i = 1; i < nr_elements; i++) {
s = sg_next(s);
- s->dma_address = DMA_ERROR_CODE;
+ s->dma_address = S390_MAPPING_ERROR;
s->dma_length = 0;
if (s->offset || (size & ~PAGE_MASK) ||
@@ -525,6 +527,11 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
s->dma_length = 0;
}
}
+
+static int s390_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == S390_MAPPING_ERROR;
+}
int zpci_dma_init_device(struct zpci_dev *zdev)
{
@@ -659,6 +666,7 @@ const struct dma_map_ops s390_pci_dma_ops = {
.unmap_sg = s390_dma_unmap_sg,
.map_page = s390_dma_map_pages,
.unmap_page = s390_dma_unmap_pages,
+ .mapping_error = s390_mapping_error,
/* if we support direct DMA this must be conditional */
.is_phys = 0,
/* dma_supported is unconditionally true without a callback */
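
This hunk is part of a tree-wide move away from a global DMA_ERROR_CODE (also visible in the sh and sparc dma-mapping.h hunks below): each dma_map_ops provider keeps its own sentinel and exposes it only through a .mapping_error callback. Reduced to its essence, under invented names:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t dma_addr_t_demo;

    /* Driver-private "no mapping" value; callers never compare against it directly. */
    #define DEMO_MAPPING_ERROR (~(dma_addr_t_demo)0x0)

    static dma_addr_t_demo demo_map_page(bool out_of_space)
    {
            if (out_of_space)
                    return DEMO_MAPPING_ERROR;
            return 0x1000;                      /* some valid bus address */
    }

    /* What the new .mapping_error callback boils down to. */
    static bool demo_mapping_error(dma_addr_t_demo dma_addr)
    {
            return dma_addr == DEMO_MAPPING_ERROR;
    }
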
diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h
index 916e5dbf0bfd..0ef220474d9b 100644
--- a/arch/score/include/asm/uaccess.h
+++ b/arch/score/include/asm/uaccess.h
@@ -359,12 +359,6 @@ static inline int strncpy_from_user(char *dst, const char *src, long len)
return -EFAULT;
}
-extern int __strlen_user(const char *src);
-static inline long strlen_user(const char __user *src)
-{
- return __strlen_user(src);
-}
-
extern int __strnlen_user(const char *str, long len);
static inline long strnlen_user(const char __user *str, long len)
{
diff --git a/arch/score/lib/string.S b/arch/score/lib/string.S
index 16efa3ad037f..e0c0318c9010 100644
--- a/arch/score/lib/string.S
+++ b/arch/score/lib/string.S
@@ -104,34 +104,6 @@ ENTRY(__strnlen_user)
.previous
.align 2
-ENTRY(__strlen_user)
-0: lb r6, [r4]
- mv r7, r4
- extsb r6, r6
- cmpi.c r6, 0
- mv r4, r6
- beq .L27
-.L28:
-1: lb r6, [r7, 1]+
- addi r6, 1
- cmpi.c r6, 0
- bne .L28
-.L27:
- br r3
- .section .fixup, "ax"
- ldi r4, 0x0
- br r3
-99:
- ldi r4, 0
- br r3
- .previous
- .section __ex_table, "a"
- .align 2
- .word 0b ,99b
- .word 1b ,99b
- .previous
-
- .align 2
ENTRY(__copy_tofrom_user)
cmpi.c r6, 0
mv r10,r6
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index ee086958b2b2..640a85925060 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -2,6 +2,7 @@ config SUPERH
def_bool y
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select HAVE_PATA_PLATFORM
select CLKDEV_LOOKUP
select HAVE_IDE if HAS_IOPORT_MAP
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 6d612792f6b8..1faf6cb93dcb 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -24,7 +24,7 @@
#include <linux/usb/r8a66597.h>
#include <linux/usb/renesas_usbhs.h>
#include <linux/i2c.h>
-#include <linux/i2c/tsc2007.h>
+#include <linux/platform_data/tsc2007.h>
#include <linux/spi/spi.h>
#include <linux/spi/sh_msiof.h>
#include <linux/spi/mmc_spi.h>
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index d99008af5f73..9b06be07db4d 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -9,8 +9,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return dma_ops;
}
-#define DMA_ERROR_CODE 0
-
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir);
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
index 2722b61b2283..211b44920dbe 100644
--- a/arch/sh/include/asm/uaccess.h
+++ b/arch/sh/include/asm/uaccess.h
@@ -100,7 +100,6 @@ struct __large_struct { unsigned long buf[100]; };
extern long strncpy_from_user(char *dest, const char __user *src, long count);
-extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
/* Generic arbitrary sized copy. */
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 53783978162e..d18724d186f3 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -96,19 +96,6 @@ static int mod_code_status; /* holds return value of text write */
static void *mod_code_ip; /* holds the IP to write to */
static void *mod_code_newcode; /* holds the text to write to the IP */
-static unsigned nmi_wait_count;
-static atomic_t nmi_update_count = ATOMIC_INIT(0);
-
-int ftrace_arch_read_dyn_info(char *buf, int size)
-{
- int r;
-
- r = snprintf(buf, size, "%u %u",
- nmi_wait_count,
- atomic_read(&nmi_update_count));
- return r;
-}
-
static void clear_mod_flag(void)
{
int old = atomic_read(&nmi_running);
@@ -144,7 +131,6 @@ void arch_ftrace_nmi_enter(void)
if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
smp_rmb();
ftrace_mod_code();
- atomic_inc(&nmi_update_count);
}
/* Must have previous changes seen before executions */
smp_mb();
@@ -165,8 +151,6 @@ static void wait_for_nmi_and_set_mod_flag(void)
do {
cpu_relax();
} while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
-
- nmi_wait_count++;
}
static void wait_for_nmi(void)
@@ -177,8 +161,6 @@ static void wait_for_nmi(void)
do {
cpu_relax();
} while (atomic_read(&nmi_running));
-
- nmi_wait_count++;
}
static int
diff --git a/arch/sh/kernel/vsyscall/Makefile b/arch/sh/kernel/vsyscall/Makefile
index 8f0ea5fc835c..6ab108f1a0c6 100644
--- a/arch/sh/kernel/vsyscall/Makefile
+++ b/arch/sh/kernel/vsyscall/Makefile
@@ -1,11 +1,11 @@
-obj-y += vsyscall.o vsyscall-syscall.o
+obj-y += vsyscall.o vsyscall-syscall.o vsyscall-syms.o
$(obj)/vsyscall-syscall.o: \
$(foreach F,trapa,$(obj)/vsyscall-$F.so)
# Teach kbuild about targets
targets += $(foreach F,trapa,vsyscall-$F.o vsyscall-$F.so)
-targets += vsyscall-note.o vsyscall.lds
+targets += vsyscall-note.o vsyscall.lds vsyscall-dummy.o
# The DSO images are built using a special linker script
quiet_cmd_syscall = SYSCALL $@
@@ -26,11 +26,11 @@ $(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
# We also create a special relocatable object that should mirror the symbol
# table and layout of the linked DSO. With ld -R we can then refer to
# these symbols in the kernel code rather than hand-coded addresses.
-extra-y += vsyscall-syms.o
-$(obj)/built-in.o: $(obj)/vsyscall-syms.o
-$(obj)/built-in.o: ld_flags += -R $(obj)/vsyscall-syms.o
-
-SYSCFLAGS_vsyscall-syms.o = -r
-$(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
+SYSCFLAGS_vsyscall-dummy.o = -r
+$(obj)/vsyscall-dummy.o: $(src)/vsyscall.lds \
$(obj)/vsyscall-trapa.o $(obj)/vsyscall-note.o FORCE
$(call if_changed,syscall)
+
+LDFLAGS_vsyscall-syms.o := -r -R
+$(obj)/vsyscall-syms.o: $(obj)/vsyscall-dummy.o FORCE
+ $(call if_changed,ld)
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index cc948db74878..d2412d2d6462 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -42,7 +42,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
return pte;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 75491862d900..bf726af5f1a5 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -485,20 +485,14 @@ void free_initrd_mem(unsigned long start, unsigned long end)
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
+int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
- pg_data_t *pgdat;
unsigned long start_pfn = PFN_DOWN(start);
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
- pgdat = NODE_DATA(nid);
-
/* We only have ZONE_NORMAL, so this is easy.. */
- ret = __add_pages(nid, pgdat->node_zones +
- zone_for_memory(nid, start, size, ZONE_NORMAL,
- for_device),
- start_pfn, nr_pages);
+ ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
if (unlikely(ret))
printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 5639c9fe5b55..a4a626199c47 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -83,6 +83,8 @@ config SPARC64
select ARCH_SUPPORTS_ATOMIC_RMW
select HAVE_NMI
select HAVE_REGS_AND_STACK_ACCESS_API
+ select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_QUEUED_SPINLOCKS
config ARCH_DEFCONFIG
string
@@ -92,6 +94,9 @@ config ARCH_DEFCONFIG
config ARCH_PROC_KCORE_TEXT
def_bool y
+config CPU_BIG_ENDIAN
+ def_bool y
+
config ARCH_ATU
bool
default y if SPARC64
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index faa2f61058c2..4028f4f1e561 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -6,6 +6,17 @@
#ifndef __ARCH_SPARC64_CMPXCHG__
#define __ARCH_SPARC64_CMPXCHG__
+static inline unsigned long
+__cmpxchg_u32(volatile int *m, int old, int new)
+{
+ __asm__ __volatile__("cas [%2], %3, %0"
+ : "=&r" (new)
+ : "0" (new), "r" (m), "r" (old)
+ : "memory");
+
+ return new;
+}
+
static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
unsigned long tmp1, tmp2;
@@ -44,10 +55,38 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
void __xchg_called_with_bad_pointer(void);
+/*
+ * Use 4 byte cas instruction to achieve 2 byte xchg. Main logic
+ * here is to get the bit shift of the byte we are interested in.
+ * The XOR is handy for reversing the bits for big-endian byte order.
+ */
+static inline unsigned long
+xchg16(__volatile__ unsigned short *m, unsigned short val)
+{
+ unsigned long maddr = (unsigned long)m;
+ int bit_shift = (((unsigned long)m & 2) ^ 2) << 3;
+ unsigned int mask = 0xffff << bit_shift;
+ unsigned int *ptr = (unsigned int *) (maddr & ~2);
+ unsigned int old32, new32, load32;
+
+ /* Read the old value */
+ load32 = *ptr;
+
+ do {
+ old32 = load32;
+ new32 = (load32 & (~mask)) | val << bit_shift;
+ load32 = __cmpxchg_u32(ptr, old32, new32);
+ } while (load32 != old32);
+
+ return (load32 & mask) >> bit_shift;
+}
+
static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
int size)
{
switch (size) {
+ case 2:
+ return xchg16(ptr, x);
case 4:
return xchg32(ptr, x);
case 8:
@@ -65,10 +104,11 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
#include <asm-generic/cmpxchg-local.h>
+
static inline unsigned long
-__cmpxchg_u32(volatile int *m, int old, int new)
+__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
- __asm__ __volatile__("cas [%2], %3, %0"
+ __asm__ __volatile__("casx [%2], %3, %0"
: "=&r" (new)
: "0" (new), "r" (m), "r" (old)
: "memory");
@@ -76,15 +116,31 @@ __cmpxchg_u32(volatile int *m, int old, int new)
return new;
}
+/*
+ * Use 4 byte cas instruction to achieve 1 byte cmpxchg. Main logic
+ * here is to get the bit shift of the byte we are interested in.
+ * The XOR is handy for reversing the bits for big-endian byte order
+ */
static inline unsigned long
-__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
+__cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new)
{
- __asm__ __volatile__("casx [%2], %3, %0"
- : "=&r" (new)
- : "0" (new), "r" (m), "r" (old)
- : "memory");
-
- return new;
+ unsigned long maddr = (unsigned long)m;
+ int bit_shift = (((unsigned long)m & 3) ^ 3) << 3;
+ unsigned int mask = 0xff << bit_shift;
+ unsigned int *ptr = (unsigned int *) (maddr & ~3);
+ unsigned int old32, new32, load;
+ unsigned int load32 = *ptr;
+
+ do {
+ new32 = (load32 & ~mask) | (new << bit_shift);
+ old32 = (load32 & ~mask) | (old << bit_shift);
+ load32 = __cmpxchg_u32(ptr, old32, new32);
+ if (load32 == old32)
+ return old;
+ load = (load32 & mask) >> bit_shift;
+ } while (load == old);
+
+ return load;
}
/* This function doesn't exist, so you'll get a linker error
@@ -95,6 +151,8 @@ static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
switch (size) {
+ case 1:
+ return __cmpxchg_u8(ptr, old, new);
case 4:
return __cmpxchg_u32(ptr, old, new);
case 8:
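
The new xchg16()/__cmpxchg_u8() helpers above synthesize sub-word atomics from the 32-bit `cas` instruction: locate the containing aligned word, derive a byte or halfword mask from the low address bits, and loop on a full-word compare-and-swap until the targeted lane is updated. A user-space sketch of the same byte-wide scheme using C11 atomics; the little-endian branch is added here only so the sketch runs on any host, the kernel code itself is sparc64/big-endian only:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* On sparc64 the "^ 3" flips the byte index because the most significant
     * byte lives at the lowest address.
     */
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    #define BYTE_SHIFT(a)  ((((a) & 3) ^ 3) << 3)
    #else
    #define BYTE_SHIFT(a)  (((a) & 3) << 3)
    #endif

    static uint8_t cmpxchg_u8(volatile uint8_t *m, uint8_t old, uint8_t new)
    {
        uintptr_t a = (uintptr_t)m;
        unsigned int shift = BYTE_SHIFT(a);
        uint32_t mask = 0xffu << shift;
        _Atomic uint32_t *word = (_Atomic uint32_t *)(a & ~(uintptr_t)3);
        uint32_t cur = atomic_load(word);

        for (;;) {
            uint32_t expected = (cur & ~mask) | ((uint32_t)old << shift);
            uint32_t desired  = (cur & ~mask) | ((uint32_t)new << shift);

            if (atomic_compare_exchange_strong(word, &expected, desired))
                return old;                    /* our byte matched and was replaced */
            cur = expected;                    /* CAS failed: word has changed */
            if (((cur & mask) >> shift) != old)
                return (cur & mask) >> shift;  /* byte itself differs: report it */
        }
    }

    int main(void)
    {
        _Atomic uint32_t word = 0;
        volatile uint8_t *bytes = (volatile uint8_t *)&word;

        cmpxchg_u8(&bytes[1], 0x00, 0xab);     /* succeeds: byte 1 was 0x00 */
        cmpxchg_u8(&bytes[1], 0x00, 0xcd);     /* fails: byte 1 is now 0xab */
        printf("byte 1 = 0x%02x\n", bytes[1]); /* prints 0xab */
        return 0;
    }
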
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 69cc627779f2..60bf1633d554 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -5,11 +5,6 @@
#include <linux/mm.h>
#include <linux/dma-debug.h>
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-#define HAVE_ARCH_DMA_SUPPORTED 1
-int dma_supported(struct device *dev, u64 mask);
-
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
@@ -19,7 +14,6 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
}
extern const struct dma_map_ops *dma_ops;
-extern const struct dma_map_ops *leon_dma_ops;
extern const struct dma_map_ops pci32_dma_ops;
extern struct bus_type pci_bus_type;
@@ -28,7 +22,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
#ifdef CONFIG_SPARC_LEON
if (sparc_cpu_model == sparc_leon)
- return leon_dma_ops;
+ return &pci32_dma_ops;
#endif
#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
if (bus == &pci_bus_type)
diff --git a/arch/sparc/include/asm/ldc.h b/arch/sparc/include/asm/ldc.h
index 6e9004aa6f25..698738a10414 100644
--- a/arch/sparc/include/asm/ldc.h
+++ b/arch/sparc/include/asm/ldc.h
@@ -48,6 +48,8 @@ struct ldc_channel_config {
#define LDC_STATE_READY 0x03
#define LDC_STATE_CONNECTED 0x04
+#define LDC_PACKET_SIZE 64
+
struct ldc_channel;
/* Allocate state for a channel. */
@@ -72,6 +74,12 @@ int ldc_connect(struct ldc_channel *lp);
int ldc_disconnect(struct ldc_channel *lp);
int ldc_state(struct ldc_channel *lp);
+void ldc_set_state(struct ldc_channel *lp, u8 state);
+int ldc_mode(struct ldc_channel *lp);
+void __ldc_print(struct ldc_channel *lp, const char *caller);
+int ldc_rx_reset(struct ldc_channel *lp);
+
+#define ldc_print(chan) __ldc_print(chan, __func__)
/* Read and write operations. Only valid when the link is up. */
int ldc_write(struct ldc_channel *lp, const void *buf,
diff --git a/arch/sparc/include/asm/mdesc.h b/arch/sparc/include/asm/mdesc.h
index aebeb88f70db..e8a4c413a1c7 100644
--- a/arch/sparc/include/asm/mdesc.h
+++ b/arch/sparc/include/asm/mdesc.h
@@ -16,6 +16,7 @@ struct mdesc_handle *mdesc_grab(void);
void mdesc_release(struct mdesc_handle *);
#define MDESC_NODE_NULL (~(u64)0)
+#define MDESC_MAX_STR_LEN 256
u64 mdesc_node_by_name(struct mdesc_handle *handle,
u64 from_node, const char *name);
@@ -62,15 +63,32 @@ u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc);
void mdesc_update(void);
struct mdesc_notifier_client {
- void (*add)(struct mdesc_handle *handle, u64 node);
- void (*remove)(struct mdesc_handle *handle, u64 node);
-
+ void (*add)(struct mdesc_handle *handle, u64 node,
+ const char *node_name);
+ void (*remove)(struct mdesc_handle *handle, u64 node,
+ const char *node_name);
const char *node_name;
struct mdesc_notifier_client *next;
};
void mdesc_register_notifier(struct mdesc_notifier_client *client);
+union md_node_info {
+ struct vdev_port {
+ u64 id; /* id */
+ u64 parent_cfg_hdl; /* parent config handle */
+ const char *name; /* name (property) */
+ } vdev_port;
+ struct ds_port {
+ u64 id; /* id */
+ } ds_port;
+};
+
+u64 mdesc_get_node(struct mdesc_handle *hp, const char *node_name,
+ union md_node_info *node_info);
+int mdesc_get_node_info(struct mdesc_handle *hp, u64 node,
+ const char *node_name, union md_node_info *node_info);
+
void mdesc_fill_in_cpu_data(cpumask_t *mask);
void mdesc_populate_present_mask(cpumask_t *mask);
void mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask);
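
The union md_node_info introduced above gives each supported MD node type a small composite key (for virtual-device ports: the "id" property, the parent's "cfg-handle" and the node name), so a device can be re-matched against a freshly fetched machine description even though raw node indices change across MD updates. A toy illustration of matching by such a cached key; the values in main() are made up:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Composite key for a virtual-device-port style node. */
    struct vdev_key {
        uint64_t id;
        uint64_t parent_cfg_hdl;
        const char *name;
    };

    static bool vdev_key_match(const struct vdev_key *a, const struct vdev_key *b)
    {
        return a->id == b->id &&
               a->parent_cfg_hdl == b->parent_cfg_hdl &&
               strcmp(a->name, b->name) == 0;
    }

    int main(void)
    {
        /* Key cached when the device was first discovered... */
        struct vdev_key cached    = { .id = 2, .parent_cfg_hdl = 0x10, .name = "vdc-port" };
        /* ...and the key of a candidate node in a newly fetched MD. */
        struct vdev_key candidate = { .id = 2, .parent_cfg_hdl = 0x10, .name = "vdc-port" };

        printf("same node: %s\n", vdev_key_match(&cached, &candidate) ? "yes" : "no");
        return 0;
    }
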
diff --git a/arch/sparc/include/asm/qrwlock.h b/arch/sparc/include/asm/qrwlock.h
new file mode 100644
index 000000000000..d68a4b102100
--- /dev/null
+++ b/arch/sparc/include/asm/qrwlock.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_SPARC_QRWLOCK_H
+#define _ASM_SPARC_QRWLOCK_H
+
+#include <asm-generic/qrwlock_types.h>
+#include <asm-generic/qrwlock.h>
+
+#endif /* _ASM_SPARC_QRWLOCK_H */
diff --git a/arch/sparc/include/asm/qspinlock.h b/arch/sparc/include/asm/qspinlock.h
new file mode 100644
index 000000000000..5ae9a2802846
--- /dev/null
+++ b/arch/sparc/include/asm/qspinlock.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_SPARC_QSPINLOCK_H
+#define _ASM_SPARC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_SPARC_QSPINLOCK_H */
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index 3fae200dd251..8b32538084f7 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -1,5 +1,5 @@
/*
- * Just a place holder.
+ * Just a place holder.
*/
#ifndef _SPARC_SETUP_H
#define _SPARC_SETUP_H
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
index 07c9f2e9bf57..f7028f5e1a5a 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
@@ -10,216 +10,12 @@
#include <asm/processor.h>
#include <asm/barrier.h>
-
-/* To get debugging spinlocks which detect and catch
- * deadlock situations, set CONFIG_DEBUG_SPINLOCK
- * and rebuild your kernel.
- */
-
-/* Because we play games to save cycles in the non-contention case, we
- * need to be extra careful about branch targets into the "spinning"
- * code. They live in their own section, but the newer V9 branches
- * have a shorter range than the traditional 32-bit sparc branch
- * variants. The rule is that the branches that go into and out of
- * the spinner sections must be pre-V9 branches.
- */
-
-#define arch_spin_is_locked(lp) ((lp)->lock != 0)
-
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
- smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- unsigned long tmp;
-
- __asm__ __volatile__(
-"1: ldstub [%1], %0\n"
-" brnz,pn %0, 2f\n"
-" nop\n"
-" .subsection 2\n"
-"2: ldub [%1], %0\n"
-" brnz,pt %0, 2b\n"
-" nop\n"
-" ba,a,pt %%xcc, 1b\n"
-" .previous"
- : "=&r" (tmp)
- : "r" (lock)
- : "memory");
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
- unsigned long result;
-
- __asm__ __volatile__(
-" ldstub [%1], %0\n"
- : "=r" (result)
- : "r" (lock)
- : "memory");
-
- return (result == 0UL);
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- __asm__ __volatile__(
-" stb %%g0, [%0]"
- : /* No outputs */
- : "r" (lock)
- : "memory");
-}
-
-static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
- unsigned long tmp1, tmp2;
-
- __asm__ __volatile__(
-"1: ldstub [%2], %0\n"
-" brnz,pn %0, 2f\n"
-" nop\n"
-" .subsection 2\n"
-"2: rdpr %%pil, %1\n"
-" wrpr %3, %%pil\n"
-"3: ldub [%2], %0\n"
-" brnz,pt %0, 3b\n"
-" nop\n"
-" ba,pt %%xcc, 1b\n"
-" wrpr %1, %%pil\n"
-" .previous"
- : "=&r" (tmp1), "=&r" (tmp2)
- : "r"(lock), "r"(flags)
- : "memory");
-}
-
-/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-
-static inline void arch_read_lock(arch_rwlock_t *lock)
-{
- unsigned long tmp1, tmp2;
-
- __asm__ __volatile__ (
-"1: ldsw [%2], %0\n"
-" brlz,pn %0, 2f\n"
-"4: add %0, 1, %1\n"
-" cas [%2], %0, %1\n"
-" cmp %0, %1\n"
-" bne,pn %%icc, 1b\n"
-" nop\n"
-" .subsection 2\n"
-"2: ldsw [%2], %0\n"
-" brlz,pt %0, 2b\n"
-" nop\n"
-" ba,a,pt %%xcc, 4b\n"
-" .previous"
- : "=&r" (tmp1), "=&r" (tmp2)
- : "r" (lock)
- : "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *lock)
-{
- int tmp1, tmp2;
-
- __asm__ __volatile__ (
-"1: ldsw [%2], %0\n"
-" brlz,a,pn %0, 2f\n"
-" mov 0, %0\n"
-" add %0, 1, %1\n"
-" cas [%2], %0, %1\n"
-" cmp %0, %1\n"
-" bne,pn %%icc, 1b\n"
-" mov 1, %0\n"
-"2:"
- : "=&r" (tmp1), "=&r" (tmp2)
- : "r" (lock)
- : "memory");
-
- return tmp1;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *lock)
-{
- unsigned long tmp1, tmp2;
-
- __asm__ __volatile__(
-"1: lduw [%2], %0\n"
-" sub %0, 1, %1\n"
-" cas [%2], %0, %1\n"
-" cmp %0, %1\n"
-" bne,pn %%xcc, 1b\n"
-" nop"
- : "=&r" (tmp1), "=&r" (tmp2)
- : "r" (lock)
- : "memory");
-}
-
-static inline void arch_write_lock(arch_rwlock_t *lock)
-{
- unsigned long mask, tmp1, tmp2;
-
- mask = 0x80000000UL;
-
- __asm__ __volatile__(
-"1: lduw [%2], %0\n"
-" brnz,pn %0, 2f\n"
-"4: or %0, %3, %1\n"
-" cas [%2], %0, %1\n"
-" cmp %0, %1\n"
-" bne,pn %%icc, 1b\n"
-" nop\n"
-" .subsection 2\n"
-"2: lduw [%2], %0\n"
-" brnz,pt %0, 2b\n"
-" nop\n"
-" ba,a,pt %%xcc, 4b\n"
-" .previous"
- : "=&r" (tmp1), "=&r" (tmp2)
- : "r" (lock), "r" (mask)
- : "memory");
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *lock)
-{
- __asm__ __volatile__(
-" stw %%g0, [%0]"
- : /* no outputs */
- : "r" (lock)
- : "memory");
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *lock)
-{
- unsigned long mask, tmp1, tmp2, result;
-
- mask = 0x80000000UL;
-
- __asm__ __volatile__(
-" mov 0, %2\n"
-"1: lduw [%3], %0\n"
-" brnz,pn %0, 2f\n"
-" or %0, %4, %1\n"
-" cas [%3], %0, %1\n"
-" cmp %0, %1\n"
-" bne,pn %%icc, 1b\n"
-" nop\n"
-" mov 1, %2\n"
-"2:"
- : "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
- : "r" (lock), "r" (mask)
- : "memory");
-
- return result;
-}
+#include <asm/qrwlock.h>
+#include <asm/qspinlock.h>
#define arch_read_lock_flags(p, f) arch_read_lock(p)
#define arch_write_lock_flags(p, f) arch_write_lock(p)
-#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
-#define arch_write_can_lock(rw) (!(rw)->lock)
-
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h
index 9c454fdeaad8..bce8ef44dfa9 100644
--- a/arch/sparc/include/asm/spinlock_types.h
+++ b/arch/sparc/include/asm/spinlock_types.h
@@ -1,20 +1,24 @@
#ifndef __SPARC_SPINLOCK_TYPES_H
#define __SPARC_SPINLOCK_TYPES_H
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm-generic/qspinlock_types.h>
+#else
typedef struct {
volatile unsigned char lock;
} arch_spinlock_t;
#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+#endif /* CONFIG_QUEUED_SPINLOCKS */
+#ifdef CONFIG_QUEUED_RWLOCKS
+#include <asm-generic/qrwlock_types.h>
+#else
typedef struct {
volatile unsigned int lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
-
+#endif /* CONFIG_QUEUED_RWLOCKS */
#endif
diff --git a/arch/sparc/include/asm/timer_64.h b/arch/sparc/include/asm/timer_64.h
index fce415034000..51bc3bc54bfe 100644
--- a/arch/sparc/include/asm/timer_64.h
+++ b/arch/sparc/include/asm/timer_64.h
@@ -9,7 +9,12 @@
#include <linux/types.h>
#include <linux/init.h>
+/* The most frequently accessed fields should be first,
+ * to fit into the same cacheline.
+ */
struct sparc64_tick_ops {
+ unsigned long ticks_per_nsec_quotient;
+ unsigned long offset;
unsigned long long (*get_tick)(void);
int (*add_compare)(unsigned long);
unsigned long softint_mask;
@@ -17,6 +22,8 @@ struct sparc64_tick_ops {
void (*init_tick)(void);
unsigned long (*add_tick)(unsigned long);
+ unsigned long (*get_frequency)(void);
+ unsigned long frequency;
char *name;
};
@@ -27,4 +34,64 @@ unsigned long sparc64_get_clock_tick(unsigned int cpu);
void setup_sparc64_timer(void);
void __init time_init(void);
+#define TICK_PRIV_BIT BIT(63)
+#define TICKCMP_IRQ_BIT BIT(63)
+
+#define HBIRD_STICKCMP_ADDR 0x1fe0000f060UL
+#define HBIRD_STICK_ADDR 0x1fe0000f070UL
+
+#define GET_TICK_NINSTR 13
+struct get_tick_patch {
+ unsigned int addr;
+ unsigned int tick[GET_TICK_NINSTR];
+ unsigned int stick[GET_TICK_NINSTR];
+};
+
+extern struct get_tick_patch __get_tick_patch;
+extern struct get_tick_patch __get_tick_patch_end;
+
+static inline unsigned long get_tick(void)
+{
+ unsigned long tick, tmp1, tmp2;
+
+ __asm__ __volatile__(
+ /* read hbtick 13 instructions */
+ "661:\n"
+ " mov 0x1fe, %1\n"
+ " sllx %1, 0x20, %1\n"
+ " sethi %%hi(0xf000), %2\n"
+ " or %2, 0x70, %2\n"
+ " or %1, %2, %1\n" /* %1 = HBIRD_STICK_ADDR */
+ " add %1, 8, %2\n"
+ " ldxa [%2]%3, %0\n"
+ " ldxa [%1]%3, %1\n"
+ " ldxa [%2]%3, %2\n"
+ " sub %2, %0, %0\n" /* don't modify %xcc */
+ " brnz,pn %0, 661b\n" /* restart to save one register */
+ " sllx %2, 32, %2\n"
+ " or %2, %1, %0\n"
+ /* Common/not patched code */
+ " sllx %0, 1, %0\n"
+ " srlx %0, 1, %0\n" /* Clear TICK_PRIV_BIT */
+ /* Beginning of patch section */
+ " .section .get_tick_patch, \"ax\"\n"
+ " .word 661b\n"
+ /* read tick 2 instructions and 11 skipped */
+ " ba 1f\n"
+ " rd %%tick, %0\n"
+ " .skip 4 * (%4 - 2)\n"
+ "1:\n"
+ /* read stick 2 instructions and 11 skipped */
+ " ba 1f\n"
+ " rd %%asr24, %0\n"
+ " .skip 4 * (%4 - 2)\n"
+ "1:\n"
+ /* End of patch section */
+ " .previous\n"
+ : "=&r" (tick), "=&r" (tmp1), "=&r" (tmp2)
+ : "i" (ASI_PHYS_BYPASS_EC_E), "i" (GET_TICK_NINSTR));
+
+ return tick;
+}
+
#endif /* _SPARC64_TIMER_H */
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 12ebee2d97c7..bdb1447aa1bb 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -277,7 +277,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long n)
return n;
}
-__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);
#endif /* _ASM_UACCESS_H */
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 6096d671aa63..113d84eaa15e 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -194,7 +194,6 @@ unsigned long __must_check __clear_user(void __user *, unsigned long);
#define clear_user __clear_user
-__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);
struct pt_regs;
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 9dca7a892978..d1c47e9f0090 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -316,24 +316,33 @@ static inline u32 vio_dring_prev(struct vio_dring_state *dr, u32 index)
}
#define VIO_MAX_TYPE_LEN 32
+#define VIO_MAX_NAME_LEN 32
#define VIO_MAX_COMPAT_LEN 64
struct vio_dev {
u64 mp;
struct device_node *dp;
+ char node_name[VIO_MAX_NAME_LEN];
char type[VIO_MAX_TYPE_LEN];
char compat[VIO_MAX_COMPAT_LEN];
int compat_len;
u64 dev_no;
- u64 id;
+ unsigned long port_id;
unsigned long channel_id;
unsigned int tx_irq;
unsigned int rx_irq;
u64 rx_ino;
+ u64 tx_ino;
+
+ /* Handle to the root of "channel-devices" sub-tree in MDESC */
+ u64 cdev_handle;
+
+ /* MD specific data used to identify the vdev in MD */
+ union md_node_info md_node_info;
struct device dev;
};
@@ -347,6 +356,7 @@ struct vio_driver {
void (*shutdown)(struct vio_dev *dev);
unsigned long driver_data;
struct device_driver driver;
+ bool no_irq;
};
struct vio_version {
@@ -490,5 +500,6 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
void vio_port_up(struct vio_driver_state *vio);
int vio_set_intr(unsigned long dev_ino, int state);
+u64 vio_vdev_node(struct mdesc_handle *hp, struct vio_dev *vdev);
#endif /* _SPARC64_VIO_H */
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index 9ebc37e7d64c..c988e7fa069b 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -167,7 +167,7 @@ static int apc_probe(struct platform_device *op)
return 0;
}
-static struct of_device_id apc_match[] = {
+static const struct of_device_id apc_match[] = {
{
.name = APC_OBPNAME,
},
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index c63ba99ca551..fcbcc031f615 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -314,7 +314,7 @@ bad:
bad_no_ctx:
if (printk_ratelimit())
WARN_ON(1);
- return DMA_ERROR_CODE;
+ return SPARC_MAPPING_ERROR;
}
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
@@ -547,7 +547,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
if (outcount < incount) {
outs = sg_next(outs);
- outs->dma_address = DMA_ERROR_CODE;
+ outs->dma_address = SPARC_MAPPING_ERROR;
outs->dma_length = 0;
}
@@ -573,7 +573,7 @@ iommu_map_failed:
iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
IOMMU_ERROR_CODE);
- s->dma_address = DMA_ERROR_CODE;
+ s->dma_address = SPARC_MAPPING_ERROR;
s->dma_length = 0;
}
if (s == outs)
@@ -741,6 +741,26 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
spin_unlock_irqrestore(&iommu->lock, flags);
}
+static int dma_4u_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == SPARC_MAPPING_ERROR;
+}
+
+static int dma_4u_supported(struct device *dev, u64 device_mask)
+{
+ struct iommu *iommu = dev->archdata.iommu;
+
+ if (device_mask > DMA_BIT_MASK(32))
+ return 0;
+ if ((device_mask & iommu->dma_addr_mask) == iommu->dma_addr_mask)
+ return 1;
+#ifdef CONFIG_PCI
+ if (dev_is_pci(dev))
+ return pci64_dma_supported(to_pci_dev(dev), device_mask);
+#endif
+ return 0;
+}
+
static const struct dma_map_ops sun4u_dma_ops = {
.alloc = dma_4u_alloc_coherent,
.free = dma_4u_free_coherent,
@@ -750,31 +770,9 @@ static const struct dma_map_ops sun4u_dma_ops = {
.unmap_sg = dma_4u_unmap_sg,
.sync_single_for_cpu = dma_4u_sync_single_for_cpu,
.sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
+ .dma_supported = dma_4u_supported,
+ .mapping_error = dma_4u_mapping_error,
};
const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
-
-int dma_supported(struct device *dev, u64 device_mask)
-{
- struct iommu *iommu = dev->archdata.iommu;
- u64 dma_addr_mask = iommu->dma_addr_mask;
-
- if (device_mask > DMA_BIT_MASK(32)) {
- if (iommu->atu)
- dma_addr_mask = iommu->atu->dma_addr_mask;
- else
- return 0;
- }
-
- if ((device_mask & dma_addr_mask) == dma_addr_mask)
- return 1;
-
-#ifdef CONFIG_PCI
- if (dev_is_pci(dev))
- return pci64_dma_supported(to_pci_dev(dev), device_mask);
-#endif
-
- return 0;
-}
-EXPORT_SYMBOL(dma_supported);
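
This hunk moves error signalling out of the global DMA_ERROR_CODE constant and into the dma_map_ops themselves: the sun4u ops now carry a .mapping_error callback that compares against a private SPARC_MAPPING_ERROR sentinel, and .dma_supported replaces the exported arch-wide dma_supported(). A stripped-down model of that ops-owned error convention; the struct and names below are illustrative, not the kernel API:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t dma_addr_t;

    /* Per-implementation sentinel, in the spirit of SPARC_MAPPING_ERROR. */
    #define EXAMPLE_MAPPING_ERROR ((dma_addr_t)~0ULL)

    struct example_dma_ops {
        dma_addr_t (*map)(void *cpu_addr, size_t len);
        bool (*mapping_error)(dma_addr_t addr);
    };

    /* Pretend the IOMMU ran out of mapping space. */
    static dma_addr_t example_map(void *cpu_addr, size_t len)
    {
        (void)cpu_addr;
        (void)len;
        return EXAMPLE_MAPPING_ERROR;
    }

    static bool example_mapping_error(dma_addr_t addr)
    {
        return addr == EXAMPLE_MAPPING_ERROR;
    }

    static const struct example_dma_ops ops = {
        .map           = example_map,
        .mapping_error = example_mapping_error,
    };

    int main(void)
    {
        char buf[64];
        dma_addr_t handle = ops.map(buf, sizeof(buf));

        /* Callers ask the ops, not a global constant, whether the map failed. */
        if (ops.mapping_error(handle))
            fprintf(stderr, "mapping failed\n");
        return 0;
    }
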
diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h
index 828493329f68..5ea5c192b1d9 100644
--- a/arch/sparc/kernel/iommu_common.h
+++ b/arch/sparc/kernel/iommu_common.h
@@ -47,4 +47,6 @@ static inline int is_span_boundary(unsigned long entry,
return iommu_is_span_boundary(entry, nr, shift, boundary_size);
}
+#define SPARC_MAPPING_ERROR (~(dma_addr_t)0x0)
+
#endif /* _IOMMU_COMMON_H */
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index cf20033a1458..12894f259bea 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -401,6 +401,11 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
BUG();
}
+static int sbus_dma_supported(struct device *dev, u64 mask)
+{
+ return 0;
+}
+
static const struct dma_map_ops sbus_dma_ops = {
.alloc = sbus_alloc_coherent,
.free = sbus_free_coherent,
@@ -410,6 +415,7 @@ static const struct dma_map_ops sbus_dma_ops = {
.unmap_sg = sbus_unmap_sg,
.sync_sg_for_cpu = sbus_sync_sg_for_cpu,
.sync_sg_for_device = sbus_sync_sg_for_device,
+ .dma_supported = sbus_dma_supported,
};
static int __init sparc_register_ioport(void)
@@ -637,6 +643,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
}
}
+/* note: leon re-uses pci32_dma_ops */
const struct dma_map_ops pci32_dma_ops = {
.alloc = pci32_alloc_coherent,
.free = pci32_free_coherent,
@@ -651,29 +658,9 @@ const struct dma_map_ops pci32_dma_ops = {
};
EXPORT_SYMBOL(pci32_dma_ops);
-/* leon re-uses pci32_dma_ops */
-const struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
-EXPORT_SYMBOL(leon_dma_ops);
-
const struct dma_map_ops *dma_ops = &sbus_dma_ops;
EXPORT_SYMBOL(dma_ops);
-
-/*
- * Return whether the given PCI device DMA address mask can be
- * supported properly. For example, if your device can only drive the
- * low 24-bits during PCI bus mastering, then you would pass
- * 0x00ffffff as the mask to this function.
- */
-int dma_supported(struct device *dev, u64 mask)
-{
- if (dev_is_pci(dev))
- return 1;
-
- return 0;
-}
-EXPORT_SYMBOL(dma_supported);
-
#ifdef CONFIG_PROC_FS
static int sparc_io_proc_show(struct seq_file *m, void *v)
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index 6ae1e77be0bf..b625db4cfb78 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -52,6 +52,9 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs);
void do_signal32(struct pt_regs * regs);
asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp);
+/* time_64.c */
+void __init time_init_early(void);
+
/* compat_audit.c */
extern unsigned int sparc32_dir_class[];
extern unsigned int sparc32_chattr_class[];
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 59d503866431..840e0b21bfe3 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -34,7 +34,6 @@
static char version[] =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-#define LDC_PACKET_SIZE 64
/* Packet header layout for unreliable and reliable mode frames.
* When in RAW mode, packets are simply straight 64-byte payloads
@@ -178,6 +177,8 @@ do { if (lp->cfg.debug & LDC_DEBUG_##TYPE) \
printk(KERN_INFO PFX "ID[%lu] " f, lp->id, ## a); \
} while (0)
+#define LDC_ABORT(lp) ldc_abort((lp), __func__)
+
static const char *state_to_str(u8 state)
{
switch (state) {
@@ -196,15 +197,6 @@ static const char *state_to_str(u8 state)
}
}
-static void ldc_set_state(struct ldc_channel *lp, u8 state)
-{
- ldcdbg(STATE, "STATE (%s) --> (%s)\n",
- state_to_str(lp->state),
- state_to_str(state));
-
- lp->state = state;
-}
-
static unsigned long __advance(unsigned long off, unsigned long num_entries)
{
off += LDC_PACKET_SIZE;
@@ -516,11 +508,12 @@ static int send_data_nack(struct ldc_channel *lp, struct ldc_packet *data_pkt)
return err;
}
-static int ldc_abort(struct ldc_channel *lp)
+static int ldc_abort(struct ldc_channel *lp, const char *msg)
{
unsigned long hv_err;
- ldcdbg(STATE, "ABORT\n");
+ ldcdbg(STATE, "ABORT[%s]\n", msg);
+ ldc_print(lp);
/* We report but do not act upon the hypervisor errors because
* there really isn't much we can do if they fail at this point.
@@ -605,7 +598,7 @@ static int process_ver_info(struct ldc_channel *lp, struct ldc_version *vp)
}
}
if (err)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
return 0;
}
@@ -618,13 +611,13 @@ static int process_ver_ack(struct ldc_channel *lp, struct ldc_version *vp)
if (lp->hs_state == LDC_HS_GOTVERS) {
if (lp->ver.major != vp->major ||
lp->ver.minor != vp->minor)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
} else {
lp->ver = *vp;
lp->hs_state = LDC_HS_GOTVERS;
}
if (send_rts(lp))
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
return 0;
}
@@ -635,17 +628,17 @@ static int process_ver_nack(struct ldc_channel *lp, struct ldc_version *vp)
unsigned long new_tail;
if (vp->major == 0 && vp->minor == 0)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
vap = find_by_major(vp->major);
if (!vap)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
p = handshake_compose_ctrl(lp, LDC_INFO, LDC_VERS,
vap, sizeof(*vap),
&new_tail);
if (!p)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
return send_tx_packet(lp, p, new_tail);
}
@@ -668,7 +661,7 @@ static int process_version(struct ldc_channel *lp,
return process_ver_nack(lp, vp);
default:
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
}
}
@@ -681,13 +674,13 @@ static int process_rts(struct ldc_channel *lp,
if (p->stype != LDC_INFO ||
lp->hs_state != LDC_HS_GOTVERS ||
p->env != lp->cfg.mode)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
lp->snd_nxt = p->seqid;
lp->rcv_nxt = p->seqid;
lp->hs_state = LDC_HS_SENTRTR;
if (send_rtr(lp))
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
return 0;
}
@@ -700,7 +693,7 @@ static int process_rtr(struct ldc_channel *lp,
if (p->stype != LDC_INFO ||
p->env != lp->cfg.mode)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
lp->snd_nxt = p->seqid;
lp->hs_state = LDC_HS_COMPLETE;
@@ -723,7 +716,7 @@ static int process_rdx(struct ldc_channel *lp,
if (p->stype != LDC_INFO ||
!(rx_seq_ok(lp, p->seqid)))
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
lp->rcv_nxt = p->seqid;
@@ -750,14 +743,14 @@ static int process_control_frame(struct ldc_channel *lp,
return process_rdx(lp, p);
default:
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
}
}
static int process_error_frame(struct ldc_channel *lp,
struct ldc_packet *p)
{
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
}
static int process_data_ack(struct ldc_channel *lp,
@@ -776,7 +769,7 @@ static int process_data_ack(struct ldc_channel *lp,
return 0;
}
if (head == lp->tx_tail)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
}
return 0;
@@ -820,16 +813,21 @@ static irqreturn_t ldc_rx(int irq, void *dev_id)
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
- event_mask |= LDC_EVENT_UP;
-
- orig_state = lp->chan_state;
+ /*
+ * Generate an LDC_EVENT_UP event if the channel
+ * was not already up.
+ */
+ if (orig_state != LDC_CHANNEL_UP) {
+ event_mask |= LDC_EVENT_UP;
+ orig_state = lp->chan_state;
+ }
}
/* If we are in reset state, flush the RX queue and ignore
* everything.
*/
if (lp->flags & LDC_FLAG_RESET) {
- (void) __set_rx_head(lp, lp->rx_tail);
+ (void) ldc_rx_reset(lp);
goto out;
}
@@ -880,7 +878,7 @@ handshake_complete:
break;
default:
- err = ldc_abort(lp);
+ err = LDC_ABORT(lp);
break;
}
@@ -895,7 +893,7 @@ handshake_complete:
err = __set_rx_head(lp, new);
if (err < 0) {
- (void) ldc_abort(lp);
+ (void) LDC_ABORT(lp);
break;
}
if (lp->hs_state == LDC_HS_COMPLETE)
@@ -936,7 +934,14 @@ static irqreturn_t ldc_tx(int irq, void *dev_id)
lp->hs_state = LDC_HS_COMPLETE;
ldc_set_state(lp, LDC_STATE_CONNECTED);
- event_mask |= LDC_EVENT_UP;
+ /*
+ * Generate an LDC_EVENT_UP event if the channel
+ * was not already up.
+ */
+ if (orig_state != LDC_CHANNEL_UP) {
+ event_mask |= LDC_EVENT_UP;
+ orig_state = lp->chan_state;
+ }
}
spin_unlock_irqrestore(&lp->lock, flags);
@@ -1342,6 +1347,14 @@ int ldc_bind(struct ldc_channel *lp)
lp->hs_state = LDC_HS_OPEN;
ldc_set_state(lp, LDC_STATE_BOUND);
+ if (lp->cfg.mode == LDC_MODE_RAW) {
+ /*
+ * There is no handshake in RAW mode, so handshake
+ * is completed.
+ */
+ lp->hs_state = LDC_HS_COMPLETE;
+ }
+
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
@@ -1447,12 +1460,54 @@ int ldc_state(struct ldc_channel *lp)
}
EXPORT_SYMBOL(ldc_state);
+void ldc_set_state(struct ldc_channel *lp, u8 state)
+{
+ ldcdbg(STATE, "STATE (%s) --> (%s)\n",
+ state_to_str(lp->state),
+ state_to_str(state));
+
+ lp->state = state;
+}
+EXPORT_SYMBOL(ldc_set_state);
+
+int ldc_mode(struct ldc_channel *lp)
+{
+ return lp->cfg.mode;
+}
+EXPORT_SYMBOL(ldc_mode);
+
+int ldc_rx_reset(struct ldc_channel *lp)
+{
+ return __set_rx_head(lp, lp->rx_tail);
+}
+
+void __ldc_print(struct ldc_channel *lp, const char *caller)
+{
+ pr_info("%s: id=0x%lx flags=0x%x state=%s cstate=0x%lx hsstate=0x%x\n"
+ "\trx_h=0x%lx rx_t=0x%lx rx_n=%ld\n"
+ "\ttx_h=0x%lx tx_t=0x%lx tx_n=%ld\n"
+ "\trcv_nxt=%u snd_nxt=%u\n",
+ caller, lp->id, lp->flags, state_to_str(lp->state),
+ lp->chan_state, lp->hs_state,
+ lp->rx_head, lp->rx_tail, lp->rx_num_entries,
+ lp->tx_head, lp->tx_tail, lp->tx_num_entries,
+ lp->rcv_nxt, lp->snd_nxt);
+}
+
static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size)
{
struct ldc_packet *p;
- unsigned long new_tail;
+ unsigned long new_tail, hv_err;
int err;
+ hv_err = sun4v_ldc_tx_get_state(lp->id, &lp->tx_head, &lp->tx_tail,
+ &lp->chan_state);
+ if (unlikely(hv_err))
+ return -EBUSY;
+
+ if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
+ return LDC_ABORT(lp);
+
if (size > LDC_PACKET_SIZE)
return -EMSGSIZE;
@@ -1483,7 +1538,7 @@ static int read_raw(struct ldc_channel *lp, void *buf, unsigned int size)
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
@@ -1526,7 +1581,7 @@ static int write_nonraw(struct ldc_channel *lp, const void *buf,
return -EBUSY;
if (unlikely(lp->chan_state != LDC_CHANNEL_UP))
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
if (!tx_has_space_for(lp, size))
return -EAGAIN;
@@ -1592,9 +1647,9 @@ static int rx_bad_seq(struct ldc_channel *lp, struct ldc_packet *p,
if (err)
return err;
- err = __set_rx_head(lp, lp->rx_tail);
+ err = ldc_rx_reset(lp);
if (err < 0)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
return 0;
}
@@ -1607,7 +1662,7 @@ static int data_ack_nack(struct ldc_channel *lp, struct ldc_packet *p)
return err;
}
if (p->stype & LDC_NACK)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
return 0;
}
@@ -1627,7 +1682,7 @@ static int rx_data_wait(struct ldc_channel *lp, unsigned long cur_head)
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
@@ -1650,7 +1705,7 @@ static int rx_set_head(struct ldc_channel *lp, unsigned long head)
int err = __set_rx_head(lp, head);
if (err < 0)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
lp->rx_head = head;
return 0;
@@ -1689,7 +1744,7 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
&lp->rx_tail,
&lp->chan_state);
if (hv_err)
- return ldc_abort(lp);
+ return LDC_ABORT(lp);
if (lp->chan_state == LDC_CHANNEL_DOWN ||
lp->chan_state == LDC_CHANNEL_RESETTING)
@@ -1733,9 +1788,14 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
lp->rcv_nxt = p->seqid;
+ /*
+ * If this is a control-only packet, there is nothing
+ * else to do but advance the rx queue since the packet
+ * was already processed above.
+ */
if (!(p->type & LDC_DATA)) {
new = rx_advance(lp, new);
- goto no_data;
+ break;
}
if (p->stype & (LDC_ACK | LDC_NACK)) {
err = data_ack_nack(lp, p);
@@ -1900,6 +1960,8 @@ int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
unsigned long flags;
int err;
+ ldcdbg(RX, "%s: entered size=%d\n", __func__, size);
+
if (!buf)
return -EINVAL;
@@ -1915,6 +1977,9 @@ int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size)
spin_unlock_irqrestore(&lp->lock, flags);
+ ldcdbg(RX, "%s: mode=%d, head=%lu, tail=%lu rv=%d\n", __func__,
+ lp->cfg.mode, lp->rx_head, lp->rx_tail, err);
+
return err;
}
EXPORT_SYMBOL(ldc_read);
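
A recurring change in ldc.c is the LDC_ABORT()/ldc_print() pair: the macros splice the caller's __func__ into a shared helper, so every abort and state dump is attributed to its call site without each site building its own message. The same pattern in a self-contained form; the channel type and names are made up for the sketch:

    #include <stdio.h>

    struct channel {
        unsigned long id;
        int state;
    };

    /* Shared helpers take the caller's name explicitly... */
    static void __chan_print(struct channel *c, const char *caller)
    {
        printf("%s: id=%lu state=%d\n", caller, c->id, c->state);
    }

    static int chan_abort(struct channel *c, const char *msg)
    {
        printf("ABORT[%s] on channel %lu\n", msg, c->id);
        c->state = -1;
        return -1;
    }

    /* ...and the macros fill it in with __func__, as LDC_ABORT()/ldc_print() do. */
    #define chan_print(c)  __chan_print((c), __func__)
    #define CHAN_ABORT(c)  chan_abort((c), __func__)

    static int process_frame(struct channel *c, int bad)
    {
        if (bad)
            return CHAN_ABORT(c);   /* log line names process_frame automatically */
        chan_print(c);
        return 0;
    }

    int main(void)
    {
        struct channel c = { .id = 7, .state = 0 };

        process_frame(&c, 0);
        process_frame(&c, 1);
        return 0;
    }
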
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index c0765bbf60ea..e4b4e790bf89 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -75,6 +75,74 @@ struct mdesc_handle {
struct mdesc_hdr mdesc;
};
+typedef int (*mdesc_node_info_get_f)(struct mdesc_handle *, u64,
+ union md_node_info *);
+typedef void (*mdesc_node_info_rel_f)(union md_node_info *);
+typedef bool (*mdesc_node_match_f)(union md_node_info *, union md_node_info *);
+
+struct md_node_ops {
+ char *name;
+ mdesc_node_info_get_f get_info;
+ mdesc_node_info_rel_f rel_info;
+ mdesc_node_match_f node_match;
+};
+
+static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
+ union md_node_info *node_info);
+static void rel_vdev_port_node_info(union md_node_info *node_info);
+static bool vdev_port_node_match(union md_node_info *a_node_info,
+ union md_node_info *b_node_info);
+
+static int get_ds_port_node_info(struct mdesc_handle *md, u64 node,
+ union md_node_info *node_info);
+static void rel_ds_port_node_info(union md_node_info *node_info);
+static bool ds_port_node_match(union md_node_info *a_node_info,
+ union md_node_info *b_node_info);
+
+/* supported node types which can be registered */
+static struct md_node_ops md_node_ops_table[] = {
+ {"virtual-device-port", get_vdev_port_node_info,
+ rel_vdev_port_node_info, vdev_port_node_match},
+ {"domain-services-port", get_ds_port_node_info,
+ rel_ds_port_node_info, ds_port_node_match},
+ {NULL, NULL, NULL, NULL}
+};
+
+static void mdesc_get_node_ops(const char *node_name,
+ mdesc_node_info_get_f *get_info_f,
+ mdesc_node_info_rel_f *rel_info_f,
+ mdesc_node_match_f *match_f)
+{
+ int i;
+
+ if (get_info_f)
+ *get_info_f = NULL;
+
+ if (rel_info_f)
+ *rel_info_f = NULL;
+
+ if (match_f)
+ *match_f = NULL;
+
+ if (!node_name)
+ return;
+
+ for (i = 0; md_node_ops_table[i].name != NULL; i++) {
+ if (strcmp(md_node_ops_table[i].name, node_name) == 0) {
+ if (get_info_f)
+ *get_info_f = md_node_ops_table[i].get_info;
+
+ if (rel_info_f)
+ *rel_info_f = md_node_ops_table[i].rel_info;
+
+ if (match_f)
+ *match_f = md_node_ops_table[i].node_match;
+
+ break;
+ }
+ }
+}
+
static void mdesc_handle_init(struct mdesc_handle *hp,
unsigned int handle_size,
void *base)
@@ -137,12 +205,10 @@ static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
handle_size = (sizeof(struct mdesc_handle) -
sizeof(struct mdesc_hdr) +
mdesc_size);
+ base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_REPEAT);
+ if (!base)
+ return NULL;
- /*
- * Allocation has to succeed because mdesc update would be missed
- * and such events are not retransmitted.
- */
- base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
addr = (unsigned long)base;
addr = (addr + 15UL) & ~15UL;
hp = (struct mdesc_handle *) addr;
@@ -218,14 +284,31 @@ static struct mdesc_notifier_client *client_list;
void mdesc_register_notifier(struct mdesc_notifier_client *client)
{
+ bool supported = false;
u64 node;
+ int i;
mutex_lock(&mdesc_mutex);
+
+ /* check to see if the node is supported for registration */
+ for (i = 0; md_node_ops_table[i].name != NULL; i++) {
+ if (strcmp(md_node_ops_table[i].name, client->node_name) == 0) {
+ supported = true;
+ break;
+ }
+ }
+
+ if (!supported) {
+ pr_err("MD: %s node not supported\n", client->node_name);
+ mutex_unlock(&mdesc_mutex);
+ return;
+ }
+
client->next = client_list;
client_list = client;
mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
- client->add(cur_mdesc, node);
+ client->add(cur_mdesc, node, client->node_name);
mutex_unlock(&mdesc_mutex);
}
@@ -249,59 +332,145 @@ static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node)
return id;
}
+static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
+ union md_node_info *node_info)
+{
+ const u64 *parent_cfg_hdlp;
+ const char *name;
+ const u64 *idp;
+
+ /*
+ * Virtual device nodes are distinguished by:
+ * 1. "id" property
+ * 2. "name" property
+ * 3. parent node "cfg-handle" property
+ */
+ idp = mdesc_get_property(md, node, "id", NULL);
+ name = mdesc_get_property(md, node, "name", NULL);
+ parent_cfg_hdlp = parent_cfg_handle(md, node);
+
+ if (!idp || !name || !parent_cfg_hdlp)
+ return -1;
+
+ node_info->vdev_port.id = *idp;
+ node_info->vdev_port.name = kstrdup_const(name, GFP_KERNEL);
+ node_info->vdev_port.parent_cfg_hdl = *parent_cfg_hdlp;
+
+ return 0;
+}
+
+static void rel_vdev_port_node_info(union md_node_info *node_info)
+{
+ if (node_info && node_info->vdev_port.name) {
+ kfree_const(node_info->vdev_port.name);
+ node_info->vdev_port.name = NULL;
+ }
+}
+
+static bool vdev_port_node_match(union md_node_info *a_node_info,
+ union md_node_info *b_node_info)
+{
+ if (a_node_info->vdev_port.id != b_node_info->vdev_port.id)
+ return false;
+
+ if (a_node_info->vdev_port.parent_cfg_hdl !=
+ b_node_info->vdev_port.parent_cfg_hdl)
+ return false;
+
+ if (strncmp(a_node_info->vdev_port.name,
+ b_node_info->vdev_port.name, MDESC_MAX_STR_LEN) != 0)
+ return false;
+
+ return true;
+}
+
+static int get_ds_port_node_info(struct mdesc_handle *md, u64 node,
+ union md_node_info *node_info)
+{
+ const u64 *idp;
+
+ /* DS port nodes use the "id" property to distinguish them */
+ idp = mdesc_get_property(md, node, "id", NULL);
+ if (!idp)
+ return -1;
+
+ node_info->ds_port.id = *idp;
+
+ return 0;
+}
+
+static void rel_ds_port_node_info(union md_node_info *node_info)
+{
+}
+
+static bool ds_port_node_match(union md_node_info *a_node_info,
+ union md_node_info *b_node_info)
+{
+ if (a_node_info->ds_port.id != b_node_info->ds_port.id)
+ return false;
+
+ return true;
+}
+
/* Run 'func' on nodes which are in A but not in B. */
static void invoke_on_missing(const char *name,
struct mdesc_handle *a,
struct mdesc_handle *b,
- void (*func)(struct mdesc_handle *, u64))
+ void (*func)(struct mdesc_handle *, u64,
+ const char *node_name))
{
- u64 node;
+ mdesc_node_info_get_f get_info_func;
+ mdesc_node_info_rel_f rel_info_func;
+ mdesc_node_match_f node_match_func;
+ union md_node_info a_node_info;
+ union md_node_info b_node_info;
+ bool found;
+ u64 a_node;
+ u64 b_node;
+ int rv;
- mdesc_for_each_node_by_name(a, node, name) {
- int found = 0, is_vdc_port = 0;
- const char *name_prop;
- const u64 *id;
- u64 fnode;
-
- name_prop = mdesc_get_property(a, node, "name", NULL);
- if (name_prop && !strcmp(name_prop, "vdc-port")) {
- is_vdc_port = 1;
- id = parent_cfg_handle(a, node);
- } else
- id = mdesc_get_property(a, node, "id", NULL);
-
- if (!id) {
- printk(KERN_ERR "MD: Cannot find ID for %s node.\n",
- (name_prop ? name_prop : name));
+ /*
+ * Find the get_info, rel_info and node_match ops for the given
+ * node name
+ */
+ mdesc_get_node_ops(name, &get_info_func, &rel_info_func,
+ &node_match_func);
+
+ /* If we didn't find a match, the node type is not supported */
+ if (!get_info_func || !rel_info_func || !node_match_func) {
+ pr_err("MD: %s node type is not supported\n", name);
+ return;
+ }
+
+ mdesc_for_each_node_by_name(a, a_node, name) {
+ found = false;
+
+ rv = get_info_func(a, a_node, &a_node_info);
+ if (rv != 0) {
+ pr_err("MD: Cannot find 1 or more required match properties for %s node.\n",
+ name);
continue;
}
- mdesc_for_each_node_by_name(b, fnode, name) {
- const u64 *fid;
-
- if (is_vdc_port) {
- name_prop = mdesc_get_property(b, fnode,
- "name", NULL);
- if (!name_prop ||
- strcmp(name_prop, "vdc-port"))
- continue;
- fid = parent_cfg_handle(b, fnode);
- if (!fid) {
- printk(KERN_ERR "MD: Cannot find ID "
- "for vdc-port node.\n");
- continue;
- }
- } else
- fid = mdesc_get_property(b, fnode,
- "id", NULL);
-
- if (*id == *fid) {
- found = 1;
+ /* Check each node in B for node matching a_node */
+ mdesc_for_each_node_by_name(b, b_node, name) {
+ rv = get_info_func(b, b_node, &b_node_info);
+ if (rv != 0)
+ continue;
+
+ if (node_match_func(&a_node_info, &b_node_info)) {
+ found = true;
+ rel_info_func(&b_node_info);
break;
}
+
+ rel_info_func(&b_node_info);
}
+
+ rel_info_func(&a_node_info);
+
if (!found)
- func(a, node);
+ func(a, a_node, name);
}
}
@@ -367,6 +536,76 @@ out:
mutex_unlock(&mdesc_mutex);
}
+u64 mdesc_get_node(struct mdesc_handle *hp, const char *node_name,
+ union md_node_info *node_info)
+{
+ mdesc_node_info_get_f get_info_func;
+ mdesc_node_info_rel_f rel_info_func;
+ mdesc_node_match_f node_match_func;
+ union md_node_info hp_node_info;
+ u64 hp_node;
+ int rv;
+
+ if (hp == NULL || node_name == NULL || node_info == NULL)
+ return MDESC_NODE_NULL;
+
+ /* Find the ops for the given node name */
+ mdesc_get_node_ops(node_name, &get_info_func, &rel_info_func,
+ &node_match_func);
+
+ /* If we didn't find ops for the given node name, it is not supported */
+ if (!get_info_func || !rel_info_func || !node_match_func) {
+ pr_err("MD: %s node is not supported\n", node_name);
+ return -EINVAL;
+ }
+
+ mdesc_for_each_node_by_name(hp, hp_node, node_name) {
+ rv = get_info_func(hp, hp_node, &hp_node_info);
+ if (rv != 0)
+ continue;
+
+ if (node_match_func(node_info, &hp_node_info))
+ break;
+
+ rel_info_func(&hp_node_info);
+ }
+
+ rel_info_func(&hp_node_info);
+
+ return hp_node;
+}
+EXPORT_SYMBOL(mdesc_get_node);
+
+int mdesc_get_node_info(struct mdesc_handle *hp, u64 node,
+ const char *node_name, union md_node_info *node_info)
+{
+ mdesc_node_info_get_f get_info_func;
+ int rv;
+
+ if (hp == NULL || node == MDESC_NODE_NULL ||
+ node_name == NULL || node_info == NULL)
+ return -EINVAL;
+
+ /* Find the get_info op for the given node name */
+ mdesc_get_node_ops(node_name, &get_info_func, NULL, NULL);
+
+ /* If we didn't find a get_info_func, the node name is not supported */
+ if (get_info_func == NULL) {
+ pr_err("MD: %s node is not supported\n", node_name);
+ return -EINVAL;
+ }
+
+ rv = get_info_func(hp, node, node_info);
+ if (rv != 0) {
+ pr_err("MD: Cannot find 1 or more required match properties for %s node.\n",
+ node_name);
+ return -1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mdesc_get_node_info);
+
static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
return (struct mdesc_elem *) (mdesc + 1);
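
The rewritten mdesc.c replaces the ad-hoc vdc-port special casing with md_node_ops_table: node types are looked up by name in a NULL-terminated table and dispatched to per-type get_info/rel_info/node_match callbacks, so supporting a new node type means adding one table row. A minimal standalone version of that dispatch-by-name pattern; the callbacks here are placeholders:

    #include <stdio.h>
    #include <string.h>

    struct node_ops {
        const char *name;
        int (*get_info)(const char *node);
    };

    static int get_vdev_port_info(const char *node)
    {
        printf("virtual-device-port: %s\n", node);
        return 0;
    }

    static int get_ds_port_info(const char *node)
    {
        printf("domain-services-port: %s\n", node);
        return 0;
    }

    /* Supported node types, terminated by a NULL sentinel row. */
    static const struct node_ops node_ops_table[] = {
        { "virtual-device-port",  get_vdev_port_info },
        { "domain-services-port", get_ds_port_info },
        { NULL, NULL }
    };

    static const struct node_ops *lookup_node_ops(const char *name)
    {
        for (int i = 0; node_ops_table[i].name; i++)
            if (strcmp(node_ops_table[i].name, name) == 0)
                return &node_ops_table[i];
        return NULL;   /* unsupported node type */
    }

    int main(void)
    {
        const struct node_ops *ops = lookup_node_ops("domain-services-port");

        if (ops)
            ops->get_info("ds-port@0");
        return 0;
    }
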
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 68bec7c97cb8..24f21c726dfa 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -24,6 +24,7 @@
#include "pci_impl.h"
#include "iommu_common.h"
+#include "kernel.h"
#include "pci_sun4v.h"
@@ -412,12 +413,12 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
bad:
if (printk_ratelimit())
WARN_ON(1);
- return DMA_ERROR_CODE;
+ return SPARC_MAPPING_ERROR;
iommu_map_fail:
local_irq_restore(flags);
iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
- return DMA_ERROR_CODE;
+ return SPARC_MAPPING_ERROR;
}
static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
@@ -590,7 +591,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
if (outcount < incount) {
outs = sg_next(outs);
- outs->dma_address = DMA_ERROR_CODE;
+ outs->dma_address = SPARC_MAPPING_ERROR;
outs->dma_length = 0;
}
@@ -607,7 +608,7 @@ iommu_map_failed:
iommu_tbl_range_free(tbl, vaddr, npages,
IOMMU_ERROR_CODE);
/* XXX demap? XXX */
- s->dma_address = DMA_ERROR_CODE;
+ s->dma_address = SPARC_MAPPING_ERROR;
s->dma_length = 0;
}
if (s == outs)
@@ -669,6 +670,26 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
local_irq_restore(flags);
}
+static int dma_4v_supported(struct device *dev, u64 device_mask)
+{
+ struct iommu *iommu = dev->archdata.iommu;
+ u64 dma_addr_mask;
+
+ if (device_mask > DMA_BIT_MASK(32) && iommu->atu)
+ dma_addr_mask = iommu->atu->dma_addr_mask;
+ else
+ dma_addr_mask = iommu->dma_addr_mask;
+
+ if ((device_mask & dma_addr_mask) == dma_addr_mask)
+ return 1;
+ return pci64_dma_supported(to_pci_dev(dev), device_mask);
+}
+
+static int dma_4v_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == SPARC_MAPPING_ERROR;
+}
+
static const struct dma_map_ops sun4v_dma_ops = {
.alloc = dma_4v_alloc_coherent,
.free = dma_4v_free_coherent,
@@ -676,6 +697,8 @@ static const struct dma_map_ops sun4v_dma_ops = {
.unmap_page = dma_4v_unmap_page,
.map_sg = dma_4v_map_sg,
.unmap_sg = dma_4v_unmap_sg,
+ .dma_supported = dma_4v_supported,
+ .mapping_error = dma_4v_mapping_error,
};
static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
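
dma_4v_supported() above folds the old global dma_supported() logic into the sun4v ops: when the device asks for more than a 32-bit mask and an ATU is present, the check is made against the ATU's wider address mask, otherwise the legacy IOMMU mask applies. The mask arithmetic in isolation; the values in main() are made up:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    /* Pick the mask to test against, as dma_4v_supported() does. */
    static bool example_dma_supported(uint64_t device_mask, bool have_atu,
                                      uint64_t iommu_mask, uint64_t atu_mask)
    {
        uint64_t dma_addr_mask;

        if (device_mask > DMA_BIT_MASK(32) && have_atu)
            dma_addr_mask = atu_mask;      /* ATU covers the wider address range */
        else
            dma_addr_mask = iommu_mask;    /* legacy 32-bit IOMMU window */

        return (device_mask & dma_addr_mask) == dma_addr_mask;
    }

    int main(void)
    {
        /* 64-bit capable device, ATU present with a 64-bit mask: supported. */
        printf("%d\n", example_dma_supported(DMA_BIT_MASK(64), true,
                                             DMA_BIT_MASK(32), DMA_BIT_MASK(64)));
        return 0;
    }
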
diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c
index f12b23f7b515..3b26cf62df6d 100644
--- a/arch/sparc/kernel/pmc.c
+++ b/arch/sparc/kernel/pmc.c
@@ -71,7 +71,7 @@ static int pmc_probe(struct platform_device *op)
return 0;
}
-static struct of_device_id pmc_match[] = {
+static const struct of_device_id pmc_match[] = {
{
.name = PMC_OBPNAME,
},
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index 20cc5d80a471..baeaeed64993 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -381,7 +381,7 @@ bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
int this_cpu_id;
/* On hypervisor based platforms we interrogate the 'reg'
- * property. On everything else we look for a 'upa-portis',
+ * property. On everything else we look for a 'upa-portid',
* 'portid', or 'cpuid' property.
*/
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 422b17880955..4d9c3e13c150 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -95,7 +95,7 @@ static struct console prom_early_console = {
.index = -1,
};
-/*
+/*
* Process kernel command line switches that are specific to the
* SPARC or that require special low-level processing.
*/
@@ -365,6 +365,7 @@ void __init start_early_boot(void)
}
current_thread_info()->cpu = cpu;
+ time_init_early();
prom_init_report();
start_kernel();
}
@@ -639,7 +640,7 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_BLK_DEV_RAM
rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
- rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
+ rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif
task_thread_info(&init_task)->kregs = &fake_swapper_regs;
@@ -648,7 +649,7 @@ void __init setup_arch(char **cmdline_p)
if (!ic_set_manually) {
phandle chosen = prom_finddevice("/chosen");
u32 cl, sv, gw;
-
+
cl = prom_getintdefault (chosen, "client-ip", 0);
sv = prom_getintdefault (chosen, "server-ip", 0);
gw = prom_getintdefault (chosen, "gateway-ip", 0);
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 9f575dfc2e41..2ce2e7b2abbb 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -298,7 +298,7 @@ static int clock_probe(struct platform_device *op)
return 0;
}
-static struct of_device_id clock_match[] = {
+static const struct of_device_id clock_match[] = {
{
.name = "eeprom",
},
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 98d05de8da66..564f0e46ffd4 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -32,7 +32,6 @@
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/ftrace.h>
@@ -47,14 +46,13 @@
#include <asm/cpudata.h>
#include <linux/uaccess.h>
#include <asm/irq_regs.h>
+#include <asm/cacheflush.h>
#include "entry.h"
+#include "kernel.h"
DEFINE_SPINLOCK(rtc_lock);
-#define TICK_PRIV_BIT (1UL << 63)
-#define TICKCMP_IRQ_BIT (1UL << 63)
-
#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
@@ -164,13 +162,44 @@ static unsigned long tick_add_tick(unsigned long adj)
return new_tick;
}
-static struct sparc64_tick_ops tick_operations __read_mostly = {
+/* Searches for cpu clock frequency with given cpuid in OpenBoot tree */
+static unsigned long cpuid_to_freq(phandle node, int cpuid)
+{
+ bool is_cpu_node = false;
+ unsigned long freq = 0;
+ char type[128];
+
+ if (!node)
+ return freq;
+
+ if (prom_getproperty(node, "device_type", type, sizeof(type)) != -1)
+ is_cpu_node = (strcmp(type, "cpu") == 0);
+
+ /* try upa-portid then cpuid to get cpuid, see prom_64.c */
+ if (is_cpu_node && (prom_getint(node, "upa-portid") == cpuid ||
+ prom_getint(node, "cpuid") == cpuid))
+ freq = prom_getintdefault(node, "clock-frequency", 0);
+ if (!freq)
+ freq = cpuid_to_freq(prom_getchild(node), cpuid);
+ if (!freq)
+ freq = cpuid_to_freq(prom_getsibling(node), cpuid);
+
+ return freq;
+}
+
+static unsigned long tick_get_frequency(void)
+{
+ return cpuid_to_freq(prom_root_node, hard_smp_processor_id());
+}
+
+static struct sparc64_tick_ops tick_operations __cacheline_aligned = {
.name = "tick",
.init_tick = tick_init_tick,
.disable_irq = tick_disable_irq,
.get_tick = tick_get_tick,
.add_tick = tick_add_tick,
.add_compare = tick_add_compare,
+ .get_frequency = tick_get_frequency,
.softint_mask = 1UL << 0,
};
@@ -250,6 +279,11 @@ static int stick_add_compare(unsigned long adj)
return ((long)(new_tick - (orig_tick+adj))) > 0L;
}
+static unsigned long stick_get_frequency(void)
+{
+ return prom_getintdefault(prom_root_node, "stick-frequency", 0);
+}
+
static struct sparc64_tick_ops stick_operations __read_mostly = {
.name = "stick",
.init_tick = stick_init_tick,
@@ -257,6 +291,7 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
.get_tick = stick_get_tick,
.add_tick = stick_add_tick,
.add_compare = stick_add_compare,
+ .get_frequency = stick_get_frequency,
.softint_mask = 1UL << 16,
};
@@ -277,9 +312,6 @@ static struct sparc64_tick_ops stick_operations __read_mostly = {
* 2) write high
* 3) write low
*/
-#define HBIRD_STICKCMP_ADDR 0x1fe0000f060UL
-#define HBIRD_STICK_ADDR 0x1fe0000f070UL
-
static unsigned long __hbird_read_stick(void)
{
unsigned long ret, tmp1, tmp2, tmp3;
@@ -381,6 +413,11 @@ static int hbtick_add_compare(unsigned long adj)
return ((long)(val2 - val)) > 0L;
}
+static unsigned long hbtick_get_frequency(void)
+{
+ return prom_getintdefault(prom_root_node, "stick-frequency", 0);
+}
+
static struct sparc64_tick_ops hbtick_operations __read_mostly = {
.name = "hbtick",
.init_tick = hbtick_init_tick,
@@ -388,11 +425,10 @@ static struct sparc64_tick_ops hbtick_operations __read_mostly = {
.get_tick = hbtick_get_tick,
.add_tick = hbtick_add_tick,
.add_compare = hbtick_add_compare,
+ .get_frequency = hbtick_get_frequency,
.softint_mask = 1UL << 0,
};
-static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
-
unsigned long cmos_regs;
EXPORT_SYMBOL(cmos_regs);
@@ -582,34 +618,17 @@ static int __init clock_init(void)
*/
fs_initcall(clock_init);
-/* This is gets the master TICK_INT timer going. */
-static unsigned long sparc64_init_timers(void)
+/* Return true if this is Hummingbird, aka Ultra-IIe */
+static bool is_hummingbird(void)
{
- struct device_node *dp;
- unsigned long freq;
+ unsigned long ver, manuf, impl;
- dp = of_find_node_by_path("/");
- if (tlb_type == spitfire) {
- unsigned long ver, manuf, impl;
-
- __asm__ __volatile__ ("rdpr %%ver, %0"
- : "=&r" (ver));
- manuf = ((ver >> 48) & 0xffff);
- impl = ((ver >> 32) & 0xffff);
- if (manuf == 0x17 && impl == 0x13) {
- /* Hummingbird, aka Ultra-IIe */
- tick_ops = &hbtick_operations;
- freq = of_getintprop_default(dp, "stick-frequency", 0);
- } else {
- tick_ops = &tick_operations;
- freq = local_cpu_data().clock_tick;
- }
- } else {
- tick_ops = &stick_operations;
- freq = of_getintprop_default(dp, "stick-frequency", 0);
- }
+ __asm__ __volatile__ ("rdpr %%ver, %0"
+ : "=&r" (ver));
+ manuf = ((ver >> 48) & 0xffff);
+ impl = ((ver >> 32) & 0xffff);
- return freq;
+ return (manuf == 0x17 && impl == 0x13);
}
struct freq_table {
@@ -671,12 +690,12 @@ core_initcall(register_sparc64_cpufreq_notifier);
static int sparc64_next_event(unsigned long delta,
struct clock_event_device *evt)
{
- return tick_ops->add_compare(delta) ? -ETIME : 0;
+ return tick_operations.add_compare(delta) ? -ETIME : 0;
}
static int sparc64_timer_shutdown(struct clock_event_device *evt)
{
- tick_ops->disable_irq();
+ tick_operations.disable_irq();
return 0;
}
@@ -693,7 +712,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- unsigned long tick_mask = tick_ops->softint_mask;
+ unsigned long tick_mask = tick_operations.softint_mask;
int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);
@@ -728,7 +747,7 @@ void setup_sparc64_timer(void)
: "=r" (pstate)
: "i" (PSTATE_IE));
- tick_ops->init_tick();
+ tick_operations.init_tick();
/* Restore PSTATE_IE. */
__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
@@ -755,12 +774,10 @@ static unsigned long tb_ticks_per_usec __read_mostly;
void __delay(unsigned long loops)
{
- unsigned long bclock, now;
+ unsigned long bclock = get_tick();
- bclock = tick_ops->get_tick();
- do {
- now = tick_ops->get_tick();
- } while ((now-bclock) < loops);
+ while ((get_tick() - bclock) < loops)
+ ;
}
EXPORT_SYMBOL(__delay);
@@ -772,26 +789,71 @@ EXPORT_SYMBOL(udelay);
static u64 clocksource_tick_read(struct clocksource *cs)
{
- return tick_ops->get_tick();
+ return get_tick();
+}
+
+static void __init get_tick_patch(void)
+{
+ unsigned int *addr, *instr, i;
+ struct get_tick_patch *p;
+
+ if (tlb_type == spitfire && is_hummingbird())
+ return;
+
+ for (p = &__get_tick_patch; p < &__get_tick_patch_end; p++) {
+ instr = (tlb_type == spitfire) ? p->tick : p->stick;
+ addr = (unsigned int *)(unsigned long)p->addr;
+ for (i = 0; i < GET_TICK_NINSTR; i++) {
+ addr[i] = instr[i];
+ /* ensure that address is modified before flush */
+ wmb();
+ flushi(&addr[i]);
+ }
+ }
+}
+
+static void init_tick_ops(struct sparc64_tick_ops *ops)
+{
+ unsigned long freq, quotient, tick;
+
+ freq = ops->get_frequency();
+ quotient = clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
+ tick = ops->get_tick();
+
+ ops->offset = (tick * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT;
+ ops->ticks_per_nsec_quotient = quotient;
+ ops->frequency = freq;
+ tick_operations = *ops;
+ get_tick_patch();
+}
+
+void __init time_init_early(void)
+{
+ if (tlb_type == spitfire) {
+ if (is_hummingbird())
+ init_tick_ops(&hbtick_operations);
+ else
+ init_tick_ops(&tick_operations);
+ } else {
+ init_tick_ops(&stick_operations);
+ }
}
void __init time_init(void)
{
- unsigned long freq = sparc64_init_timers();
+ unsigned long freq;
+ freq = tick_operations.frequency;
tb_ticks_per_usec = freq / USEC_PER_SEC;
- timer_ticks_per_nsec_quotient =
- clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
-
- clocksource_tick.name = tick_ops->name;
+ clocksource_tick.name = tick_operations.name;
clocksource_tick.read = clocksource_tick_read;
clocksource_register_hz(&clocksource_tick, freq);
printk("clocksource: mult[%x] shift[%d]\n",
clocksource_tick.mult, clocksource_tick.shift);
- sparc64_clockevent.name = tick_ops->name;
+ sparc64_clockevent.name = tick_operations.name;
clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4);
sparc64_clockevent.max_delta_ns =
@@ -809,14 +871,21 @@ void __init time_init(void)
unsigned long long sched_clock(void)
{
- unsigned long ticks = tick_ops->get_tick();
+ unsigned long quotient = tick_operations.ticks_per_nsec_quotient;
+ unsigned long offset = tick_operations.offset;
+
+ /* Use barrier so the compiler emits the loads first and overlaps load
+ * latency with reading tick, because reading %tick/%stick is a
+ * post-sync instruction that will flush and restart subsequent
+ * instructions after it commits.
+ */
+ barrier();
- return (ticks * timer_ticks_per_nsec_quotient)
- >> SPARC64_NSEC_PER_CYC_SHIFT;
+ return ((get_tick() * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT) - offset;
}
int read_current_timer(unsigned long *timer_val)
{
- *timer_val = tick_ops->get_tick();
+ *timer_val = get_tick();
return 0;
}
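
The sched_clock() hunk above converts raw %tick/%stick values to nanoseconds with a precomputed multiplier and the SPARC64_NSEC_PER_CYC_SHIFT shift, then subtracts the offset captured in init_tick_ops(). Below is a standalone sketch of that arithmetic with an assumed 1 GHz tick frequency and illustrative tick values (none of the numbers come from this patch; it also assumes 64-bit unsigned long, as on sparc64):

#include <stdio.h>

#define SPARC64_NSEC_PER_CYC_SHIFT 10UL	/* same shift the kernel uses */

/* simplified clocksource_hz2mult(): mult so that (cycles * mult) >> shift == ns */
static unsigned long hz2mult(unsigned long hz, unsigned long shift)
{
	return (((unsigned long long)1000000000UL << shift) + hz / 2) / hz;
}

int main(void)
{
	unsigned long freq     = 1000000000UL;		/* pretend 1 GHz %stick */
	unsigned long quotient = hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
	unsigned long boot     = 12345678UL;		/* tick sampled in init_tick_ops() */
	unsigned long offset   = (boot * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT;
	unsigned long now      = boot + 5000;		/* 5000 cycles later */

	/* mirrors: ((get_tick() * quotient) >> SHIFT) - offset */
	printf("%lu ns since the boot sample\n",
	       ((now * quotient) >> SPARC64_NSEC_PER_CYC_SHIFT) - offset);
	return 0;
}

The offset pins the count to zero at the moment init_tick_ops() sampled the counter, so sched_clock() starts near zero instead of at the raw counter value.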
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index 7dd9b57f3a61..1c8763c9c52b 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -70,15 +70,26 @@ static int vio_device_probe(struct device *dev)
struct vio_dev *vdev = to_vio_dev(dev);
struct vio_driver *drv = to_vio_driver(dev->driver);
const struct vio_device_id *id;
- int error = -ENODEV;
- if (drv->probe) {
- id = vio_match_device(drv->id_table, vdev);
- if (id)
- error = drv->probe(vdev, id);
+ if (!drv->probe)
+ return -ENODEV;
+
+ id = vio_match_device(drv->id_table, vdev);
+ if (!id)
+ return -ENODEV;
+
+ /* alloc irqs (unless the driver specified not to) */
+ if (!drv->no_irq) {
+ if (vdev->tx_irq == 0 && vdev->tx_ino != ~0UL)
+ vdev->tx_irq = sun4v_build_virq(vdev->cdev_handle,
+ vdev->tx_ino);
+
+ if (vdev->rx_irq == 0 && vdev->rx_ino != ~0UL)
+ vdev->rx_irq = sun4v_build_virq(vdev->cdev_handle,
+ vdev->rx_ino);
}
- return error;
+ return drv->probe(vdev, id);
}
static int vio_device_remove(struct device *dev)
@@ -86,8 +97,15 @@ static int vio_device_remove(struct device *dev)
struct vio_dev *vdev = to_vio_dev(dev);
struct vio_driver *drv = to_vio_driver(dev->driver);
- if (drv->remove)
+ if (drv->remove) {
+ /*
+ * Ideally, we would remove/deallocate tx/rx virqs
+ * here - however, there are currently no support
+		 * routines to do so. TBD
+ */
+
return drv->remove(vdev);
+ }
return 1;
}
@@ -185,11 +203,58 @@ static struct device_node *cdev_node;
static struct vio_dev *root_vdev;
static u64 cdev_cfg_handle;
+static const u64 *vio_cfg_handle(struct mdesc_handle *hp, u64 node)
+{
+ const u64 *cfg_handle = NULL;
+ u64 a;
+
+ mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+ u64 target;
+
+ target = mdesc_arc_target(hp, a);
+ cfg_handle = mdesc_get_property(hp, target,
+ "cfg-handle", NULL);
+ if (cfg_handle)
+ break;
+ }
+
+ return cfg_handle;
+}
+
+/**
+ * vio_vdev_node() - Find VDEV node in MD
+ * @hp: Handle to the MD
+ * @vdev: Pointer to VDEV
+ *
+ * Find the node in the current MD which matches the given vio_dev. This
+ * must be done dynamically since the node value can change if the MD
+ * is updated.
+ *
+ * NOTE: the MD must be locked, using mdesc_grab(), when calling this routine
+ *
+ * Return: The VDEV node in MDESC
+ */
+u64 vio_vdev_node(struct mdesc_handle *hp, struct vio_dev *vdev)
+{
+ u64 node;
+
+ if (vdev == NULL)
+ return MDESC_NODE_NULL;
+
+ node = mdesc_get_node(hp, (const char *)vdev->node_name,
+ &vdev->md_node_info);
+
+ return node;
+}
+
static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp,
struct vio_dev *vdev)
{
u64 a;
+ vdev->tx_ino = ~0UL;
+ vdev->rx_ino = ~0UL;
+ vdev->channel_id = ~0UL;
mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
const u64 *chan_id;
const u64 *irq;
@@ -199,18 +264,18 @@ static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp,
irq = mdesc_get_property(hp, target, "tx-ino", NULL);
if (irq)
- vdev->tx_irq = sun4v_build_virq(cdev_cfg_handle, *irq);
+ vdev->tx_ino = *irq;
irq = mdesc_get_property(hp, target, "rx-ino", NULL);
- if (irq) {
- vdev->rx_irq = sun4v_build_virq(cdev_cfg_handle, *irq);
+ if (irq)
vdev->rx_ino = *irq;
- }
chan_id = mdesc_get_property(hp, target, "id", NULL);
if (chan_id)
vdev->channel_id = *chan_id;
}
+
+ vdev->cdev_handle = cdev_cfg_handle;
}
int vio_set_intr(unsigned long dev_ino, int state)
@@ -223,14 +288,14 @@ int vio_set_intr(unsigned long dev_ino, int state)
EXPORT_SYMBOL(vio_set_intr);
static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
+ const char *node_name,
struct device *parent)
{
- const char *type, *compat, *bus_id_name;
+ const char *type, *compat;
struct device_node *dp;
struct vio_dev *vdev;
int err, tlen, clen;
const u64 *id, *cfg_handle;
- u64 a;
type = mdesc_get_property(hp, mp, "device-type", &tlen);
if (!type) {
@@ -240,7 +305,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
tlen = strlen(type) + 1;
}
}
- if (tlen > VIO_MAX_TYPE_LEN) {
+ if (tlen > VIO_MAX_TYPE_LEN || strlen(type) >= VIO_MAX_TYPE_LEN) {
printk(KERN_ERR "VIO: Type string [%s] is too long.\n",
type);
return NULL;
@@ -248,31 +313,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
id = mdesc_get_property(hp, mp, "id", NULL);
- cfg_handle = NULL;
- mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
- u64 target;
-
- target = mdesc_arc_target(hp, a);
- cfg_handle = mdesc_get_property(hp, target,
- "cfg-handle", NULL);
- if (cfg_handle)
- break;
- }
-
- bus_id_name = type;
- if (!strcmp(type, "domain-services-port"))
- bus_id_name = "ds";
-
- /*
- * 20 char is the old driver-core name size limit, which is no more.
- * This check can probably be removed after review and possible
- * adaption of the vio users name length handling.
- */
- if (strlen(bus_id_name) >= 20 - 4) {
- printk(KERN_ERR "VIO: bus_id_name [%s] is too long.\n",
- bus_id_name);
- return NULL;
- }
+ cfg_handle = vio_cfg_handle(hp, mp);
compat = mdesc_get_property(hp, mp, "device-type", &clen);
if (!compat) {
@@ -297,25 +338,23 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
memset(vdev->compat, 0, sizeof(vdev->compat));
vdev->compat_len = clen;
- vdev->channel_id = ~0UL;
- vdev->tx_irq = ~0;
- vdev->rx_irq = ~0;
+ vdev->port_id = ~0UL;
+ vdev->tx_irq = 0;
+ vdev->rx_irq = 0;
vio_fill_channel_info(hp, mp, vdev);
if (!id) {
- dev_set_name(&vdev->dev, "%s", bus_id_name);
+ dev_set_name(&vdev->dev, "%s", type);
vdev->dev_no = ~(u64)0;
- vdev->id = ~(u64)0;
} else if (!cfg_handle) {
- dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
+ dev_set_name(&vdev->dev, "%s-%llu", type, *id);
vdev->dev_no = *id;
- vdev->id = ~(u64)0;
} else {
- dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
+ dev_set_name(&vdev->dev, "%s-%llu-%llu", type,
*cfg_handle, *id);
vdev->dev_no = *cfg_handle;
- vdev->id = *id;
+ vdev->port_id = *id;
}
vdev->dev.parent = parent;
@@ -337,7 +376,26 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
}
vdev->dp = dp;
- printk(KERN_INFO "VIO: Adding device %s\n", dev_name(&vdev->dev));
+ /*
+ * node_name is NULL for the parent/channel-devices node and
+ * the parent doesn't require the MD node info.
+ */
+ if (node_name != NULL) {
+ (void) snprintf(vdev->node_name, VIO_MAX_NAME_LEN, "%s",
+ node_name);
+
+ err = mdesc_get_node_info(hp, mp, node_name,
+ &vdev->md_node_info);
+ if (err) {
+ pr_err("VIO: Could not get MD node info %s, err=%d\n",
+ dev_name(&vdev->dev), err);
+ kfree(vdev);
+ return NULL;
+ }
+ }
+
+ pr_info("VIO: Adding device %s (tx_ino = %llx, rx_ino = %llx)\n",
+ dev_name(&vdev->dev), vdev->tx_ino, vdev->rx_ino);
err = device_register(&vdev->dev);
if (err) {
@@ -353,73 +411,42 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
return vdev;
}
-static void vio_add(struct mdesc_handle *hp, u64 node)
+static void vio_add(struct mdesc_handle *hp, u64 node,
+ const char *node_name)
{
- (void) vio_create_one(hp, node, &root_vdev->dev);
+ (void) vio_create_one(hp, node, node_name, &root_vdev->dev);
}
-struct vio_md_node_query {
- const char *type;
- u64 dev_no;
- u64 id;
+struct vio_remove_node_data {
+ struct mdesc_handle *hp;
+ u64 node;
};
static int vio_md_node_match(struct device *dev, void *arg)
{
- struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
struct vio_dev *vdev = to_vio_dev(dev);
+ struct vio_remove_node_data *node_data;
+ u64 node;
- if (vdev->dev_no != query->dev_no)
- return 0;
- if (vdev->id != query->id)
- return 0;
- if (strcmp(vdev->type, query->type))
- return 0;
+ node_data = (struct vio_remove_node_data *)arg;
- return 1;
+ node = vio_vdev_node(node_data->hp, vdev);
+
+ if (node == node_data->node)
+ return 1;
+ else
+ return 0;
}
-static void vio_remove(struct mdesc_handle *hp, u64 node)
+static void vio_remove(struct mdesc_handle *hp, u64 node, const char *node_name)
{
- const char *type;
- const u64 *id, *cfg_handle;
- u64 a;
- struct vio_md_node_query query;
+ struct vio_remove_node_data node_data;
struct device *dev;
- type = mdesc_get_property(hp, node, "device-type", NULL);
- if (!type) {
- type = mdesc_get_property(hp, node, "name", NULL);
- if (!type)
- type = mdesc_node_name(hp, node);
- }
-
- query.type = type;
-
- id = mdesc_get_property(hp, node, "id", NULL);
- cfg_handle = NULL;
- mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
- u64 target;
-
- target = mdesc_arc_target(hp, a);
- cfg_handle = mdesc_get_property(hp, target,
- "cfg-handle", NULL);
- if (cfg_handle)
- break;
- }
-
- if (!id) {
- query.dev_no = ~(u64)0;
- query.id = ~(u64)0;
- } else if (!cfg_handle) {
- query.dev_no = *id;
- query.id = ~(u64)0;
- } else {
- query.dev_no = *cfg_handle;
- query.id = *id;
- }
+ node_data.hp = hp;
+ node_data.node = node;
- dev = device_find_child(&root_vdev->dev, &query,
+ dev = device_find_child(&root_vdev->dev, (void *)&node_data,
vio_md_node_match);
if (dev) {
printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
@@ -427,15 +454,7 @@ static void vio_remove(struct mdesc_handle *hp, u64 node)
device_unregister(dev);
put_device(dev);
} else {
- if (!id)
- printk(KERN_ERR "VIO: Removed unknown %s node.\n",
- type);
- else if (!cfg_handle)
- printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
- type, *id);
- else
- printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
- type, *cfg_handle, *id);
+ pr_err("VIO: %s node not found in MDESC\n", node_name);
}
}
@@ -450,7 +469,8 @@ static struct mdesc_notifier_client vio_device_notifier = {
 * under "openboot" that we should not mess with as apparently that is
* reserved exclusively for OBP use.
*/
-static void vio_add_ds(struct mdesc_handle *hp, u64 node)
+static void vio_add_ds(struct mdesc_handle *hp, u64 node,
+ const char *node_name)
{
int found;
u64 a;
@@ -467,7 +487,7 @@ static void vio_add_ds(struct mdesc_handle *hp, u64 node)
}
if (found)
- (void) vio_create_one(hp, node, &root_vdev->dev);
+ (void) vio_create_one(hp, node, node_name, &root_vdev->dev);
}
static struct mdesc_notifier_client vio_ds_notifier = {
@@ -534,7 +554,7 @@ static int __init vio_init(void)
cdev_cfg_handle = *cfg_handle;
- root_vdev = vio_create_one(hp, root, NULL);
+ root_vdev = vio_create_one(hp, root, NULL, NULL);
err = -ENODEV;
if (!root_vdev) {
printk(KERN_ERR "VIO: Could not create root device.\n");
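
With the probe path above, virtual IRQs are now built in vio_device_probe() rather than while parsing the MD, and a driver can opt out entirely via the no_irq flag seen in that hunk. A hypothetical driver skeleton showing how the flag would be set; the example_* identifiers and the exact struct vio_driver field layout are assumptions, not taken from this patch:

static int example_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	/* tx_irq/rx_irq stay 0 because .no_irq is set below */
	return 0;
}

static int example_remove(struct vio_dev *vdev)
{
	return 0;
}

static const struct vio_device_id example_match[] = {
	{ .type = "example-device" },
	{},
};

static struct vio_driver example_driver = {
	.id_table = example_match,
	.probe    = example_probe,
	.remove   = example_remove,
	.no_irq   = true,	/* skip the sun4v_build_virq() calls in probe */
	.name     = "example",
};

A real driver would hand this to vio_register_driver() as usual.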
diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
index b30b30ab3ddd..d4f13c037a40 100644
--- a/arch/sparc/kernel/viohs.c
+++ b/arch/sparc/kernel/viohs.c
@@ -223,6 +223,9 @@ static int send_rdx(struct vio_driver_state *vio)
static int send_attr(struct vio_driver_state *vio)
{
+ if (!vio->ops)
+ return -EINVAL;
+
return vio->ops->send_attr(vio);
}
@@ -283,6 +286,7 @@ static int process_ver_info(struct vio_driver_state *vio,
ver.minor = vap->minor;
pkt->minor = ver.minor;
pkt->tag.stype = VIO_SUBTYPE_ACK;
+ pkt->dev_class = vio->dev_class;
viodbg(HS, "SEND VERSION ACK maj[%u] min[%u]\n",
pkt->major, pkt->minor);
err = send_ctrl(vio, &pkt->tag, sizeof(*pkt));
@@ -374,6 +378,9 @@ static int process_attr(struct vio_driver_state *vio, void *pkt)
if (!(vio->hs_state & VIO_HS_GOTVERS))
return handshake_failure(vio);
+ if (!vio->ops)
+ return 0;
+
err = vio->ops->handle_attr(vio, pkt);
if (err < 0) {
return handshake_failure(vio);
@@ -388,6 +395,7 @@ static int process_attr(struct vio_driver_state *vio, void *pkt)
vio->hs_state |= VIO_HS_SENT_DREG;
}
}
+
return 0;
}
@@ -647,10 +655,13 @@ int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt)
err = process_unknown(vio, pkt);
break;
}
+
if (!err &&
vio->hs_state != prev_state &&
- (vio->hs_state & VIO_HS_COMPLETE))
- vio->ops->handshake_complete(vio);
+ (vio->hs_state & VIO_HS_COMPLETE)) {
+ if (vio->ops)
+ vio->ops->handshake_complete(vio);
+ }
return err;
}
@@ -765,7 +776,11 @@ void vio_port_up(struct vio_driver_state *vio)
}
if (!err) {
- err = ldc_connect(vio->lp);
+ if (ldc_mode(vio->lp) == LDC_MODE_RAW)
+ ldc_set_state(vio->lp, LDC_STATE_CONNECTED);
+ else
+ err = ldc_connect(vio->lp);
+
if (err)
printk(KERN_WARNING "%s: Port %lu connect failed, "
"err=%d\n",
@@ -805,8 +820,7 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
return -EINVAL;
}
- if (!ops->send_attr ||
- !ops->handle_attr ||
+ if (!ops || !ops->send_attr || !ops->handle_attr ||
!ops->handshake_complete)
return -EINVAL;
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 572db686f845..03b3d65d1266 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -149,6 +149,11 @@ SECTIONS
*(.sun_m7_2insn_patch)
__sun_m7_2insn_patch_end = .;
}
+ .get_tick_patch : {
+ __get_tick_patch = .;
+ *(.get_tick_patch)
+ __get_tick_patch_end = .;
+ }
PERCPU_SECTION(SMP_CACHE_BYTES)
#ifdef CONFIG_JUMP_LABEL
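
The new .get_tick_patch section above collects per-call-site patch records that the get_tick_patch() loop in time_64.c (earlier in this diff) walks at boot, copying either the %tick or the %stick instruction sequence over the generic call. Roughly the record shape that loop assumes; the field names follow the loop's code, but the real header (including the value of GET_TICK_NINSTR) is not shown in this diff:

/* One entry per patched get_tick() call site (sketch, not the real header). */
struct get_tick_patch {
	unsigned int addr;			/* address of the instructions to overwrite */
	unsigned int tick[GET_TICK_NINSTR];	/* replacement reading %tick (spitfire) */
	unsigned int stick[GET_TICK_NINSTR];	/* replacement reading %stick (all others) */
};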
diff --git a/arch/sparc/lib/hweight.S b/arch/sparc/lib/hweight.S
index f9985f129fb6..d21cf20e5c1e 100644
--- a/arch/sparc/lib/hweight.S
+++ b/arch/sparc/lib/hweight.S
@@ -4,9 +4,9 @@
.text
.align 32
ENTRY(__arch_hweight8)
- ba,pt %xcc, __sw_hweight8
+ sethi %hi(__sw_hweight8), %g1
+ jmpl %g1 + %lo(__sw_hweight8), %g0
nop
- nop
ENDPROC(__arch_hweight8)
EXPORT_SYMBOL(__arch_hweight8)
.section .popc_3insn_patch, "ax"
@@ -17,9 +17,9 @@ EXPORT_SYMBOL(__arch_hweight8)
.previous
ENTRY(__arch_hweight16)
- ba,pt %xcc, __sw_hweight16
+ sethi %hi(__sw_hweight16), %g1
+ jmpl %g1 + %lo(__sw_hweight16), %g0
nop
- nop
ENDPROC(__arch_hweight16)
EXPORT_SYMBOL(__arch_hweight16)
.section .popc_3insn_patch, "ax"
@@ -30,9 +30,9 @@ EXPORT_SYMBOL(__arch_hweight16)
.previous
ENTRY(__arch_hweight32)
- ba,pt %xcc, __sw_hweight32
+ sethi %hi(__sw_hweight32), %g1
+ jmpl %g1 + %lo(__sw_hweight32), %g0
nop
- nop
ENDPROC(__arch_hweight32)
EXPORT_SYMBOL(__arch_hweight32)
.section .popc_3insn_patch, "ax"
@@ -43,9 +43,9 @@ EXPORT_SYMBOL(__arch_hweight32)
.previous
ENTRY(__arch_hweight64)
- ba,pt %xcc, __sw_hweight64
+	sethi	%hi(__sw_hweight64), %g1
+	jmpl	%g1 + %lo(__sw_hweight64), %g0
nop
- nop
ENDPROC(__arch_hweight64)
EXPORT_SYMBOL(__arch_hweight64)
.section .popc_3insn_patch, "ax"
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 88855e383b34..28ee8d8ffa07 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -277,7 +277,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
return pte;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index a803f6bb4d92..d0c79c1c54b4 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -327,7 +327,6 @@ extern unsigned long raw_copy_in_user(
extern long strnlen_user(const char __user *str, long n);
-extern long strlen_user(const char __user *str);
extern long strncpy_from_user(char *dst, const char __user *src, long);
/**
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 569bb6dd154a..f2abedc8a080 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -317,18 +317,6 @@ static void tile_dma_sync_sg_for_device(struct device *dev,
}
}
-static inline int
-tile_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-static inline int
-tile_dma_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
static const struct dma_map_ops tile_default_dma_map_ops = {
.alloc = tile_dma_alloc_coherent,
.free = tile_dma_free_coherent,
@@ -340,8 +328,6 @@ static const struct dma_map_ops tile_default_dma_map_ops = {
.sync_single_for_device = tile_dma_sync_single_for_device,
.sync_sg_for_cpu = tile_dma_sync_sg_for_cpu,
.sync_sg_for_device = tile_dma_sync_sg_for_device,
- .mapping_error = tile_dma_mapping_error,
- .dma_supported = tile_dma_supported
};
const struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
@@ -504,18 +490,6 @@ static void tile_pci_dma_sync_sg_for_device(struct device *dev,
}
}
-static inline int
-tile_pci_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-static inline int
-tile_pci_dma_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
static const struct dma_map_ops tile_pci_default_dma_map_ops = {
.alloc = tile_pci_dma_alloc_coherent,
.free = tile_pci_dma_free_coherent,
@@ -527,8 +501,6 @@ static const struct dma_map_ops tile_pci_default_dma_map_ops = {
.sync_single_for_device = tile_pci_dma_sync_single_for_device,
.sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
.sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
- .mapping_error = tile_pci_dma_mapping_error,
- .dma_supported = tile_pci_dma_supported
};
const struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
@@ -578,8 +550,6 @@ static const struct dma_map_ops pci_hybrid_dma_ops = {
.sync_single_for_device = tile_pci_dma_sync_single_for_device,
.sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
.sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
- .mapping_error = tile_pci_dma_mapping_error,
- .dma_supported = tile_pci_dma_supported
};
const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
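
The stub .mapping_error/.dma_supported hooks removed above were pure boilerplate: the dma-mapping core already treats a missing hook as "no error" and "mask supported". A simplified view of those fallbacks (paraphrased, not a verbatim copy of include/linux/dma-mapping.h):

/* Simplified fallbacks relied on by the removals above. */
static inline int dma_supported_sketch(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;		/* no hook: accept any mask */
	return ops->dma_supported(dev, mask);
}

static inline int dma_mapping_error_sketch(struct device *dev, dma_addr_t addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, addr);
	return 0;			/* no hook: mappings never report an error */
}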
diff --git a/arch/tile/kernel/vdso/Makefile b/arch/tile/kernel/vdso/Makefile
index c54fff37b5ff..71d5f5d71f79 100644
--- a/arch/tile/kernel/vdso/Makefile
+++ b/arch/tile/kernel/vdso/Makefile
@@ -5,15 +5,14 @@ vdso-syms = rt_sigreturn gettimeofday
obj-vdso = $(patsubst %, v%.o, $(vdso-syms))
# Build rules
-targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds
+targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
# vdso32 is only for tilegx -m32 compat task.
VDSO32-$(CONFIG_COMPAT) := y
-obj-y += vdso.o
+obj-y += vdso.o vdso-syms.o
obj-$(VDSO32-y) += vdso32.o
-extra-y += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
@@ -42,23 +41,22 @@ $(obj)/vdso.o: $(obj)/vdso.so
# link rule for the .so file, .lds has to be first
SYSCFLAGS_vdso.so.dbg = $(c_flags)
-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso)
+$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
-
# We also create a special relocatable object that should mirror the symbol
# table and layout of the linked DSO. With ld -R we can then refer to
# these symbols in the kernel code rather than hand-coded addresses.
-extra-y += vdso-syms.o
-$(obj)/built-in.o: $(obj)/vdso-syms.o
-$(obj)/built-in.o: ld_flags += -R $(obj)/vdso-syms.o
SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
$(call cc-ldoption, -Wl$(comma)--hash-style=both)
-SYSCFLAGS_vdso_syms.o = -r
-$(obj)/vdso-syms.o: $(src)/vdso.lds $(obj)/vrt_sigreturn.o FORCE
+SYSCFLAGS_vdso_dummy.o = -r
+$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/vrt_sigreturn.o FORCE
$(call if_changed,vdsold)
+LDFLAGS_vdso-syms.o := -r -R
+$(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
+ $(call if_changed,ld)
# strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S
@@ -96,17 +94,17 @@ KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_32 += -m32 -fPIC -shared
obj-vdso32 = $(patsubst %, v%32.o, $(vdso-syms))
-obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
targets += $(obj-vdso32) vdso32.so vdso32.so.dbg
+obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
$(obj-vdso32:%=%): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
$(obj-vdso32:%=%): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
-$(obj)/vgettimeofday32.o: $(obj)/vgettimeofday.c
+$(obj)/vgettimeofday32.o: $(obj)/vgettimeofday.c FORCE
$(call if_changed_rule,cc_o_c)
-$(obj)/vrt_sigreturn32.o: $(obj)/vrt_sigreturn.S
+$(obj)/vrt_sigreturn32.o: $(obj)/vrt_sigreturn.S FORCE
$(call if_changed,as_o_S)
# Force dependency
@@ -114,5 +112,5 @@ $(obj)/vdso32.o: $(obj)/vdso32.so
SYSCFLAGS_vdso32.so.dbg = -m32 -shared -s -Wl,-soname=linux-vdso32.so.1 \
$(call cc-ldoption, -Wl$(comma)--hash-style=both)
-$(obj)/vdso32.so.dbg: $(src)/vdso.lds $(obj-vdso32)
+$(obj)/vdso32.so.dbg: $(src)/vdso.lds $(obj-vdso32) FORCE
$(call if_changed,vdsold)
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 03e5cc4e76e4..0986d426a413 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -102,7 +102,8 @@ static pte_t *get_pte(pte_t *base, int index, int level)
return ptep;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
pud_t *pud;
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 492a7361e58e..ec5576fd3a86 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -503,6 +503,17 @@ void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
}
EXPORT_SYMBOL(ioremap_prot);
+#if !defined(CONFIG_PCI) || !defined(CONFIG_TILEGX)
+/* ioremap is conditionally declared in pci_gx.c */
+
+void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
+{
+ return NULL;
+}
+EXPORT_SYMBOL(ioremap);
+
+#endif
+
/* Unmap an MMIO VA mapping. */
void iounmap(volatile void __iomem *addr_in)
{
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index a9bd61820042..2c7f721eccbc 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -255,11 +255,6 @@ int clear_user_proc(void __user *buf, int size)
return clear_user(buf, size);
}
-int strlen_user_proc(char __user *str)
-{
- return strlen_user(str);
-}
-
int cpu(void)
{
return current_thread_info()->cpu;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e767ed24aeb4..94a18681353d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -22,7 +22,7 @@ config X86_64
def_bool y
depends on 64BIT
# Options that are inherently 64-bit kernel only:
- select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
select ARCH_SUPPORTS_INT128
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_SOFT_DIRTY
@@ -54,11 +54,13 @@ config X86
select ARCH_HAS_KCOV if X86_64
select ARCH_HAS_MMIO_FLUSH
select ARCH_HAS_PMEM_API if X86_64
+ select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
select ARCH_HAS_UBSAN_SANITIZE_ALL
+ select ARCH_HAS_ZONE_DEVICE if X86_64
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_MIGHT_HAVE_PC_PARPORT
@@ -72,6 +74,7 @@ config X86
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ select ARCH_WANTS_THP_SWAP if X86_64
select BUILDTIME_EXTABLE_SORT
select CLKEVT_I8253
select CLOCKSOURCE_VALIDATE_LAST_CYCLE
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index ad2db82e9953..1e902f926be3 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -11,6 +11,14 @@ else
KBUILD_DEFCONFIG := $(ARCH)_defconfig
endif
+# For gcc stack alignment is specified with -mpreferred-stack-boundary,
+# clang has the option -mstack-alignment for that purpose.
+ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
+ cc_stack_align_opt := -mpreferred-stack-boundary
+else ifneq ($(call cc-option, -mstack-alignment=4),)
+ cc_stack_align_opt := -mstack-alignment
+endif
+
# How to compile the 16-bit code. Note we always compile for -march=i386;
# that way we can complain to the user if the CPU is insufficient.
#
@@ -24,10 +32,11 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -D__KERNEL__ \
-DDISABLE_BRANCH_PROFILING \
-Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
- -mno-mmx -mno-sse \
- $(call cc-option, -ffreestanding) \
- $(call cc-option, -fno-stack-protector) \
- $(call cc-option, -mpreferred-stack-boundary=2)
+ -mno-mmx -mno-sse
+
+REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
+REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
+REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align_opt)=2)
export REALMODE_CFLAGS
# BITS is used as extension for files which are available in a 32 bit
@@ -64,8 +73,10 @@ ifeq ($(CONFIG_X86_32),y)
# with nonstandard options
KBUILD_CFLAGS += -fno-pic
- # prevent gcc from keeping the stack 16 byte aligned
- KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2)
+ # Align the stack to the register width instead of using the default
+ # alignment of 16 bytes. This reduces stack usage and the number of
+ # alignment instructions.
+ KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align_opt)=2)
# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
# a lot more stack due to the lack of sharing of stacklots:
@@ -97,8 +108,14 @@ else
KBUILD_CFLAGS += $(call cc-option,-mno-80387)
KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387)
- # Use -mpreferred-stack-boundary=3 if supported.
- KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
+ # By default gcc and clang use a stack alignment of 16 bytes for x86.
+ # However the standard kernel entry on x86-64 leaves the stack on an
+ # 8-byte boundary. If the compiler isn't informed about the actual
+ # alignment it will generate extra alignment instructions for the
+ # default alignment which keep the stack *mis*aligned.
+ # Furthermore an alignment to the register width reduces stack usage
+ # and the number of alignment instructions.
+ KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align_opt)=3)
# Use -mskip-rax-setup if supported.
KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index a45eb15b7cf2..f3717d36718a 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -9,7 +9,6 @@ else
tune = $(call cc-option,-mcpu=$(1),$(2))
endif
-align := $(cc-option-align)
cflags-$(CONFIG_M486) += -march=i486
cflags-$(CONFIG_M586) += -march=i586
cflags-$(CONFIG_M586TSC) += -march=i586
@@ -24,11 +23,11 @@ cflags-$(CONFIG_MK6) += -march=k6
 # They make zero difference whatsoever to performance at this time.
cflags-$(CONFIG_MK7) += -march=athlon
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
-cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
-cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+cflags-$(CONFIG_MCRUSOE) += -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
+cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
-cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) -falign-functions=0 -falign-jumps=0 -falign-loops=0
cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
cflags-$(CONFIG_MVIAC7) += -march=i686
cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 139ad7726e10..726355ce8497 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -78,9 +78,6 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
if (image->size != new_size)
return -EINVAL;
- if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
- return -EFAULT;
-
vdso_fix_landing(image, new_vma);
current->mm->context.vdso = (void __user *)new_vma->vm_start;
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 24118c0b4640..5343c19814b3 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -116,7 +116,6 @@ struct compat_statfs {
int f_spare[4];
};
-#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
#define COMPAT_RLIM_INFINITY 0xffffffff
typedef u32 compat_old_sigset_t; /* at least 32 bits */
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 08a0838b83fb..398c79889f5c 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -19,8 +19,6 @@
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
#endif
-#define DMA_ERROR_CODE 0
-
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
@@ -35,9 +33,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
#define arch_dma_alloc_attrs arch_dma_alloc_attrs
-#define HAVE_ARCH_DMA_SUPPORTED 1
-extern int dma_supported(struct device *hwdev, u64 mask);
-
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
unsigned long attrs);
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index 3a106165e03a..535af0f2d8ac 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -85,4 +85,8 @@ static inline void arch_clear_hugepage_flags(struct page *page)
{
}
+#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
+static inline bool gigantic_page_supported(void) { return true; }
+#endif
+
#endif /* _ASM_X86_HUGETLB_H */
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index 793869879464..fca144a104e4 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -6,6 +6,8 @@ extern int force_iommu, no_iommu;
extern int iommu_detected;
extern int iommu_pass_through;
+int x86_dma_supported(struct device *dev, u64 mask);
+
/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 695605eb1dfb..1588e9e3dc01 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -48,28 +48,31 @@
#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS
/* x86-specific vcpu->requests bit members */
-#define KVM_REQ_MIGRATE_TIMER 8
-#define KVM_REQ_REPORT_TPR_ACCESS 9
-#define KVM_REQ_TRIPLE_FAULT 10
-#define KVM_REQ_MMU_SYNC 11
-#define KVM_REQ_CLOCK_UPDATE 12
-#define KVM_REQ_EVENT 14
-#define KVM_REQ_APF_HALT 15
-#define KVM_REQ_STEAL_UPDATE 16
-#define KVM_REQ_NMI 17
-#define KVM_REQ_PMU 18
-#define KVM_REQ_PMI 19
-#define KVM_REQ_SMI 20
-#define KVM_REQ_MASTERCLOCK_UPDATE 21
-#define KVM_REQ_MCLOCK_INPROGRESS (22 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_SCAN_IOAPIC (23 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_GLOBAL_CLOCK_UPDATE 24
-#define KVM_REQ_APIC_PAGE_RELOAD (25 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_HV_CRASH 26
-#define KVM_REQ_IOAPIC_EOI_EXIT 27
-#define KVM_REQ_HV_RESET 28
-#define KVM_REQ_HV_EXIT 29
-#define KVM_REQ_HV_STIMER 30
+#define KVM_REQ_MIGRATE_TIMER KVM_ARCH_REQ(0)
+#define KVM_REQ_REPORT_TPR_ACCESS KVM_ARCH_REQ(1)
+#define KVM_REQ_TRIPLE_FAULT KVM_ARCH_REQ(2)
+#define KVM_REQ_MMU_SYNC KVM_ARCH_REQ(3)
+#define KVM_REQ_CLOCK_UPDATE KVM_ARCH_REQ(4)
+#define KVM_REQ_EVENT KVM_ARCH_REQ(6)
+#define KVM_REQ_APF_HALT KVM_ARCH_REQ(7)
+#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(8)
+#define KVM_REQ_NMI KVM_ARCH_REQ(9)
+#define KVM_REQ_PMU KVM_ARCH_REQ(10)
+#define KVM_REQ_PMI KVM_ARCH_REQ(11)
+#define KVM_REQ_SMI KVM_ARCH_REQ(12)
+#define KVM_REQ_MASTERCLOCK_UPDATE KVM_ARCH_REQ(13)
+#define KVM_REQ_MCLOCK_INPROGRESS \
+ KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_SCAN_IOAPIC \
+ KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_GLOBAL_CLOCK_UPDATE KVM_ARCH_REQ(16)
+#define KVM_REQ_APIC_PAGE_RELOAD \
+ KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_HV_CRASH KVM_ARCH_REQ(18)
+#define KVM_REQ_IOAPIC_EOI_EXIT KVM_ARCH_REQ(19)
+#define KVM_REQ_HV_RESET KVM_ARCH_REQ(20)
+#define KVM_REQ_HV_EXIT KVM_ARCH_REQ(21)
+#define KVM_REQ_HV_STIMER KVM_ARCH_REQ(22)
#define CR0_RESERVED_BITS \
(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
@@ -254,7 +257,8 @@ union kvm_mmu_page_role {
unsigned cr0_wp:1;
unsigned smep_andnot_wp:1;
unsigned smap_andnot_wp:1;
- unsigned :8;
+ unsigned ad_disabled:1;
+ unsigned :7;
/*
* This is left at the top of the word so that
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index d406894cd9a2..5573c75f8e4c 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -426,6 +426,8 @@
#define MSR_IA32_TSC_ADJUST 0x0000003b
#define MSR_IA32_BNDCFGS 0x00000d90
+#define MSR_IA32_BNDCFGS_RSVD 0x00000ffc
+
#define MSR_IA32_XSS 0x00000da0
#define FEATURE_CONTROL_LOCKED (1<<0)
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
deleted file mode 100644
index 0ff8fe71b255..000000000000
--- a/arch/x86/include/asm/pmem.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-#ifndef __ASM_X86_PMEM_H__
-#define __ASM_X86_PMEM_H__
-
-#include <linux/uaccess.h>
-#include <asm/cacheflush.h>
-#include <asm/cpufeature.h>
-#include <asm/special_insns.h>
-
-#ifdef CONFIG_ARCH_HAS_PMEM_API
-/**
- * arch_memcpy_to_pmem - copy data to persistent memory
- * @dst: destination buffer for the copy
- * @src: source buffer for the copy
- * @n: length of the copy in bytes
- *
- * Copy data to persistent memory media via non-temporal stores so that
- * a subsequent pmem driver flush operation will drain posted write queues.
- */
-static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
-{
- int rem;
-
- /*
- * We are copying between two kernel buffers, if
- * __copy_from_user_inatomic_nocache() returns an error (page
- * fault) we would have already reported a general protection fault
- * before the WARN+BUG.
- */
- rem = __copy_from_user_inatomic_nocache(dst, (void __user *) src, n);
- if (WARN(rem, "%s: fault copying %p <- %p unwritten: %d\n",
- __func__, dst, src, rem))
- BUG();
-}
-
-/**
- * arch_wb_cache_pmem - write back a cache range with CLWB
- * @vaddr: virtual start address
- * @size: number of bytes to write back
- *
- * Write back a cache range using the CLWB (cache line write back)
- * instruction. Note that @size is internally rounded up to be cache
- * line size aligned.
- */
-static inline void arch_wb_cache_pmem(void *addr, size_t size)
-{
- u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
- unsigned long clflush_mask = x86_clflush_size - 1;
- void *vend = addr + size;
- void *p;
-
- for (p = (void *)((unsigned long)addr & ~clflush_mask);
- p < vend; p += x86_clflush_size)
- clwb(p);
-}
-
-/**
- * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
- * @addr: PMEM destination address
- * @bytes: number of bytes to copy
- * @i: iterator with source data
- *
- * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
- */
-static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
- struct iov_iter *i)
-{
- size_t len;
-
- /* TODO: skip the write-back by always using non-temporal stores */
- len = copy_from_iter_nocache(addr, bytes, i);
-
- /*
- * In the iovec case on x86_64 copy_from_iter_nocache() uses
- * non-temporal stores for the bulk of the transfer, but we need
- * to manually flush if the transfer is unaligned. A cached
- * memory copy is used when destination or size is not naturally
- * aligned. That is:
- * - Require 8-byte alignment when size is 8 bytes or larger.
- * - Require 4-byte alignment when size is 4 bytes.
- *
- * In the non-iovec case the entire destination needs to be
- * flushed.
- */
- if (iter_is_iovec(i)) {
- unsigned long flushed, dest = (unsigned long) addr;
-
- if (bytes < 8) {
- if (!IS_ALIGNED(dest, 4) || (bytes != 4))
- arch_wb_cache_pmem(addr, bytes);
- } else {
- if (!IS_ALIGNED(dest, 8)) {
- dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
- arch_wb_cache_pmem(addr, 1);
- }
-
- flushed = dest - (unsigned long) addr;
- if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
- arch_wb_cache_pmem(addr + bytes - 1, 1);
- }
- } else
- arch_wb_cache_pmem(addr, bytes);
-
- return len;
-}
-
-/**
- * arch_clear_pmem - zero a PMEM memory range
- * @addr: virtual start address
- * @size: number of bytes to zero
- *
- * Write zeros into the memory range starting at 'addr' for 'size' bytes.
- */
-static inline void arch_clear_pmem(void *addr, size_t size)
-{
- memset(addr, 0, size);
- arch_wb_cache_pmem(addr, size);
-}
-
-static inline void arch_invalidate_pmem(void *addr, size_t size)
-{
- clflush_cache_range(addr, size);
-}
-#endif /* CONFIG_ARCH_HAS_PMEM_API */
-#endif /* __ASM_X86_PMEM_H__ */
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 733bae07fb29..1f22bc277c45 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -109,6 +109,11 @@ memcpy_mcsafe(void *dst, const void *src, size_t cnt)
return 0;
}
+#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
+void memcpy_flushcache(void *dst, const void *src, size_t cnt);
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_X86_STRING_64_H */
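
memcpy_flushcache(), declared above behind ARCH_HAS_UACCESS_FLUSHCACHE, copies data while ensuring the destination cache lines are written back or bypassed, so the data is durable on persistent memory. A minimal, hypothetical write path showing the intended usage; the function and buffer names are illustrative, not from this patch:

/* Copy a record into pmem, then fence before publishing it via metadata. */
static void example_pmem_write(void *pmem_dst, const void *src, size_t len)
{
	memcpy_flushcache(pmem_dst, src, len);	/* NT stores / cache-line writeback */
	wmb();					/* order the copy before a later metadata update */
}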
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index a059aac9e937..476ea27f490b 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -565,7 +565,6 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
-extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index c5504b9a472e..b16f6a1d8b26 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -171,6 +171,10 @@ unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigne
extern long __copy_user_nocache(void *dst, const void __user *src,
unsigned size, int zerorest);
+extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
+extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
+ size_t len);
+
static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
unsigned size)
@@ -179,6 +183,13 @@ __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
return __copy_user_nocache(dst, src, size, 0);
}
+static inline int
+__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
+{
+ kasan_check_write(dst, size);
+ return __copy_user_flushcache(dst, src, size);
+}
+
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index f6d20f6cca12..11071fcd630e 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -43,6 +43,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/smap.h>
#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
@@ -50,6 +51,8 @@
#include <xen/interface/platform.h>
#include <xen/interface/xen-mca.h>
+struct xen_dm_op_buf;
+
/*
* The hypercall asms have to meet several constraints:
* - Work on 32- and 64-bit.
@@ -214,10 +217,12 @@ privcmd_call(unsigned call,
__HYPERCALL_DECLS;
__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+ stac();
asm volatile("call *%[call]"
: __HYPERCALL_5PARAM
: [call] "a" (&hypercall_page[call])
: __HYPERCALL_CLOBBER5);
+ clac();
return (long)__res;
}
@@ -474,9 +479,13 @@ HYPERVISOR_xenpmu_op(unsigned int op, void *arg)
static inline int
HYPERVISOR_dm_op(
- domid_t dom, unsigned int nr_bufs, void *bufs)
+ domid_t dom, unsigned int nr_bufs, struct xen_dm_op_buf *bufs)
{
- return _hypercall3(int, dm_op, dom, nr_bufs, bufs);
+ int ret;
+ stac();
+ ret = _hypercall3(int, dm_op, dom, nr_bufs, bufs);
+ clac();
+ return ret;
}
static inline void
diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild
index 83b6e9a0dce4..da1489cb64dc 100644
--- a/arch/x86/include/uapi/asm/Kbuild
+++ b/arch/x86/include/uapi/asm/Kbuild
@@ -1,6 +1,6 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-genhdr-y += unistd_32.h
-genhdr-y += unistd_64.h
-genhdr-y += unistd_x32.h
+generated-y += unistd_32.h
+generated-y += unistd_64.h
+generated-y += unistd_x32.h
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index f4fef5a24ebd..127ddadee1a5 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -150,6 +150,12 @@
#define HV_X64_DEPRECATING_AEOI_RECOMMENDED (1 << 9)
/*
+ * HV_VP_SET available
+ */
+#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11)
+
+
+/*
* Crash notification flag.
*/
#define HV_CRASH_CTL_CRASH_NOTIFY (1ULL << 63)
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 815dd63f49d0..cc0e8bc0ea3f 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -704,6 +704,7 @@ static const struct dma_map_ops gart_dma_ops = {
.alloc = gart_alloc_coherent,
.free = gart_free_coherent,
.mapping_error = gart_mapping_error,
+ .dma_supported = x86_dma_supported,
};
static void gart_iommu_shutdown(void)
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index fda7867046d0..5286a4a92cf7 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -50,6 +50,8 @@
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
+#define CALGARY_MAPPING_ERROR 0
+
#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
int use_calgary __read_mostly = 1;
#else
@@ -252,7 +254,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
if (panic_on_overflow)
panic("Calgary: fix the allocator.\n");
else
- return DMA_ERROR_CODE;
+ return CALGARY_MAPPING_ERROR;
}
}
@@ -272,10 +274,10 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
entry = iommu_range_alloc(dev, tbl, npages);
- if (unlikely(entry == DMA_ERROR_CODE)) {
+ if (unlikely(entry == CALGARY_MAPPING_ERROR)) {
pr_warn("failed to allocate %u pages in iommu %p\n",
npages, tbl);
- return DMA_ERROR_CODE;
+ return CALGARY_MAPPING_ERROR;
}
/* set the return dma address */
@@ -295,7 +297,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned long flags;
/* were we called with bad_dma_address? */
- badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
+ badend = CALGARY_MAPPING_ERROR + (EMERGENCY_PAGES * PAGE_SIZE);
if (unlikely(dma_addr < badend)) {
WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
"address 0x%Lx\n", dma_addr);
@@ -380,7 +382,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
entry = iommu_range_alloc(dev, tbl, npages);
- if (entry == DMA_ERROR_CODE) {
+ if (entry == CALGARY_MAPPING_ERROR) {
/* makes sure unmap knows to stop */
s->dma_length = 0;
goto error;
@@ -398,7 +400,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
error:
calgary_unmap_sg(dev, sg, nelems, dir, 0);
for_each_sg(sg, s, nelems, i) {
- sg->dma_address = DMA_ERROR_CODE;
+ sg->dma_address = CALGARY_MAPPING_ERROR;
sg->dma_length = 0;
}
return 0;
@@ -453,7 +455,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
/* set up tces to cover the allocated range */
mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
- if (mapping == DMA_ERROR_CODE)
+ if (mapping == CALGARY_MAPPING_ERROR)
goto free;
*dma_handle = mapping;
return ret;
@@ -478,6 +480,11 @@ static void calgary_free_coherent(struct device *dev, size_t size,
free_pages((unsigned long)vaddr, get_order(size));
}
+static int calgary_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == CALGARY_MAPPING_ERROR;
+}
+
static const struct dma_map_ops calgary_dma_ops = {
.alloc = calgary_alloc_coherent,
.free = calgary_free_coherent,
@@ -485,6 +492,8 @@ static const struct dma_map_ops calgary_dma_ops = {
.unmap_sg = calgary_unmap_sg,
.map_page = calgary_map_page,
.unmap_page = calgary_unmap_page,
+ .mapping_error = calgary_mapping_error,
+ .dma_supported = x86_dma_supported,
};
static inline void __iomem * busno_to_bbar(unsigned char num)
@@ -732,7 +741,7 @@ static void __init calgary_reserve_regions(struct pci_dev *dev)
struct iommu_table *tbl = pci_iommu(dev->bus);
/* reserve EMERGENCY_PAGES from bad_dma_address and up */
- iommu_range_reserve(tbl, DMA_ERROR_CODE, EMERGENCY_PAGES);
+ iommu_range_reserve(tbl, CALGARY_MAPPING_ERROR, EMERGENCY_PAGES);
/* avoid the BIOS/VGA first 640KB-1MB region */
/* for CalIOC2 - avoid the entire first MB */
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 3a216ec869cd..5e16d3f29594 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -213,10 +213,8 @@ static __init int iommu_setup(char *p)
}
early_param("iommu", iommu_setup);
-int dma_supported(struct device *dev, u64 mask)
+int x86_dma_supported(struct device *dev, u64 mask)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
#ifdef CONFIG_PCI
if (mask > 0xffffffff && forbid_dac > 0) {
dev_info(dev, "PCI: Disallowing DAC for device\n");
@@ -224,9 +222,6 @@ int dma_supported(struct device *dev, u64 mask)
}
#endif
- if (ops->dma_supported)
- return ops->dma_supported(dev, mask);
-
/* Copied from i386. Doesn't make much sense, because it will
only work for pci_alloc_coherent.
The caller just has to use GFP_DMA in this case. */
@@ -252,7 +247,6 @@ int dma_supported(struct device *dev, u64 mask)
return 1;
}
-EXPORT_SYMBOL(dma_supported);
static int __init pci_iommu_init(void)
{
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index a88952ef371c..a6d404087fe3 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -11,6 +11,8 @@
#include <asm/iommu.h>
#include <asm/dma.h>
+#define NOMMU_MAPPING_ERROR 0
+
static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
@@ -33,7 +35,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
dma_addr_t bus = page_to_phys(page) + offset;
WARN_ON(size == 0);
if (!check_addr("map_single", dev, bus, size))
- return DMA_ERROR_CODE;
+ return NOMMU_MAPPING_ERROR;
flush_write_buffers();
return bus;
}
@@ -88,6 +90,11 @@ static void nommu_sync_sg_for_device(struct device *dev,
flush_write_buffers();
}
+static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == NOMMU_MAPPING_ERROR;
+}
+
const struct dma_map_ops nommu_dma_ops = {
.alloc = dma_generic_alloc_coherent,
.free = dma_generic_free_coherent,
@@ -96,4 +103,6 @@ const struct dma_map_ops nommu_dma_ops = {
.sync_single_for_device = nommu_sync_single_for_device,
.sync_sg_for_device = nommu_sync_sg_for_device,
.is_phys = 1,
+ .mapping_error = nommu_mapping_error,
+ .dma_supported = x86_dma_supported,
};
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index a6fd40aade7c..da6728383052 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -144,6 +144,14 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
return best && (best->ebx & bit(X86_FEATURE_RTM));
}
+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->ebx & bit(X86_FEATURE_MPX));
+}
+
static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 80890dee66ce..fb0055953fbc 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -900,7 +900,7 @@ static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
if (rc != X86EMUL_CONTINUE) \
goto done; \
ctxt->_eip += sizeof(_type); \
- _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
+ memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
ctxt->fetch.ptr += sizeof(_type); \
_x; \
})
@@ -3942,6 +3942,25 @@ static int check_fxsr(struct x86_emulate_ctxt *ctxt)
}
/*
+ * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
+ * and restore MXCSR.
+ */
+static size_t __fxstate_size(int nregs)
+{
+ return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
+}
+
+static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
+{
+ bool cr4_osfxsr;
+ if (ctxt->mode == X86EMUL_MODE_PROT64)
+ return __fxstate_size(16);
+
+ cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
+ return __fxstate_size(cr4_osfxsr ? 8 : 0);
+}
+
+/*
* FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
* 1) 16 bit mode
* 2) 32 bit mode
@@ -3962,7 +3981,6 @@ static int check_fxsr(struct x86_emulate_ctxt *ctxt)
static int em_fxsave(struct x86_emulate_ctxt *ctxt)
{
struct fxregs_state fx_state;
- size_t size;
int rc;
rc = check_fxsr(ctxt);
@@ -3978,68 +3996,42 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
- if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR)
- size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]);
- else
- size = offsetof(struct fxregs_state, xmm_space[0]);
-
- return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
-}
-
-static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
- struct fxregs_state *new)
-{
- int rc = X86EMUL_CONTINUE;
- struct fxregs_state old;
-
- rc = asm_safe("fxsave %[fx]", , [fx] "+m"(old));
- if (rc != X86EMUL_CONTINUE)
- return rc;
-
- /*
- * 64 bit host will restore XMM 8-15, which is not correct on non-64
- * bit guests. Load the current values in order to preserve 64 bit
- * XMMs after fxrstor.
- */
-#ifdef CONFIG_X86_64
- /* XXX: accessing XMM 8-15 very awkwardly */
- memcpy(&new->xmm_space[8 * 16/4], &old.xmm_space[8 * 16/4], 8 * 16);
-#endif
-
- /*
- * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but
- * does save and restore MXCSR.
- */
- if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))
- memcpy(new->xmm_space, old.xmm_space, 8 * 16);
-
- return rc;
+ return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
+ fxstate_size(ctxt));
}
static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
struct fxregs_state fx_state;
int rc;
+ size_t size;
rc = check_fxsr(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
- if (rc != X86EMUL_CONTINUE)
- return rc;
+ ctxt->ops->get_fpu(ctxt);
- if (fx_state.mxcsr >> 16)
- return emulate_gp(ctxt, 0);
+ size = fxstate_size(ctxt);
+ if (size < __fxstate_size(16)) {
+ rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
+ if (rc != X86EMUL_CONTINUE)
+ goto out;
+ }
- ctxt->ops->get_fpu(ctxt);
+ rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+ if (rc != X86EMUL_CONTINUE)
+ goto out;
- if (ctxt->mode < X86EMUL_MODE_PROT64)
- rc = fxrstor_fixup(ctxt, &fx_state);
+ if (fx_state.mxcsr >> 16) {
+ rc = emulate_gp(ctxt, 0);
+ goto out;
+ }
if (rc == X86EMUL_CONTINUE)
rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
+out:
ctxt->ops->put_fpu(ctxt);
return rc;
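
For reference, the sizes __fxstate_size()/fxstate_size() above produce, assuming the conventional FXSAVE layout in which xmm_space starts 160 bytes into struct fxregs_state (worth verifying against the real struct):

/* Assuming offsetof(struct fxregs_state, xmm_space[0]) == 160:
 *   __fxstate_size(16) == 160 + 16 * 16 == 416   (64-bit mode: XMM0-XMM15)
 *   __fxstate_size(8)  == 160 +  8 * 16 == 288   (32-bit mode, CR4.OSFXSR set)
 *   __fxstate_size(0)  == 160                    (legacy x87/MXCSR area only)
 * so em_fxsave()/em_fxrstor() only move the state the guest can observe,
 * instead of the full 512-byte region.
 */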
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index d24c8742d9b0..2819d4c123eb 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1495,6 +1495,7 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
static void cancel_hv_timer(struct kvm_lapic *apic)
{
+ WARN_ON(!apic->lapic_timer.hv_timer_in_use);
preempt_disable();
kvm_x86_ops->cancel_hv_timer(apic->vcpu);
apic->lapic_timer.hv_timer_in_use = false;
@@ -1503,25 +1504,56 @@ static void cancel_hv_timer(struct kvm_lapic *apic)
static bool start_hv_timer(struct kvm_lapic *apic)
{
- u64 tscdeadline = apic->lapic_timer.tscdeadline;
+ struct kvm_timer *ktimer = &apic->lapic_timer;
+ int r;
- if ((atomic_read(&apic->lapic_timer.pending) &&
- !apic_lvtt_period(apic)) ||
- kvm_x86_ops->set_hv_timer(apic->vcpu, tscdeadline)) {
- if (apic->lapic_timer.hv_timer_in_use)
- cancel_hv_timer(apic);
- } else {
- apic->lapic_timer.hv_timer_in_use = true;
- hrtimer_cancel(&apic->lapic_timer.timer);
+ if (!kvm_x86_ops->set_hv_timer)
+ return false;
+
+ if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
+ return false;
- /* In case the sw timer triggered in the window */
- if (atomic_read(&apic->lapic_timer.pending) &&
- !apic_lvtt_period(apic))
- cancel_hv_timer(apic);
+ r = kvm_x86_ops->set_hv_timer(apic->vcpu, ktimer->tscdeadline);
+ if (r < 0)
+ return false;
+
+ ktimer->hv_timer_in_use = true;
+ hrtimer_cancel(&ktimer->timer);
+
+ /*
+ * Also recheck ktimer->pending, in case the sw timer triggered in
+	 * the window. For a periodic timer, leave the hv timer running for
+ * simplicity, and the deadline will be recomputed on the next vmexit.
+ */
+ if (!apic_lvtt_period(apic) && (r || atomic_read(&ktimer->pending))) {
+ if (r)
+ apic_timer_expired(apic);
+ return false;
}
- trace_kvm_hv_timer_state(apic->vcpu->vcpu_id,
- apic->lapic_timer.hv_timer_in_use);
- return apic->lapic_timer.hv_timer_in_use;
+
+ trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, true);
+ return true;
+}
+
+static void start_sw_timer(struct kvm_lapic *apic)
+{
+ struct kvm_timer *ktimer = &apic->lapic_timer;
+ if (apic->lapic_timer.hv_timer_in_use)
+ cancel_hv_timer(apic);
+ if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
+ return;
+
+ if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
+ start_sw_period(apic);
+ else if (apic_lvtt_tscdeadline(apic))
+ start_sw_tscdeadline(apic);
+ trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
+}
+
+static void restart_apic_timer(struct kvm_lapic *apic)
+{
+ if (!start_hv_timer(apic))
+ start_sw_timer(apic);
}
void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
@@ -1535,19 +1567,14 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
advance_periodic_target_expiration(apic);
- if (!start_hv_timer(apic))
- start_sw_period(apic);
+ restart_apic_timer(apic);
}
}
EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{
- struct kvm_lapic *apic = vcpu->arch.apic;
-
- WARN_ON(apic->lapic_timer.hv_timer_in_use);
-
- start_hv_timer(apic);
+ restart_apic_timer(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
@@ -1556,33 +1583,28 @@ void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
struct kvm_lapic *apic = vcpu->arch.apic;
/* Possibly the TSC deadline timer is not enabled yet */
- if (!apic->lapic_timer.hv_timer_in_use)
- return;
-
- cancel_hv_timer(apic);
+ if (apic->lapic_timer.hv_timer_in_use)
+ start_sw_timer(apic);
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
- if (atomic_read(&apic->lapic_timer.pending))
- return;
+void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
- if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
- start_sw_period(apic);
- else if (apic_lvtt_tscdeadline(apic))
- start_sw_tscdeadline(apic);
+ WARN_ON(!apic->lapic_timer.hv_timer_in_use);
+ restart_apic_timer(apic);
}
-EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
static void start_apic_timer(struct kvm_lapic *apic)
{
atomic_set(&apic->lapic_timer.pending, 0);
- if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
- if (set_target_expiration(apic) &&
- !(kvm_x86_ops->set_hv_timer && start_hv_timer(apic)))
- start_sw_period(apic);
- } else if (apic_lvtt_tscdeadline(apic)) {
- if (!(kvm_x86_ops->set_hv_timer && start_hv_timer(apic)))
- start_sw_tscdeadline(apic);
- }
+ if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
+ && !set_target_expiration(apic))
+ return;
+
+ restart_apic_timer(apic);
}
static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
@@ -1813,16 +1835,6 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
* LAPIC interface
*----------------------------------------------------------------------
*/
-u64 kvm_get_lapic_target_expiration_tsc(struct kvm_vcpu *vcpu)
-{
- struct kvm_lapic *apic = vcpu->arch.apic;
-
- if (!lapic_in_kernel(vcpu))
- return 0;
-
- return apic->lapic_timer.tscdeadline;
-}
-
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index bcbe811f3b97..29caa2c3dff9 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -87,7 +87,6 @@ int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s);
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
-u64 kvm_get_lapic_target_expiration_tsc(struct kvm_vcpu *vcpu);
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
@@ -216,4 +215,5 @@ void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu);
void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu);
void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu);
bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu);
+void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu);
#endif
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cb8225969255..aafd399cf8c6 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -183,13 +183,13 @@ static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;
static u64 __read_mostly shadow_mmio_mask;
+static u64 __read_mostly shadow_mmio_value;
static u64 __read_mostly shadow_present_mask;
/*
- * The mask/value to distinguish a PTE that has been marked not-present for
- * access tracking purposes.
- * The mask would be either 0 if access tracking is disabled, or
- * SPTE_SPECIAL_MASK|VMX_EPT_RWX_MASK if access tracking is enabled.
+ * SPTEs used by MMUs without A/D bits are marked with shadow_acc_track_value.
+ * Non-present SPTEs with shadow_acc_track_value set are in place for access
+ * tracking.
*/
static u64 __read_mostly shadow_acc_track_mask;
static const u64 shadow_acc_track_value = SPTE_SPECIAL_MASK;
@@ -207,16 +207,40 @@ static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIF
static void mmu_spte_set(u64 *sptep, u64 spte);
static void mmu_free_roots(struct kvm_vcpu *vcpu);
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
{
+ BUG_ON((mmio_mask & mmio_value) != mmio_value);
+ shadow_mmio_value = mmio_value | SPTE_SPECIAL_MASK;
shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
+static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
+{
+ return sp->role.ad_disabled;
+}
+
+static inline bool spte_ad_enabled(u64 spte)
+{
+ MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
+ return !(spte & shadow_acc_track_value);
+}
+
+static inline u64 spte_shadow_accessed_mask(u64 spte)
+{
+ MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
+ return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
+}
+
+static inline u64 spte_shadow_dirty_mask(u64 spte)
+{
+ MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
+ return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
+}
+
static inline bool is_access_track_spte(u64 spte)
{
- /* Always false if shadow_acc_track_mask is zero. */
- return (spte & shadow_acc_track_mask) == shadow_acc_track_value;
+ return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
}
/*
@@ -270,7 +294,7 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
u64 mask = generation_mmio_spte_mask(gen);
access &= ACC_WRITE_MASK | ACC_USER_MASK;
- mask |= shadow_mmio_mask | access | gfn << PAGE_SHIFT;
+ mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
trace_mark_mmio_spte(sptep, gfn, access, gen);
mmu_spte_set(sptep, mask);
@@ -278,7 +302,7 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
static bool is_mmio_spte(u64 spte)
{
- return (spte & shadow_mmio_mask) == shadow_mmio_mask;
+ return (spte & shadow_mmio_mask) == shadow_mmio_value;
}
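
The hunks above split the old single shadow_mmio_mask into a mask/value pair, so an MMIO SPTE no longer has to have every mask bit set. The standalone C sketch below illustrates that matching idiom with hypothetical values (the real masks come from the EPT configuration and include SPTE_SPECIAL_MASK); it is commentary, not patch content.

/* Illustrative userspace sketch of the mask/value test used by is_mmio_spte()
 * after this change: the bits selected by the mask must equal a value that
 * may differ from the mask itself. */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static uint64_t mmio_mask;   /* which bits identify an MMIO PTE         */
static uint64_t mmio_value;  /* what those bits must be for an MMIO PTE */

static void set_mmio_spte_mask(uint64_t mask, uint64_t value)
{
	/* every bit of the value must lie inside the mask */
	assert((mask & value) == value);
	mmio_mask = mask;
	mmio_value = value;
}

static int is_mmio_spte(uint64_t spte)
{
	return (spte & mmio_mask) == mmio_value;
}

int main(void)
{
	/* e.g. EPT misconfig: RWX bits equal to 110b mark MMIO, not all-ones */
	set_mmio_spte_mask(0x7, 0x6);
	printf("%d %d\n", is_mmio_spte(0x1006), is_mmio_spte(0x1007)); /* 1 0 */
	return 0;
}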
static gfn_t get_mmio_spte_gfn(u64 spte)
@@ -315,12 +339,20 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
return likely(kvm_gen == spte_gen);
}
+/*
+ * Sets the shadow PTE masks used by the MMU.
+ *
+ * Assumptions:
+ * - Setting either @accessed_mask or @dirty_mask requires setting both
+ * - At least one of @accessed_mask or @acc_track_mask must be set
+ */
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
u64 acc_track_mask)
{
- if (acc_track_mask != 0)
- acc_track_mask |= SPTE_SPECIAL_MASK;
+ BUG_ON(!dirty_mask != !accessed_mask);
+ BUG_ON(!accessed_mask && !acc_track_mask);
+ BUG_ON(acc_track_mask & shadow_acc_track_value);
shadow_user_mask = user_mask;
shadow_accessed_mask = accessed_mask;
@@ -329,7 +361,6 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
shadow_x_mask = x_mask;
shadow_present_mask = p_mask;
shadow_acc_track_mask = acc_track_mask;
- WARN_ON(shadow_accessed_mask != 0 && shadow_acc_track_mask != 0);
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
@@ -549,7 +580,7 @@ static bool spte_has_volatile_bits(u64 spte)
is_access_track_spte(spte))
return true;
- if (shadow_accessed_mask) {
+ if (spte_ad_enabled(spte)) {
if ((spte & shadow_accessed_mask) == 0 ||
(is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
return true;
@@ -560,14 +591,17 @@ static bool spte_has_volatile_bits(u64 spte)
static bool is_accessed_spte(u64 spte)
{
- return shadow_accessed_mask ? spte & shadow_accessed_mask
- : !is_access_track_spte(spte);
+ u64 accessed_mask = spte_shadow_accessed_mask(spte);
+
+ return accessed_mask ? spte & accessed_mask
+ : !is_access_track_spte(spte);
}
static bool is_dirty_spte(u64 spte)
{
- return shadow_dirty_mask ? spte & shadow_dirty_mask
- : spte & PT_WRITABLE_MASK;
+ u64 dirty_mask = spte_shadow_dirty_mask(spte);
+
+ return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
}
/* Rules for using mmu_spte_set:
@@ -707,10 +741,10 @@ static u64 mmu_spte_get_lockless(u64 *sptep)
static u64 mark_spte_for_access_track(u64 spte)
{
- if (shadow_accessed_mask != 0)
+ if (spte_ad_enabled(spte))
return spte & ~shadow_accessed_mask;
- if (shadow_acc_track_mask == 0 || is_access_track_spte(spte))
+ if (is_access_track_spte(spte))
return spte;
/*
@@ -729,7 +763,6 @@ static u64 mark_spte_for_access_track(u64 spte)
spte |= (spte & shadow_acc_track_saved_bits_mask) <<
shadow_acc_track_saved_bits_shift;
spte &= ~shadow_acc_track_mask;
- spte |= shadow_acc_track_value;
return spte;
}
@@ -741,6 +774,7 @@ static u64 restore_acc_track_spte(u64 spte)
u64 saved_bits = (spte >> shadow_acc_track_saved_bits_shift)
& shadow_acc_track_saved_bits_mask;
+ WARN_ON_ONCE(spte_ad_enabled(spte));
WARN_ON_ONCE(!is_access_track_spte(spte));
new_spte &= ~shadow_acc_track_mask;
@@ -759,7 +793,7 @@ static bool mmu_spte_age(u64 *sptep)
if (!is_accessed_spte(spte))
return false;
- if (shadow_accessed_mask) {
+ if (spte_ad_enabled(spte)) {
clear_bit((ffs(shadow_accessed_mask) - 1),
(unsigned long *)sptep);
} else {
@@ -1390,6 +1424,22 @@ static bool spte_clear_dirty(u64 *sptep)
return mmu_spte_update(sptep, spte);
}
+static bool wrprot_ad_disabled_spte(u64 *sptep)
+{
+ bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
+ (unsigned long *)sptep);
+ if (was_writable)
+ kvm_set_pfn_dirty(spte_to_pfn(*sptep));
+
+ return was_writable;
+}
+
+/*
+ * Gets the GFN ready for another round of dirty logging by clearing the
+ * - D bit on ad-enabled SPTEs, and
+ * - W bit on ad-disabled SPTEs.
+ * Returns true iff any D or W bits were cleared.
+ */
static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
u64 *sptep;
@@ -1397,7 +1447,10 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
bool flush = false;
for_each_rmap_spte(rmap_head, &iter, sptep)
- flush |= spte_clear_dirty(sptep);
+ if (spte_ad_enabled(*sptep))
+ flush |= spte_clear_dirty(sptep);
+ else
+ flush |= wrprot_ad_disabled_spte(sptep);
return flush;
}
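
The comment above __rmap_clear_dirty() describes the new per-SPTE dispatch: clear the D bit when A/D bits are enabled for the SPTE, otherwise drop the W bit so the next write faults and can be logged. Here is a self-contained sketch of that decision using a made-up bit layout rather than the real SPTE format; names and constants are hypothetical.

/* Userspace sketch of the dirty-logging dispatch added to __rmap_clear_dirty(). */
#include <stdint.h>
#include <stdio.h>

#define PTE_W      (1ull << 1)
#define PTE_D      (1ull << 6)
#define PTE_AD_OFF (1ull << 62)   /* "A/D bits disabled for this SPTE" marker */

static int clear_dirty(uint64_t *pte)
{
	uint64_t old = *pte;

	if (!(old & PTE_AD_OFF))
		*pte &= ~PTE_D;       /* ad-enabled: just clear D      */
	else
		*pte &= ~PTE_W;       /* ad-disabled: write-protect it */
	return *pte != old;           /* caller flushes TLBs if true   */
}

int main(void)
{
	uint64_t a = PTE_W | PTE_D, b = PTE_W | PTE_AD_OFF;

	printf("%d %d -> %#llx %#llx\n", clear_dirty(&a), clear_dirty(&b),
	       (unsigned long long)a, (unsigned long long)b);
	return 0;
}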
@@ -1420,7 +1473,8 @@ static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
bool flush = false;
for_each_rmap_spte(rmap_head, &iter, sptep)
- flush |= spte_set_dirty(sptep);
+ if (spte_ad_enabled(*sptep))
+ flush |= spte_set_dirty(sptep);
return flush;
}
@@ -1452,7 +1506,8 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
}
/**
- * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages
+ * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
+ * protect the page if the D-bit isn't supported.
* @kvm: kvm instance
* @slot: slot to clear D-bit
* @gfn_offset: start of the BITS_PER_LONG pages we care about
@@ -1766,18 +1821,9 @@ static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
u64 *sptep;
struct rmap_iterator iter;
- /*
- * If there's no access bit in the secondary pte set by the hardware and
- * fast access tracking is also not enabled, it's up to gup-fast/gup to
- * set the access bit in the primary pte or in the page structure.
- */
- if (!shadow_accessed_mask && !shadow_acc_track_mask)
- goto out;
-
for_each_rmap_spte(rmap_head, &iter, sptep)
if (is_accessed_spte(*sptep))
return 1;
-out:
return 0;
}
@@ -1798,18 +1844,6 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
- /*
- * In case of absence of EPT Access and Dirty Bits supports,
- * emulate the accessed bit for EPT, by checking if this page has
- * an EPT mapping, and clearing it if it does. On the next access,
- * a new EPT mapping will be established.
- * This has some overhead, but not as much as the cost of swapping
- * out actively used pages or breaking up actively used hugepages.
- */
- if (!shadow_accessed_mask && !shadow_acc_track_mask)
- return kvm_handle_hva_range(kvm, start, end, 0,
- kvm_unmap_rmapp);
-
return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
}
@@ -2398,7 +2432,12 @@ static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK |
- shadow_user_mask | shadow_x_mask | shadow_accessed_mask;
+ shadow_user_mask | shadow_x_mask;
+
+ if (sp_ad_disabled(sp))
+ spte |= shadow_acc_track_value;
+ else
+ spte |= shadow_accessed_mask;
mmu_spte_set(sptep, spte);
@@ -2666,10 +2705,15 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
{
u64 spte = 0;
int ret = 0;
+ struct kvm_mmu_page *sp;
if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
return 0;
+ sp = page_header(__pa(sptep));
+ if (sp_ad_disabled(sp))
+ spte |= shadow_acc_track_value;
+
/*
* For the EPT case, shadow_present_mask is 0 if hardware
* supports exec-only page table entries. In that case,
@@ -2678,7 +2722,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
*/
spte |= shadow_present_mask;
if (!speculative)
- spte |= shadow_accessed_mask;
+ spte |= spte_shadow_accessed_mask(spte);
if (pte_access & ACC_EXEC_MASK)
spte |= shadow_x_mask;
@@ -2735,7 +2779,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (pte_access & ACC_WRITE_MASK) {
kvm_vcpu_mark_page_dirty(vcpu, gfn);
- spte |= shadow_dirty_mask;
+ spte |= spte_shadow_dirty_mask(spte);
}
if (speculative)
@@ -2877,16 +2921,16 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
{
struct kvm_mmu_page *sp;
+ sp = page_header(__pa(sptep));
+
/*
- * Since it's no accessed bit on EPT, it's no way to
- * distinguish between actually accessed translations
- * and prefetched, so disable pte prefetch if EPT is
- * enabled.
+ * Without accessed bits, there's no way to distinguish between
+ * actually accessed translations and prefetched, so disable pte
+ * prefetch if accessed bits aren't available.
*/
- if (!shadow_accessed_mask)
+ if (sp_ad_disabled(sp))
return;
- sp = page_header(__pa(sptep));
if (sp->role.level > PT_PAGE_TABLE_LEVEL)
return;
@@ -4290,6 +4334,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
context->base_role.word = 0;
context->base_role.smm = is_smm(vcpu);
+ context->base_role.ad_disabled = (shadow_accessed_mask == 0);
context->page_fault = tdp_page_fault;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
@@ -4377,6 +4422,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
context->root_level = context->shadow_root_level;
context->root_hpa = INVALID_PAGE;
context->direct_map = false;
+ context->base_role.ad_disabled = !accessed_dirty;
update_permission_bitmask(vcpu, context, true);
update_pkru_bitmask(vcpu, context, true);
@@ -4636,6 +4682,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
mask.smep_andnot_wp = 1;
mask.smap_andnot_wp = 1;
mask.smm = 1;
+ mask.ad_disabled = 1;
/*
* If we don't have indirect shadow pages, it means no page is
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 330bf3a811fb..a276834950c1 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -51,7 +51,7 @@ static inline u64 rsvd_bits(int s, int e)
return ((1ULL << (e - s + 1)) - 1) << s;
}
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value);
void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index 5a24b846a1cb..8b97a6cba8d1 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -30,8 +30,9 @@
\
role.word = __entry->role; \
\
- trace_seq_printf(p, "sp gen %lx gfn %llx %u%s q%u%s %s%s" \
- " %snxe root %u %s%c", __entry->mmu_valid_gen, \
+ trace_seq_printf(p, "sp gen %lx gfn %llx l%u%s q%u%s %s%s" \
+ " %snxe %sad root %u %s%c", \
+ __entry->mmu_valid_gen, \
__entry->gfn, role.level, \
role.cr4_pae ? " pae" : "", \
role.quadrant, \
@@ -39,6 +40,7 @@
access_str[role.access], \
role.invalid ? " invalid" : "", \
role.nxe ? "" : "!", \
+ role.ad_disabled ? "!" : "", \
__entry->root_count, \
__entry->unsync ? "unsync" : "sync", 0); \
saved_ptr; \
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 33460fcdeef9..905ea6052517 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -190,6 +190,7 @@ struct vcpu_svm {
struct nested_state nested;
bool nmi_singlestep;
+ u64 nmi_singlestep_guest_rflags;
unsigned int3_injected;
unsigned long int3_rip;
@@ -964,6 +965,18 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}
+static void disable_nmi_singlestep(struct vcpu_svm *svm)
+{
+ svm->nmi_singlestep = false;
+ if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
+ /* Clear our flags if they were not set by the guest */
+ if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
+ svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
+ if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
+ svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
+ }
+}
+
/* Note:
* This hash table is used to map VM_ID to a struct kvm_arch,
* when handling AMD IOMMU GALOG notification to schedule in
@@ -1713,11 +1726,24 @@ static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
- return to_svm(vcpu)->vmcb->save.rflags;
+ struct vcpu_svm *svm = to_svm(vcpu);
+ unsigned long rflags = svm->vmcb->save.rflags;
+
+ if (svm->nmi_singlestep) {
+ /* Hide our flags if they were not set by the guest */
+ if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
+ rflags &= ~X86_EFLAGS_TF;
+ if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
+ rflags &= ~X86_EFLAGS_RF;
+ }
+ return rflags;
}
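
svm_get_rflags() above and svm_set_rflags() just below now hide and re-arm the TF/RF bits that KVM sets for its NMI single-step window, so the guest never observes flags it did not set itself. The userspace sketch below illustrates that hide/restore pair with hypothetical flag constants; it is not the driver code.

/* Sketch of the hide-on-read / force-on-write idea behind the rflags hooks. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FL_TF (1u << 8)
#define FL_RF (1u << 16)

struct vcpu {
	uint32_t rflags;          /* what the hardware state contains          */
	uint32_t saved_rflags;    /* guest's own flags at single-step start    */
	bool nmi_singlestep;
};

static uint32_t get_rflags(struct vcpu *v)
{
	uint32_t r = v->rflags;

	if (v->nmi_singlestep) {
		if (!(v->saved_rflags & FL_TF))
			r &= ~FL_TF;
		if (!(v->saved_rflags & FL_RF))
			r &= ~FL_RF;
	}
	return r;
}

static void set_rflags(struct vcpu *v, uint32_t r)
{
	if (v->nmi_singlestep)
		r |= FL_TF | FL_RF;   /* keep our single-step armed */
	v->rflags = r;
}

int main(void)
{
	struct vcpu v = { .saved_rflags = 0, .nmi_singlestep = true };

	set_rflags(&v, 0);                     /* guest writes flags without TF/RF */
	printf("%#x %#x\n", v.rflags, get_rflags(&v)); /* 0x10100 0 */
	return 0;
}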
static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
+ if (to_svm(vcpu)->nmi_singlestep)
+ rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
+
/*
* Any change of EFLAGS.VM is accompanied by a reload of SS
* (caused by either a task switch or an inter-privilege IRET),
@@ -2112,10 +2138,7 @@ static int db_interception(struct vcpu_svm *svm)
}
if (svm->nmi_singlestep) {
- svm->nmi_singlestep = false;
- if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
- svm->vmcb->save.rflags &=
- ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+ disable_nmi_singlestep(svm);
}
if (svm->vcpu.guest_debug &
@@ -2370,8 +2393,8 @@ static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
- if (!(svm->vcpu.arch.efer & EFER_SVME)
- || !is_paging(&svm->vcpu)) {
+ if (!(svm->vcpu.arch.efer & EFER_SVME) ||
+ !is_paging(&svm->vcpu)) {
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
}
@@ -2381,7 +2404,7 @@ static int nested_svm_check_permissions(struct vcpu_svm *svm)
return 1;
}
- return 0;
+ return 0;
}
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
@@ -2534,6 +2557,31 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
+/* DB exceptions for our internal use must not cause vmexit */
+static int nested_svm_intercept_db(struct vcpu_svm *svm)
+{
+ unsigned long dr6;
+
+ /* if we're not singlestepping, it's not ours */
+ if (!svm->nmi_singlestep)
+ return NESTED_EXIT_DONE;
+
+ /* if it's not a singlestep exception, it's not ours */
+ if (kvm_get_dr(&svm->vcpu, 6, &dr6))
+ return NESTED_EXIT_DONE;
+ if (!(dr6 & DR6_BS))
+ return NESTED_EXIT_DONE;
+
+ /* if the guest is singlestepping, it should get the vmexit */
+ if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
+ disable_nmi_singlestep(svm);
+ return NESTED_EXIT_DONE;
+ }
+
+ /* it's ours, the nested hypervisor must not see this one */
+ return NESTED_EXIT_HOST;
+}
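
nested_svm_intercept_db() above encodes a three-way test before a #DB is reflected to the nested hypervisor: only a single-step exception raised for KVM's own NMI window is swallowed. The sketch below restates that decision table in plain C with hypothetical names, purely as commentary on the logic.

/* Sketch of the #DB routing decision added by nested_svm_intercept_db(). */
#include <stdbool.h>
#include <stdio.h>

enum routing { EXIT_TO_L1, HANDLE_IN_L0 };

static enum routing route_db(bool our_singlestep, bool dr6_bs, bool guest_tf)
{
	if (!our_singlestep)        /* we are not single-stepping: not ours  */
		return EXIT_TO_L1;
	if (!dr6_bs)                /* not a single-step exception: not ours */
		return EXIT_TO_L1;
	if (guest_tf)               /* guest also single-steps: it gets it   */
		return EXIT_TO_L1;
	return HANDLE_IN_L0;        /* purely internal, swallow it */
}

int main(void)
{
	printf("%d %d\n", route_db(true, true, false), route_db(true, true, true));
	return 0;
}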
+
static int nested_svm_exit_special(struct vcpu_svm *svm)
{
u32 exit_code = svm->vmcb->control.exit_code;
@@ -2589,8 +2637,12 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
}
case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
- if (svm->nested.intercept_exceptions & excp_bits)
- vmexit = NESTED_EXIT_DONE;
+ if (svm->nested.intercept_exceptions & excp_bits) {
+ if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
+ vmexit = nested_svm_intercept_db(svm);
+ else
+ vmexit = NESTED_EXIT_DONE;
+ }
/* async page fault always cause vmexit */
else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
svm->apf_reason != 0)
@@ -4627,10 +4679,17 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
== HF_NMI_MASK)
return; /* IRET will cause a vm exit */
+ if ((svm->vcpu.arch.hflags & HF_GIF_MASK) == 0)
+ return; /* STGI will cause a vm exit */
+
+ if (svm->nested.exit_required)
+ return; /* we're not going to run the guest yet */
+
/*
* Something prevents NMI from being injected. Single step over possible
* problem (IRET or exception injection or interrupt shadow)
*/
+ svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
svm->nmi_singlestep = true;
svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
}
@@ -4771,6 +4830,22 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
if (unlikely(svm->nested.exit_required))
return;
+ /*
+ * Disable singlestep if we're injecting an interrupt/exception.
+ * We don't want our modified rflags to be pushed on the stack where
+ * we might not be able to easily reset them if we disabled NMI
+ * singlestep later.
+ */
+ if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
+ /*
+ * Event injection happens before external interrupts cause a
+ * vmexit and interrupts are disabled here, so smp_send_reschedule
+ * is enough to force an immediate vmexit.
+ */
+ disable_nmi_singlestep(svm);
+ smp_send_reschedule(vcpu->cpu);
+ }
+
pre_svm_run(svm);
sync_lapic_to_cr8(vcpu);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6dcc4873e435..f76efad248ab 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -913,8 +913,9 @@ static void nested_release_page_clean(struct page *page)
kvm_release_page_clean(page);
}
+static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
-static u64 construct_eptp(unsigned long root_hpa);
+static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
static bool vmx_xsaves_supported(void);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
static void vmx_set_segment(struct kvm_vcpu *vcpu,
@@ -2772,7 +2773,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
if (enable_ept_ad_bits) {
vmx->nested.nested_vmx_secondary_ctls_high |=
SECONDARY_EXEC_ENABLE_PML;
- vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT;
+ vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT;
}
} else
vmx->nested.nested_vmx_ept_caps = 0;
@@ -3198,7 +3199,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
case MSR_IA32_BNDCFGS:
- if (!kvm_mpx_supported())
+ if (!kvm_mpx_supported() ||
+ (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
return 1;
msr_info->data = vmcs_read64(GUEST_BNDCFGS);
break;
@@ -3280,7 +3282,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
case MSR_IA32_BNDCFGS:
- if (!kvm_mpx_supported())
+ if (!kvm_mpx_supported() ||
+ (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+ return 1;
+ if (is_noncanonical_address(data & PAGE_MASK) ||
+ (data & MSR_IA32_BNDCFGS_RSVD))
return 1;
vmcs_write64(GUEST_BNDCFGS, data);
break;
@@ -4013,7 +4019,7 @@ static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
if (enable_ept) {
if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
return;
- ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
+ ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa));
} else {
vpid_sync_context(vpid);
}
@@ -4188,14 +4194,15 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
vmx->emulation_required = emulation_required(vcpu);
}
-static u64 construct_eptp(unsigned long root_hpa)
+static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
{
u64 eptp;
/* TODO write the value reading from MSR */
eptp = VMX_EPT_DEFAULT_MT |
VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
- if (enable_ept_ad_bits)
+ if (enable_ept_ad_bits &&
+ (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
eptp |= VMX_EPT_AD_ENABLE_BIT;
eptp |= (root_hpa & PAGE_MASK);
@@ -4209,7 +4216,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
guest_cr3 = cr3;
if (enable_ept) {
- eptp = construct_eptp(cr3);
+ eptp = construct_eptp(vcpu, cr3);
vmcs_write64(EPT_POINTER, eptp);
if (is_paging(vcpu) || is_guest_mode(vcpu))
guest_cr3 = kvm_read_cr3(vcpu);
@@ -5170,7 +5177,8 @@ static void ept_set_mmio_spte_mask(void)
* EPT Misconfigurations can be generated if the value of bits 2:0
* of an EPT paging-structure entry is 110b (write/execute).
*/
- kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE);
+ kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK,
+ VMX_EPT_MISCONFIG_WX_VALUE);
}
#define VMX_XSS_EXIT_BITMAP 0
@@ -6220,17 +6228,6 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
- if (is_guest_mode(vcpu)
- && !(exit_qualification & EPT_VIOLATION_GVA_TRANSLATED)) {
- /*
- * Fix up exit_qualification according to whether guest
- * page table accesses are reads or writes.
- */
- u64 eptp = nested_ept_get_cr3(vcpu);
- if (!(eptp & VMX_EPT_AD_ENABLE_BIT))
- exit_qualification &= ~EPT_VIOLATION_ACC_WRITE;
- }
-
/*
* EPT violation happened while executing iret from NMI,
* "blocked by NMI" bit has to be set before next VM entry.
@@ -6453,7 +6450,7 @@ void vmx_enable_tdp(void)
enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,
0ull, VMX_EPT_EXECUTABLE_MASK,
cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK,
- enable_ept_ad_bits ? 0ull : VMX_EPT_RWX_MASK);
+ VMX_EPT_RWX_MASK);
ept_set_mmio_spte_mask();
kvm_enable_tdp();
@@ -6557,7 +6554,6 @@ static __init int hardware_setup(void)
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
- vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
memcpy(vmx_msr_bitmap_legacy_x2apic_apicv,
vmx_msr_bitmap_legacy, PAGE_SIZE);
@@ -7661,7 +7657,10 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
unsigned long type, types;
gva_t gva;
struct x86_exception e;
- int vpid;
+ struct {
+ u64 vpid;
+ u64 gla;
+ } operand;
if (!(vmx->nested.nested_vmx_secondary_ctls_high &
SECONDARY_EXEC_ENABLE_VPID) ||
@@ -7691,17 +7690,28 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
vmx_instruction_info, false, &gva))
return 1;
- if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid,
- sizeof(u32), &e)) {
+ if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
+ sizeof(operand), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
+ if (operand.vpid >> 16) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
switch (type) {
case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
+ if (is_noncanonical_address(operand.gla)) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+ /* fall through */
case VMX_VPID_EXTENT_SINGLE_CONTEXT:
case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
- if (!vpid) {
+ if (!operand.vpid) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
return kvm_skip_emulated_instruction(vcpu);
@@ -9394,6 +9404,11 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
vmcs12->guest_physical_address = fault->address;
}
+static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
+{
+ return nested_ept_get_cr3(vcpu) & VMX_EPT_AD_ENABLE_BIT;
+}
+
/* Callbacks for nested_ept_init_mmu_context: */
static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
@@ -9404,18 +9419,18 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
- u64 eptp;
+ bool wants_ad;
WARN_ON(mmu_is_nested(vcpu));
- eptp = nested_ept_get_cr3(vcpu);
- if ((eptp & VMX_EPT_AD_ENABLE_BIT) && !enable_ept_ad_bits)
+ wants_ad = nested_ept_ad_enabled(vcpu);
+ if (wants_ad && !enable_ept_ad_bits)
return 1;
kvm_mmu_unload(vcpu);
kvm_init_shadow_ept_mmu(vcpu,
to_vmx(vcpu)->nested.nested_vmx_ept_caps &
VMX_EPT_EXECUTE_ONLY_BIT,
- eptp & VMX_EPT_AD_ENABLE_BIT);
+ wants_ad);
vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
@@ -10728,8 +10743,7 @@ static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
}
- if (nested_cpu_has_ept(vmcs12))
- vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
+ vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
if (nested_cpu_has_vid(vmcs12))
vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
@@ -10754,8 +10768,6 @@ static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
if (kvm_mpx_supported())
vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
- if (nested_cpu_has_xsaves(vmcs12))
- vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);
}
/*
@@ -11152,7 +11164,8 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
vmx->hv_deadline_tsc = tscl + delta_tsc;
vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
PIN_BASED_VMX_PREEMPTION_TIMER);
- return 0;
+
+ return delta_tsc == 0;
}
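
The changed return statement above makes vmx_set_hv_timer() report an already-expired deadline (delta_tsc == 0) as 1 rather than silently arming a zero-length preemption timer; the start_hv_timer() hunk at the top of this section then delivers the expiry immediately. A hedged userspace sketch of that return convention, with hypothetical names:

/* Small sketch of the <0 / 0 / 1 return convention for programming a deadline. */
#include <stdio.h>
#include <stdint.h>

/* returns <0 on error, 0 when armed, 1 when the deadline already passed */
static int set_hw_timer(uint64_t now, uint64_t deadline)
{
	uint64_t delta = deadline > now ? deadline - now : 0;

	/* program the hardware with delta here ... */
	return delta == 0;
}

int main(void)
{
	int r = set_hw_timer(100, 90);

	if (r < 0)
		puts("hardware timer unavailable, use software timer");
	else if (r)
		puts("deadline already passed, deliver the timer interrupt now");
	else
		puts("hardware timer armed");
	return 0;
}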
static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0e846f0cb83b..6c7266f7766d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2841,10 +2841,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_vcpu_write_tsc_offset(vcpu, offset);
vcpu->arch.tsc_catchup = 1;
}
- if (kvm_lapic_hv_timer_in_use(vcpu) &&
- kvm_x86_ops->set_hv_timer(vcpu,
- kvm_get_lapic_target_expiration_tsc(vcpu)))
- kvm_lapic_switch_to_sw_timer(vcpu);
+
+ if (kvm_lapic_hv_timer_in_use(vcpu))
+ kvm_lapic_restart_hv_timer(vcpu);
+
/*
* On a host with synchronized TSC, there is no need to update
* kvmclock on vcpu->cpu migration
@@ -6011,7 +6011,7 @@ static void kvm_set_mmio_spte_mask(void)
mask &= ~1ull;
#endif
- kvm_mmu_set_mmio_spte_mask(mask);
+ kvm_mmu_set_mmio_spte_mask(mask, mask);
}
#ifdef CONFIG_X86_64
@@ -6733,7 +6733,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
bool req_immediate_exit = false;
- if (vcpu->requests) {
+ if (kvm_request_pending(vcpu)) {
if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
kvm_mmu_unload(vcpu);
if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
@@ -6897,7 +6897,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_x86_ops->sync_pir_to_irr(vcpu);
}
- if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
+ if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
|| need_resched() || signal_pending(current)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 3b7c40a2e3e1..75d3776123cc 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -7,6 +7,7 @@
*/
#include <linux/export.h>
#include <linux/uaccess.h>
+#include <linux/highmem.h>
/*
* Zero Userspace
@@ -73,3 +74,136 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
clac();
return len;
}
+
+#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+/**
+ * clean_cache_range - write back a cache range with CLWB
+ * @addr: virtual start address
+ * @size: number of bytes to write back
+ *
+ * Write back a cache range using the CLWB (cache line write back)
+ * instruction. Note that @size is internally rounded up to be cache
+ * line size aligned.
+ */
+static void clean_cache_range(void *addr, size_t size)
+{
+ u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
+ unsigned long clflush_mask = x86_clflush_size - 1;
+ void *vend = addr + size;
+ void *p;
+
+ for (p = (void *)((unsigned long)addr & ~clflush_mask);
+ p < vend; p += x86_clflush_size)
+ clwb(p);
+}
+
+void arch_wb_cache_pmem(void *addr, size_t size)
+{
+ clean_cache_range(addr, size);
+}
+EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
+
+long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
+{
+ unsigned long flushed, dest = (unsigned long) dst;
+ long rc = __copy_user_nocache(dst, src, size, 0);
+
+ /*
+ * __copy_user_nocache() uses non-temporal stores for the bulk
+ * of the transfer, but we need to manually flush if the
+ * transfer is unaligned. A cached memory copy is used when
+ * destination or size is not naturally aligned. That is:
+ * - Require 8-byte alignment when size is 8 bytes or larger.
+ * - Require 4-byte alignment when size is 4 bytes.
+ */
+ if (size < 8) {
+ if (!IS_ALIGNED(dest, 4) || size != 4)
+ clean_cache_range(dst, 1);
+ } else {
+ if (!IS_ALIGNED(dest, 8)) {
+ dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+ clean_cache_range(dst, 1);
+ }
+
+ flushed = dest - (unsigned long) dst;
+ if (size > flushed && !IS_ALIGNED(size - flushed, 8))
+ clean_cache_range(dst + size - 1, 1);
+ }
+
+ return rc;
+}
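
The comment inside __copy_user_flushcache() spells out when an explicit cache write-back is still needed after the non-temporal copy. The standalone sketch below is a simplified approximation of those rules (it ignores the clflush-size rounding the real code does) and only prints which ranges it would flush; helper names are hypothetical.

/* Simplified sketch of the "flush the unaligned edges" rule described above. */
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

static void flush_range(uintptr_t addr, size_t len)
{
	printf("flush %#lx +%zu\n", (unsigned long)addr, len);
}

static void flush_unaligned_edges(uintptr_t dest, size_t size)
{
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			flush_range(dest, 1);
		return;
	}
	if (!IS_ALIGNED(dest, 8))
		flush_range(dest, 1);             /* head was copied cached */
	if (!IS_ALIGNED(size, 8))
		flush_range(dest + size - 1, 1);  /* tail was copied cached */
}

int main(void)
{
	flush_unaligned_edges(0x1003, 21);   /* unaligned head and tail    */
	flush_unaligned_edges(0x1000, 64);   /* fully aligned: no flushing */
	return 0;
}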
+
+void memcpy_flushcache(void *_dst, const void *_src, size_t size)
+{
+ unsigned long dest = (unsigned long) _dst;
+ unsigned long source = (unsigned long) _src;
+
+ /* cache copy and flush to align dest */
+ if (!IS_ALIGNED(dest, 8)) {
+ unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);
+
+ memcpy((void *) dest, (void *) source, len);
+ clean_cache_range((void *) dest, len);
+ dest += len;
+ source += len;
+ size -= len;
+ if (!size)
+ return;
+ }
+
+ /* 4x8 movnti loop */
+ while (size >= 32) {
+ asm("movq (%0), %%r8\n"
+ "movq 8(%0), %%r9\n"
+ "movq 16(%0), %%r10\n"
+ "movq 24(%0), %%r11\n"
+ "movnti %%r8, (%1)\n"
+ "movnti %%r9, 8(%1)\n"
+ "movnti %%r10, 16(%1)\n"
+ "movnti %%r11, 24(%1)\n"
+ :: "r" (source), "r" (dest)
+ : "memory", "r8", "r9", "r10", "r11");
+ dest += 32;
+ source += 32;
+ size -= 32;
+ }
+
+ /* 1x8 movnti loop */
+ while (size >= 8) {
+ asm("movq (%0), %%r8\n"
+ "movnti %%r8, (%1)\n"
+ :: "r" (source), "r" (dest)
+ : "memory", "r8");
+ dest += 8;
+ source += 8;
+ size -= 8;
+ }
+
+ /* 1x4 movnti loop */
+ while (size >= 4) {
+ asm("movl (%0), %%r8d\n"
+ "movnti %%r8d, (%1)\n"
+ :: "r" (source), "r" (dest)
+ : "memory", "r8");
+ dest += 4;
+ source += 4;
+ size -= 4;
+ }
+
+ /* cache copy for remaining bytes */
+ if (size) {
+ memcpy((void *) dest, (void *) source, size);
+ clean_cache_range((void *) dest, size);
+ }
+}
+EXPORT_SYMBOL_GPL(memcpy_flushcache);
+
+void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
+ size_t len)
+{
+ char *from = kmap_atomic(page);
+
+ memcpy_flushcache(to, from + offset, len);
+ kunmap_atomic(from);
+}
+#endif
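
As a rough analogue of memcpy_flushcache() above, the userspace sketch below mirrors its three-phase structure, unaligned head, aligned bulk, remainder, with plain memcpy standing in for the movnti loops and with no cache flushing; it is only meant to make the copy structure easier to follow and is not a persistent-memory implementation.

/* Userspace analogue of the head / aligned-bulk / tail copy structure. */
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static void copy_chunked(void *dst, const void *src, size_t size)
{
	uintptr_t d = (uintptr_t)dst, s = (uintptr_t)src;
	size_t head = (8 - (d & 7)) & 7;

	if (head > size)
		head = size;
	memcpy((void *)d, (const void *)s, head);        /* unaligned head */
	d += head; s += head; size -= head;

	while (size >= 32) {                             /* aligned bulk    */
		memcpy((void *)d, (const void *)s, 32);
		d += 32; s += 32; size -= 32;
	}
	memcpy((void *)d, (const void *)s, size);        /* remainder       */
}

int main(void)
{
	char src[64] = "persistent-memory copy structure demo";
	char dst[64] = {0};

	copy_chunked(dst + 3, src, 40);
	printf("%s\n", dst + 3);
	return 0;
}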
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index adad702b39cd..2824607df108 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -33,7 +33,7 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
if (!vma || !is_vm_hugetlb_page(vma))
return ERR_PTR(-EINVAL);
- pte = huge_pte_offset(mm, address);
+ pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
/* hugetlb should be locked, and hence, prefaulted */
WARN_ON(!pte || pte_none(*pte));
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 99fb83819a5f..8a64a6f2848d 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -823,15 +823,12 @@ void __init mem_init(void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
+int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
- struct pglist_data *pgdata = NODE_DATA(nid);
- struct zone *zone = pgdata->node_zones +
- zone_for_memory(nid, start, size, ZONE_HIGHMEM, for_device);
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- return __add_pages(nid, zone, start_pfn, nr_pages);
+ return __add_pages(nid, start_pfn, nr_pages, want_memblock);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index dae6a5e5ad4a..136422d7d539 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -772,22 +772,15 @@ static void update_end_of_memory_vars(u64 start, u64 size)
}
}
-/*
- * Memory is added always to NORMAL zone. This means you will never get
- * additional DMA/DMA32 memory.
- */
-int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
+int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock)
{
- struct pglist_data *pgdat = NODE_DATA(nid);
- struct zone *zone = pgdat->node_zones +
- zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
init_memory_mapping(start, start + size);
- ret = __add_pages(nid, zone, start_pfn, nr_pages);
+ ret = __add_pages(nid, start_pfn, nr_pages, want_memblock);
WARN_ON_ONCE(ret);
/* update max_pfn, max_low_pfn and high_memory */
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index c8520b2c62d2..757b0bcdf712 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -150,6 +150,12 @@ void clflush_cache_range(void *vaddr, unsigned int size)
}
EXPORT_SYMBOL_GPL(clflush_cache_range);
+void arch_invalidate_pmem(void *addr, size_t size)
+{
+ clflush_cache_range(addr, size);
+}
+EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
+
static void __cpa_flush_all(void *arg)
{
unsigned long cache = (unsigned long)arg;
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index cfd1a89fd04e..dbe2132b0ed4 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -24,7 +24,6 @@ unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
unsigned int pci_early_dump_regs;
static int pci_bf_sort;
-static int smbios_type_b1_flag;
int pci_routeirq;
int noioapicquirk;
#ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS
@@ -197,34 +196,18 @@ static int __init set_bf_sort(const struct dmi_system_id *d)
static void __init read_dmi_type_b1(const struct dmi_header *dm,
void *private_data)
{
- u8 *d = (u8 *)dm + 4;
+ u8 *data = (u8 *)dm + 4;
if (dm->type != 0xB1)
return;
- switch (((*(u32 *)d) >> 9) & 0x03) {
- case 0x00:
- printk(KERN_INFO "dmi type 0xB1 record - unknown flag\n");
- break;
- case 0x01: /* set pci=bfsort */
- smbios_type_b1_flag = 1;
- break;
- case 0x02: /* do not set pci=bfsort */
- smbios_type_b1_flag = 2;
- break;
- default:
- break;
- }
+ if ((((*(u32 *)data) >> 9) & 0x03) == 0x01)
+ set_bf_sort((const struct dmi_system_id *)private_data);
}
static int __init find_sort_method(const struct dmi_system_id *d)
{
- dmi_walk(read_dmi_type_b1, NULL);
-
- if (smbios_type_b1_flag == 1) {
- set_bf_sort(d);
- return 0;
- }
- return -1;
+ dmi_walk(read_dmi_type_b1, (void *)d);
+ return 0;
}
/*
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 6d52b94f4bb9..11e407489db0 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -571,3 +571,50 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
+
+/*
+ * Device [1022:7808]
+ * 23. USB Wake on Connect/Disconnect with Low Speed Devices
+ * https://support.amd.com/TechDocs/46837.pdf
+ * Appendix A2
+ * https://support.amd.com/TechDocs/42413.pdf
+ */
+static void pci_fixup_amd_ehci_pme(struct pci_dev *dev)
+{
+ dev_info(&dev->dev, "PME# does not work under D3, disabling it\n");
+ dev->pme_support &= ~((PCI_PM_CAP_PME_D3 | PCI_PM_CAP_PME_D3cold)
+ >> PCI_PM_CAP_PME_SHIFT);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme);
+
+/*
+ * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff]
+ *
+ * Using the [mem 0x7fa00000-0x7fbfffff] region, e.g., by assigning it to
+ * the 00:1c.0 Root Port, causes a conflict with [io 0x1804], which is used
+ * for soft poweroff and suspend-to-RAM.
+ *
+ * As far as we know, this is related to the address space, not to the Root
+ * Port itself. Attaching the quirk to the Root Port is a convenience, but
+ * it could probably also be a standalone DMI quirk.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=103211
+ */
+static void quirk_apple_mbp_poweroff(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+
+ if ((!dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,4") &&
+ !dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,5")) ||
+ pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x1c, 0))
+ return;
+
+ res = request_mem_region(0x7fa00000, 0x200000,
+ "MacBook Pro poweroff workaround");
+ if (res)
+ dev_info(dev, "claimed %s %pR\n", res->name, res);
+ else
+ dev_info(dev, "can't work around MacBook Pro poweroff issue\n");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index c1bdb9edcae7..76595408ff53 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -46,7 +46,7 @@ static inline void set_bios_x(void)
pcibios_enabled = 1;
set_memory_x(PAGE_OFFSET + BIOS_BEGIN, (BIOS_END - BIOS_BEGIN) >> PAGE_SHIFT);
if (__supported_pte_mask & _PAGE_NX)
- printk(KERN_INFO "PCI : PCI BIOS area is rw and x. Use pci=nobios if you want it NX.\n");
+ printk(KERN_INFO "PCI: PCI BIOS area is rw and x. Use pci=nobios if you want it NX.\n");
}
/*
diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
index ec008e800b45..53d600217973 100644
--- a/arch/x86/pci/sta2x11-fixup.c
+++ b/arch/x86/pci/sta2x11-fixup.c
@@ -26,6 +26,7 @@
#include <linux/pci_ids.h>
#include <linux/export.h>
#include <linux/list.h>
+#include <asm/iommu.h>
#define STA2X11_SWIOTLB_SIZE (4*1024*1024)
extern int swiotlb_late_init_with_default_size(size_t default_size);
@@ -191,7 +192,7 @@ static const struct dma_map_ops sta2x11_dma_ops = {
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = swiotlb_sync_sg_for_device,
.mapping_error = swiotlb_dma_mapping_error,
- .dma_supported = NULL, /* FIXME: we should use this instead! */
+ .dma_supported = x86_dma_supported,
};
/* At setup time, we use our own ops if the device is a ConneXt one */
diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile
index d72dec406ccb..329406224330 100644
--- a/arch/x86/um/vdso/Makefile
+++ b/arch/x86/um/vdso/Makefile
@@ -53,7 +53,7 @@ CFLAGS_REMOVE_vdso-note.o = -pg -fprofile-arcs -ftest-coverage
CFLAGS_REMOVE_um_vdso.o = -pg -fprofile-arcs -ftest-coverage
targets += vdso-syms.lds
-obj-$(VDSO64-y) += vdso-syms.lds
+extra-$(VDSO64-y) += vdso-syms.lds
#
# Match symbols in the DSO that look like VDSO*; produce a file of constants.
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a5ffcbb20cc0..0e7ef69e8531 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -106,15 +106,83 @@ int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int),
return rc >= 0 ? 0 : rc;
}
-static void clamp_max_cpus(void)
+static int xen_vcpu_setup_restore(int cpu)
{
-#ifdef CONFIG_SMP
- if (setup_max_cpus > MAX_VIRT_CPUS)
- setup_max_cpus = MAX_VIRT_CPUS;
-#endif
+ int rc = 0;
+
+ /* Any per_cpu(xen_vcpu) is stale, so reset it */
+ xen_vcpu_info_reset(cpu);
+
+ /*
+ * For PVH and PVHVM, setup online VCPUs only. The rest will
+ * be handled by hotplug.
+ */
+ if (xen_pv_domain() ||
+ (xen_hvm_domain() && cpu_online(cpu))) {
+ rc = xen_vcpu_setup(cpu);
+ }
+
+ return rc;
+}
+
+/*
+ * On restore, set the vcpu placement up again.
+ * If it fails, then we're in a bad state, since
+ * we can't back out from using it...
+ */
+void xen_vcpu_restore(void)
+{
+ int cpu, rc;
+
+ for_each_possible_cpu(cpu) {
+ bool other_cpu = (cpu != smp_processor_id());
+ bool is_up;
+
+ if (xen_vcpu_nr(cpu) == XEN_VCPU_ID_INVALID)
+ continue;
+
+ /* Only Xen 4.5 and higher support this. */
+ is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up,
+ xen_vcpu_nr(cpu), NULL) > 0;
+
+ if (other_cpu && is_up &&
+ HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL))
+ BUG();
+
+ if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock))
+ xen_setup_runstate_info(cpu);
+
+ rc = xen_vcpu_setup_restore(cpu);
+ if (rc)
+ pr_emerg_once("vcpu restore failed for cpu=%d err=%d. "
+ "System will hang.\n", cpu, rc);
+ /*
+ * In case xen_vcpu_setup_restore() fails, do not bring up the
+ * VCPU. This helps us avoid the resulting OOPS when the VCPU
+ * accesses pvclock_vcpu_time via xen_vcpu (which is NULL.)
+ * Note that this does not improve the situation much -- now the
+ * VM hangs instead of OOPSing -- with the VCPUs that did not
+ * fail, spinning in stop_machine(), waiting for the failed
+ * VCPUs to come up.
+ */
+ if (other_cpu && is_up && (rc == 0) &&
+ HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
+ BUG();
+ }
}
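
xen_vcpu_restore() above now re-registers every vCPU's info area after resume and refuses to bring a vCPU back up if that registration failed. Below is a deliberately simplified userspace sketch of that per-CPU sequence (it omits the VCPUOP_is_up probe and the invalid-id skip); all names are hypothetical.

/* Sketch of the down -> re-register -> conditional up restore loop. */
#include <stdbool.h>
#include <stdio.h>

static bool vcpu_down(int cpu)  { printf("down %d\n", cpu); return true; }
static bool vcpu_up(int cpu)    { printf("up %d\n", cpu);   return true; }
static int  vcpu_setup(int cpu) { (void)cpu; return 0; /* may fail after resume */ }

static void vcpu_restore(int self, int ncpus)
{
	for (int cpu = 0; cpu < ncpus; cpu++) {
		bool other = (cpu != self);

		if (other)
			vcpu_down(cpu);
		if (vcpu_setup(cpu) != 0) {
			fprintf(stderr, "cpu %d: restore failed, leaving it down\n", cpu);
			continue;               /* never bring a broken vCPU up */
		}
		if (other)
			vcpu_up(cpu);
	}
}

int main(void)
{
	vcpu_restore(0, 2);
	return 0;
}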
-void xen_vcpu_setup(int cpu)
+void xen_vcpu_info_reset(int cpu)
+{
+ if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS) {
+ per_cpu(xen_vcpu, cpu) =
+ &HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
+ } else {
+ /* Set to NULL so that if somebody accesses it we get an OOPS */
+ per_cpu(xen_vcpu, cpu) = NULL;
+ }
+}
+
+int xen_vcpu_setup(int cpu)
{
struct vcpu_register_vcpu_info info;
int err;
@@ -123,11 +191,11 @@ void xen_vcpu_setup(int cpu)
BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
/*
- * This path is called twice on PVHVM - first during bootup via
- * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
- * hotplugged: cpu_up -> xen_hvm_cpu_notify.
- * As we can only do the VCPUOP_register_vcpu_info once lets
- * not over-write its result.
+ * This path is called on PVHVM at bootup (xen_hvm_smp_prepare_boot_cpu)
+ * and at restore (xen_vcpu_restore). Also called for hotplugged
+ * VCPUs (cpu_init -> xen_hvm_cpu_prepare_hvm).
+ * However, the hypercall can only be done once (see below) so if a VCPU
+ * is offlined and comes back online then let's not redo the hypercall.
*
* For PV it is called during restore (xen_vcpu_restore) and bootup
* (xen_setup_vcpu_info_placement). The hotplug mechanism does not
@@ -135,42 +203,44 @@ void xen_vcpu_setup(int cpu)
*/
if (xen_hvm_domain()) {
if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
- return;
+ return 0;
}
- if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
- per_cpu(xen_vcpu, cpu) =
- &HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
- if (!xen_have_vcpu_info_placement) {
- if (cpu >= MAX_VIRT_CPUS)
- clamp_max_cpus();
- return;
+ if (xen_have_vcpu_info_placement) {
+ vcpup = &per_cpu(xen_vcpu_info, cpu);
+ info.mfn = arbitrary_virt_to_mfn(vcpup);
+ info.offset = offset_in_page(vcpup);
+
+ /*
+ * Check to see if the hypervisor will put the vcpu_info
+ * structure where we want it, which allows direct access via
+ * a percpu-variable.
+ * N.B. This hypercall can _only_ be called once per CPU.
+ * Subsequent calls will error out with -EINVAL. This is due to
+ * the fact that the hypervisor has no unregister variant and this
+ * hypercall does not allow overwriting info.mfn and
+ * info.offset.
+ */
+ err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info,
+ xen_vcpu_nr(cpu), &info);
+
+ if (err) {
+ pr_warn_once("register_vcpu_info failed: cpu=%d err=%d\n",
+ cpu, err);
+ xen_have_vcpu_info_placement = 0;
+ } else {
+ /*
+ * This cpu is using the registered vcpu info, even if
+ * later ones fail to.
+ */
+ per_cpu(xen_vcpu, cpu) = vcpup;
+ }
}
- vcpup = &per_cpu(xen_vcpu_info, cpu);
- info.mfn = arbitrary_virt_to_mfn(vcpup);
- info.offset = offset_in_page(vcpup);
-
- /* Check to see if the hypervisor will put the vcpu_info
- structure where we want it, which allows direct access via
- a percpu-variable.
- N.B. This hypercall can _only_ be called once per CPU. Subsequent
- calls will error out with -EINVAL. This is due to the fact that
- hypervisor has no unregister variant and this hypercall does not
- allow to over-write info.mfn and info.offset.
- */
- err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
- &info);
+ if (!xen_have_vcpu_info_placement)
+ xen_vcpu_info_reset(cpu);
- if (err) {
- printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
- xen_have_vcpu_info_placement = 0;
- clamp_max_cpus();
- } else {
- /* This cpu is using the registered vcpu info, even if
- later ones fail to. */
- per_cpu(xen_vcpu, cpu) = vcpup;
- }
+ return ((per_cpu(xen_vcpu, cpu) == NULL) ? -ENODEV : 0);
}
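
xen_vcpu_setup() now returns an error instead of clamping the CPU count: try the one-shot VCPUOP_register_vcpu_info placement, otherwise fall back to the legacy shared-info slot, and report a failure only when even that slot is unavailable. A compact sketch of that control flow under assumed, hypothetical helpers; it is commentary, not the Xen code.

/* Sketch of "per-cpu placement first, shared slot second, error last". */
#include <stdbool.h>
#include <stdio.h>

#define MAX_LEGACY_VCPUS 32

static void *shared_slot(int cpu)
{
	return cpu < MAX_LEGACY_VCPUS ? (void *)1 : NULL;
}

static bool register_percpu_info(int cpu)
{
	(void)cpu;
	return false;                              /* e.g. hypercall returned -EINVAL */
}

static void *vcpu_info[128];

static int vcpu_setup(int cpu, bool placement)
{
	if (placement && register_percpu_info(cpu))
		vcpu_info[cpu] = (void *)2;        /* direct per-cpu mapping */
	else
		vcpu_info[cpu] = shared_slot(cpu); /* legacy shared page     */
	return vcpu_info[cpu] ? 0 : -1;            /* -ENODEV in the kernel  */
}

int main(void)
{
	printf("%d %d\n", vcpu_setup(0, true), vcpu_setup(64, true)); /* 0 -1 */
	return 0;
}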
void xen_reboot(int reason)
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
index a6d014f47e52..87d791356ea9 100644
--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -1,5 +1,6 @@
#include <linux/cpu.h>
#include <linux/kexec.h>
+#include <linux/memblock.h>
#include <xen/features.h>
#include <xen/events.h>
@@ -10,9 +11,11 @@
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/hypervisor.h>
+#include <asm/e820/api.h>
#include <asm/xen/cpuid.h>
#include <asm/xen/hypervisor.h>
+#include <asm/xen/page.h>
#include "xen-ops.h"
#include "mmu.h"
@@ -20,37 +23,34 @@
void __ref xen_hvm_init_shared_info(void)
{
- int cpu;
struct xen_add_to_physmap xatp;
- static struct shared_info *shared_info_page;
+ u64 pa;
+
+ if (HYPERVISOR_shared_info == &xen_dummy_shared_info) {
+ /*
+ * Search for a free page starting at 4kB physical address.
+ * Low memory is preferred to avoid an EPT large page split up
+ * by the mapping.
+ * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
+ * the BIOS used for HVM guests is well behaved and won't
+ * clobber memory other than the first 4kB.
+ */
+ for (pa = PAGE_SIZE;
+ !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
+ memblock_is_reserved(pa);
+ pa += PAGE_SIZE)
+ ;
+
+ memblock_reserve(pa, PAGE_SIZE);
+ HYPERVISOR_shared_info = __va(pa);
+ }
- if (!shared_info_page)
- shared_info_page = (struct shared_info *)
- extend_brk(PAGE_SIZE, PAGE_SIZE);
xatp.domid = DOMID_SELF;
xatp.idx = 0;
xatp.space = XENMAPSPACE_shared_info;
- xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
+ xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
BUG();
-
- HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;
-
- /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
- * page, we use it in the event channel upcall and in some pvclock
- * related functions. We don't need the vcpu_info placement
- * optimizations because we don't use any pv_mmu or pv_irq op on
- * HVM.
- * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
- * online but xen_hvm_init_shared_info is run at resume time too and
- * in that case multiple vcpus might be online. */
- for_each_online_cpu(cpu) {
- /* Leave it to be NULL. */
- if (xen_vcpu_nr(cpu) >= MAX_VIRT_CPUS)
- continue;
- per_cpu(xen_vcpu, cpu) =
- &HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)];
- }
}
static void __init init_hvm_pv_info(void)
@@ -106,7 +106,7 @@ static void xen_hvm_crash_shutdown(struct pt_regs *regs)
static int xen_cpu_up_prepare_hvm(unsigned int cpu)
{
- int rc;
+ int rc = 0;
/*
* This can happen if CPU was offlined earlier and
@@ -121,7 +121,9 @@ static int xen_cpu_up_prepare_hvm(unsigned int cpu)
per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
else
per_cpu(xen_vcpu_id, cpu) = cpu;
- xen_vcpu_setup(cpu);
+ rc = xen_vcpu_setup(cpu);
+ if (rc)
+ return rc;
if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
xen_setup_timer(cpu);
@@ -130,9 +132,8 @@ static int xen_cpu_up_prepare_hvm(unsigned int cpu)
if (rc) {
WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
cpu, rc);
- return rc;
}
- return 0;
+ return rc;
}
static int xen_cpu_dead_hvm(unsigned int cpu)
@@ -154,6 +155,13 @@ static void __init xen_hvm_guest_init(void)
xen_hvm_init_shared_info();
+ /*
+ * xen_vcpu is a pointer to the vcpu_info struct in the shared_info
+ * page, we use it in the event channel upcall and in some pvclock
+ * related functions.
+ */
+ xen_vcpu_info_reset(0);
+
xen_panic_handler_init();
if (xen_feature(XENFEAT_hvm_callback_vector))
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index f33eef4ebd12..811e4ddb3f37 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -89,8 +89,6 @@
void *xen_initial_gdt;
-RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
-
static int xen_cpu_up_prepare_pv(unsigned int cpu);
static int xen_cpu_dead_pv(unsigned int cpu);
@@ -107,35 +105,6 @@ struct tls_descs {
*/
static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);
-/*
- * On restore, set the vcpu placement up again.
- * If it fails, then we're in a bad state, since
- * we can't back out from using it...
- */
-void xen_vcpu_restore(void)
-{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- bool other_cpu = (cpu != smp_processor_id());
- bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, xen_vcpu_nr(cpu),
- NULL);
-
- if (other_cpu && is_up &&
- HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL))
- BUG();
-
- xen_setup_runstate_info(cpu);
-
- if (xen_have_vcpu_info_placement)
- xen_vcpu_setup(cpu);
-
- if (other_cpu && is_up &&
- HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
- BUG();
- }
-}
-
static void __init xen_banner(void)
{
unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
@@ -960,30 +929,43 @@ void xen_setup_shared_info(void)
HYPERVISOR_shared_info =
(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
-#ifndef CONFIG_SMP
- /* In UP this is as good a place as any to set up shared info */
- xen_setup_vcpu_info_placement();
-#endif
-
xen_setup_mfn_list_list();
- /*
- * Now that shared info is set up we can start using routines that
- * point to pvclock area.
- */
- if (system_state == SYSTEM_BOOTING)
+ if (system_state == SYSTEM_BOOTING) {
+#ifndef CONFIG_SMP
+ /*
+ * In UP this is as good a place as any to set up shared info.
+ * Limit this to boot only, at restore vcpu setup is done via
+ * xen_vcpu_restore().
+ */
+ xen_setup_vcpu_info_placement();
+#endif
+ /*
+ * Now that shared info is set up we can start using routines
+ * that point to pvclock area.
+ */
xen_init_time_ops();
+ }
}
/* This is called once we have the cpu_possible_mask */
-void xen_setup_vcpu_info_placement(void)
+void __ref xen_setup_vcpu_info_placement(void)
{
int cpu;
for_each_possible_cpu(cpu) {
/* Set up direct vCPU id mapping for PV guests. */
per_cpu(xen_vcpu_id, cpu) = cpu;
- xen_vcpu_setup(cpu);
+
+ /*
+ * xen_vcpu_setup(cpu) can fail -- in which case it
+ * falls back to the shared_info version for cpus
+ * where xen_vcpu_nr(cpu) < MAX_VIRT_CPUS.
+ *
+ * xen_cpu_up_prepare_pv() handles the rest by failing
+ * them in hotplug.
+ */
+ (void) xen_vcpu_setup(cpu);
}
/*
@@ -1332,9 +1314,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
*/
acpi_numa = -1;
#endif
- /* Don't do the full vcpu_info placement stuff until we have a
- possible map and a non-dummy shared_info. */
- per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
+ /* Let's presume PV guests always boot on vCPU with id 0. */
+ per_cpu(xen_vcpu_id, 0) = 0;
+
+ /*
+ * Setup xen_vcpu early because start_kernel needs it for
+ * local_irq_disable(), irqs_disabled().
+ *
+ * Don't do the full vcpu_info placement stuff until we have
+ * the cpu_possible_mask and a non-dummy shared_info.
+ */
+ xen_vcpu_info_reset(0);
WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));
@@ -1431,9 +1421,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
#endif
xen_raw_console_write("about to get started...\n");
- /* Let's presume PV guests always boot on vCPU with id 0. */
- per_cpu(xen_vcpu_id, 0) = 0;
-
+ /* We need this for printk timestamps */
xen_setup_runstate_info(0);
xen_efi_init();
@@ -1451,6 +1439,9 @@ static int xen_cpu_up_prepare_pv(unsigned int cpu)
{
int rc;
+ if (per_cpu(xen_vcpu, cpu) == NULL)
+ return -ENODEV;
+
xen_setup_timer(cpu);
rc = xen_smp_intr_init(cpu);
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index 42b08f8fc2ca..37c6056a7bba 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -18,20 +18,6 @@
int xen_swiotlb __read_mostly;
-static const struct dma_map_ops xen_swiotlb_dma_ops = {
- .alloc = xen_swiotlb_alloc_coherent,
- .free = xen_swiotlb_free_coherent,
- .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
- .sync_single_for_device = xen_swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
- .map_sg = xen_swiotlb_map_sg_attrs,
- .unmap_sg = xen_swiotlb_unmap_sg_attrs,
- .map_page = xen_swiotlb_map_page,
- .unmap_page = xen_swiotlb_unmap_page,
- .dma_supported = xen_swiotlb_dma_supported,
-};
-
/*
* pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary
*
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index a5bf7c451435..c81046323ebc 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -499,7 +499,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
void __init xen_remap_memory(void)
{
unsigned long buf = (unsigned long)&xen_remap_buf;
- unsigned long mfn_save, mfn, pfn;
+ unsigned long mfn_save, pfn;
unsigned long remapped = 0;
unsigned int i;
unsigned long pfn_s = ~0UL;
@@ -515,8 +515,7 @@ void __init xen_remap_memory(void)
pfn = xen_remap_buf.target_pfn;
for (i = 0; i < xen_remap_buf.size; i++) {
- mfn = xen_remap_buf.mfns[i];
- xen_update_mem_tables(pfn, mfn);
+ xen_update_mem_tables(pfn, xen_remap_buf.mfns[i]);
remapped++;
pfn++;
}
@@ -530,8 +529,6 @@ void __init xen_remap_memory(void)
pfn_s = xen_remap_buf.target_pfn;
len = xen_remap_buf.size;
}
-
- mfn = xen_remap_mfn;
xen_remap_mfn = xen_remap_buf.next_area_mfn;
}
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 82ac611f2fc1..e7f02eb73727 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -1,4 +1,5 @@
#include <linux/smp.h>
+#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
@@ -114,6 +115,36 @@ int xen_smp_intr_init(unsigned int cpu)
return rc;
}
+void __init xen_smp_cpus_done(unsigned int max_cpus)
+{
+ int cpu, rc, count = 0;
+
+ if (xen_hvm_domain())
+ native_smp_cpus_done(max_cpus);
+
+ if (xen_have_vcpu_info_placement)
+ return;
+
+ for_each_online_cpu(cpu) {
+ if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
+ continue;
+
+ rc = cpu_down(cpu);
+
+ if (rc == 0) {
+ /*
+ * Reset vcpu_info so this cpu cannot be onlined again.
+ */
+ xen_vcpu_info_reset(cpu);
+ count++;
+ } else {
+ pr_warn("%s: failed to bring CPU %d down, error %d\n",
+ __func__, cpu, rc);
+ }
+ }
+ WARN(count, "%s: brought %d CPUs offline\n", __func__, count);
+}
+
void xen_smp_send_reschedule(int cpu)
{
xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
index 8ebb6acca64a..87d3c76cba37 100644
--- a/arch/x86/xen/smp.h
+++ b/arch/x86/xen/smp.h
@@ -14,6 +14,8 @@ extern void xen_smp_intr_free(unsigned int cpu);
int xen_smp_intr_init_pv(unsigned int cpu);
void xen_smp_intr_free_pv(unsigned int cpu);
+void xen_smp_cpus_done(unsigned int max_cpus);
+
void xen_smp_send_reschedule(int cpu);
void xen_smp_send_call_function_ipi(const struct cpumask *mask);
void xen_smp_send_call_function_single_ipi(int cpu);
diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c
index f18561bbf5c9..fd60abedf658 100644
--- a/arch/x86/xen/smp_hvm.c
+++ b/arch/x86/xen/smp_hvm.c
@@ -12,7 +12,8 @@ static void __init xen_hvm_smp_prepare_boot_cpu(void)
native_smp_prepare_boot_cpu();
/*
- * Setup vcpu_info for boot CPU.
+ * Setup vcpu_info for boot CPU. Secondary CPUs get their vcpu_info
+ * in xen_cpu_up_prepare_hvm().
*/
xen_vcpu_setup(0);
@@ -27,10 +28,20 @@ static void __init xen_hvm_smp_prepare_boot_cpu(void)
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
+ int cpu;
+
native_smp_prepare_cpus(max_cpus);
WARN_ON(xen_smp_intr_init(0));
xen_init_lock_cpu(0);
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == 0)
+ continue;
+
+ /* Set default vcpu_id to make sure that we don't use cpu-0's */
+ per_cpu(xen_vcpu_id, cpu) = XEN_VCPU_ID_INVALID;
+ }
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -60,4 +71,5 @@ void __init xen_hvm_smp_init(void)
smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
+ smp_ops.smp_cpus_done = xen_smp_cpus_done;
}
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index aae32535f4ec..1ea598e5f030 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -371,10 +371,6 @@ static int xen_pv_cpu_up(unsigned int cpu, struct task_struct *idle)
return 0;
}
-static void xen_pv_smp_cpus_done(unsigned int max_cpus)
-{
-}
-
#ifdef CONFIG_HOTPLUG_CPU
static int xen_pv_cpu_disable(void)
{
@@ -469,7 +465,7 @@ static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
static const struct smp_ops xen_smp_ops __initconst = {
.smp_prepare_boot_cpu = xen_pv_smp_prepare_boot_cpu,
.smp_prepare_cpus = xen_pv_smp_prepare_cpus,
- .smp_cpus_done = xen_pv_smp_cpus_done,
+ .smp_cpus_done = xen_smp_cpus_done,
.cpu_up = xen_pv_cpu_up,
.cpu_die = xen_pv_cpu_die,
diff --git a/arch/x86/xen/suspend_hvm.c b/arch/x86/xen/suspend_hvm.c
index 01afcadde50a..484999416d8b 100644
--- a/arch/x86/xen/suspend_hvm.c
+++ b/arch/x86/xen/suspend_hvm.c
@@ -8,15 +8,10 @@
void xen_hvm_post_suspend(int suspend_cancelled)
{
- int cpu;
-
- if (!suspend_cancelled)
+ if (!suspend_cancelled) {
xen_hvm_init_shared_info();
+ xen_vcpu_restore();
+ }
xen_callback_vector();
xen_unplug_emulated_devices();
- if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
- for_each_online_cpu(cpu) {
- xen_setup_runstate_info(cpu);
- }
- }
}
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 9a440a42c618..0d5004477db6 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -78,7 +78,8 @@ bool xen_vcpu_stolen(int vcpu);
extern int xen_have_vcpu_info_placement;
-void xen_vcpu_setup(int cpu);
+int xen_vcpu_setup(int cpu);
+void xen_vcpu_info_reset(int cpu);
void xen_setup_vcpu_info_placement(void);
#ifdef CONFIG_SMP
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index f4126cf997a4..7ad6d77b2f22 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -3,6 +3,7 @@ config ZONE_DMA
config XTENSA
def_bool y
+ select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index c6140fa8c0be..269738dc9d1d 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -16,8 +16,6 @@
#include <linux/mm.h>
#include <linux/scatterlist.h>
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
extern const struct dma_map_ops xtensa_dma_map_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 2e7bac0d4b2c..b8f152b6aaa5 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -278,19 +278,15 @@ clear_user(void *addr, unsigned long size)
extern long __strncpy_user(char *, const char *, long);
-#define __strncpy_from_user __strncpy_user
static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
if (access_ok(VERIFY_READ, src, 1))
- return __strncpy_from_user(dst, src, count);
+ return __strncpy_user(dst, src, count);
return -EFAULT;
}
-
-#define strlen_user(str) strnlen_user((str), TASK_SIZE - 1)
-
/*
* Return the size of a string (including the ending 0!)
*/
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 04325b81c2b4..38554c2ea38a 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -4,7 +4,6 @@
#include <linux/cdrom.h>
#include <linux/compat.h>
#include <linux/elevator.h>
-#include <linux/fd.h>
#include <linux/hdreg.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
@@ -80,19 +79,16 @@ static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev,
static int compat_hdio_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- mm_segment_t old_fs = get_fs();
- unsigned long kval;
- unsigned int __user *uvp;
+ unsigned long __user *p;
int error;
- set_fs(KERNEL_DS);
+ p = compat_alloc_user_space(sizeof(unsigned long));
error = __blkdev_driver_ioctl(bdev, mode,
- cmd, (unsigned long)(&kval));
- set_fs(old_fs);
-
+ cmd, (unsigned long)p);
if (error == 0) {
- uvp = compat_ptr(arg);
- if (put_user(kval, uvp))
+ unsigned int __user *uvp = compat_ptr(arg);
+ unsigned long v;
+ if (get_user(v, p) || put_user(v, uvp))
error = -EFAULT;
}
return error;
@@ -209,318 +205,6 @@ static int compat_blkpg_ioctl(struct block_device *bdev, fmode_t mode,
#define BLKBSZSET_32 _IOW(0x12, 113, int)
#define BLKGETSIZE64_32 _IOR(0x12, 114, int)
-struct compat_floppy_drive_params {
- char cmos;
- compat_ulong_t max_dtr;
- compat_ulong_t hlt;
- compat_ulong_t hut;
- compat_ulong_t srt;
- compat_ulong_t spinup;
- compat_ulong_t spindown;
- unsigned char spindown_offset;
- unsigned char select_delay;
- unsigned char rps;
- unsigned char tracks;
- compat_ulong_t timeout;
- unsigned char interleave_sect;
- struct floppy_max_errors max_errors;
- char flags;
- char read_track;
- short autodetect[8];
- compat_int_t checkfreq;
- compat_int_t native_format;
-};
-
-struct compat_floppy_drive_struct {
- signed char flags;
- compat_ulong_t spinup_date;
- compat_ulong_t select_date;
- compat_ulong_t first_read_date;
- short probed_format;
- short track;
- short maxblock;
- short maxtrack;
- compat_int_t generation;
- compat_int_t keep_data;
- compat_int_t fd_ref;
- compat_int_t fd_device;
- compat_int_t last_checked;
- compat_caddr_t dmabuf;
- compat_int_t bufblocks;
-};
-
-struct compat_floppy_fdc_state {
- compat_int_t spec1;
- compat_int_t spec2;
- compat_int_t dtr;
- unsigned char version;
- unsigned char dor;
- compat_ulong_t address;
- unsigned int rawcmd:2;
- unsigned int reset:1;
- unsigned int need_configure:1;
- unsigned int perp_mode:2;
- unsigned int has_fifo:1;
- unsigned int driver_version;
- unsigned char track[4];
-};
-
-struct compat_floppy_write_errors {
- unsigned int write_errors;
- compat_ulong_t first_error_sector;
- compat_int_t first_error_generation;
- compat_ulong_t last_error_sector;
- compat_int_t last_error_generation;
- compat_uint_t badness;
-};
-
-#define FDSETPRM32 _IOW(2, 0x42, struct compat_floppy_struct)
-#define FDDEFPRM32 _IOW(2, 0x43, struct compat_floppy_struct)
-#define FDSETDRVPRM32 _IOW(2, 0x90, struct compat_floppy_drive_params)
-#define FDGETDRVPRM32 _IOR(2, 0x11, struct compat_floppy_drive_params)
-#define FDGETDRVSTAT32 _IOR(2, 0x12, struct compat_floppy_drive_struct)
-#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct compat_floppy_drive_struct)
-#define FDGETFDCSTAT32 _IOR(2, 0x15, struct compat_floppy_fdc_state)
-#define FDWERRORGET32 _IOR(2, 0x17, struct compat_floppy_write_errors)
-
-static struct {
- unsigned int cmd32;
- unsigned int cmd;
-} fd_ioctl_trans_table[] = {
- { FDSETPRM32, FDSETPRM },
- { FDDEFPRM32, FDDEFPRM },
- { FDGETPRM32, FDGETPRM },
- { FDSETDRVPRM32, FDSETDRVPRM },
- { FDGETDRVPRM32, FDGETDRVPRM },
- { FDGETDRVSTAT32, FDGETDRVSTAT },
- { FDPOLLDRVSTAT32, FDPOLLDRVSTAT },
- { FDGETFDCSTAT32, FDGETFDCSTAT },
- { FDWERRORGET32, FDWERRORGET }
-};
-
-#define NR_FD_IOCTL_TRANS ARRAY_SIZE(fd_ioctl_trans_table)
-
-static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- mm_segment_t old_fs = get_fs();
- void *karg = NULL;
- unsigned int kcmd = 0;
- int i, err;
-
- for (i = 0; i < NR_FD_IOCTL_TRANS; i++)
- if (cmd == fd_ioctl_trans_table[i].cmd32) {
- kcmd = fd_ioctl_trans_table[i].cmd;
- break;
- }
- if (!kcmd)
- return -EINVAL;
-
- switch (cmd) {
- case FDSETPRM32:
- case FDDEFPRM32:
- case FDGETPRM32:
- {
- compat_uptr_t name;
- struct compat_floppy_struct __user *uf;
- struct floppy_struct *f;
-
- uf = compat_ptr(arg);
- f = karg = kmalloc(sizeof(struct floppy_struct), GFP_KERNEL);
- if (!karg)
- return -ENOMEM;
- if (cmd == FDGETPRM32)
- break;
- err = __get_user(f->size, &uf->size);
- err |= __get_user(f->sect, &uf->sect);
- err |= __get_user(f->head, &uf->head);
- err |= __get_user(f->track, &uf->track);
- err |= __get_user(f->stretch, &uf->stretch);
- err |= __get_user(f->gap, &uf->gap);
- err |= __get_user(f->rate, &uf->rate);
- err |= __get_user(f->spec1, &uf->spec1);
- err |= __get_user(f->fmt_gap, &uf->fmt_gap);
- err |= __get_user(name, &uf->name);
- f->name = compat_ptr(name);
- if (err) {
- err = -EFAULT;
- goto out;
- }
- break;
- }
- case FDSETDRVPRM32:
- case FDGETDRVPRM32:
- {
- struct compat_floppy_drive_params __user *uf;
- struct floppy_drive_params *f;
-
- uf = compat_ptr(arg);
- f = karg = kmalloc(sizeof(struct floppy_drive_params), GFP_KERNEL);
- if (!karg)
- return -ENOMEM;
- if (cmd == FDGETDRVPRM32)
- break;
- err = __get_user(f->cmos, &uf->cmos);
- err |= __get_user(f->max_dtr, &uf->max_dtr);
- err |= __get_user(f->hlt, &uf->hlt);
- err |= __get_user(f->hut, &uf->hut);
- err |= __get_user(f->srt, &uf->srt);
- err |= __get_user(f->spinup, &uf->spinup);
- err |= __get_user(f->spindown, &uf->spindown);
- err |= __get_user(f->spindown_offset, &uf->spindown_offset);
- err |= __get_user(f->select_delay, &uf->select_delay);
- err |= __get_user(f->rps, &uf->rps);
- err |= __get_user(f->tracks, &uf->tracks);
- err |= __get_user(f->timeout, &uf->timeout);
- err |= __get_user(f->interleave_sect, &uf->interleave_sect);
- err |= __copy_from_user(&f->max_errors, &uf->max_errors, sizeof(f->max_errors));
- err |= __get_user(f->flags, &uf->flags);
- err |= __get_user(f->read_track, &uf->read_track);
- err |= __copy_from_user(f->autodetect, uf->autodetect, sizeof(f->autodetect));
- err |= __get_user(f->checkfreq, &uf->checkfreq);
- err |= __get_user(f->native_format, &uf->native_format);
- if (err) {
- err = -EFAULT;
- goto out;
- }
- break;
- }
- case FDGETDRVSTAT32:
- case FDPOLLDRVSTAT32:
- karg = kmalloc(sizeof(struct floppy_drive_struct), GFP_KERNEL);
- if (!karg)
- return -ENOMEM;
- break;
- case FDGETFDCSTAT32:
- karg = kmalloc(sizeof(struct floppy_fdc_state), GFP_KERNEL);
- if (!karg)
- return -ENOMEM;
- break;
- case FDWERRORGET32:
- karg = kmalloc(sizeof(struct floppy_write_errors), GFP_KERNEL);
- if (!karg)
- return -ENOMEM;
- break;
- default:
- return -EINVAL;
- }
- set_fs(KERNEL_DS);
- err = __blkdev_driver_ioctl(bdev, mode, kcmd, (unsigned long)karg);
- set_fs(old_fs);
- if (err)
- goto out;
- switch (cmd) {
- case FDGETPRM32:
- {
- struct floppy_struct *f = karg;
- struct compat_floppy_struct __user *uf = compat_ptr(arg);
-
- err = __put_user(f->size, &uf->size);
- err |= __put_user(f->sect, &uf->sect);
- err |= __put_user(f->head, &uf->head);
- err |= __put_user(f->track, &uf->track);
- err |= __put_user(f->stretch, &uf->stretch);
- err |= __put_user(f->gap, &uf->gap);
- err |= __put_user(f->rate, &uf->rate);
- err |= __put_user(f->spec1, &uf->spec1);
- err |= __put_user(f->fmt_gap, &uf->fmt_gap);
- err |= __put_user((u64)f->name, (compat_caddr_t __user *)&uf->name);
- break;
- }
- case FDGETDRVPRM32:
- {
- struct compat_floppy_drive_params __user *uf;
- struct floppy_drive_params *f = karg;
-
- uf = compat_ptr(arg);
- err = __put_user(f->cmos, &uf->cmos);
- err |= __put_user(f->max_dtr, &uf->max_dtr);
- err |= __put_user(f->hlt, &uf->hlt);
- err |= __put_user(f->hut, &uf->hut);
- err |= __put_user(f->srt, &uf->srt);
- err |= __put_user(f->spinup, &uf->spinup);
- err |= __put_user(f->spindown, &uf->spindown);
- err |= __put_user(f->spindown_offset, &uf->spindown_offset);
- err |= __put_user(f->select_delay, &uf->select_delay);
- err |= __put_user(f->rps, &uf->rps);
- err |= __put_user(f->tracks, &uf->tracks);
- err |= __put_user(f->timeout, &uf->timeout);
- err |= __put_user(f->interleave_sect, &uf->interleave_sect);
- err |= __copy_to_user(&uf->max_errors, &f->max_errors, sizeof(f->max_errors));
- err |= __put_user(f->flags, &uf->flags);
- err |= __put_user(f->read_track, &uf->read_track);
- err |= __copy_to_user(uf->autodetect, f->autodetect, sizeof(f->autodetect));
- err |= __put_user(f->checkfreq, &uf->checkfreq);
- err |= __put_user(f->native_format, &uf->native_format);
- break;
- }
- case FDGETDRVSTAT32:
- case FDPOLLDRVSTAT32:
- {
- struct compat_floppy_drive_struct __user *uf;
- struct floppy_drive_struct *f = karg;
-
- uf = compat_ptr(arg);
- err = __put_user(f->flags, &uf->flags);
- err |= __put_user(f->spinup_date, &uf->spinup_date);
- err |= __put_user(f->select_date, &uf->select_date);
- err |= __put_user(f->first_read_date, &uf->first_read_date);
- err |= __put_user(f->probed_format, &uf->probed_format);
- err |= __put_user(f->track, &uf->track);
- err |= __put_user(f->maxblock, &uf->maxblock);
- err |= __put_user(f->maxtrack, &uf->maxtrack);
- err |= __put_user(f->generation, &uf->generation);
- err |= __put_user(f->keep_data, &uf->keep_data);
- err |= __put_user(f->fd_ref, &uf->fd_ref);
- err |= __put_user(f->fd_device, &uf->fd_device);
- err |= __put_user(f->last_checked, &uf->last_checked);
- err |= __put_user((u64)f->dmabuf, &uf->dmabuf);
- err |= __put_user((u64)f->bufblocks, &uf->bufblocks);
- break;
- }
- case FDGETFDCSTAT32:
- {
- struct compat_floppy_fdc_state __user *uf;
- struct floppy_fdc_state *f = karg;
-
- uf = compat_ptr(arg);
- err = __put_user(f->spec1, &uf->spec1);
- err |= __put_user(f->spec2, &uf->spec2);
- err |= __put_user(f->dtr, &uf->dtr);
- err |= __put_user(f->version, &uf->version);
- err |= __put_user(f->dor, &uf->dor);
- err |= __put_user(f->address, &uf->address);
- err |= __copy_to_user((char __user *)&uf->address + sizeof(uf->address),
- (char *)&f->address + sizeof(f->address), sizeof(int));
- err |= __put_user(f->driver_version, &uf->driver_version);
- err |= __copy_to_user(uf->track, f->track, sizeof(f->track));
- break;
- }
- case FDWERRORGET32:
- {
- struct compat_floppy_write_errors __user *uf;
- struct floppy_write_errors *f = karg;
-
- uf = compat_ptr(arg);
- err = __put_user(f->write_errors, &uf->write_errors);
- err |= __put_user(f->first_error_sector, &uf->first_error_sector);
- err |= __put_user(f->first_error_generation, &uf->first_error_generation);
- err |= __put_user(f->last_error_sector, &uf->last_error_sector);
- err |= __put_user(f->last_error_generation, &uf->last_error_generation);
- err |= __put_user(f->badness, &uf->badness);
- break;
- }
- default:
- break;
- }
- if (err)
- err = -EFAULT;
-
-out:
- kfree(karg);
- return err;
-}
-
static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long arg)
{
@@ -537,16 +221,6 @@ static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
case HDIO_GET_ADDRESS:
case HDIO_GET_BUSSTATE:
return compat_hdio_ioctl(bdev, mode, cmd, arg);
- case FDSETPRM32:
- case FDDEFPRM32:
- case FDGETPRM32:
- case FDSETDRVPRM32:
- case FDGETDRVPRM32:
- case FDGETDRVSTAT32:
- case FDPOLLDRVSTAT32:
- case FDGETFDCSTAT32:
- case FDWERRORGET32:
- return compat_fd_ioctl(bdev, mode, cmd, arg);
case CDROMREADAUDIO:
return compat_cdrom_read_audio(bdev, mode, cmd, arg);
case CDROM_SEND_PACKET:
@@ -566,23 +240,6 @@ static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
case HDIO_DRIVE_CMD:
/* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */
case 0x330:
- /* 0x02 -- Floppy ioctls */
- case FDMSGON:
- case FDMSGOFF:
- case FDSETEMSGTRESH:
- case FDFLUSH:
- case FDWERRORCLR:
- case FDSETMAXERRS:
- case FDGETMAXERRS:
- case FDGETDRVTYP:
- case FDEJECT:
- case FDCLRPRM:
- case FDFMTBEG:
- case FDFMTEND:
- case FDRESET:
- case FDTWADDLE:
- case FDFMTTRK:
- case FDRAWCMD:
/* CDROM stuff */
case CDROMPAUSE:
case CDROMRESUME:
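The compat_hdio_ioctl() rewrite above drops the set_fs(KERNEL_DS) trick: the driver now writes its native unsigned long result into a slot obtained from compat_alloc_user_space(), and the compat layer narrows that value into the 32-bit field the 32-bit caller actually passed. A minimal userspace sketch of that widen-then-narrow data flow follows; the function names are stand-ins for illustration, not the kernel API.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a driver that fills in a native-width result. */
static int fake_driver_ioctl(unsigned long *out)
{
	*out = 1024;		/* e.g. number of cylinders */
	return 0;
}

/*
 * Stand-in for the compat path: the 32-bit caller supplied a pointer to a
 * 32-bit slot, so let the driver write a native unsigned long first and
 * only then narrow the result into the caller's slot.
 */
static int fake_compat_ioctl(uint32_t *user_arg)
{
	unsigned long kval;	/* plays the role of the compat_alloc_user_space() slot */
	int error;

	error = fake_driver_ioctl(&kval);
	if (error == 0)
		*user_arg = (uint32_t)kval;
	return error;
}

int main(void)
{
	uint32_t result = 0;

	if (fake_compat_ioctl(&result) == 0)
		printf("compat result: %u\n", (unsigned int)result);
	return 0;
}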
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index f83de99d7d71..56bd612927ab 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -62,9 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan,
dma_addr_t dma_dest[2];
int src_off = 0;
- if (submit->flags & ASYNC_TX_FENCE)
- dma_flags |= DMA_PREP_FENCE;
-
while (src_cnt > 0) {
submit->flags = flags_orig;
pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
@@ -83,6 +80,8 @@ do_async_gen_syndrome(struct dma_chan *chan,
if (cb_fn_orig)
dma_flags |= DMA_PREP_INTERRUPT;
}
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_flags |= DMA_PREP_FENCE;
/* Drivers force forward progress in case they can not provide
* a descriptor
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 097eff0b963d..b75b734ee73a 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -20,7 +20,6 @@
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
-#include <linux/pmem.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
@@ -253,6 +252,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
cmd_name = nvdimm_bus_cmd_name(cmd);
cmd_mask = nd_desc->cmd_mask;
dsm_mask = cmd_mask;
+ if (cmd == ND_CMD_CALL)
+ dsm_mask = nd_desc->bus_dsm_mask;
desc = nd_cmd_bus_desc(cmd);
guid = to_nfit_uuid(NFIT_DEV_BUS);
handle = adev->handle;
@@ -927,6 +928,17 @@ static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
return 0;
}
+static ssize_t bus_dsm_mask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
+ struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
+
+ return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask);
+}
+static struct device_attribute dev_attr_bus_dsm_mask =
+ __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);
+
static ssize_t revision_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1031,7 +1043,7 @@ static ssize_t scrub_store(struct device *dev,
if (nd_desc) {
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
- rc = acpi_nfit_ars_rescan(acpi_desc);
+ rc = acpi_nfit_ars_rescan(acpi_desc, 0);
}
device_unlock(dev);
if (rc)
@@ -1063,10 +1075,11 @@ static struct attribute *acpi_nfit_attributes[] = {
&dev_attr_revision.attr,
&dev_attr_scrub.attr,
&dev_attr_hw_error_scrub.attr,
+ &dev_attr_bus_dsm_mask.attr,
NULL,
};
-static struct attribute_group acpi_nfit_attribute_group = {
+static const struct attribute_group acpi_nfit_attribute_group = {
.name = "nfit",
.attrs = acpi_nfit_attributes,
.is_visible = nfit_visible,
@@ -1346,7 +1359,7 @@ static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
return a->mode;
}
-static struct attribute_group acpi_nfit_dimm_attribute_group = {
+static const struct attribute_group acpi_nfit_dimm_attribute_group = {
.name = "nfit",
.attrs = acpi_nfit_dimm_attributes,
.is_visible = acpi_nfit_dimm_attr_visible,
@@ -1608,11 +1621,23 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
acpi_desc);
}
+/*
+ * These constants are private because there are no kernel consumers of
+ * these commands.
+ */
+enum nfit_aux_cmds {
+ NFIT_CMD_TRANSLATE_SPA = 5,
+ NFIT_CMD_ARS_INJECT_SET = 7,
+ NFIT_CMD_ARS_INJECT_CLEAR = 8,
+ NFIT_CMD_ARS_INJECT_GET = 9,
+};
+
static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
struct acpi_device *adev;
+ unsigned long dsm_mask;
int i;
nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
@@ -1623,6 +1648,20 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
set_bit(i, &nd_desc->cmd_mask);
+ set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
+
+ dsm_mask =
+ (1 << ND_CMD_ARS_CAP) |
+ (1 << ND_CMD_ARS_START) |
+ (1 << ND_CMD_ARS_STATUS) |
+ (1 << ND_CMD_CLEAR_ERROR) |
+ (1 << NFIT_CMD_TRANSLATE_SPA) |
+ (1 << NFIT_CMD_ARS_INJECT_SET) |
+ (1 << NFIT_CMD_ARS_INJECT_CLEAR) |
+ (1 << NFIT_CMD_ARS_INJECT_GET);
+ for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
+ if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
+ set_bit(i, &nd_desc->bus_dsm_mask);
}
static ssize_t range_index_show(struct device *dev,
@@ -1640,7 +1679,7 @@ static struct attribute *acpi_nfit_region_attributes[] = {
NULL,
};
-static struct attribute_group acpi_nfit_region_attribute_group = {
+static const struct attribute_group acpi_nfit_region_attribute_group = {
.name = "nfit",
.attrs = acpi_nfit_region_attributes,
};
@@ -1663,12 +1702,29 @@ struct nfit_set_info {
} mapping[0];
};
+struct nfit_set_info2 {
+ struct nfit_set_info_map2 {
+ u64 region_offset;
+ u32 serial_number;
+ u16 vendor_id;
+ u16 manufacturing_date;
+ u8 manufacturing_location;
+ u8 reserved[31];
+ } mapping[0];
+};
+
static size_t sizeof_nfit_set_info(int num_mappings)
{
return sizeof(struct nfit_set_info)
+ num_mappings * sizeof(struct nfit_set_info_map);
}
+static size_t sizeof_nfit_set_info2(int num_mappings)
+{
+ return sizeof(struct nfit_set_info2)
+ + num_mappings * sizeof(struct nfit_set_info_map2);
+}
+
static int cmp_map_compat(const void *m0, const void *m1)
{
const struct nfit_set_info_map *map0 = m0;
@@ -1690,6 +1746,18 @@ static int cmp_map(const void *m0, const void *m1)
return 0;
}
+static int cmp_map2(const void *m0, const void *m1)
+{
+ const struct nfit_set_info_map2 *map0 = m0;
+ const struct nfit_set_info_map2 *map1 = m1;
+
+ if (map0->region_offset < map1->region_offset)
+ return -1;
+ else if (map0->region_offset > map1->region_offset)
+ return 1;
+ return 0;
+}
+
/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
@@ -1707,27 +1775,31 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
struct nd_region_desc *ndr_desc,
struct acpi_nfit_system_address *spa)
{
- int i, spa_type = nfit_spa_type(spa);
struct device *dev = acpi_desc->dev;
struct nd_interleave_set *nd_set;
u16 nr = ndr_desc->num_mappings;
+ struct nfit_set_info2 *info2;
struct nfit_set_info *info;
-
- if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
- /* pass */;
- else
- return 0;
+ int i;
nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
if (!nd_set)
return -ENOMEM;
+ ndr_desc->nd_set = nd_set;
+ guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);
info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
if (!info)
return -ENOMEM;
+
+ info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
+ if (!info2)
+ return -ENOMEM;
+
for (i = 0; i < nr; i++) {
struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
struct nfit_set_info_map *map = &info->mapping[i];
+ struct nfit_set_info_map2 *map2 = &info2->mapping[i];
struct nvdimm *nvdimm = mapping->nvdimm;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
@@ -1740,19 +1812,32 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
map->region_offset = memdev->region_offset;
map->serial_number = nfit_mem->dcr->serial_number;
+
+ map2->region_offset = memdev->region_offset;
+ map2->serial_number = nfit_mem->dcr->serial_number;
+ map2->vendor_id = nfit_mem->dcr->vendor_id;
+ map2->manufacturing_date = nfit_mem->dcr->manufacturing_date;
+ map2->manufacturing_location = nfit_mem->dcr->manufacturing_location;
}
+ /* v1.1 namespaces */
sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
cmp_map, NULL);
- nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
+ nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
+
+ /* v1.2 namespaces */
+ sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
+ cmp_map2, NULL);
+ nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);
- /* support namespaces created with the wrong sort order */
+ /* support v1.1 namespaces created with the wrong sort order */
sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
cmp_map_compat, NULL);
nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
ndr_desc->nd_set = nd_set;
devm_kfree(dev, info);
+ devm_kfree(dev, info2);
return 0;
}
@@ -1842,8 +1927,7 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
}
if (rw)
- memcpy_to_pmem(mmio->addr.aperture + offset,
- iobuf + copied, c);
+ memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c);
else {
if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
mmio_flush_range((void __force *)
@@ -1957,7 +2041,7 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
nfit_blk->bdw_offset = nfit_mem->bdw->offset;
mmio = &nfit_blk->mmio[BDW];
mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
- nfit_mem->spa_bdw->length, ARCH_MEMREMAP_PMEM);
+ nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
if (!mmio->addr.base) {
dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
nvdimm_name(nvdimm));
@@ -2051,6 +2135,7 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa
memset(&ars_start, 0, sizeof(ars_start));
ars_start.address = spa->address;
ars_start.length = spa->length;
+ ars_start.flags = acpi_desc->ars_start_flags;
if (nfit_spa_type(spa) == NFIT_SPA_PM)
ars_start.type = ND_ARS_PERSISTENT;
else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
@@ -2077,6 +2162,7 @@ static int ars_continue(struct acpi_nfit_desc *acpi_desc)
ars_start.address = ars_status->restart_address;
ars_start.length = ars_status->restart_length;
ars_start.type = ars_status->type;
+ ars_start.flags = acpi_desc->ars_start_flags;
rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
sizeof(ars_start), &cmd_rc);
if (rc < 0)
@@ -2179,7 +2265,7 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
struct acpi_nfit_system_address *spa = nfit_spa->spa;
struct nd_blk_region_desc *ndbr_desc;
struct nfit_mem *nfit_mem;
- int blk_valid = 0;
+ int blk_valid = 0, rc;
if (!nvdimm) {
dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
@@ -2211,6 +2297,9 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
ndbr_desc = to_blk_region_desc(ndr_desc);
ndbr_desc->enable = acpi_nfit_blk_region_enable;
ndbr_desc->do_io = acpi_desc->blk_do_io;
+ rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
+ if (rc)
+ return rc;
nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
ndr_desc);
if (!nfit_spa->nd_region)
@@ -2229,6 +2318,13 @@ static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
nfit_spa_type(spa) == NFIT_SPA_PCD);
}
+static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
+{
+ return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
+ nfit_spa_type(spa) == NFIT_SPA_VCD ||
+ nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
+}
+
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
struct nfit_spa *nfit_spa)
{
@@ -2303,7 +2399,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
ndr_desc);
if (!nfit_spa->nd_region)
rc = -ENOMEM;
- } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
+ } else if (nfit_spa_is_volatile(spa)) {
nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
ndr_desc);
if (!nfit_spa->nd_region)
@@ -2595,6 +2691,7 @@ static void acpi_nfit_scrub(struct work_struct *work)
list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
acpi_nfit_async_scrub(acpi_desc, nfit_spa);
acpi_desc->scrub_count++;
+ acpi_desc->ars_start_flags = 0;
if (acpi_desc->scrub_count_state)
sysfs_notify_dirent(acpi_desc->scrub_count_state);
mutex_unlock(&acpi_desc->init_mutex);
@@ -2613,6 +2710,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
return rc;
}
+ acpi_desc->ars_start_flags = 0;
if (!acpi_desc->cancel)
queue_work(nfit_wq, &acpi_desc->work);
return 0;
@@ -2817,7 +2915,7 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
return 0;
}
-int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
+int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, u8 flags)
{
struct device *dev = acpi_desc->dev;
struct nfit_spa *nfit_spa;
@@ -2839,6 +2937,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc)
nfit_spa->ars_required = 1;
}
+ acpi_desc->ars_start_flags = flags;
queue_work(nfit_wq, &acpi_desc->work);
dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
mutex_unlock(&acpi_desc->init_mutex);
@@ -2967,7 +3066,7 @@ static int acpi_nfit_remove(struct acpi_device *adev)
return 0;
}
-void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
+static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
{
struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -2975,11 +3074,6 @@ void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
acpi_status status;
int ret;
- dev_dbg(dev, "%s: event: %d\n", __func__, event);
-
- if (event != NFIT_NOTIFY_UPDATE)
- return;
-
if (!dev->driver) {
/* dev->driver may be null if we're being removed */
dev_dbg(dev, "%s: no driver found for dev\n", __func__);
@@ -3016,6 +3110,29 @@ void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
dev_err(dev, "Invalid _FIT\n");
kfree(buf.pointer);
}
+
+static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
+{
+ struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
+ u8 flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ?
+ 0 : ND_ARS_RETURN_PREV_DATA;
+
+ acpi_nfit_ars_rescan(acpi_desc, flags);
+}
+
+void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
+{
+ dev_dbg(dev, "%s: event: 0x%x\n", __func__, event);
+
+ switch (event) {
+ case NFIT_NOTIFY_UPDATE:
+ return acpi_nfit_update_notify(dev, handle);
+ case NFIT_NOTIFY_UC_MEMORY_ERROR:
+ return acpi_nfit_uc_error_notify(dev, handle);
+ default:
+ return;
+ }
+}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
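acpi_nfit_init_interleave_set() above now derives two cookies: cookie1 over the v1.1 (region_offset, serial_number) mappings and cookie2 over the wider v1.2 records, each sorted by region_offset before a Fletcher-64 pass. Below is a rough userspace sketch of the v1.1 half, assuming a simplified Fletcher-64 over 32-bit words in the spirit of nd_fletcher64(); the struct layout and sample values are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct map_v11 {
	uint64_t region_offset;
	uint32_t serial_number;
	uint32_t pad;
};

static int cmp_map(const void *a, const void *b)
{
	const struct map_v11 *m0 = a, *m1 = b;

	if (m0->region_offset < m1->region_offset)
		return -1;
	if (m0->region_offset > m1->region_offset)
		return 1;
	return 0;
}

/* Simplified Fletcher-64 over 32-bit words, modeled on nd_fletcher64(). */
static uint64_t fletcher64(const void *buf, size_t len)
{
	const uint32_t *w = buf;
	uint64_t lo = 0, hi = 0;
	size_t i;

	for (i = 0; i < len / sizeof(uint32_t); i++) {
		lo = (lo + w[i]) & 0xffffffff;
		hi += lo;
	}
	return (hi << 32) | lo;
}

int main(void)
{
	struct map_v11 maps[2] = {
		{ .region_offset = 0x10000000, .serial_number = 0xdeadbeef },
		{ .region_offset = 0x00000000, .serial_number = 0xcafebabe },
	};

	/* Sort by region offset so the cookie is stable across enumeration order. */
	qsort(maps, 2, sizeof(maps[0]), cmp_map);
	printf("cookie1: %#llx\n",
	       (unsigned long long)fletcher64(maps, sizeof(maps)));
	return 0;
}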
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c
index fd86bec98dea..feeb95d574fa 100644
--- a/drivers/acpi/nfit/mce.c
+++ b/drivers/acpi/nfit/mce.c
@@ -79,7 +79,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
* already in progress, just let that be the last
* authoritative one
*/
- acpi_nfit_ars_rescan(acpi_desc);
+ acpi_nfit_ars_rescan(acpi_desc, 0);
}
break;
}
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 29bdd959517f..54292db61262 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -79,6 +79,7 @@ enum {
enum nfit_root_notifiers {
NFIT_NOTIFY_UPDATE = 0x80,
+ NFIT_NOTIFY_UC_MEMORY_ERROR = 0x81,
};
enum nfit_dimm_notifiers {
@@ -154,6 +155,7 @@ struct acpi_nfit_desc {
struct list_head idts;
struct nvdimm_bus *nvdimm_bus;
struct device *dev;
+ u8 ars_start_flags;
struct nd_cmd_ars_status *ars_status;
size_t ars_status_size;
struct work_struct work;
@@ -206,7 +208,7 @@ struct nfit_blk {
extern struct list_head acpi_descs;
extern struct mutex acpi_desc_lock;
-int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc);
+int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, u8 flags);
#ifdef CONFIG_X86_MCE
void nfit_mce_register(void);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index de3eaf051697..948fc86980a1 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -213,6 +213,16 @@ config SATA_FSL
If unsure, say N.
+config SATA_GEMINI
+ tristate "Gemini SATA bridge support"
+ depends on PATA_FTIDE010
+ default ARCH_GEMINI
+ help
+ This enables support for the FTIDE010 to SATA bridge
+ found in the Cortina Systems Gemini platform.
+
+ If unsure, say N.
+
config SATA_AHCI_SEATTLE
tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support"
depends on ARCH_SEATTLE
@@ -599,6 +609,17 @@ config PATA_EP93XX
If unsure, say N.
+config PATA_FTIDE010
+ tristate "Faraday Technology FTIDE010 PATA support"
+ depends on OF
+ depends on ARM
+ default ARCH_GEMINI
+ help
+ This option enables support for the Faraday FTIDE010
+ PATA controller found in the Cortina Gemini SoCs.
+
+ If unsure, say N.
+
config PATA_HPT366
tristate "HPT 366/368 PATA support"
depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index cd931a5eba92..a26ef5a93919 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_SATA_ACARD_AHCI) += acard-ahci.o libahci.o
obj-$(CONFIG_SATA_AHCI_SEATTLE) += ahci_seattle.o libahci.o libahci_platform.o
obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o
obj-$(CONFIG_SATA_FSL) += sata_fsl.o
+obj-$(CONFIG_SATA_GEMINI) += sata_gemini.o
obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
@@ -60,6 +61,7 @@ obj-$(CONFIG_PATA_CS5536) += pata_cs5536.o
obj-$(CONFIG_PATA_CYPRESS) += pata_cypress.o
obj-$(CONFIG_PATA_EFAR) += pata_efar.o
obj-$(CONFIG_PATA_EP93XX) += pata_ep93xx.o
+obj-$(CONFIG_PATA_FTIDE010) += pata_ftide010.o
obj-$(CONFIG_PATA_HPT366) += pata_hpt366.o
obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o
obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 1e1c355121e4..5a5fd0b404eb 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -548,6 +548,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */
{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+ { PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci }, /* ASM1061R */
+ { PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci }, /* ASM1062R */
/*
* Samsung SSDs found on some macbooks. NCQ times out if MSI is
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 30f67a1a4f54..8b61123d2c3c 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -248,6 +248,9 @@ enum {
AHCI_HFLAG_MULTI_MSI = 0,
#endif
AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */
+ AHCI_HFLAG_YES_ALPM = (1 << 23), /* force ALPM cap on */
+ AHCI_HFLAG_NO_WRITE_TO_RO = (1 << 24), /* don't write to read
+ only registers */
/* ap->flags bits */
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 6f8a7341fa08..5936d1679bf3 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -39,7 +39,6 @@
#define PIODATA_ENDIAN_SHIFT 6
#define ENDIAN_SWAP_NONE 0
#define ENDIAN_SWAP_FULL 2
- #define OVERRIDE_HWINIT BIT(16)
#define SATA_TOP_CTRL_TP_CTRL 0x8
#define SATA_TOP_CTRL_PHY_CTRL 0xc
#define SATA_TOP_CTRL_PHY_CTRL_1 0x0
@@ -126,17 +125,13 @@ static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
static void brcm_sata_alpm_init(struct ahci_host_priv *hpriv)
{
struct brcm_ahci_priv *priv = hpriv->plat_data;
- u32 bus_ctrl, port_ctrl, host_caps;
+ u32 port_ctrl, host_caps;
int i;
/* Enable support for ALPM */
- bus_ctrl = brcm_sata_readreg(priv->top_ctrl +
- SATA_TOP_CTRL_BUS_CTRL);
- brcm_sata_writereg(bus_ctrl | OVERRIDE_HWINIT,
- priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
host_caps = readl(hpriv->mmio + HOST_CAP);
- writel(host_caps | HOST_CAP_ALPM, hpriv->mmio);
- brcm_sata_writereg(bus_ctrl, priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
+ if (!(host_caps & HOST_CAP_ALPM))
+ hpriv->flags |= AHCI_HFLAG_YES_ALPM;
/*
* Adjust timeout to allow PLL sufficient time to lock while waking
@@ -360,6 +355,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ)
hpriv->flags |= AHCI_HFLAG_NO_NCQ;
+ hpriv->flags |= AHCI_HFLAG_NO_WRITE_TO_RO;
ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info,
&ahci_platform_sht);
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 4c96f3ac4976..b6b0bf76dfc7 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -47,12 +47,14 @@
#define SATA_ECC_DISABLE 0x00020000
#define ECC_DIS_ARMV8_CH2 0x80000000
+#define ECC_DIS_LS1088A 0x40000000
enum ahci_qoriq_type {
AHCI_LS1021A,
AHCI_LS1043A,
AHCI_LS2080A,
AHCI_LS1046A,
+ AHCI_LS1088A,
AHCI_LS2088A,
};
@@ -68,6 +70,7 @@ static const struct of_device_id ahci_qoriq_of_match[] = {
{ .compatible = "fsl,ls1043a-ahci", .data = (void *)AHCI_LS1043A},
{ .compatible = "fsl,ls2080a-ahci", .data = (void *)AHCI_LS2080A},
{ .compatible = "fsl,ls1046a-ahci", .data = (void *)AHCI_LS1046A},
+ { .compatible = "fsl,ls1088a-ahci", .data = (void *)AHCI_LS1088A},
{ .compatible = "fsl,ls2088a-ahci", .data = (void *)AHCI_LS2088A},
{},
};
@@ -203,6 +206,17 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
+ case AHCI_LS1088A:
+ if (!qpriv->ecc_addr)
+ return -EINVAL;
+ writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A,
+ qpriv->ecc_addr);
+ writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
+ writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+ if (qpriv->is_dmacoherent)
+ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
+ break;
+
case AHCI_LS2088A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 6154f0e2b81a..3e286d86ab42 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -504,6 +504,11 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
cap &= ~HOST_CAP_FBS;
}
+ if (!(cap & HOST_CAP_ALPM) && (hpriv->flags & AHCI_HFLAG_YES_ALPM)) {
+ dev_info(dev, "controller can do ALPM, turning on CAP_ALPM\n");
+ cap |= HOST_CAP_ALPM;
+ }
+
if (hpriv->force_port_map && port_map != hpriv->force_port_map) {
dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
port_map, hpriv->force_port_map);
@@ -940,7 +945,8 @@ int ahci_reset_controller(struct ata_host *host)
/* Some registers might be cleared on reset. Restore
* initial values.
*/
- ahci_restore_initial_config(host);
+ if (!(hpriv->flags & AHCI_HFLAG_NO_WRITE_TO_RO))
+ ahci_restore_initial_config(host);
} else
dev_info(host->dev, "skipping global host reset\n");
@@ -1400,7 +1406,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
ata_tf_init(link->device, &tf);
- /* issue the first D2H Register FIS */
+ /* issue the first H2D Register FIS */
msecs = 0;
now = jiffies;
if (time_after(deadline, now))
@@ -1417,7 +1423,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
/* spec says at least 5us, but be generous and sleep for 1ms */
ata_msleep(ap, 1);
- /* issue the second D2H Register FIS */
+ /* issue the second H2D Register FIS */
tf.ctl &= ~ATA_SRST;
ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
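The ahci_brcm change above stops writing the read-only HOST_CAP register behind an override bit; instead the platform driver sets AHCI_HFLAG_YES_ALPM and lets ahci_save_initial_config() patch its cached copy of the capabilities, while AHCI_HFLAG_NO_WRITE_TO_RO keeps ahci_reset_controller() from writing anything back. A small sketch of that "fix up the cached word, never the register" idea; the flag names are shortened stand-ins and the bit values are illustrative, the real masks live in drivers/ata/ahci.h.

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only. */
#define CAP_ALPM	(1u << 26)
#define CAP_NCQ		(1u << 30)
#define HFLAG_YES_ALPM	(1u << 0)
#define HFLAG_NO_NCQ	(1u << 1)

/* Mirrors the idea of ahci_save_initial_config(): adjust a cached copy. */
static uint32_t save_initial_cap(uint32_t hw_cap, uint32_t hflags)
{
	uint32_t cap = hw_cap;

	if ((hflags & HFLAG_NO_NCQ) && (cap & CAP_NCQ))
		cap &= ~CAP_NCQ;	/* controller over-reports NCQ */
	if ((hflags & HFLAG_YES_ALPM) && !(cap & CAP_ALPM))
		cap |= CAP_ALPM;	/* controller under-reports ALPM */
	return cap;			/* cached value; hardware register untouched */
}

int main(void)
{
	uint32_t cap = save_initial_cap(CAP_NCQ /* NCQ only */, HFLAG_YES_ALPM);

	printf("effective cap: %#x\n", cap);
	return 0;
}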
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index b82d6bb88d27..8453f9a4682f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2047,6 +2047,110 @@ retry:
return rc;
}
+/**
+ * ata_read_log_page - read a specific log page
+ * @dev: target device
+ * @log: log to read
+ * @page: page to read
+ * @buf: buffer to store read page
+ * @sectors: number of sectors to read
+ *
+ * Read log page using READ_LOG_EXT command.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * 0 on success, AC_ERR_* mask otherwise.
+ */
+unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
+ u8 page, void *buf, unsigned int sectors)
+{
+ unsigned long ap_flags = dev->link->ap->flags;
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+ bool dma = false;
+
+ DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
+
+ /*
+ * Return error without actually issuing the command on controllers
+ * which e.g. lockup on a read log page.
+ */
+ if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
+ return AC_ERR_DEV;
+
+retry:
+ ata_tf_init(dev, &tf);
+ if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
+ !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) {
+ tf.command = ATA_CMD_READ_LOG_DMA_EXT;
+ tf.protocol = ATA_PROT_DMA;
+ dma = true;
+ } else {
+ tf.command = ATA_CMD_READ_LOG_EXT;
+ tf.protocol = ATA_PROT_PIO;
+ dma = false;
+ }
+ tf.lbal = log;
+ tf.lbam = page;
+ tf.nsect = sectors;
+ tf.hob_nsect = sectors >> 8;
+ tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
+ buf, sectors * ATA_SECT_SIZE, 0);
+
+ if (err_mask && dma) {
+ dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG;
+ ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n");
+ goto retry;
+ }
+
+ DPRINTK("EXIT, err_mask=%x\n", err_mask);
+ return err_mask;
+}
+
+static bool ata_log_supported(struct ata_device *dev, u8 log)
+{
+ struct ata_port *ap = dev->link->ap;
+
+ if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
+ return false;
+ return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
+}
+
+static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
+{
+ struct ata_port *ap = dev->link->ap;
+ unsigned int err, i;
+
+ if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
+ ata_dev_warn(dev, "ATA Identify Device Log not supported\n");
+ return false;
+ }
+
+ /*
+ * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
+ * supported.
+ */
+ err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
+ 1);
+ if (err) {
+ ata_dev_info(dev,
+ "failed to get Device Identify Log Emask 0x%x\n",
+ err);
+ return false;
+ }
+
+ for (i = 0; i < ap->sector_buf[8]; i++) {
+ if (ap->sector_buf[9 + i] == page)
+ return true;
+ }
+
+ return false;
+}
+
static int ata_do_link_spd_horkage(struct ata_device *dev)
{
struct ata_link *plink = ata_dev_phys_link(dev);
@@ -2094,21 +2198,9 @@ static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
- int log_index = ATA_LOG_NCQ_SEND_RECV * 2;
- u16 log_pages;
- err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
- 0, ap->sector_buf, 1);
- if (err_mask) {
- ata_dev_dbg(dev,
- "failed to get Log Directory Emask 0x%x\n",
- err_mask);
- return;
- }
- log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
- if (!log_pages) {
- ata_dev_warn(dev,
- "NCQ Send/Recv Log not supported\n");
+ if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
+ ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
return;
}
err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
@@ -2135,19 +2227,8 @@ static void ata_dev_config_ncq_non_data(struct ata_device *dev)
{
struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
- int log_index = ATA_LOG_NCQ_NON_DATA * 2;
- u16 log_pages;
- err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
- 0, ap->sector_buf, 1);
- if (err_mask) {
- ata_dev_dbg(dev,
- "failed to get Log Directory Emask 0x%x\n",
- err_mask);
- return;
- }
- log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
- if (!log_pages) {
+ if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
ata_dev_warn(dev,
"NCQ Send/Recv Log not supported\n");
return;
@@ -2176,7 +2257,7 @@ static void ata_dev_config_ncq_prio(struct ata_device *dev)
}
err_mask = ata_read_log_page(dev,
- ATA_LOG_SATA_ID_DEV_DATA,
+ ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_SATA_SETTINGS,
ap->sector_buf,
1);
@@ -2275,8 +2356,6 @@ static void ata_dev_config_zac(struct ata_device *dev)
struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
u8 *identify_buf = ap->sector_buf;
- int log_index = ATA_LOG_SATA_ID_DEV_DATA * 2, i, found = 0;
- u16 log_pages;
dev->zac_zones_optimal_open = U32_MAX;
dev->zac_zones_optimal_nonseq = U32_MAX;
@@ -2296,44 +2375,7 @@ static void ata_dev_config_zac(struct ata_device *dev)
if (!(dev->flags & ATA_DFLAG_ZAC))
return;
- /*
- * Read Log Directory to figure out if IDENTIFY DEVICE log
- * is supported.
- */
- err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
- 0, ap->sector_buf, 1);
- if (err_mask) {
- ata_dev_info(dev,
- "failed to get Log Directory Emask 0x%x\n",
- err_mask);
- return;
- }
- log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
- if (log_pages == 0) {
- ata_dev_warn(dev,
- "ATA Identify Device Log not supported\n");
- return;
- }
- /*
- * Read IDENTIFY DEVICE data log, page 0, to figure out
- * if page 9 is supported.
- */
- err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA, 0,
- identify_buf, 1);
- if (err_mask) {
- ata_dev_info(dev,
- "failed to get Device Identify Log Emask 0x%x\n",
- err_mask);
- return;
- }
- log_pages = identify_buf[8];
- for (i = 0; i < log_pages; i++) {
- if (identify_buf[9 + i] == ATA_LOG_ZONED_INFORMATION) {
- found++;
- break;
- }
- }
- if (!found) {
+ if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
ata_dev_warn(dev,
"ATA Zoned Information Log not supported\n");
return;
@@ -2342,7 +2384,7 @@ static void ata_dev_config_zac(struct ata_device *dev)
/*
* Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
*/
- err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA,
+ err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_ZONED_INFORMATION,
identify_buf, 1);
if (!err_mask) {
@@ -2363,6 +2405,37 @@ static void ata_dev_config_zac(struct ata_device *dev)
}
}
+static void ata_dev_config_trusted(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+ u64 trusted_cap;
+ unsigned int err;
+
+ if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
+ ata_dev_warn(dev,
+ "Security Log not supported\n");
+ return;
+ }
+
+ err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
+ ap->sector_buf, 1);
+ if (err) {
+ ata_dev_dbg(dev,
+ "failed to read Security Log, Emask 0x%x\n", err);
+ return;
+ }
+
+ trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
+ if (!(trusted_cap & (1ULL << 63))) {
+ ata_dev_dbg(dev,
+ "Trusted Computing capability qword not valid!\n");
+ return;
+ }
+
+ if (trusted_cap & (1 << 0))
+ dev->flags |= ATA_DFLAG_TRUSTED;
+}
+
/**
* ata_dev_configure - Configure the specified ATA/ATAPI device
* @dev: Target device to configure
@@ -2571,7 +2644,7 @@ int ata_dev_configure(struct ata_device *dev)
dev->flags |= ATA_DFLAG_DEVSLP;
err_mask = ata_read_log_page(dev,
- ATA_LOG_SATA_ID_DEV_DATA,
+ ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_SATA_SETTINGS,
sata_setting,
1);
@@ -2587,7 +2660,8 @@ int ata_dev_configure(struct ata_device *dev)
}
ata_dev_config_sense_reporting(dev);
ata_dev_config_zac(dev);
- dev->cdb_len = 16;
+ ata_dev_config_trusted(dev);
+ dev->cdb_len = 32;
}
/* ATAPI-specific feature tests */
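ata_log_supported() and ata_identify_page_supported() above centralize two checks that were previously open-coded per caller: a general-purpose log is present when the little-endian 16-bit entry at offset log*2 of the log directory is non-zero, and an IDENTIFY DEVICE data log page is present when it appears in page 0's page list (count at byte 8, page numbers from byte 9). A self-contained sketch of both parsers over fabricated 512-byte buffers; the log numbers follow the kernel's ATA_LOG_* values.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECT_SIZE		512
#define LOG_IDENTIFY_DEVICE	0x30
#define PAGE_ZONED_INFORMATION	0x09

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

/* Log directory: one 16-bit page count per log number. */
static bool log_supported(const uint8_t dir[SECT_SIZE], uint8_t log)
{
	return get_le16(&dir[log * 2]) != 0;
}

/* IDENTIFY DEVICE data log, page 0: byte 8 = count, bytes 9.. = page list. */
static bool identify_page_supported(const uint8_t page0[SECT_SIZE], uint8_t page)
{
	uint8_t i;

	for (i = 0; i < page0[8]; i++)
		if (page0[9 + i] == page)
			return true;
	return false;
}

int main(void)
{
	uint8_t dir[SECT_SIZE] = { 0 }, page0[SECT_SIZE] = { 0 };

	dir[LOG_IDENTIFY_DEVICE * 2] = 1;	/* one page of log 0x30 */
	page0[8] = 2;				/* two supported pages */
	page0[9] = 0x00;
	page0[10] = PAGE_ZONED_INFORMATION;

	printf("identify log: %d, zoned page: %d\n",
	       log_supported(dir, LOG_IDENTIFY_DEVICE),
	       identify_page_supported(page0, PAGE_ZONED_INFORMATION));
	return 0;
}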
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 7e33e200aae5..b70bcf6d2914 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1488,70 +1488,6 @@ static const char *ata_err_string(unsigned int err_mask)
}
/**
- * ata_read_log_page - read a specific log page
- * @dev: target device
- * @log: log to read
- * @page: page to read
- * @buf: buffer to store read page
- * @sectors: number of sectors to read
- *
- * Read log page using READ_LOG_EXT command.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- *
- * RETURNS:
- * 0 on success, AC_ERR_* mask otherwise.
- */
-unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
- u8 page, void *buf, unsigned int sectors)
-{
- unsigned long ap_flags = dev->link->ap->flags;
- struct ata_taskfile tf;
- unsigned int err_mask;
- bool dma = false;
-
- DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
-
- /*
- * Return error without actually issuing the command on controllers
- * which e.g. lockup on a read log page.
- */
- if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
- return AC_ERR_DEV;
-
-retry:
- ata_tf_init(dev, &tf);
- if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
- !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) {
- tf.command = ATA_CMD_READ_LOG_DMA_EXT;
- tf.protocol = ATA_PROT_DMA;
- dma = true;
- } else {
- tf.command = ATA_CMD_READ_LOG_EXT;
- tf.protocol = ATA_PROT_PIO;
- dma = false;
- }
- tf.lbal = log;
- tf.lbam = page;
- tf.nsect = sectors;
- tf.hob_nsect = sectors >> 8;
- tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
-
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
- buf, sectors * ATA_SECT_SIZE, 0);
-
- if (err_mask && dma) {
- dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG;
- ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n");
- goto retry;
- }
-
- DPRINTK("EXIT, err_mask=%x\n", err_mask);
- return err_mask;
-}
-
-/**
* ata_eh_read_log_10h - Read log page 10h for NCQ error details
* @dev: Device to read log page 10h from
* @tag: Resulting tag of the failed command
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index b0866f040d1f..d462c5a3a7ef 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -903,32 +903,32 @@ static void ata_dump_status(unsigned id, struct ata_taskfile *tf)
{
u8 stat = tf->command, err = tf->feature;
- printk(KERN_WARNING "ata%u: status=0x%02x { ", id, stat);
+ pr_warn("ata%u: status=0x%02x { ", id, stat);
if (stat & ATA_BUSY) {
- printk("Busy }\n"); /* Data is not valid in this case */
+ pr_cont("Busy }\n"); /* Data is not valid in this case */
} else {
- if (stat & ATA_DRDY) printk("DriveReady ");
- if (stat & ATA_DF) printk("DeviceFault ");
- if (stat & ATA_DSC) printk("SeekComplete ");
- if (stat & ATA_DRQ) printk("DataRequest ");
- if (stat & ATA_CORR) printk("CorrectedError ");
- if (stat & ATA_SENSE) printk("Sense ");
- if (stat & ATA_ERR) printk("Error ");
- printk("}\n");
+ if (stat & ATA_DRDY) pr_cont("DriveReady ");
+ if (stat & ATA_DF) pr_cont("DeviceFault ");
+ if (stat & ATA_DSC) pr_cont("SeekComplete ");
+ if (stat & ATA_DRQ) pr_cont("DataRequest ");
+ if (stat & ATA_CORR) pr_cont("CorrectedError ");
+ if (stat & ATA_SENSE) pr_cont("Sense ");
+ if (stat & ATA_ERR) pr_cont("Error ");
+ pr_cont("}\n");
if (err) {
- printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err);
- if (err & ATA_ABORTED) printk("DriveStatusError ");
+ pr_warn("ata%u: error=0x%02x { ", id, err);
+ if (err & ATA_ABORTED) pr_cont("DriveStatusError ");
if (err & ATA_ICRC) {
if (err & ATA_ABORTED)
- printk("BadCRC ");
- else printk("Sector ");
+ pr_cont("BadCRC ");
+ else pr_cont("Sector ");
}
- if (err & ATA_UNC) printk("UncorrectableError ");
- if (err & ATA_IDNF) printk("SectorIdNotFound ");
- if (err & ATA_TRK0NF) printk("TrackZeroNotFound ");
- if (err & ATA_AMNF) printk("AddrMarkNotFound ");
- printk("}\n");
+ if (err & ATA_UNC) pr_cont("UncorrectableError ");
+ if (err & ATA_IDNF) pr_cont("SectorIdNotFound ");
+ if (err & ATA_TRK0NF) pr_cont("TrackZeroNotFound ");
+ if (err & ATA_AMNF) pr_cont("AddrMarkNotFound ");
+ pr_cont("}\n");
}
}
}
@@ -1059,8 +1059,7 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
translate_done:
if (verbose)
- printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
- "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
+ pr_err("ata%u: translated ATA stat/err 0x%02x/%02x to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
id, drv_stat, drv_err, *sk, *asc, *ascq);
return;
}
@@ -1322,6 +1321,9 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
blk_queue_flush_queueable(q, false);
+ if (dev->flags & ATA_DFLAG_TRUSTED)
+ sdev->security_supported = 1;
+
dev->sdev = sdev;
return 0;
}
@@ -3127,7 +3129,7 @@ ata_scsi_map_proto(u8 byte1)
* ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile
* @qc: command structure to be initialized
*
- * Handles either 12 or 16-byte versions of the CDB.
+ * Handles either 12, 16, or 32-byte versions of the CDB.
*
* RETURNS:
* Zero on success, non-zero on failure.
@@ -3139,13 +3141,19 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
struct ata_device *dev = qc->dev;
const u8 *cdb = scmd->cmnd;
u16 fp;
+ u16 cdb_offset = 0;
+
+ /* 7Fh variable length cmd means an ATA pass-thru(32) */
+ if (cdb[0] == VARIABLE_LENGTH_CMD)
+ cdb_offset = 9;
- if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN) {
+ tf->protocol = ata_scsi_map_proto(cdb[1 + cdb_offset]);
+ if (tf->protocol == ATA_PROT_UNKNOWN) {
fp = 1;
goto invalid_fld;
}
- if (ata_is_ncq(tf->protocol) && (cdb[2] & 0x3) == 0)
+ if (ata_is_ncq(tf->protocol) && (cdb[2 + cdb_offset] & 0x3) == 0)
tf->protocol = ATA_PROT_NCQ_NODATA;
/* enable LBA */
@@ -3181,7 +3189,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
tf->lbah = cdb[12];
tf->device = cdb[13];
tf->command = cdb[14];
- } else {
+ } else if (cdb[0] == ATA_12) {
/*
* 12-byte CDB - incapable of extended commands.
*/
@@ -3194,6 +3202,30 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
tf->lbah = cdb[7];
tf->device = cdb[8];
tf->command = cdb[9];
+ } else {
+ /*
+ * 32-byte CDB - may contain extended command fields.
+ *
+ * If that is the case, copy the upper byte register values.
+ */
+ if (cdb[10] & 0x01) {
+ tf->hob_feature = cdb[20];
+ tf->hob_nsect = cdb[22];
+ tf->hob_lbal = cdb[16];
+ tf->hob_lbam = cdb[15];
+ tf->hob_lbah = cdb[14];
+ tf->flags |= ATA_TFLAG_LBA48;
+ } else
+ tf->flags &= ~ATA_TFLAG_LBA48;
+
+ tf->feature = cdb[21];
+ tf->nsect = cdb[23];
+ tf->lbal = cdb[19];
+ tf->lbam = cdb[18];
+ tf->lbah = cdb[17];
+ tf->device = cdb[24];
+ tf->command = cdb[25];
+ tf->auxiliary = get_unaligned_be32(&cdb[28]);
}
/* For NCQ commands copy the tag value */
@@ -3564,6 +3596,11 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
dev->class == ATA_DEV_ZAC)
supported = 3;
break;
+ case SECURITY_PROTOCOL_IN:
+ case SECURITY_PROTOCOL_OUT:
+ if (dev->flags & ATA_DFLAG_TRUSTED)
+ supported = 3;
+ break;
default:
break;
}
@@ -3910,7 +3947,7 @@ static int ata_mselect_control(struct ata_queued_cmd *qc,
}
/**
- * ata_scsiop_mode_select - Simulate MODE SELECT 6, 10 commands
+ * ata_scsi_mode_select_xlat - Simulate MODE SELECT 6, 10 commands
* @qc: Storage for translated ATA taskfile
*
* Converts a MODE SELECT command to an ATA SET FEATURES taskfile.
@@ -4068,6 +4105,99 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
return 1;
}
+static u8 ata_scsi_trusted_op(u32 len, bool send, bool dma)
+{
+ if (len == 0)
+ return ATA_CMD_TRUSTED_NONDATA;
+ else if (send)
+ return dma ? ATA_CMD_TRUSTED_SND_DMA : ATA_CMD_TRUSTED_SND;
+ else
+ return dma ? ATA_CMD_TRUSTED_RCV_DMA : ATA_CMD_TRUSTED_RCV;
+}
+
+static unsigned int ata_scsi_security_inout_xlat(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *scmd = qc->scsicmd;
+ const u8 *cdb = scmd->cmnd;
+ struct ata_taskfile *tf = &qc->tf;
+ u8 secp = cdb[1];
+ bool send = (cdb[0] == SECURITY_PROTOCOL_OUT);
+ u16 spsp = get_unaligned_be16(&cdb[2]);
+ u32 len = get_unaligned_be32(&cdb[6]);
+ bool dma = !(qc->dev->flags & ATA_DFLAG_PIO);
+
+ /*
+ * We don't support the ATA "security" protocol.
+ */
+ if (secp == 0xef) {
+ ata_scsi_set_invalid_field(qc->dev, scmd, 1, 0);
+ return 1;
+ }
+
+ if (cdb[4] & 7) { /* INC_512 */
+ if (len > 0xffff) {
+ ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0);
+ return 1;
+ }
+ } else {
+ if (len > 0x01fffe00) {
+ ata_scsi_set_invalid_field(qc->dev, scmd, 6, 0);
+ return 1;
+ }
+
+ /* convert to the sector-based ATA addressing */
+ len = (len + 511) / 512;
+ }
+
+ tf->protocol = dma ? ATA_PROT_DMA : ATA_PROT_PIO;
+ tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR | ATA_TFLAG_LBA;
+ if (send)
+ tf->flags |= ATA_TFLAG_WRITE;
+ tf->command = ata_scsi_trusted_op(len, send, dma);
+ tf->feature = secp;
+ tf->lbam = spsp & 0xff;
+ tf->lbah = spsp >> 8;
+
+ if (len) {
+ tf->nsect = len & 0xff;
+ tf->lbal = len >> 8;
+ } else {
+ if (!send)
+ tf->lbah = (1 << 7);
+ }
+
+ ata_qc_set_pc_nbytes(qc);
+ return 0;
+}
+
+/**
+ * ata_scsi_var_len_cdb_xlat - SATL variable length CDB handler
+ * @qc: Command to be translated
+ *
+ * Translate a SCSI variable length CDB to the corresponding command.
+ * The service action value in the CDB selects the handler to call.
+ *
+ * RETURNS:
+ * Zero on success, non-zero on failure
+ *
+ */
+static unsigned int ata_scsi_var_len_cdb_xlat(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *scmd = qc->scsicmd;
+ const u8 *cdb = scmd->cmnd;
+ const u16 sa = get_unaligned_be16(&cdb[8]);
+
+ /*
+ * If the service action represents an ATA pass-thru(32) command,
+ * pass it to the ata_scsi_pass_thru handler.
+ */
+ if (sa == ATA_32)
+ return ata_scsi_pass_thru(qc);
+
+ /* unsupported service action */
+ return 1;
+}
+
/**
* ata_get_xlat_func - check if SCSI to ATA translation is possible
* @dev: ATA device
@@ -4108,6 +4238,9 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
case ATA_16:
return ata_scsi_pass_thru;
+ case VARIABLE_LENGTH_CMD:
+ return ata_scsi_var_len_cdb_xlat;
+
case MODE_SELECT:
case MODE_SELECT_10:
return ata_scsi_mode_select_xlat;
@@ -4119,6 +4252,12 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
case ZBC_OUT:
return ata_scsi_zbc_out_xlat;
+ case SECURITY_PROTOCOL_IN:
+ case SECURITY_PROTOCOL_OUT:
+ if (!(dev->flags & ATA_DFLAG_TRUSTED))
+ break;
+ return ata_scsi_security_inout_xlat;
+
case START_STOP:
return ata_scsi_start_stop_xlat;
}
@@ -4386,7 +4525,7 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
shost->max_id = 16;
shost->max_lun = 1;
shost->max_channel = 1;
- shost->max_cmd_len = 16;
+ shost->max_cmd_len = 32;
/* Schedule policy is determined by ->qc_defer()
* callback and it needs to see every deferred qc.
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 052921352f31..cc2f2e35f4c2 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -716,24 +716,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
- if (PageHighMem(page)) {
- unsigned long flags;
-
- /* FIXME: use a bounce buffer */
- local_irq_save(flags);
- buf = kmap_atomic(page);
-
- /* do the actual data transfer */
- ap->ops->sff_data_xfer(qc, buf + offset, qc->sect_size,
- do_write);
-
- kunmap_atomic(buf);
- local_irq_restore(flags);
- } else {
- buf = page_address(page);
- ap->ops->sff_data_xfer(qc, buf + offset, qc->sect_size,
- do_write);
- }
+ /* do the actual data transfer */
+ buf = kmap_atomic(page);
+ ap->ops->sff_data_xfer(qc, buf + offset, qc->sect_size, do_write);
+ kunmap_atomic(buf);
if (!do_write && !PageSlab(page))
flush_dcache_page(page);
@@ -861,24 +847,10 @@ next_sg:
DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
- if (PageHighMem(page)) {
- unsigned long flags;
-
- /* FIXME: use bounce buffer */
- local_irq_save(flags);
- buf = kmap_atomic(page);
-
- /* do the actual data transfer */
- consumed = ap->ops->sff_data_xfer(qc, buf + offset,
- count, rw);
-
- kunmap_atomic(buf);
- local_irq_restore(flags);
- } else {
- buf = page_address(page);
- consumed = ap->ops->sff_data_xfer(qc, buf + offset,
- count, rw);
- }
+ /* do the actual data transfer */
+ buf = kmap_atomic(page);
+ consumed = ap->ops->sff_data_xfer(qc, buf + offset, count, rw);
+ kunmap_atomic(buf);
bytes -= min(bytes, consumed);
qc->curbytes += count;
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 5afe35baf61b..839d487394b7 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -98,6 +98,8 @@ extern struct ata_port *ata_port_alloc(struct ata_host *host);
extern const char *sata_spd_string(unsigned int spd);
extern int ata_port_probe(struct ata_port *ap);
extern void __ata_port_probe(struct ata_port *ap);
+extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
+ u8 page, void *buf, unsigned int sectors);
#define to_ata_port(d) container_of(d, struct ata_port, tdev)
@@ -160,8 +162,6 @@ extern void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
unsigned int action);
extern void ata_eh_done(struct ata_link *link, struct ata_device *dev,
unsigned int action);
-extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
- u8 page, void *buf, unsigned int sectors);
extern void ata_eh_autopsy(struct ata_port *ap);
const char *ata_get_cmd_descript(u8 command);
extern void ata_eh_report(struct ata_port *ap);
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 9c5780a7e1b9..0e55a8da2748 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1597,8 +1597,6 @@ static int bfin_atapi_probe(struct platform_device *pdev)
return -ENODEV;
}
- platform_set_drvdata(pdev, host);
-
return 0;
}
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index bf1b910c5d69..0a550190955a 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -944,7 +944,6 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
goto err_rel_gpio;
}
- platform_set_drvdata(pdev, drv_data);
drv_data->pdev = pdev;
drv_data->ide_base = ide_base;
drv_data->udma_in_phys = mem_res->start + IDEUDMADATAIN;
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
new file mode 100644
index 000000000000..5d4b72e21161
--- /dev/null
+++ b/drivers/ata/pata_ftide010.c
@@ -0,0 +1,567 @@
+/*
+ * Faraday Technology FTIDE010 driver
+ * Copyright (C) 2017 Linus Walleij <[email protected]>
+ *
+ * Includes portions of the SL2312/SL3516/Gemini PATA driver
+ * Copyright (C) 2003 StorLine, Inc <[email protected]>
+ * Copyright (C) 2009 Janos Laube <[email protected]>
+ * Copyright (C) 2010 Frederic Pecourt <[email protected]>
+ * Copyright (C) 2011 Tobias Waldvogel <[email protected]>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/libata.h>
+#include <linux/bitops.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include "sata_gemini.h"
+
+#define DRV_NAME "pata_ftide010"
+
+/**
+ * struct ftide010 - state container for the Faraday FTIDE010
+ * @dev: pointer back to the device representing this controller
+ * @base: remapped I/O space address
+ * @pclk: peripheral clock for the IDE block
+ * @host: pointer to the ATA host for this device
+ * @master_cbl: master cable type
+ * @slave_cbl: slave cable type
+ * @sg: Gemini SATA bridge pointer, if running on the Gemini
+ * @master_to_sata0: Gemini SATA bridge: the ATA master is connected
+ * to the SATA0 bridge
+ * @slave_to_sata0: Gemini SATA bridge: the ATA slave is connected
+ * to the SATA0 bridge
+ * @master_to_sata1: Gemini SATA bridge: the ATA master is connected
+ * to the SATA1 bridge
+ * @slave_to_sata1: Gemini SATA bridge: the ATA slave is connected
+ * to the SATA1 bridge
+ */
+struct ftide010 {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *pclk;
+ struct ata_host *host;
+ unsigned int master_cbl;
+ unsigned int slave_cbl;
+ /* Gemini-specific properties */
+ struct sata_gemini *sg;
+ bool master_to_sata0;
+ bool slave_to_sata0;
+ bool master_to_sata1;
+ bool slave_to_sata1;
+};
+
+#define FTIDE010_DMA_REG 0x00
+#define FTIDE010_DMA_STATUS 0x02
+#define FTIDE010_IDE_BMDTPR 0x04
+#define FTIDE010_IDE_DEVICE_ID 0x08
+#define FTIDE010_PIO_TIMING 0x10
+#define FTIDE010_MWDMA_TIMING 0x11
+#define FTIDE010_UDMA_TIMING0 0x12 /* Master */
+#define FTIDE010_UDMA_TIMING1 0x13 /* Slave */
+#define FTIDE010_CLK_MOD 0x14
+/* These registers are mapped directly to the IDE registers */
+#define FTIDE010_CMD_DATA 0x20
+#define FTIDE010_ERROR_FEATURES 0x21
+#define FTIDE010_NSECT 0x22
+#define FTIDE010_LBAL 0x23
+#define FTIDE010_LBAM 0x24
+#define FTIDE010_LBAH 0x25
+#define FTIDE010_DEVICE 0x26
+#define FTIDE010_STATUS_COMMAND 0x27
+#define FTIDE010_ALTSTAT_CTRL 0x36
+
+/* Set this bit for UDMA mode 5 and 6 */
+#define FTIDE010_UDMA_TIMING_MODE_56 BIT(7)
+
+/* 0 = 50 MHz, 1 = 66 MHz */
+#define FTIDE010_CLK_MOD_DEV0_CLK_SEL BIT(0)
+#define FTIDE010_CLK_MOD_DEV1_CLK_SEL BIT(1)
+/* Enable UDMA on a device */
+#define FTIDE010_CLK_MOD_DEV0_UDMA_EN BIT(4)
+#define FTIDE010_CLK_MOD_DEV1_UDMA_EN BIT(5)
+
+static struct scsi_host_template pata_ftide010_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+};
+
+/*
+ * Bus timings
+ *
+ * The unit of the below required timings is two clock periods of the ATA
+ * reference clock, which is 30 nanoseconds per unit at 66 MHz and 40
+ * nanoseconds per unit at 50 MHz. The PIO timings assume a 33 MHz clock
+ * for PIO.
+ *
+ * pio_active_time: array of 5 elements for T2 timing for Mode 0,
+ * 1, 2, 3 and 4. Range 0..15.
+ * pio_recovery_time: array of 5 elements for T2l timing for Mode 0,
+ * 1, 2, 3 and 4. Range 0..15.
+ * mwdma_50_active_time: array of 3 elements for Td timing for multi
+ * word DMA, Mode 0, 1 and 2 at 50 MHz. Range 0..15.
+ * mwdma_50_recovery_time: array of 3 elements for Tk timing for
+ * multi word DMA, Mode 0, 1 and 2 at 50 MHz. Range 0..15.
+ * mwdma_66_active_time: array of 3 elements for Td timing for multi
+ * word DMA, Mode 0, 1 and 2 at 66 MHz. Range 0..15.
+ * mwdma_66_recovery_time: array of 3 elements for Tk timing for
+ * multi word DMA, Mode 0, 1 and 2 at 66 MHz. Range 0..15.
+ * udma_50_setup_time: array of 6 elements for Tvds timing for ultra
+ * DMA, Mode 0, 1, 2, 3, 4 and 5 at 50 MHz. Range 0..7.
+ * udma_50_hold_time: array of 6 elements for Tdvh timing for ultra
+ * DMA, Mode 0, 1, 2, 3, 4 and 5 at 50 MHz. Range 0..7.
+ * udma_66_setup_time: array of 7 elements for Tvds timing for ultra
+ * DMA, Mode 0, 1, 2, 3, 4, 5 and 6 at 66 MHz. Range 0..7.
+ * udma_66_hold_time: array of 7 elements for Tdvh timing for ultra
+ * DMA, Mode 0, 1, 2, 3, 4, 5 and 6 at 66 MHz. Range 0..7.
+ */
+static const u8 pio_active_time[5] = {10, 10, 10, 3, 3};
+static const u8 pio_recovery_time[5] = {10, 3, 1, 3, 1};
+static const u8 mwdma_50_active_time[3] = {6, 2, 2};
+static const u8 mwdma_50_recovery_time[3] = {6, 2, 1};
+static const u8 mwdma_66_active_time[3] = {8, 3, 3};
+static const u8 mwdma_66_recovery_time[3] = {8, 2, 1};
+static const u8 udma_50_setup_time[6] = {3, 3, 2, 2, 1, 1};
+static const u8 udma_50_hold_time[6] = {3, 1, 1, 1, 1, 1};
+static const u8 udma_66_setup_time[7] = {4, 4, 3, 2, };
+static const u8 udma_66_hold_time[7] = {};
+
+/*
+ * We set 66 MHz for all MWDMA modes
+ */
+static const bool set_mdma_66_mhz[] = { true, true, true, true };
+
+/*
+ * We set 66 MHz for UDMA modes 3, 4 and 6 and no others
+ */
+static const bool set_udma_66_mhz[] = { false, false, false, true, true, false, true };
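/*
 * Editorial sketch, not part of the patch: how a per-device UDMA timing
 * byte is composed from the 50 MHz tables above (setup time in the upper
 * nibble, hold time in the lower nibble), mirroring the timreg computation
 * in ftide010_set_dmamode() below. The mode index is assumed to be a valid
 * 50 MHz UDMA mode (0..5).
 */
static u8 example_udma50_timing(unsigned int mode)
{
	u8 timreg = udma_50_setup_time[mode] << 4 |	/* Tvds */
		    udma_50_hold_time[mode];		/* Tdvh */

	/* UDMA modes 5 and 6 additionally need the mode 5/6 flag bit */
	if (mode >= 5)
		timreg |= FTIDE010_UDMA_TIMING_MODE_56;
	return timreg;
}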
+
+static void ftide010_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct ftide010 *ftide = ap->host->private_data;
+ u8 speed = adev->dma_mode;
+ u8 devno = adev->devno & 1;
+ u8 udma_en_mask;
+ u8 f66m_en_mask;
+ u8 clkreg;
+ u8 timreg;
+ u8 i;
+
+ /* Target device 0 (master) or 1 (slave) */
+ if (!devno) {
+ udma_en_mask = FTIDE010_CLK_MOD_DEV0_UDMA_EN;
+ f66m_en_mask = FTIDE010_CLK_MOD_DEV0_CLK_SEL;
+ } else {
+ udma_en_mask = FTIDE010_CLK_MOD_DEV1_UDMA_EN;
+ f66m_en_mask = FTIDE010_CLK_MOD_DEV1_CLK_SEL;
+ }
+
+ clkreg = readb(ftide->base + FTIDE010_CLK_MOD);
+ clkreg &= ~udma_en_mask;
+ clkreg &= ~f66m_en_mask;
+
+ if (speed & XFER_UDMA_0) {
+ i = speed & ~XFER_UDMA_0;
+ dev_dbg(ftide->dev, "set UDMA mode %02x, index %d\n",
+ speed, i);
+
+ clkreg |= udma_en_mask;
+ if (set_udma_66_mhz[i]) {
+ clkreg |= f66m_en_mask;
+ timreg = udma_66_setup_time[i] << 4 |
+ udma_66_hold_time[i];
+ } else {
+ timreg = udma_50_setup_time[i] << 4 |
+ udma_50_hold_time[i];
+ }
+
+ /* A special bit needs to be set for modes 5 and 6 */
+ if (i >= 5)
+ timreg |= FTIDE010_UDMA_TIMING_MODE_56;
+
+ dev_dbg(ftide->dev, "UDMA write clkreg = %02x, timreg = %02x\n",
+ clkreg, timreg);
+
+ writeb(clkreg, ftide->base + FTIDE010_CLK_MOD);
+ writeb(timreg, ftide->base + FTIDE010_UDMA_TIMING0 + devno);
+ } else {
+ i = speed & ~XFER_MW_DMA_0;
+ dev_dbg(ftide->dev, "set MWDMA mode %02x, index %d\n",
+ speed, i);
+
+ if (set_mdma_66_mhz[i]) {
+ clkreg |= f66m_en_mask;
+ timreg = mwdma_66_active_time[i] << 4 |
+ mwdma_66_recovery_time[i];
+ } else {
+ timreg = mwdma_50_active_time[i] << 4 |
+ mwdma_50_recovery_time[i];
+ }
+ dev_dbg(ftide->dev,
+ "MWDMA write clkreg = %02x, timreg = %02x\n",
+ clkreg, timreg);
+ /* This will affect all devices */
+ writeb(clkreg, ftide->base + FTIDE010_CLK_MOD);
+ writeb(timreg, ftide->base + FTIDE010_MWDMA_TIMING);
+ }
+
+ /*
+ * Store the current device (master or slave) in ap->private_data
+ * so that .qc_issue() can detect if this changes and reprogram
+ * the DMA settings.
+ */
+ ap->private_data = adev;
+
+ return;
+}
+
+static void ftide010_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct ftide010 *ftide = ap->host->private_data;
+ u8 pio = adev->pio_mode - XFER_PIO_0;
+
+ dev_dbg(ftide->dev, "set PIO mode %02x, index %d\n",
+ adev->pio_mode, pio);
+ writeb(pio_active_time[pio] << 4 | pio_recovery_time[pio],
+ ftide->base + FTIDE010_PIO_TIMING);
+}
+
+/*
+ * We implement our own qc_issue() callback since we may need to set up
+ * the timings differently for master and slave transfers: the CLK_MOD_REG
+ * and MWDMA_TIMING_REG are shared between master and slave, so reprogramming
+ * them may be necessary.
+ */
+static unsigned int ftide010_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ata_device *adev = qc->dev;
+
+ /*
+ * If the device changed, i.e. slave->master, master->slave,
+ * then set up the DMA mode again so we are sure the timings
+ * are correct.
+ */
+ if (adev != ap->private_data && ata_dma_enabled(adev))
+ ftide010_set_dmamode(ap, adev);
+
+ return ata_bmdma_qc_issue(qc);
+}
+
+static struct ata_port_operations pata_ftide010_port_ops = {
+ .inherits = &ata_bmdma_port_ops,
+ .set_dmamode = ftide010_set_dmamode,
+ .set_piomode = ftide010_set_piomode,
+ .qc_issue = ftide010_qc_issue,
+};
+
+static struct ata_port_info ftide010_port_info[] = {
+ {
+ .flags = ATA_FLAG_SLAVE_POSS,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .pio_mask = ATA_PIO4,
+ .port_ops = &pata_ftide010_port_ops,
+ },
+};
+
+#if IS_ENABLED(CONFIG_SATA_GEMINI)
+
+static int pata_ftide010_gemini_port_start(struct ata_port *ap)
+{
+ struct ftide010 *ftide = ap->host->private_data;
+ struct device *dev = ftide->dev;
+ struct sata_gemini *sg = ftide->sg;
+ int bridges = 0;
+ int ret;
+
+ ret = ata_bmdma_port_start(ap);
+ if (ret)
+ return ret;
+
+ if (ftide->master_to_sata0) {
+ dev_info(dev, "SATA0 (master) start\n");
+ ret = gemini_sata_start_bridge(sg, 0);
+ if (!ret)
+ bridges++;
+ }
+ if (ftide->master_to_sata1) {
+ dev_info(dev, "SATA1 (master) start\n");
+ ret = gemini_sata_start_bridge(sg, 1);
+ if (!ret)
+ bridges++;
+ }
+ /* Avoid double-starting */
+ if (ftide->slave_to_sata0 && !ftide->master_to_sata0) {
+ dev_info(dev, "SATA0 (slave) start\n");
+ ret = gemini_sata_start_bridge(sg, 0);
+ if (!ret)
+ bridges++;
+ }
+ /* Avoid double-starting */
+ if (ftide->slave_to_sata1 && !ftide->master_to_sata1) {
+ dev_info(dev, "SATA1 (slave) start\n");
+ ret = gemini_sata_start_bridge(sg, 1);
+ if (!ret)
+ bridges++;
+ }
+
+ dev_info(dev, "brought %d bridges online\n", bridges);
+ return (bridges > 0) ? 0 : -EINVAL; // -ENODEV;
+}
+
+static void pata_ftide010_gemini_port_stop(struct ata_port *ap)
+{
+ struct ftide010 *ftide = ap->host->private_data;
+ struct device *dev = ftide->dev;
+ struct sata_gemini *sg = ftide->sg;
+
+ if (ftide->master_to_sata0) {
+ dev_info(dev, "SATA0 (master) stop\n");
+ gemini_sata_stop_bridge(sg, 0);
+ }
+ if (ftide->master_to_sata1) {
+ dev_info(dev, "SATA1 (master) stop\n");
+ gemini_sata_stop_bridge(sg, 1);
+ }
+ /* Avoid double-stopping */
+ if (ftide->slave_to_sata0 && !ftide->master_to_sata0) {
+ dev_info(dev, "SATA0 (slave) stop\n");
+ gemini_sata_stop_bridge(sg, 0);
+ }
+ /* Avoid double-stopping */
+ if (ftide->slave_to_sata1 && !ftide->master_to_sata1) {
+ dev_info(dev, "SATA1 (slave) stop\n");
+ gemini_sata_stop_bridge(sg, 1);
+ }
+}
+
+static int pata_ftide010_gemini_cable_detect(struct ata_port *ap)
+{
+ struct ftide010 *ftide = ap->host->private_data;
+
+ /*
+ * Return the master cable type; it is not clear how to report a
+ * different cable type for the slave than for the master.
+ */
+ return ftide->master_cbl;
+}
+
+static int pata_ftide010_gemini_init(struct ftide010 *ftide,
+ bool is_ata1)
+{
+ struct device *dev = ftide->dev;
+ struct sata_gemini *sg;
+ enum gemini_muxmode muxmode;
+
+ /* Look up SATA bridge */
+ sg = gemini_sata_bridge_get();
+ if (IS_ERR(sg))
+ return PTR_ERR(sg);
+ ftide->sg = sg;
+
+ muxmode = gemini_sata_get_muxmode(sg);
+
+ /* Special ops */
+ pata_ftide010_port_ops.port_start =
+ pata_ftide010_gemini_port_start;
+ pata_ftide010_port_ops.port_stop =
+ pata_ftide010_gemini_port_stop;
+ pata_ftide010_port_ops.cable_detect =
+ pata_ftide010_gemini_cable_detect;
+
+ /* Flag port as SATA-capable */
+ if (gemini_sata_bridge_enabled(sg, is_ata1))
+ ftide010_port_info[0].flags |= ATA_FLAG_SATA;
+
+ /*
+ * We assume that a simple 40-wire cable is used in PATA mode.
+ * If you're adding a system using the PATA interface, make sure
+ * the right cable is set up here; it might be necessary to use
+ * special hardware detection or encode the cable type in the device
+ * tree with special properties.
+ */
+ if (!is_ata1) {
+ switch (muxmode) {
+ case GEMINI_MUXMODE_0:
+ ftide->master_cbl = ATA_CBL_SATA;
+ ftide->slave_cbl = ATA_CBL_PATA40;
+ ftide->master_to_sata0 = true;
+ break;
+ case GEMINI_MUXMODE_1:
+ ftide->master_cbl = ATA_CBL_SATA;
+ ftide->slave_cbl = ATA_CBL_NONE;
+ ftide->master_to_sata0 = true;
+ break;
+ case GEMINI_MUXMODE_2:
+ ftide->master_cbl = ATA_CBL_PATA40;
+ ftide->slave_cbl = ATA_CBL_PATA40;
+ break;
+ case GEMINI_MUXMODE_3:
+ ftide->master_cbl = ATA_CBL_SATA;
+ ftide->slave_cbl = ATA_CBL_SATA;
+ ftide->master_to_sata0 = true;
+ ftide->slave_to_sata1 = true;
+ break;
+ }
+ } else {
+ switch (muxmode) {
+ case GEMINI_MUXMODE_0:
+ ftide->master_cbl = ATA_CBL_SATA;
+ ftide->slave_cbl = ATA_CBL_NONE;
+ ftide->master_to_sata1 = true;
+ break;
+ case GEMINI_MUXMODE_1:
+ ftide->master_cbl = ATA_CBL_SATA;
+ ftide->slave_cbl = ATA_CBL_PATA40;
+ ftide->master_to_sata1 = true;
+ break;
+ case GEMINI_MUXMODE_2:
+ ftide->master_cbl = ATA_CBL_SATA;
+ ftide->slave_cbl = ATA_CBL_SATA;
+ ftide->slave_to_sata0 = true;
+ ftide->master_to_sata1 = true;
+ break;
+ case GEMINI_MUXMODE_3:
+ ftide->master_cbl = ATA_CBL_PATA40;
+ ftide->slave_cbl = ATA_CBL_PATA40;
+ break;
+ }
+ }
+ dev_info(dev, "set up Gemini PATA%d\n", is_ata1);
+
+ return 0;
+}
+#else
+static int pata_ftide010_gemini_init(struct ftide010 *ftide,
+ bool is_ata1)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+
+static int pata_ftide010_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct ata_port_info pi = ftide010_port_info[0];
+ const struct ata_port_info *ppi[] = { &pi, NULL };
+ struct ftide010 *ftide;
+ struct resource *res;
+ int irq;
+ int ret;
+ int i;
+
+ ftide = devm_kzalloc(dev, sizeof(*ftide), GFP_KERNEL);
+ if (!ftide)
+ return -ENOMEM;
+ ftide->dev = dev;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ ftide->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ftide->base))
+ return PTR_ERR(ftide->base);
+
+ ftide->pclk = devm_clk_get(dev, "PCLK");
+ if (!IS_ERR(ftide->pclk)) {
+ ret = clk_prepare_enable(ftide->pclk);
+ if (ret) {
+ dev_err(dev, "failed to enable PCLK\n");
+ return ret;
+ }
+ }
+
+ /* Some special Cortina Gemini init, if needed */
+ if (of_device_is_compatible(np, "cortina,gemini-pata")) {
+ /*
+ * We need to know which instance is probing (the
+ * Gemini has two instances of FTIDE010) and we do
+ * this simply by looking at the physical base
+ * address: 0x63400000 means we are ATA1, otherwise we
+ * are ATA0. This will also set up the cable types.
+ */
+ ret = pata_ftide010_gemini_init(ftide,
+ (res->start == 0x63400000));
+ if (ret)
+ goto err_dis_clk;
+ } else {
+ /* Else assume we are connected using PATA40 */
+ ftide->master_cbl = ATA_CBL_PATA40;
+ ftide->slave_cbl = ATA_CBL_PATA40;
+ }
+
+ ftide->host = ata_host_alloc_pinfo(dev, ppi, 1);
+ if (!ftide->host) {
+ ret = -ENOMEM;
+ goto err_dis_clk;
+ }
+ ftide->host->private_data = ftide;
+
+ for (i = 0; i < ftide->host->n_ports; i++) {
+ struct ata_port *ap = ftide->host->ports[i];
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+
+ ioaddr->bmdma_addr = ftide->base + FTIDE010_DMA_REG;
+ ioaddr->cmd_addr = ftide->base + FTIDE010_CMD_DATA;
+ ioaddr->ctl_addr = ftide->base + FTIDE010_ALTSTAT_CTRL;
+ ioaddr->altstatus_addr = ftide->base + FTIDE010_ALTSTAT_CTRL;
+ ata_sff_std_ports(ioaddr);
+ }
+
+ dev_info(dev, "device ID %08x, irq %d, reg %pR\n",
+ readl(ftide->base + FTIDE010_IDE_DEVICE_ID), irq, res);
+
+ ret = ata_host_activate(ftide->host, irq, ata_bmdma_interrupt,
+ 0, &pata_ftide010_sht);
+ if (ret)
+ goto err_dis_clk;
+
+ return 0;
+
+err_dis_clk:
+ if (!IS_ERR(ftide->pclk))
+ clk_disable_unprepare(ftide->pclk);
+ return ret;
+}
+
+static int pata_ftide010_remove(struct platform_device *pdev)
+{
+ struct ata_host *host = platform_get_drvdata(pdev);
+ struct ftide010 *ftide = host->private_data;
+
+ ata_host_detach(ftide->host);
+ if (!IS_ERR(ftide->pclk))
+ clk_disable_unprepare(ftide->pclk);
+
+ return 0;
+}
+
+static const struct of_device_id pata_ftide010_of_match[] = {
+ {
+ .compatible = "faraday,ftide010",
+ },
+ {},
+};
+
+static struct platform_driver pata_ftide010_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(pata_ftide010_of_match),
+ },
+ .probe = pata_ftide010_probe,
+ .remove = pata_ftide010_remove,
+};
+module_platform_driver(pata_ftide010_driver);
+
+MODULE_AUTHOR("Linus Walleij <[email protected]>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index f524a9099d01..1ba03d6df951 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -1038,7 +1038,7 @@ static void octeon_cf_shutdown(struct device *dev)
}
}
-static struct of_device_id octeon_cf_match[] = {
+static const struct of_device_id octeon_cf_match[] = {
{
.compatible = "cavium,ebt3000-compact-flash",
},
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index c8b6a780a290..653b9a0bf727 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -148,8 +148,6 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
if (!ah)
return -ENOMEM;
- platform_set_drvdata(pdev, ah);
-
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 9ce5952216bc..959bb54fd803 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -292,7 +292,7 @@ static struct ata_port_operations rdc_pata_ops = {
.prereset = rdc_pata_prereset,
};
-static struct ata_port_info rdc_port_info = {
+static const struct ata_port_info rdc_port_info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index 431c7de30ce6..50801c40b029 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -582,8 +582,6 @@ static int __init pata_s3c_probe(struct platform_device *pdev)
/* Set endianness and enable the interface */
pata_s3c_hwinit(info, pdata);
- platform_set_drvdata(pdev, host);
-
ret = ata_host_activate(host, info->irq,
info->irq ? pata_s3c_irq : NULL,
0, &pata_s3c_sht);
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
index b920c3407f8b..1b80a66caa54 100644
--- a/drivers/ata/pata_sch.c
+++ b/drivers/ata/pata_sch.c
@@ -81,7 +81,7 @@ static struct ata_port_operations sch_pata_ops = {
.set_dmamode = sch_set_dmamode,
};
-static struct ata_port_info sch_port_info = {
+static const struct ata_port_info sch_port_info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index e0939bd5ea73..ce128d5a6ded 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1285,7 +1285,6 @@ static int sata_dwc_probe(struct platform_device *ofdev)
if (err)
dev_err(&ofdev->dev, "failed to activate host");
- dev_set_drvdata(&ofdev->dev, host);
return 0;
error_out:
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 01734d54c69c..95bf3abda6f6 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1523,8 +1523,6 @@ static int sata_fsl_probe(struct platform_device *ofdev)
ata_host_activate(host, irq, sata_fsl_interrupt, SATA_FSL_IRQ_FLAG,
&sata_fsl_sht);
- platform_set_drvdata(ofdev, host);
-
host_priv->intr_coalescing.show = fsl_sata_intr_coalescing_show;
host_priv->intr_coalescing.store = fsl_sata_intr_coalescing_store;
sysfs_attr_init(&host_priv->intr_coalescing.attr);
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
new file mode 100644
index 000000000000..8c704523bae7
--- /dev/null
+++ b/drivers/ata/sata_gemini.c
@@ -0,0 +1,438 @@
+/*
+ * Cortina Systems Gemini SATA bridge add-on to Faraday FTIDE010
+ * Copyright (C) 2017 Linus Walleij <[email protected]>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include "sata_gemini.h"
+
+#define DRV_NAME "gemini_sata_bridge"
+
+/**
+ * struct sata_gemini - a state container for a Gemini SATA bridge
+ * @dev: the containing device
+ * @base: remapped I/O memory base
+ * @muxmode: the current muxing mode
+ * @ide_pins: if the device is using the plain IDE interface pins
+ * @sata_bridge: if the device enables the SATA bridge
+ * @sata0_reset: SATA0 reset handler
+ * @sata1_reset: SATA1 reset handler
+ * @sata0_pclk: SATA0 PCLK handler
+ * @sata1_pclk: SATA1 PCLK handler
+ */
+struct sata_gemini {
+ struct device *dev;
+ void __iomem *base;
+ enum gemini_muxmode muxmode;
+ bool ide_pins;
+ bool sata_bridge;
+ struct reset_control *sata0_reset;
+ struct reset_control *sata1_reset;
+ struct clk *sata0_pclk;
+ struct clk *sata1_pclk;
+};
+
+/* Global IDE PAD Skew Control Register */
+#define GEMINI_GLOBAL_IDE_SKEW_CTRL 0x18
+#define GEMINI_IDE1_HOST_STROBE_DELAY_SHIFT 28
+#define GEMINI_IDE1_DEVICE_STROBE_DELAY_SHIFT 24
+#define GEMINI_IDE1_OUTPUT_IO_SKEW_SHIFT 20
+#define GEMINI_IDE1_INPUT_IO_SKEW_SHIFT 16
+#define GEMINI_IDE0_HOST_STROBE_DELAY_SHIFT 12
+#define GEMINI_IDE0_DEVICE_STROBE_DELAY_SHIFT 8
+#define GEMINI_IDE0_OUTPUT_IO_SKEW_SHIFT 4
+#define GEMINI_IDE0_INPUT_IO_SKEW_SHIFT 0
+
+/* Miscellaneous Control Register */
+#define GEMINI_GLOBAL_MISC_CTRL 0x30
+/*
+ * Values of IDE IOMUX bits in the misc control register
+ *
+ * Bits 26:24 are "IDE IO Select", which decides what SATA
+ * adapters are connected to which of the two IDE/ATA
+ * controllers in the Gemini. We can connect the two IDE blocks
+ * to one SATA adapter each, both acting as master, or one IDE
+ * block to two SATA adapters so the IDE block can act in a
+ * master/slave configuration.
+ *
+ * We also bring out different blocks on the actual IDE
+ * pins (not SATA pins) if (and only if) these are muxed in.
+ *
+ * 111-100 - Reserved
+ * Mode 0: 000 - ata0 master <-> sata0
+ * ata1 master <-> sata1
+ * ata0 slave interface brought out on IDE pads
+ * Mode 1: 001 - ata0 master <-> sata0
+ * ata1 master <-> sata1
+ * ata1 slave interface brought out on IDE pads
+ * Mode 2: 010 - ata1 master <-> sata1
+ * ata1 slave <-> sata0
+ * ata0 master and slave interfaces brought out
+ * on IDE pads
+ * Mode 3: 011 - ata0 master <-> sata0
+ * ata1 slave <-> sata1
+ * ata1 master and slave interfaces brought out
+ * on IDE pads
+ */
+#define GEMINI_IDE_IOMUX_MASK (7 << 24)
+#define GEMINI_IDE_IOMUX_MODE0 (0 << 24)
+#define GEMINI_IDE_IOMUX_MODE1 (1 << 24)
+#define GEMINI_IDE_IOMUX_MODE2 (2 << 24)
+#define GEMINI_IDE_IOMUX_MODE3 (3 << 24)
+#define GEMINI_IDE_IOMUX_SHIFT (24)
+#define GEMINI_IDE_PADS_ENABLE BIT(4)
+#define GEMINI_PFLASH_PADS_DISABLE BIT(1)
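/*
 * Editorial sketch, not part of the patch: programming mux mode 3 into the
 * global misc control register while bringing out the IDE pads, assuming a
 * regmap handle for the global syscon. This is close to what
 * gemini_sata_probe() below does when the IDE pins are enabled.
 */
static int example_set_muxmode3(struct regmap *map)
{
	u32 mask = GEMINI_IDE_IOMUX_MASK | GEMINI_IDE_PADS_ENABLE |
		   GEMINI_PFLASH_PADS_DISABLE;
	u32 val = GEMINI_IDE_IOMUX_MODE3 | GEMINI_IDE_PADS_ENABLE |
		  GEMINI_PFLASH_PADS_DISABLE;

	/* IDE pads and parallel flash pads are mutually exclusive */
	return regmap_update_bits(map, GEMINI_GLOBAL_MISC_CTRL, mask, val);
}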
+
+/*
+ * Registers directly controlling the PATA<->SATA adapters
+ */
+#define GEMINI_SATA_ID 0x00
+#define GEMINI_SATA_PHY_ID 0x04
+#define GEMINI_SATA0_STATUS 0x08
+#define GEMINI_SATA1_STATUS 0x0c
+#define GEMINI_SATA0_CTRL 0x18
+#define GEMINI_SATA1_CTRL 0x1c
+
+#define GEMINI_SATA_STATUS_BIST_DONE BIT(5)
+#define GEMINI_SATA_STATUS_BIST_OK BIT(4)
+#define GEMINI_SATA_STATUS_PHY_READY BIT(0)
+
+#define GEMINI_SATA_CTRL_PHY_BIST_EN BIT(14)
+#define GEMINI_SATA_CTRL_PHY_FORCE_IDLE BIT(13)
+#define GEMINI_SATA_CTRL_PHY_FORCE_READY BIT(12)
+#define GEMINI_SATA_CTRL_PHY_AFE_LOOP_EN BIT(10)
+#define GEMINI_SATA_CTRL_PHY_DIG_LOOP_EN BIT(9)
+#define GEMINI_SATA_CTRL_HOTPLUG_DETECT_EN BIT(4)
+#define GEMINI_SATA_CTRL_ATAPI_EN BIT(3)
+#define GEMINI_SATA_CTRL_BUS_WITH_20 BIT(2)
+#define GEMINI_SATA_CTRL_SLAVE_EN BIT(1)
+#define GEMINI_SATA_CTRL_EN BIT(0)
+
+/*
+ * There is only ever one instance of this bridge on a system,
+ * so create a singleton so that the FTIDE010 instances can grab
+ * a reference to it.
+ */
+static struct sata_gemini *sg_singleton;
+
+struct sata_gemini *gemini_sata_bridge_get(void)
+{
+ if (sg_singleton)
+ return sg_singleton;
+ return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(gemini_sata_bridge_get);
+
+bool gemini_sata_bridge_enabled(struct sata_gemini *sg, bool is_ata1)
+{
+ if (!sg->sata_bridge)
+ return false;
+ /*
+ * In muxmodes 2 and 3, one of the ATA controllers is
+ * actually not connected to any SATA bridge.
+ */
+ if ((sg->muxmode == GEMINI_MUXMODE_2) &&
+ !is_ata1)
+ return false;
+ if ((sg->muxmode == GEMINI_MUXMODE_3) &&
+ is_ata1)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(gemini_sata_bridge_enabled);
+
+enum gemini_muxmode gemini_sata_get_muxmode(struct sata_gemini *sg)
+{
+ return sg->muxmode;
+}
+EXPORT_SYMBOL(gemini_sata_get_muxmode);
+
+static int gemini_sata_setup_bridge(struct sata_gemini *sg,
+ unsigned int bridge)
+{
+ unsigned long timeout = jiffies + (HZ * 1);
+ bool bridge_online;
+ u32 val;
+
+ if (bridge == 0) {
+ val = GEMINI_SATA_CTRL_HOTPLUG_DETECT_EN | GEMINI_SATA_CTRL_EN;
+ /* SATA0 slave mode is only used in muxmode 2 */
+ if (sg->muxmode == GEMINI_MUXMODE_2)
+ val |= GEMINI_SATA_CTRL_SLAVE_EN;
+ writel(val, sg->base + GEMINI_SATA0_CTRL);
+ } else {
+ val = GEMINI_SATA_CTRL_HOTPLUG_DETECT_EN | GEMINI_SATA_CTRL_EN;
+ /* SATA1 slave mode is only used in muxmode 3 */
+ if (sg->muxmode == GEMINI_MUXMODE_3)
+ val |= GEMINI_SATA_CTRL_SLAVE_EN;
+ writel(val, sg->base + GEMINI_SATA1_CTRL);
+ }
+
+ /* Vendor code waits 10 ms here */
+ msleep(10);
+
+ /* Wait for PHY to become ready */
+ do {
+ msleep(100);
+
+ if (bridge == 0)
+ val = readl(sg->base + GEMINI_SATA0_STATUS);
+ else
+ val = readl(sg->base + GEMINI_SATA1_STATUS);
+ if (val & GEMINI_SATA_STATUS_PHY_READY)
+ break;
+ } while (time_before(jiffies, timeout));
+
+ bridge_online = !!(val & GEMINI_SATA_STATUS_PHY_READY);
+
+ dev_info(sg->dev, "SATA%d PHY %s\n", bridge,
+ bridge_online ? "ready" : "not ready");
+
+ return bridge_online ? 0 : -ENODEV;
+}
+
+int gemini_sata_start_bridge(struct sata_gemini *sg, unsigned int bridge)
+{
+ struct clk *pclk;
+ int ret;
+
+ if (bridge == 0)
+ pclk = sg->sata0_pclk;
+ else
+ pclk = sg->sata1_pclk;
+ clk_enable(pclk);
+ msleep(10);
+
+ /* Do not keep clocking a bridge that is not online */
+ ret = gemini_sata_setup_bridge(sg, bridge);
+ if (ret)
+ clk_disable(pclk);
+
+ return ret;
+}
+EXPORT_SYMBOL(gemini_sata_start_bridge);
+
+void gemini_sata_stop_bridge(struct sata_gemini *sg, unsigned int bridge)
+{
+ if (bridge == 0)
+ clk_disable(sg->sata0_pclk);
+ else if (bridge == 1)
+ clk_disable(sg->sata1_pclk);
+}
+EXPORT_SYMBOL(gemini_sata_stop_bridge);
+
+int gemini_sata_reset_bridge(struct sata_gemini *sg,
+ unsigned int bridge)
+{
+ if (bridge == 0)
+ reset_control_reset(sg->sata0_reset);
+ else
+ reset_control_reset(sg->sata1_reset);
+ msleep(10);
+ return gemini_sata_setup_bridge(sg, bridge);
+}
+EXPORT_SYMBOL(gemini_sata_reset_bridge);
+
+static int gemini_sata_bridge_init(struct sata_gemini *sg)
+{
+ struct device *dev = sg->dev;
+ u32 sata_id, sata_phy_id;
+ int ret;
+
+ sg->sata0_pclk = devm_clk_get(dev, "SATA0_PCLK");
+ if (IS_ERR(sg->sata0_pclk)) {
+ dev_err(dev, "no SATA0 PCLK");
+ return -ENODEV;
+ }
+ sg->sata1_pclk = devm_clk_get(dev, "SATA1_PCLK");
+ if (IS_ERR(sg->sata1_pclk)) {
+ dev_err(dev, "no SATA1 PCLK");
+ return -ENODEV;
+ }
+
+ ret = clk_prepare_enable(sg->sata0_pclk);
+ if (ret) {
+ pr_err("failed to enable SATA0 PCLK\n");
+ return ret;
+ }
+ ret = clk_prepare_enable(sg->sata1_pclk);
+ if (ret) {
+ pr_err("failed to enable SATA1 PCLK\n");
+ clk_disable_unprepare(sg->sata0_pclk);
+ return ret;
+ }
+
+ sg->sata0_reset = devm_reset_control_get(dev, "sata0");
+ if (IS_ERR(sg->sata0_reset)) {
+ dev_err(dev, "no SATA0 reset controller\n");
+ clk_disable_unprepare(sg->sata1_pclk);
+ clk_disable_unprepare(sg->sata0_pclk);
+ return PTR_ERR(sg->sata0_reset);
+ }
+ sg->sata1_reset = devm_reset_control_get(dev, "sata1");
+ if (IS_ERR(sg->sata1_reset)) {
+ dev_err(dev, "no SATA1 reset controller\n");
+ clk_disable_unprepare(sg->sata1_pclk);
+ clk_disable_unprepare(sg->sata0_pclk);
+ return PTR_ERR(sg->sata1_reset);
+ }
+
+ sata_id = readl(sg->base + GEMINI_SATA_ID);
+ sata_phy_id = readl(sg->base + GEMINI_SATA_PHY_ID);
+ sg->sata_bridge = true;
+ clk_disable(sg->sata0_pclk);
+ clk_disable(sg->sata1_pclk);
+
+ dev_info(dev, "SATA ID %08x, PHY ID: %08x\n", sata_id, sata_phy_id);
+
+ return 0;
+}
+
+static int gemini_sata_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct sata_gemini *sg;
+ static struct regmap *map;
+ struct resource *res;
+ enum gemini_muxmode muxmode;
+ u32 gmode;
+ u32 gmask;
+ u32 val;
+ int ret;
+
+ sg = devm_kzalloc(dev, sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return -ENOMEM;
+ sg->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ sg->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(sg->base))
+ return PTR_ERR(sg->base);
+
+ map = syscon_regmap_lookup_by_phandle(np, "syscon");
+ if (IS_ERR(map)) {
+ dev_err(dev, "no global syscon\n");
+ return PTR_ERR(map);
+ }
+
+ /* Set up the SATA bridge if need be */
+ if (of_property_read_bool(np, "cortina,gemini-enable-sata-bridge")) {
+ ret = gemini_sata_bridge_init(sg);
+ if (ret)
+ return ret;
+ }
+
+ if (of_property_read_bool(np, "cortina,gemini-enable-ide-pins"))
+ sg->ide_pins = true;
+
+ if (!sg->sata_bridge && !sg->ide_pins) {
+ dev_err(dev, "neither SATA bridge or IDE output enabled\n");
+ ret = -EINVAL;
+ goto out_unprep_clk;
+ }
+
+ ret = of_property_read_u32(np, "cortina,gemini-ata-muxmode", &muxmode);
+ if (ret) {
+ dev_err(dev, "could not parse ATA muxmode\n");
+ goto out_unprep_clk;
+ }
+ if (muxmode > GEMINI_MUXMODE_3) {
+ dev_err(dev, "illegal muxmode %d\n", muxmode);
+ ret = -EINVAL;
+ goto out_unprep_clk;
+ }
+ sg->muxmode = muxmode;
+ gmask = GEMINI_IDE_IOMUX_MASK;
+ gmode = (muxmode << GEMINI_IDE_IOMUX_SHIFT);
+
+ /*
+ * If we mux out the IDE, parallel flash must be disabled.
+ * SATA0 and SATA1 have dedicated pins and may coexist with
+ * parallel flash.
+ */
+ if (sg->ide_pins)
+ gmode |= GEMINI_IDE_PADS_ENABLE | GEMINI_PFLASH_PADS_DISABLE;
+ else
+ gmask |= GEMINI_IDE_PADS_ENABLE;
+
+ ret = regmap_update_bits(map, GEMINI_GLOBAL_MISC_CTRL, gmask, gmode);
+ if (ret) {
+ dev_err(dev, "unable to set up IDE muxing\n");
+ ret = -ENODEV;
+ goto out_unprep_clk;
+ }
+
+ /* FIXME: add more elaborate IDE skew control handling */
+ if (sg->ide_pins) {
+ ret = regmap_read(map, GEMINI_GLOBAL_IDE_SKEW_CTRL, &val);
+ if (ret) {
+ dev_err(dev, "cannot read IDE skew control register\n");
+ return ret;
+ }
+ dev_info(dev, "IDE skew control: %08x\n", val);
+ }
+
+ dev_info(dev, "set up the Gemini IDE/SATA nexus\n");
+ platform_set_drvdata(pdev, sg);
+ sg_singleton = sg;
+
+ return 0;
+
+out_unprep_clk:
+ if (sg->sata_bridge) {
+ clk_unprepare(sg->sata1_pclk);
+ clk_unprepare(sg->sata0_pclk);
+ }
+ return ret;
+}
+
+static int gemini_sata_remove(struct platform_device *pdev)
+{
+ struct sata_gemini *sg = platform_get_drvdata(pdev);
+
+ if (sg->sata_bridge) {
+ clk_unprepare(sg->sata1_pclk);
+ clk_unprepare(sg->sata0_pclk);
+ }
+ sg_singleton = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id gemini_sata_of_match[] = {
+ {
+ .compatible = "cortina,gemini-sata-bridge",
+ },
+ {},
+};
+
+static struct platform_driver gemini_sata_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(gemini_sata_of_match),
+ },
+ .probe = gemini_sata_probe,
+ .remove = gemini_sata_remove,
+};
+module_platform_driver(gemini_sata_driver);
+
+MODULE_AUTHOR("Linus Walleij <[email protected]>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/sata_gemini.h b/drivers/ata/sata_gemini.h
new file mode 100644
index 000000000000..ca1837a394c8
--- /dev/null
+++ b/drivers/ata/sata_gemini.h
@@ -0,0 +1,21 @@
+/* Header for the Gemini SATA bridge */
+#ifndef SATA_GEMINI_H
+#define SATA_GEMINI_H
+
+struct sata_gemini;
+
+enum gemini_muxmode {
+ GEMINI_MUXMODE_0 = 0,
+ GEMINI_MUXMODE_1,
+ GEMINI_MUXMODE_2,
+ GEMINI_MUXMODE_3,
+};
+
+struct sata_gemini *gemini_sata_bridge_get(void);
+bool gemini_sata_bridge_enabled(struct sata_gemini *sg, bool is_ata1);
+enum gemini_muxmode gemini_sata_get_muxmode(struct sata_gemini *sg);
+int gemini_sata_start_bridge(struct sata_gemini *sg, unsigned int bridge);
+void gemini_sata_stop_bridge(struct sata_gemini *sg, unsigned int bridge);
+int gemini_sata_reset_bridge(struct sata_gemini *sg, unsigned int bridge);
+
+#endif
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index e81a8217f1ff..9b6d7930d1c7 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -737,7 +737,7 @@ static struct ata_port_operations inic_port_ops = {
.port_start = inic_port_start,
};
-static struct ata_port_info inic_port_info = {
+static const struct ata_port_info inic_port_info = {
.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index b7939a2c1fab..ee9844758736 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -828,7 +828,7 @@ static void sata_rcar_init_controller(struct ata_host *host)
iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG);
}
-static struct of_device_id sata_rcar_match[] = {
+static const struct of_device_id sata_rcar_match[] = {
{
/* Deprecated by "renesas,sata-r8a7779" */
.compatible = "renesas,rcar-sata",
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 22e96fc77d09..93b8d783936a 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -80,6 +80,10 @@ struct svia_priv {
bool wd_workaround;
};
+static int vt6420_hotplug;
+module_param_named(vt6420_hotplug, vt6420_hotplug, int, 0644);
+MODULE_PARM_DESC(vt6420_hotplug, "Enable hot-plug support for VT6420 (0=Don't support, 1=support)");
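/*
 * Editorial note, not part of the patch: the new knob can be set at module
 * load time, e.g. "modprobe sata_via vt6420_hotplug=1", or on the kernel
 * command line as "sata_via.vt6420_hotplug=1".
 */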
+
static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int svia_pci_device_resume(struct pci_dev *pdev);
@@ -166,7 +170,7 @@ static const struct ata_port_info vt6420_port_info = {
.port_ops = &vt6420_sata_ops,
};
-static struct ata_port_info vt6421_sport_info = {
+static const struct ata_port_info vt6421_sport_info = {
.flags = ATA_FLAG_SATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -174,7 +178,7 @@ static struct ata_port_info vt6421_sport_info = {
.port_ops = &vt6421_sata_ops,
};
-static struct ata_port_info vt6421_pport_info = {
+static const struct ata_port_info vt6421_pport_info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
/* No MWDMA */
@@ -182,7 +186,7 @@ static struct ata_port_info vt6421_pport_info = {
.port_ops = &vt6421_pata_ops,
};
-static struct ata_port_info vt8251_port_info = {
+static const struct ata_port_info vt8251_port_info = {
.flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -473,6 +477,11 @@ static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
struct ata_host *host;
int rc;
+ if (vt6420_hotplug) {
+ ppi[0]->port_ops->scr_read = svia_scr_read;
+ ppi[0]->port_ops->scr_write = svia_scr_write;
+ }
+
rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
if (rc)
return rc;
@@ -556,7 +565,7 @@ static void svia_wd_fix(struct pci_dev *pdev)
pci_write_config_byte(pdev, 0x52, tmp8 | BIT(2));
}
-static irqreturn_t vt6421_interrupt(int irq, void *dev_instance)
+static irqreturn_t vt642x_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
irqreturn_t rc = ata_bmdma_interrupt(irq, dev_instance);
@@ -644,7 +653,7 @@ static void svia_configure(struct pci_dev *pdev, int board_id,
pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
}
- if (board_id == vt6421) {
+ if ((board_id == vt6420 && vt6420_hotplug) || board_id == vt6421) {
/* enable IRQ on hotplug */
pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
@@ -744,8 +753,8 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
svia_configure(pdev, board_id, hpriv);
pci_set_master(pdev);
- if (board_id == vt6421)
- return ata_host_activate(host, pdev->irq, vt6421_interrupt,
+ if ((board_id == vt6420 && vt6420_hotplug) || board_id == vt6421)
+ return ata_host_activate(host, pdev->irq, vt642x_interrupt,
IRQF_SHARED, &svia_sht);
else
return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 8dde934f8d15..755451f684bc 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2664,7 +2664,11 @@ void device_shutdown(void)
pm_runtime_get_noresume(dev);
pm_runtime_barrier(dev);
- if (dev->bus && dev->bus->shutdown) {
+ if (dev->class && dev->class->shutdown) {
+ if (initcall_debug)
+ dev_info(dev, "shutdown\n");
+ dev->class->shutdown(dev);
+ } else if (dev->bus && dev->bus->shutdown) {
if (initcall_debug)
dev_info(dev, "shutdown\n");
dev->bus->shutdown(dev);
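/*
 * Editorial sketch, not part of the patch: with this change a device class
 * can register its own shutdown hook, which device_shutdown() now calls in
 * preference to the bus ->shutdown(). The class and callback names here
 * are made up for illustration.
 */
static void example_class_shutdown(struct device *dev)
{
	dev_info(dev, "example class shutdown\n");
}

static struct class example_class = {
	.name = "example",
	.shutdown = example_class_shutdown,
};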
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 640a7e63c453..2ae24c28e70c 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -16,8 +16,27 @@ struct dma_coherent_mem {
int flags;
unsigned long *bitmap;
spinlock_t spinlock;
+ bool use_dev_dma_pfn_offset;
};
+static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;
+
+static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
+{
+ if (dev && dev->dma_mem)
+ return dev->dma_mem;
+ return dma_coherent_default_memory;
+}
+
+static inline dma_addr_t dma_get_device_base(struct device *dev,
+ struct dma_coherent_mem * mem)
+{
+ if (mem->use_dev_dma_pfn_offset)
+ return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
+ else
+ return mem->device_base;
+}
+
static bool dma_init_coherent_memory(
phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
struct dma_coherent_mem **mem)
@@ -83,6 +102,9 @@ static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
static int dma_assign_coherent_memory(struct device *dev,
struct dma_coherent_mem *mem)
{
+ if (!dev)
+ return -ENODEV;
+
if (dev->dma_mem)
return -EBUSY;
@@ -133,7 +155,7 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
return ERR_PTR(-EINVAL);
spin_lock_irqsave(&mem->spinlock, flags);
- pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
+ pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
spin_unlock_irqrestore(&mem->spinlock, flags);
@@ -161,15 +183,12 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle, void **ret)
{
- struct dma_coherent_mem *mem;
+ struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
int order = get_order(size);
unsigned long flags;
int pageno;
int dma_memory_map;
- if (!dev)
- return 0;
- mem = dev->dma_mem;
if (!mem)
return 0;
@@ -186,7 +205,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
/*
* Memory was found in the per-device area.
*/
- *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+ *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
*ret = mem->virt_base + (pageno << PAGE_SHIFT);
dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
spin_unlock_irqrestore(&mem->spinlock, flags);
@@ -223,7 +242,7 @@ EXPORT_SYMBOL(dma_alloc_from_coherent);
*/
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
- struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+ struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
if (mem && vaddr >= mem->virt_base && vaddr <
(mem->virt_base + (mem->size << PAGE_SHIFT))) {
@@ -257,7 +276,7 @@ EXPORT_SYMBOL(dma_release_from_coherent);
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
void *vaddr, size_t size, int *ret)
{
- struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+ struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
if (mem && vaddr >= mem->virt_base && vaddr + size <=
(mem->virt_base + (mem->size << PAGE_SHIFT))) {
@@ -287,6 +306,8 @@ EXPORT_SYMBOL(dma_mmap_from_coherent);
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
+static struct reserved_mem *dma_reserved_default_memory __initdata;
+
static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
struct dma_coherent_mem *mem = rmem->priv;
@@ -299,6 +320,7 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
&rmem->base, (unsigned long)rmem->size / SZ_1M);
return -ENODEV;
}
+ mem->use_dev_dma_pfn_offset = true;
rmem->priv = mem;
dma_assign_coherent_memory(dev, mem);
return 0;
@@ -307,7 +329,8 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
static void rmem_dma_device_release(struct reserved_mem *rmem,
struct device *dev)
{
- dev->dma_mem = NULL;
+ if (dev)
+ dev->dma_mem = NULL;
}
static const struct reserved_mem_ops rmem_dma_ops = {
@@ -327,6 +350,12 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem)
pr_err("Reserved memory: regions without no-map are not yet supported\n");
return -EINVAL;
}
+
+ if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
+ WARN(dma_reserved_default_memory,
+ "Reserved memory: region for default DMA coherent area is redefined\n");
+ dma_reserved_default_memory = rmem;
+ }
#endif
rmem->ops = &rmem_dma_ops;
@@ -334,5 +363,32 @@ static int __init rmem_dma_setup(struct reserved_mem *rmem)
&rmem->base, (unsigned long)rmem->size / SZ_1M);
return 0;
}
+
+static int __init dma_init_reserved_memory(void)
+{
+ const struct reserved_mem_ops *ops;
+ int ret;
+
+ if (!dma_reserved_default_memory)
+ return -ENOMEM;
+
+ ops = dma_reserved_default_memory->ops;
+
+ /*
+ * We rely on rmem_dma_device_init() not propagating the error from
+ * dma_assign_coherent_memory() for a NULL device.
+ */
+ ret = ops->device_init(dma_reserved_default_memory, NULL);
+
+ if (!ret) {
+ dma_coherent_default_memory = dma_reserved_default_memory->priv;
+ pr_info("DMA: default coherent area is set\n");
+ }
+
+ return ret;
+}
+
+core_initcall(dma_init_reserved_memory);
+
RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 9dbef4d1baa4..5096755d185e 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -22,20 +22,15 @@ struct dma_devres {
size_t size;
void *vaddr;
dma_addr_t dma_handle;
+ unsigned long attrs;
};
-static void dmam_coherent_release(struct device *dev, void *res)
+static void dmam_release(struct device *dev, void *res)
{
struct dma_devres *this = res;
- dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle);
-}
-
-static void dmam_noncoherent_release(struct device *dev, void *res)
-{
- struct dma_devres *this = res;
-
- dma_free_noncoherent(dev, this->size, this->vaddr, this->dma_handle);
+ dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
+ this->attrs);
}
static int dmam_match(struct device *dev, void *res, void *match_data)
@@ -69,7 +64,7 @@ void *dmam_alloc_coherent(struct device *dev, size_t size,
struct dma_devres *dr;
void *vaddr;
- dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp);
+ dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
if (!dr)
return NULL;
@@ -104,35 +99,35 @@ void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
struct dma_devres match_data = { size, vaddr, dma_handle };
dma_free_coherent(dev, size, vaddr, dma_handle);
- WARN_ON(devres_destroy(dev, dmam_coherent_release, dmam_match,
- &match_data));
+ WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);
/**
- * dmam_alloc_non_coherent - Managed dma_alloc_noncoherent()
+ * dmam_alloc_attrs - Managed dma_alloc_attrs()
* @dev: Device to allocate non_coherent memory for
* @size: Size of allocation
* @dma_handle: Out argument for allocated DMA handle
* @gfp: Allocation flags
+ * @attrs: Flags in the DMA_ATTR_* namespace.
*
- * Managed dma_alloc_noncoherent(). Memory allocated using this
- * function will be automatically released on driver detach.
+ * Managed dma_alloc_attrs(). Memory allocated using this function will be
+ * automatically released on driver detach.
*
* RETURNS:
* Pointer to allocated memory on success, NULL on failure.
*/
-void *dmam_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
{
struct dma_devres *dr;
void *vaddr;
- dr = devres_alloc(dmam_noncoherent_release, sizeof(*dr), gfp);
+ dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
if (!dr)
return NULL;
- vaddr = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
+ vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
if (!vaddr) {
devres_free(dr);
return NULL;
@@ -141,32 +136,13 @@ void *dmam_alloc_noncoherent(struct device *dev, size_t size,
dr->vaddr = vaddr;
dr->dma_handle = *dma_handle;
dr->size = size;
+ dr->attrs = attrs;
devres_add(dev, dr);
return vaddr;
}
-EXPORT_SYMBOL(dmam_alloc_noncoherent);
-
-/**
- * dmam_free_coherent - Managed dma_free_noncoherent()
- * @dev: Device to free noncoherent memory for
- * @size: Size of allocation
- * @vaddr: Virtual address of the memory to free
- * @dma_handle: DMA handle of the memory to free
- *
- * Managed dma_free_noncoherent().
- */
-void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
-{
- struct dma_devres match_data = { size, vaddr, dma_handle };
-
- dma_free_noncoherent(dev, size, vaddr, dma_handle);
- WARN_ON(!devres_destroy(dev, dmam_noncoherent_release, dmam_match,
- &match_data));
-}
-EXPORT_SYMBOL(dmam_free_noncoherent);
+EXPORT_SYMBOL(dmam_alloc_attrs);
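/*
 * Editorial sketch, not part of the patch: a driver that previously used
 * dmam_alloc_noncoherent() can get the same managed behaviour through
 * dmam_alloc_attrs() with DMA_ATTR_NON_CONSISTENT. Names other than the
 * dmam_* API are made up for illustration.
 */
static int example_probe_alloc(struct device *dev, size_t size)
{
	dma_addr_t dma;
	void *vaddr = dmam_alloc_attrs(dev, size, &dma, GFP_KERNEL,
				       DMA_ATTR_NON_CONSISTENT);

	if (!vaddr)
		return -ENOMEM;
	/* memory is freed automatically on driver detach */
	return 0;
}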
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
@@ -251,7 +227,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
int ret = -ENXIO;
-#if defined(CONFIG_MMU) && !defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP)
+#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
unsigned long user_count = vma_pages(vma);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
@@ -268,7 +244,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
user_count << PAGE_SHIFT,
vma->vm_page_prot);
}
-#endif /* CONFIG_MMU && !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
+#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
return ret;
}
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index cc4f1d0cbffe..c7c4e0325cdb 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -128,6 +128,9 @@ static ssize_t show_mem_removable(struct device *dev,
int ret = 1;
struct memory_block *mem = to_memory_block(dev);
+ if (mem->state != MEM_ONLINE)
+ goto out;
+
for (i = 0; i < sections_per_block; i++) {
if (!present_section_nr(mem->start_section_nr + i))
continue;
@@ -135,6 +138,7 @@ static ssize_t show_mem_removable(struct device *dev,
ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
}
+out:
return sprintf(buf, "%d\n", ret);
}
@@ -388,39 +392,43 @@ static ssize_t show_valid_zones(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct memory_block *mem = to_memory_block(dev);
- unsigned long start_pfn, end_pfn;
- unsigned long valid_start, valid_end, valid_pages;
+ unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
- struct zone *zone;
- int zone_shift = 0;
+ unsigned long valid_start_pfn, valid_end_pfn;
+ bool append = false;
+ int nid;
- start_pfn = section_nr_to_pfn(mem->start_section_nr);
- end_pfn = start_pfn + nr_pages;
-
- /* The block contains more than one zone can not be offlined. */
- if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
+ /*
+ * A block that contains more than one zone cannot be offlined.
+ * This can happen e.g. for ZONE_DMA and ZONE_DMA32.
+ */
+ if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn))
return sprintf(buf, "none\n");
- zone = page_zone(pfn_to_page(valid_start));
- valid_pages = valid_end - valid_start;
+ start_pfn = valid_start_pfn;
+ nr_pages = valid_end_pfn - start_pfn;
- /* MMOP_ONLINE_KEEP */
- sprintf(buf, "%s", zone->name);
-
- /* MMOP_ONLINE_KERNEL */
- zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
- if (zone_shift) {
- strcat(buf, " ");
- strcat(buf, (zone + zone_shift)->name);
+ /*
+ * Check the existing zone. Make sure that we do that only on the
+ * online nodes otherwise the page_zone is not reliable
+ */
+ if (mem->state == MEM_ONLINE) {
+ strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
+ goto out;
}
- /* MMOP_ONLINE_MOVABLE */
- zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
- if (zone_shift) {
- strcat(buf, " ");
- strcat(buf, (zone + zone_shift)->name);
+ nid = pfn_to_nid(start_pfn);
+ if (allow_online_pfn_range(nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL)) {
+ strcat(buf, default_zone_for_pfn(nid, start_pfn, nr_pages)->name);
+ append = true;
}
+ if (allow_online_pfn_range(nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE)) {
+ if (append)
+ strcat(buf, " ");
+ strcat(buf, NODE_DATA(nid)->node_zones[ZONE_MOVABLE].name);
+ }
+out:
strcat(buf, "\n");
return strlen(buf);
@@ -685,14 +693,6 @@ static int add_memory_block(int base_section_nr)
return 0;
}
-static bool is_zone_device_section(struct mem_section *ms)
-{
- struct page *page;
-
- page = sparse_decode_mem_map(ms->section_mem_map, __section_nr(ms));
- return is_zone_device_page(page);
-}
-
/*
* need an interface for the VM to add new memory regions,
* but without onlining it.
@@ -702,9 +702,6 @@ int register_new_memory(int nid, struct mem_section *section)
int ret = 0;
struct memory_block *mem;
- if (is_zone_device_section(section))
- return 0;
-
mutex_lock(&mem_sysfs_mutex);
mem = find_memory_block(section);
@@ -741,11 +738,16 @@ static int remove_memory_section(unsigned long node_id,
{
struct memory_block *mem;
- if (is_zone_device_section(section))
- return 0;
-
mutex_lock(&mem_sysfs_mutex);
+
+ /*
+ * Some users of the memory hotplug do not want/need memblock to
+ * track all sections. Skip over those.
+ */
mem = find_memory_block(section);
+ if (!mem)
+ goto out_unlock;
+
unregister_mem_sect_under_nodes(mem, __section_nr(section));
mem->section_count--;
@@ -754,6 +756,7 @@ static int remove_memory_section(unsigned long node_id,
else
put_device(&mem->dev);
+out_unlock:
mutex_unlock(&mem_sysfs_mutex);
return 0;
}
@@ -820,6 +823,10 @@ int __init memory_dev_init(void)
*/
mutex_lock(&mem_sysfs_mutex);
for (i = 0; i < NR_MEM_SECTIONS; i += sections_per_block) {
+ /* Don't iterate over sections we know are !present: */
+ if (i > __highest_present_section_nr)
+ break;
+
err = add_memory_block(i);
if (!ret)
ret = err;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 0440d95c9b5b..73d39bc58c42 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -129,11 +129,11 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
- nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE) +
- sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
- nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE)),
+ nid, K(node_page_state(pgdat, NR_SLAB_RECLAIMABLE) +
+ node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE)),
+ nid, K(node_page_state(pgdat, NR_SLAB_RECLAIMABLE)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
+ nid, K(node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE)),
nid, K(node_page_state(pgdat, NR_ANON_THPS) *
HPAGE_PMD_NR),
nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
@@ -141,7 +141,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
HPAGE_PMD_NR));
#else
- nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
+ nid, K(node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE)));
#endif
n += hugetlb_report_node_meminfo(nid, buf + n);
return n;
@@ -368,21 +368,14 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
}
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
-#define page_initialized(page) (page->lru.next)
-
static int __ref get_nid_for_pfn(unsigned long pfn)
{
- struct page *page;
-
if (!pfn_valid_within(pfn))
return -1;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
if (system_state < SYSTEM_RUNNING)
return early_pfn_to_nid(pfn);
#endif
- page = pfn_to_page(pfn);
- if (!page_initialized(page))
- return -1;
return pfn_to_nid(pfn);
}
@@ -468,10 +461,9 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
return 0;
}
-static int link_mem_sections(int nid)
+int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
{
- unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn;
- unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_spanned_pages;
+ unsigned long end_pfn = start_pfn + nr_pages;
unsigned long pfn;
struct memory_block *mem_blk = NULL;
int err = 0;
@@ -559,10 +551,7 @@ static int node_memory_callback(struct notifier_block *self,
return NOTIFY_OK;
}
#endif /* CONFIG_HUGETLBFS */
-#else /* !CONFIG_MEMORY_HOTPLUG_SPARSE */
-
-static int link_mem_sections(int nid) { return 0; }
-#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
+#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
!defined(CONFIG_HUGETLBFS)
@@ -576,39 +565,32 @@ static void init_node_hugetlb_work(int nid) { }
#endif
-int register_one_node(int nid)
+int __register_one_node(int nid)
{
- int error = 0;
+ int p_node = parent_node(nid);
+ struct node *parent = NULL;
+ int error;
int cpu;
- if (node_online(nid)) {
- int p_node = parent_node(nid);
- struct node *parent = NULL;
-
- if (p_node != nid)
- parent = node_devices[p_node];
-
- node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
- if (!node_devices[nid])
- return -ENOMEM;
-
- error = register_node(node_devices[nid], nid, parent);
+ if (p_node != nid)
+ parent = node_devices[p_node];
- /* link cpu under this node */
- for_each_present_cpu(cpu) {
- if (cpu_to_node(cpu) == nid)
- register_cpu_under_node(cpu, nid);
- }
+ node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
+ if (!node_devices[nid])
+ return -ENOMEM;
- /* link memory sections under this node */
- error = link_mem_sections(nid);
+ error = register_node(node_devices[nid], nid, parent);
- /* initialize work queue for memory hot plug */
- init_node_hugetlb_work(nid);
+ /* link cpu under this node */
+ for_each_present_cpu(cpu) {
+ if (cpu_to_node(cpu) == nid)
+ register_cpu_under_node(cpu, nid);
}
- return error;
+ /* initialize work queue for memory hot plug */
+ init_node_hugetlb_work(nid);
+ return error;
}
void unregister_one_node(int nid)
@@ -657,9 +639,7 @@ static struct node_attr node_state_attr[] = {
#ifdef CONFIG_HIGHMEM
[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
-#ifdef CONFIG_MOVABLE_NODE
[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
-#endif
[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
};
@@ -670,9 +650,7 @@ static struct attribute *node_state_attrs[] = {
#ifdef CONFIG_HIGHMEM
&node_state_attr[N_HIGH_MEMORY].attr.attr,
#endif
-#ifdef CONFIG_MOVABLE_NODE
&node_state_attr[N_MEMORY].attr.attr,
-#endif
&node_state_attr[N_CPU].attr.attr,
NULL
};
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 97332d094fe2..d1bd99271066 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(platform_device_add_data);
* platform device is released.
*/
int platform_device_add_properties(struct platform_device *pdev,
- struct property_entry *properties)
+ const struct property_entry *properties)
{
return device_add_properties(&pdev->dev, properties);
}
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 6112e99bedf7..17723fd50a53 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -22,6 +22,7 @@
#ifdef CONFIG_BLK_DEV_RAM_DAX
#include <linux/pfn_t.h>
#include <linux/dax.h>
+#include <linux/uio.h>
#endif
#include <linux/uaccess.h>
@@ -354,8 +355,15 @@ static long brd_dax_direct_access(struct dax_device *dax_dev,
return __brd_direct_access(brd, pgoff, nr_pages, kaddr, pfn);
}
+static size_t brd_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+{
+ return copy_from_iter(addr, bytes, i);
+}
+
static const struct dax_operations brd_dax_ops = {
.direct_access = brd_dax_direct_access,
+ .copy_from_iter = brd_dax_copy_from_iter,
};
#endif
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index ce823647a9c4..9c00f29e40c1 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -192,6 +192,7 @@ static int print_unex = 1;
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/async.h>
+#include <linux/compat.h>
/*
* PS/2 floppies have much slower step rates than regular floppies.
@@ -3568,6 +3569,330 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode,
return ret;
}
+#ifdef CONFIG_COMPAT
+
+struct compat_floppy_drive_params {
+ char cmos;
+ compat_ulong_t max_dtr;
+ compat_ulong_t hlt;
+ compat_ulong_t hut;
+ compat_ulong_t srt;
+ compat_ulong_t spinup;
+ compat_ulong_t spindown;
+ unsigned char spindown_offset;
+ unsigned char select_delay;
+ unsigned char rps;
+ unsigned char tracks;
+ compat_ulong_t timeout;
+ unsigned char interleave_sect;
+ struct floppy_max_errors max_errors;
+ char flags;
+ char read_track;
+ short autodetect[8];
+ compat_int_t checkfreq;
+ compat_int_t native_format;
+};
+
+struct compat_floppy_drive_struct {
+ signed char flags;
+ compat_ulong_t spinup_date;
+ compat_ulong_t select_date;
+ compat_ulong_t first_read_date;
+ short probed_format;
+ short track;
+ short maxblock;
+ short maxtrack;
+ compat_int_t generation;
+ compat_int_t keep_data;
+ compat_int_t fd_ref;
+ compat_int_t fd_device;
+ compat_int_t last_checked;
+ compat_caddr_t dmabuf;
+ compat_int_t bufblocks;
+};
+
+struct compat_floppy_fdc_state {
+ compat_int_t spec1;
+ compat_int_t spec2;
+ compat_int_t dtr;
+ unsigned char version;
+ unsigned char dor;
+ compat_ulong_t address;
+ unsigned int rawcmd:2;
+ unsigned int reset:1;
+ unsigned int need_configure:1;
+ unsigned int perp_mode:2;
+ unsigned int has_fifo:1;
+ unsigned int driver_version;
+ unsigned char track[4];
+};
+
+struct compat_floppy_write_errors {
+ unsigned int write_errors;
+ compat_ulong_t first_error_sector;
+ compat_int_t first_error_generation;
+ compat_ulong_t last_error_sector;
+ compat_int_t last_error_generation;
+ compat_uint_t badness;
+};
+
+#define FDSETPRM32 _IOW(2, 0x42, struct compat_floppy_struct)
+#define FDGETPRM32 _IOR(2, 0x04, struct compat_floppy_struct)
+#define FDDEFPRM32 _IOW(2, 0x43, struct compat_floppy_struct)
+#define FDSETDRVPRM32 _IOW(2, 0x90, struct compat_floppy_drive_params)
+#define FDGETDRVPRM32 _IOR(2, 0x11, struct compat_floppy_drive_params)
+#define FDGETDRVSTAT32 _IOR(2, 0x12, struct compat_floppy_drive_struct)
+#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct compat_floppy_drive_struct)
+#define FDGETFDCSTAT32 _IOR(2, 0x15, struct compat_floppy_fdc_state)
+#define FDWERRORGET32 _IOR(2, 0x17, struct compat_floppy_write_errors)
+
+static int compat_set_geometry(struct block_device *bdev, fmode_t mode, unsigned int cmd,
+ struct compat_floppy_struct __user *arg)
+{
+ struct floppy_struct v;
+ int drive, type;
+ int err;
+
+ BUILD_BUG_ON(offsetof(struct floppy_struct, name) !=
+ offsetof(struct compat_floppy_struct, name));
+
+ if (!(mode & (FMODE_WRITE | FMODE_WRITE_IOCTL)))
+ return -EPERM;
+
+ memset(&v, 0, sizeof(struct floppy_struct));
+ if (copy_from_user(&v, arg, offsetof(struct floppy_struct, name)))
+ return -EFAULT;
+
+ mutex_lock(&floppy_mutex);
+ drive = (long)bdev->bd_disk->private_data;
+ type = ITYPE(UDRS->fd_device);
+ err = set_geometry(cmd == FDSETPRM32 ? FDSETPRM : FDDEFPRM,
+ &v, drive, type, bdev);
+ mutex_unlock(&floppy_mutex);
+ return err;
+}
+
+static int compat_get_prm(int drive,
+ struct compat_floppy_struct __user *arg)
+{
+ struct compat_floppy_struct v;
+ struct floppy_struct *p;
+ int err;
+
+ memset(&v, 0, sizeof(v));
+ mutex_lock(&floppy_mutex);
+ err = get_floppy_geometry(drive, ITYPE(UDRS->fd_device), &p);
+ if (err) {
+ mutex_unlock(&floppy_mutex);
+ return err;
+ }
+ memcpy(&v, p, offsetof(struct floppy_struct, name));
+ mutex_unlock(&floppy_mutex);
+ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_struct)))
+ return -EFAULT;
+ return 0;
+}
+
+static int compat_setdrvprm(int drive,
+ struct compat_floppy_drive_params __user *arg)
+{
+ struct compat_floppy_drive_params v;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&v, arg, sizeof(struct compat_floppy_drive_params)))
+ return -EFAULT;
+ mutex_lock(&floppy_mutex);
+ UDP->cmos = v.cmos;
+ UDP->max_dtr = v.max_dtr;
+ UDP->hlt = v.hlt;
+ UDP->hut = v.hut;
+ UDP->srt = v.srt;
+ UDP->spinup = v.spinup;
+ UDP->spindown = v.spindown;
+ UDP->spindown_offset = v.spindown_offset;
+ UDP->select_delay = v.select_delay;
+ UDP->rps = v.rps;
+ UDP->tracks = v.tracks;
+ UDP->timeout = v.timeout;
+ UDP->interleave_sect = v.interleave_sect;
+ UDP->max_errors = v.max_errors;
+ UDP->flags = v.flags;
+ UDP->read_track = v.read_track;
+ memcpy(UDP->autodetect, v.autodetect, sizeof(v.autodetect));
+ UDP->checkfreq = v.checkfreq;
+ UDP->native_format = v.native_format;
+ mutex_unlock(&floppy_mutex);
+ return 0;
+}
+
+static int compat_getdrvprm(int drive,
+ struct compat_floppy_drive_params __user *arg)
+{
+ struct compat_floppy_drive_params v;
+
+ memset(&v, 0, sizeof(struct compat_floppy_drive_params));
+ mutex_lock(&floppy_mutex);
+ v.cmos = UDP->cmos;
+ v.max_dtr = UDP->max_dtr;
+ v.hlt = UDP->hlt;
+ v.hut = UDP->hut;
+ v.srt = UDP->srt;
+ v.spinup = UDP->spinup;
+ v.spindown = UDP->spindown;
+ v.spindown_offset = UDP->spindown_offset;
+ v.select_delay = UDP->select_delay;
+ v.rps = UDP->rps;
+ v.tracks = UDP->tracks;
+ v.timeout = UDP->timeout;
+ v.interleave_sect = UDP->interleave_sect;
+ v.max_errors = UDP->max_errors;
+ v.flags = UDP->flags;
+ v.read_track = UDP->read_track;
+ memcpy(v.autodetect, UDP->autodetect, sizeof(v.autodetect));
+ v.checkfreq = UDP->checkfreq;
+ v.native_format = UDP->native_format;
+ mutex_unlock(&floppy_mutex);
+
+ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params)))
+ return -EFAULT;
+ return 0;
+}
+
+static int compat_getdrvstat(int drive, bool poll,
+ struct compat_floppy_drive_struct __user *arg)
+{
+ struct compat_floppy_drive_struct v;
+
+ memset(&v, 0, sizeof(struct compat_floppy_drive_struct));
+ mutex_lock(&floppy_mutex);
+
+ if (poll) {
+ if (lock_fdc(drive))
+ goto Eintr;
+ if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
+ goto Eintr;
+ process_fd_request();
+ }
+ v.spinup_date = UDRS->spinup_date;
+ v.select_date = UDRS->select_date;
+ v.first_read_date = UDRS->first_read_date;
+ v.probed_format = UDRS->probed_format;
+ v.track = UDRS->track;
+ v.maxblock = UDRS->maxblock;
+ v.maxtrack = UDRS->maxtrack;
+ v.generation = UDRS->generation;
+ v.keep_data = UDRS->keep_data;
+ v.fd_ref = UDRS->fd_ref;
+ v.fd_device = UDRS->fd_device;
+ v.last_checked = UDRS->last_checked;
+ v.dmabuf = (uintptr_t)UDRS->dmabuf;
+ v.bufblocks = UDRS->bufblocks;
+ mutex_unlock(&floppy_mutex);
+
+ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct)))
+ return -EFAULT;
+ return 0;
+Eintr:
+ mutex_unlock(&floppy_mutex);
+ return -EINTR;
+}
+
+static int compat_getfdcstat(int drive,
+ struct compat_floppy_fdc_state __user *arg)
+{
+ struct compat_floppy_fdc_state v32;
+ struct floppy_fdc_state v;
+
+ mutex_lock(&floppy_mutex);
+ v = *UFDCS;
+ mutex_unlock(&floppy_mutex);
+
+ memset(&v32, 0, sizeof(struct compat_floppy_fdc_state));
+ v32.spec1 = v.spec1;
+ v32.spec2 = v.spec2;
+ v32.dtr = v.dtr;
+ v32.version = v.version;
+ v32.dor = v.dor;
+ v32.address = v.address;
+ v32.rawcmd = v.rawcmd;
+ v32.reset = v.reset;
+ v32.need_configure = v.need_configure;
+ v32.perp_mode = v.perp_mode;
+ v32.has_fifo = v.has_fifo;
+ v32.driver_version = v.driver_version;
+ memcpy(v32.track, v.track, 4);
+ if (copy_to_user(arg, &v32, sizeof(struct compat_floppy_fdc_state)))
+ return -EFAULT;
+ return 0;
+}
+
+static int compat_werrorget(int drive,
+ struct compat_floppy_write_errors __user *arg)
+{
+ struct compat_floppy_write_errors v32;
+ struct floppy_write_errors v;
+
+ memset(&v32, 0, sizeof(struct compat_floppy_write_errors));
+ mutex_lock(&floppy_mutex);
+ v = *UDRWE;
+ mutex_unlock(&floppy_mutex);
+ v32.write_errors = v.write_errors;
+ v32.first_error_sector = v.first_error_sector;
+ v32.first_error_generation = v.first_error_generation;
+ v32.last_error_sector = v.last_error_sector;
+ v32.last_error_generation = v.last_error_generation;
+ v32.badness = v.badness;
+ if (copy_to_user(arg, &v32, sizeof(struct compat_floppy_write_errors)))
+ return -EFAULT;
+ return 0;
+}
+
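+/*
+ * 32-bit ioctl entry point: commands whose argument layout matches the
+ * native one are forwarded to fd_ioctl() (with compat_ptr() applied when
+ * the argument is a pointer), while commands whose structure layouts
+ * differ are translated by the compat helpers above.
+ */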
+static int fd_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
+ unsigned long param)
+{
+ int drive = (long)bdev->bd_disk->private_data;
+ switch (cmd) {
+ case FDMSGON:
+ case FDMSGOFF:
+ case FDSETEMSGTRESH:
+ case FDFLUSH:
+ case FDWERRORCLR:
+ case FDEJECT:
+ case FDCLRPRM:
+ case FDFMTBEG:
+ case FDRESET:
+ case FDTWADDLE:
+ return fd_ioctl(bdev, mode, cmd, param);
+ case FDSETMAXERRS:
+ case FDGETMAXERRS:
+ case FDGETDRVTYP:
+ case FDFMTEND:
+ case FDFMTTRK:
+ case FDRAWCMD:
+ return fd_ioctl(bdev, mode, cmd,
+ (unsigned long)compat_ptr(param));
+ case FDSETPRM32:
+ case FDDEFPRM32:
+ return compat_set_geometry(bdev, mode, cmd, compat_ptr(param));
+ case FDGETPRM32:
+ return compat_get_prm(drive, compat_ptr(param));
+ case FDSETDRVPRM32:
+ return compat_setdrvprm(drive, compat_ptr(param));
+ case FDGETDRVPRM32:
+ return compat_getdrvprm(drive, compat_ptr(param));
+ case FDPOLLDRVSTAT32:
+ return compat_getdrvstat(drive, true, compat_ptr(param));
+ case FDGETDRVSTAT32:
+ return compat_getdrvstat(drive, false, compat_ptr(param));
+ case FDGETFDCSTAT32:
+ return compat_getfdcstat(drive, compat_ptr(param));
+ case FDWERRORGET32:
+ return compat_werrorget(drive, compat_ptr(param));
+ }
+ return -EINVAL;
+}
+#endif
+
static void __init config_types(void)
{
bool has_drive = false;
@@ -3885,6 +4210,9 @@ static const struct block_device_operations floppy_fops = {
.getgeo = fd_getgeo,
.check_events = floppy_check_events,
.revalidate_disk = floppy_revalidate,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = fd_compat_ioctl,
+#endif
};
/*
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 558e245fab0c..d3e3af22a088 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -469,6 +469,7 @@ static bool zram_same_page_write(struct zram *zram, u32 index,
zram_slot_unlock(zram, index);
atomic64_inc(&zram->stats.same_pages);
+ atomic64_inc(&zram->stats.pages_stored);
return true;
}
kunmap_atomic(mem);
@@ -524,6 +525,7 @@ static void zram_free_page(struct zram *zram, size_t index)
zram_clear_flag(zram, index, ZRAM_SAME);
zram_set_element(zram, index, 0);
atomic64_dec(&zram->stats.same_pages);
+ atomic64_dec(&zram->stats.pages_stored);
return;
}
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index f45119c5337d..2ffca4232686 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -231,6 +231,102 @@ static int handle_send_req(ipmi_user_t user,
return rv;
}
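+
+/*
+ * Pull the oldest message off the receive queue and copy it to
+ * userspace through @copyout; on any failure the message is put back
+ * at the head of the queue so that it is not lost.
+ */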
+static int handle_recv(struct ipmi_file_private *priv,
+ bool trunc, struct ipmi_recv *rsp,
+ int (*copyout)(struct ipmi_recv *, void __user *),
+ void __user *to)
+{
+ int addr_len;
+ struct list_head *entry;
+ struct ipmi_recv_msg *msg;
+ unsigned long flags;
+ int rv = 0, rv2 = 0;
+
+ /* We claim a mutex because we don't want two
+ users getting something from the queue at a time.
+ Since we have to release the spinlock before we can
+ copy the data to the user, it's possible another
+ user will grab something from the queue, too. Then
+ the messages might get out of order if something
+ fails and the message gets put back onto the
+ queue. This mutex prevents that problem. */
+ mutex_lock(&priv->recv_mutex);
+
+ /* Grab the message off the list. */
+ spin_lock_irqsave(&(priv->recv_msg_lock), flags);
+ if (list_empty(&(priv->recv_msgs))) {
+ spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
+ rv = -EAGAIN;
+ goto recv_err;
+ }
+ entry = priv->recv_msgs.next;
+ msg = list_entry(entry, struct ipmi_recv_msg, link);
+ list_del(entry);
+ spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
+
+ addr_len = ipmi_addr_length(msg->addr.addr_type);
+ if (rsp->addr_len < addr_len)
+ {
+ rv = -EINVAL;
+ goto recv_putback_on_err;
+ }
+
+ if (copy_to_user(rsp->addr, &(msg->addr), addr_len)) {
+ rv = -EFAULT;
+ goto recv_putback_on_err;
+ }
+ rsp->addr_len = addr_len;
+
+ rsp->recv_type = msg->recv_type;
+ rsp->msgid = msg->msgid;
+ rsp->msg.netfn = msg->msg.netfn;
+ rsp->msg.cmd = msg->msg.cmd;
+
+ if (msg->msg.data_len > 0) {
+ if (rsp->msg.data_len < msg->msg.data_len) {
+ /* Remember the truncation so it is still reported on success. */
+ rv2 = -EMSGSIZE;
+ if (trunc)
+ msg->msg.data_len = rsp->msg.data_len;
+ else
+ goto recv_putback_on_err;
+ }
+
+ if (copy_to_user(rsp->msg.data,
+ msg->msg.data,
+ msg->msg.data_len))
+ {
+ rv = -EFAULT;
+ goto recv_putback_on_err;
+ }
+ rsp->msg.data_len = msg->msg.data_len;
+ } else {
+ rsp->msg.data_len = 0;
+ }
+
+ rv = copyout(rsp, to);
+ if (rv)
+ goto recv_putback_on_err;
+
+ mutex_unlock(&priv->recv_mutex);
+ ipmi_free_recv_msg(msg);
+ return rv2;
+
+recv_putback_on_err:
+ /* If we got an error, put the message back onto
+ the head of the queue. */
+ spin_lock_irqsave(&(priv->recv_msg_lock), flags);
+ list_add(entry, &(priv->recv_msgs));
+ spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
+recv_err:
+ mutex_unlock(&priv->recv_mutex);
+ return rv;
+}
+
+static int copyout_recv(struct ipmi_recv *rsp, void __user *to)
+{
+ return copy_to_user(to, rsp, sizeof(struct ipmi_recv)) ? -EFAULT : 0;
+}
+
static int ipmi_ioctl(struct file *file,
unsigned int cmd,
unsigned long data)
@@ -277,100 +373,12 @@ static int ipmi_ioctl(struct file *file,
case IPMICTL_RECEIVE_MSG_TRUNC:
{
struct ipmi_recv rsp;
- int addr_len;
- struct list_head *entry;
- struct ipmi_recv_msg *msg;
- unsigned long flags;
-
-
- rv = 0;
- if (copy_from_user(&rsp, arg, sizeof(rsp))) {
- rv = -EFAULT;
- break;
- }
-
- /* We claim a mutex because we don't want two
- users getting something from the queue at a time.
- Since we have to release the spinlock before we can
- copy the data to the user, it's possible another
- user will grab something from the queue, too. Then
- the messages might get out of order if something
- fails and the message gets put back onto the
- queue. This mutex prevents that problem. */
- mutex_lock(&priv->recv_mutex);
-
- /* Grab the message off the list. */
- spin_lock_irqsave(&(priv->recv_msg_lock), flags);
- if (list_empty(&(priv->recv_msgs))) {
- spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
- rv = -EAGAIN;
- goto recv_err;
- }
- entry = priv->recv_msgs.next;
- msg = list_entry(entry, struct ipmi_recv_msg, link);
- list_del(entry);
- spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
-
- addr_len = ipmi_addr_length(msg->addr.addr_type);
- if (rsp.addr_len < addr_len)
- {
- rv = -EINVAL;
- goto recv_putback_on_err;
- }
- if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) {
+ if (copy_from_user(&rsp, arg, sizeof(rsp)))
rv = -EFAULT;
- goto recv_putback_on_err;
- }
- rsp.addr_len = addr_len;
-
- rsp.recv_type = msg->recv_type;
- rsp.msgid = msg->msgid;
- rsp.msg.netfn = msg->msg.netfn;
- rsp.msg.cmd = msg->msg.cmd;
-
- if (msg->msg.data_len > 0) {
- if (rsp.msg.data_len < msg->msg.data_len) {
- rv = -EMSGSIZE;
- if (cmd == IPMICTL_RECEIVE_MSG_TRUNC) {
- msg->msg.data_len = rsp.msg.data_len;
- } else {
- goto recv_putback_on_err;
- }
- }
-
- if (copy_to_user(rsp.msg.data,
- msg->msg.data,
- msg->msg.data_len))
- {
- rv = -EFAULT;
- goto recv_putback_on_err;
- }
- rsp.msg.data_len = msg->msg.data_len;
- } else {
- rsp.msg.data_len = 0;
- }
-
- if (copy_to_user(arg, &rsp, sizeof(rsp))) {
- rv = -EFAULT;
- goto recv_putback_on_err;
- }
-
- mutex_unlock(&priv->recv_mutex);
- ipmi_free_recv_msg(msg);
- break;
-
- recv_putback_on_err:
- /* If we got an error, put the message back onto
- the head of the queue. */
- spin_lock_irqsave(&(priv->recv_msg_lock), flags);
- list_add(entry, &(priv->recv_msgs));
- spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
- mutex_unlock(&priv->recv_mutex);
- break;
-
- recv_err:
- mutex_unlock(&priv->recv_mutex);
+ else
+ rv = handle_recv(priv, cmd == IPMICTL_RECEIVE_MSG_TRUNC,
+ &rsp, copyout_recv, arg);
break;
}
@@ -696,85 +704,56 @@ struct compat_ipmi_req_settime {
/*
* Define some helper functions for copying IPMI data
*/
-static long get_compat_ipmi_msg(struct ipmi_msg *p64,
- struct compat_ipmi_msg __user *p32)
-{
- compat_uptr_t tmp;
-
- if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
- __get_user(p64->netfn, &p32->netfn) ||
- __get_user(p64->cmd, &p32->cmd) ||
- __get_user(p64->data_len, &p32->data_len) ||
- __get_user(tmp, &p32->data))
- return -EFAULT;
- p64->data = compat_ptr(tmp);
- return 0;
-}
-
-static long put_compat_ipmi_msg(struct ipmi_msg *p64,
- struct compat_ipmi_msg __user *p32)
+static void get_compat_ipmi_msg(struct ipmi_msg *p64,
+ struct compat_ipmi_msg *p32)
{
- if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
- __put_user(p64->netfn, &p32->netfn) ||
- __put_user(p64->cmd, &p32->cmd) ||
- __put_user(p64->data_len, &p32->data_len))
- return -EFAULT;
- return 0;
+ p64->netfn = p32->netfn;
+ p64->cmd = p32->cmd;
+ p64->data_len = p32->data_len;
+ p64->data = compat_ptr(p32->data);
}
-static long get_compat_ipmi_req(struct ipmi_req *p64,
- struct compat_ipmi_req __user *p32)
+static void get_compat_ipmi_req(struct ipmi_req *p64,
+ struct compat_ipmi_req *p32)
{
-
- compat_uptr_t tmp;
-
- if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
- __get_user(tmp, &p32->addr) ||
- __get_user(p64->addr_len, &p32->addr_len) ||
- __get_user(p64->msgid, &p32->msgid) ||
- get_compat_ipmi_msg(&p64->msg, &p32->msg))
- return -EFAULT;
- p64->addr = compat_ptr(tmp);
- return 0;
+ p64->addr = compat_ptr(p32->addr);
+ p64->addr_len = p32->addr_len;
+ p64->msgid = p32->msgid;
+ get_compat_ipmi_msg(&p64->msg, &p32->msg);
}
-static long get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
- struct compat_ipmi_req_settime __user *p32)
+static void get_compat_ipmi_req_settime(struct ipmi_req_settime *p64,
+ struct compat_ipmi_req_settime *p32)
{
- if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
- get_compat_ipmi_req(&p64->req, &p32->req) ||
- __get_user(p64->retries, &p32->retries) ||
- __get_user(p64->retry_time_ms, &p32->retry_time_ms))
- return -EFAULT;
- return 0;
+ get_compat_ipmi_req(&p64->req, &p32->req);
+ p64->retries = p32->retries;
+ p64->retry_time_ms = p32->retry_time_ms;
}
-static long get_compat_ipmi_recv(struct ipmi_recv *p64,
- struct compat_ipmi_recv __user *p32)
+static void get_compat_ipmi_recv(struct ipmi_recv *p64,
+ struct compat_ipmi_recv *p32)
{
- compat_uptr_t tmp;
-
- if (!access_ok(VERIFY_READ, p32, sizeof(*p32)) ||
- __get_user(p64->recv_type, &p32->recv_type) ||
- __get_user(tmp, &p32->addr) ||
- __get_user(p64->addr_len, &p32->addr_len) ||
- __get_user(p64->msgid, &p32->msgid) ||
- get_compat_ipmi_msg(&p64->msg, &p32->msg))
- return -EFAULT;
- p64->addr = compat_ptr(tmp);
- return 0;
+ memset(p64, 0, sizeof(struct ipmi_recv));
+ p64->recv_type = p32->recv_type;
+ p64->addr = compat_ptr(p32->addr);
+ p64->addr_len = p32->addr_len;
+ p64->msgid = p32->msgid;
+ get_compat_ipmi_msg(&p64->msg, &p32->msg);
}
-static long put_compat_ipmi_recv(struct ipmi_recv *p64,
- struct compat_ipmi_recv __user *p32)
+static int copyout_recv32(struct ipmi_recv *p64, void __user *to)
{
- if (!access_ok(VERIFY_WRITE, p32, sizeof(*p32)) ||
- __put_user(p64->recv_type, &p32->recv_type) ||
- __put_user(p64->addr_len, &p32->addr_len) ||
- __put_user(p64->msgid, &p32->msgid) ||
- put_compat_ipmi_msg(&p64->msg, &p32->msg))
- return -EFAULT;
- return 0;
+ struct compat_ipmi_recv v32;
+ memset(&v32, 0, sizeof(struct compat_ipmi_recv));
+ v32.recv_type = p64->recv_type;
+ v32.addr = ptr_to_compat(p64->addr);
+ v32.addr_len = p64->addr_len;
+ v32.msgid = p64->msgid;
+ v32.msg.netfn = p64->msg.netfn;
+ v32.msg.cmd = p64->msg.cmd;
+ v32.msg.data_len = p64->msg.data_len;
+ v32.msg.data = ptr_to_compat(p64->msg.data);
+ return copy_to_user(to, &v32, sizeof(v32)) ? -EFAULT : 0;
}
/*
@@ -783,17 +762,19 @@ static long put_compat_ipmi_recv(struct ipmi_recv *p64,
static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg)
{
- int rc;
struct ipmi_file_private *priv = filep->private_data;
switch(cmd) {
case COMPAT_IPMICTL_SEND_COMMAND:
{
struct ipmi_req rp;
+ struct compat_ipmi_req r32;
- if (get_compat_ipmi_req(&rp, compat_ptr(arg)))
+ if (copy_from_user(&r32, compat_ptr(arg), sizeof(r32)))
return -EFAULT;
+ get_compat_ipmi_req(&rp, &r32);
+
return handle_send_req(priv->user, &rp,
priv->default_retries,
priv->default_retry_time_ms);
@@ -801,42 +782,30 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
case COMPAT_IPMICTL_SEND_COMMAND_SETTIME:
{
struct ipmi_req_settime sp;
+ struct compat_ipmi_req_settime sp32;
- if (get_compat_ipmi_req_settime(&sp, compat_ptr(arg)))
+ if (copy_from_user(&sp32, compat_ptr(arg), sizeof(sp32)))
return -EFAULT;
+ get_compat_ipmi_req_settime(&sp, &sp32);
+
return handle_send_req(priv->user, &sp.req,
sp.retries, sp.retry_time_ms);
}
case COMPAT_IPMICTL_RECEIVE_MSG:
case COMPAT_IPMICTL_RECEIVE_MSG_TRUNC:
{
- struct ipmi_recv __user *precv64;
struct ipmi_recv recv64;
+ struct compat_ipmi_recv recv32;
- memset(&recv64, 0, sizeof(recv64));
- if (get_compat_ipmi_recv(&recv64, compat_ptr(arg)))
+ if (copy_from_user(&recv32, compat_ptr(arg), sizeof(recv32)))
return -EFAULT;
- precv64 = compat_alloc_user_space(sizeof(recv64));
- if (copy_to_user(precv64, &recv64, sizeof(recv64)))
- return -EFAULT;
-
- rc = ipmi_ioctl(filep,
- ((cmd == COMPAT_IPMICTL_RECEIVE_MSG)
- ? IPMICTL_RECEIVE_MSG
- : IPMICTL_RECEIVE_MSG_TRUNC),
- (unsigned long) precv64);
- if (rc != 0)
- return rc;
-
- if (copy_from_user(&recv64, precv64, sizeof(recv64)))
- return -EFAULT;
-
- if (put_compat_ipmi_recv(&recv64, compat_ptr(arg)))
- return -EFAULT;
+ get_compat_ipmi_recv(&recv64, &recv32);
- return rc;
+ return handle_recv(priv,
+ cmd == COMPAT_IPMICTL_RECEIVE_MSG_TRUNC,
+ &recv64, copyout_recv32, compat_ptr(arg));
}
default:
return ipmi_ioctl(filep, cmd, arg);
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 322b8a51ffc6..67ec9d3d04f5 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -143,6 +143,39 @@ static void tpm_devs_release(struct device *dev)
}
/**
+ * tpm_class_shutdown() - prepare the TPM device for loss of power.
+ * @dev: device to which the chip is associated.
+ *
+ * Issues a TPM2_Shutdown command prior to loss of power, as required by the
+ * TPM 2.0 spec.
+ * Then, calls bus- and device-specific shutdown code.
+ *
+ * XXX: This codepath relies on the fact that sysfs is not enabled for
+ * TPM2: sysfs uses an implicit lock on chip->ops, so this could race if TPM2
+ * has sysfs support enabled before TPM sysfs's implicit locking is fixed.
+ */
+static int tpm_class_shutdown(struct device *dev)
+{
+ struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ down_write(&chip->ops_sem);
+ tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ chip->ops = NULL;
+ up_write(&chip->ops_sem);
+ }
+ /* Allow bus- and device-specific code to run. Note: since chip->ops
+ * is NULL, more-specific shutdown code will not be able to issue TPM
+ * commands.
+ */
+ if (dev->bus && dev->bus->shutdown)
+ dev->bus->shutdown(dev);
+ else if (dev->driver && dev->driver->shutdown)
+ dev->driver->shutdown(dev);
+ return 0;
+}
+
+/**
* tpm_chip_alloc() - allocate a new struct tpm_chip instance
* @pdev: device to which the chip is associated
* At this point pdev must be initialized, but does not have to
@@ -181,6 +214,7 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
device_initialize(&chip->devs);
chip->dev.class = tpm_class;
+ chip->dev.class->shutdown = tpm_class_shutdown;
chip->dev.release = tpm_dev_release;
chip->dev.parent = pdev;
chip->dev.groups = chip->groups;
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index d2b4df6d9894..fe597e6c55c4 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -540,6 +540,47 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space,
}
EXPORT_SYMBOL_GPL(tpm_transmit_cmd);
+#define TPM_ORD_STARTUP 153
+#define TPM_ST_CLEAR 1
+
+/**
+ * tpm_startup - turn on the TPM
+ * @chip: TPM chip to use
+ *
+ * Normally the firmware should start the TPM. This function is provided as a
+ * workaround if that does not happen, for example when a TPM emulator is
+ * used.
+ *
+ * Return: same as tpm_transmit_cmd()
+ */
+int tpm_startup(struct tpm_chip *chip)
+{
+ struct tpm_buf buf;
+ int rc;
+
+ dev_info(&chip->dev, "starting up the TPM manually\n");
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_STARTUP);
+ if (rc < 0)
+ return rc;
+
+ tpm_buf_append_u16(&buf, TPM2_SU_CLEAR);
+ } else {
+ rc = tpm_buf_init(&buf, TPM_TAG_RQU_COMMAND, TPM_ORD_STARTUP);
+ if (rc < 0)
+ return rc;
+
+ tpm_buf_append_u16(&buf, TPM_ST_CLEAR);
+ }
+
+ rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0,
+ "attempting to start the TPM");
+
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
#define TPM_DIGEST_SIZE 20
#define TPM_RET_CODE_IDX 6
#define TPM_INTERNAL_RESULT_SIZE 200
@@ -586,27 +627,6 @@ ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
}
EXPORT_SYMBOL_GPL(tpm_getcap);
-#define TPM_ORD_STARTUP 153
-#define TPM_ST_CLEAR cpu_to_be16(1)
-#define TPM_ST_STATE cpu_to_be16(2)
-#define TPM_ST_DEACTIVATED cpu_to_be16(3)
-static const struct tpm_input_header tpm_startup_header = {
- .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND),
- .length = cpu_to_be32(12),
- .ordinal = cpu_to_be32(TPM_ORD_STARTUP)
-};
-
-static int tpm_startup(struct tpm_chip *chip, __be16 startup_type)
-{
- struct tpm_cmd_t start_cmd;
- start_cmd.header.in = tpm_startup_header;
-
- start_cmd.params.startup_in.startup_type = startup_type;
- return tpm_transmit_cmd(chip, NULL, &start_cmd,
- TPM_INTERNAL_RESULT_SIZE, 0,
- 0, "attempting to start the TPM");
-}
-
int tpm_get_timeouts(struct tpm_chip *chip)
{
cap_t cap;
@@ -636,10 +656,7 @@ int tpm_get_timeouts(struct tpm_chip *chip)
rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, NULL,
sizeof(cap.timeout));
if (rc == TPM_ERR_INVALID_POSTINIT) {
- /* The TPM is not started, we are the first to talk to it.
- Execute a startup command. */
- dev_info(&chip->dev, "Issuing TPM_STARTUP\n");
- if (tpm_startup(chip, TPM_ST_CLEAR))
+ if (tpm_startup(chip))
return rc;
rc = tpm_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap,
@@ -1102,6 +1119,9 @@ int tpm_pm_suspend(struct device *dev)
if (chip == NULL)
return -ENODEV;
+ if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
+ return 0;
+
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
tpm2_shutdown(chip, TPM2_SU_STATE);
return 0;
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index 4bd0997cfa2d..86f38d239476 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -36,9 +36,10 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
ssize_t err;
int i, rc;
char *str = buf;
-
struct tpm_chip *chip = to_tpm_chip(dev);
+ memset(&tpm_cmd, 0, sizeof(tpm_cmd));
+
tpm_cmd.header.in = tpm_readpubek_header;
err = tpm_transmit_cmd(chip, NULL, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
@@ -294,6 +295,9 @@ static const struct attribute_group tpm_dev_group = {
void tpm_sysfs_add_device(struct tpm_chip *chip)
{
+ /* XXX: If you wish to remove this restriction, you must first update
+ * tpm_sysfs to explicitly lock chip->ops.
+ */
if (chip->flags & TPM_CHIP_FLAG_TPM2)
return;
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 1df0521138d3..04fbff2edbf3 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -36,6 +36,10 @@
#include <linux/highmem.h>
#include <crypto/hash_info.h>
+#ifdef CONFIG_X86
+#include <asm/intel-family.h>
+#endif
+
enum tpm_const {
TPM_MINOR = 224, /* officially assigned */
TPM_BUFSIZE = 4096,
@@ -170,6 +174,7 @@ enum tpm_chip_flags {
TPM_CHIP_FLAG_IRQ = BIT(2),
TPM_CHIP_FLAG_VIRTUAL = BIT(3),
TPM_CHIP_FLAG_HAVE_TIMEOUTS = BIT(4),
+ TPM_CHIP_FLAG_ALWAYS_POWERED = BIT(5),
};
struct tpm_bios_log {
@@ -378,10 +383,6 @@ struct tpm_getrandom_in {
__be32 num_bytes;
} __packed;
-struct tpm_startup_in {
- __be16 startup_type;
-} __packed;
-
typedef union {
struct tpm_readpubek_params_out readpubek_out;
u8 readpubek_out_buffer[sizeof(struct tpm_readpubek_params_out)];
@@ -389,7 +390,6 @@ typedef union {
struct tpm_pcrread_out pcrread_out;
struct tpm_getrandom_in getrandom_in;
struct tpm_getrandom_out getrandom_out;
- struct tpm_startup_in startup_in;
} tpm_cmd_params;
struct tpm_cmd_t {
@@ -515,6 +515,7 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space,
const void *buf, size_t bufsiz,
size_t min_rsp_body_length, unsigned int flags,
const char *desc);
+int tpm_startup(struct tpm_chip *chip);
ssize_t tpm_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
const char *desc, size_t min_cap_length);
int tpm_get_timeouts(struct tpm_chip *);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 3a9964326279..f7f34b2aa981 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -35,24 +35,6 @@ struct tpm2_self_test_in {
u8 full_test;
} __packed;
-struct tpm2_pcr_read_in {
- __be32 pcr_selects_cnt;
- __be16 hash_alg;
- u8 pcr_select_size;
- u8 pcr_select[TPM2_PCR_SELECT_MIN];
-} __packed;
-
-struct tpm2_pcr_read_out {
- __be32 update_cnt;
- __be32 pcr_selects_cnt;
- __be16 hash_alg;
- u8 pcr_select_size;
- u8 pcr_select[TPM2_PCR_SELECT_MIN];
- __be32 digests_cnt;
- __be16 digest_size;
- u8 digest[TPM_DIGEST_SIZE];
-} __packed;
-
struct tpm2_get_tpm_pt_in {
__be32 cap_id;
__be32 property_id;
@@ -79,8 +61,6 @@ struct tpm2_get_random_out {
union tpm2_cmd_params {
struct tpm2_startup_in startup_in;
struct tpm2_self_test_in selftest_in;
- struct tpm2_pcr_read_in pcrread_in;
- struct tpm2_pcr_read_out pcrread_out;
struct tpm2_get_tpm_pt_in get_tpm_pt_in;
struct tpm2_get_tpm_pt_out get_tpm_pt_out;
struct tpm2_get_random_in getrandom_in;
@@ -227,18 +207,16 @@ static const u8 tpm2_ordinal_duration[TPM2_CC_LAST - TPM2_CC_FIRST + 1] = {
TPM_UNDEFINED /* 18f */
};
-#define TPM2_PCR_READ_IN_SIZE \
- (sizeof(struct tpm_input_header) + \
- sizeof(struct tpm2_pcr_read_in))
-
-#define TPM2_PCR_READ_RESP_BODY_SIZE \
- sizeof(struct tpm2_pcr_read_out)
-
-static const struct tpm_input_header tpm2_pcrread_header = {
- .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
- .length = cpu_to_be32(TPM2_PCR_READ_IN_SIZE),
- .ordinal = cpu_to_be32(TPM2_CC_PCR_READ)
-};
+struct tpm2_pcr_read_out {
+ __be32 update_cnt;
+ __be32 pcr_selects_cnt;
+ __be16 hash_alg;
+ u8 pcr_select_size;
+ u8 pcr_select[TPM2_PCR_SELECT_MIN];
+ __be32 digests_cnt;
+ __be16 digest_size;
+ u8 digest[];
+} __packed;
/**
* tpm2_pcr_read() - read a PCR value
@@ -251,29 +229,33 @@ static const struct tpm_input_header tpm2_pcrread_header = {
int tpm2_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
{
int rc;
- struct tpm2_cmd cmd;
- u8 *buf;
+ struct tpm_buf buf;
+ struct tpm2_pcr_read_out *out;
+ u8 pcr_select[TPM2_PCR_SELECT_MIN] = {0};
if (pcr_idx >= TPM2_PLATFORM_PCR)
return -EINVAL;
- cmd.header.in = tpm2_pcrread_header;
- cmd.params.pcrread_in.pcr_selects_cnt = cpu_to_be32(1);
- cmd.params.pcrread_in.hash_alg = cpu_to_be16(TPM2_ALG_SHA1);
- cmd.params.pcrread_in.pcr_select_size = TPM2_PCR_SELECT_MIN;
+ rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_PCR_READ);
+ if (rc)
+ return rc;
+
+ pcr_select[pcr_idx >> 3] = 1 << (pcr_idx & 0x7);
- memset(cmd.params.pcrread_in.pcr_select, 0,
- sizeof(cmd.params.pcrread_in.pcr_select));
- cmd.params.pcrread_in.pcr_select[pcr_idx >> 3] = 1 << (pcr_idx & 0x7);
+ tpm_buf_append_u32(&buf, 1);
+ tpm_buf_append_u16(&buf, TPM2_ALG_SHA1);
+ tpm_buf_append_u8(&buf, TPM2_PCR_SELECT_MIN);
+ tpm_buf_append(&buf, (const unsigned char *)pcr_select,
+ sizeof(pcr_select));
- rc = tpm_transmit_cmd(chip, NULL, &cmd, sizeof(cmd),
- TPM2_PCR_READ_RESP_BODY_SIZE,
- 0, "attempting to read a pcr value");
- if (rc == 0) {
- buf = cmd.params.pcrread_out.digest;
- memcpy(res_buf, buf, TPM_DIGEST_SIZE);
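+ /*
+ * A NULL res_buf means the caller (e.g. the self test polling loop)
+ * only wants the return code: skip the digest copy and pass a NULL
+ * description so tpm_transmit_cmd() stays silent on errors.
+ */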
+ rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0,
+ res_buf ? "attempting to read a pcr value" : NULL);
+ if (rc == 0 && res_buf) {
+ out = (struct tpm2_pcr_read_out *)&buf.data[TPM_HEADER_SIZE];
+ memcpy(res_buf, out->digest, SHA1_DIGEST_SIZE);
}
+ tpm_buf_destroy(&buf);
return rc;
}
@@ -779,36 +761,6 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
}
EXPORT_SYMBOL_GPL(tpm2_get_tpm_pt);
-#define TPM2_STARTUP_IN_SIZE \
- (sizeof(struct tpm_input_header) + \
- sizeof(struct tpm2_startup_in))
-
-static const struct tpm_input_header tpm2_startup_header = {
- .tag = cpu_to_be16(TPM2_ST_NO_SESSIONS),
- .length = cpu_to_be32(TPM2_STARTUP_IN_SIZE),
- .ordinal = cpu_to_be32(TPM2_CC_STARTUP)
-};
-
-/**
- * tpm2_startup() - send startup command to the TPM chip
- *
- * @chip: TPM chip to use.
- * @startup_type: startup type. The value is either
- * TPM_SU_CLEAR or TPM_SU_STATE.
- *
- * Return: Same as with tpm_transmit_cmd.
- */
-static int tpm2_startup(struct tpm_chip *chip, u16 startup_type)
-{
- struct tpm2_cmd cmd;
-
- cmd.header.in = tpm2_startup_header;
-
- cmd.params.startup_in.startup_type = cpu_to_be16(startup_type);
- return tpm_transmit_cmd(chip, NULL, &cmd, sizeof(cmd), 0, 0,
- "attempting to start the TPM");
-}
-
#define TPM2_SHUTDOWN_IN_SIZE \
(sizeof(struct tpm_input_header) + \
sizeof(struct tpm2_startup_in))
@@ -928,7 +880,6 @@ static int tpm2_do_selftest(struct tpm_chip *chip)
unsigned int loops;
unsigned int delay_msec = 100;
unsigned long duration;
- struct tpm2_cmd cmd;
int i;
duration = tpm2_calc_ordinal_duration(chip, TPM2_CC_SELF_TEST);
@@ -941,20 +892,10 @@ static int tpm2_do_selftest(struct tpm_chip *chip)
for (i = 0; i < loops; i++) {
/* Attempt to read a PCR value */
- cmd.header.in = tpm2_pcrread_header;
- cmd.params.pcrread_in.pcr_selects_cnt = cpu_to_be32(1);
- cmd.params.pcrread_in.hash_alg = cpu_to_be16(TPM2_ALG_SHA1);
- cmd.params.pcrread_in.pcr_select_size = TPM2_PCR_SELECT_MIN;
- cmd.params.pcrread_in.pcr_select[0] = 0x01;
- cmd.params.pcrread_in.pcr_select[1] = 0x00;
- cmd.params.pcrread_in.pcr_select[2] = 0x00;
-
- rc = tpm_transmit_cmd(chip, NULL, &cmd, sizeof(cmd), 0, 0,
- NULL);
+ rc = tpm2_pcr_read(chip, 0, NULL);
if (rc < 0)
break;
- rc = be32_to_cpu(cmd.header.out.return_code);
if (rc != TPM2_RC_TESTING)
break;
@@ -1150,7 +1091,7 @@ int tpm2_auto_startup(struct tpm_chip *chip)
}
if (rc == TPM2_RC_INITIALIZE) {
- rc = tpm2_startup(chip, TPM2_SU_CLEAR);
+ rc = tpm_startup(chip);
if (rc)
goto out;
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index fe42c4a0d8d1..a4ac63a21d8a 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -514,11 +514,12 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
goto out;
}
- priv->cmd_size = cmd_size;
-
priv->rsp = priv->cmd;
out:
+ if (!ret)
+ priv->cmd_size = cmd_size;
+
crb_go_idle(dev, priv);
return ret;
diff --git a/drivers/char/tpm/tpm_of.c b/drivers/char/tpm/tpm_of.c
index de57d4ac8901..aadb7f464076 100644
--- a/drivers/char/tpm/tpm_of.c
+++ b/drivers/char/tpm/tpm_of.c
@@ -36,6 +36,9 @@ int tpm_read_log_of(struct tpm_chip *chip)
else
return -ENODEV;
+ if (of_property_read_bool(np, "powered-while-suspended"))
+ chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED;
+
sizep = of_get_property(np, "linux,sml-size", NULL);
basep = of_get_property(np, "linux,sml-base", NULL);
if (sizep == NULL && basep == NULL)
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index b14d4aa97af8..7e55aa9ce680 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -132,13 +132,93 @@ static int check_acpi_tpm2(struct device *dev)
}
#endif
+#ifdef CONFIG_X86
+#define INTEL_LEGACY_BLK_BASE_ADDR 0xFED08000
+#define ILB_REMAP_SIZE 0x100
+#define LPC_CNTRL_REG_OFFSET 0x84
+#define LPC_CLKRUN_EN (1 << 2)
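+
+/*
+ * The LPC CLKRUN# protocol lets the platform stop the LPC clock while
+ * the bus is idle, which can break TPM register accesses on Braswell.
+ * CLKRUN_EN is therefore cleared around every TPM transfer and
+ * restored afterwards (see tpm_platform_begin_xfer/end_xfer below).
+ */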
+
+static void __iomem *ilb_base_addr;
+
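+/* Braswell/Cherry Trail SoCs use the Atom Airmont microarchitecture. */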
+static inline bool is_bsw(void)
+{
+ return boot_cpu_data.x86_model == INTEL_FAM6_ATOM_AIRMONT;
+}
+
+/**
+ * tpm_platform_begin_xfer() - clear LPC CLKRUN_EN i.e. clocks will be running
+ */
+static void tpm_platform_begin_xfer(void)
+{
+ u32 clkrun_val;
+
+ if (!is_bsw())
+ return;
+
+ clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET);
+
+ /* Disable LPC CLKRUN# */
+ clkrun_val &= ~LPC_CLKRUN_EN;
+ iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET);
+
+ /*
+ * Write any random value on port 0x80 which is on LPC, to make
+ * sure LPC clock is running before sending any TPM command.
+ */
+ outb(0xCC, 0x80);
+
+}
+
+/**
+ * tpm_platform_end_xfer() - set LPC CLKRUN_EN i.e. clocks can be turned off
+ */
+static void tpm_platform_end_xfer(void)
+{
+ u32 clkrun_val;
+
+ if (!is_bsw())
+ return;
+
+ clkrun_val = ioread32(ilb_base_addr + LPC_CNTRL_REG_OFFSET);
+
+ /* Enable LPC CLKRUN# */
+ clkrun_val |= LPC_CLKRUN_EN;
+ iowrite32(clkrun_val, ilb_base_addr + LPC_CNTRL_REG_OFFSET);
+
+ /*
+ * Write any random value on port 0x80 which is on LPC, to make
+ * sure LPC clock is running before sending any TPM command.
+ */
+ outb(0xCC, 0x80);
+
+}
+#else
+static inline bool is_bsw(void)
+{
+ return false;
+}
+
+static void tpm_platform_begin_xfer(void)
+{
+}
+
+static void tpm_platform_end_xfer(void)
+{
+}
+#endif
+
static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *result)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+ tpm_platform_begin_xfer();
+
while (len--)
*result++ = ioread8(phy->iobase + addr);
+
+ tpm_platform_end_xfer();
+
return 0;
}
@@ -147,8 +227,13 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+ tpm_platform_begin_xfer();
+
while (len--)
iowrite8(*value++, phy->iobase + addr);
+
+ tpm_platform_end_xfer();
+
return 0;
}
@@ -156,7 +241,12 @@ static int tpm_tcg_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+ tpm_platform_begin_xfer();
+
*result = ioread16(phy->iobase + addr);
+
+ tpm_platform_end_xfer();
+
return 0;
}
@@ -164,7 +254,12 @@ static int tpm_tcg_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+ tpm_platform_begin_xfer();
+
*result = ioread32(phy->iobase + addr);
+
+ tpm_platform_end_xfer();
+
return 0;
}
@@ -172,7 +267,12 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+ tpm_platform_begin_xfer();
+
iowrite32(value, phy->iobase + addr);
+
+ tpm_platform_end_xfer();
+
return 0;
}
@@ -360,6 +460,11 @@ static int __init init_tis(void)
if (rc)
goto err_force;
+#ifdef CONFIG_X86
+ if (is_bsw())
+ ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
+ ILB_REMAP_SIZE);
+#endif
rc = platform_driver_register(&tis_drv);
if (rc)
goto err_platform;
@@ -378,6 +483,10 @@ err_pnp:
err_platform:
if (force_pdev)
platform_device_unregister(force_pdev);
+#ifdef CONFIG_X86
+ if (is_bsw())
+ iounmap(ilb_base_addr);
+#endif
err_force:
return rc;
}
@@ -387,6 +496,10 @@ static void __exit cleanup_tis(void)
pnp_unregister_driver(&tis_pnp_driver);
platform_driver_unregister(&tis_drv);
+#ifdef CONFIG_X86
+ if (is_bsw())
+ iounmap(ilb_base_addr);
+#endif
if (force_pdev)
platform_device_unregister(force_pdev);
}
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 36cfea38135f..d406b087553f 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -126,6 +126,15 @@ config COMMON_CLK_CS2000_CP
help
If you say yes here you get support for the CS2000 clock multiplier.
+config COMMON_CLK_GEMINI
+ bool "Clock driver for Cortina Systems Gemini SoC"
+ depends on ARCH_GEMINI || COMPILE_TEST
+ select MFD_SYSCON
+ select RESET_CONTROLLER
+ ---help---
+ This driver supports the SoC clocks on the Cortina Systems Gemini
+ platform, also known as SL3516 or CS3516.
+
config COMMON_CLK_S2MPS11
tristate "Clock driver for S2MPS1X/S5M8767 MFD"
depends on MFD_SEC_CORE || COMPILE_TEST
@@ -164,13 +173,6 @@ config COMMON_CLK_XGENE
---help---
Support for the APM X-Gene SoC reference, PLL, and device clocks.
-config COMMON_CLK_KEYSTONE
- tristate "Clock drivers for Keystone based SOCs"
- depends on (ARCH_KEYSTONE || COMPILE_TEST) && OF
- ---help---
- Supports clock drivers for Keystone based SOCs. These SOCs have local
- a power sleep control module that gate the clock to the IPs and PLLs.
-
config COMMON_CLK_NXP
def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX)
select REGMAP_MMIO if ARCH_LPC32XX
@@ -219,6 +221,7 @@ config COMMON_CLK_VC5
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
+source "drivers/clk/keystone/Kconfig"
source "drivers/clk/mediatek/Kconfig"
source "drivers/clk/meson/Kconfig"
source "drivers/clk/mvebu/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index c19983afcb81..4f6a812342ed 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -1,5 +1,5 @@
# common clock types
-obj-$(CONFIG_HAVE_CLK) += clk-devres.o
+obj-$(CONFIG_HAVE_CLK) += clk-devres.o clk-bulk.o
obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
obj-$(CONFIG_COMMON_CLK) += clk.o
obj-$(CONFIG_COMMON_CLK) += clk-divider.o
@@ -25,6 +25,7 @@ obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
+obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o
obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
obj-$(CONFIG_ARCH_MB86S7X) += clk-mb86s7x.o
@@ -61,7 +62,7 @@ obj-$(CONFIG_H8300) += h8300/
obj-$(CONFIG_ARCH_HISI) += hisilicon/
obj-$(CONFIG_ARCH_MXC) += imx/
obj-$(CONFIG_MACH_INGENIC) += ingenic/
-obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/
+obj-$(CONFIG_ARCH_KEYSTONE) += keystone/
obj-$(CONFIG_MACH_LOONGSON32) += loongson1/
obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
obj-$(CONFIG_COMMON_CLK_AMLOGIC) += meson/
@@ -75,7 +76,7 @@ obj-$(CONFIG_COMMON_CLK_NXP) += nxp/
obj-$(CONFIG_MACH_PISTACHIO) += pistachio/
obj-$(CONFIG_COMMON_CLK_PXA) += pxa/
obj-$(CONFIG_COMMON_CLK_QCOM) += qcom/
-obj-$(CONFIG_ARCH_RENESAS) += renesas/
+obj-y += renesas/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
obj-$(CONFIG_ARCH_SIRF) += sirf/
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 4e1cd5aa69d8..f0b7ae904ce2 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -260,13 +260,15 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
gck->lock = lock;
gck->range = *range;
+ clk_generated_startup(gck);
hw = &gck->hw;
ret = clk_hw_register(NULL, &gck->hw);
if (ret) {
kfree(gck);
hw = ERR_PTR(ret);
- } else
- clk_generated_startup(gck);
+ } else {
+ pmc_register_id(id);
+ }
return hw;
}
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index dc29fd979d3f..770118369230 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -367,8 +367,10 @@ at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
if (ret) {
kfree(periph);
hw = ERR_PTR(ret);
- } else
+ } else {
clk_sam9x5_peripheral_autodiv(periph);
+ pmc_register_id(id);
+ }
return hw;
}
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index 526df5ba042d..775af473fe11 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -13,12 +13,16 @@
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/syscore_ops.h>
#include <asm/proc-fns.h>
#include "pmc.h"
+#define PMC_MAX_IDS 128
+
int of_at91_get_clk_range(struct device_node *np, const char *propname,
struct clk_range *range)
{
@@ -41,3 +45,128 @@ int of_at91_get_clk_range(struct device_node *np, const char *propname,
return 0;
}
EXPORT_SYMBOL_GPL(of_at91_get_clk_range);
+
+#ifdef CONFIG_PM
+static struct regmap *pmcreg;
+
+static u8 registered_ids[PMC_MAX_IDS];
+
+static struct
+{
+ u32 scsr;
+ u32 pcsr0;
+ u32 uckr;
+ u32 mor;
+ u32 mcfr;
+ u32 pllar;
+ u32 mckr;
+ u32 usb;
+ u32 imr;
+ u32 pcsr1;
+ u32 pcr[PMC_MAX_IDS];
+ u32 audio_pll0;
+ u32 audio_pll1;
+} pmc_cache;
+
+void pmc_register_id(u8 id)
+{
+ int i;
+
+ for (i = 0; i < PMC_MAX_IDS; i++) {
+ if (registered_ids[i] == 0) {
+ registered_ids[i] = id;
+ break;
+ }
+ if (registered_ids[i] == id)
+ break;
+ }
+}
+
+static int pmc_suspend(void)
+{
+ int i;
+
+ regmap_read(pmcreg, AT91_PMC_IMR, &pmc_cache.scsr);
+ regmap_read(pmcreg, AT91_PMC_PCSR, &pmc_cache.pcsr0);
+ regmap_read(pmcreg, AT91_CKGR_UCKR, &pmc_cache.uckr);
+ regmap_read(pmcreg, AT91_CKGR_MOR, &pmc_cache.mor);
+ regmap_read(pmcreg, AT91_CKGR_MCFR, &pmc_cache.mcfr);
+ regmap_read(pmcreg, AT91_CKGR_PLLAR, &pmc_cache.pllar);
+ regmap_read(pmcreg, AT91_PMC_MCKR, &pmc_cache.mckr);
+ regmap_read(pmcreg, AT91_PMC_USB, &pmc_cache.usb);
+ regmap_read(pmcreg, AT91_PMC_IMR, &pmc_cache.imr);
+ regmap_read(pmcreg, AT91_PMC_PCSR1, &pmc_cache.pcsr1);
+
+ for (i = 0; registered_ids[i]; i++) {
+ regmap_write(pmcreg, AT91_PMC_PCR,
+ (registered_ids[i] & AT91_PMC_PCR_PID_MASK));
+ regmap_read(pmcreg, AT91_PMC_PCR,
+ &pmc_cache.pcr[registered_ids[i]]);
+ }
+
+ return 0;
+}
+
+static void pmc_resume(void)
+{
+ int i, ret = 0;
+ u32 tmp;
+
+ regmap_read(pmcreg, AT91_PMC_MCKR, &tmp);
+ if (pmc_cache.mckr != tmp)
+ pr_warn("MCKR was not configured properly by the firmware\n");
+ regmap_read(pmcreg, AT91_CKGR_PLLAR, &tmp);
+ if (pmc_cache.pllar != tmp)
+ pr_warn("PLLAR was not configured properly by the firmware\n");
+
+ regmap_write(pmcreg, AT91_PMC_SCER, pmc_cache.scsr);
+ regmap_write(pmcreg, AT91_PMC_PCER, pmc_cache.pcsr0);
+ regmap_write(pmcreg, AT91_CKGR_UCKR, pmc_cache.uckr);
+ regmap_write(pmcreg, AT91_CKGR_MOR, pmc_cache.mor);
+ regmap_write(pmcreg, AT91_CKGR_MCFR, pmc_cache.mcfr);
+ regmap_write(pmcreg, AT91_PMC_USB, pmc_cache.usb);
+ regmap_write(pmcreg, AT91_PMC_IMR, pmc_cache.imr);
+ regmap_write(pmcreg, AT91_PMC_PCER1, pmc_cache.pcsr1);
+
+ for (i = 0; registered_ids[i]; i++) {
+ regmap_write(pmcreg, AT91_PMC_PCR,
+ pmc_cache.pcr[registered_ids[i]] |
+ AT91_PMC_PCR_CMD);
+ }
+
+ if (pmc_cache.uckr & AT91_PMC_UPLLEN) {
+ ret = regmap_read_poll_timeout(pmcreg, AT91_PMC_SR, tmp,
+ (tmp & AT91_PMC_LOCKU),
+ 10, 5000);
+ if (ret)
+ pr_crit("USB PLL didn't lock when resuming\n");
+ }
+}
+
+static struct syscore_ops pmc_syscore_ops = {
+ .suspend = pmc_suspend,
+ .resume = pmc_resume,
+};
+
+static const struct of_device_id sama5d2_pmc_dt_ids[] = {
+ { .compatible = "atmel,sama5d2-pmc" },
+ { /* sentinel */ }
+};
+
+static int __init pmc_register_ops(void)
+{
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, sama5d2_pmc_dt_ids);
+
+ pmcreg = syscon_node_to_regmap(np);
+ if (IS_ERR(pmcreg))
+ return PTR_ERR(pmcreg);
+
+ register_syscore_ops(&pmc_syscore_ops);
+
+ return 0;
+}
+/* This has to happen before arch_initcall because of the tcb_clksrc driver */
+postcore_initcall(pmc_register_ops);
+#endif
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 5771fff0ee3f..858e8ef7e8db 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -29,4 +29,10 @@ struct clk_range {
int of_at91_get_clk_range(struct device_node *np, const char *propname,
struct clk_range *range);
+#ifdef CONFIG_PM
+void pmc_register_id(u8 id);
+#else
+static inline void pmc_register_id(u8 id) {}
+#endif
+
#endif /* __PMC_H_ */
diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig
index b5ae5311b0a2..1d9187df167b 100644
--- a/drivers/clk/bcm/Kconfig
+++ b/drivers/clk/bcm/Kconfig
@@ -46,3 +46,11 @@ config CLK_BCM_NS2
default ARCH_BCM_IPROC
help
Enable common clock framework support for the Broadcom Northstar 2 SoC
+
+config CLK_BCM_SR
+ bool "Broadcom Stingray clock support"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ select COMMON_CLK_IPROC
+ default ARCH_BCM_IPROC
+ help
+ Enable common clock framework support for the Broadcom Stingray SoC
diff --git a/drivers/clk/bcm/Makefile b/drivers/clk/bcm/Makefile
index d9dc848f18c9..a0c14fa4aa1e 100644
--- a/drivers/clk/bcm/Makefile
+++ b/drivers/clk/bcm/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_ARCH_BCM_53573) += clk-bcm53573-ilp.o
obj-$(CONFIG_CLK_BCM_CYGNUS) += clk-cygnus.o
obj-$(CONFIG_CLK_BCM_NSP) += clk-nsp.o
obj-$(CONFIG_CLK_BCM_NS2) += clk-ns2.o
+obj-$(CONFIG_CLK_BCM_SR) += clk-sr.o
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 025853870619..58ce6af8452d 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -530,6 +530,7 @@ struct bcm2835_clock_data {
bool is_vpu_clock;
bool is_mash_clock;
+ bool low_jitter;
u32 tcnt_mux;
};
@@ -616,8 +617,10 @@ static unsigned long bcm2835_pll_get_rate(struct clk_hw *hw,
using_prediv = cprman_read(cprman, data->ana_reg_base + 4) &
data->ana->fb_prediv_mask;
- if (using_prediv)
+ if (using_prediv) {
ndiv *= 2;
+ fdiv *= 2;
+ }
return bcm2835_pll_rate_from_divisors(parent_rate, ndiv, fdiv, pdiv);
}
@@ -1124,7 +1127,8 @@ static unsigned long bcm2835_clock_choose_div_and_prate(struct clk_hw *hw,
int parent_idx,
unsigned long rate,
u32 *div,
- unsigned long *prate)
+ unsigned long *prate,
+ unsigned long *avgrate)
{
struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
struct bcm2835_cprman *cprman = clock->cprman;
@@ -1139,8 +1143,25 @@ static unsigned long bcm2835_clock_choose_div_and_prate(struct clk_hw *hw,
*prate = clk_hw_get_rate(parent);
*div = bcm2835_clock_choose_div(hw, rate, *prate, true);
- return bcm2835_clock_rate_from_divisor(clock, *prate,
- *div);
+ *avgrate = bcm2835_clock_rate_from_divisor(clock, *prate, *div);
+
+ if (data->low_jitter && (*div & CM_DIV_FRAC_MASK)) {
+ unsigned long high, low;
+ u32 int_div = *div & ~CM_DIV_FRAC_MASK;
+
+ high = bcm2835_clock_rate_from_divisor(clock, *prate,
+ int_div);
+ int_div += CM_DIV_FRAC_MASK + 1;
+ low = bcm2835_clock_rate_from_divisor(clock, *prate,
+ int_div);
+
+ /*
+ * Return the ideal rate reduced by the worst-case deviation
+ * (above or below it), for use as a comparison metric.
+ */
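+ /*
+ * Worked example (illustrative numbers only): with an ideal rate
+ * of 100000 Hz, low = 98000 Hz and high = 103000 Hz, the metric
+ * is 100000 - max(2000, 3000) = 97000 Hz.
+ */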
+ return *avgrate - max(*avgrate - low, high - *avgrate);
+ }
+ return *avgrate;
}
if (data->frac_bits)
@@ -1167,6 +1188,7 @@ static unsigned long bcm2835_clock_choose_div_and_prate(struct clk_hw *hw,
*div = curdiv << CM_DIV_FRAC_BITS;
*prate = curdiv * best_rate;
+ *avgrate = best_rate;
return best_rate;
}
@@ -1178,6 +1200,7 @@ static int bcm2835_clock_determine_rate(struct clk_hw *hw,
bool current_parent_is_pllc;
unsigned long rate, best_rate = 0;
unsigned long prate, best_prate = 0;
+ unsigned long avgrate, best_avgrate = 0;
size_t i;
u32 div;
@@ -1202,11 +1225,13 @@ static int bcm2835_clock_determine_rate(struct clk_hw *hw,
continue;
rate = bcm2835_clock_choose_div_and_prate(hw, i, req->rate,
- &div, &prate);
+ &div, &prate,
+ &avgrate);
if (rate > best_rate && rate <= req->rate) {
best_parent = parent;
best_prate = prate;
best_rate = rate;
+ best_avgrate = avgrate;
}
}
@@ -1216,7 +1241,7 @@ static int bcm2835_clock_determine_rate(struct clk_hw *hw,
req->best_parent_hw = best_parent;
req->best_parent_rate = best_prate;
- req->rate = best_rate;
+ req->rate = best_avgrate;
return 0;
}
@@ -1516,6 +1541,31 @@ static const char *const bcm2835_clock_per_parents[] = {
.parents = bcm2835_clock_per_parents, \
__VA_ARGS__)
+/*
+ * Restrict clock sources for the PCM peripheral to the oscillator and
+ * PLLD_PER because the other sources may have varying rates or be switched
+ * off.
+ *
+ * Prevent other sources from being selected by replacing their names in
+ * the list of potential parents with dummy entries (entry index is
+ * significant).
+ */
+static const char *const bcm2835_pcm_per_parents[] = {
+ "-",
+ "xosc",
+ "-",
+ "-",
+ "-",
+ "-",
+ "plld_per",
+ "-",
+};
+
+#define REGISTER_PCM_CLK(...) REGISTER_CLK( \
+ .num_mux_parents = ARRAY_SIZE(bcm2835_pcm_per_parents), \
+ .parents = bcm2835_pcm_per_parents, \
+ __VA_ARGS__)
+
/* main vpu parent mux */
static const char *const bcm2835_clock_vpu_parents[] = {
"gnd",
@@ -1993,13 +2043,14 @@ static const struct bcm2835_clk_desc clk_desc_array[] = {
.int_bits = 4,
.frac_bits = 8,
.tcnt_mux = 22),
- [BCM2835_CLOCK_PCM] = REGISTER_PER_CLK(
+ [BCM2835_CLOCK_PCM] = REGISTER_PCM_CLK(
.name = "pcm",
.ctl_reg = CM_PCMCTL,
.div_reg = CM_PCMDIV,
.int_bits = 12,
.frac_bits = 12,
.is_mash_clock = true,
+ .low_jitter = true,
.tcnt_mux = 23),
[BCM2835_CLOCK_PWM] = REGISTER_PER_CLK(
.name = "pwm",
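A minimal sketch, not part of the patch, restating the metric computed in the low_jitter branch of bcm2835_clock_choose_div_and_prate() above: when the chosen divisor has a fractional part, the MASH divider dithers between the integer divisors N and N+1, and the function reports the average rate minus the worst-case instantaneous deviation, so determine_rate() prefers candidates with less jitter. The helper name and the example numbers are invented for illustration.

/*
 * Editor's sketch, not from the patch: given the average rate and the two
 * instantaneous rates at integer divisors N ("high") and N + 1 ("low"),
 * compute the value returned for low_jitter clocks.
 */
static unsigned long low_jitter_metric(unsigned long avgrate,
				       unsigned long high,
				       unsigned long low)
{
	unsigned long undershoot = avgrate - low;	/* worst dip below avg */
	unsigned long overshoot = high - avgrate;	/* worst peak above avg */
	unsigned long worst = undershoot > overshoot ? undershoot : overshoot;

	/* Larger jitter yields a smaller metric and loses the comparison. */
	return avgrate - worst;
}

For example (illustrative numbers only), with avgrate = 12288000, high = 12500000 and low = 12000000 the metric is 12288000 - 288000 = 12000000, whereas an exact integer divisor would return avgrate unchanged.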
diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c
index 2d61893da024..375d8dd80d45 100644
--- a/drivers/clk/bcm/clk-iproc-pll.c
+++ b/drivers/clk/bcm/clk-iproc-pll.c
@@ -617,12 +617,12 @@ static void iproc_pll_sw_cfg(struct iproc_pll *pll)
}
}
-void __init iproc_pll_clk_setup(struct device_node *node,
- const struct iproc_pll_ctrl *pll_ctrl,
- const struct iproc_pll_vco_param *vco,
- unsigned int num_vco_entries,
- const struct iproc_clk_ctrl *clk_ctrl,
- unsigned int num_clks)
+void iproc_pll_clk_setup(struct device_node *node,
+ const struct iproc_pll_ctrl *pll_ctrl,
+ const struct iproc_pll_vco_param *vco,
+ unsigned int num_vco_entries,
+ const struct iproc_clk_ctrl *clk_ctrl,
+ unsigned int num_clks)
{
int i, ret;
struct iproc_pll *pll;
diff --git a/drivers/clk/bcm/clk-sr.c b/drivers/clk/bcm/clk-sr.c
new file mode 100644
index 000000000000..adc74f4584cf
--- /dev/null
+++ b/drivers/clk/bcm/clk-sr.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright 2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/bcm-sr.h>
+#include "clk-iproc.h"
+
+#define REG_VAL(o, s, w) { .offset = o, .shift = s, .width = w, }
+
+#define AON_VAL(o, pw, ps, is) { .offset = o, .pwr_width = pw, \
+ .pwr_shift = ps, .iso_shift = is }
+
+#define SW_CTRL_VAL(o, s) { .offset = o, .shift = s, }
+
+#define RESET_VAL(o, rs, prs) { .offset = o, .reset_shift = rs, \
+ .p_reset_shift = prs }
+
+#define DF_VAL(o, kis, kiw, kps, kpw, kas, kaw) { .offset = o, \
+ .ki_shift = kis, .ki_width = kiw, .kp_shift = kps, .kp_width = kpw, \
+ .ka_shift = kas, .ka_width = kaw }
+
+#define VCO_CTRL_VAL(uo, lo) { .u_offset = uo, .l_offset = lo }
+
+#define ENABLE_VAL(o, es, hs, bs) { .offset = o, .enable_shift = es, \
+ .hold_shift = hs, .bypass_shift = bs }
+
+
+static const struct iproc_pll_ctrl sr_genpll0 = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_HAS_NDIV_FRAC |
+ IPROC_CLK_PLL_NEEDS_SW_CFG,
+ .aon = AON_VAL(0x0, 5, 1, 0),
+ .reset = RESET_VAL(0x0, 12, 11),
+ .dig_filter = DF_VAL(0x0, 4, 3, 0, 4, 7, 3),
+ .sw_ctrl = SW_CTRL_VAL(0x10, 31),
+ .ndiv_int = REG_VAL(0x10, 20, 10),
+ .ndiv_frac = REG_VAL(0x10, 0, 20),
+ .pdiv = REG_VAL(0x14, 0, 4),
+ .status = REG_VAL(0x30, 12, 1),
+};
+
+static const struct iproc_clk_ctrl sr_genpll0_clk[] = {
+ [BCM_SR_GENPLL0_SATA_CLK] = {
+ .channel = BCM_SR_GENPLL0_SATA_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 6, 0, 12),
+ .mdiv = REG_VAL(0x18, 0, 9),
+ },
+ [BCM_SR_GENPLL0_SCR_CLK] = {
+ .channel = BCM_SR_GENPLL0_SCR_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 7, 1, 13),
+ .mdiv = REG_VAL(0x18, 10, 9),
+ },
+ [BCM_SR_GENPLL0_250M_CLK] = {
+ .channel = BCM_SR_GENPLL0_250M_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 8, 2, 14),
+ .mdiv = REG_VAL(0x18, 20, 9),
+ },
+ [BCM_SR_GENPLL0_PCIE_AXI_CLK] = {
+ .channel = BCM_SR_GENPLL0_PCIE_AXI_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 9, 3, 15),
+ .mdiv = REG_VAL(0x1c, 0, 9),
+ },
+ [BCM_SR_GENPLL0_PAXC_AXI_X2_CLK] = {
+ .channel = BCM_SR_GENPLL0_PAXC_AXI_X2_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 10, 4, 16),
+ .mdiv = REG_VAL(0x1c, 10, 9),
+ },
+ [BCM_SR_GENPLL0_PAXC_AXI_CLK] = {
+ .channel = BCM_SR_GENPLL0_PAXC_AXI_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 11, 5, 17),
+ .mdiv = REG_VAL(0x1c, 20, 9),
+ },
+};
+
+static int sr_genpll0_clk_init(struct platform_device *pdev)
+{
+ iproc_pll_clk_setup(pdev->dev.of_node,
+ &sr_genpll0, NULL, 0, sr_genpll0_clk,
+ ARRAY_SIZE(sr_genpll0_clk));
+ return 0;
+}
+
+static const struct iproc_pll_ctrl sr_genpll3 = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_HAS_NDIV_FRAC |
+ IPROC_CLK_PLL_NEEDS_SW_CFG,
+ .aon = AON_VAL(0x0, 1, 19, 18),
+ .reset = RESET_VAL(0x0, 12, 11),
+ .dig_filter = DF_VAL(0x0, 4, 3, 0, 4, 7, 3),
+ .sw_ctrl = SW_CTRL_VAL(0x10, 31),
+ .ndiv_int = REG_VAL(0x10, 20, 10),
+ .ndiv_frac = REG_VAL(0x10, 0, 20),
+ .pdiv = REG_VAL(0x14, 0, 4),
+ .status = REG_VAL(0x30, 12, 1),
+};
+
+static const struct iproc_clk_ctrl sr_genpll3_clk[] = {
+ [BCM_SR_GENPLL3_HSLS_CLK] = {
+ .channel = BCM_SR_GENPLL3_HSLS_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 6, 0, 12),
+ .mdiv = REG_VAL(0x18, 0, 9),
+ },
+ [BCM_SR_GENPLL3_SDIO_CLK] = {
+ .channel = BCM_SR_GENPLL3_SDIO_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 7, 1, 13),
+ .mdiv = REG_VAL(0x18, 10, 9),
+ },
+};
+
+static void sr_genpll3_clk_init(struct device_node *node)
+{
+ iproc_pll_clk_setup(node, &sr_genpll3, NULL, 0, sr_genpll3_clk,
+ ARRAY_SIZE(sr_genpll3_clk));
+}
+CLK_OF_DECLARE(sr_genpll3_clk, "brcm,sr-genpll3", sr_genpll3_clk_init);
+
+static const struct iproc_pll_ctrl sr_genpll4 = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_HAS_NDIV_FRAC |
+ IPROC_CLK_PLL_NEEDS_SW_CFG,
+ .aon = AON_VAL(0x0, 1, 25, 24),
+ .reset = RESET_VAL(0x0, 12, 11),
+ .dig_filter = DF_VAL(0x0, 4, 3, 0, 4, 7, 3),
+ .sw_ctrl = SW_CTRL_VAL(0x10, 31),
+ .ndiv_int = REG_VAL(0x10, 20, 10),
+ .ndiv_frac = REG_VAL(0x10, 0, 20),
+ .pdiv = REG_VAL(0x14, 0, 4),
+ .status = REG_VAL(0x30, 12, 1),
+};
+
+static const struct iproc_clk_ctrl sr_genpll4_clk[] = {
+ [BCM_SR_GENPLL4_CCN_CLK] = {
+ .channel = BCM_SR_GENPLL4_CCN_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 6, 0, 12),
+ .mdiv = REG_VAL(0x18, 0, 9),
+ },
+};
+
+static int sr_genpll4_clk_init(struct platform_device *pdev)
+{
+ iproc_pll_clk_setup(pdev->dev.of_node,
+ &sr_genpll4, NULL, 0, sr_genpll4_clk,
+ ARRAY_SIZE(sr_genpll4_clk));
+ return 0;
+}
+
+static const struct iproc_pll_ctrl sr_genpll5 = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_HAS_NDIV_FRAC |
+ IPROC_CLK_PLL_NEEDS_SW_CFG,
+ .aon = AON_VAL(0x0, 1, 1, 0),
+ .reset = RESET_VAL(0x0, 12, 11),
+ .dig_filter = DF_VAL(0x0, 4, 3, 0, 4, 7, 3),
+ .sw_ctrl = SW_CTRL_VAL(0x10, 31),
+ .ndiv_int = REG_VAL(0x10, 20, 10),
+ .ndiv_frac = REG_VAL(0x10, 0, 20),
+ .pdiv = REG_VAL(0x14, 0, 4),
+ .status = REG_VAL(0x30, 12, 1),
+};
+
+static const struct iproc_clk_ctrl sr_genpll5_clk[] = {
+ [BCM_SR_GENPLL5_FS_CLK] = {
+ .channel = BCM_SR_GENPLL5_FS_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 6, 0, 12),
+ .mdiv = REG_VAL(0x18, 0, 9),
+ },
+ [BCM_SR_GENPLL5_SPU_CLK] = {
+ .channel = BCM_SR_GENPLL5_SPU_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x4, 6, 0, 12),
+ .mdiv = REG_VAL(0x18, 10, 9),
+ },
+};
+
+static int sr_genpll5_clk_init(struct platform_device *pdev)
+{
+ iproc_pll_clk_setup(pdev->dev.of_node,
+ &sr_genpll5, NULL, 0, sr_genpll5_clk,
+ ARRAY_SIZE(sr_genpll5_clk));
+ return 0;
+}
+
+static const struct iproc_pll_ctrl sr_lcpll0 = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_NEEDS_SW_CFG,
+ .aon = AON_VAL(0x0, 2, 19, 18),
+ .reset = RESET_VAL(0x0, 31, 30),
+ .sw_ctrl = SW_CTRL_VAL(0x4, 31),
+ .ndiv_int = REG_VAL(0x4, 16, 10),
+ .pdiv = REG_VAL(0x4, 26, 4),
+ .status = REG_VAL(0x38, 12, 1),
+};
+
+static const struct iproc_clk_ctrl sr_lcpll0_clk[] = {
+ [BCM_SR_LCPLL0_SATA_REF_CLK] = {
+ .channel = BCM_SR_LCPLL0_SATA_REF_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 7, 1, 13),
+ .mdiv = REG_VAL(0x14, 0, 9),
+ },
+ [BCM_SR_LCPLL0_USB_REF_CLK] = {
+ .channel = BCM_SR_LCPLL0_USB_REF_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 8, 2, 14),
+ .mdiv = REG_VAL(0x14, 10, 9),
+ },
+ [BCM_SR_LCPLL0_SATA_REFPN_CLK] = {
+ .channel = BCM_SR_LCPLL0_SATA_REFPN_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 9, 3, 15),
+ .mdiv = REG_VAL(0x14, 20, 9),
+ },
+};
+
+static int sr_lcpll0_clk_init(struct platform_device *pdev)
+{
+ iproc_pll_clk_setup(pdev->dev.of_node,
+ &sr_lcpll0, NULL, 0, sr_lcpll0_clk,
+ ARRAY_SIZE(sr_lcpll0_clk));
+ return 0;
+}
+
+static const struct iproc_pll_ctrl sr_lcpll1 = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_NEEDS_SW_CFG,
+ .aon = AON_VAL(0x0, 2, 22, 21),
+ .reset = RESET_VAL(0x0, 31, 30),
+ .sw_ctrl = SW_CTRL_VAL(0x4, 31),
+ .ndiv_int = REG_VAL(0x4, 16, 10),
+ .pdiv = REG_VAL(0x4, 26, 4),
+ .status = REG_VAL(0x38, 12, 1),
+};
+
+static const struct iproc_clk_ctrl sr_lcpll1_clk[] = {
+ [BCM_SR_LCPLL1_WAN_CLK] = {
+ .channel = BCM_SR_LCPLL1_WAN_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 7, 1, 13),
+ .mdiv = REG_VAL(0x14, 0, 9),
+ },
+};
+
+static int sr_lcpll1_clk_init(struct platform_device *pdev)
+{
+ iproc_pll_clk_setup(pdev->dev.of_node,
+ &sr_lcpll1, NULL, 0, sr_lcpll1_clk,
+ ARRAY_SIZE(sr_lcpll1_clk));
+ return 0;
+}
+
+static const struct iproc_pll_ctrl sr_lcpll_pcie = {
+ .flags = IPROC_CLK_AON | IPROC_CLK_PLL_NEEDS_SW_CFG,
+ .aon = AON_VAL(0x0, 2, 25, 24),
+ .reset = RESET_VAL(0x0, 31, 30),
+ .sw_ctrl = SW_CTRL_VAL(0x4, 31),
+ .ndiv_int = REG_VAL(0x4, 16, 10),
+ .pdiv = REG_VAL(0x4, 26, 4),
+ .status = REG_VAL(0x38, 12, 1),
+};
+
+static const struct iproc_clk_ctrl sr_lcpll_pcie_clk[] = {
+ [BCM_SR_LCPLL_PCIE_PHY_REF_CLK] = {
+ .channel = BCM_SR_LCPLL_PCIE_PHY_REF_CLK,
+ .flags = IPROC_CLK_AON,
+ .enable = ENABLE_VAL(0x0, 7, 1, 13),
+ .mdiv = REG_VAL(0x14, 0, 9),
+ },
+};
+
+static int sr_lcpll_pcie_clk_init(struct platform_device *pdev)
+{
+ iproc_pll_clk_setup(pdev->dev.of_node,
+ &sr_lcpll_pcie, NULL, 0, sr_lcpll_pcie_clk,
+ ARRAY_SIZE(sr_lcpll_pcie_clk));
+ return 0;
+}
+
+static const struct of_device_id sr_clk_dt_ids[] = {
+ { .compatible = "brcm,sr-genpll0", .data = sr_genpll0_clk_init },
+ { .compatible = "brcm,sr-genpll4", .data = sr_genpll4_clk_init },
+ { .compatible = "brcm,sr-genpll5", .data = sr_genpll5_clk_init },
+ { .compatible = "brcm,sr-lcpll0", .data = sr_lcpll0_clk_init },
+ { .compatible = "brcm,sr-lcpll1", .data = sr_lcpll1_clk_init },
+ { .compatible = "brcm,sr-lcpll-pcie", .data = sr_lcpll_pcie_clk_init },
+ { /* sentinel */ }
+};
+
+static int sr_clk_probe(struct platform_device *pdev)
+{
+ int (*probe_func)(struct platform_device *);
+
+ probe_func = of_device_get_match_data(&pdev->dev);
+ if (!probe_func)
+ return -ENODEV;
+
+ return probe_func(pdev);
+}
+
+static struct platform_driver sr_clk_driver = {
+ .driver = {
+ .name = "sr-clk",
+ .of_match_table = sr_clk_dt_ids,
+ },
+ .probe = sr_clk_probe,
+};
+builtin_platform_driver(sr_clk_driver);
diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c
new file mode 100644
index 000000000000..c834f5abfc49
--- /dev/null
+++ b/drivers/clk/clk-bulk.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2017 NXP
+ *
+ * Dong Aisheng <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/export.h>
+
+void clk_bulk_put(int num_clks, struct clk_bulk_data *clks)
+{
+ while (--num_clks >= 0) {
+ clk_put(clks[num_clks].clk);
+ clks[num_clks].clk = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(clk_bulk_put);
+
+int __must_check clk_bulk_get(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < num_clks; i++)
+ clks[i].clk = NULL;
+
+ for (i = 0; i < num_clks; i++) {
+ clks[i].clk = clk_get(dev, clks[i].id);
+ if (IS_ERR(clks[i].clk)) {
+ ret = PTR_ERR(clks[i].clk);
+ dev_err(dev, "Failed to get clk '%s': %d\n",
+ clks[i].id, ret);
+ clks[i].clk = NULL;
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ clk_bulk_put(i, clks);
+
+ return ret;
+}
+EXPORT_SYMBOL(clk_bulk_get);
+
+#ifdef CONFIG_HAVE_CLK_PREPARE
+
+/**
+ * clk_bulk_unprepare - undo preparation of a set of clock sources
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table being unprepared
+ *
+ * clk_bulk_unprepare may sleep, which differentiates it from clk_bulk_disable.
+ * Returns 0 on success, -EERROR otherwise.
+ */
+void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks)
+{
+ while (--num_clks >= 0)
+ clk_unprepare(clks[num_clks].clk);
+}
+EXPORT_SYMBOL_GPL(clk_bulk_unprepare);
+
+/**
+ * clk_bulk_prepare - prepare a set of clocks
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table being prepared
+ *
+ * clk_bulk_prepare may sleep, which differentiates it from clk_bulk_enable.
+ * Returns 0 on success, -EERROR otherwise.
+ */
+int __must_check clk_bulk_prepare(int num_clks,
+ const struct clk_bulk_data *clks)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < num_clks; i++) {
+ ret = clk_prepare(clks[i].clk);
+ if (ret) {
+ pr_err("Failed to prepare clk '%s': %d\n",
+ clks[i].id, ret);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ clk_bulk_unprepare(i, clks);
+
+ return ret;
+}
+
+#endif /* CONFIG_HAVE_CLK_PREPARE */
+
+/**
+ * clk_bulk_disable - gate a set of clocks
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table being gated
+ *
+ * clk_bulk_disable must not sleep, which differentiates it from
+ * clk_bulk_unprepare. clk_bulk_disable must be called before
+ * clk_bulk_unprepare.
+ */
+void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks)
+{
+
+ while (--num_clks >= 0)
+ clk_disable(clks[num_clks].clk);
+}
+EXPORT_SYMBOL_GPL(clk_bulk_disable);
+
+/**
+ * clk_bulk_enable - ungate a set of clocks
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table being ungated
+ *
+ * clk_bulk_enable must not sleep, which differentiates it from
+ * clk_bulk_prepare.
+ * Returns 0 on success, -EERROR otherwise.
+ */
+int __must_check clk_bulk_enable(int num_clks, const struct clk_bulk_data *clks)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < num_clks; i++) {
+ ret = clk_enable(clks[i].clk);
+ if (ret) {
+ pr_err("Failed to enable clk '%s': %d\n",
+ clks[i].id, ret);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ clk_bulk_disable(i, clks);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(clk_bulk_enable);
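A hedged usage sketch, not part of the patch, showing the new bulk API from a consumer's point of view; the clock names and my_probe() are invented for illustration. With the devm_clk_bulk_get() variant added in clk-devres.c below, the explicit clk_bulk_put() on the error and remove paths can be dropped.

/* Editor's sketch: invented consumer of the bulk clock API. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>

static struct clk_bulk_data my_clks[] = {
	{ .id = "bus" },	/* invented clock names */
	{ .id = "core" },
};

static int my_probe(struct device *dev)
{
	int ret;

	ret = clk_bulk_get(dev, ARRAY_SIZE(my_clks), my_clks);
	if (ret)
		return ret;

	ret = clk_bulk_prepare(ARRAY_SIZE(my_clks), my_clks);
	if (ret)
		goto err_put;

	ret = clk_bulk_enable(ARRAY_SIZE(my_clks), my_clks);
	if (ret)
		goto err_unprepare;

	return 0;

err_unprepare:
	clk_bulk_unprepare(ARRAY_SIZE(my_clks), my_clks);
err_put:
	clk_bulk_put(ARRAY_SIZE(my_clks), my_clks);
	return ret;
}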
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index e0e02a6e5900..7ec36722f8ab 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -109,7 +109,7 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
rc = clk_set_rate(clk, rate);
if (rc < 0)
- pr_err("clk: couldn't set %s clk rate to %d (%d), current rate: %ld\n",
+ pr_err("clk: couldn't set %s clk rate to %u (%d), current rate: %lu\n",
__clk_get_name(clk), rate, rc,
clk_get_rate(clk));
clk_put(clk);
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index 3a218c3a06ae..d854e26a8ddb 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -34,6 +34,42 @@ struct clk *devm_clk_get(struct device *dev, const char *id)
}
EXPORT_SYMBOL(devm_clk_get);
+struct clk_bulk_devres {
+ struct clk_bulk_data *clks;
+ int num_clks;
+};
+
+static void devm_clk_bulk_release(struct device *dev, void *res)
+{
+ struct clk_bulk_devres *devres = res;
+
+ clk_bulk_put(devres->num_clks, devres->clks);
+}
+
+int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks)
+{
+ struct clk_bulk_devres *devres;
+ int ret;
+
+ devres = devres_alloc(devm_clk_bulk_release,
+ sizeof(*devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ ret = clk_bulk_get(dev, num_clks, clks);
+ if (!ret) {
+ devres->clks = clks;
+ devres->num_clks = num_clks;
+ devres_add(dev, devres);
+ } else {
+ devres_free(devres);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_clk_bulk_get);
+
static int devm_clk_match(struct device *dev, void *res, void *data)
{
struct clk **c = res;
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 96386ffc8483..9bb472cccca6 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -275,7 +275,8 @@ static int _next_div(const struct clk_div_table *table, int div,
return div;
}
-static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
+static int clk_divider_bestdiv(struct clk_hw *hw, struct clk_hw *parent,
+ unsigned long rate,
unsigned long *best_parent_rate,
const struct clk_div_table *table, u8 width,
unsigned long flags)
@@ -314,8 +315,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
*best_parent_rate = parent_rate_saved;
return i;
}
- parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
- rate * i);
+ parent_rate = clk_hw_round_rate(parent, rate * i);
now = DIV_ROUND_UP_ULL((u64)parent_rate, i);
if (_is_best_div(rate, now, best, flags)) {
bestdiv = i;
@@ -326,23 +326,24 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
if (!bestdiv) {
bestdiv = _get_maxdiv(table, width, flags);
- *best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1);
+ *best_parent_rate = clk_hw_round_rate(parent, 1);
}
return bestdiv;
}
-long divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate, const struct clk_div_table *table,
- u8 width, unsigned long flags)
+long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
+ unsigned long rate, unsigned long *prate,
+ const struct clk_div_table *table,
+ u8 width, unsigned long flags)
{
int div;
- div = clk_divider_bestdiv(hw, rate, prate, table, width, flags);
+ div = clk_divider_bestdiv(hw, parent, rate, prate, table, width, flags);
return DIV_ROUND_UP_ULL((u64)*prate, div);
}
-EXPORT_SYMBOL_GPL(divider_round_rate);
+EXPORT_SYMBOL_GPL(divider_round_rate_parent);
static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
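A hedged sketch, not from the patch, of what divider_round_rate_parent() enables: a driver can now evaluate a divider against an explicitly chosen parent clk_hw, for instance a mux input that is not the currently selected parent. The table, the 2-bit width and the callback name below are invented for illustration.

/* Editor's sketch: invented divider table and round_rate callback. */
static const struct clk_div_table my_table[] = {
	{ .val = 0, .div = 1 },
	{ .val = 1, .div = 2 },
	{ .val = 2, .div = 4 },
	{ /* sentinel */ }
};

static long my_round_rate(struct clk_hw *hw, unsigned long rate,
			  unsigned long *prate)
{
	/* Any candidate parent may be passed, not only the current one. */
	struct clk_hw *parent = clk_hw_get_parent(hw);

	return divider_round_rate_parent(hw, parent, rate, prate,
					 my_table, 2, /* 2-bit divider field */
					 CLK_DIVIDER_ROUND_CLOSEST);
}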
diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c
new file mode 100644
index 000000000000..c391a49aaaff
--- /dev/null
+++ b/drivers/clk/clk-gemini.c
@@ -0,0 +1,455 @@
+/*
+ * Cortina Gemini SoC Clock Controller driver
+ * Copyright (c) 2017 Linus Walleij <[email protected]>
+ */
+
+#define pr_fmt(fmt) "clk-gemini: " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+#include <linux/reset-controller.h>
+#include <dt-bindings/reset/cortina,gemini-reset.h>
+#include <dt-bindings/clock/cortina,gemini-clock.h>
+
+/* Globally visible clocks */
+static DEFINE_SPINLOCK(gemini_clk_lock);
+
+#define GEMINI_GLOBAL_STATUS 0x04
+#define PLL_OSC_SEL BIT(30)
+#define AHBSPEED_SHIFT (15)
+#define AHBSPEED_MASK 0x07
+#define CPU_AHB_RATIO_SHIFT (18)
+#define CPU_AHB_RATIO_MASK 0x03
+
+#define GEMINI_GLOBAL_PLL_CONTROL 0x08
+
+#define GEMINI_GLOBAL_SOFT_RESET 0x0c
+
+#define GEMINI_GLOBAL_MISC_CONTROL 0x30
+#define PCI_CLK_66MHZ BIT(18)
+#define PCI_CLK_OE BIT(17)
+
+#define GEMINI_GLOBAL_CLOCK_CONTROL 0x34
+#define PCI_CLKRUN_EN BIT(16)
+#define TVC_HALFDIV_SHIFT (24)
+#define TVC_HALFDIV_MASK 0x1f
+#define SECURITY_CLK_SEL BIT(29)
+
+#define GEMINI_GLOBAL_PCI_DLL_CONTROL 0x44
+#define PCI_DLL_BYPASS BIT(31)
+#define PCI_DLL_TAP_SEL_MASK 0x1f
+
+/**
+ * struct gemini_gate_data - Gemini gated clocks
+ * @bit_idx: the bit used to gate this clock in the clock register
+ * @name: the clock name
+ * @parent_name: the name of the parent clock
+ * @flags: standard clock framework flags
+ */
+struct gemini_gate_data {
+ u8 bit_idx;
+ const char *name;
+ const char *parent_name;
+ unsigned long flags;
+};
+
+/**
+ * struct clk_gemini_pci - Gemini PCI clock
+ * @hw: corresponding clock hardware entry
+ * @map: regmap to access the registers
+ * @rate: current rate
+ */
+struct clk_gemini_pci {
+ struct clk_hw hw;
+ struct regmap *map;
+ unsigned long rate;
+};
+
+/**
+ * struct gemini_reset - gemini reset controller
+ * @map: regmap to access the containing system controller
+ * @rcdev: reset controller device
+ */
+struct gemini_reset {
+ struct regmap *map;
+ struct reset_controller_dev rcdev;
+};
+
+/* Keeps track of all clocks */
+static struct clk_hw_onecell_data *gemini_clk_data;
+
+static const struct gemini_gate_data gemini_gates[] = {
+ { 1, "security-gate", "secdiv", 0 },
+ { 2, "gmac0-gate", "ahb", 0 },
+ { 3, "gmac1-gate", "ahb", 0 },
+ { 4, "sata0-gate", "ahb", 0 },
+ { 5, "sata1-gate", "ahb", 0 },
+ { 6, "usb0-gate", "ahb", 0 },
+ { 7, "usb1-gate", "ahb", 0 },
+ { 8, "ide-gate", "ahb", 0 },
+ { 9, "pci-gate", "ahb", 0 },
+ /*
+ * The DDR controller may never have a driver, but certainly must
+ * not be gated off.
+ */
+ { 10, "ddr-gate", "ahb", CLK_IS_CRITICAL },
+ /*
+ * The flash controller must be on to access NOR flash through the
+ * memory map.
+ */
+ { 11, "flash-gate", "ahb", CLK_IGNORE_UNUSED },
+ { 12, "tvc-gate", "ahb", 0 },
+ { 13, "boot-gate", "apb", 0 },
+};
+
+#define to_pciclk(_hw) container_of(_hw, struct clk_gemini_pci, hw)
+
+#define to_gemini_reset(p) container_of((p), struct gemini_reset, rcdev)
+
+static unsigned long gemini_pci_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_gemini_pci *pciclk = to_pciclk(hw);
+ u32 val;
+
+ regmap_read(pciclk->map, GEMINI_GLOBAL_MISC_CONTROL, &val);
+ if (val & PCI_CLK_66MHZ)
+ return 66000000;
+ return 33000000;
+}
+
+static long gemini_pci_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ /* We support 33 and 66 MHz */
+ if (rate < 48000000)
+ return 33000000;
+ return 66000000;
+}
+
+static int gemini_pci_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_gemini_pci *pciclk = to_pciclk(hw);
+
+ if (rate == 33000000)
+ return regmap_update_bits(pciclk->map,
+ GEMINI_GLOBAL_MISC_CONTROL,
+ PCI_CLK_66MHZ, 0);
+ if (rate == 66000000)
+ return regmap_update_bits(pciclk->map,
+ GEMINI_GLOBAL_MISC_CONTROL,
+ 0, PCI_CLK_66MHZ);
+ return -EINVAL;
+}
+
+static int gemini_pci_enable(struct clk_hw *hw)
+{
+ struct clk_gemini_pci *pciclk = to_pciclk(hw);
+
+ regmap_update_bits(pciclk->map, GEMINI_GLOBAL_CLOCK_CONTROL,
+ 0, PCI_CLKRUN_EN);
+ regmap_update_bits(pciclk->map,
+ GEMINI_GLOBAL_MISC_CONTROL,
+ 0, PCI_CLK_OE);
+ return 0;
+}
+
+static void gemini_pci_disable(struct clk_hw *hw)
+{
+ struct clk_gemini_pci *pciclk = to_pciclk(hw);
+
+ regmap_update_bits(pciclk->map,
+ GEMINI_GLOBAL_MISC_CONTROL,
+ PCI_CLK_OE, 0);
+ regmap_update_bits(pciclk->map, GEMINI_GLOBAL_CLOCK_CONTROL,
+ PCI_CLKRUN_EN, 0);
+}
+
+static int gemini_pci_is_enabled(struct clk_hw *hw)
+{
+ struct clk_gemini_pci *pciclk = to_pciclk(hw);
+ unsigned int val;
+
+ regmap_read(pciclk->map, GEMINI_GLOBAL_CLOCK_CONTROL, &val);
+ return !!(val & PCI_CLKRUN_EN);
+}
+
+static const struct clk_ops gemini_pci_clk_ops = {
+ .recalc_rate = gemini_pci_recalc_rate,
+ .round_rate = gemini_pci_round_rate,
+ .set_rate = gemini_pci_set_rate,
+ .enable = gemini_pci_enable,
+ .disable = gemini_pci_disable,
+ .is_enabled = gemini_pci_is_enabled,
+};
+
+static struct clk_hw *gemini_pci_clk_setup(const char *name,
+ const char *parent_name,
+ struct regmap *map)
+{
+ struct clk_gemini_pci *pciclk;
+ struct clk_init_data init;
+ int ret;
+
+ pciclk = kzalloc(sizeof(*pciclk), GFP_KERNEL);
+ if (!pciclk)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &gemini_pci_clk_ops;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ pciclk->map = map;
+ pciclk->hw.init = &init;
+
+ ret = clk_hw_register(NULL, &pciclk->hw);
+ if (ret) {
+ kfree(pciclk);
+ return ERR_PTR(ret);
+ }
+
+ return &pciclk->hw;
+}
+
+/*
+ * This is a self-deasserting reset controller.
+ */
+static int gemini_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct gemini_reset *gr = to_gemini_reset(rcdev);
+
+ /* Manual says to always set BIT 30 (CPU1) to 1 */
+ return regmap_write(gr->map,
+ GEMINI_GLOBAL_SOFT_RESET,
+ BIT(GEMINI_RESET_CPU1) | BIT(id));
+}
+
+static int gemini_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct gemini_reset *gr = to_gemini_reset(rcdev);
+ u32 val;
+ int ret;
+
+ ret = regmap_read(gr->map, GEMINI_GLOBAL_SOFT_RESET, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & BIT(id));
+}
+
+static const struct reset_control_ops gemini_reset_ops = {
+ .reset = gemini_reset,
+ .status = gemini_reset_status,
+};
+
+static int gemini_clk_probe(struct platform_device *pdev)
+{
+	/* Gives the fractions 1x, 1.5x, 1.85x and 2x */
+ unsigned int cpu_ahb_mult[4] = { 1, 3, 24, 2 };
+ unsigned int cpu_ahb_div[4] = { 1, 2, 13, 1 };
+ void __iomem *base;
+ struct gemini_reset *gr;
+ struct regmap *map;
+ struct clk_hw *hw;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ unsigned int mult, div;
+ struct resource *res;
+ u32 val;
+ int ret;
+ int i;
+
+ gr = devm_kzalloc(dev, sizeof(*gr), GFP_KERNEL);
+ if (!gr)
+ return -ENOMEM;
+
+ /* Remap the system controller for the exclusive register */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ map = syscon_node_to_regmap(np);
+ if (IS_ERR(map)) {
+ dev_err(dev, "no syscon regmap\n");
+ return PTR_ERR(map);
+ }
+
+ gr->map = map;
+ gr->rcdev.owner = THIS_MODULE;
+ gr->rcdev.nr_resets = 32;
+ gr->rcdev.ops = &gemini_reset_ops;
+ gr->rcdev.of_node = np;
+
+ ret = devm_reset_controller_register(dev, &gr->rcdev);
+ if (ret) {
+ dev_err(dev, "could not register reset controller\n");
+ return ret;
+ }
+
+ /* RTC clock 32768 Hz */
+ hw = clk_hw_register_fixed_rate(NULL, "rtc", NULL, 0, 32768);
+ gemini_clk_data->hws[GEMINI_CLK_RTC] = hw;
+
+ /* CPU clock derived as a fixed ratio from the AHB clock */
+ regmap_read(map, GEMINI_GLOBAL_STATUS, &val);
+ val >>= CPU_AHB_RATIO_SHIFT;
+ val &= CPU_AHB_RATIO_MASK;
+ hw = clk_hw_register_fixed_factor(NULL, "cpu", "ahb", 0,
+ cpu_ahb_mult[val],
+ cpu_ahb_div[val]);
+ gemini_clk_data->hws[GEMINI_CLK_CPU] = hw;
+
+ /* Security clock is 1:1 or 0.75 of APB */
+ regmap_read(map, GEMINI_GLOBAL_CLOCK_CONTROL, &val);
+ if (val & SECURITY_CLK_SEL) {
+ mult = 1;
+ div = 1;
+ } else {
+ mult = 3;
+ div = 4;
+ }
+ hw = clk_hw_register_fixed_factor(NULL, "secdiv", "ahb", 0, mult, div);
+
+ /*
+	 * These are the leaf gates; at boot no clocks are gated.
+ */
+ for (i = 0; i < ARRAY_SIZE(gemini_gates); i++) {
+ const struct gemini_gate_data *gd;
+
+ gd = &gemini_gates[i];
+ gemini_clk_data->hws[GEMINI_CLK_GATES + i] =
+ clk_hw_register_gate(NULL, gd->name,
+ gd->parent_name,
+ gd->flags,
+ base + GEMINI_GLOBAL_CLOCK_CONTROL,
+ gd->bit_idx,
+ CLK_GATE_SET_TO_DISABLE,
+ &gemini_clk_lock);
+ }
+
+ /*
+ * The TV Interface Controller has a 5-bit half divider register.
+ * This clock is supposed to be 27MHz as this is an exact multiple
+ * of PAL and NTSC frequencies. The register is undocumented :(
+ * FIXME: figure out the parent and how the divider works.
+ */
+ mult = 1;
+ div = ((val >> TVC_HALFDIV_SHIFT) & TVC_HALFDIV_MASK);
+ dev_dbg(dev, "TVC half divider value = %d\n", div);
+ div += 1;
+ hw = clk_hw_register_fixed_rate(NULL, "tvcdiv", "xtal", 0, 27000000);
+ gemini_clk_data->hws[GEMINI_CLK_TVC] = hw;
+
+ /* FIXME: very unclear what the parent is */
+ hw = gemini_pci_clk_setup("PCI", "xtal", map);
+ gemini_clk_data->hws[GEMINI_CLK_PCI] = hw;
+
+ /* FIXME: very unclear what the parent is */
+ hw = clk_hw_register_fixed_rate(NULL, "uart", "xtal", 0, 48000000);
+ gemini_clk_data->hws[GEMINI_CLK_UART] = hw;
+
+ return 0;
+}
+
+static const struct of_device_id gemini_clk_dt_ids[] = {
+ { .compatible = "cortina,gemini-syscon", },
+ { /* sentinel */ },
+};
+
+static struct platform_driver gemini_clk_driver = {
+ .probe = gemini_clk_probe,
+ .driver = {
+ .name = "gemini-clk",
+ .of_match_table = gemini_clk_dt_ids,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(gemini_clk_driver);
+
+static void __init gemini_cc_init(struct device_node *np)
+{
+ struct regmap *map;
+ struct clk_hw *hw;
+ unsigned long freq;
+ unsigned int mult, div;
+ u32 val;
+ int ret;
+ int i;
+
+ gemini_clk_data = kzalloc(sizeof(*gemini_clk_data) +
+ sizeof(*gemini_clk_data->hws) * GEMINI_NUM_CLKS,
+ GFP_KERNEL);
+ if (!gemini_clk_data)
+ return;
+
+ /*
+	 * This way all clocks fetched before the platform device probes,
+ * except those we assign here for early use, will be deferred.
+ */
+ for (i = 0; i < GEMINI_NUM_CLKS; i++)
+ gemini_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
+
+ map = syscon_node_to_regmap(np);
+ if (IS_ERR(map)) {
+ pr_err("no syscon regmap\n");
+ return;
+ }
+ /*
+ * We check that the regmap works on this very first access,
+	 * but as this is an MMIO-backed regmap, subsequent regmap
+	 * accesses are not going to fail, so we skip error checks from
+	 * this point on.
+ */
+ ret = regmap_read(map, GEMINI_GLOBAL_STATUS, &val);
+ if (ret) {
+ pr_err("failed to read global status register\n");
+ return;
+ }
+
+ /*
+ * XTAL is the crystal oscillator, 60 or 30 MHz selected from
+ * strap pin E6
+ */
+ if (val & PLL_OSC_SEL)
+ freq = 30000000;
+ else
+ freq = 60000000;
+ hw = clk_hw_register_fixed_rate(NULL, "xtal", NULL, 0, freq);
+ pr_debug("main crystal @%lu MHz\n", freq / 1000000);
+
+ /* VCO clock derived from the crystal */
+ mult = 13 + ((val >> AHBSPEED_SHIFT) & AHBSPEED_MASK);
+ div = 2;
+	/* If we run on a 30 MHz crystal we have to multiply by two */
+ if (val & PLL_OSC_SEL)
+ mult *= 2;
+ hw = clk_hw_register_fixed_factor(NULL, "vco", "xtal", 0, mult, div);
+
+ /* The AHB clock is always 1/3 of the VCO */
+ hw = clk_hw_register_fixed_factor(NULL, "ahb", "vco", 0, 1, 3);
+ gemini_clk_data->hws[GEMINI_CLK_AHB] = hw;
+
+ /* The APB clock is always 1/6 of the AHB */
+ hw = clk_hw_register_fixed_factor(NULL, "apb", "ahb", 0, 1, 6);
+ gemini_clk_data->hws[GEMINI_CLK_APB] = hw;
+
+ /* Register the clocks to be accessed by the device tree */
+ gemini_clk_data->num = GEMINI_NUM_CLKS;
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, gemini_clk_data);
+}
+CLK_OF_DECLARE_DRIVER(gemini_cc, "cortina,gemini-syscon", gemini_cc_init);
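A hedged sketch, not part of the driver, recomputing the derived rates from a raw GEMINI_GLOBAL_STATUS value; it mirrors the arithmetic spread across gemini_cc_init() and gemini_clk_probe() above, which the clock framework otherwise performs through the registered fixed-factor clocks. The helper name is invented, and it assumes <linux/math64.h> for div_u64().

/* Editor's sketch: derive xtal, VCO, AHB, APB and CPU rates from the
 * global status register, as the driver above does. */
static void gemini_rates_from_status(u32 val)
{
	static const unsigned int cpu_mult[4] = { 1, 3, 24, 2 };
	static const unsigned int cpu_div[4] = { 1, 2, 13, 1 };
	unsigned int ratio = (val >> CPU_AHB_RATIO_SHIFT) & CPU_AHB_RATIO_MASK;
	unsigned int mult = 13 + ((val >> AHBSPEED_SHIFT) & AHBSPEED_MASK);
	unsigned long xtal, vco, ahb, apb, cpu;

	/* Crystal is 30 or 60 MHz, selected by strap pin (PLL_OSC_SEL) */
	xtal = (val & PLL_OSC_SEL) ? 30000000 : 60000000;
	if (val & PLL_OSC_SEL)
		mult *= 2;		/* 30 MHz crystal doubles the multiplier */

	vco = xtal * mult / 2;		/* VCO derived from the crystal */
	ahb = vco / 3;			/* AHB is always 1/3 of the VCO */
	apb = ahb / 6;			/* APB is always 1/6 of the AHB */
	/* 64-bit intermediate, as the fixed-factor clock core would use */
	cpu = (unsigned long)div_u64((u64)ahb * cpu_mult[ratio],
				     cpu_div[ratio]);

	pr_debug("xtal %lu, vco %lu, ahb %lu, apb %lu, cpu %lu\n",
		 xtal, vco, ahb, apb, cpu);
}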
diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c
index 31f590cea493..7f51c01085ab 100644
--- a/drivers/clk/clk-palmas.c
+++ b/drivers/clk/clk-palmas.c
@@ -229,6 +229,7 @@ static int palmas_clks_init_configure(struct palmas_clock_info *cinfo)
if (ret < 0) {
dev_err(cinfo->dev, "Ext config for %s failed, %d\n",
cinfo->clk_desc->clk_name, ret);
+ clk_unprepare(cinfo->hw.clk);
return ret;
}
}
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index d0bf8b1c67de..f3931e38fac0 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -87,7 +87,7 @@ struct clockgen {
struct device_node *node;
void __iomem *regs;
struct clockgen_chipinfo info; /* mutable copy */
- struct clk *sysclk;
+ struct clk *sysclk, *coreclk;
struct clockgen_pll pll[6];
struct clk *cmux[NUM_CMUX];
struct clk *hwaccel[NUM_HWACCEL];
@@ -904,7 +904,12 @@ static void __init create_muxes(struct clockgen *cg)
static void __init clockgen_init(struct device_node *np);
-/* Legacy nodes may get probed before the parent clockgen node */
+/*
+ * Legacy nodes may get probed before the parent clockgen node.
+ * It is assumed that device trees with legacy nodes will not
+ * contain a "clocks" property -- otherwise the input clocks may
+ * not be initialized at this point.
+ */
static void __init legacy_init_clockgen(struct device_node *np)
{
if (!clockgen.node)
@@ -945,18 +950,13 @@ static struct clk __init
return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}
-static struct clk *sysclk_from_parent(const char *name)
+static struct clk __init *input_clock(const char *name, struct clk *clk)
{
- struct clk *clk;
- const char *parent_name;
-
- clk = of_clk_get(clockgen.node, 0);
- if (IS_ERR(clk))
- return clk;
+ const char *input_name;
/* Register the input clock under the desired name. */
- parent_name = __clk_get_name(clk);
- clk = clk_register_fixed_factor(NULL, name, parent_name,
+ input_name = __clk_get_name(clk);
+ clk = clk_register_fixed_factor(NULL, name, input_name,
0, 1, 1);
if (IS_ERR(clk))
pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
@@ -965,6 +965,29 @@ static struct clk *sysclk_from_parent(const char *name)
return clk;
}
+static struct clk __init *input_clock_by_name(const char *name,
+ const char *dtname)
+{
+ struct clk *clk;
+
+ clk = of_clk_get_by_name(clockgen.node, dtname);
+ if (IS_ERR(clk))
+ return clk;
+
+ return input_clock(name, clk);
+}
+
+static struct clk __init *input_clock_by_index(const char *name, int idx)
+{
+ struct clk *clk;
+
+ clk = of_clk_get(clockgen.node, 0);
+ if (IS_ERR(clk))
+ return clk;
+
+ return input_clock(name, clk);
+}
+
static struct clk * __init create_sysclk(const char *name)
{
struct device_node *sysclk;
@@ -974,7 +997,11 @@ static struct clk * __init create_sysclk(const char *name)
if (!IS_ERR(clk))
return clk;
- clk = sysclk_from_parent(name);
+ clk = input_clock_by_name(name, "sysclk");
+ if (!IS_ERR(clk))
+ return clk;
+
+ clk = input_clock_by_index(name, 0);
if (!IS_ERR(clk))
return clk;
@@ -985,7 +1012,27 @@ static struct clk * __init create_sysclk(const char *name)
return clk;
}
- pr_err("%s: No input clock\n", __func__);
+ pr_err("%s: No input sysclk\n", __func__);
+ return NULL;
+}
+
+static struct clk * __init create_coreclk(const char *name)
+{
+ struct clk *clk;
+
+ clk = input_clock_by_name(name, "coreclk");
+ if (!IS_ERR(clk))
+ return clk;
+
+ /*
+ * This indicates a mix of legacy nodes with the new coreclk
+ * mechanism, which should never happen. If this error occurs,
+ * don't use the wrong input clock just because coreclk isn't
+ * ready yet.
+ */
+ if (WARN_ON(PTR_ERR(clk) == -EPROBE_DEFER))
+ return clk;
+
return NULL;
}
@@ -1008,11 +1055,19 @@ static void __init create_one_pll(struct clockgen *cg, int idx)
u32 __iomem *reg;
u32 mult;
struct clockgen_pll *pll = &cg->pll[idx];
+ const char *input = "cg-sysclk";
int i;
if (!(cg->info.pll_mask & (1 << idx)))
return;
+ if (cg->coreclk && idx != PLATFORM_PLL) {
+ if (IS_ERR(cg->coreclk))
+ return;
+
+ input = "cg-coreclk";
+ }
+
if (cg->info.flags & CG_VER3) {
switch (idx) {
case PLATFORM_PLL:
@@ -1063,7 +1118,7 @@ static void __init create_one_pll(struct clockgen *cg, int idx)
"cg-pll%d-div%d", idx, i + 1);
clk = clk_register_fixed_factor(NULL,
- pll->div[i].name, "cg-sysclk", 0, mult, i + 1);
+ pll->div[i].name, input, 0, mult, i + 1);
if (IS_ERR(clk)) {
pr_err("%s: %s: register failed %ld\n",
__func__, pll->div[i].name, PTR_ERR(clk));
@@ -1200,6 +1255,13 @@ static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
goto bad_args;
clk = pll->div[idx].clk;
break;
+ case 5:
+ if (idx != 0)
+ goto bad_args;
+ clk = cg->coreclk;
+ if (IS_ERR(clk))
+ clk = NULL;
+ break;
default:
goto bad_args;
}
@@ -1311,6 +1373,7 @@ static void __init clockgen_init(struct device_node *np)
clockgen.info.flags |= CG_CMUX_GE_PLAT;
clockgen.sysclk = create_sysclk("cg-sysclk");
+ clockgen.coreclk = create_coreclk("cg-coreclk");
create_plls(&clockgen);
create_muxes(&clockgen);
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 96d37175d0ad..25854722810e 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -71,15 +71,15 @@ static const struct clk_ops scpi_clk_ops = {
};
/* find closest match to given frequency in OPP table */
-static int __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate)
+static long __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate)
{
int idx;
- u32 fmin = 0, fmax = ~0, ftmp;
+ unsigned long fmin = 0, fmax = ~0, ftmp;
const struct scpi_opp *opp = clk->info->opps;
for (idx = 0; idx < clk->info->count; idx++, opp++) {
ftmp = opp->freq;
- if (ftmp >= (u32)rate) {
+ if (ftmp >= rate) {
if (ftmp <= fmax)
fmax = ftmp;
break;
@@ -245,10 +245,12 @@ static int scpi_clk_add(struct device *dev, struct device_node *np,
sclk->id = val;
err = scpi_clk_ops_init(dev, match, sclk, name);
- if (err)
+ if (err) {
dev_err(dev, "failed to register clock '%s'\n", name);
- else
- dev_dbg(dev, "Registered clock '%s'\n", name);
+ return err;
+ }
+
+ dev_dbg(dev, "Registered clock '%s'\n", name);
clk_data->clk[idx] = sclk;
}
diff --git a/drivers/clk/hisilicon/clk-hi3660.c b/drivers/clk/hisilicon/clk-hi3660.c
index 96a9697b06cf..a18258eb89cb 100644
--- a/drivers/clk/hisilicon/clk-hi3660.c
+++ b/drivers/clk/hisilicon/clk-hi3660.c
@@ -20,7 +20,7 @@ static const struct hisi_fixed_rate_clock hi3660_fixed_rate_clks[] = {
{ HI3660_CLK_FLL_SRC, "clk_fll_src", NULL, 0, 128000000, },
{ HI3660_CLK_PPLL0, "clk_ppll0", NULL, 0, 1600000000, },
{ HI3660_CLK_PPLL1, "clk_ppll1", NULL, 0, 1866000000, },
- { HI3660_CLK_PPLL2, "clk_ppll2", NULL, 0, 960000000, },
+ { HI3660_CLK_PPLL2, "clk_ppll2", NULL, 0, 2880000000UL, },
{ HI3660_CLK_PPLL3, "clk_ppll3", NULL, 0, 1290000000, },
{ HI3660_CLK_SCPLL, "clk_scpll", NULL, 0, 245760000, },
{ HI3660_PCLK, "pclk", NULL, 0, 20000000, },
@@ -42,14 +42,19 @@ static const struct hisi_fixed_factor_clock hi3660_crg_fixed_factor_clks[] = {
{ HI3660_CLK_GATE_I2C6, "clk_gate_i2c6", "clk_i2c6_iomcu", 1, 4, 0, },
{ HI3660_CLK_DIV_SYSBUS, "clk_div_sysbus", "clk_mux_sysbus", 1, 7, 0, },
{ HI3660_CLK_DIV_320M, "clk_div_320m", "clk_320m_pll_gt", 1, 5, 0, },
- { HI3660_CLK_DIV_A53, "clk_div_a53hpm", "clk_a53hpm_andgt", 1, 2, 0, },
+ { HI3660_CLK_DIV_A53, "clk_div_a53hpm", "clk_a53hpm_andgt", 1, 6, 0, },
{ HI3660_CLK_GATE_SPI0, "clk_gate_spi0", "clk_ppll0", 1, 8, 0, },
{ HI3660_CLK_GATE_SPI2, "clk_gate_spi2", "clk_ppll0", 1, 8, 0, },
{ HI3660_PCIEPHY_REF, "clk_pciephy_ref", "clk_div_pciephy", 1, 1, 0, },
{ HI3660_CLK_ABB_USB, "clk_abb_usb", "clk_gate_usb_tcxo_en", 1, 1, 0 },
+ { HI3660_VENC_VOLT_HOLD, "venc_volt_hold", "peri_volt_hold", 1, 1, 0, },
+ { HI3660_CLK_FAC_ISP_SNCLK, "clk_isp_snclk_fac", "clk_isp_snclk_angt",
+ 1, 10, 0, },
};
static const struct hisi_gate_clock hi3660_crgctrl_gate_sep_clks[] = {
+ { HI3660_PERI_VOLT_HOLD, "peri_volt_hold", "clkin_sys",
+ CLK_SET_RATE_PARENT, 0x0, 0, 0, },
{ HI3660_HCLK_GATE_SDIO0, "hclk_gate_sdio0", "clk_div_sysbus",
CLK_SET_RATE_PARENT, 0x0, 21, 0, },
{ HI3660_HCLK_GATE_SD, "hclk_gate_sd", "clk_div_sysbus",
@@ -120,6 +125,10 @@ static const struct hisi_gate_clock hi3660_crgctrl_gate_sep_clks[] = {
CLK_SET_RATE_PARENT, 0x20, 27, 0, },
{ HI3660_CLK_GATE_DMAC, "clk_gate_dmac", "clk_div_sysbus",
CLK_SET_RATE_PARENT, 0x30, 1, 0, },
+ { HI3660_CLK_GATE_VENC, "clk_gate_venc", "clk_div_venc",
+ CLK_SET_RATE_PARENT, 0x30, 10, 0, },
+ { HI3660_CLK_GATE_VDEC, "clk_gate_vdec", "clk_div_vdec",
+ CLK_SET_RATE_PARENT, 0x30, 11, 0, },
{ HI3660_PCLK_GATE_DSS, "pclk_gate_dss", "clk_div_cfgbus",
CLK_SET_RATE_PARENT, 0x30, 12, 0, },
{ HI3660_ACLK_GATE_DSS, "aclk_gate_dss", "clk_gate_vivobus",
@@ -148,6 +157,12 @@ static const struct hisi_gate_clock hi3660_crgctrl_gate_sep_clks[] = {
CLK_SET_RATE_PARENT, 0x40, 17, 0, },
{ HI3660_CLK_GATE_SDIO0, "clk_gate_sdio0", "clk_mux_sdio_sys",
CLK_SET_RATE_PARENT, 0x40, 19, 0, },
+ { HI3660_CLK_GATE_ISP_SNCLK0, "clk_gate_isp_snclk0",
+ "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 16, 0, },
+ { HI3660_CLK_GATE_ISP_SNCLK1, "clk_gate_isp_snclk1",
+ "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 17, 0, },
+ { HI3660_CLK_GATE_ISP_SNCLK2, "clk_gate_isp_snclk2",
+ "clk_isp_snclk_mux", CLK_SET_RATE_PARENT, 0x50, 18, 0, },
{ HI3660_CLK_GATE_UFS_SUBSYS, "clk_gate_ufs_subsys", "clk_div_sysbus",
CLK_SET_RATE_PARENT, 0x50, 21, 0, },
{ HI3660_PCLK_GATE_DSI0, "pclk_gate_dsi0", "clk_div_cfgbus",
@@ -171,6 +186,10 @@ static const struct hisi_gate_clock hi3660_crgctrl_gate_clks[] = {
CLK_SET_RATE_PARENT, 0xf0, 7, CLK_GATE_HIWORD_MASK, },
{ HI3660_CLK_ANDGT_EDC0, "clk_andgt_edc0", "clk_mux_edc0",
CLK_SET_RATE_PARENT, 0xf0, 8, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_ANDGT_VDEC, "clk_andgt_vdec", "clk_mux_vdec",
+ CLK_SET_RATE_PARENT, 0xf0, 15, CLK_GATE_HIWORD_MASK, },
+ { HI3660_CLK_ANDGT_VENC, "clk_andgt_venc", "clk_mux_venc",
+ CLK_SET_RATE_PARENT, 0xf4, 0, CLK_GATE_HIWORD_MASK, },
{ HI3660_CLK_GATE_UFSPHY_GT, "clk_gate_ufsphy_gt", "clk_div_ufsperi",
CLK_SET_RATE_PARENT, 0xf4, 1, CLK_GATE_HIWORD_MASK, },
{ HI3660_CLK_ANDGT_MMC, "clk_andgt_mmc", "clk_mux_mmc_pll",
@@ -195,6 +214,8 @@ static const struct hisi_gate_clock hi3660_crgctrl_gate_clks[] = {
CLK_SET_RATE_PARENT, 0xf8, 3, CLK_GATE_HIWORD_MASK, },
{ HI3660_CLK_320M_PLL_GT, "clk_320m_pll_gt", "clk_mux_320m",
CLK_SET_RATE_PARENT, 0xf8, 10, 0, },
+ { HI3660_CLK_ANGT_ISP_SNCLK, "clk_isp_snclk_angt", "clk_div_a53hpm",
+ CLK_SET_RATE_PARENT, 0x108, 2, CLK_GATE_HIWORD_MASK, },
{ HI3660_AUTODIV_EMMC0BUS, "autodiv_emmc0bus", "autodiv_sysbus",
CLK_SET_RATE_PARENT, 0x404, 1, CLK_GATE_HIWORD_MASK, },
{ HI3660_AUTODIV_SYSBUS, "autodiv_sysbus", "clk_div_sysbus",
@@ -206,6 +227,8 @@ static const struct hisi_gate_clock hi3660_crgctrl_gate_clks[] = {
};
static const char *const
+clk_mux_sysbus_p[] = {"clk_ppll1", "clk_ppll0"};
+static const char *const
clk_mux_sdio_sys_p[] = {"clk_factor_mmc", "clk_div_sdio",};
static const char *const
clk_mux_sd_sys_p[] = {"clk_factor_mmc", "clk_div_sd",};
@@ -237,10 +260,14 @@ static const char *const
clk_mux_spi_p[] = {"clkin_sys", "clk_div_spi",};
static const char *const
clk_mux_i2c_p[] = {"clkin_sys", "clk_div_i2c",};
+static const char *const
+clk_mux_venc_p[] = {"clk_ppll0", "clk_ppll1", "clk_ppll3", "clk_ppll3",};
+static const char *const
+clk_mux_isp_snclk_p[] = {"clkin_sys", "clk_isp_snclk_div"};
static const struct hisi_mux_clock hi3660_crgctrl_mux_clks[] = {
- { HI3660_CLK_MUX_SYSBUS, "clk_mux_sysbus", clk_mux_sdio_sys_p,
- ARRAY_SIZE(clk_mux_sdio_sys_p), CLK_SET_RATE_PARENT, 0xac, 0, 1,
+ { HI3660_CLK_MUX_SYSBUS, "clk_mux_sysbus", clk_mux_sysbus_p,
+ ARRAY_SIZE(clk_mux_sysbus_p), CLK_SET_RATE_PARENT, 0xac, 0, 1,
CLK_MUX_HIWORD_MASK, },
{ HI3660_CLK_MUX_UART0, "clk_mux_uart0", clk_mux_uart0_p,
ARRAY_SIZE(clk_mux_uart0_p), CLK_SET_RATE_PARENT, 0xac, 2, 1,
@@ -281,6 +308,12 @@ static const struct hisi_mux_clock hi3660_crgctrl_mux_clks[] = {
{ HI3660_CLK_MUX_SDIO_PLL, "clk_mux_sdio_pll", clk_mux_pll_p,
ARRAY_SIZE(clk_mux_pll_p), CLK_SET_RATE_PARENT, 0xc0, 4, 2,
CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_VENC, "clk_mux_venc", clk_mux_venc_p,
+ ARRAY_SIZE(clk_mux_venc_p), CLK_SET_RATE_PARENT, 0xc8, 11, 2,
+ CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_VDEC, "clk_mux_vdec", clk_mux_pll0123_p,
+ ARRAY_SIZE(clk_mux_pll0123_p), CLK_SET_RATE_PARENT, 0xcc, 5, 2,
+ CLK_MUX_HIWORD_MASK, },
{ HI3660_CLK_MUX_VIVOBUS, "clk_mux_vivobus", clk_mux_pll0123_p,
ARRAY_SIZE(clk_mux_pll0123_p), CLK_SET_RATE_PARENT, 0xd0, 12, 2,
CLK_MUX_HIWORD_MASK, },
@@ -290,6 +323,9 @@ static const struct hisi_mux_clock hi3660_crgctrl_mux_clks[] = {
{ HI3660_CLK_MUX_320M, "clk_mux_320m", clk_mux_pll02p,
ARRAY_SIZE(clk_mux_pll02p), CLK_SET_RATE_PARENT, 0x100, 0, 1,
CLK_MUX_HIWORD_MASK, },
+ { HI3660_CLK_MUX_ISP_SNCLK, "clk_isp_snclk_mux", clk_mux_isp_snclk_p,
+ ARRAY_SIZE(clk_mux_isp_snclk_p), CLK_SET_RATE_PARENT, 0x108, 3, 1,
+ CLK_MUX_HIWORD_MASK, },
{ HI3660_CLK_MUX_IOPERI, "clk_mux_ioperi", clk_mux_ioperi_p,
ARRAY_SIZE(clk_mux_ioperi_p), CLK_SET_RATE_PARENT, 0x108, 10, 1,
CLK_MUX_HIWORD_MASK, },
@@ -316,6 +352,10 @@ static const struct hisi_divider_clock hi3660_crgctrl_divider_clks[] = {
CLK_SET_RATE_PARENT, 0xc0, 8, 6, CLK_DIVIDER_HIWORD_MASK, 0, },
{ HI3660_CLK_DIV_SPI, "clk_div_spi", "clk_andgt_spi",
CLK_SET_RATE_PARENT, 0xc4, 12, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_VENC, "clk_div_venc", "clk_andgt_venc",
+ CLK_SET_RATE_PARENT, 0xc8, 6, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_VDEC, "clk_div_vdec", "clk_andgt_vdec",
+ CLK_SET_RATE_PARENT, 0xcc, 0, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
{ HI3660_CLK_DIV_VIVOBUS, "clk_div_vivobus", "clk_vivobus_andgt",
CLK_SET_RATE_PARENT, 0xd0, 7, 5, CLK_DIVIDER_HIWORD_MASK, 0, },
{ HI3660_CLK_DIV_I2C, "clk_div_i2c", "clk_div_320m",
@@ -332,6 +372,8 @@ static const struct hisi_divider_clock hi3660_crgctrl_divider_clks[] = {
CLK_SET_RATE_PARENT, 0xec, 14, 1, CLK_DIVIDER_HIWORD_MASK, 0, },
{ HI3660_CLK_DIV_AOMM, "clk_div_aomm", "clk_aomm_andgt",
CLK_SET_RATE_PARENT, 0x100, 7, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
+ { HI3660_CLK_DIV_ISP_SNCLK, "clk_isp_snclk_div", "clk_isp_snclk_fac",
+ CLK_SET_RATE_PARENT, 0x108, 0, 2, CLK_DIVIDER_HIWORD_MASK, 0, },
{ HI3660_CLK_DIV_IOPERI, "clk_div_ioperi", "clk_mux_ioperi",
CLK_SET_RATE_PARENT, 0x108, 11, 4, CLK_DIVIDER_HIWORD_MASK, 0, },
};
@@ -427,6 +469,8 @@ static const struct hisi_gate_clock hi3660_iomcu_gate_sep_clks[] = {
CLK_SET_RATE_PARENT, 0x90, 0, 0, },
};
+static struct hisi_clock_data *clk_crgctrl_data;
+
static void hi3660_clk_iomcu_init(struct device_node *np)
{
struct hisi_clock_data *clk_data;
@@ -489,38 +533,64 @@ static void hi3660_clk_sctrl_init(struct device_node *np)
clk_data);
}
-static void hi3660_clk_crgctrl_init(struct device_node *np)
+static void hi3660_clk_crgctrl_early_init(struct device_node *np)
{
- struct hisi_clock_data *clk_data;
int nr = ARRAY_SIZE(hi3660_fixed_rate_clks) +
ARRAY_SIZE(hi3660_crgctrl_gate_sep_clks) +
ARRAY_SIZE(hi3660_crgctrl_gate_clks) +
ARRAY_SIZE(hi3660_crgctrl_mux_clks) +
ARRAY_SIZE(hi3660_crg_fixed_factor_clks) +
ARRAY_SIZE(hi3660_crgctrl_divider_clks);
+ int i;
- clk_data = hisi_clk_init(np, nr);
- if (!clk_data)
+ clk_crgctrl_data = hisi_clk_init(np, nr);
+ if (!clk_crgctrl_data)
return;
+ for (i = 0; i < nr; i++)
+ clk_crgctrl_data->clk_data.clks[i] = ERR_PTR(-EPROBE_DEFER);
+
hisi_clk_register_fixed_rate(hi3660_fixed_rate_clks,
ARRAY_SIZE(hi3660_fixed_rate_clks),
- clk_data);
+ clk_crgctrl_data);
+}
+CLK_OF_DECLARE_DRIVER(hi3660_clk_crgctrl, "hisilicon,hi3660-crgctrl",
+ hi3660_clk_crgctrl_early_init);
+
+static void hi3660_clk_crgctrl_init(struct device_node *np)
+{
+ struct clk **clks;
+ int i;
+
+ if (!clk_crgctrl_data)
+ hi3660_clk_crgctrl_early_init(np);
+
+ /* clk_crgctrl_data initialization failed */
+ if (!clk_crgctrl_data)
+ return;
+
hisi_clk_register_gate_sep(hi3660_crgctrl_gate_sep_clks,
ARRAY_SIZE(hi3660_crgctrl_gate_sep_clks),
- clk_data);
+ clk_crgctrl_data);
hisi_clk_register_gate(hi3660_crgctrl_gate_clks,
ARRAY_SIZE(hi3660_crgctrl_gate_clks),
- clk_data);
+ clk_crgctrl_data);
hisi_clk_register_mux(hi3660_crgctrl_mux_clks,
ARRAY_SIZE(hi3660_crgctrl_mux_clks),
- clk_data);
+ clk_crgctrl_data);
hisi_clk_register_fixed_factor(hi3660_crg_fixed_factor_clks,
ARRAY_SIZE(hi3660_crg_fixed_factor_clks),
- clk_data);
+ clk_crgctrl_data);
hisi_clk_register_divider(hi3660_crgctrl_divider_clks,
ARRAY_SIZE(hi3660_crgctrl_divider_clks),
- clk_data);
+ clk_crgctrl_data);
+
+ clks = clk_crgctrl_data->clk_data.clks;
+ for (i = 0; i < clk_crgctrl_data->clk_data.clk_num; i++) {
+ if (IS_ERR(clks[i]) && PTR_ERR(clks[i]) != -EPROBE_DEFER)
+ pr_err("Failed to register crgctrl clock[%d] err=%ld\n",
+ i, PTR_ERR(clks[i]));
+ }
}
static const struct of_device_id hi3660_clk_match_table[] = {
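A hedged consumer-side sketch, not from the patch, of why the crgctrl clocks are pre-filled with -EPROBE_DEFER in the early init above: until hi3660_clk_crgctrl_init() has registered the remaining clocks, any consumer looking one of them up is simply deferred and retried later. The consumer driver below is invented for illustration and assumes <linux/clk.h> and <linux/platform_device.h>.

/* Editor's sketch: invented consumer deferring until crgctrl is ready. */
static int my_consumer_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* -EPROBE_DEFER until registration */

	return clk_prepare_enable(clk);
}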
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index 2ae151ce623a..4181b6808545 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -285,3 +285,25 @@ static void __init hi6220_clk_power_init(struct device_node *np)
ARRAY_SIZE(hi6220_div_clks_power), clk_data);
}
CLK_OF_DECLARE(hi6220_clk_power, "hisilicon,hi6220-pmctrl", hi6220_clk_power_init);
+
+/* clocks in acpu */
+static const struct hisi_gate_clock hi6220_acpu_sc_gate_sep_clks[] = {
+ { HI6220_ACPU_SFT_AT_S, "sft_at_s", "cs_atb",
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0xc, 11, 0, },
+};
+
+static void __init hi6220_clk_acpu_init(struct device_node *np)
+{
+ struct hisi_clock_data *clk_data;
+ int nr = ARRAY_SIZE(hi6220_acpu_sc_gate_sep_clks);
+
+ clk_data = hisi_clk_init(np, nr);
+ if (!clk_data)
+ return;
+
+ hisi_clk_register_gate_sep(hi6220_acpu_sc_gate_sep_clks,
+ ARRAY_SIZE(hi6220_acpu_sc_gate_sep_clks),
+ clk_data);
+}
+
+CLK_OF_DECLARE(hi6220_clk_acpu, "hisilicon,hi6220-acpu-sctrl", hi6220_clk_acpu_init);
diff --git a/drivers/clk/hisilicon/crg-hi3798cv200.c b/drivers/clk/hisilicon/crg-hi3798cv200.c
index fc8b5bc2d50d..ed8bb5f7507f 100644
--- a/drivers/clk/hisilicon/crg-hi3798cv200.c
+++ b/drivers/clk/hisilicon/crg-hi3798cv200.c
@@ -44,6 +44,9 @@
#define HI3798CV200_ETH_BUS0_CLK 78
#define HI3798CV200_ETH_BUS1_CLK 79
#define HI3798CV200_COMBPHY1_MUX 80
+#define HI3798CV200_FIXED_12M 81
+#define HI3798CV200_FIXED_48M 82
+#define HI3798CV200_FIXED_60M 83
#define HI3798CV200_CRG_NR_CLKS 128
@@ -51,9 +54,12 @@ static const struct hisi_fixed_rate_clock hi3798cv200_fixed_rate_clks[] = {
{ HISTB_OSC_CLK, "clk_osc", NULL, 0, 24000000, },
{ HISTB_APB_CLK, "clk_apb", NULL, 0, 100000000, },
{ HISTB_AHB_CLK, "clk_ahb", NULL, 0, 200000000, },
+ { HI3798CV200_FIXED_12M, "12m", NULL, 0, 12000000, },
{ HI3798CV200_FIXED_24M, "24m", NULL, 0, 24000000, },
{ HI3798CV200_FIXED_25M, "25m", NULL, 0, 25000000, },
+ { HI3798CV200_FIXED_48M, "48m", NULL, 0, 48000000, },
{ HI3798CV200_FIXED_50M, "50m", NULL, 0, 50000000, },
+ { HI3798CV200_FIXED_60M, "60m", NULL, 0, 60000000, },
{ HI3798CV200_FIXED_75M, "75m", NULL, 0, 75000000, },
{ HI3798CV200_FIXED_100M, "100m", NULL, 0, 100000000, },
{ HI3798CV200_FIXED_150M, "150m", NULL, 0, 150000000, },
@@ -134,6 +140,21 @@ static const struct hisi_gate_clock hi3798cv200_gate_clks[] = {
/* COMBPHY1 */
{ HISTB_COMBPHY1_CLK, "clk_combphy1", "combphy1_mux",
CLK_SET_RATE_PARENT, 0x188, 8, 0, },
+ /* USB2 */
+ { HISTB_USB2_BUS_CLK, "clk_u2_bus", "clk_ahb",
+ CLK_SET_RATE_PARENT, 0xb8, 0, 0, },
+ { HISTB_USB2_PHY_CLK, "clk_u2_phy", "60m",
+ CLK_SET_RATE_PARENT, 0xb8, 4, 0, },
+ { HISTB_USB2_12M_CLK, "clk_u2_12m", "12m",
+ CLK_SET_RATE_PARENT, 0xb8, 2, 0 },
+ { HISTB_USB2_48M_CLK, "clk_u2_48m", "48m",
+ CLK_SET_RATE_PARENT, 0xb8, 1, 0 },
+ { HISTB_USB2_UTMI_CLK, "clk_u2_utmi", "60m",
+ CLK_SET_RATE_PARENT, 0xb8, 5, 0 },
+ { HISTB_USB2_PHY1_REF_CLK, "clk_u2_phy1_ref", "24m",
+ CLK_SET_RATE_PARENT, 0xbc, 0, 0 },
+ { HISTB_USB2_PHY2_REF_CLK, "clk_u2_phy2_ref", "24m",
+ CLK_SET_RATE_PARENT, 0xbc, 2, 0 },
};
static struct hisi_clock_data *hi3798cv200_clk_register(
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index 93b03640da9b..3da121826b1b 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -25,6 +25,7 @@
static u32 share_count_sai1;
static u32 share_count_sai2;
static u32 share_count_sai3;
+static u32 share_count_nand;
static struct clk_div_table test_div_table[] = {
{ .val = 3, .div = 1, },
@@ -424,7 +425,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_PLL_VIDEO_MAIN_SRC] = imx_clk_mux("pll_video_main_src", base + 0x130, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
clks[IMX7D_PLL_ARM_MAIN] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll_arm_main", "osc", base + 0x60, 0x7f);
- clks[IMX7D_PLL_DRAM_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV, "pll_dram_main", "osc", base + 0x70, 0x7f);
+ clks[IMX7D_PLL_DRAM_MAIN] = imx_clk_pllv3(IMX_PLLV3_DDR_IMX7, "pll_dram_main", "osc", base + 0x70, 0x7f);
clks[IMX7D_PLL_SYS_MAIN] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll_sys_main", "osc", base + 0xb0, 0x1);
clks[IMX7D_PLL_ENET_MAIN] = imx_clk_pllv3(IMX_PLLV3_ENET_IMX7, "pll_enet_main", "osc", base + 0xe0, 0x0);
clks[IMX7D_PLL_AUDIO_MAIN] = imx_clk_pllv3(IMX_PLLV3_AV, "pll_audio_main", "osc", base + 0xf0, 0x7f);
@@ -748,7 +749,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_ENET2_TIME_ROOT_DIV] = imx_clk_divider2("enet2_time_post_div", "enet2_time_pre_div", base + 0xa880, 0, 6);
clks[IMX7D_ENET_PHY_REF_ROOT_DIV] = imx_clk_divider2("enet_phy_ref_post_div", "enet_phy_ref_pre_div", base + 0xa900, 0, 6);
clks[IMX7D_EIM_ROOT_DIV] = imx_clk_divider2("eim_post_div", "eim_pre_div", base + 0xa980, 0, 6);
- clks[IMX7D_NAND_ROOT_DIV] = imx_clk_divider2("nand_post_div", "nand_pre_div", base + 0xaa00, 0, 6);
+ clks[IMX7D_NAND_ROOT_CLK] = imx_clk_divider2("nand_root_clk", "nand_pre_div", base + 0xaa00, 0, 6);
clks[IMX7D_QSPI_ROOT_DIV] = imx_clk_divider2("qspi_post_div", "qspi_pre_div", base + 0xaa80, 0, 6);
clks[IMX7D_USDHC1_ROOT_DIV] = imx_clk_divider2("usdhc1_post_div", "usdhc1_pre_div", base + 0xab00, 0, 6);
clks[IMX7D_USDHC2_ROOT_DIV] = imx_clk_divider2("usdhc2_post_div", "usdhc2_pre_div", base + 0xab80, 0, 6);
@@ -825,7 +826,8 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_ENET2_TIME_ROOT_CLK] = imx_clk_gate4("enet2_time_root_clk", "enet2_time_post_div", base + 0x4510, 0);
clks[IMX7D_ENET_PHY_REF_ROOT_CLK] = imx_clk_gate4("enet_phy_ref_root_clk", "enet_phy_ref_post_div", base + 0x4520, 0);
clks[IMX7D_EIM_ROOT_CLK] = imx_clk_gate4("eim_root_clk", "eim_post_div", base + 0x4160, 0);
- clks[IMX7D_NAND_ROOT_CLK] = imx_clk_gate4("nand_root_clk", "nand_post_div", base + 0x4140, 0);
+ clks[IMX7D_NAND_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_rawnand_clk", "nand_root_clk", base + 0x4140, 0, &share_count_nand);
+ clks[IMX7D_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_root_clk", base + 0x4140, 0, &share_count_nand);
clks[IMX7D_QSPI_ROOT_CLK] = imx_clk_gate4("qspi_root_clk", "qspi_post_div", base + 0x4150, 0);
clks[IMX7D_USDHC1_ROOT_CLK] = imx_clk_gate4("usdhc1_root_clk", "usdhc1_post_div", base + 0x46c0, 0);
clks[IMX7D_USDHC2_ROOT_CLK] = imx_clk_gate4("usdhc2_root_clk", "usdhc2_post_div", base + 0x46d0, 0);
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index f1099167ba31..9af62ee8f347 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -27,6 +27,7 @@
#define BM_PLL_POWER (0x1 << 12)
#define BM_PLL_LOCK (0x1 << 31)
#define IMX7_ENET_PLL_POWER (0x1 << 5)
+#define IMX7_DDR_PLL_POWER (0x1 << 20)
/**
* struct clk_pllv3 - IMX PLL clock version 3
@@ -451,6 +452,10 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
pll->ref_clock = 500000000;
ops = &clk_pllv3_enet_ops;
break;
+ case IMX_PLLV3_DDR_IMX7:
+ pll->power_bit = IMX7_DDR_PLL_POWER;
+ ops = &clk_pllv3_av_ops;
+ break;
default:
ops = &clk_pllv3_ops;
}
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index e1f5e425db73..d54f0720afba 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -35,6 +35,7 @@ enum imx_pllv3_type {
IMX_PLLV3_ENET,
IMX_PLLV3_ENET_IMX7,
IMX_PLLV3_SYS_VF610,
+ IMX_PLLV3_DDR_IMX7,
};
struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
diff --git a/drivers/clk/keystone/Kconfig b/drivers/clk/keystone/Kconfig
new file mode 100644
index 000000000000..7e9f0176578a
--- /dev/null
+++ b/drivers/clk/keystone/Kconfig
@@ -0,0 +1,16 @@
+config COMMON_CLK_KEYSTONE
+ tristate "Clock drivers for Keystone based SOCs"
+ depends on (ARCH_KEYSTONE || COMPILE_TEST) && OF
+ ---help---
+	  Supports clock drivers for Keystone based SOCs. These SOCs have a
+	  local power sleep control module that gates the clocks to the IPs
+	  and PLLs.
+
+config TI_SCI_CLK
+ tristate "TI System Control Interface clock drivers"
+ depends on (ARCH_KEYSTONE || COMPILE_TEST) && OF
+ depends on TI_SCI_PROTOCOL
+ default ARCH_KEYSTONE
+ ---help---
+ This adds the clock driver support over TI System Control Interface.
+ If you wish to use clock resources from the PMMC firmware, say Y.
+ Otherwise, say N.
diff --git a/drivers/clk/keystone/Makefile b/drivers/clk/keystone/Makefile
index 0477cf63f132..c12593966f9b 100644
--- a/drivers/clk/keystone/Makefile
+++ b/drivers/clk/keystone/Makefile
@@ -1 +1,2 @@
-obj-y += pll.o gate.o
+obj-$(CONFIG_COMMON_CLK_KEYSTONE) += pll.o gate.o
+obj-$(CONFIG_TI_SCI_CLK) += sci-clk.o
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
new file mode 100644
index 000000000000..43b0f2f08df2
--- /dev/null
+++ b/drivers/clk/keystone/sci-clk.c
@@ -0,0 +1,724 @@
+/*
+ * SCI Clock driver for keystone based devices
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
+ * Tero Kristo <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+#define SCI_CLK_SSC_ENABLE BIT(0)
+#define SCI_CLK_ALLOW_FREQ_CHANGE BIT(1)
+#define SCI_CLK_INPUT_TERMINATION BIT(2)
+
+/**
+ * struct sci_clk_data - TI SCI clock data
+ * @dev: device index
+ * @num_clks: number of clocks for this device
+ */
+struct sci_clk_data {
+ u16 dev;
+ u16 num_clks;
+};
+
+/**
+ * struct sci_clk_provider - TI SCI clock provider representation
+ * @sci: Handle to the System Control Interface protocol handler
+ * @ops: Pointer to the SCI ops to be used by the clocks
+ * @dev: Device pointer for the clock provider
+ * @clk_data: Clock data
+ * @clocks: Clocks array for this device
+ */
+struct sci_clk_provider {
+ const struct ti_sci_handle *sci;
+ const struct ti_sci_clk_ops *ops;
+ struct device *dev;
+ const struct sci_clk_data *clk_data;
+ struct clk_hw **clocks;
+};
+
+/**
+ * struct sci_clk - TI SCI clock representation
+ * @hw: Hardware clock cookie for common clock framework
+ * @dev_id: Device index
+ * @clk_id: Clock index
+ * @node: Clocks list link
+ * @provider: Master clock provider
+ * @flags: Flags for the clock
+ */
+struct sci_clk {
+ struct clk_hw hw;
+ u16 dev_id;
+ u8 clk_id;
+ struct list_head node;
+ struct sci_clk_provider *provider;
+ u8 flags;
+};
+
+#define to_sci_clk(_hw) container_of(_hw, struct sci_clk, hw)
+
+/**
+ * sci_clk_prepare - Prepare (enable) a TI SCI clock
+ * @hw: clock to prepare
+ *
+ * Prepares a clock to be actively used. Returns the SCI protocol status.
+ */
+static int sci_clk_prepare(struct clk_hw *hw)
+{
+ struct sci_clk *clk = to_sci_clk(hw);
+ bool enable_ssc = clk->flags & SCI_CLK_SSC_ENABLE;
+ bool allow_freq_change = clk->flags & SCI_CLK_ALLOW_FREQ_CHANGE;
+ bool input_termination = clk->flags & SCI_CLK_INPUT_TERMINATION;
+
+ return clk->provider->ops->get_clock(clk->provider->sci, clk->dev_id,
+ clk->clk_id, enable_ssc,
+ allow_freq_change,
+ input_termination);
+}
+
+/**
+ * sci_clk_unprepare - Un-prepares (disables) a TI SCI clock
+ * @hw: clock to unprepare
+ *
+ * Un-prepares a clock from active state.
+ */
+static void sci_clk_unprepare(struct clk_hw *hw)
+{
+ struct sci_clk *clk = to_sci_clk(hw);
+ int ret;
+
+ ret = clk->provider->ops->put_clock(clk->provider->sci, clk->dev_id,
+ clk->clk_id);
+ if (ret)
+ dev_err(clk->provider->dev,
+ "unprepare failed for dev=%d, clk=%d, ret=%d\n",
+ clk->dev_id, clk->clk_id, ret);
+}
+
+/**
+ * sci_clk_is_prepared - Check if a TI SCI clock is prepared or not
+ * @hw: clock to check status for
+ *
+ * Checks if a clock is prepared (enabled) in hardware. Returns non-zero
+ * value if clock is enabled, zero otherwise.
+ */
+static int sci_clk_is_prepared(struct clk_hw *hw)
+{
+ struct sci_clk *clk = to_sci_clk(hw);
+ bool req_state, current_state;
+ int ret;
+
+ ret = clk->provider->ops->is_on(clk->provider->sci, clk->dev_id,
+ clk->clk_id, &req_state,
+ &current_state);
+ if (ret) {
+ dev_err(clk->provider->dev,
+ "is_prepared failed for dev=%d, clk=%d, ret=%d\n",
+ clk->dev_id, clk->clk_id, ret);
+ return 0;
+ }
+
+ return req_state;
+}
+
+/**
+ * sci_clk_recalc_rate - Get clock rate for a TI SCI clock
+ * @hw: clock to get rate for
+ * @parent_rate: parent rate provided by common clock framework, not used
+ *
+ * Gets the current clock rate of a TI SCI clock. Returns the current
+ * clock rate, or zero in failure.
+ */
+static unsigned long sci_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sci_clk *clk = to_sci_clk(hw);
+ u64 freq;
+ int ret;
+
+ ret = clk->provider->ops->get_freq(clk->provider->sci, clk->dev_id,
+ clk->clk_id, &freq);
+ if (ret) {
+ dev_err(clk->provider->dev,
+ "recalc-rate failed for dev=%d, clk=%d, ret=%d\n",
+ clk->dev_id, clk->clk_id, ret);
+ return 0;
+ }
+
+ return freq;
+}
+
+/**
+ * sci_clk_determine_rate - Determines a clock rate a clock can be set to
+ * @hw: clock to change rate for
+ * @req: requested rate configuration for the clock
+ *
+ * Determines a suitable clock rate and parent for a TI SCI clock.
+ * The parent handling is unused, as generally the parent clock rates
+ * are not known by the kernel; instead these are internally handled
+ * by the firmware. Returns 0 on success, negative error value on failure.
+ */
+static int sci_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct sci_clk *clk = to_sci_clk(hw);
+ int ret;
+ u64 new_rate;
+
+ ret = clk->provider->ops->get_best_match_freq(clk->provider->sci,
+ clk->dev_id,
+ clk->clk_id,
+ req->min_rate,
+ req->rate,
+ req->max_rate,
+ &new_rate);
+ if (ret) {
+ dev_err(clk->provider->dev,
+ "determine-rate failed for dev=%d, clk=%d, ret=%d\n",
+ clk->dev_id, clk->clk_id, ret);
+ return ret;
+ }
+
+ req->rate = new_rate;
+
+ return 0;
+}
+
+/**
+ * sci_clk_set_rate - Set rate for a TI SCI clock
+ * @hw: clock to change rate for
+ * @rate: target rate for the clock
+ * @parent_rate: rate of the clock parent, not used for TI SCI clocks
+ *
+ * Sets a clock frequency for a TI SCI clock. Returns the TI SCI
+ * protocol status.
+ */
+static int sci_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct sci_clk *clk = to_sci_clk(hw);
+
+ return clk->provider->ops->set_freq(clk->provider->sci, clk->dev_id,
+ clk->clk_id, rate, rate, rate);
+}
+
+/**
+ * sci_clk_get_parent - Get the current parent of a TI SCI clock
+ * @hw: clock to get parent for
+ *
+ * Returns the index of the currently selected parent for a TI SCI clock.
+ */
+static u8 sci_clk_get_parent(struct clk_hw *hw)
+{
+ struct sci_clk *clk = to_sci_clk(hw);
+ u8 parent_id;
+ int ret;
+
+ ret = clk->provider->ops->get_parent(clk->provider->sci, clk->dev_id,
+ clk->clk_id, &parent_id);
+ if (ret) {
+ dev_err(clk->provider->dev,
+ "get-parent failed for dev=%d, clk=%d, ret=%d\n",
+ clk->dev_id, clk->clk_id, ret);
+ return 0;
+ }
+
+ return parent_id - clk->clk_id - 1;
+}
+
+/**
+ * sci_clk_set_parent - Set the parent of a TI SCI clock
+ * @hw: clock to set parent for
+ * @index: new parent index for the clock
+ *
+ * Sets the parent of a TI SCI clock. Returns the TI SCI protocol status.
+ */
+static int sci_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct sci_clk *clk = to_sci_clk(hw);
+
+ return clk->provider->ops->set_parent(clk->provider->sci, clk->dev_id,
+ clk->clk_id,
+ index + 1 + clk->clk_id);
+}
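/*
 * Editorial sketch (not part of this patch): the firmware numbers a
 * mux's candidate parents directly after the mux's own clock ID, so
 * common clock framework parent index i maps to firmware clock ID
 * clk_id + 1 + i, and sci_clk_get_parent() applies the inverse. The
 * helpers below only illustrate that arithmetic with made-up IDs.
 */
static u8 example_ccf_index_to_sci_id(u8 clk_id, u8 ccf_index)
{
	/* e.g. mux clk_id 4, CCF index 0 -> firmware clock ID 5 */
	return clk_id + 1 + ccf_index;
}

static u8 example_sci_id_to_ccf_index(u8 clk_id, u8 parent_id)
{
	/* inverse mapping, as used by sci_clk_get_parent() above */
	return parent_id - clk_id - 1;
}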
+
+static const struct clk_ops sci_clk_ops = {
+ .prepare = sci_clk_prepare,
+ .unprepare = sci_clk_unprepare,
+ .is_prepared = sci_clk_is_prepared,
+ .recalc_rate = sci_clk_recalc_rate,
+ .determine_rate = sci_clk_determine_rate,
+ .set_rate = sci_clk_set_rate,
+ .get_parent = sci_clk_get_parent,
+ .set_parent = sci_clk_set_parent,
+};
+
+/**
+ * _sci_clk_build - Gets a handle for an SCI clock
+ * @provider: Handle to SCI clock provider
+ * @dev_id: device ID for the clock to register
+ * @clk_id: clock ID for the clock to register
+ *
+ * Gets a handle to an existing TI SCI hw clock, or builds a new clock
+ * entry and registers it with the common clock framework. Called from
+ * the common clock framework, when a corresponding of_clk_get call is
+ * executed, or recursively from itself when parsing parent clocks.
+ * Returns a pointer to the hw clock struct, or ERR_PTR value in failure.
+ */
+static struct clk_hw *_sci_clk_build(struct sci_clk_provider *provider,
+ u16 dev_id, u8 clk_id)
+{
+ struct clk_init_data init = { NULL };
+ struct sci_clk *sci_clk = NULL;
+ char *name = NULL;
+ char **parent_names = NULL;
+ int i;
+ int ret;
+
+ sci_clk = devm_kzalloc(provider->dev, sizeof(*sci_clk), GFP_KERNEL);
+ if (!sci_clk)
+ return ERR_PTR(-ENOMEM);
+
+ sci_clk->dev_id = dev_id;
+ sci_clk->clk_id = clk_id;
+ sci_clk->provider = provider;
+
+ ret = provider->ops->get_num_parents(provider->sci, dev_id,
+ clk_id,
+ &init.num_parents);
+ if (ret)
+ goto err;
+
+ name = kasprintf(GFP_KERNEL, "%s:%d:%d", dev_name(provider->dev),
+ sci_clk->dev_id, sci_clk->clk_id);
+
+ init.name = name;
+
+ /*
+ * From the kernel's point of view, we only care about a clock's
+ * parents if it has more than one possible parent. In that case it
+ * will have mux functionality; otherwise it acts as a root clock.
+ */
+ if (init.num_parents < 2)
+ init.num_parents = 0;
+
+ if (init.num_parents) {
+ parent_names = kcalloc(init.num_parents, sizeof(char *),
+ GFP_KERNEL);
+
+ if (!parent_names) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < init.num_parents; i++) {
+ char *parent_name;
+
+ parent_name = kasprintf(GFP_KERNEL, "%s:%d:%d",
+ dev_name(provider->dev),
+ sci_clk->dev_id,
+ sci_clk->clk_id + 1 + i);
+ if (!parent_name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ parent_names[i] = parent_name;
+ }
+ init.parent_names = (void *)parent_names;
+ }
+
+ init.ops = &sci_clk_ops;
+ sci_clk->hw.init = &init;
+
+ ret = devm_clk_hw_register(provider->dev, &sci_clk->hw);
+ if (ret)
+ dev_err(provider->dev, "failed clk register with %d\n", ret);
+
+err:
+ if (parent_names) {
+ for (i = 0; i < init.num_parents; i++)
+ kfree(parent_names[i]);
+
+ kfree(parent_names);
+ }
+
+ kfree(name);
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &sci_clk->hw;
+}
+
+/**
+ * sci_clk_get - Xlate function for getting clock handles
+ * @clkspec: device tree clock specifier
+ * @data: pointer to the clock provider
+ *
+ * Xlate function for retrieving TI SCI hw clock handles based on a
+ * device tree clock specifier. Called from the common clock framework,
+ * when a corresponding of_clk_get call is executed. Returns a pointer
+ * to the TI SCI hw clock struct, or ERR_PTR value in failure.
+ */
+static struct clk_hw *sci_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct sci_clk_provider *provider = data;
+ u16 dev_id;
+ u8 clk_id;
+ const struct sci_clk_data *clks = provider->clk_data;
+ struct clk_hw **clocks = provider->clocks;
+
+ if (clkspec->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
+ dev_id = clkspec->args[0];
+ clk_id = clkspec->args[1];
+
+ while (clks->num_clks) {
+ if (clks->dev == dev_id) {
+ if (clk_id >= clks->num_clks)
+ return ERR_PTR(-EINVAL);
+
+ return clocks[clk_id];
+ }
+
+ clks++;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
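/*
 * Editorial sketch (not part of this patch): with the two-cell specifier
 * handled above, a consumer node would reference the provider as e.g.
 * "clocks = <&k2g_clks 0x2c 0>" (uart0, clock 0 in the k2g_clk_data table
 * below) and its driver would use the regular consumer API. Device and
 * clock references here are hypothetical.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_consumer_enable(struct device *dev)
{
	struct clk *fck;
	int ret;

	fck = devm_clk_get(dev, NULL);	/* first entry of the "clocks" property */
	if (IS_ERR(fck))
		return PTR_ERR(fck);

	ret = clk_prepare_enable(fck);	/* .prepare lands in sci_clk_prepare() */
	if (ret)
		return ret;

	dev_info(dev, "functional clock rate: %lu Hz\n", clk_get_rate(fck));
	return 0;
}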
+
+static int ti_sci_init_clocks(struct sci_clk_provider *p)
+{
+ const struct sci_clk_data *data = p->clk_data;
+ struct clk_hw *hw;
+ int i;
+
+ while (data->num_clks) {
+ p->clocks = devm_kcalloc(p->dev, data->num_clks,
+ sizeof(struct sci_clk),
+ GFP_KERNEL);
+ if (!p->clocks)
+ return -ENOMEM;
+
+ for (i = 0; i < data->num_clks; i++) {
+ hw = _sci_clk_build(p, data->dev, i);
+ if (!IS_ERR(hw)) {
+ p->clocks[i] = hw;
+ continue;
+ }
+
+ /* Skip any holes in the clock lists */
+ if (PTR_ERR(hw) == -ENODEV)
+ continue;
+
+ return PTR_ERR(hw);
+ }
+ data++;
+ }
+
+ return 0;
+}
+
+static const struct sci_clk_data k2g_clk_data[] = {
+ /* pmmc */
+ { .dev = 0x0, .num_clks = 4 },
+
+ /* mlb0 */
+ { .dev = 0x1, .num_clks = 5 },
+
+ /* dss0 */
+ { .dev = 0x2, .num_clks = 2 },
+
+ /* mcbsp0 */
+ { .dev = 0x3, .num_clks = 8 },
+
+ /* mcasp0 */
+ { .dev = 0x4, .num_clks = 8 },
+
+ /* mcasp1 */
+ { .dev = 0x5, .num_clks = 8 },
+
+ /* mcasp2 */
+ { .dev = 0x6, .num_clks = 8 },
+
+ /* dcan0 */
+ { .dev = 0x8, .num_clks = 2 },
+
+ /* dcan1 */
+ { .dev = 0x9, .num_clks = 2 },
+
+ /* emif0 */
+ { .dev = 0xa, .num_clks = 6 },
+
+ /* mmchs0 */
+ { .dev = 0xb, .num_clks = 3 },
+
+ /* mmchs1 */
+ { .dev = 0xc, .num_clks = 3 },
+
+ /* gpmc0 */
+ { .dev = 0xd, .num_clks = 1 },
+
+ /* elm0 */
+ { .dev = 0xe, .num_clks = 1 },
+
+ /* spi0 */
+ { .dev = 0x10, .num_clks = 1 },
+
+ /* spi1 */
+ { .dev = 0x11, .num_clks = 1 },
+
+ /* spi2 */
+ { .dev = 0x12, .num_clks = 1 },
+
+ /* spi3 */
+ { .dev = 0x13, .num_clks = 1 },
+
+ /* icss0 */
+ { .dev = 0x14, .num_clks = 6 },
+
+ /* icss1 */
+ { .dev = 0x15, .num_clks = 6 },
+
+ /* usb0 */
+ { .dev = 0x16, .num_clks = 7 },
+
+ /* usb1 */
+ { .dev = 0x17, .num_clks = 7 },
+
+ /* nss0 */
+ { .dev = 0x18, .num_clks = 14 },
+
+ /* pcie0 */
+ { .dev = 0x19, .num_clks = 1 },
+
+ /* gpio0 */
+ { .dev = 0x1b, .num_clks = 1 },
+
+ /* gpio1 */
+ { .dev = 0x1c, .num_clks = 1 },
+
+ /* timer64_0 */
+ { .dev = 0x1d, .num_clks = 9 },
+
+ /* timer64_1 */
+ { .dev = 0x1e, .num_clks = 9 },
+
+ /* timer64_2 */
+ { .dev = 0x1f, .num_clks = 9 },
+
+ /* timer64_3 */
+ { .dev = 0x20, .num_clks = 9 },
+
+ /* timer64_4 */
+ { .dev = 0x21, .num_clks = 9 },
+
+ /* timer64_5 */
+ { .dev = 0x22, .num_clks = 9 },
+
+ /* timer64_6 */
+ { .dev = 0x23, .num_clks = 9 },
+
+ /* msgmgr0 */
+ { .dev = 0x25, .num_clks = 1 },
+
+ /* bootcfg0 */
+ { .dev = 0x26, .num_clks = 1 },
+
+ /* arm_bootrom0 */
+ { .dev = 0x27, .num_clks = 1 },
+
+ /* dsp_bootrom0 */
+ { .dev = 0x29, .num_clks = 1 },
+
+ /* debugss0 */
+ { .dev = 0x2b, .num_clks = 8 },
+
+ /* uart0 */
+ { .dev = 0x2c, .num_clks = 1 },
+
+ /* uart1 */
+ { .dev = 0x2d, .num_clks = 1 },
+
+ /* uart2 */
+ { .dev = 0x2e, .num_clks = 1 },
+
+ /* ehrpwm0 */
+ { .dev = 0x2f, .num_clks = 1 },
+
+ /* ehrpwm1 */
+ { .dev = 0x30, .num_clks = 1 },
+
+ /* ehrpwm2 */
+ { .dev = 0x31, .num_clks = 1 },
+
+ /* ehrpwm3 */
+ { .dev = 0x32, .num_clks = 1 },
+
+ /* ehrpwm4 */
+ { .dev = 0x33, .num_clks = 1 },
+
+ /* ehrpwm5 */
+ { .dev = 0x34, .num_clks = 1 },
+
+ /* eqep0 */
+ { .dev = 0x35, .num_clks = 1 },
+
+ /* eqep1 */
+ { .dev = 0x36, .num_clks = 1 },
+
+ /* eqep2 */
+ { .dev = 0x37, .num_clks = 1 },
+
+ /* ecap0 */
+ { .dev = 0x38, .num_clks = 1 },
+
+ /* ecap1 */
+ { .dev = 0x39, .num_clks = 1 },
+
+ /* i2c0 */
+ { .dev = 0x3a, .num_clks = 1 },
+
+ /* i2c1 */
+ { .dev = 0x3b, .num_clks = 1 },
+
+ /* i2c2 */
+ { .dev = 0x3c, .num_clks = 1 },
+
+ /* edma0 */
+ { .dev = 0x3f, .num_clks = 2 },
+
+ /* semaphore0 */
+ { .dev = 0x40, .num_clks = 1 },
+
+ /* intc0 */
+ { .dev = 0x41, .num_clks = 1 },
+
+ /* gic0 */
+ { .dev = 0x42, .num_clks = 1 },
+
+ /* qspi0 */
+ { .dev = 0x43, .num_clks = 5 },
+
+ /* arm_64b_counter0 */
+ { .dev = 0x44, .num_clks = 2 },
+
+ /* tetris0 */
+ { .dev = 0x45, .num_clks = 2 },
+
+ /* cgem0 */
+ { .dev = 0x46, .num_clks = 2 },
+
+ /* msmc0 */
+ { .dev = 0x47, .num_clks = 1 },
+
+ /* cbass0 */
+ { .dev = 0x49, .num_clks = 1 },
+
+ /* board0 */
+ { .dev = 0x4c, .num_clks = 36 },
+
+ /* edma1 */
+ { .dev = 0x4f, .num_clks = 2 },
+ { .num_clks = 0 },
+};
+
+static const struct of_device_id ti_sci_clk_of_match[] = {
+ { .compatible = "ti,k2g-sci-clk", .data = &k2g_clk_data },
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ti_sci_clk_of_match);
+
+/**
+ * ti_sci_clk_probe - Probe function for the TI SCI clock driver
+ * @pdev: platform device pointer to be probed
+ *
+ * Probes the TI SCI clock device. Allocates a new clock provider
+ * and registers it with the common clock framework. Also applies
+ * any required flags to the identified clocks via clock lists
+ * supplied from DT. Returns 0 for success, negative error value
+ * for failure.
+ */
+static int ti_sci_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct sci_clk_provider *provider;
+ const struct ti_sci_handle *handle;
+ const struct sci_clk_data *data;
+ int ret;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ handle = devm_ti_sci_get_handle(dev);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ provider = devm_kzalloc(dev, sizeof(*provider), GFP_KERNEL);
+ if (!provider)
+ return -ENOMEM;
+
+ provider->clk_data = data;
+
+ provider->sci = handle;
+ provider->ops = &handle->ops.clk_ops;
+ provider->dev = dev;
+
+ ret = ti_sci_init_clocks(provider);
+ if (ret) {
+ pr_err("ti-sci-init-clocks failed.\n");
+ return ret;
+ }
+
+ return of_clk_add_hw_provider(np, sci_clk_get, provider);
+}
+
+/**
+ * ti_sci_clk_remove - Remove TI SCI clock device
+ * @pdev: platform device pointer for the device to be removed
+ *
+ * Removes the TI SCI device. Unregisters the clock provider registered
+ * via common clock framework. Any memory allocated for the device will
+ * be freed silently via the devm framework. Returns 0 always.
+ */
+static int ti_sci_clk_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+
+ return 0;
+}
+
+static struct platform_driver ti_sci_clk_driver = {
+ .probe = ti_sci_clk_probe,
+ .remove = ti_sci_clk_remove,
+ .driver = {
+ .name = "ti-sci-clk",
+ .of_match_table = of_match_ptr(ti_sci_clk_of_match),
+ },
+};
+module_platform_driver(ti_sci_clk_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI System Control Interface (SCI) Clock driver");
+MODULE_AUTHOR("Tero Kristo");
+MODULE_ALIAS("platform:ti-sci-clk");
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 5c3afb86b9ec..2a755b5fb51b 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o
+obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o
obj-$(CONFIG_RESET_CONTROLLER) += reset.o
obj-$(CONFIG_COMMON_CLK_MT6797) += clk-mt6797.o
obj-$(CONFIG_COMMON_CLK_MT6797_IMGSYS) += clk-mt6797-img.o
diff --git a/drivers/clk/mediatek/clk-cpumux.c b/drivers/clk/mediatek/clk-cpumux.c
new file mode 100644
index 000000000000..edd8e6918050
--- /dev/null
+++ b/drivers/clk/mediatek/clk-cpumux.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2015 Linaro Ltd.
+ * Author: Pi-Cheng Chen <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/slab.h>
+
+#include "clk-mtk.h"
+#include "clk-cpumux.h"
+
+static inline struct mtk_clk_cpumux *to_mtk_clk_cpumux(struct clk_hw *_hw)
+{
+ return container_of(_hw, struct mtk_clk_cpumux, hw);
+}
+
+static u8 clk_cpumux_get_parent(struct clk_hw *hw)
+{
+ struct mtk_clk_cpumux *mux = to_mtk_clk_cpumux(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+ unsigned int val;
+
+ regmap_read(mux->regmap, mux->reg, &val);
+
+ val >>= mux->shift;
+ val &= mux->mask;
+
+ if (val >= num_parents)
+ return -EINVAL;
+
+ return val;
+}
+
+static int clk_cpumux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct mtk_clk_cpumux *mux = to_mtk_clk_cpumux(hw);
+ u32 mask, val;
+
+ val = index << mux->shift;
+ mask = mux->mask << mux->shift;
+
+ return regmap_update_bits(mux->regmap, mux->reg, mask, val);
+}
+
+static const struct clk_ops clk_cpumux_ops = {
+ .get_parent = clk_cpumux_get_parent,
+ .set_parent = clk_cpumux_set_parent,
+};
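/*
 * Editorial sketch (not part of this patch): the CPU mux is a plain
 * shifted bitfield in a syscon register. For the MT8173 "infra_ca57_sel"
 * mux registered further down in this series (shift 2, width 2), the mask
 * is 0x3 and set_parent rewrites bits [3:2] only. The helper below just
 * illustrates that read-modify-write on a register value.
 */
static unsigned int example_cpumux_field_update(unsigned int regval,
						unsigned int shift,
						unsigned int width,
						unsigned int index)
{
	unsigned int mask = ((1U << width) - 1) << shift;

	return (regval & ~mask) | ((index << shift) & mask);
}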
+
+static struct clk __init *
+mtk_clk_register_cpumux(const struct mtk_composite *mux,
+ struct regmap *regmap)
+{
+ struct mtk_clk_cpumux *cpumux;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ cpumux = kzalloc(sizeof(*cpumux), GFP_KERNEL);
+ if (!cpumux)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = mux->name;
+ init.ops = &clk_cpumux_ops;
+ init.parent_names = mux->parent_names;
+ init.num_parents = mux->num_parents;
+ init.flags = mux->flags;
+
+ cpumux->reg = mux->mux_reg;
+ cpumux->shift = mux->mux_shift;
+ cpumux->mask = BIT(mux->mux_width) - 1;
+ cpumux->regmap = regmap;
+ cpumux->hw.init = &init;
+
+ clk = clk_register(NULL, &cpumux->hw);
+ if (IS_ERR(clk))
+ kfree(cpumux);
+
+ return clk;
+}
+
+int __init mtk_clk_register_cpumuxes(struct device_node *node,
+ const struct mtk_composite *clks, int num,
+ struct clk_onecell_data *clk_data)
+{
+ int i;
+ struct clk *clk;
+ struct regmap *regmap;
+
+ regmap = syscon_node_to_regmap(node);
+ if (IS_ERR(regmap)) {
+ pr_err("Cannot find regmap for %s: %ld\n", node->full_name,
+ PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ for (i = 0; i < num; i++) {
+ const struct mtk_composite *mux = &clks[i];
+
+ clk = mtk_clk_register_cpumux(mux, regmap);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to register clk %s: %ld\n",
+ mux->name, PTR_ERR(clk));
+ continue;
+ }
+
+ clk_data->clks[mux->id] = clk;
+ }
+
+ return 0;
+}
diff --git a/drivers/clk/mediatek/clk-cpumux.h b/drivers/clk/mediatek/clk-cpumux.h
new file mode 100644
index 000000000000..dddaad57454d
--- /dev/null
+++ b/drivers/clk/mediatek/clk-cpumux.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2015 Linaro Ltd.
+ * Author: Pi-Cheng Chen <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DRV_CLK_CPUMUX_H
+#define __DRV_CLK_CPUMUX_H
+
+struct mtk_clk_cpumux {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u32 reg;
+ u32 mask;
+ u8 shift;
+};
+
+int mtk_clk_register_cpumuxes(struct device_node *node,
+ const struct mtk_composite *clks, int num,
+ struct clk_onecell_data *clk_data);
+
+#endif /* __DRV_CLK_CPUMUX_H */
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
index 6f26e6a37a6b..9598889f972b 100644
--- a/drivers/clk/mediatek/clk-mt2701.c
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -20,6 +20,7 @@
#include "clk-mtk.h"
#include "clk-gate.h"
+#include "clk-cpumux.h"
#include <dt-bindings/clock/mt2701-clk.h>
@@ -493,6 +494,10 @@ static const char * const cpu_parents[] = {
"mmpll"
};
+static const struct mtk_composite cpu_muxes[] __initconst = {
+ MUX(CLK_INFRA_CPUSEL, "infra_cpu_sel", cpu_parents, 0x0000, 2, 2),
+};
+
static const struct mtk_composite top_muxes[] = {
MUX_GATE_FLAGS(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
0x0040, 0, 3, 7, CLK_IS_CRITICAL),
@@ -759,6 +764,9 @@ static void mtk_infrasys_init_early(struct device_node *node)
mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs),
infra_clk_data);
+ mtk_clk_register_cpumuxes(node, cpu_muxes, ARRAY_SIZE(cpu_muxes),
+ infra_clk_data);
+
r = of_clk_add_provider(node, of_clk_src_onecell_get, infra_clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 0ac3aee87726..96c292c3e440 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -18,6 +18,7 @@
#include "clk-mtk.h"
#include "clk-gate.h"
+#include "clk-cpumux.h"
#include <dt-bindings/clock/mt8173-clk.h>
@@ -525,6 +526,25 @@ static const char * const i2s3_b_ck_parents[] __initconst = {
"apll2_div5"
};
+static const char * const ca53_parents[] __initconst = {
+ "clk26m",
+ "armca7pll",
+ "mainpll",
+ "univpll"
+};
+
+static const char * const ca57_parents[] __initconst = {
+ "clk26m",
+ "armca15pll",
+ "mainpll",
+ "univpll"
+};
+
+static const struct mtk_composite cpu_muxes[] __initconst = {
+ MUX(CLK_INFRA_CA53SEL, "infra_ca53_sel", ca53_parents, 0x0000, 0, 2),
+ MUX(CLK_INFRA_CA57SEL, "infra_ca57_sel", ca57_parents, 0x0000, 2, 2),
+};
+
static const struct mtk_composite top_muxes[] __initconst = {
/* CLK_CFG_0 */
MUX(CLK_TOP_AXI_SEL, "axi_sel", axi_parents, 0x0040, 0, 3),
@@ -948,6 +968,9 @@ static void __init mtk_infrasys_init(struct device_node *node)
clk_data);
mtk_clk_register_factors(infra_divs, ARRAY_SIZE(infra_divs), clk_data);
+ mtk_clk_register_cpumuxes(node, cpu_muxes, ARRAY_SIZE(cpu_muxes),
+ clk_data);
+
r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 2f29ee1a4d00..5588f75a8414 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -7,9 +7,9 @@ config COMMON_CLK_MESON8B
bool
depends on COMMON_CLK_AMLOGIC
help
- Support for the clock controller on AmLogic S805 devices, aka
- meson8b. Say Y if you want peripherals and CPU frequency scaling to
- work.
+ Support for the clock controller on AmLogic S802 (Meson8),
+ S805 (Meson8b) and S812 (Meson8m2) devices. Say Y if you
+ want peripherals and CPU frequency scaling to work.
config COMMON_CLK_GXBB
bool
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index ad5f027af1a2..a897ea45327c 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -278,20 +278,6 @@ static const struct pll_rate_table gxl_gp0_pll_rate_table[] = {
{ /* sentinel */ },
};
-static const struct clk_div_table cpu_div_table[] = {
- { .val = 1, .div = 1 },
- { .val = 2, .div = 2 },
- { .val = 3, .div = 3 },
- { .val = 2, .div = 4 },
- { .val = 3, .div = 6 },
- { .val = 4, .div = 8 },
- { .val = 5, .div = 10 },
- { .val = 6, .div = 12 },
- { .val = 7, .div = 14 },
- { .val = 8, .div = 16 },
- { /* sentinel */ },
-};
-
static struct meson_clk_pll gxbb_fixed_pll = {
.m = {
.reg_off = HHI_MPLL_CNTL,
@@ -612,23 +598,16 @@ static struct meson_clk_mpll gxbb_mpll2 = {
};
/*
- * FIXME cpu clocks and the legacy composite clocks (e.g. clk81) are both PLL
- * post-dividers and should be modeled with their respective PLLs via the
- * forthcoming coordinated clock rates feature
+ * FIXME The legacy composite clocks (e.g. clk81) are PLL post-dividers
+ * and should be modeled with their respective PLLs via the forthcoming
+ * coordinated clock rates feature
*/
-static struct meson_clk_cpu gxbb_cpu_clk = {
- .reg_off = HHI_SYS_CPU_CLK_CNTL1,
- .div_table = cpu_div_table,
- .clk_nb.notifier_call = meson_clk_cpu_notifier_cb,
- .hw.init = &(struct clk_init_data){
- .name = "cpu_clk",
- .ops = &meson_clk_cpu_ops,
- .parent_names = (const char *[]){ "sys_pll" },
- .num_parents = 1,
- },
-};
-static u32 mux_table_clk81[] = { 6, 5, 7 };
+static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
+static const char * const clk81_parent_names[] = {
+ "xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
+ "fclk_div3", "fclk_div5"
+};
static struct clk_mux gxbb_mpeg_clk_sel = {
.reg = (void *)HHI_MPEG_CLK_CNTL,
@@ -641,13 +620,12 @@ static struct clk_mux gxbb_mpeg_clk_sel = {
.name = "mpeg_clk_sel",
.ops = &clk_mux_ro_ops,
/*
- * FIXME bits 14:12 selects from 8 possible parents:
+ * bits 14:12 select from 8 possible parents:
* xtal, 1'b0 (wtf), fclk_div7, mpll_clkout1, mpll_clkout2,
* fclk_div4, fclk_div3, fclk_div5
*/
- .parent_names = (const char *[]){ "fclk_div3", "fclk_div4",
- "fclk_div5" },
- .num_parents = 3,
+ .parent_names = clk81_parent_names,
+ .num_parents = ARRAY_SIZE(clk81_parent_names),
.flags = (CLK_SET_RATE_NO_REPARENT | CLK_IGNORE_UNUSED),
},
};
@@ -676,7 +654,7 @@ static struct clk_gate gxbb_clk81 = {
.ops = &clk_gate_ops,
.parent_names = (const char *[]){ "mpeg_clk_div" },
.num_parents = 1,
- .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED | CLK_IS_CRITICAL),
+ .flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
},
};
@@ -726,7 +704,7 @@ static struct clk_gate gxbb_sar_adc_clk = {
*/
static u32 mux_table_mali_0_1[] = {0, 1, 2, 3, 4, 5, 6, 7};
-static const char *gxbb_mali_0_1_parent_names[] = {
+static const char * const gxbb_mali_0_1_parent_names[] = {
"xtal", "gp0_pll", "mpll2", "mpll1", "fclk_div7",
"fclk_div4", "fclk_div3", "fclk_div5"
};
@@ -826,7 +804,7 @@ static struct clk_gate gxbb_mali_1 = {
};
static u32 mux_table_mali[] = {0, 1};
-static const char *gxbb_mali_parent_names[] = {
+static const char * const gxbb_mali_parent_names[] = {
"mali_0", "mali_1"
};
@@ -951,6 +929,51 @@ static struct clk_mux gxbb_cts_i958 = {
},
};
+static struct clk_divider gxbb_32k_clk_div = {
+ .reg = (void *)HHI_32K_CLK_CNTL,
+ .shift = 0,
+ .width = 14,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "32k_clk_div",
+ .ops = &clk_divider_ops,
+ .parent_names = (const char *[]){ "32k_clk_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST,
+ },
+};
+
+static struct clk_gate gxbb_32k_clk = {
+ .reg = (void *)HHI_32K_CLK_CNTL,
+ .bit_idx = 15,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "32k_clk",
+ .ops = &clk_gate_ops,
+ .parent_names = (const char *[]){ "32k_clk_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const char * const gxbb_32k_clk_parent_names[] = {
+ "xtal", "cts_slow_oscin", "fclk_div3", "fclk_div5"
+};
+
+static struct clk_mux gxbb_32k_clk_sel = {
+ .reg = (void *)HHI_32K_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 16,
+ .lock = &clk_lock,
+ .hw.init = &(struct clk_init_data){
+ .name = "32k_clk_sel",
+ .ops = &clk_mux_ops,
+ .parent_names = gxbb_32k_clk_parent_names,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
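/*
 * Editorial note (not part of this patch): the new 32k path is a
 * sel -> div -> gate chain. Assuming the usual 24 MHz GXBB "xtal" as the
 * selected parent, the 14-bit divider gets close to a 32.768 kHz output:
 * 24000000 / 732 = 32786 Hz. The helper below only illustrates that
 * arithmetic; the real rate is computed by the generic clk_divider code.
 */
static unsigned long example_32k_div_rate(unsigned long parent_rate,
					  unsigned int div)
{
	return parent_rate / div;	/* e.g. 24000000 / 732 -> 32786 Hz */
}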
+
/* Everything Else (EE) domain gates */
static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0);
static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1);
@@ -1045,7 +1068,6 @@ static MESON_GATE(gxbb_ao_i2c, HHI_GCLK_AO, 4);
static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
.hws = {
[CLKID_SYS_PLL] = &gxbb_sys_pll.hw,
- [CLKID_CPUCLK] = &gxbb_cpu_clk.hw,
[CLKID_HDMI_PLL] = &gxbb_hdmi_pll.hw,
[CLKID_FIXED_PLL] = &gxbb_fixed_pll.hw,
[CLKID_FCLK_DIV2] = &gxbb_fclk_div2.hw,
@@ -1158,6 +1180,9 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
[CLKID_CTS_MCLK_I958_SEL] = &gxbb_cts_mclk_i958_sel.hw,
[CLKID_CTS_MCLK_I958_DIV] = &gxbb_cts_mclk_i958_div.hw,
[CLKID_CTS_I958] = &gxbb_cts_i958.hw,
+ [CLKID_32K_CLK] = &gxbb_32k_clk.hw,
+ [CLKID_32K_CLK_SEL] = &gxbb_32k_clk_sel.hw,
+ [CLKID_32K_CLK_DIV] = &gxbb_32k_clk_div.hw,
},
.num = NR_CLKS,
};
@@ -1165,7 +1190,6 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
static struct clk_hw_onecell_data gxl_hw_onecell_data = {
.hws = {
[CLKID_SYS_PLL] = &gxbb_sys_pll.hw,
- [CLKID_CPUCLK] = &gxbb_cpu_clk.hw,
[CLKID_HDMI_PLL] = &gxbb_hdmi_pll.hw,
[CLKID_FIXED_PLL] = &gxbb_fixed_pll.hw,
[CLKID_FCLK_DIV2] = &gxbb_fclk_div2.hw,
@@ -1278,6 +1302,9 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
[CLKID_CTS_MCLK_I958_SEL] = &gxbb_cts_mclk_i958_sel.hw,
[CLKID_CTS_MCLK_I958_DIV] = &gxbb_cts_mclk_i958_div.hw,
[CLKID_CTS_I958] = &gxbb_cts_i958.hw,
+ [CLKID_32K_CLK] = &gxbb_32k_clk.hw,
+ [CLKID_32K_CLK_SEL] = &gxbb_32k_clk_sel.hw,
+ [CLKID_32K_CLK_DIV] = &gxbb_32k_clk_div.hw,
},
.num = NR_CLKS,
};
@@ -1392,6 +1419,7 @@ static struct clk_gate *const gxbb_clk_gates[] = {
&gxbb_mali_1,
&gxbb_cts_amclk,
&gxbb_cts_mclk_i958,
+ &gxbb_32k_clk,
};
static struct clk_mux *const gxbb_clk_muxes[] = {
@@ -1403,6 +1431,7 @@ static struct clk_mux *const gxbb_clk_muxes[] = {
&gxbb_cts_amclk_sel,
&gxbb_cts_mclk_i958_sel,
&gxbb_cts_i958,
+ &gxbb_32k_clk_sel,
};
static struct clk_divider *const gxbb_clk_dividers[] = {
@@ -1411,6 +1440,7 @@ static struct clk_divider *const gxbb_clk_dividers[] = {
&gxbb_mali_0_div,
&gxbb_mali_1_div,
&gxbb_cts_mclk_i958_div,
+ &gxbb_32k_clk_div,
};
static struct meson_clk_audio_divider *const gxbb_audio_dividers[] = {
@@ -1430,7 +1460,6 @@ struct clkc_data {
unsigned int clk_dividers_count;
struct meson_clk_audio_divider *const *clk_audio_dividers;
unsigned int clk_audio_dividers_count;
- struct meson_clk_cpu *cpu_clk;
struct clk_hw_onecell_data *hw_onecell_data;
};
@@ -1447,7 +1476,6 @@ static const struct clkc_data gxbb_clkc_data = {
.clk_dividers_count = ARRAY_SIZE(gxbb_clk_dividers),
.clk_audio_dividers = gxbb_audio_dividers,
.clk_audio_dividers_count = ARRAY_SIZE(gxbb_audio_dividers),
- .cpu_clk = &gxbb_cpu_clk,
.hw_onecell_data = &gxbb_hw_onecell_data,
};
@@ -1464,7 +1492,6 @@ static const struct clkc_data gxl_clkc_data = {
.clk_dividers_count = ARRAY_SIZE(gxbb_clk_dividers),
.clk_audio_dividers = gxbb_audio_dividers,
.clk_audio_dividers_count = ARRAY_SIZE(gxbb_audio_dividers),
- .cpu_clk = &gxbb_cpu_clk,
.hw_onecell_data = &gxl_hw_onecell_data,
};
@@ -1479,8 +1506,6 @@ static int gxbb_clkc_probe(struct platform_device *pdev)
const struct clkc_data *clkc_data;
void __iomem *clk_base;
int ret, clkid, i;
- struct clk_hw *parent_hw;
- struct clk *parent_clk;
struct device *dev = &pdev->dev;
clkc_data = of_device_get_match_data(&pdev->dev);
@@ -1502,9 +1527,6 @@ static int gxbb_clkc_probe(struct platform_device *pdev)
for (i = 0; i < clkc_data->clk_mplls_count; i++)
clkc_data->clk_mplls[i]->base = clk_base;
- /* Populate the base address for CPU clk */
- clkc_data->cpu_clk->base = clk_base;
-
/* Populate base address for gates */
for (i = 0; i < clkc_data->clk_gates_count; i++)
clkc_data->clk_gates[i]->reg = clk_base +
@@ -1538,29 +1560,6 @@ static int gxbb_clkc_probe(struct platform_device *pdev)
goto iounmap;
}
- /*
- * Register CPU clk notifier
- *
- * FIXME this is wrong for a lot of reasons. First, the muxes should be
- * struct clk_hw objects. Second, we shouldn't program the muxes in
- * notifier handlers. The tricky programming sequence will be handled
- * by the forthcoming coordinated clock rates mechanism once that
- * feature is released.
- *
- * Furthermore, looking up the parent this way is terrible. At some
- * point we will stop allocating a default struct clk when registering
- * a new clk_hw, and this hack will no longer work. Releasing the ccr
- * feature before that time solves the problem :-)
- */
- parent_hw = clk_hw_get_parent(&clkc_data->cpu_clk->hw);
- parent_clk = parent_hw->clk;
- ret = clk_notifier_register(parent_clk, &clkc_data->cpu_clk->clk_nb);
- if (ret) {
- pr_err("%s: failed to register clock notifier for cpu_clk\n",
- __func__);
- goto iounmap;
- }
-
return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
clkc_data->hw_onecell_data);
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index 4d04f4a3cd59..d63e77e8433d 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -171,7 +171,7 @@
* to be exposed to client nodes in DT: include/dt-bindings/clock/gxbb-clkc.h
*/
#define CLKID_SYS_PLL 0
-#define CLKID_CPUCLK 1
+/* ID 1 is unused (it was used by the non-existing CLKID_CPUCLK before) */
/* CLKID_HDMI_PLL */
#define CLKID_FIXED_PLL 3
/* CLKID_FCLK_DIV2 */
@@ -284,8 +284,11 @@
#define CLKID_CTS_MCLK_I958_SEL 111
#define CLKID_CTS_MCLK_I958_DIV 112
/* CLKID_CTS_I958 */
+#define CLKID_32K_CLK 114
+#define CLKID_32K_CLK_SEL 115
+#define CLKID_32K_CLK_DIV 116
-#define NR_CLKS 114
+#define NR_CLKS 117
/* include the CLKIDs that have been made part of the stable DT binding */
#include <dt-bindings/clock/gxbb-clkc.h>
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index e9985503165c..bb3f1de876b1 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -1,5 +1,6 @@
/*
- * AmLogic S805 / Meson8b Clock Controller Driver
+ * AmLogic S802 (Meson8) / S805 (Meson8b) / S812 (Meson8m2) Clock Controller
+ * Driver
*
* Copyright (c) 2015 Endless Mobile, Inc.
* Author: Carlo Caione <[email protected]>
@@ -399,7 +400,7 @@ struct clk_gate meson8b_clk81 = {
.ops = &clk_gate_ops,
.parent_names = (const char *[]){ "mpeg_clk_div" },
.num_parents = 1,
- .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED),
+ .flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
},
};
@@ -777,7 +778,9 @@ iounmap:
}
static const struct of_device_id meson8b_clkc_match_table[] = {
+ { .compatible = "amlogic,meson8-clkc" },
{ .compatible = "amlogic,meson8b-clkc" },
+ { .compatible = "amlogic,meson8m2-clkc" },
{ }
};
diff --git a/drivers/clk/mvebu/ap806-system-controller.c b/drivers/clk/mvebu/ap806-system-controller.c
index 8155baccc98e..fa2fbd2cef4a 100644
--- a/drivers/clk/mvebu/ap806-system-controller.c
+++ b/drivers/clk/mvebu/ap806-system-controller.c
@@ -32,24 +32,38 @@ static struct clk_onecell_data ap806_clk_data = {
.clk_num = AP806_CLK_NUM,
};
-static int ap806_syscon_clk_probe(struct platform_device *pdev)
+static char *ap806_unique_name(struct device *dev, struct device_node *np,
+ char *name)
+{
+ const __be32 *reg;
+ u64 addr;
+
+ reg = of_get_property(np, "reg", NULL);
+ addr = of_translate_address(np, reg);
+ return devm_kasprintf(dev, GFP_KERNEL, "%llx-%s",
+ (unsigned long long)addr, name);
+}
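/*
 * Editorial sketch (not part of this patch): the unique name is the
 * node's translated "reg" address glued to a fixed suffix, so two
 * instances at different addresses cannot collide. With a hypothetical
 * clock node at 0xf0680000 this produces names such as
 * "f0680000-cpu-cluster-0" or "f0680000-fixed". The helper below only
 * demonstrates the format string used by ap806_unique_name().
 */
static void example_unique_name(char *buf, size_t len,
				unsigned long long addr, const char *name)
{
	snprintf(buf, len, "%llx-%s", addr, name);	/* e.g. "f0680000-fixed" */
}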
+
+static int ap806_syscon_common_probe(struct platform_device *pdev,
+ struct device_node *syscon_node)
{
unsigned int freq_mode, cpuclk_freq;
const char *name, *fixedclk_name;
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct regmap *regmap;
u32 reg;
int ret;
- regmap = syscon_node_to_regmap(np);
+ regmap = syscon_node_to_regmap(syscon_node);
if (IS_ERR(regmap)) {
- dev_err(&pdev->dev, "cannot get regmap\n");
+ dev_err(dev, "cannot get regmap\n");
return PTR_ERR(regmap);
}
ret = regmap_read(regmap, AP806_SAR_REG, &reg);
if (ret) {
- dev_err(&pdev->dev, "cannot read from regmap\n");
+ dev_err(dev, "cannot read from regmap\n");
return ret;
}
@@ -89,7 +103,7 @@ static int ap806_syscon_clk_probe(struct platform_device *pdev)
cpuclk_freq = 600;
break;
default:
- dev_err(&pdev->dev, "invalid SAR value\n");
+ dev_err(dev, "invalid SAR value\n");
return -EINVAL;
}
@@ -97,18 +111,16 @@ static int ap806_syscon_clk_probe(struct platform_device *pdev)
cpuclk_freq *= 1000 * 1000;
/* CPU clocks depend on the Sample At Reset configuration */
- of_property_read_string_index(np, "clock-output-names",
- 0, &name);
- ap806_clks[0] = clk_register_fixed_rate(&pdev->dev, name, NULL,
+ name = ap806_unique_name(dev, syscon_node, "cpu-cluster-0");
+ ap806_clks[0] = clk_register_fixed_rate(dev, name, NULL,
0, cpuclk_freq);
if (IS_ERR(ap806_clks[0])) {
ret = PTR_ERR(ap806_clks[0]);
goto fail0;
}
- of_property_read_string_index(np, "clock-output-names",
- 1, &name);
- ap806_clks[1] = clk_register_fixed_rate(&pdev->dev, name, NULL, 0,
+ name = ap806_unique_name(dev, syscon_node, "cpu-cluster-1");
+ ap806_clks[1] = clk_register_fixed_rate(dev, name, NULL, 0,
cpuclk_freq);
if (IS_ERR(ap806_clks[1])) {
ret = PTR_ERR(ap806_clks[1]);
@@ -116,9 +128,8 @@ static int ap806_syscon_clk_probe(struct platform_device *pdev)
}
/* Fixed clock is always 1200 Mhz */
- of_property_read_string_index(np, "clock-output-names",
- 2, &fixedclk_name);
- ap806_clks[2] = clk_register_fixed_rate(&pdev->dev, fixedclk_name, NULL,
+ fixedclk_name = ap806_unique_name(dev, syscon_node, "fixed");
+ ap806_clks[2] = clk_register_fixed_rate(dev, fixedclk_name, NULL,
0, 1200 * 1000 * 1000);
if (IS_ERR(ap806_clks[2])) {
ret = PTR_ERR(ap806_clks[2]);
@@ -126,8 +137,7 @@ static int ap806_syscon_clk_probe(struct platform_device *pdev)
}
/* MSS Clock is fixed clock divided by 6 */
- of_property_read_string_index(np, "clock-output-names",
- 3, &name);
+ name = ap806_unique_name(dev, syscon_node, "mss");
ap806_clks[3] = clk_register_fixed_factor(NULL, name, fixedclk_name,
0, 1, 6);
if (IS_ERR(ap806_clks[3])) {
@@ -135,20 +145,14 @@ static int ap806_syscon_clk_probe(struct platform_device *pdev)
goto fail3;
}
- /* eMMC Clock is fixed clock divided by 3 */
- if (of_property_read_string_index(np, "clock-output-names",
- 4, &name)) {
- ap806_clk_data.clk_num--;
- dev_warn(&pdev->dev,
- "eMMC clock missing: update the device tree!\n");
- } else {
- ap806_clks[4] = clk_register_fixed_factor(NULL, name,
- fixedclk_name,
- 0, 1, 3);
- if (IS_ERR(ap806_clks[4])) {
- ret = PTR_ERR(ap806_clks[4]);
- goto fail4;
- }
+ /* SDIO(/eMMC) Clock is fixed clock divided by 3 */
+ name = ap806_unique_name(dev, syscon_node, "sdio");
+ ap806_clks[4] = clk_register_fixed_factor(NULL, name,
+ fixedclk_name,
+ 0, 1, 3);
+ if (IS_ERR(ap806_clks[4])) {
+ ret = PTR_ERR(ap806_clks[4]);
+ goto fail4;
}
of_clk_add_provider(np, of_clk_src_onecell_get, &ap806_clk_data);
@@ -172,17 +176,48 @@ fail0:
return ret;
}
-static const struct of_device_id ap806_syscon_of_match[] = {
+static int ap806_syscon_legacy_probe(struct platform_device *pdev)
+{
+ dev_warn(&pdev->dev, FW_WARN "Using legacy device tree binding\n");
+ dev_warn(&pdev->dev, FW_WARN "Update your device tree:\n");
+ dev_warn(&pdev->dev, FW_WARN
+ "This binding won't be supported in future kernels\n");
+
+ return ap806_syscon_common_probe(pdev, pdev->dev.of_node);
+
+}
+
+static int ap806_clock_probe(struct platform_device *pdev)
+{
+ return ap806_syscon_common_probe(pdev, pdev->dev.of_node->parent);
+}
+
+static const struct of_device_id ap806_syscon_legacy_of_match[] = {
{ .compatible = "marvell,ap806-system-controller", },
{ }
};
-static struct platform_driver ap806_syscon_driver = {
- .probe = ap806_syscon_clk_probe,
+static struct platform_driver ap806_syscon_legacy_driver = {
+ .probe = ap806_syscon_legacy_probe,
.driver = {
.name = "marvell-ap806-system-controller",
- .of_match_table = ap806_syscon_of_match,
+ .of_match_table = ap806_syscon_legacy_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(ap806_syscon_legacy_driver);
+
+static const struct of_device_id ap806_clock_of_match[] = {
+ { .compatible = "marvell,ap806-clock", },
+ { }
+};
+
+static struct platform_driver ap806_clock_driver = {
+ .probe = ap806_clock_probe,
+ .driver = {
+ .name = "marvell-ap806-clock",
+ .of_match_table = ap806_clock_of_match,
.suppress_bind_attrs = true,
},
};
-builtin_platform_driver(ap806_syscon_driver);
+builtin_platform_driver(ap806_clock_driver);
diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c
index 8bccf4ecdab6..394aa6f03f01 100644
--- a/drivers/clk/mvebu/armada-38x.c
+++ b/drivers/clk/mvebu/armada-38x.c
@@ -49,7 +49,8 @@ static const u32 armada_38x_cpu_frequencies[] __initconst = {
0, 0, 0, 0,
1066 * 1000 * 1000, 0, 0, 0,
1332 * 1000 * 1000, 0, 0, 0,
- 1600 * 1000 * 1000,
+ 1600 * 1000 * 1000, 0, 0, 0,
+ 1866 * 1000 * 1000,
};
static u32 __init armada_38x_get_cpu_freq(void __iomem *sar)
@@ -79,7 +80,7 @@ static const int armada_38x_cpu_l2_ratios[32][2] __initconst = {
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
- {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
@@ -90,7 +91,7 @@ static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = {
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
{1, 2}, {0, 1}, {0, 1}, {0, 1},
- {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
{0, 1}, {0, 1}, {0, 1}, {0, 1},
diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
index 6b11d7b3e0e0..ca9a0a536174 100644
--- a/drivers/clk/mvebu/cp110-system-controller.c
+++ b/drivers/clk/mvebu/cp110-system-controller.c
@@ -11,15 +11,16 @@
*/
/*
- * CP110 has 5 core clocks:
+ * CP110 has 6 core clocks:
*
* - APLL (1 Ghz)
* - PPv2 core (1/3 APLL)
* - EIP (1/2 APLL)
- * - Core (1/2 EIP)
+ * - Core (1/2 EIP)
+ * - SDIO (2/5 APLL)
*
* - NAND clock, which is either:
- * - Equal to the core clock
+ * - Equal to SDIO clock
* - 2/5 APLL
*
* CP110 has 32 gatable clocks, for the various peripherals in the
@@ -46,7 +47,7 @@ enum {
CP110_CLK_TYPE_GATABLE,
};
-#define CP110_MAX_CORE_CLOCKS 5
+#define CP110_MAX_CORE_CLOCKS 6
#define CP110_MAX_GATABLE_CLOCKS 32
#define CP110_CLK_NUM \
@@ -57,6 +58,7 @@ enum {
#define CP110_CORE_EIP 2
#define CP110_CORE_CORE 3
#define CP110_CORE_NAND 4
+#define CP110_CORE_SDIO 5
/* A number of gatable clocks need special handling */
#define CP110_GATE_AUDIO 0
@@ -84,6 +86,33 @@ enum {
#define CP110_GATE_EIP150 25
#define CP110_GATE_EIP197 26
+static const char * const gate_base_names[] = {
+ [CP110_GATE_AUDIO] = "audio",
+ [CP110_GATE_COMM_UNIT] = "communit",
+ [CP110_GATE_NAND] = "nand",
+ [CP110_GATE_PPV2] = "ppv2",
+ [CP110_GATE_SDIO] = "sdio",
+ [CP110_GATE_MG] = "mg-domain",
+ [CP110_GATE_MG_CORE] = "mg-core",
+ [CP110_GATE_XOR1] = "xor1",
+ [CP110_GATE_XOR0] = "xor0",
+ [CP110_GATE_GOP_DP] = "gop-dp",
+ [CP110_GATE_PCIE_X1_0] = "pcie_x10",
+ [CP110_GATE_PCIE_X1_1] = "pcie_x11",
+ [CP110_GATE_PCIE_X4] = "pcie_x4",
+ [CP110_GATE_PCIE_XOR] = "pcie-xor",
+ [CP110_GATE_SATA] = "sata",
+ [CP110_GATE_SATA_USB] = "sata-usb",
+ [CP110_GATE_MAIN] = "main",
+ [CP110_GATE_SDMMC_GOP] = "sd-mmc-gop",
+ [CP110_GATE_SLOW_IO] = "slow-io",
+ [CP110_GATE_USB3H0] = "usb3h0",
+ [CP110_GATE_USB3H1] = "usb3h1",
+ [CP110_GATE_USB3DEV] = "usb3dev",
+ [CP110_GATE_EIP150] = "eip150",
+ [CP110_GATE_EIP197] = "eip197"
+};
+
struct cp110_gate_clk {
struct clk_hw hw;
struct regmap *regmap;
@@ -186,17 +215,37 @@ static struct clk_hw *cp110_of_clk_get(struct of_phandle_args *clkspec,
return ERR_PTR(-EINVAL);
}
-static int cp110_syscon_clk_probe(struct platform_device *pdev)
+static char *cp110_unique_name(struct device *dev, struct device_node *np,
+ const char *name)
+{
+ const __be32 *reg;
+ u64 addr;
+
+ /* Do not create a name if there is no clock */
+ if (!name)
+ return NULL;
+
+ reg = of_get_property(np, "reg", NULL);
+ addr = of_translate_address(np, reg);
+ return devm_kasprintf(dev, GFP_KERNEL, "%llx-%s",
+ (unsigned long long)addr, name);
+}
+
+static int cp110_syscon_common_probe(struct platform_device *pdev,
+ struct device_node *syscon_node)
{
struct regmap *regmap;
- struct device_node *np = pdev->dev.of_node;
- const char *ppv2_name, *apll_name, *core_name, *eip_name, *nand_name;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const char *ppv2_name, *apll_name, *core_name, *eip_name, *nand_name,
+ *sdio_name;
struct clk_hw_onecell_data *cp110_clk_data;
struct clk_hw *hw, **cp110_clks;
u32 nand_clk_ctrl;
int i, ret;
+ char *gate_name[ARRAY_SIZE(gate_base_names)];
- regmap = syscon_node_to_regmap(np);
+ regmap = syscon_node_to_regmap(syscon_node);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
@@ -205,7 +254,7 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
if (ret)
return ret;
- cp110_clk_data = devm_kzalloc(&pdev->dev, sizeof(*cp110_clk_data) +
+ cp110_clk_data = devm_kzalloc(dev, sizeof(*cp110_clk_data) +
sizeof(struct clk_hw *) * CP110_CLK_NUM,
GFP_KERNEL);
if (!cp110_clk_data)
@@ -215,53 +264,47 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
cp110_clk_data->num = CP110_CLK_NUM;
/* Register the APLL which is the root of the hw tree */
- of_property_read_string_index(np, "core-clock-output-names",
- CP110_CORE_APLL, &apll_name);
+ apll_name = cp110_unique_name(dev, syscon_node, "apll");
hw = clk_hw_register_fixed_rate(NULL, apll_name, NULL, 0,
1000 * 1000 * 1000);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
- goto fail0;
+ goto fail_apll;
}
cp110_clks[CP110_CORE_APLL] = hw;
/* PPv2 is APLL/3 */
- of_property_read_string_index(np, "core-clock-output-names",
- CP110_CORE_PPV2, &ppv2_name);
+ ppv2_name = cp110_unique_name(dev, syscon_node, "ppv2-core");
hw = clk_hw_register_fixed_factor(NULL, ppv2_name, apll_name, 0, 1, 3);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
- goto fail1;
+ goto fail_ppv2;
}
cp110_clks[CP110_CORE_PPV2] = hw;
/* EIP clock is APLL/2 */
- of_property_read_string_index(np, "core-clock-output-names",
- CP110_CORE_EIP, &eip_name);
+ eip_name = cp110_unique_name(dev, syscon_node, "eip");
hw = clk_hw_register_fixed_factor(NULL, eip_name, apll_name, 0, 1, 2);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
- goto fail2;
+ goto fail_eip;
}
cp110_clks[CP110_CORE_EIP] = hw;
/* Core clock is EIP/2 */
- of_property_read_string_index(np, "core-clock-output-names",
- CP110_CORE_CORE, &core_name);
+ core_name = cp110_unique_name(dev, syscon_node, "core");
hw = clk_hw_register_fixed_factor(NULL, core_name, eip_name, 0, 1, 2);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
- goto fail3;
+ goto fail_core;
}
cp110_clks[CP110_CORE_CORE] = hw;
-
/* NAND can be either APLL/2.5 or core clock */
- of_property_read_string_index(np, "core-clock-output-names",
- CP110_CORE_NAND, &nand_name);
+ nand_name = cp110_unique_name(dev, syscon_node, "nand-core");
if (nand_clk_ctrl & NF_CLOCK_SEL_400_MASK)
hw = clk_hw_register_fixed_factor(NULL, nand_name,
apll_name, 0, 2, 5);
@@ -270,23 +313,31 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
core_name, 0, 1, 1);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
- goto fail4;
+ goto fail_nand;
}
cp110_clks[CP110_CORE_NAND] = hw;
- for (i = 0; i < CP110_MAX_GATABLE_CLOCKS; i++) {
- const char *parent, *name;
- int ret;
-
- ret = of_property_read_string_index(np,
- "gate-clock-output-names",
- i, &name);
- /* Reached the end of the list? */
- if (ret < 0)
- break;
+ /* SDIO clock is APLL/2.5 */
+ sdio_name = cp110_unique_name(dev, syscon_node, "sdio-core");
+ hw = clk_hw_register_fixed_factor(NULL, sdio_name,
+ apll_name, 0, 2, 5);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto fail_sdio;
+ }
+
+ cp110_clks[CP110_CORE_SDIO] = hw;
+
+ /* create the unique name for all the gate clocks */
+ for (i = 0; i < ARRAY_SIZE(gate_base_names); i++)
+ gate_name[i] = cp110_unique_name(dev, syscon_node,
+ gate_base_names[i]);
+
+ for (i = 0; i < ARRAY_SIZE(gate_base_names); i++) {
+ const char *parent;
- if (!strcmp(name, "none"))
+ if (gate_name[i] == NULL)
continue;
switch (i) {
@@ -295,14 +346,10 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
case CP110_GATE_EIP150:
case CP110_GATE_EIP197:
case CP110_GATE_SLOW_IO:
- of_property_read_string_index(np,
- "gate-clock-output-names",
- CP110_GATE_MAIN, &parent);
+ parent = gate_name[CP110_GATE_MAIN];
break;
case CP110_GATE_MG:
- of_property_read_string_index(np,
- "gate-clock-output-names",
- CP110_GATE_MG_CORE, &parent);
+ parent = gate_name[CP110_GATE_MG_CORE];
break;
case CP110_GATE_NAND:
parent = nand_name;
@@ -311,34 +358,30 @@ static int cp110_syscon_clk_probe(struct platform_device *pdev)
parent = ppv2_name;
break;
case CP110_GATE_SDIO:
+ parent = sdio_name;
+ break;
case CP110_GATE_GOP_DP:
- of_property_read_string_index(np,
- "gate-clock-output-names",
- CP110_GATE_SDMMC_GOP, &parent);
+ parent = gate_name[CP110_GATE_SDMMC_GOP];
break;
case CP110_GATE_XOR1:
case CP110_GATE_XOR0:
case CP110_GATE_PCIE_X1_0:
case CP110_GATE_PCIE_X1_1:
case CP110_GATE_PCIE_X4:
- of_property_read_string_index(np,
- "gate-clock-output-names",
- CP110_GATE_PCIE_XOR, &parent);
+ parent = gate_name[CP110_GATE_PCIE_XOR];
break;
case CP110_GATE_SATA:
case CP110_GATE_USB3H0:
case CP110_GATE_USB3H1:
case CP110_GATE_USB3DEV:
- of_property_read_string_index(np,
- "gate-clock-output-names",
- CP110_GATE_SATA_USB, &parent);
+ parent = gate_name[CP110_GATE_SATA_USB];
break;
default:
parent = core_name;
break;
}
+ hw = cp110_register_gate(gate_name[i], parent, regmap, i);
- hw = cp110_register_gate(name, parent, regmap, i);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail_gate;
@@ -364,30 +407,62 @@ fail_gate:
cp110_unregister_gate(hw);
}
+ clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_SDIO]);
+fail_sdio:
clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_NAND]);
-fail4:
+fail_nand:
clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_CORE]);
-fail3:
+fail_core:
clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_EIP]);
-fail2:
+fail_eip:
clk_hw_unregister_fixed_factor(cp110_clks[CP110_CORE_PPV2]);
-fail1:
+fail_ppv2:
clk_hw_unregister_fixed_rate(cp110_clks[CP110_CORE_APLL]);
-fail0:
+fail_apll:
return ret;
}
-static const struct of_device_id cp110_syscon_of_match[] = {
+static int cp110_syscon_legacy_clk_probe(struct platform_device *pdev)
+{
+ dev_warn(&pdev->dev, FW_WARN "Using legacy device tree binding\n");
+ dev_warn(&pdev->dev, FW_WARN "Update your device tree:\n");
+ dev_warn(&pdev->dev, FW_WARN
+ "This binding won't be supported in future kernels\n");
+
+ return cp110_syscon_common_probe(pdev, pdev->dev.of_node);
+}
+
+static int cp110_clk_probe(struct platform_device *pdev)
+{
+ return cp110_syscon_common_probe(pdev, pdev->dev.of_node->parent);
+}
+
+static const struct of_device_id cp110_syscon_legacy_of_match[] = {
{ .compatible = "marvell,cp110-system-controller0", },
{ }
};
-static struct platform_driver cp110_syscon_driver = {
- .probe = cp110_syscon_clk_probe,
+static struct platform_driver cp110_syscon_legacy_driver = {
+ .probe = cp110_syscon_legacy_clk_probe,
.driver = {
.name = "marvell-cp110-system-controller0",
- .of_match_table = cp110_syscon_of_match,
+ .of_match_table = cp110_syscon_legacy_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(cp110_syscon_legacy_driver);
+
+static const struct of_device_id cp110_clock_of_match[] = {
+ { .compatible = "marvell,cp110-clock", },
+ { }
+};
+
+static struct platform_driver cp110_clock_driver = {
+ .probe = cp110_clk_probe,
+ .driver = {
+ .name = "marvell-cp110-clock",
+ .of_match_table = cp110_clock_of_match,
.suppress_bind_attrs = true,
},
};
-builtin_platform_driver(cp110_syscon_driver);
+builtin_platform_driver(cp110_clock_driver);
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 5fb8d7430908..9f6c278deead 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -82,6 +82,15 @@ config IPQ_LCC_806X
Say Y if you want to use audio devices such as i2s, pcm,
S/PDIF, etc.
+config IPQ_GCC_8074
+ tristate "IPQ8074 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on ipq8074 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, etc. Select this for the root clock
+ of ipq8074.
+
config MSM_GCC_8660
tristate "MSM8660 Global Clock Controller"
depends on COMMON_CLK_QCOM
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 1c3e222b917b..3f3aff229fb7 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
+obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
obj-$(CONFIG_MDM_GCC_9615) += gcc-mdm9615.o
obj-$(CONFIG_MDM_LCC_9615) += lcc-mdm9615.o
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
new file mode 100644
index 000000000000..0f735d37690f
--- /dev/null
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -0,0 +1,1007 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-ipq8074.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-alpha-pll.h"
+#include "reset.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+enum {
+ P_XO,
+ P_GPLL0,
+ P_GPLL0_DIV2,
+};
+
+static const char * const gcc_xo_gpll0_gpll0_out_main_div2[] = {
+ "xo",
+ "gpll0",
+ "gpll0_out_main_div2",
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll0_out_main_div2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL0_DIV2, 4 },
+};
+
+static struct clk_alpha_pll gpll0_main = {
+ .offset = 0x21000,
+ .clkr = {
+ .enable_reg = 0x0b000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_main",
+ .parent_names = (const char *[]){
+ "xo"
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gpll0_out_main_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_main_div2",
+ .parent_names = (const char *[]){
+ "gpll0_main"
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0 = {
+ .offset = 0x21000,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_names = (const char *[]){
+ "gpll0_main"
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_pcnoc_bfdcd_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
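/*
 * Editorial note (not part of this patch): the F() macro above encodes
 * the pre-divider as (2 * h - 1) so half-integer dividers can be
 * expressed. Assuming the 19.2 MHz "xo" and a nominal 800 MHz "gpll0"
 * on this SoC family (an assumption, not stated in this hunk), the table
 * reads as 19.2 MHz / 1, 800 MHz / 16 = 50 MHz and 800 MHz / 8 = 100 MHz.
 * The SPI table further down also uses the M/N counter, e.g.
 * 19.2 MHz / 10 * 1/2 = 960 kHz. The helper below sketches that
 * arithmetic for integer dividers only.
 */
static unsigned long example_rcg_rate(unsigned long parent_rate,
				      unsigned int hid_div, unsigned int m,
				      unsigned int n)
{
	unsigned long rate = parent_rate / hid_div;	/* pre-divider stage */

	if (n)						/* optional M/N stage */
		rate = rate / n * m;

	return rate;
}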
+
+static struct clk_rcg2 pcnoc_bfdcd_clk_src = {
+ .cmd_rcgr = 0x27000,
+ .freq_tbl = ftbl_pcnoc_bfdcd_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pcnoc_bfdcd_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_IS_CRITICAL,
+ },
+};
+
+static struct clk_fixed_factor pcnoc_clk_src = {
+ .mult = 1,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "pcnoc_clk_src",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src"
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_branch gcc_sleep_clk_src = {
+ .halt_reg = 0x30000,
+ .clkr = {
+ .enable_reg = 0x30000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sleep_clk_src",
+ .parent_names = (const char *[]){
+ "sleep_clk"
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup_i2c_apps_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0_DIV2, 16, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0200c,
+ .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_qup_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(12500000, P_GPLL0_DIV2, 16, 1, 2),
+ F(16000000, P_GPLL0, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x02024,
+ .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x03000,
+ .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x03014,
+ .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x04000,
+ .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x04014,
+ .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x05000,
+ .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x05014,
+ .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x06000,
+ .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x06014,
+ .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x07000,
+ .freq_tbl = ftbl_blsp1_qup_i2c_apps_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x07014,
+ .freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp1_uart_apps_clk_src[] = {
+ F(3686400, P_GPLL0_DIV2, 1, 144, 15625),
+ F(7372800, P_GPLL0_DIV2, 1, 288, 15625),
+ F(14745600, P_GPLL0_DIV2, 1, 576, 15625),
+ F(16000000, P_GPLL0_DIV2, 5, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 1, 3, 100),
+ F(25000000, P_GPLL0, 16, 1, 2),
+ F(32000000, P_GPLL0, 1, 1, 25),
+ F(40000000, P_GPLL0, 1, 1, 20),
+ F(46400000, P_GPLL0, 1, 29, 500),
+ F(48000000, P_GPLL0, 1, 3, 50),
+ F(51200000, P_GPLL0, 1, 8, 125),
+ F(56000000, P_GPLL0, 1, 7, 100),
+ F(58982400, P_GPLL0, 1, 1152, 15625),
+ F(60000000, P_GPLL0, 1, 3, 40),
+ F(64000000, P_GPLL0, 12.5, 1, 1),
+ { }
+};
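+/*
+ * Worked M/N example (illustrative): with GPLL0/2 at 400 MHz,
+ * F(3686400, P_GPLL0_DIV2, 1, 144, 15625) yields
+ * 400 MHz * 144 / 15625 = 3.6864 MHz, i.e. a 16x oversampling clock
+ * for 230400 baud.
+ */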
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x02044,
+ .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x03034,
+ .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x04034,
+ .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart3_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x05034,
+ .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart4_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
+ .cmd_rcgr = 0x06034,
+ .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart5_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
+ .cmd_rcgr = 0x07034,
+ .freq_tbl = ftbl_blsp1_uart_apps_clk_src,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_out_main_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart6_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_out_main_div2,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x01008,
+ .clkr = {
+ .enable_reg = 0x01008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x02008,
+ .clkr = {
+ .enable_reg = 0x02008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_i2c_apps_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x02004,
+ .clkr = {
+ .enable_reg = 0x02004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup1_spi_apps_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x03010,
+ .clkr = {
+ .enable_reg = 0x03010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_i2c_apps_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x0300c,
+ .clkr = {
+ .enable_reg = 0x0300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup2_spi_apps_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x04010,
+ .clkr = {
+ .enable_reg = 0x04010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup3_i2c_apps_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x0400c,
+ .clkr = {
+ .enable_reg = 0x0400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup3_spi_apps_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x05010,
+ .clkr = {
+ .enable_reg = 0x05010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup4_i2c_apps_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x0500c,
+ .clkr = {
+ .enable_reg = 0x0500c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup4_spi_apps_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+ .halt_reg = 0x06010,
+ .clkr = {
+ .enable_reg = 0x06010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup5_i2c_apps_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+ .halt_reg = 0x0600c,
+ .clkr = {
+ .enable_reg = 0x0600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup5_spi_apps_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
+ .halt_reg = 0x07010,
+ .clkr = {
+ .enable_reg = 0x07010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_i2c_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup6_i2c_apps_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+ .halt_reg = 0x0700c,
+ .clkr = {
+ .enable_reg = 0x0700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_spi_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_qup6_spi_apps_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x0203c,
+ .clkr = {
+ .enable_reg = 0x0203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart1_apps_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x0302c,
+ .clkr = {
+ .enable_reg = 0x0302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart2_apps_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+ .halt_reg = 0x0402c,
+ .clkr = {
+ .enable_reg = 0x0402c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart3_apps_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart4_apps_clk = {
+ .halt_reg = 0x0502c,
+ .clkr = {
+ .enable_reg = 0x0502c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart4_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart4_apps_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart5_apps_clk = {
+ .halt_reg = 0x0602c,
+ .clkr = {
+ .enable_reg = 0x0602c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart5_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart5_apps_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart6_apps_clk = {
+ .halt_reg = 0x0702c,
+ .clkr = {
+ .enable_reg = 0x0702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart6_apps_clk",
+ .parent_names = (const char *[]){
+ "blsp1_uart6_apps_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x0b004,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_ahb_clk = {
+ .halt_reg = 0x57024,
+ .clkr = {
+ .enable_reg = 0x57024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qpic_ahb_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qpic_clk = {
+ .halt_reg = 0x57020,
+ .clkr = {
+ .enable_reg = 0x57020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qpic_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_hw *gcc_ipq8074_hws[] = {
+ &gpll0_out_main_div2.hw,
+ &pcnoc_clk_src.hw,
+};
+
+static struct clk_regmap *gcc_ipq8074_clks[] = {
+ [GPLL0_MAIN] = &gpll0_main.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [PCNOC_BFDCD_CLK_SRC] = &pcnoc_bfdcd_clk_src.clkr,
+ [GCC_SLEEP_CLK_SRC] = &gcc_sleep_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+ [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP1_UART3_APPS_CLK_SRC] = &blsp1_uart3_apps_clk_src.clkr,
+ [BLSP1_UART4_APPS_CLK_SRC] = &blsp1_uart4_apps_clk_src.clkr,
+ [BLSP1_UART5_APPS_CLK_SRC] = &blsp1_uart5_apps_clk_src.clkr,
+ [BLSP1_UART6_APPS_CLK_SRC] = &blsp1_uart6_apps_clk_src.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+ [GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
+ [GCC_BLSP1_UART5_APPS_CLK] = &gcc_blsp1_uart5_apps_clk.clkr,
+ [GCC_BLSP1_UART6_APPS_CLK] = &gcc_blsp1_uart6_apps_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QPIC_AHB_CLK] = &gcc_qpic_ahb_clk.clkr,
+ [GCC_QPIC_CLK] = &gcc_qpic_clk.clkr,
+};
+
+static const struct qcom_reset_map gcc_ipq8074_resets[] = {
+ [GCC_BLSP1_BCR] = { 0x01000, 0 },
+ [GCC_BLSP1_QUP1_BCR] = { 0x02000, 0 },
+ [GCC_BLSP1_UART1_BCR] = { 0x02038, 0 },
+ [GCC_BLSP1_QUP2_BCR] = { 0x03008, 0 },
+ [GCC_BLSP1_UART2_BCR] = { 0x03028, 0 },
+ [GCC_BLSP1_QUP3_BCR] = { 0x04008, 0 },
+ [GCC_BLSP1_UART3_BCR] = { 0x04028, 0 },
+ [GCC_BLSP1_QUP4_BCR] = { 0x05008, 0 },
+ [GCC_BLSP1_UART4_BCR] = { 0x05028, 0 },
+ [GCC_BLSP1_QUP5_BCR] = { 0x06008, 0 },
+ [GCC_BLSP1_UART5_BCR] = { 0x06028, 0 },
+ [GCC_BLSP1_QUP6_BCR] = { 0x07008, 0 },
+ [GCC_BLSP1_UART6_BCR] = { 0x07028, 0 },
+ [GCC_IMEM_BCR] = { 0x0e000, 0 },
+ [GCC_SMMU_BCR] = { 0x12000, 0 },
+ [GCC_APSS_TCU_BCR] = { 0x12050, 0 },
+ [GCC_SMMU_XPU_BCR] = { 0x12054, 0 },
+ [GCC_PCNOC_TBU_BCR] = { 0x12058, 0 },
+ [GCC_SMMU_CFG_BCR] = { 0x1208c, 0 },
+ [GCC_PRNG_BCR] = { 0x13000, 0 },
+ [GCC_BOOT_ROM_BCR] = { 0x13008, 0 },
+ [GCC_CRYPTO_BCR] = { 0x16000, 0 },
+ [GCC_WCSS_BCR] = { 0x18000, 0 },
+ [GCC_WCSS_Q6_BCR] = { 0x18100, 0 },
+ [GCC_NSS_BCR] = { 0x19000, 0 },
+ [GCC_SEC_CTRL_BCR] = { 0x1a000, 0 },
+ [GCC_ADSS_BCR] = { 0x1c000, 0 },
+ [GCC_DDRSS_BCR] = { 0x1e000, 0 },
+ [GCC_SYSTEM_NOC_BCR] = { 0x26000, 0 },
+ [GCC_PCNOC_BCR] = { 0x27018, 0 },
+ [GCC_TCSR_BCR] = { 0x28000, 0 },
+ [GCC_QDSS_BCR] = { 0x29000, 0 },
+ [GCC_DCD_BCR] = { 0x2a000, 0 },
+ [GCC_MSG_RAM_BCR] = { 0x2b000, 0 },
+ [GCC_MPM_BCR] = { 0x2c000, 0 },
+ [GCC_SPMI_BCR] = { 0x2e000, 0 },
+ [GCC_SPDM_BCR] = { 0x2f000, 0 },
+ [GCC_RBCPR_BCR] = { 0x33000, 0 },
+ [GCC_RBCPR_MX_BCR] = { 0x33014, 0 },
+ [GCC_TLMM_BCR] = { 0x34000, 0 },
+ [GCC_RBCPR_WCSS_BCR] = { 0x3a000, 0 },
+ [GCC_USB0_PHY_BCR] = { 0x3e034, 0 },
+ [GCC_USB3PHY_0_PHY_BCR] = { 0x3e03c, 0 },
+ [GCC_USB0_BCR] = { 0x3e070, 0 },
+ [GCC_USB1_PHY_BCR] = { 0x3f034, 0 },
+ [GCC_USB3PHY_1_PHY_BCR] = { 0x3f03c, 0 },
+ [GCC_USB1_BCR] = { 0x3f070, 0 },
+ [GCC_QUSB2_0_PHY_BCR] = { 0x4103c, 0 },
+ [GCC_QUSB2_1_PHY_BCR] = { 0x41040, 0 },
+ [GCC_SDCC1_BCR] = { 0x42000, 0 },
+ [GCC_SDCC2_BCR] = { 0x43000, 0 },
+ [GCC_SNOC_BUS_TIMEOUT0_BCR] = { 0x47000, 0 },
+ [GCC_SNOC_BUS_TIMEOUT2_BCR] = { 0x47008, 0 },
+ [GCC_SNOC_BUS_TIMEOUT3_BCR] = { 0x47010, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT0_BCR] = { 0x48000, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT1_BCR] = { 0x48008, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT2_BCR] = { 0x48010, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT3_BCR] = { 0x48018, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT4_BCR] = { 0x48020, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT5_BCR] = { 0x48028, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT6_BCR] = { 0x48030, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT7_BCR] = { 0x48038, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT8_BCR] = { 0x48040, 0 },
+ [GCC_PCNOC_BUS_TIMEOUT9_BCR] = { 0x48048, 0 },
+ [GCC_UNIPHY0_BCR] = { 0x56000, 0 },
+ [GCC_UNIPHY1_BCR] = { 0x56100, 0 },
+ [GCC_UNIPHY2_BCR] = { 0x56200, 0 },
+ [GCC_CMN_12GPLL_BCR] = { 0x56300, 0 },
+ [GCC_QPIC_BCR] = { 0x57018, 0 },
+ [GCC_MDIO_BCR] = { 0x58000, 0 },
+ [GCC_PCIE1_TBU_BCR] = { 0x65000, 0 },
+ [GCC_WCSS_CORE_TBU_BCR] = { 0x66000, 0 },
+ [GCC_WCSS_Q6_TBU_BCR] = { 0x67000, 0 },
+ [GCC_USB0_TBU_BCR] = { 0x6a000, 0 },
+ [GCC_USB1_TBU_BCR] = { 0x6a004, 0 },
+ [GCC_PCIE0_TBU_BCR] = { 0x6b000, 0 },
+ [GCC_NSS_NOC_TBU_BCR] = { 0x6e000, 0 },
+ [GCC_PCIE0_BCR] = { 0x75004, 0 },
+ [GCC_PCIE0_PHY_BCR] = { 0x75038, 0 },
+ [GCC_PCIE0PHY_PHY_BCR] = { 0x7503c, 0 },
+ [GCC_PCIE0_LINK_DOWN_BCR] = { 0x75044, 0 },
+ [GCC_PCIE1_BCR] = { 0x76004, 0 },
+ [GCC_PCIE1_PHY_BCR] = { 0x76038, 0 },
+ [GCC_PCIE1PHY_PHY_BCR] = { 0x7603c, 0 },
+ [GCC_PCIE1_LINK_DOWN_BCR] = { 0x76044, 0 },
+ [GCC_DCC_BCR] = { 0x77000, 0 },
+ [GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR] = { 0x78000, 0 },
+ [GCC_APC1_VOLTAGE_DROOP_DETECTOR_BCR] = { 0x79000, 0 },
+ [GCC_SMMU_CATS_BCR] = { 0x7c000, 0 },
+};
+
+static const struct of_device_id gcc_ipq8074_match_table[] = {
+ { .compatible = "qcom,gcc-ipq8074" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_ipq8074_match_table);
+
+static const struct regmap_config gcc_ipq8074_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x7fffc,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_ipq8074_desc = {
+ .config = &gcc_ipq8074_regmap_config,
+ .clks = gcc_ipq8074_clks,
+ .num_clks = ARRAY_SIZE(gcc_ipq8074_clks),
+ .resets = gcc_ipq8074_resets,
+ .num_resets = ARRAY_SIZE(gcc_ipq8074_resets),
+};
+
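+/*
+ * The clk_fixed_factor clocks in gcc_ipq8074_hws[] are not regmap-backed,
+ * so they are registered directly with devm_clk_hw_register() before
+ * qcom_cc_probe() registers the regmap clocks and resets.
+ */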
+static int gcc_ipq8074_probe(struct platform_device *pdev)
+{
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(gcc_ipq8074_hws); i++) {
+ ret = devm_clk_hw_register(&pdev->dev, gcc_ipq8074_hws[i]);
+ if (ret)
+ return ret;
+ }
+
+ return qcom_cc_probe(pdev, &gcc_ipq8074_desc);
+}
+
+static struct platform_driver gcc_ipq8074_driver = {
+ .probe = gcc_ipq8074_probe,
+ .driver = {
+ .name = "qcom,gcc-ipq8074",
+ .of_match_table = gcc_ipq8074_match_table,
+ },
+};
+
+static int __init gcc_ipq8074_init(void)
+{
+ return platform_driver_register(&gcc_ipq8074_driver);
+}
+core_initcall(gcc_ipq8074_init);
+
+static void __exit gcc_ipq8074_exit(void)
+{
+ platform_driver_unregister(&gcc_ipq8074_driver);
+}
+module_exit(gcc_ipq8074_exit);
+
+MODULE_DESCRIPTION("QCOM GCC IPQ8074 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-ipq8074");
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 628e6ca276ec..2cfe7000fc60 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -1430,6 +1430,7 @@ static struct clk_branch gcc_ultaudio_stc_xo_clk = {
};
static const struct freq_tbl ftbl_codec_clk[] = {
+ F(9600000, P_XO, 2, 0, 0),
F(19200000, P_XO, 1, 0, 0),
F(11289600, P_EXT_MCLK, 1, 0, 0),
{ }
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 2586dfa0026b..78d1df9112ba 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -1,20 +1,129 @@
+config CLK_RENESAS
+ bool "Renesas SoC clock support" if COMPILE_TEST && !ARCH_RENESAS
+ default y if ARCH_RENESAS
+ select CLK_EMEV2 if ARCH_EMEV2
+ select CLK_RZA1 if ARCH_R7S72100
+ select CLK_R8A73A4 if ARCH_R8A73A4
+ select CLK_R8A7740 if ARCH_R8A7740
+ select CLK_R8A7743 if ARCH_R8A7743
+ select CLK_R8A7745 if ARCH_R8A7745
+ select CLK_R8A7778 if ARCH_R8A7778
+ select CLK_R8A7779 if ARCH_R8A7779
+ select CLK_R8A7790 if ARCH_R8A7790
+ select CLK_R8A7791 if ARCH_R8A7791 || ARCH_R8A7793
+ select CLK_R8A7792 if ARCH_R8A7792
+ select CLK_R8A7794 if ARCH_R8A7794
+ select CLK_R8A7795 if ARCH_R8A7795
+ select CLK_R8A7796 if ARCH_R8A7796
+ select CLK_SH73A0 if ARCH_SH73A0
+
+if CLK_RENESAS
+
+config CLK_RENESAS_LEGACY
+ bool "Legacy DT clock support"
+ depends on CLK_R8A7790 || CLK_R8A7791 || CLK_R8A7792 || CLK_R8A7794
+ default y
+ help
+ Enable backward compatibility with old device trees describing a
+ hierarchical representation of the various CPG and MSTP clocks.
+
+ Say Y if you want your kernel to work with old DTBs.
+
+# SoC
+config CLK_EMEV2
+ bool "Emma Mobile EV2 clock support" if COMPILE_TEST
+
+config CLK_RZA1
+ bool
+ select CLK_RENESAS_CPG_MSTP
+
+config CLK_R8A73A4
+ bool
+ select CLK_RENESAS_CPG_MSTP
+ select CLK_RENESAS_DIV6
+
+config CLK_R8A7740
+ bool
+ select CLK_RENESAS_CPG_MSTP
+ select CLK_RENESAS_DIV6
+
+config CLK_R8A7743
+ bool
+ select CLK_RCAR_GEN2_CPG
+
+config CLK_R8A7745
+ bool
+ select CLK_RCAR_GEN2_CPG
+
+config CLK_R8A7778
+ bool
+ select CLK_RENESAS_CPG_MSTP
+
+config CLK_R8A7779
+ bool
+ select CLK_RENESAS_CPG_MSTP
+
+config CLK_R8A7790
+ bool
+ select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY
+ select CLK_RCAR_GEN2_CPG
+ select CLK_RENESAS_DIV6
+
+config CLK_R8A7791
+ bool
+ select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY
+ select CLK_RCAR_GEN2_CPG
+ select CLK_RENESAS_DIV6
+
+config CLK_R8A7792
+ bool
+ select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY
+ select CLK_RCAR_GEN2_CPG
+
+config CLK_R8A7794
+ bool
+ select CLK_RCAR_GEN2 if CLK_RENESAS_LEGACY
+ select CLK_RCAR_GEN2_CPG
+ select CLK_RENESAS_DIV6
+
+config CLK_R8A7795
+ bool
+ select CLK_RCAR_GEN3_CPG
+
+config CLK_R8A7796
+ bool
+ select CLK_RCAR_GEN3_CPG
+
+config CLK_SH73A0
+ bool
+ select CLK_RENESAS_CPG_MSTP
+ select CLK_RENESAS_DIV6
+
+
+# Family
+config CLK_RCAR_GEN2
+ bool
+ select CLK_RENESAS_CPG_MSTP
+ select CLK_RENESAS_DIV6
+
+config CLK_RCAR_GEN2_CPG
+ bool
+ select CLK_RENESAS_CPG_MSSR
+
+config CLK_RCAR_GEN3_CPG
+ bool
+ select CLK_RENESAS_CPG_MSSR
+
+
+# Generic
config CLK_RENESAS_CPG_MSSR
bool
- default y if ARCH_R8A7743
- default y if ARCH_R8A7745
- default y if ARCH_R8A7795
- default y if ARCH_R8A7796
+ select CLK_RENESAS_DIV6
config CLK_RENESAS_CPG_MSTP
bool
- default y if ARCH_R7S72100
- default y if ARCH_R8A73A4
- default y if ARCH_R8A7740
- default y if ARCH_R8A7778
- default y if ARCH_R8A7779
- default y if ARCH_R8A7790
- default y if ARCH_R8A7791
- default y if ARCH_R8A7792
- default y if ARCH_R8A7793
- default y if ARCH_R8A7794
- default y if ARCH_SH73A0
+
+config CLK_RENESAS_DIV6
+ bool "DIV6 clock support" if COMPILE_TEST
+
+endif # CLK_RENESAS
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 1072f7653c0c..02d04124371f 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -1,19 +1,26 @@
-obj-$(CONFIG_ARCH_EMEV2) += clk-emev2.o
-obj-$(CONFIG_ARCH_R7S72100) += clk-rz.o
-obj-$(CONFIG_ARCH_R8A73A4) += clk-r8a73a4.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7740) += clk-r8a7740.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7743) += r8a7743-cpg-mssr.o rcar-gen2-cpg.o
-obj-$(CONFIG_ARCH_R8A7745) += r8a7745-cpg-mssr.o rcar-gen2-cpg.o
-obj-$(CONFIG_ARCH_R8A7778) += clk-r8a7778.o
-obj-$(CONFIG_ARCH_R8A7779) += clk-r8a7779.o
-obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7791) += clk-rcar-gen2.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7792) += clk-rcar-gen2.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7793) += clk-rcar-gen2.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7794) += clk-rcar-gen2.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7795) += r8a7795-cpg-mssr.o rcar-gen3-cpg.o
-obj-$(CONFIG_ARCH_R8A7796) += r8a7796-cpg-mssr.o rcar-gen3-cpg.o
-obj-$(CONFIG_ARCH_SH73A0) += clk-sh73a0.o clk-div6.o
+# SoC
+obj-$(CONFIG_CLK_EMEV2) += clk-emev2.o
+obj-$(CONFIG_CLK_RZA1) += clk-rz.o
+obj-$(CONFIG_CLK_R8A73A4) += clk-r8a73a4.o
+obj-$(CONFIG_CLK_R8A7740) += clk-r8a7740.o
+obj-$(CONFIG_CLK_R8A7743) += r8a7743-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A7745) += r8a7745-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A7778) += clk-r8a7778.o
+obj-$(CONFIG_CLK_R8A7779) += clk-r8a7779.o
+obj-$(CONFIG_CLK_R8A7790) += r8a7790-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A7791) += r8a7791-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A7792) += r8a7792-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A7794) += r8a7794-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A7795) += r8a7795-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A7796) += r8a7796-cpg-mssr.o
+obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
-obj-$(CONFIG_CLK_RENESAS_CPG_MSSR) += renesas-cpg-mssr.o clk-div6.o
+# Family
+obj-$(CONFIG_CLK_RCAR_GEN2) += clk-rcar-gen2.o
+obj-$(CONFIG_CLK_RCAR_GEN2_CPG) += rcar-gen2-cpg.o
+obj-$(CONFIG_CLK_RCAR_GEN3_CPG) += rcar-gen3-cpg.o
+
+# Generic
+obj-$(CONFIG_CLK_RENESAS_CPG_MSSR) += renesas-cpg-mssr.o
obj-$(CONFIG_CLK_RENESAS_CPG_MSTP) += clk-mstp.o
+obj-$(CONFIG_CLK_RENESAS_DIV6) += clk-div6.o
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 4067216bf31f..f1617dd044cb 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -325,7 +325,7 @@ fail_put:
void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
- if (!list_empty(&dev->power.subsys_data->clock_list))
+ if (!pm_clk_no_clocks(dev))
pm_clk_destroy(dev);
}
diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c
index f39519edc645..51a2479ed5d7 100644
--- a/drivers/clk/renesas/clk-rcar-gen2.c
+++ b/drivers/clk/renesas/clk-rcar-gen2.c
@@ -272,11 +272,14 @@ struct cpg_pll_config {
unsigned int extal_div;
unsigned int pll1_mult;
unsigned int pll3_mult;
+ unsigned int pll0_mult; /* For R-Car V2H and E2 only */
};
static const struct cpg_pll_config cpg_pll_configs[8] __initconst = {
- { 1, 208, 106 }, { 1, 208, 88 }, { 1, 156, 80 }, { 1, 156, 66 },
- { 2, 240, 122 }, { 2, 240, 102 }, { 2, 208, 106 }, { 2, 208, 88 },
+ { 1, 208, 106, 200 }, { 1, 208, 88, 200 },
+ { 1, 156, 80, 150 }, { 1, 156, 66, 150 },
+ { 2, 240, 122, 230 }, { 2, 240, 102, 230 },
+ { 2, 208, 106, 200 }, { 2, 208, 88, 200 },
};
/* SDHI divisors */
@@ -298,6 +301,12 @@ static const struct clk_div_table cpg_sd01_div_table[] = {
static u32 cpg_mode __initdata;
+static const char * const pll0_mult_match[] = {
+ "renesas,r8a7792-cpg-clocks",
+ "renesas,r8a7794-cpg-clocks",
+ NULL
+};
+
static struct clk * __init
rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
const struct cpg_pll_config *config,
@@ -318,9 +327,15 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
* clock implementation and we currently have no need to change
* the multiplier value.
*/
- u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+ if (of_device_compatible_match(np, pll0_mult_match)) {
+ /* R-Car V2H and E2 do not have PLL0CR */
+ mult = config->pll0_mult;
+ div = 3;
+ } else {
+ u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+ mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
+ }
parent_name = "main";
- mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
} else if (!strcmp(name, "pll1")) {
parent_name = "main";
mult = config->pll1_mult / 2;
diff --git a/drivers/clk/renesas/r8a7745-cpg-mssr.c b/drivers/clk/renesas/r8a7745-cpg-mssr.c
index 2f15ba786c3b..9e2360a8e14b 100644
--- a/drivers/clk/renesas/r8a7745-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7745-cpg-mssr.c
@@ -167,16 +167,12 @@ static const struct mssr_mod_clk r8a7745_mod_clks[] __initconst = {
DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
- DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)),
- DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)),
- DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)),
DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
- DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)),
DEF_MOD("scifa3", 1106, R8A7745_CLK_MP),
DEF_MOD("scifa4", 1107, R8A7745_CLK_MP),
DEF_MOD("scifa5", 1108, R8A7745_CLK_MP),
@@ -194,31 +190,22 @@ static const unsigned int r8a7745_crit_mod_clks[] __initconst = {
* MD EXTAL PLL0 PLL1 PLL3
* 14 13 19 (MHz) *1 *2
*---------------------------------------------------
- * 0 0 0 15 x200/3 x208/2 x106
* 0 0 1 15 x200/3 x208/2 x88
- * 0 1 0 20 x150/3 x156/2 x80
* 0 1 1 20 x150/3 x156/2 x66
- * 1 0 0 26 / 2 x230/3 x240/2 x122
* 1 0 1 26 / 2 x230/3 x240/2 x102
- * 1 1 0 30 / 2 x200/3 x208/2 x106
* 1 1 1 30 / 2 x200/3 x208/2 x88
*
* *1 : Table 7.5b indicates VCO output (PLL0 = VCO/3)
* *2 : Table 7.5b indicates VCO output (PLL1 = VCO/2)
*/
-#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \
- (((md) & BIT(13)) >> 12) | \
- (((md) & BIT(19)) >> 19))
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
+ (((md) & BIT(13)) >> 13))
static const struct rcar_gen2_cpg_pll_config cpg_pll_configs[8] __initconst = {
/* EXTAL div PLL1 mult PLL3 mult PLL0 mult */
- { 1, 208, 106, 200 },
{ 1, 208, 88, 200 },
- { 1, 156, 80, 150 },
{ 1, 156, 66, 150 },
- { 2, 240, 122, 230 },
{ 2, 240, 102, 230 },
- { 2, 208, 106, 200 },
{ 2, 208, 88, 200 },
};
diff --git a/drivers/clk/renesas/r8a7790-cpg-mssr.c b/drivers/clk/renesas/r8a7790-cpg-mssr.c
new file mode 100644
index 000000000000..46bb55bb223d
--- /dev/null
+++ b/drivers/clk/renesas/r8a7790-cpg-mssr.c
@@ -0,0 +1,278 @@
+/*
+ * r8a7790 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2017 Glider bvba
+ *
+ * Based on clk-rcar-gen2.c
+ *
+ * Copyright (C) 2013 Ideas On Board SPRL
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a7790-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen2-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A7790_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_USB_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL1_DIV2,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a7790_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("usb_extal", CLK_USB_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN2_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN2_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN2_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN2_PLL3, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_BASE("z", R8A7790_CLK_Z, CLK_TYPE_GEN2_Z, CLK_PLL0),
+ DEF_BASE("lb", R8A7790_CLK_LB, CLK_TYPE_GEN2_LB, CLK_PLL1),
+ DEF_BASE("adsp", R8A7790_CLK_ADSP, CLK_TYPE_GEN2_ADSP, CLK_PLL1),
+ DEF_BASE("sdh", R8A7790_CLK_SDH, CLK_TYPE_GEN2_SDH, CLK_PLL1),
+ DEF_BASE("sd0", R8A7790_CLK_SD0, CLK_TYPE_GEN2_SD0, CLK_PLL1),
+ DEF_BASE("sd1", R8A7790_CLK_SD1, CLK_TYPE_GEN2_SD1, CLK_PLL1),
+ DEF_BASE("qspi", R8A7790_CLK_QSPI, CLK_TYPE_GEN2_QSPI, CLK_PLL1_DIV2),
+ DEF_BASE("rcan", R8A7790_CLK_RCAN, CLK_TYPE_GEN2_RCAN, CLK_USB_EXTAL),
+
+ DEF_FIXED("z2", R8A7790_CLK_Z2, CLK_PLL1, 2, 1),
+ DEF_FIXED("zg", R8A7790_CLK_ZG, CLK_PLL1, 3, 1),
+ DEF_FIXED("zx", R8A7790_CLK_ZX, CLK_PLL1, 3, 1),
+ DEF_FIXED("zs", R8A7790_CLK_ZS, CLK_PLL1, 6, 1),
+ DEF_FIXED("hp", R8A7790_CLK_HP, CLK_PLL1, 12, 1),
+ DEF_FIXED("i", R8A7790_CLK_I, CLK_PLL1, 2, 1),
+ DEF_FIXED("b", R8A7790_CLK_B, CLK_PLL1, 12, 1),
+ DEF_FIXED("p", R8A7790_CLK_P, CLK_PLL1, 24, 1),
+ DEF_FIXED("cl", R8A7790_CLK_CL, CLK_PLL1, 48, 1),
+ DEF_FIXED("m2", R8A7790_CLK_M2, CLK_PLL1, 8, 1),
+ DEF_FIXED("imp", R8A7790_CLK_IMP, CLK_PLL1, 4, 1),
+ DEF_FIXED("zb3", R8A7790_CLK_ZB3, CLK_PLL3, 4, 1),
+ DEF_FIXED("zb3d2", R8A7790_CLK_ZB3D2, CLK_PLL3, 8, 1),
+ DEF_FIXED("ddr", R8A7790_CLK_DDR, CLK_PLL3, 8, 1),
+ DEF_FIXED("mp", R8A7790_CLK_MP, CLK_PLL1_DIV2, 15, 1),
+ DEF_FIXED("cp", R8A7790_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("r", R8A7790_CLK_R, CLK_PLL1, 49152, 1),
+ DEF_FIXED("osc", R8A7790_CLK_OSC, CLK_PLL1, 12288, 1),
+
+ DEF_DIV6P1("sd2", R8A7790_CLK_SD2, CLK_PLL1_DIV2, 0x078),
+ DEF_DIV6P1("sd3", R8A7790_CLK_SD3, CLK_PLL1_DIV2, 0x26c),
+ DEF_DIV6P1("mmc0", R8A7790_CLK_MMC0, CLK_PLL1_DIV2, 0x240),
+ DEF_DIV6P1("mmc1", R8A7790_CLK_MMC1, CLK_PLL1_DIV2, 0x244),
+ DEF_DIV6P1("ssp", R8A7790_CLK_SSP, CLK_PLL1_DIV2, 0x248),
+ DEF_DIV6P1("ssprs", R8A7790_CLK_SSPRS, CLK_PLL1_DIV2, 0x24c),
+};
+
+static const struct mssr_mod_clk r8a7790_mod_clks[] __initconst = {
+ DEF_MOD("msiof0", 0, R8A7790_CLK_MP),
+ DEF_MOD("vcp1", 100, R8A7790_CLK_ZS),
+ DEF_MOD("vcp0", 101, R8A7790_CLK_ZS),
+ DEF_MOD("vpc1", 102, R8A7790_CLK_ZS),
+ DEF_MOD("vpc0", 103, R8A7790_CLK_ZS),
+ DEF_MOD("jpu", 106, R8A7790_CLK_M2),
+ DEF_MOD("ssp1", 109, R8A7790_CLK_ZS),
+ DEF_MOD("tmu1", 111, R8A7790_CLK_P),
+ DEF_MOD("3dg", 112, R8A7790_CLK_ZG),
+ DEF_MOD("2d-dmac", 115, R8A7790_CLK_ZS),
+ DEF_MOD("fdp1-2", 117, R8A7790_CLK_ZS),
+ DEF_MOD("fdp1-1", 118, R8A7790_CLK_ZS),
+ DEF_MOD("fdp1-0", 119, R8A7790_CLK_ZS),
+ DEF_MOD("tmu3", 121, R8A7790_CLK_P),
+ DEF_MOD("tmu2", 122, R8A7790_CLK_P),
+ DEF_MOD("cmt0", 124, R8A7790_CLK_R),
+ DEF_MOD("tmu0", 125, R8A7790_CLK_CP),
+ DEF_MOD("vsp1du1", 127, R8A7790_CLK_ZS),
+ DEF_MOD("vsp1du0", 128, R8A7790_CLK_ZS),
+ DEF_MOD("vsp1-rt", 130, R8A7790_CLK_ZS),
+ DEF_MOD("vsp1-sy", 131, R8A7790_CLK_ZS),
+ DEF_MOD("scifa2", 202, R8A7790_CLK_MP),
+ DEF_MOD("scifa1", 203, R8A7790_CLK_MP),
+ DEF_MOD("scifa0", 204, R8A7790_CLK_MP),
+ DEF_MOD("msiof2", 205, R8A7790_CLK_MP),
+ DEF_MOD("scifb0", 206, R8A7790_CLK_MP),
+ DEF_MOD("scifb1", 207, R8A7790_CLK_MP),
+ DEF_MOD("msiof1", 208, R8A7790_CLK_MP),
+ DEF_MOD("msiof3", 215, R8A7790_CLK_MP),
+ DEF_MOD("scifb2", 216, R8A7790_CLK_MP),
+ DEF_MOD("sys-dmac1", 218, R8A7790_CLK_ZS),
+ DEF_MOD("sys-dmac0", 219, R8A7790_CLK_ZS),
+ DEF_MOD("iic2", 300, R8A7790_CLK_HP),
+ DEF_MOD("tpu0", 304, R8A7790_CLK_CP),
+ DEF_MOD("mmcif1", 305, R8A7790_CLK_MMC1),
+ DEF_MOD("scif2", 310, R8A7790_CLK_P),
+ DEF_MOD("sdhi3", 311, R8A7790_CLK_SD3),
+ DEF_MOD("sdhi2", 312, R8A7790_CLK_SD2),
+ DEF_MOD("sdhi1", 313, R8A7790_CLK_SD1),
+ DEF_MOD("sdhi0", 314, R8A7790_CLK_SD0),
+ DEF_MOD("mmcif0", 315, R8A7790_CLK_MMC0),
+ DEF_MOD("iic0", 318, R8A7790_CLK_HP),
+ DEF_MOD("pciec", 319, R8A7790_CLK_MP),
+ DEF_MOD("iic1", 323, R8A7790_CLK_HP),
+ DEF_MOD("usb3.0", 328, R8A7790_CLK_MP),
+ DEF_MOD("cmt1", 329, R8A7790_CLK_R),
+ DEF_MOD("usbhs-dmac0", 330, R8A7790_CLK_HP),
+ DEF_MOD("usbhs-dmac1", 331, R8A7790_CLK_HP),
+ DEF_MOD("irqc", 407, R8A7790_CLK_CP),
+ DEF_MOD("intc-sys", 408, R8A7790_CLK_ZS),
+ DEF_MOD("audio-dmac1", 501, R8A7790_CLK_HP),
+ DEF_MOD("audio-dmac0", 502, R8A7790_CLK_HP),
+ DEF_MOD("adsp_mod", 506, R8A7790_CLK_ADSP),
+ DEF_MOD("thermal", 522, CLK_EXTAL),
+ DEF_MOD("pwm", 523, R8A7790_CLK_P),
+ DEF_MOD("usb-ehci", 703, R8A7790_CLK_MP),
+ DEF_MOD("usbhs", 704, R8A7790_CLK_HP),
+ DEF_MOD("hscif1", 716, R8A7790_CLK_ZS),
+ DEF_MOD("hscif0", 717, R8A7790_CLK_ZS),
+ DEF_MOD("scif1", 720, R8A7790_CLK_P),
+ DEF_MOD("scif0", 721, R8A7790_CLK_P),
+ DEF_MOD("du2", 722, R8A7790_CLK_ZX),
+ DEF_MOD("du1", 723, R8A7790_CLK_ZX),
+ DEF_MOD("du0", 724, R8A7790_CLK_ZX),
+ DEF_MOD("lvds1", 725, R8A7790_CLK_ZX),
+ DEF_MOD("lvds0", 726, R8A7790_CLK_ZX),
+ DEF_MOD("mlb", 802, R8A7790_CLK_HP),
+ DEF_MOD("vin3", 808, R8A7790_CLK_ZG),
+ DEF_MOD("vin2", 809, R8A7790_CLK_ZG),
+ DEF_MOD("vin1", 810, R8A7790_CLK_ZG),
+ DEF_MOD("vin0", 811, R8A7790_CLK_ZG),
+ DEF_MOD("etheravb", 812, R8A7790_CLK_HP),
+ DEF_MOD("ether", 813, R8A7790_CLK_P),
+ DEF_MOD("sata1", 814, R8A7790_CLK_ZS),
+ DEF_MOD("sata0", 815, R8A7790_CLK_ZS),
+ DEF_MOD("gyro-adc", 901, R8A7790_CLK_P),
+ DEF_MOD("gpio5", 907, R8A7790_CLK_CP),
+ DEF_MOD("gpio4", 908, R8A7790_CLK_CP),
+ DEF_MOD("gpio3", 909, R8A7790_CLK_CP),
+ DEF_MOD("gpio2", 910, R8A7790_CLK_CP),
+ DEF_MOD("gpio1", 911, R8A7790_CLK_CP),
+ DEF_MOD("gpio0", 912, R8A7790_CLK_CP),
+ DEF_MOD("can1", 915, R8A7790_CLK_P),
+ DEF_MOD("can0", 916, R8A7790_CLK_P),
+ DEF_MOD("qspi_mod", 917, R8A7790_CLK_QSPI),
+ DEF_MOD("iicdvfs", 926, R8A7790_CLK_CP),
+ DEF_MOD("i2c3", 928, R8A7790_CLK_HP),
+ DEF_MOD("i2c2", 929, R8A7790_CLK_HP),
+ DEF_MOD("i2c1", 930, R8A7790_CLK_HP),
+ DEF_MOD("i2c0", 931, R8A7790_CLK_HP),
+ DEF_MOD("ssi-all", 1005, R8A7790_CLK_P),
+ DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A7790_CLK_P),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)),
+};
+
+static const unsigned int r8a7790_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-SYS (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL3
+ * 14 13 19 (MHz) *1 *1
+ *---------------------------------------------------
+ * 0 0 0 15 x172/2 x208/2 x106
+ * 0 0 1 15 x172/2 x208/2 x88
+ * 0 1 0 20 x130/2 x156/2 x80
+ * 0 1 1 20 x130/2 x156/2 x66
+ * 1 0 0 26 / 2 x200/2 x240/2 x122
+ * 1 0 1 26 / 2 x200/2 x240/2 x102
+ * 1 1 0 30 / 2 x172/2 x208/2 x106
+ * 1 1 1 30 / 2 x172/2 x208/2 x88
+ *
+ * *1 : Table 7.5a indicates VCO output (PLLx = VCO/2)
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \
+ (((md) & BIT(13)) >> 12) | \
+ (((md) & BIT(19)) >> 19))
+static const struct rcar_gen2_cpg_pll_config cpg_pll_configs[8] __initconst = {
+ { 1, 208, 106 }, { 1, 208, 88 }, { 1, 156, 80 }, { 1, 156, 66 },
+ { 2, 240, 122 }, { 2, 240, 102 }, { 2, 208, 106 }, { 2, 208, 88 },
+};
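+/*
+ * Illustrative example: CPG_PLL_CONFIG_INDEX() packs MD14, MD13 and MD19
+ * into a 3-bit index (MD14 = bit 2, MD13 = bit 1, MD19 = bit 0). A board
+ * strapped with MD14=1, MD13=0, MD19=1 therefore selects entry 5,
+ * { 2, 240, 102 }: EXTAL (26 MHz) divided by 2, PLL1 = x240/2 and
+ * PLL3 = x102, matching the corresponding row of the table above.
+ */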
+
+static int __init r8a7790_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen2_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ return rcar_gen2_cpg_init(cpg_pll_config, 2, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a7790_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a7790_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a7790_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a7790_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a7790_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a7790_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a7790_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a7790_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen2_cpg_clk_register,
+};
diff --git a/drivers/clk/renesas/r8a7791-cpg-mssr.c b/drivers/clk/renesas/r8a7791-cpg-mssr.c
new file mode 100644
index 000000000000..c0b51f9bb278
--- /dev/null
+++ b/drivers/clk/renesas/r8a7791-cpg-mssr.c
@@ -0,0 +1,286 @@
+/*
+ * r8a7791 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2015-2017 Glider bvba
+ *
+ * Based on clk-rcar-gen2.c
+ *
+ * Copyright (C) 2013 Ideas On Board SPRL
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a7791-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen2-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A7791_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_USB_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL1_DIV2,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static struct cpg_core_clk r8a7791_core_clks[] __initdata = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("usb_extal", CLK_USB_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN2_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN2_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN2_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN2_PLL3, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_BASE("z", R8A7791_CLK_Z, CLK_TYPE_GEN2_Z, CLK_PLL0),
+ DEF_BASE("lb", R8A7791_CLK_LB, CLK_TYPE_GEN2_LB, CLK_PLL1),
+ DEF_BASE("adsp", R8A7791_CLK_ADSP, CLK_TYPE_GEN2_ADSP, CLK_PLL1),
+ DEF_BASE("sdh", R8A7791_CLK_SDH, CLK_TYPE_GEN2_SDH, CLK_PLL1),
+ DEF_BASE("sd0", R8A7791_CLK_SD0, CLK_TYPE_GEN2_SD0, CLK_PLL1),
+ DEF_BASE("qspi", R8A7791_CLK_QSPI, CLK_TYPE_GEN2_QSPI, CLK_PLL1_DIV2),
+ DEF_BASE("rcan", R8A7791_CLK_RCAN, CLK_TYPE_GEN2_RCAN, CLK_USB_EXTAL),
+
+ DEF_FIXED("zg", R8A7791_CLK_ZG, CLK_PLL1, 3, 1),
+ DEF_FIXED("zx", R8A7791_CLK_ZX, CLK_PLL1, 3, 1),
+ DEF_FIXED("zs", R8A7791_CLK_ZS, CLK_PLL1, 6, 1),
+ DEF_FIXED("hp", R8A7791_CLK_HP, CLK_PLL1, 12, 1),
+ DEF_FIXED("i", R8A7791_CLK_I, CLK_PLL1, 2, 1),
+ DEF_FIXED("b", R8A7791_CLK_B, CLK_PLL1, 12, 1),
+ DEF_FIXED("p", R8A7791_CLK_P, CLK_PLL1, 24, 1),
+ DEF_FIXED("cl", R8A7791_CLK_CL, CLK_PLL1, 48, 1),
+ DEF_FIXED("m2", R8A7791_CLK_M2, CLK_PLL1, 8, 1),
+ DEF_FIXED("zb3", R8A7791_CLK_ZB3, CLK_PLL3, 4, 1),
+ DEF_FIXED("zb3d2", R8A7791_CLK_ZB3D2, CLK_PLL3, 8, 1),
+ DEF_FIXED("ddr", R8A7791_CLK_DDR, CLK_PLL3, 8, 1),
+ DEF_FIXED("mp", R8A7791_CLK_MP, CLK_PLL1_DIV2, 15, 1),
+ DEF_FIXED("cp", R8A7791_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("r", R8A7791_CLK_R, CLK_PLL1, 49152, 1),
+ DEF_FIXED("osc", R8A7791_CLK_OSC, CLK_PLL1, 12288, 1),
+
+ DEF_DIV6P1("sd2", R8A7791_CLK_SD2, CLK_PLL1_DIV2, 0x078),
+ DEF_DIV6P1("sd3", R8A7791_CLK_SD3, CLK_PLL1_DIV2, 0x26c),
+ DEF_DIV6P1("mmc0", R8A7791_CLK_MMC0, CLK_PLL1_DIV2, 0x240),
+ DEF_DIV6P1("ssp", R8A7791_CLK_SSP, CLK_PLL1_DIV2, 0x248),
+ DEF_DIV6P1("ssprs", R8A7791_CLK_SSPRS, CLK_PLL1_DIV2, 0x24c),
+};
+
+static const struct mssr_mod_clk r8a7791_mod_clks[] __initconst = {
+ DEF_MOD("msiof0", 0, R8A7791_CLK_MP),
+ DEF_MOD("vcp0", 101, R8A7791_CLK_ZS),
+ DEF_MOD("vpc0", 103, R8A7791_CLK_ZS),
+ DEF_MOD("jpu", 106, R8A7791_CLK_M2),
+ DEF_MOD("ssp1", 109, R8A7791_CLK_ZS),
+ DEF_MOD("tmu1", 111, R8A7791_CLK_P),
+ DEF_MOD("3dg", 112, R8A7791_CLK_ZG),
+ DEF_MOD("2d-dmac", 115, R8A7791_CLK_ZS),
+ DEF_MOD("fdp1-1", 118, R8A7791_CLK_ZS),
+ DEF_MOD("fdp1-0", 119, R8A7791_CLK_ZS),
+ DEF_MOD("tmu3", 121, R8A7791_CLK_P),
+ DEF_MOD("tmu2", 122, R8A7791_CLK_P),
+ DEF_MOD("cmt0", 124, R8A7791_CLK_R),
+ DEF_MOD("tmu0", 125, R8A7791_CLK_CP),
+ DEF_MOD("vsp1du1", 127, R8A7791_CLK_ZS),
+ DEF_MOD("vsp1du0", 128, R8A7791_CLK_ZS),
+ DEF_MOD("vsp1-sy", 131, R8A7791_CLK_ZS),
+ DEF_MOD("scifa2", 202, R8A7791_CLK_MP),
+ DEF_MOD("scifa1", 203, R8A7791_CLK_MP),
+ DEF_MOD("scifa0", 204, R8A7791_CLK_MP),
+ DEF_MOD("msiof2", 205, R8A7791_CLK_MP),
+ DEF_MOD("scifb0", 206, R8A7791_CLK_MP),
+ DEF_MOD("scifb1", 207, R8A7791_CLK_MP),
+ DEF_MOD("msiof1", 208, R8A7791_CLK_MP),
+ DEF_MOD("scifb2", 216, R8A7791_CLK_MP),
+ DEF_MOD("sys-dmac1", 218, R8A7791_CLK_ZS),
+ DEF_MOD("sys-dmac0", 219, R8A7791_CLK_ZS),
+ DEF_MOD("tpu0", 304, R8A7791_CLK_CP),
+ DEF_MOD("sdhi3", 311, R8A7791_CLK_SD3),
+ DEF_MOD("sdhi2", 312, R8A7791_CLK_SD2),
+ DEF_MOD("sdhi0", 314, R8A7791_CLK_SD0),
+ DEF_MOD("mmcif0", 315, R8A7791_CLK_MMC0),
+ DEF_MOD("iic0", 318, R8A7791_CLK_HP),
+ DEF_MOD("pciec", 319, R8A7791_CLK_MP),
+ DEF_MOD("iic1", 323, R8A7791_CLK_HP),
+ DEF_MOD("usb3.0", 328, R8A7791_CLK_MP),
+ DEF_MOD("cmt1", 329, R8A7791_CLK_R),
+ DEF_MOD("usbhs-dmac0", 330, R8A7791_CLK_HP),
+ DEF_MOD("usbhs-dmac1", 331, R8A7791_CLK_HP),
+ DEF_MOD("irqc", 407, R8A7791_CLK_CP),
+ DEF_MOD("intc-sys", 408, R8A7791_CLK_ZS),
+ DEF_MOD("audio-dmac1", 501, R8A7791_CLK_HP),
+ DEF_MOD("audio-dmac0", 502, R8A7791_CLK_HP),
+ DEF_MOD("adsp_mod", 506, R8A7791_CLK_ADSP),
+ DEF_MOD("thermal", 522, CLK_EXTAL),
+ DEF_MOD("pwm", 523, R8A7791_CLK_P),
+ DEF_MOD("usb-ehci", 703, R8A7791_CLK_MP),
+ DEF_MOD("usbhs", 704, R8A7791_CLK_HP),
+ DEF_MOD("hscif2", 713, R8A7791_CLK_ZS),
+ DEF_MOD("scif5", 714, R8A7791_CLK_P),
+ DEF_MOD("scif4", 715, R8A7791_CLK_P),
+ DEF_MOD("hscif1", 716, R8A7791_CLK_ZS),
+ DEF_MOD("hscif0", 717, R8A7791_CLK_ZS),
+ DEF_MOD("scif3", 718, R8A7791_CLK_P),
+ DEF_MOD("scif2", 719, R8A7791_CLK_P),
+ DEF_MOD("scif1", 720, R8A7791_CLK_P),
+ DEF_MOD("scif0", 721, R8A7791_CLK_P),
+ DEF_MOD("du1", 723, R8A7791_CLK_ZX),
+ DEF_MOD("du0", 724, R8A7791_CLK_ZX),
+ DEF_MOD("lvds0", 726, R8A7791_CLK_ZX),
+ DEF_MOD("ipmmu-sgx", 800, R8A7791_CLK_ZX),
+ DEF_MOD("mlb", 802, R8A7791_CLK_HP),
+ DEF_MOD("vin2", 809, R8A7791_CLK_ZG),
+ DEF_MOD("vin1", 810, R8A7791_CLK_ZG),
+ DEF_MOD("vin0", 811, R8A7791_CLK_ZG),
+ DEF_MOD("etheravb", 812, R8A7791_CLK_HP),
+ DEF_MOD("ether", 813, R8A7791_CLK_P),
+ DEF_MOD("sata1", 814, R8A7791_CLK_ZS),
+ DEF_MOD("sata0", 815, R8A7791_CLK_ZS),
+ DEF_MOD("gyro-adc", 901, R8A7791_CLK_P),
+ DEF_MOD("gpio7", 904, R8A7791_CLK_CP),
+ DEF_MOD("gpio6", 905, R8A7791_CLK_CP),
+ DEF_MOD("gpio5", 907, R8A7791_CLK_CP),
+ DEF_MOD("gpio4", 908, R8A7791_CLK_CP),
+ DEF_MOD("gpio3", 909, R8A7791_CLK_CP),
+ DEF_MOD("gpio2", 910, R8A7791_CLK_CP),
+ DEF_MOD("gpio1", 911, R8A7791_CLK_CP),
+ DEF_MOD("gpio0", 912, R8A7791_CLK_CP),
+ DEF_MOD("can1", 915, R8A7791_CLK_P),
+ DEF_MOD("can0", 916, R8A7791_CLK_P),
+ DEF_MOD("qspi_mod", 917, R8A7791_CLK_QSPI),
+ DEF_MOD("i2c5", 925, R8A7791_CLK_HP),
+ DEF_MOD("iicdvfs", 926, R8A7791_CLK_CP),
+ DEF_MOD("i2c4", 927, R8A7791_CLK_HP),
+ DEF_MOD("i2c3", 928, R8A7791_CLK_HP),
+ DEF_MOD("i2c2", 929, R8A7791_CLK_HP),
+ DEF_MOD("i2c1", 930, R8A7791_CLK_HP),
+ DEF_MOD("i2c0", 931, R8A7791_CLK_HP),
+ DEF_MOD("ssi-all", 1005, R8A7791_CLK_P),
+ DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A7791_CLK_P),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)),
+ DEF_MOD("scifa3", 1106, R8A7791_CLK_MP),
+ DEF_MOD("scifa4", 1107, R8A7791_CLK_MP),
+ DEF_MOD("scifa5", 1108, R8A7791_CLK_MP),
+};
+
+static const unsigned int r8a7791_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-SYS (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD EXTAL PLL0 PLL1 PLL3
+ * 14 13 19 (MHz) *1 *1
+ *---------------------------------------------------
+ * 0 0 0 15 x172/2 x208/2 x106
+ * 0 0 1 15 x172/2 x208/2 x88
+ * 0 1 0 20 x130/2 x156/2 x80
+ * 0 1 1 20 x130/2 x156/2 x66
+ * 1 0 0 26 / 2 x200/2 x240/2 x122
+ * 1 0 1 26 / 2 x200/2 x240/2 x102
+ * 1 1 0 30 / 2 x172/2 x208/2 x106
+ * 1 1 1 30 / 2 x172/2 x208/2 x88
+ *
+ * *1 : Table 7.5a indicates VCO output (PLLx = VCO/2)
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \
+ (((md) & BIT(13)) >> 12) | \
+ (((md) & BIT(19)) >> 19))
+static const struct rcar_gen2_cpg_pll_config cpg_pll_configs[8] __initconst = {
+ { 1, 208, 106 }, { 1, 208, 88 }, { 1, 156, 80 }, { 1, 156, 66 },
+ { 2, 240, 122 }, { 2, 240, 102 }, { 2, 208, 106 }, { 2, 208, 88 },
+};
+
+static int __init r8a7791_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen2_cpg_pll_config *cpg_pll_config;
+ struct device_node *np = dev->of_node;
+ unsigned int i;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ if (of_device_is_compatible(np, "renesas,r8a7793-cpg-mssr")) {
+ /* R-Car M2-N uses a 1/5 divider for ZG */
+ for (i = 0; i < ARRAY_SIZE(r8a7791_core_clks); i++)
+ if (r8a7791_core_clks[i].id == R8A7791_CLK_ZG) {
+ r8a7791_core_clks[i].div = 5;
+ break;
+ }
+ }
+ return rcar_gen2_cpg_init(cpg_pll_config, 2, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a7791_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a7791_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a7791_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a7791_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a7791_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a7791_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a7791_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a7791_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen2_cpg_clk_register,
+};
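The CPG_PLL_CONFIG_INDEX() macro above packs mode pins MD14, MD13 and MD19 into a 3-bit index (MD14 -> bit 2, MD13 -> bit 1, MD19 -> bit 0), so each row of the comment table maps directly onto one entry of cpg_pll_configs[]. The following standalone sketch, which is not part of the patch, walks one hypothetical mode-pin value through the same arithmetic; BIT() is redefined locally because the kernel's <linux/bits.h> is not in scope in a user-space example.

/* Standalone sketch (not part of the patch): how CPG_PLL_CONFIG_INDEX()
 * turns the R-Car mode-pin word into a row of cpg_pll_configs[].
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1U << (n))

/* Same expression as in r8a7791-cpg-mssr.c above:
 * MD14 -> bit 2, MD13 -> bit 1, MD19 -> bit 0 of the index.
 */
#define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 12) | \
					 (((md) & BIT(13)) >> 12) | \
					 (((md) & BIT(19)) >> 19))

int main(void)
{
	/* Hypothetical mode-pin sample: MD14=1, MD13=0, MD19=1 selects
	 * row 0b101 = 5, i.e. { 2, 240, 102 } in the table above.
	 */
	uint32_t cpg_mode = BIT(14) | BIT(19);

	printf("index = %u\n", CPG_PLL_CONFIG_INDEX(cpg_mode)); /* prints 5 */
	return 0;
}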
diff --git a/drivers/clk/renesas/r8a7792-cpg-mssr.c b/drivers/clk/renesas/r8a7792-cpg-mssr.c
new file mode 100644
index 000000000000..a832b9b6f7b0
--- /dev/null
+++ b/drivers/clk/renesas/r8a7792-cpg-mssr.c
@@ -0,0 +1,221 @@
+/*
+ * r8a7792 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2017 Glider bvba
+ *
+ * Based on clk-rcar-gen2.c
+ *
+ * Copyright (C) 2013 Ideas On Board SPRL
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a7792-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen2-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A7792_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL1_DIV2,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a7792_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN2_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN2_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN2_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN2_PLL3, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_BASE("lb", R8A7792_CLK_LB, CLK_TYPE_GEN2_LB, CLK_PLL1),
+ DEF_BASE("qspi", R8A7792_CLK_QSPI, CLK_TYPE_GEN2_QSPI, CLK_PLL1_DIV2),
+
+ DEF_FIXED("z", R8A7792_CLK_Z, CLK_PLL0, 1, 1),
+ DEF_FIXED("zg", R8A7792_CLK_ZG, CLK_PLL1, 5, 1),
+ DEF_FIXED("zx", R8A7792_CLK_ZX, CLK_PLL1, 3, 1),
+ DEF_FIXED("zs", R8A7792_CLK_ZS, CLK_PLL1, 6, 1),
+ DEF_FIXED("hp", R8A7792_CLK_HP, CLK_PLL1, 12, 1),
+ DEF_FIXED("i", R8A7792_CLK_I, CLK_PLL1, 3, 1),
+ DEF_FIXED("b", R8A7792_CLK_B, CLK_PLL1, 12, 1),
+ DEF_FIXED("p", R8A7792_CLK_P, CLK_PLL1, 24, 1),
+ DEF_FIXED("cl", R8A7792_CLK_CL, CLK_PLL1, 48, 1),
+ DEF_FIXED("m2", R8A7792_CLK_M2, CLK_PLL1, 8, 1),
+ DEF_FIXED("imp", R8A7792_CLK_IMP, CLK_PLL1, 4, 1),
+ DEF_FIXED("zb3", R8A7792_CLK_ZB3, CLK_PLL3, 4, 1),
+ DEF_FIXED("zb3d2", R8A7792_CLK_ZB3D2, CLK_PLL3, 8, 1),
+ DEF_FIXED("ddr", R8A7792_CLK_DDR, CLK_PLL3, 8, 1),
+ DEF_FIXED("sd", R8A7792_CLK_SD, CLK_PLL1_DIV2, 8, 1),
+ DEF_FIXED("mp", R8A7792_CLK_MP, CLK_PLL1_DIV2, 15, 1),
+ DEF_FIXED("cp", R8A7792_CLK_CP, CLK_PLL1, 48, 1),
+ DEF_FIXED("cpex", R8A7792_CLK_CPEX, CLK_EXTAL, 2, 1),
+ DEF_FIXED("rcan", R8A7792_CLK_RCAN, CLK_PLL1_DIV2, 49, 1),
+ DEF_FIXED("r", R8A7792_CLK_R, CLK_PLL1, 49152, 1),
+ DEF_FIXED("osc", R8A7792_CLK_OSC, CLK_PLL1, 12288, 1),
+};
+
+static const struct mssr_mod_clk r8a7792_mod_clks[] __initconst = {
+ DEF_MOD("msiof0", 0, R8A7792_CLK_MP),
+ DEF_MOD("jpu", 106, R8A7792_CLK_M2),
+ DEF_MOD("tmu1", 111, R8A7792_CLK_P),
+ DEF_MOD("3dg", 112, R8A7792_CLK_ZG),
+ DEF_MOD("2d-dmac", 115, R8A7792_CLK_ZS),
+ DEF_MOD("tmu3", 121, R8A7792_CLK_P),
+ DEF_MOD("tmu2", 122, R8A7792_CLK_P),
+ DEF_MOD("cmt0", 124, R8A7792_CLK_R),
+ DEF_MOD("tmu0", 125, R8A7792_CLK_CP),
+ DEF_MOD("vsp1du1", 127, R8A7792_CLK_ZS),
+ DEF_MOD("vsp1du0", 128, R8A7792_CLK_ZS),
+ DEF_MOD("vsp1-sy", 131, R8A7792_CLK_ZS),
+ DEF_MOD("msiof1", 208, R8A7792_CLK_MP),
+ DEF_MOD("sys-dmac1", 218, R8A7792_CLK_ZS),
+ DEF_MOD("sys-dmac0", 219, R8A7792_CLK_ZS),
+ DEF_MOD("tpu0", 304, R8A7792_CLK_CP),
+ DEF_MOD("sdhi0", 314, R8A7792_CLK_SD),
+ DEF_MOD("cmt1", 329, R8A7792_CLK_R),
+ DEF_MOD("irqc", 407, R8A7792_CLK_CP),
+ DEF_MOD("intc-sys", 408, R8A7792_CLK_ZS),
+ DEF_MOD("audio-dmac0", 502, R8A7792_CLK_HP),
+ DEF_MOD("thermal", 522, CLK_EXTAL),
+ DEF_MOD("pwm", 523, R8A7792_CLK_P),
+ DEF_MOD("hscif1", 716, R8A7792_CLK_ZS),
+ DEF_MOD("hscif0", 717, R8A7792_CLK_ZS),
+ DEF_MOD("scif3", 718, R8A7792_CLK_P),
+ DEF_MOD("scif2", 719, R8A7792_CLK_P),
+ DEF_MOD("scif1", 720, R8A7792_CLK_P),
+ DEF_MOD("scif0", 721, R8A7792_CLK_P),
+ DEF_MOD("du1", 723, R8A7792_CLK_ZX),
+ DEF_MOD("du0", 724, R8A7792_CLK_ZX),
+ DEF_MOD("vin5", 804, R8A7792_CLK_ZG),
+ DEF_MOD("vin4", 805, R8A7792_CLK_ZG),
+ DEF_MOD("vin3", 808, R8A7792_CLK_ZG),
+ DEF_MOD("vin2", 809, R8A7792_CLK_ZG),
+ DEF_MOD("vin1", 810, R8A7792_CLK_ZG),
+ DEF_MOD("vin0", 811, R8A7792_CLK_ZG),
+ DEF_MOD("etheravb", 812, R8A7792_CLK_HP),
+ DEF_MOD("gyro-adc", 901, R8A7792_CLK_P),
+ DEF_MOD("gpio7", 904, R8A7792_CLK_CP),
+ DEF_MOD("gpio6", 905, R8A7792_CLK_CP),
+ DEF_MOD("gpio5", 907, R8A7792_CLK_CP),
+ DEF_MOD("gpio4", 908, R8A7792_CLK_CP),
+ DEF_MOD("gpio3", 909, R8A7792_CLK_CP),
+ DEF_MOD("gpio2", 910, R8A7792_CLK_CP),
+ DEF_MOD("gpio1", 911, R8A7792_CLK_CP),
+ DEF_MOD("gpio0", 912, R8A7792_CLK_CP),
+ DEF_MOD("gpio11", 913, R8A7792_CLK_CP),
+ DEF_MOD("gpio10", 914, R8A7792_CLK_CP),
+ DEF_MOD("can1", 915, R8A7792_CLK_P),
+ DEF_MOD("can0", 916, R8A7792_CLK_P),
+ DEF_MOD("qspi_mod", 917, R8A7792_CLK_QSPI),
+ DEF_MOD("gpio9", 919, R8A7792_CLK_CP),
+ DEF_MOD("gpio8", 921, R8A7792_CLK_CP),
+ DEF_MOD("i2c5", 925, R8A7792_CLK_HP),
+ DEF_MOD("iicdvfs", 926, R8A7792_CLK_CP),
+ DEF_MOD("i2c4", 927, R8A7792_CLK_HP),
+ DEF_MOD("i2c3", 928, R8A7792_CLK_HP),
+ DEF_MOD("i2c2", 929, R8A7792_CLK_HP),
+ DEF_MOD("i2c1", 930, R8A7792_CLK_HP),
+ DEF_MOD("i2c0", 931, R8A7792_CLK_HP),
+ DEF_MOD("ssi-all", 1005, R8A7792_CLK_P),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+};
+
+static const unsigned int r8a7792_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-SYS (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD        EXTAL    PLL0     PLL1     PLL3
+ * 14 13 19  (MHz)    *1       *2
+ *---------------------------------------------------
+ * 0  0  0   15       x200/3   x208/2   x106
+ * 0  0  1   15       x200/3   x208/2   x88
+ * 0  1  0   20       x150/3   x156/2   x80
+ * 0  1  1   20       x150/3   x156/2   x66
+ * 1  0  0   26 / 2   x230/3   x240/2   x122
+ * 1  0  1   26 / 2   x230/3   x240/2   x102
+ * 1  1  0   30 / 2   x200/3   x208/2   x106
+ * 1  1  1   30 / 2   x200/3   x208/2   x88
+ *
+ * *1 : Table 7.5b indicates VCO output (PLL0 = VCO/3)
+ * *2 : Table 7.5b indicates VCO output (PLL1 = VCO/2)
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 12) | \
+ (((md) & BIT(13)) >> 12) | \
+ (((md) & BIT(19)) >> 19))
+static const struct rcar_gen2_cpg_pll_config cpg_pll_configs[8] __initconst = {
+ { 1, 208, 106, 200 },
+ { 1, 208, 88, 200 },
+ { 1, 156, 80, 150 },
+ { 1, 156, 66, 150 },
+ { 2, 240, 122, 230 },
+ { 2, 240, 102, 230 },
+ { 2, 208, 106, 200 },
+ { 2, 208, 88, 200 },
+};
+
+static int __init r8a7792_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen2_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ return rcar_gen2_cpg_init(cpg_pll_config, 3, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a7792_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a7792_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a7792_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a7792_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a7792_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a7792_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a7792_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a7792_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen2_cpg_clk_register,
+};
diff --git a/drivers/clk/renesas/r8a7794-cpg-mssr.c b/drivers/clk/renesas/r8a7794-cpg-mssr.c
new file mode 100644
index 000000000000..ec091a42da54
--- /dev/null
+++ b/drivers/clk/renesas/r8a7794-cpg-mssr.c
@@ -0,0 +1,255 @@
+/*
+ * r8a7794 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2017 Glider bvba
+ *
+ * Based on clk-rcar-gen2.c
+ *
+ * Copyright (C) 2013 Ideas On Board SPRL
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a7794-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen2-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A7794_CLK_OSC,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_USB_EXTAL,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL0,
+ CLK_PLL1,
+ CLK_PLL3,
+ CLK_PLL1_DIV2,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a7794_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("usb_extal", CLK_USB_EXTAL),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN2_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN2_PLL0, CLK_MAIN),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN2_PLL1, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN2_PLL3, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+
+ /* Core Clock Outputs */
+ DEF_BASE("lb", R8A7794_CLK_LB, CLK_TYPE_GEN2_LB, CLK_PLL1),
+ DEF_BASE("adsp", R8A7794_CLK_ADSP, CLK_TYPE_GEN2_ADSP, CLK_PLL1),
+ DEF_BASE("sdh", R8A7794_CLK_SDH, CLK_TYPE_GEN2_SDH, CLK_PLL1),
+ DEF_BASE("sd0", R8A7794_CLK_SD0, CLK_TYPE_GEN2_SD0, CLK_PLL1),
+ DEF_BASE("qspi", R8A7794_CLK_QSPI, CLK_TYPE_GEN2_QSPI, CLK_PLL1_DIV2),
+ DEF_BASE("rcan", R8A7794_CLK_RCAN, CLK_TYPE_GEN2_RCAN, CLK_USB_EXTAL),
+
+ DEF_FIXED("z2", R8A7794_CLK_Z2, CLK_PLL0, 1, 1),
+ DEF_FIXED("zg", R8A7794_CLK_ZG, CLK_PLL1, 6, 1),
+ DEF_FIXED("zx", R8A7794_CLK_ZX, CLK_PLL1, 3, 1),
+ DEF_FIXED("zs", R8A7794_CLK_ZS, CLK_PLL1, 6, 1),
+ DEF_FIXED("hp", R8A7794_CLK_HP, CLK_PLL1, 12, 1),
+ DEF_FIXED("i", R8A7794_CLK_I, CLK_PLL1, 2, 1),
+ DEF_FIXED("b", R8A7794_CLK_B, CLK_PLL1, 12, 1),
+ DEF_FIXED("p", R8A7794_CLK_P, CLK_PLL1, 24, 1),
+ DEF_FIXED("cl", R8A7794_CLK_CL, CLK_PLL1, 48, 1),
+ DEF_FIXED("cp", R8A7794_CLK_CP, CLK_PLL1, 48, 1),
+ DEF_FIXED("m2", R8A7794_CLK_M2, CLK_PLL1, 8, 1),
+ DEF_FIXED("zb3", R8A7794_CLK_ZB3, CLK_PLL3, 4, 1),
+ DEF_FIXED("zb3d2", R8A7794_CLK_ZB3D2, CLK_PLL3, 8, 1),
+ DEF_FIXED("ddr", R8A7794_CLK_DDR, CLK_PLL3, 8, 1),
+ DEF_FIXED("mp", R8A7794_CLK_MP, CLK_PLL1_DIV2, 15, 1),
+ DEF_FIXED("cpex", R8A7794_CLK_CPEX, CLK_EXTAL, 2, 1),
+ DEF_FIXED("r", R8A7794_CLK_R, CLK_PLL1, 49152, 1),
+ DEF_FIXED("osc", R8A7794_CLK_OSC, CLK_PLL1, 12288, 1),
+
+ DEF_DIV6P1("sd2", R8A7794_CLK_SD2, CLK_PLL1_DIV2, 0x078),
+ DEF_DIV6P1("sd3", R8A7794_CLK_SD3, CLK_PLL1_DIV2, 0x26c),
+ DEF_DIV6P1("mmc0", R8A7794_CLK_MMC0, CLK_PLL1_DIV2, 0x240),
+};
+
+static const struct mssr_mod_clk r8a7794_mod_clks[] __initconst = {
+ DEF_MOD("msiof0", 0, R8A7794_CLK_MP),
+ DEF_MOD("vcp0", 101, R8A7794_CLK_ZS),
+ DEF_MOD("vpc0", 103, R8A7794_CLK_ZS),
+ DEF_MOD("jpu", 106, R8A7794_CLK_M2),
+ DEF_MOD("tmu1", 111, R8A7794_CLK_P),
+ DEF_MOD("3dg", 112, R8A7794_CLK_ZG),
+ DEF_MOD("2d-dmac", 115, R8A7794_CLK_ZS),
+ DEF_MOD("fdp1-0", 119, R8A7794_CLK_ZS),
+ DEF_MOD("tmu3", 121, R8A7794_CLK_P),
+ DEF_MOD("tmu2", 122, R8A7794_CLK_P),
+ DEF_MOD("cmt0", 124, R8A7794_CLK_R),
+ DEF_MOD("tmu0", 125, R8A7794_CLK_CP),
+ DEF_MOD("vsp1du0", 128, R8A7794_CLK_ZS),
+ DEF_MOD("vsp1-sy", 131, R8A7794_CLK_ZS),
+ DEF_MOD("scifa2", 202, R8A7794_CLK_MP),
+ DEF_MOD("scifa1", 203, R8A7794_CLK_MP),
+ DEF_MOD("scifa0", 204, R8A7794_CLK_MP),
+ DEF_MOD("msiof2", 205, R8A7794_CLK_MP),
+ DEF_MOD("scifb0", 206, R8A7794_CLK_MP),
+ DEF_MOD("scifb1", 207, R8A7794_CLK_MP),
+ DEF_MOD("msiof1", 208, R8A7794_CLK_MP),
+ DEF_MOD("scifb2", 216, R8A7794_CLK_MP),
+ DEF_MOD("sys-dmac1", 218, R8A7794_CLK_ZS),
+ DEF_MOD("sys-dmac0", 219, R8A7794_CLK_ZS),
+ DEF_MOD("tpu0", 304, R8A7794_CLK_CP),
+ DEF_MOD("sdhi3", 311, R8A7794_CLK_SD3),
+ DEF_MOD("sdhi2", 312, R8A7794_CLK_SD2),
+ DEF_MOD("sdhi0", 314, R8A7794_CLK_SD0),
+ DEF_MOD("mmcif0", 315, R8A7794_CLK_MMC0),
+ DEF_MOD("iic0", 318, R8A7794_CLK_HP),
+ DEF_MOD("iic1", 323, R8A7794_CLK_HP),
+ DEF_MOD("cmt1", 329, R8A7794_CLK_R),
+ DEF_MOD("usbhs-dmac0", 330, R8A7794_CLK_HP),
+ DEF_MOD("usbhs-dmac1", 331, R8A7794_CLK_HP),
+ DEF_MOD("irqc", 407, R8A7794_CLK_CP),
+ DEF_MOD("intc-sys", 408, R8A7794_CLK_ZS),
+ DEF_MOD("audio-dmac0", 502, R8A7794_CLK_HP),
+ DEF_MOD("adsp_mod", 506, R8A7794_CLK_ADSP),
+ DEF_MOD("pwm", 523, R8A7794_CLK_P),
+ DEF_MOD("usb-ehci", 703, R8A7794_CLK_MP),
+ DEF_MOD("usbhs", 704, R8A7794_CLK_HP),
+ DEF_MOD("hscif2", 713, R8A7794_CLK_ZS),
+ DEF_MOD("scif5", 714, R8A7794_CLK_P),
+ DEF_MOD("scif4", 715, R8A7794_CLK_P),
+ DEF_MOD("hscif1", 716, R8A7794_CLK_ZS),
+ DEF_MOD("hscif0", 717, R8A7794_CLK_ZS),
+ DEF_MOD("scif3", 718, R8A7794_CLK_P),
+ DEF_MOD("scif2", 719, R8A7794_CLK_P),
+ DEF_MOD("scif1", 720, R8A7794_CLK_P),
+ DEF_MOD("scif0", 721, R8A7794_CLK_P),
+ DEF_MOD("du1", 723, R8A7794_CLK_ZX),
+ DEF_MOD("du0", 724, R8A7794_CLK_ZX),
+ DEF_MOD("ipmmu-sgx", 800, R8A7794_CLK_ZX),
+ DEF_MOD("mlb", 802, R8A7794_CLK_HP),
+ DEF_MOD("vin1", 810, R8A7794_CLK_ZG),
+ DEF_MOD("vin0", 811, R8A7794_CLK_ZG),
+ DEF_MOD("etheravb", 812, R8A7794_CLK_HP),
+ DEF_MOD("ether", 813, R8A7794_CLK_P),
+ DEF_MOD("gyro-adc", 901, R8A7794_CLK_P),
+ DEF_MOD("gpio6", 905, R8A7794_CLK_CP),
+ DEF_MOD("gpio5", 907, R8A7794_CLK_CP),
+ DEF_MOD("gpio4", 908, R8A7794_CLK_CP),
+ DEF_MOD("gpio3", 909, R8A7794_CLK_CP),
+ DEF_MOD("gpio2", 910, R8A7794_CLK_CP),
+ DEF_MOD("gpio1", 911, R8A7794_CLK_CP),
+ DEF_MOD("gpio0", 912, R8A7794_CLK_CP),
+ DEF_MOD("can1", 915, R8A7794_CLK_P),
+ DEF_MOD("can0", 916, R8A7794_CLK_P),
+ DEF_MOD("qspi_mod", 917, R8A7794_CLK_QSPI),
+ DEF_MOD("i2c5", 925, R8A7794_CLK_HP),
+ DEF_MOD("i2c4", 927, R8A7794_CLK_HP),
+ DEF_MOD("i2c3", 928, R8A7794_CLK_HP),
+ DEF_MOD("i2c2", 929, R8A7794_CLK_HP),
+ DEF_MOD("i2c1", 930, R8A7794_CLK_HP),
+ DEF_MOD("i2c0", 931, R8A7794_CLK_HP),
+ DEF_MOD("ssi-all", 1005, R8A7794_CLK_P),
+ DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A7794_CLK_P),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
+ DEF_MOD("scifa3", 1106, R8A7794_CLK_MP),
+ DEF_MOD("scifa4", 1107, R8A7794_CLK_MP),
+ DEF_MOD("scifa5", 1108, R8A7794_CLK_MP),
+};
+
+static const unsigned int r8a7794_crit_mod_clks[] __initconst = {
+ MOD_CLK_ID(408), /* INTC-SYS (GIC) */
+};
+
+/*
+ * CPG Clock Data
+ */
+
+/*
+ * MD        EXTAL    PLL0     PLL1     PLL3
+ * 14 13 19  (MHz)    *1       *2
+ *---------------------------------------------------
+ * 0  0  1   15       x200/3   x208/2   x88
+ * 0  1  1   20       x150/3   x156/2   x66
+ * 1  0  1   26 / 2   x230/3   x240/2   x102
+ * 1  1  1   30 / 2   x200/3   x208/2   x88
+ *
+ * *1 : Table 7.5c indicates VCO output (PLL0 = VCO/3)
+ * *2 : Table 7.5c indicates VCO output (PLL1 = VCO/2)
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
+ (((md) & BIT(13)) >> 13))
+static const struct rcar_gen2_cpg_pll_config cpg_pll_configs[4] __initconst = {
+ { 1, 208, 88, 200 },
+ { 1, 156, 66, 150 },
+ { 2, 240, 102, 230 },
+ { 2, 208, 88, 200 },
+};
+
+static int __init r8a7794_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen2_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+
+ return rcar_gen2_cpg_init(cpg_pll_config, 3, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a7794_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a7794_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a7794_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a7794_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a7794_mod_clks),
+ .num_hw_mod_clks = 12 * 32,
+
+ /* Critical Module Clocks */
+ .crit_mod_clks = r8a7794_crit_mod_clks,
+ .num_crit_mod_clks = ARRAY_SIZE(r8a7794_crit_mod_clks),
+
+ /* Callbacks */
+ .init = r8a7794_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen2_cpg_clk_register,
+};
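A note on the module-clock numbering used in the DEF_MOD() tables above: the 4-digit IDs appear to encode an MSTP register and bit (num_hw_mod_clks is 12 * 32, i.e. twelve 32-bit registers), and passing MOD_CLK_ID(1005) as the parent of the individual SSI channels makes the "ssi-all" gate their parent in the clock tree. The exact MOD_CLK_ID()/DEF_MOD() definitions live in renesas-cpg-mssr.h, which this diff does not show, so the decoding in the sketch below is an assumption that mirrors the visible convention rather than the header's actual macros.

/* Illustrative sketch (not from the patch): decoding the 4-digit module
 * clock numbers under the assumption reg = id / 100, bit = id % 100,
 * which matches "ssi-all" = 1005 -> register 10, bit 5.
 */
#include <stdio.h>

struct mstp_pos {
	unsigned int reg;	/* MSTPCR/SMSTPCR register index */
	unsigned int bit;	/* bit within that 32-bit register */
};

static struct mstp_pos decode_mod_clk(unsigned int id)
{
	struct mstp_pos pos = { .reg = id / 100, .bit = id % 100 };
	return pos;
}

int main(void)
{
	/* "ssi9" (1006) is parented on MOD_CLK_ID(1005), i.e. "ssi-all":
	 * enabling ssi9 therefore also keeps the group gate in bit 5 on.
	 */
	struct mstp_pos ssi_all = decode_mod_clk(1005);
	struct mstp_pos ssi9 = decode_mod_clk(1006);

	printf("ssi-all: reg %u bit %u\n", ssi_all.reg, ssi_all.bit);
	printf("ssi9:    reg %u bit %u\n", ssi9.reg, ssi9.bit);
	return 0;
}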
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index eaa98b488f01..c091a8e024b8 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -141,8 +141,10 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("sdif0", 314, R8A7795_CLK_SD0),
DEF_MOD("pcie1", 318, R8A7795_CLK_S3D1),
DEF_MOD("pcie0", 319, R8A7795_CLK_S3D1),
+ DEF_MOD("usb-dmac30", 326, R8A7795_CLK_S3D1),
DEF_MOD("usb3-if1", 327, R8A7795_CLK_S3D1), /* ES1.x */
DEF_MOD("usb3-if0", 328, R8A7795_CLK_S3D1),
+ DEF_MOD("usb-dmac31", 329, R8A7795_CLK_S3D1),
DEF_MOD("usb-dmac0", 330, R8A7795_CLK_S3D1),
DEF_MOD("usb-dmac1", 331, R8A7795_CLK_S3D1),
DEF_MOD("rwdt", 402, R8A7795_CLK_R),
@@ -164,7 +166,7 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("hscif1", 519, R8A7795_CLK_S3D1),
DEF_MOD("hscif0", 520, R8A7795_CLK_S3D1),
DEF_MOD("thermal", 522, R8A7795_CLK_CP),
- DEF_MOD("pwm", 523, R8A7795_CLK_S3D4),
+ DEF_MOD("pwm", 523, R8A7795_CLK_S0D12),
DEF_MOD("fcpvd3", 600, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("fcpvd2", 601, R8A7795_CLK_S0D2),
DEF_MOD("fcpvd1", 602, R8A7795_CLK_S0D2),
@@ -189,10 +191,12 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("vspi2", 629, R8A7795_CLK_S2D1), /* ES1.x */
DEF_MOD("vspi1", 630, R8A7795_CLK_S0D1),
DEF_MOD("vspi0", 631, R8A7795_CLK_S0D1),
+ DEF_MOD("ehci3", 700, R8A7795_CLK_S3D4),
DEF_MOD("ehci2", 701, R8A7795_CLK_S3D4),
DEF_MOD("ehci1", 702, R8A7795_CLK_S3D4),
DEF_MOD("ehci0", 703, R8A7795_CLK_S3D4),
DEF_MOD("hsusb", 704, R8A7795_CLK_S3D4),
+ DEF_MOD("hsusb3", 705, R8A7795_CLK_S3D4),
DEF_MOD("csi21", 713, R8A7795_CLK_CSI0), /* ES1.x */
DEF_MOD("csi20", 714, R8A7795_CLK_CSI0),
DEF_MOD("csi41", 715, R8A7795_CLK_CSI0),
@@ -218,22 +222,22 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("imr2", 821, R8A7795_CLK_S0D2),
DEF_MOD("imr1", 822, R8A7795_CLK_S0D2),
DEF_MOD("imr0", 823, R8A7795_CLK_S0D2),
- DEF_MOD("gpio7", 905, R8A7795_CLK_CP),
- DEF_MOD("gpio6", 906, R8A7795_CLK_CP),
- DEF_MOD("gpio5", 907, R8A7795_CLK_CP),
- DEF_MOD("gpio4", 908, R8A7795_CLK_CP),
- DEF_MOD("gpio3", 909, R8A7795_CLK_CP),
- DEF_MOD("gpio2", 910, R8A7795_CLK_CP),
- DEF_MOD("gpio1", 911, R8A7795_CLK_CP),
- DEF_MOD("gpio0", 912, R8A7795_CLK_CP),
+ DEF_MOD("gpio7", 905, R8A7795_CLK_S3D4),
+ DEF_MOD("gpio6", 906, R8A7795_CLK_S3D4),
+ DEF_MOD("gpio5", 907, R8A7795_CLK_S3D4),
+ DEF_MOD("gpio4", 908, R8A7795_CLK_S3D4),
+ DEF_MOD("gpio3", 909, R8A7795_CLK_S3D4),
+ DEF_MOD("gpio2", 910, R8A7795_CLK_S3D4),
+ DEF_MOD("gpio1", 911, R8A7795_CLK_S3D4),
+ DEF_MOD("gpio0", 912, R8A7795_CLK_S3D4),
DEF_MOD("can-fd", 914, R8A7795_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A7795_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A7795_CLK_S3D4),
- DEF_MOD("i2c6", 918, R8A7795_CLK_S3D2),
- DEF_MOD("i2c5", 919, R8A7795_CLK_S3D2),
+ DEF_MOD("i2c6", 918, R8A7795_CLK_S0D6),
+ DEF_MOD("i2c5", 919, R8A7795_CLK_S0D6),
DEF_MOD("i2c-dvfs", 926, R8A7795_CLK_CP),
- DEF_MOD("i2c4", 927, R8A7795_CLK_S3D2),
- DEF_MOD("i2c3", 928, R8A7795_CLK_S3D2),
+ DEF_MOD("i2c4", 927, R8A7795_CLK_S0D6),
+ DEF_MOD("i2c3", 928, R8A7795_CLK_S0D6),
DEF_MOD("i2c2", 929, R8A7795_CLK_S3D2),
DEF_MOD("i2c1", 930, R8A7795_CLK_S3D2),
DEF_MOD("i2c0", 931, R8A7795_CLK_S3D2),
@@ -346,6 +350,7 @@ static const struct mssr_mod_reparent r8a7795es1_mod_reparent[] __initconst = {
{ MOD_CLK_ID(219), R8A7795_CLK_S3D1 }, /* SYS-DMAC0 */
{ MOD_CLK_ID(501), R8A7795_CLK_S3D1 }, /* AUDMAC1 */
{ MOD_CLK_ID(502), R8A7795_CLK_S3D1 }, /* AUDMAC0 */
+ { MOD_CLK_ID(523), R8A7795_CLK_S3D4 }, /* PWM */
{ MOD_CLK_ID(601), R8A7795_CLK_S2D1 }, /* FCPVD2 */
{ MOD_CLK_ID(602), R8A7795_CLK_S2D1 }, /* FCPVD1 */
{ MOD_CLK_ID(603), R8A7795_CLK_S2D1 }, /* FCPVD0 */
@@ -376,6 +381,18 @@ static const struct mssr_mod_reparent r8a7795es1_mod_reparent[] __initconst = {
{ MOD_CLK_ID(821), R8A7795_CLK_S2D1 }, /* IMR2 */
{ MOD_CLK_ID(822), R8A7795_CLK_S2D1 }, /* IMR1 */
{ MOD_CLK_ID(823), R8A7795_CLK_S2D1 }, /* IMR0 */
+ { MOD_CLK_ID(905), R8A7795_CLK_CP }, /* GPIO7 */
+ { MOD_CLK_ID(906), R8A7795_CLK_CP }, /* GPIO6 */
+ { MOD_CLK_ID(907), R8A7795_CLK_CP }, /* GPIO5 */
+ { MOD_CLK_ID(908), R8A7795_CLK_CP }, /* GPIO4 */
+ { MOD_CLK_ID(909), R8A7795_CLK_CP }, /* GPIO3 */
+ { MOD_CLK_ID(910), R8A7795_CLK_CP }, /* GPIO2 */
+ { MOD_CLK_ID(911), R8A7795_CLK_CP }, /* GPIO1 */
+ { MOD_CLK_ID(912), R8A7795_CLK_CP }, /* GPIO0 */
+ { MOD_CLK_ID(918), R8A7795_CLK_S3D2 }, /* I2C6 */
+ { MOD_CLK_ID(919), R8A7795_CLK_S3D2 }, /* I2C5 */
+ { MOD_CLK_ID(927), R8A7795_CLK_S3D2 }, /* I2C4 */
+ { MOD_CLK_ID(928), R8A7795_CLK_S3D2 }, /* I2C3 */
};
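The r8a7795 hunks above move the PWM, GPIO0-7 and I2C3-6 module clocks to new parents for ES2.0 silicon while appending their old parents to r8a7795es1_mod_reparent[], so ES1.x keeps the previous clock topology. A hypothetical sketch of the table-driven idea follows; the struct and function names are invented for illustration and are not the driver's actual helpers, which live elsewhere in r8a7795-cpg-mssr.c.

/* Hypothetical illustration (invented names, not the driver's helpers):
 * a reparent table is walked once at init time and the parent of each
 * listed module clock is swapped before the clocks are registered.
 */
#include <stdio.h>

struct mod_reparent {
	unsigned int clk;	/* packed module clock ID */
	unsigned int parent;	/* clock index to use instead */
};

struct mod_clk_entry {
	const char *name;
	unsigned int id;
	unsigned int parent;
};

static void apply_reparent(struct mod_clk_entry *mods, unsigned int n_mods,
			   const struct mod_reparent *fixups,
			   unsigned int n_fixups)
{
	unsigned int i, j;

	for (i = 0; i < n_fixups; i++)
		for (j = 0; j < n_mods; j++)
			if (mods[j].id == fixups[i].clk) {
				mods[j].parent = fixups[i].parent;
				break;
			}
}

int main(void)
{
	struct mod_clk_entry mods[] = {
		{ "pwm", 523, 2 },	/* 2 standing in for the ES2.0 parent */
	};
	const struct mod_reparent es1_fixups[] = {
		{ 523, 1 },		/* 1 standing in for the ES1.x parent */
	};

	apply_reparent(mods, 1, es1_fixups, 1);
	printf("%s parent is now %u\n", mods[0].name, mods[0].parent);
	return 0;
}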
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index 9d114b31b073..acc6d0f153e1 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -106,6 +106,7 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_DIV6P1("canfd", R8A7796_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
DEF_DIV6P1("csi0", R8A7796_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
DEF_DIV6P1("mso", R8A7796_CLK_MSO, CLK_PLL1_DIV4, 0x014),
+ DEF_DIV6P1("hdmi", R8A7796_CLK_HDMI, CLK_PLL1_DIV4, 0x250),
DEF_DIV6_RO("osc", R8A7796_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8),
DEF_DIV6_RO("r_int", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32),
@@ -135,8 +136,15 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
DEF_MOD("sdif2", 312, R8A7796_CLK_SD2),
DEF_MOD("sdif1", 313, R8A7796_CLK_SD1),
DEF_MOD("sdif0", 314, R8A7796_CLK_SD0),
+ DEF_MOD("pcie1", 318, R8A7796_CLK_S3D1),
+ DEF_MOD("pcie0", 319, R8A7796_CLK_S3D1),
+ DEF_MOD("usb-dmac0", 330, R8A7796_CLK_S3D1),
+ DEF_MOD("usb-dmac1", 331, R8A7796_CLK_S3D1),
DEF_MOD("rwdt", 402, R8A7796_CLK_R),
+ DEF_MOD("intc-ex", 407, R8A7796_CLK_CP),
DEF_MOD("intc-ap", 408, R8A7796_CLK_S3D1),
+ DEF_MOD("audmac1", 501, R8A7796_CLK_S0D3),
+ DEF_MOD("audmac0", 502, R8A7796_CLK_S0D3),
DEF_MOD("drif7", 508, R8A7796_CLK_S3D2),
DEF_MOD("drif6", 509, R8A7796_CLK_S3D2),
DEF_MOD("drif5", 510, R8A7796_CLK_S3D2),
@@ -151,6 +159,7 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
DEF_MOD("hscif1", 519, R8A7796_CLK_S3D1),
DEF_MOD("hscif0", 520, R8A7796_CLK_S3D1),
DEF_MOD("thermal", 522, R8A7796_CLK_CP),
+ DEF_MOD("pwm", 523, R8A7796_CLK_S0D12),
DEF_MOD("fcpvd2", 601, R8A7796_CLK_S0D2),
DEF_MOD("fcpvd1", 602, R8A7796_CLK_S0D2),
DEF_MOD("fcpvd0", 603, R8A7796_CLK_S0D2),
@@ -164,12 +173,16 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
DEF_MOD("vspd0", 623, R8A7796_CLK_S0D2),
DEF_MOD("vspb", 626, R8A7796_CLK_S0D1),
DEF_MOD("vspi0", 631, R8A7796_CLK_S0D1),
+ DEF_MOD("ehci1", 702, R8A7796_CLK_S3D4),
+ DEF_MOD("ehci0", 703, R8A7796_CLK_S3D4),
+ DEF_MOD("hsusb", 704, R8A7796_CLK_S3D4),
DEF_MOD("csi20", 714, R8A7796_CLK_CSI0),
DEF_MOD("csi40", 716, R8A7796_CLK_CSI0),
DEF_MOD("du2", 722, R8A7796_CLK_S2D1),
DEF_MOD("du1", 723, R8A7796_CLK_S2D1),
DEF_MOD("du0", 724, R8A7796_CLK_S2D1),
DEF_MOD("lvds", 727, R8A7796_CLK_S2D1),
+ DEF_MOD("hdmi0", 729, R8A7796_CLK_HDMI),
DEF_MOD("vin7", 804, R8A7796_CLK_S0D2),
DEF_MOD("vin6", 805, R8A7796_CLK_S0D2),
DEF_MOD("vin5", 806, R8A7796_CLK_S0D2),
@@ -200,6 +213,32 @@ static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
DEF_MOD("i2c2", 929, R8A7796_CLK_S3D2),
DEF_MOD("i2c1", 930, R8A7796_CLK_S3D2),
DEF_MOD("i2c0", 931, R8A7796_CLK_S3D2),
+ DEF_MOD("ssi-all", 1005, R8A7796_CLK_S3D4),
+ DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)),
+ DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)),
+ DEF_MOD("scu-all", 1017, R8A7796_CLK_S3D4),
+ DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)),
+ DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)),
};
static const unsigned int r8a7796_crit_mod_clks[] __initconst = {
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 99eeec6f24ec..1f607c806f9b 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -257,7 +257,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
const struct cpg_mssr_info *info,
struct cpg_mssr_priv *priv)
{
- struct clk *clk = NULL, *parent;
+ struct clk *clk = ERR_PTR(-ENOTSUPP), *parent;
struct device *dev = priv->dev;
unsigned int id = core->id, div = core->div;
const char *parent_name;
@@ -477,7 +477,7 @@ fail_put:
void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
- if (!list_empty(&dev->power.subsys_data->clock_list))
+ if (!pm_clk_no_clocks(dev))
pm_clk_destroy(dev);
}
@@ -627,25 +627,54 @@ static inline int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
static const struct of_device_id cpg_mssr_match[] = {
-#ifdef CONFIG_ARCH_R8A7743
+#ifdef CONFIG_CLK_R8A7743
{
.compatible = "renesas,r8a7743-cpg-mssr",
.data = &r8a7743_cpg_mssr_info,
},
#endif
-#ifdef CONFIG_ARCH_R8A7745
+#ifdef CONFIG_CLK_R8A7745
{
.compatible = "renesas,r8a7745-cpg-mssr",
.data = &r8a7745_cpg_mssr_info,
},
#endif
-#ifdef CONFIG_ARCH_R8A7795
+#ifdef CONFIG_CLK_R8A7790
+ {
+ .compatible = "renesas,r8a7790-cpg-mssr",
+ .data = &r8a7790_cpg_mssr_info,
+ },
+#endif
+#ifdef CONFIG_CLK_R8A7791
+ {
+ .compatible = "renesas,r8a7791-cpg-mssr",
+ .data = &r8a7791_cpg_mssr_info,
+ },
+ /* R-Car M2-N is (almost) identical to R-Car M2-W w.r.t. clocks. */
+ {
+ .compatible = "renesas,r8a7793-cpg-mssr",
+ .data = &r8a7791_cpg_mssr_info,
+ },
+#endif
+#ifdef CONFIG_CLK_R8A7792
+ {
+ .compatible = "renesas,r8a7792-cpg-mssr",
+ .data = &r8a7792_cpg_mssr_info,
+ },
+#endif
+#ifdef CONFIG_CLK_R8A7794
+ {
+ .compatible = "renesas,r8a7794-cpg-mssr",
+ .data = &r8a7794_cpg_mssr_info,
+ },
+#endif
+#ifdef CONFIG_CLK_R8A7795
{
.compatible = "renesas,r8a7795-cpg-mssr",
.data = &r8a7795_cpg_mssr_info,
},
#endif
-#ifdef CONFIG_ARCH_R8A7796
+#ifdef CONFIG_CLK_R8A7796
{
.compatible = "renesas,r8a7796-cpg-mssr",
.data = &r8a7796_cpg_mssr_info,
@@ -670,7 +699,7 @@ static int __init cpg_mssr_probe(struct platform_device *pdev)
struct clk **clks;
int error;
- info = of_match_node(cpg_mssr_match, np)->data;
+ info = of_device_get_match_data(dev);
if (info->init) {
error = info->init(dev);
if (error)
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 148f4f0aa2a4..43d7c7f6832d 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -132,6 +132,10 @@ struct cpg_mssr_info {
extern const struct cpg_mssr_info r8a7743_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7745_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a7790_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a7791_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a7792_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a7794_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7795_cpg_mssr_info;
extern const struct cpg_mssr_info r8a7796_cpg_mssr_info;
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index 26b220c988b2..6f19826cc447 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_RESET_CONTROLLER) += softrst.o
obj-y += clk-rv1108.o
obj-y += clk-rk3036.o
+obj-y += clk-rk3128.o
obj-y += clk-rk3188.o
obj-y += clk-rk3228.o
obj-y += clk-rk3288.o
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index 00d4150e33c3..c3001980dbdc 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -436,6 +436,7 @@ static const char *const rk3036_critical_clocks[] __initconst = {
"aclk_peri",
"hclk_peri",
"pclk_peri",
+ "pclk_ddrupctl",
};
static void __init rk3036_clk_init(struct device_node *np)
diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c
new file mode 100644
index 000000000000..e243f2eae68f
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rk3128.c
@@ -0,0 +1,612 @@
+/*
+ * Copyright (c) 2017 Rockchip Electronics Co. Ltd.
+ * Author: Elaine <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+#include <dt-bindings/clock/rk3128-cru.h>
+#include "clk.h"
+
+#define RK3128_GRF_SOC_STATUS0 0x14c
+
+enum rk3128_plls {
+ apll, dpll, cpll, gpll,
+};
+
+static struct rockchip_pll_rate_table rk3128_pll_rates[] = {
+ /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
+ RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1584000000, 1, 66, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1560000000, 1, 65, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1536000000, 1, 64, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1512000000, 1, 63, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1488000000, 1, 62, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1464000000, 1, 61, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1440000000, 1, 60, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1416000000, 1, 59, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1392000000, 1, 58, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1368000000, 1, 57, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1344000000, 1, 56, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1320000000, 1, 55, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1296000000, 1, 54, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1272000000, 1, 53, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1248000000, 1, 52, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1200000000, 1, 50, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1188000000, 2, 99, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1104000000, 1, 46, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1100000000, 12, 550, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1000000000, 6, 500, 2, 1, 1, 0),
+ RK3036_PLL_RATE(984000000, 1, 82, 2, 1, 1, 0),
+ RK3036_PLL_RATE(960000000, 1, 80, 2, 1, 1, 0),
+ RK3036_PLL_RATE(936000000, 1, 78, 2, 1, 1, 0),
+ RK3036_PLL_RATE(912000000, 1, 76, 2, 1, 1, 0),
+ RK3036_PLL_RATE(900000000, 4, 300, 2, 1, 1, 0),
+ RK3036_PLL_RATE(888000000, 1, 74, 2, 1, 1, 0),
+ RK3036_PLL_RATE(864000000, 1, 72, 2, 1, 1, 0),
+ RK3036_PLL_RATE(840000000, 1, 70, 2, 1, 1, 0),
+ RK3036_PLL_RATE(816000000, 1, 68, 2, 1, 1, 0),
+ RK3036_PLL_RATE(800000000, 6, 400, 2, 1, 1, 0),
+ RK3036_PLL_RATE(700000000, 6, 350, 2, 1, 1, 0),
+ RK3036_PLL_RATE(696000000, 1, 58, 2, 1, 1, 0),
+ RK3036_PLL_RATE(600000000, 1, 75, 3, 1, 1, 0),
+ RK3036_PLL_RATE(594000000, 2, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE(504000000, 1, 63, 3, 1, 1, 0),
+ RK3036_PLL_RATE(500000000, 6, 250, 2, 1, 1, 0),
+ RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
+ RK3036_PLL_RATE(312000000, 1, 52, 2, 2, 1, 0),
+ RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
+ RK3036_PLL_RATE(96000000, 1, 64, 4, 4, 1, 0),
+ { /* sentinel */ },
+};
+
+#define RK3128_DIV_CPU_MASK 0x1f
+#define RK3128_DIV_CPU_SHIFT 8
+
+#define RK3128_DIV_PERI_MASK 0xf
+#define RK3128_DIV_PERI_SHIFT 0
+#define RK3128_DIV_ACLK_MASK 0x7
+#define RK3128_DIV_ACLK_SHIFT 4
+#define RK3128_DIV_HCLK_MASK 0x3
+#define RK3128_DIV_HCLK_SHIFT 8
+#define RK3128_DIV_PCLK_MASK 0x7
+#define RK3128_DIV_PCLK_SHIFT 12
+
+#define RK3128_CLKSEL1(_core_aclk_div, _pclk_dbg_div) \
+{ \
+ .reg = RK2928_CLKSEL_CON(1), \
+ .val = HIWORD_UPDATE(_pclk_dbg_div, RK3128_DIV_PERI_MASK, \
+ RK3128_DIV_PERI_SHIFT) | \
+ HIWORD_UPDATE(_core_aclk_div, RK3128_DIV_ACLK_MASK, \
+ RK3128_DIV_ACLK_SHIFT), \
+}
+
+#define RK3128_CPUCLK_RATE(_prate, _core_aclk_div, _pclk_dbg_div) \
+{ \
+ .prate = _prate, \
+ .divs = { \
+ RK3128_CLKSEL1(_core_aclk_div, _pclk_dbg_div), \
+ }, \
+}
+
+static struct rockchip_cpuclk_rate_table rk3128_cpuclk_rates[] __initdata = {
+ RK3128_CPUCLK_RATE(1800000000, 1, 7),
+ RK3128_CPUCLK_RATE(1704000000, 1, 7),
+ RK3128_CPUCLK_RATE(1608000000, 1, 7),
+ RK3128_CPUCLK_RATE(1512000000, 1, 7),
+ RK3128_CPUCLK_RATE(1488000000, 1, 5),
+ RK3128_CPUCLK_RATE(1416000000, 1, 5),
+ RK3128_CPUCLK_RATE(1392000000, 1, 5),
+ RK3128_CPUCLK_RATE(1296000000, 1, 5),
+ RK3128_CPUCLK_RATE(1200000000, 1, 5),
+ RK3128_CPUCLK_RATE(1104000000, 1, 5),
+ RK3128_CPUCLK_RATE(1008000000, 1, 5),
+ RK3128_CPUCLK_RATE(912000000, 1, 5),
+ RK3128_CPUCLK_RATE(816000000, 1, 3),
+ RK3128_CPUCLK_RATE(696000000, 1, 3),
+ RK3128_CPUCLK_RATE(600000000, 1, 3),
+ RK3128_CPUCLK_RATE(408000000, 1, 1),
+ RK3128_CPUCLK_RATE(312000000, 1, 1),
+ RK3128_CPUCLK_RATE(216000000, 1, 1),
+ RK3128_CPUCLK_RATE(96000000, 1, 1),
+};
+
+static const struct rockchip_cpuclk_reg_data rk3128_cpuclk_data = {
+ .core_reg = RK2928_CLKSEL_CON(0),
+ .div_core_shift = 0,
+ .div_core_mask = 0x1f,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
+ .mux_core_shift = 7,
+ .mux_core_mask = 0x1,
+};
+
+PNAME(mux_pll_p) = { "clk_24m", "xin24m" };
+
+PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_div2_ddr" };
+PNAME(mux_armclk_p) = { "apll_core", "gpll_div2_core" };
+PNAME(mux_usb480m_p) = { "usb480m_phy", "xin24m" };
+PNAME(mux_aclk_cpu_src_p) = { "cpll", "gpll", "gpll_div2", "gpll_div3" };
+
+PNAME(mux_pll_src_5plls_p) = { "cpll", "gpll", "gpll_div2", "gpll_div3", "usb480m" };
+PNAME(mux_pll_src_4plls_p) = { "cpll", "gpll", "gpll_div2", "usb480m" };
+PNAME(mux_pll_src_3plls_p) = { "cpll", "gpll", "gpll_div2" };
+
+PNAME(mux_aclk_peri_src_p) = { "gpll_peri", "cpll_peri", "gpll_div2_peri", "gpll_div3_peri" };
+PNAME(mux_mmc_src_p) = { "cpll", "gpll", "gpll_div2", "xin24m" };
+PNAME(mux_clk_cif_out_src_p) = { "clk_cif_src", "xin24m" };
+PNAME(mux_sclk_vop_src_p) = { "cpll", "gpll", "gpll_div2", "gpll_div3" };
+
+PNAME(mux_i2s0_p) = { "i2s0_src", "i2s0_frac", "ext_i2s", "xin12m" };
+PNAME(mux_i2s1_pre_p) = { "i2s1_src", "i2s1_frac", "ext_i2s", "xin12m" };
+PNAME(mux_i2s_out_p) = { "i2s1_pre", "xin12m" };
+PNAME(mux_sclk_spdif_p) = { "sclk_spdif_src", "spdif_frac", "xin12m" };
+
+PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
+PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
+PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
+
+PNAME(mux_sclk_gmac_p) = { "sclk_gmac_src", "gmac_clkin" };
+PNAME(mux_sclk_sfc_src_p) = { "cpll", "gpll", "gpll_div2", "xin24m" };
+
+static struct rockchip_pll_clock rk3128_pll_clks[] __initdata = {
+ [apll] = PLL(pll_rk3036, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
+ RK2928_MODE_CON, 0, 1, 0, rk3128_pll_rates),
+ [dpll] = PLL(pll_rk3036, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4),
+ RK2928_MODE_CON, 4, 0, 0, NULL),
+ [cpll] = PLL(pll_rk3036, PLL_CPLL, "cpll", mux_pll_p, 0, RK2928_PLL_CON(8),
+ RK2928_MODE_CON, 8, 2, 0, rk3128_pll_rates),
+ [gpll] = PLL(pll_rk3036, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12),
+ RK2928_MODE_CON, 12, 3, ROCKCHIP_PLL_SYNC_RATE, rk3128_pll_rates),
+};
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+
+static struct rockchip_clk_branch rk3128_i2s0_fracmux __initdata =
+ MUX(0, "i2s0_pre", mux_i2s0_p, CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(9), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3128_i2s1_fracmux __initdata =
+ MUX(0, "i2s1_pre", mux_i2s1_pre_p, CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(3), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3128_spdif_fracmux __initdata =
+ MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(6), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3128_uart0_fracmux __initdata =
+ MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(13), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3128_uart1_fracmux __initdata =
+ MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(14), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3128_uart2_fracmux __initdata =
+ MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(15), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3128_clk_branches[] __initdata = {
+ /*
+ * Clock-Architecture Diagram 1
+ */
+
+ FACTOR(PLL_GPLL_DIV2, "gpll_div2", "gpll", 0, 1, 2),
+ FACTOR(PLL_GPLL_DIV3, "gpll_div3", "gpll", 0, 1, 3),
+
+ DIV(0, "clk_24m", "xin24m", CLK_IGNORE_UNUSED,
+ RK2928_CLKSEL_CON(4), 8, 5, DFLAGS),
+
+ /* PD_DDR */
+ GATE(0, "dpll_ddr", "dpll", CLK_IGNORE_UNUSED,
+ RK2928_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(0, "gpll_div2_ddr", "gpll_div2", CLK_IGNORE_UNUSED,
+ RK2928_CLKGATE_CON(0), 2, GFLAGS),
+ COMPOSITE_NOGATE(0, "ddrphy2x", mux_ddrphy_p, CLK_IGNORE_UNUSED,
+ RK2928_CLKSEL_CON(26), 8, 2, MFLAGS, 0, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
+ FACTOR(SCLK_DDRC, "clk_ddrc", "ddrphy2x", 0, 1, 2),
+ FACTOR(0, "clk_ddrphy", "ddrphy2x", 0, 1, 2),
+
+ /* PD_CORE */
+ GATE(0, "apll_core", "apll", CLK_IGNORE_UNUSED,
+ RK2928_CLKGATE_CON(0), 6, GFLAGS),
+ GATE(0, "gpll_div2_core", "gpll_div2", CLK_IGNORE_UNUSED,
+ RK2928_CLKGATE_CON(0), 6, GFLAGS),
+ COMPOSITE_NOMUX(0, "pclk_dbg", "armclk", CLK_IGNORE_UNUSED,
+ RK2928_CLKSEL_CON(1), 0, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK2928_CLKGATE_CON(0), 0, GFLAGS),
+ COMPOSITE_NOMUX(0, "armcore", "armclk", CLK_IGNORE_UNUSED,
+ RK2928_CLKSEL_CON(1), 4, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK2928_CLKGATE_CON(0), 7, GFLAGS),
+
+ /* PD_MISC */
+ MUX(SCLK_USB480M, "usb480m", mux_usb480m_p, CLK_SET_RATE_PARENT,
+ RK2928_MISC_CON, 15, 1, MFLAGS),
+
+ /* PD_CPU */
+ COMPOSITE(0, "aclk_cpu_src", mux_aclk_cpu_src_p, 0,
+ RK2928_CLKSEL_CON(0), 13, 2, MFLAGS, 8, 5, DFLAGS,
+ RK2928_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(ACLK_CPU, "aclk_cpu", "aclk_cpu_src", 0,
+ RK2928_CLKGATE_CON(0), 3, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_CPU, "hclk_cpu", "aclk_cpu_src", 0,
+ RK2928_CLKSEL_CON(1), 8, 2, DFLAGS,
+ RK2928_CLKGATE_CON(0), 4, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_CPU, "pclk_cpu", "aclk_cpu_src", 0,
+ RK2928_CLKSEL_CON(1), 12, 2, DFLAGS,
+ RK2928_CLKGATE_CON(0), 5, GFLAGS),
+ COMPOSITE_NOMUX(SCLK_CRYPTO, "clk_crypto", "aclk_cpu_src", 0,
+ RK2928_CLKSEL_CON(24), 0, 2, DFLAGS,
+ RK2928_CLKGATE_CON(0), 12, GFLAGS),
+
+ /* PD_VIDEO */
+ COMPOSITE(ACLK_VEPU, "aclk_vepu", mux_pll_src_5plls_p, 0,
+ RK2928_CLKSEL_CON(32), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK2928_CLKGATE_CON(3), 9, GFLAGS),
+ FACTOR(HCLK_VEPU, "hclk_vepu", "aclk_vepu", 0, 1, 4),
+
+ COMPOSITE(ACLK_VDPU, "aclk_vdpu", mux_pll_src_5plls_p, 0,
+ RK2928_CLKSEL_CON(32), 13, 3, MFLAGS, 8, 5, DFLAGS,
+ RK2928_CLKGATE_CON(3), 11, GFLAGS),
+ FACTOR_GATE(HCLK_VDPU, "hclk_vdpu", "aclk_vdpu", 0, 1, 4,
+ RK2928_CLKGATE_CON(3), 12, GFLAGS),
+
+ COMPOSITE(SCLK_HEVC_CORE, "sclk_hevc_core", mux_pll_src_5plls_p, 0,
+ RK2928_CLKSEL_CON(34), 13, 3, MFLAGS, 8, 5, DFLAGS,
+ RK2928_CLKGATE_CON(3), 10, GFLAGS),
+
+ /* PD_VIO */
+ COMPOSITE(ACLK_VIO0, "aclk_vio0", mux_pll_src_5plls_p, 0,
+ RK2928_CLKSEL_CON(31), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK2928_CLKGATE_CON(3), 0, GFLAGS),
+ COMPOSITE(ACLK_VIO1, "aclk_vio1", mux_pll_src_5plls_p, 0,
+ RK2928_CLKSEL_CON(31), 13, 3, MFLAGS, 8, 5, DFLAGS,
+ RK2928_CLKGATE_CON(1), 4, GFLAGS),
+ COMPOSITE(HCLK_VIO, "hclk_vio", mux_pll_src_4plls_p, 0,
+ RK2928_CLKSEL_CON(30), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK2928_CLKGATE_CON(0), 11, GFLAGS),
+
+ /* PD_PERI */
+ GATE(0, "gpll_peri", "gpll", CLK_IGNORE_UNUSED,
+ RK2928_CLKGATE_CON(2), 0, GFLAGS),
+ GATE(0, "cpll_peri", "cpll", CLK_IGNORE_UNUSED,
+ RK2928_CLKGATE_CON(2), 0, GFLAGS),
+ GATE(0, "gpll_div2_peri", "gpll_div2", CLK_IGNORE_UNUSED,
+ RK2928_CLKGATE_CON(2), 0, GFLAGS),
+ GATE(0, "gpll_div3_peri", "gpll_div3", CLK_IGNORE_UNUSED,
+ RK2928_CLKGATE_CON(2), 0, GFLAGS),
+ COMPOSITE_NOGATE(0, "aclk_peri_src", mux_aclk_peri_src_p, 0,
+ RK2928_CLKSEL_CON(10), 14, 2, MFLAGS, 0, 5, DFLAGS),
+ COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_src", 0,
+ RK2928_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+ RK2928_CLKGATE_CON(2), 3, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_src", 0,
+ RK2928_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+ RK2928_CLKGATE_CON(2), 2, GFLAGS),
+ GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", 0,
+ RK2928_CLKGATE_CON(2), 1, GFLAGS),
+
+ GATE(SCLK_TIMER0, "sclk_timer0", "xin24m", 0,
+ RK2928_CLKGATE_CON(10), 3, GFLAGS),
+ GATE(SCLK_TIMER1, "sclk_timer1", "xin24m", 0,
+ RK2928_CLKGATE_CON(10), 4, GFLAGS),
+ GATE(SCLK_TIMER2, "sclk_timer2", "xin24m", 0,
+ RK2928_CLKGATE_CON(10), 5, GFLAGS),
+ GATE(SCLK_TIMER3, "sclk_timer3", "xin24m", 0,
+ RK2928_CLKGATE_CON(10), 6, GFLAGS),
+ GATE(SCLK_TIMER4, "sclk_timer4", "xin24m", 0,
+ RK2928_CLKGATE_CON(10), 7, GFLAGS),
+ GATE(SCLK_TIMER5, "sclk_timer5", "xin24m", 0,
+ RK2928_CLKGATE_CON(10), 8, GFLAGS),
+
+ GATE(SCLK_PVTM_CORE, "clk_pvtm_core", "xin24m", 0,
+ RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ GATE(SCLK_PVTM_GPU, "clk_pvtm_gpu", "xin24m", 0,
+ RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ GATE(SCLK_PVTM_FUNC, "clk_pvtm_func", "xin24m", 0,
+ RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED,
+ RK2928_CLKGATE_CON(10), 8, GFLAGS),
+
+ COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
+ RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK2928_CLKGATE_CON(2), 11, GFLAGS),
+
+ COMPOSITE(SCLK_SDIO, "sclk_sdio", mux_mmc_src_p, 0,
+ RK2928_CLKSEL_CON(12), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK2928_CLKGATE_CON(2), 13, GFLAGS),
+
+ COMPOSITE(SCLK_EMMC, "sclk_emmc", mux_mmc_src_p, 0,
+ RK2928_CLKSEL_CON(12), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK2928_CLKGATE_CON(2), 14, GFLAGS),
+
+ DIV(SCLK_PVTM, "clk_pvtm", "clk_pvtm_func", 0,
+ RK2928_CLKSEL_CON(2), 0, 7, DFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 2
+ */
+ COMPOSITE(DCLK_VOP, "dclk_vop", mux_sclk_vop_src_p, 0,
+ RK2928_CLKSEL_CON(27), 0, 2, MFLAGS, 8, 8, DFLAGS,
+ RK2928_CLKGATE_CON(3), 1, GFLAGS),
+ COMPOSITE(SCLK_VOP, "sclk_vop", mux_sclk_vop_src_p, 0,
+ RK2928_CLKSEL_CON(28), 0, 2, MFLAGS, 8, 8, DFLAGS,
+ RK2928_CLKGATE_CON(3), 2, GFLAGS),
+ COMPOSITE(DCLK_EBC, "dclk_ebc", mux_pll_src_3plls_p, 0,
+ RK2928_CLKSEL_CON(23), 0, 2, MFLAGS, 8, 8, DFLAGS,
+ RK2928_CLKGATE_CON(3), 4, GFLAGS),
+
+ FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
+ COMPOSITE_NODIV(SCLK_CIF_SRC, "sclk_cif_src", mux_pll_src_4plls_p, 0,
+ RK2928_CLKSEL_CON(29), 0, 2, MFLAGS,
+ RK2928_CLKGATE_CON(3), 7, GFLAGS),
+ MUX(SCLK_CIF_OUT_SRC, "sclk_cif_out_src", mux_clk_cif_out_src_p, 0,
+ RK2928_CLKSEL_CON(13), 14, 2, MFLAGS),
+ DIV(SCLK_CIF_OUT, "sclk_cif_out", "sclk_cif_out_src", 0,
+ RK2928_CLKSEL_CON(29), 2, 5, DFLAGS),
+
+ COMPOSITE(0, "i2s0_src", mux_pll_src_3plls_p, 0,
+ RK2928_CLKSEL_CON(9), 14, 2, MFLAGS, 0, 7, DFLAGS,
+ RK2928_CLKGATE_CON(4), 4, GFLAGS),
+ COMPOSITE_FRACMUX(0, "i2s0_frac", "i2s0_src", CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(8), 0,
+ RK2928_CLKGATE_CON(4), 5, GFLAGS,
+ &rk3128_i2s0_fracmux),
+ GATE(SCLK_I2S0, "sclk_i2s0", "i2s0_pre", CLK_SET_RATE_PARENT,
+ RK2928_CLKGATE_CON(4), 6, GFLAGS),
+
+ COMPOSITE(0, "i2s1_src", mux_pll_src_3plls_p, 0,
+ RK2928_CLKSEL_CON(3), 14, 2, MFLAGS, 0, 7, DFLAGS,
+ RK2928_CLKGATE_CON(0), 9, GFLAGS),
+ COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_src", CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(7), 0,
+ RK2928_CLKGATE_CON(0), 10, GFLAGS,
+ &rk3128_i2s1_fracmux),
+ GATE(SCLK_I2S1, "sclk_i2s1", "i2s1_pre", CLK_SET_RATE_PARENT,
+ RK2928_CLKGATE_CON(0), 14, GFLAGS),
+ COMPOSITE_NODIV(SCLK_I2S_OUT, "i2s_out", mux_i2s_out_p, 0,
+ RK2928_CLKSEL_CON(3), 12, 1, MFLAGS,
+ RK2928_CLKGATE_CON(0), 13, GFLAGS),
+
+ COMPOSITE(0, "sclk_spdif_src", mux_pll_src_3plls_p, 0,
+ RK2928_CLKSEL_CON(6), 14, 2, MFLAGS, 0, 7, DFLAGS,
+ RK2928_CLKGATE_CON(2), 10, GFLAGS),
+ COMPOSITE_FRACMUX(0, "spdif_frac", "sclk_spdif_src", CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(20), 0,
+ RK2928_CLKGATE_CON(2), 12, GFLAGS,
+ &rk3128_spdif_fracmux),
+
+ GATE(0, "jtag", "ext_jtag", CLK_IGNORE_UNUSED,
+ RK2928_CLKGATE_CON(1), 3, GFLAGS),
+
+ GATE(SCLK_OTGPHY0, "sclk_otgphy0", "xin12m", 0,
+ RK2928_CLKGATE_CON(1), 5, GFLAGS),
+ GATE(SCLK_OTGPHY1, "sclk_otgphy1", "xin12m", 0,
+ RK2928_CLKGATE_CON(1), 6, GFLAGS),
+
+ COMPOSITE_NOMUX(SCLK_SARADC, "sclk_saradc", "xin24m", 0,
+ RK2928_CLKSEL_CON(24), 8, 8, DFLAGS,
+ RK2928_CLKGATE_CON(2), 8, GFLAGS),
+
+ COMPOSITE(ACLK_GPU, "aclk_gpu", mux_pll_src_5plls_p, 0,
+ RK2928_CLKSEL_CON(34), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK2928_CLKGATE_CON(3), 13, GFLAGS),
+
+ COMPOSITE(SCLK_SPI0, "sclk_spi0", mux_pll_src_3plls_p, 0,
+ RK2928_CLKSEL_CON(25), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ RK2928_CLKGATE_CON(2), 9, GFLAGS),
+
+ /* PD_UART */
+ COMPOSITE(0, "uart0_src", mux_pll_src_4plls_p, 0,
+ RK2928_CLKSEL_CON(13), 12, 2, MFLAGS, 0, 7, DFLAGS,
+ RK2928_CLKGATE_CON(1), 8, GFLAGS),
+ MUX(0, "uart12_src", mux_pll_src_4plls_p, 0,
+ RK2928_CLKSEL_CON(13), 14, 2, MFLAGS),
+ COMPOSITE_NOMUX(0, "uart1_src", "uart12_src", 0,
+ RK2928_CLKSEL_CON(14), 0, 7, DFLAGS,
+ RK2928_CLKGATE_CON(1), 10, GFLAGS),
+ COMPOSITE_NOMUX(0, "uart2_src", "uart12_src", 0,
+ RK2928_CLKSEL_CON(15), 0, 7, DFLAGS,
+ RK2928_CLKGATE_CON(1), 13, GFLAGS),
+ COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(17), 0,
+ RK2928_CLKGATE_CON(1), 9, GFLAGS,
+ &rk3128_uart0_fracmux),
+ COMPOSITE_FRACMUX(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(18), 0,
+ RK2928_CLKGATE_CON(1), 11, GFLAGS,
+ &rk3128_uart1_fracmux),
+ COMPOSITE_FRACMUX(0, "uart2_frac", "uart2_src", CLK_SET_RATE_PARENT,
+ RK2928_CLKSEL_CON(19), 0,
+ RK2928_CLKGATE_CON(1), 13, GFLAGS,
+ &rk3128_uart2_fracmux),
+
+ COMPOSITE(SCLK_MAC_SRC, "sclk_gmac_src", mux_pll_src_3plls_p, 0,
+ RK2928_CLKSEL_CON(5), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK2928_CLKGATE_CON(1), 7, GFLAGS),
+ MUX(SCLK_MAC, "sclk_gmac", mux_sclk_gmac_p, 0,
+ RK2928_CLKSEL_CON(5), 15, 1, MFLAGS),
+ GATE(SCLK_MAC_REFOUT, "sclk_mac_refout", "sclk_gmac", 0,
+ RK2928_CLKGATE_CON(2), 5, GFLAGS),
+ GATE(SCLK_MAC_REF, "sclk_mac_ref", "sclk_gmac", 0,
+ RK2928_CLKGATE_CON(2), 4, GFLAGS),
+ GATE(SCLK_MAC_RX, "sclk_mac_rx", "sclk_gmac", 0,
+ RK2928_CLKGATE_CON(2), 6, GFLAGS),
+ GATE(SCLK_MAC_TX, "sclk_mac_tx", "sclk_gmac", 0,
+ RK2928_CLKGATE_CON(2), 7, GFLAGS),
+
+ COMPOSITE(SCLK_TSP, "sclk_tsp", mux_pll_src_3plls_p, 0,
+ RK2928_CLKSEL_CON(4), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK2928_CLKGATE_CON(1), 14, GFLAGS),
+
+ COMPOSITE(SCLK_NANDC, "sclk_nandc", mux_pll_src_3plls_p, 0,
+ RK2928_CLKSEL_CON(2), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK2928_CLKGATE_CON(10), 15, GFLAGS),
+
+ COMPOSITE(SCLK_SFC, "sclk_sfc", mux_sclk_sfc_src_p, 0,
+ RK2928_CLKSEL_CON(11), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK2928_CLKGATE_CON(3), 15, GFLAGS),
+
+ COMPOSITE_NOMUX(PCLK_PMU_PRE, "pclk_pmu_pre", "cpll", 0,
+ RK2928_CLKSEL_CON(29), 8, 6, DFLAGS,
+ RK2928_CLKGATE_CON(1), 0, GFLAGS),
+
+ /*
+ * Clock-Architecture Diagram 3
+ */
+
+ /* PD_VOP */
+ GATE(ACLK_LCDC0, "aclk_lcdc0", "aclk_vio0", 0, RK2928_CLKGATE_CON(6), 0, GFLAGS),
+ GATE(ACLK_CIF, "aclk_cif", "aclk_vio0", 0, RK2928_CLKGATE_CON(6), 5, GFLAGS),
+ GATE(ACLK_RGA, "aclk_rga", "aclk_vio0", 0, RK2928_CLKGATE_CON(6), 11, GFLAGS),
+ GATE(0, "aclk_vio0_niu", "aclk_vio0", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(6), 13, GFLAGS),
+
+ GATE(ACLK_IEP, "aclk_iep", "aclk_vio1", 0, RK2928_CLKGATE_CON(9), 8, GFLAGS),
+ GATE(0, "aclk_vio1_niu", "aclk_vio1", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 10, GFLAGS),
+
+ GATE(HCLK_VIO_H2P, "hclk_vio_h2p", "hclk_vio", 0, RK2928_CLKGATE_CON(9), 5, GFLAGS),
+ GATE(PCLK_MIPI, "pclk_mipi", "hclk_vio", 0, RK2928_CLKGATE_CON(9), 6, GFLAGS),
+ GATE(HCLK_RGA, "hclk_rga", "hclk_vio", 0, RK2928_CLKGATE_CON(6), 10, GFLAGS),
+ GATE(HCLK_LCDC0, "hclk_lcdc0", "hclk_vio", 0, RK2928_CLKGATE_CON(6), 1, GFLAGS),
+ GATE(HCLK_IEP, "hclk_iep", "hclk_vio", 0, RK2928_CLKGATE_CON(9), 7, GFLAGS),
+ GATE(0, "hclk_vio_niu", "hclk_vio", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(6), 12, GFLAGS),
+ GATE(HCLK_CIF, "hclk_cif", "hclk_vio", 0, RK2928_CLKGATE_CON(6), 4, GFLAGS),
+ GATE(HCLK_EBC, "hclk_ebc", "hclk_vio", 0, RK2928_CLKGATE_CON(9), 9, GFLAGS),
+
+ /* PD_PERI */
+ GATE(0, "aclk_peri_axi", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 3, GFLAGS),
+ GATE(ACLK_GMAC, "aclk_gmac", "aclk_peri", 0, RK2928_CLKGATE_CON(10), 10, GFLAGS),
+ GATE(ACLK_DMAC, "aclk_dmac", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 1, GFLAGS),
+ GATE(0, "aclk_peri_niu", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 15, GFLAGS),
+ GATE(0, "aclk_cpu_to_peri", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 2, GFLAGS),
+ GATE(HCLK_GPS, "hclk_gps", "aclk_peri", 0, RK2928_CLKGATE_CON(3), 14, GFLAGS),
+
+ GATE(HCLK_I2S_8CH, "hclk_i2s_8ch", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
+ GATE(0, "hclk_peri_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 0, GFLAGS),
+ GATE(HCLK_I2S_2CH, "hclk_i2s_2ch", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
+ GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 13, GFLAGS),
+ GATE(HCLK_HOST2, "hclk_host2", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
+ GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 13, GFLAGS),
+ GATE(0, "hclk_peri_ahb", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 14, GFLAGS),
+ GATE(HCLK_SPDIF, "hclk_spdif", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 9, GFLAGS),
+ GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 12, GFLAGS),
+ GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 10, GFLAGS),
+ GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 11, GFLAGS),
+ GATE(HCLK_EMMC, "hclk_emmc", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 0, GFLAGS),
+ GATE(0, "hclk_emmc_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(3), 6, GFLAGS),
+ GATE(HCLK_NANDC, "hclk_nandc", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 9, GFLAGS),
+ GATE(HCLK_USBHOST, "hclk_usbhost", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 14, GFLAGS),
+
+ GATE(PCLK_SIM_CARD, "pclk_sim_card", "pclk_peri", 0, RK2928_CLKGATE_CON(9), 12, GFLAGS),
+ GATE(PCLK_GMAC, "pclk_gmac", "pclk_peri", 0, RK2928_CLKGATE_CON(10), 11, GFLAGS),
+ GATE(0, "pclk_peri_axi", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 1, GFLAGS),
+ GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 12, GFLAGS),
+ GATE(PCLK_UART0, "pclk_uart0", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 0, GFLAGS),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 1, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 2, GFLAGS),
+ GATE(PCLK_PWM, "pclk_pwm", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 10, GFLAGS),
+ GATE(PCLK_WDT, "pclk_wdt", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS),
+ GATE(PCLK_I2C0, "pclk_i2c0", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 4, GFLAGS),
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 5, GFLAGS),
+ GATE(PCLK_I2C2, "pclk_i2c2", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 6, GFLAGS),
+ GATE(PCLK_I2C3, "pclk_i2c3", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 7, GFLAGS),
+ GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 14, GFLAGS),
+ GATE(PCLK_EFUSE, "pclk_efuse", "pclk_peri", 0, RK2928_CLKGATE_CON(5), 2, GFLAGS),
+ GATE(PCLK_TIMER, "pclk_timer", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(7), 7, GFLAGS),
+ GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 9, GFLAGS),
+ GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 10, GFLAGS),
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 11, GFLAGS),
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 12, GFLAGS),
+
+ /* PD_BUS */
+ GATE(0, "aclk_initmem", "aclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 12, GFLAGS),
+ GATE(0, "aclk_strc_sys", "aclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 10, GFLAGS),
+
+ GATE(0, "hclk_rom", "hclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 6, GFLAGS),
+ GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_cpu", 0, RK2928_CLKGATE_CON(3), 5, GFLAGS),
+
+ GATE(PCLK_HDMI, "pclk_hdmi", "pclk_cpu", 0, RK2928_CLKGATE_CON(3), 8, GFLAGS),
+ GATE(PCLK_ACODEC, "pclk_acodec", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 14, GFLAGS),
+ GATE(0, "pclk_ddrupctl", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 7, GFLAGS),
+ GATE(0, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS),
+ GATE(0, "pclk_mipiphy", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 0, GFLAGS),
+
+ GATE(0, "pclk_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 2, GFLAGS),
+ GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 3, GFLAGS),
+
+ /* PD_MMC */
+ MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "sclk_sdmmc", RK3228_SDMMC_CON0, 1),
+ MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 0),
+
+ MMC(SCLK_SDIO_DRV, "sdio_drv", "sclk_sdio", RK3228_SDIO_CON0, 1),
+ MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "sclk_sdio", RK3228_SDIO_CON1, 0),
+
+ MMC(SCLK_EMMC_DRV, "emmc_drv", "sclk_emmc", RK3228_EMMC_CON0, 1),
+ MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "sclk_emmc", RK3228_EMMC_CON1, 0),
+};
+
+static const char *const rk3128_critical_clocks[] __initconst = {
+ "aclk_cpu",
+ "hclk_cpu",
+ "pclk_cpu",
+ "aclk_peri",
+ "hclk_peri",
+ "pclk_peri",
+};
+
+static void __init rk3128_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ void __iomem *reg_base;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ rockchip_clk_register_plls(ctx, rk3128_pll_clks,
+ ARRAY_SIZE(rk3128_pll_clks),
+ RK3128_GRF_SOC_STATUS0);
+ rockchip_clk_register_branches(ctx, rk3128_clk_branches,
+ ARRAY_SIZE(rk3128_clk_branches));
+ rockchip_clk_protect_critical(rk3128_critical_clocks,
+ ARRAY_SIZE(rk3128_critical_clocks));
+
+ rockchip_clk_register_armclk(ctx, ARMCLK, "armclk",
+ mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
+ &rk3128_cpuclk_data, rk3128_cpuclk_rates,
+ ARRAY_SIZE(rk3128_cpuclk_rates));
+
+ rockchip_register_softrst(np, 9, reg_base + RK2928_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+
+ rockchip_register_restart_notifier(ctx, RK2928_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+
+CLK_OF_DECLARE(rk3128_cru, "rockchip,rk3128-cru", rk3128_clk_init);
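All of the MUX/DIV/GATE branches in clk-rk3128.c above rely on Rockchip's hiword-mask register layout (the MFLAGS/DFLAGS/GFLAGS definitions and the HIWORD_UPDATE() calls in RK3128_CLKSEL1): the upper 16 bits of a CRU register act as a write-enable mask for the lower 16 bits, so a field can be programmed without a read-modify-write cycle. The HIWORD_UPDATE() definition itself lives in drivers/clk/rockchip/clk.h, which this diff does not show, so the expression in the sketch below is an assumption that mirrors that convention.

/* Standalone sketch (not part of the patch) of the hiword-mask write
 * convention: the value goes in the low half-word, the same mask shifted
 * into the high half-word tells the hardware which bits to latch.
 */
#include <stdint.h>
#include <stdio.h>

#define HIWORD_UPDATE(val, mask, shift) \
	(((val) << (shift)) | ((mask) << ((shift) + 16)))

int main(void)
{
	/* Example: program a pclk_dbg divider of 7 and a core aclk divider
	 * of 1 in CLKSEL_CON(1), like RK3128_CLKSEL1(1, 7) above
	 * (pclk_dbg field at bits [3:0], aclk field at bits [6:4]).
	 */
	uint32_t val = HIWORD_UPDATE(7, 0xf, 0) | HIWORD_UPDATE(1, 0x7, 4);

	printf("CLKSEL_CON(1) write = 0x%08x\n", val);
	return 0;
}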
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index db6e5a9e6de6..bb405d9044a3 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -86,25 +86,43 @@ static struct rockchip_pll_rate_table rk3228_pll_rates[] = {
#define RK3228_DIV_PCLK_MASK 0x7
#define RK3228_DIV_PCLK_SHIFT 12
-#define RK3228_CLKSEL1(_core_peri_div) \
+#define RK3228_CLKSEL1(_core_aclk_div, _core_peri_div) \
{ \
.reg = RK2928_CLKSEL_CON(1), \
.val = HIWORD_UPDATE(_core_peri_div, RK3228_DIV_PERI_MASK, \
- RK3228_DIV_PERI_SHIFT) \
- }
+ RK3228_DIV_PERI_SHIFT) | \
+ HIWORD_UPDATE(_core_aclk_div, RK3228_DIV_ACLK_MASK, \
+ RK3228_DIV_ACLK_SHIFT), \
+}
-#define RK3228_CPUCLK_RATE(_prate, _core_peri_div) \
- { \
- .prate = _prate, \
- .divs = { \
- RK3228_CLKSEL1(_core_peri_div), \
- }, \
+#define RK3228_CPUCLK_RATE(_prate, _core_aclk_div, _core_peri_div) \
+ { \
+ .prate = _prate, \
+ .divs = { \
+ RK3228_CLKSEL1(_core_aclk_div, _core_peri_div), \
+ }, \
}
static struct rockchip_cpuclk_rate_table rk3228_cpuclk_rates[] __initdata = {
- RK3228_CPUCLK_RATE(816000000, 4),
- RK3228_CPUCLK_RATE(600000000, 4),
- RK3228_CPUCLK_RATE(312000000, 4),
+ RK3228_CPUCLK_RATE(1800000000, 1, 7),
+ RK3228_CPUCLK_RATE(1704000000, 1, 7),
+ RK3228_CPUCLK_RATE(1608000000, 1, 7),
+ RK3228_CPUCLK_RATE(1512000000, 1, 7),
+ RK3228_CPUCLK_RATE(1488000000, 1, 5),
+ RK3228_CPUCLK_RATE(1416000000, 1, 5),
+ RK3228_CPUCLK_RATE(1392000000, 1, 5),
+ RK3228_CPUCLK_RATE(1296000000, 1, 5),
+ RK3228_CPUCLK_RATE(1200000000, 1, 5),
+ RK3228_CPUCLK_RATE(1104000000, 1, 5),
+ RK3228_CPUCLK_RATE(1008000000, 1, 5),
+ RK3228_CPUCLK_RATE(912000000, 1, 5),
+ RK3228_CPUCLK_RATE(816000000, 1, 3),
+ RK3228_CPUCLK_RATE(696000000, 1, 3),
+ RK3228_CPUCLK_RATE(600000000, 1, 3),
+ RK3228_CPUCLK_RATE(408000000, 1, 1),
+ RK3228_CPUCLK_RATE(312000000, 1, 1),
+ RK3228_CPUCLK_RATE(216000000, 1, 1),
+ RK3228_CPUCLK_RATE(96000000, 1, 1),
};
static const struct rockchip_cpuclk_reg_data rk3228_cpuclk_data = {
@@ -252,15 +270,15 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(0), 1, GFLAGS),
COMPOSITE_NOGATE(0, "aclk_cpu_src", mux_aclk_cpu_src_p, 0,
RK2928_CLKSEL_CON(0), 13, 2, MFLAGS, 8, 5, DFLAGS),
- GATE(ARMCLK, "aclk_cpu", "aclk_cpu_src", 0,
+ GATE(ACLK_CPU, "aclk_cpu", "aclk_cpu_src", 0,
RK2928_CLKGATE_CON(6), 0, GFLAGS),
- COMPOSITE_NOMUX(0, "hclk_cpu", "aclk_cpu_src", 0,
+ COMPOSITE_NOMUX(HCLK_CPU, "hclk_cpu", "aclk_cpu_src", 0,
RK2928_CLKSEL_CON(1), 8, 2, DFLAGS,
RK2928_CLKGATE_CON(6), 1, GFLAGS),
COMPOSITE_NOMUX(0, "pclk_bus_src", "aclk_cpu_src", 0,
RK2928_CLKSEL_CON(1), 12, 3, DFLAGS,
RK2928_CLKGATE_CON(6), 2, GFLAGS),
- GATE(0, "pclk_cpu", "pclk_bus_src", 0,
+ GATE(PCLK_CPU, "pclk_cpu", "pclk_bus_src", 0,
RK2928_CLKGATE_CON(6), 3, GFLAGS),
GATE(0, "pclk_phy_pre", "pclk_bus_src", 0,
RK2928_CLKGATE_CON(6), 4, GFLAGS),
@@ -268,58 +286,58 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(6), 13, GFLAGS),
/* PD_VIDEO */
- COMPOSITE(0, "aclk_vpu_pre", mux_pll_src_4plls_p, 0,
+ COMPOSITE(ACLK_VPU_PRE, "aclk_vpu_pre", mux_pll_src_4plls_p, 0,
RK2928_CLKSEL_CON(32), 5, 2, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 11, GFLAGS),
- FACTOR_GATE(0, "hclk_vpu_pre", "aclk_vpu_pre", 0, 1, 4,
+ FACTOR_GATE(HCLK_VPU_PRE, "hclk_vpu_pre", "aclk_vpu_pre", 0, 1, 4,
RK2928_CLKGATE_CON(4), 4, GFLAGS),
- COMPOSITE(0, "aclk_rkvdec_pre", mux_pll_src_4plls_p, 0,
+ COMPOSITE(ACLK_RKVDEC_PRE, "aclk_rkvdec_pre", mux_pll_src_4plls_p, 0,
RK2928_CLKSEL_CON(28), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 2, GFLAGS),
- FACTOR_GATE(0, "hclk_rkvdec_pre", "aclk_rkvdec_pre", 0, 1, 4,
+ FACTOR_GATE(HCLK_RKVDEC_PRE, "hclk_rkvdec_pre", "aclk_rkvdec_pre", 0, 1, 4,
RK2928_CLKGATE_CON(4), 5, GFLAGS),
- COMPOSITE(0, "sclk_vdec_cabac", mux_pll_src_4plls_p, 0,
+ COMPOSITE(SCLK_VDEC_CABAC, "sclk_vdec_cabac", mux_pll_src_4plls_p, 0,
RK2928_CLKSEL_CON(28), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 3, GFLAGS),
- COMPOSITE(0, "sclk_vdec_core", mux_pll_src_4plls_p, 0,
+ COMPOSITE(SCLK_VDEC_CORE, "sclk_vdec_core", mux_pll_src_4plls_p, 0,
RK2928_CLKSEL_CON(34), 13, 2, MFLAGS, 8, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 4, GFLAGS),
/* PD_VIO */
- COMPOSITE(0, "aclk_iep_pre", mux_pll_src_4plls_p, 0,
+ COMPOSITE(ACLK_IEP_PRE, "aclk_iep_pre", mux_pll_src_4plls_p, 0,
RK2928_CLKSEL_CON(31), 5, 2, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 0, GFLAGS),
- DIV(0, "hclk_vio_pre", "aclk_iep_pre", 0,
+ DIV(HCLK_VIO_PRE, "hclk_vio_pre", "aclk_iep_pre", 0,
RK2928_CLKSEL_CON(2), 0, 5, DFLAGS),
- COMPOSITE(0, "aclk_hdcp_pre", mux_pll_src_4plls_p, 0,
+ COMPOSITE(ACLK_HDCP_PRE, "aclk_hdcp_pre", mux_pll_src_4plls_p, 0,
RK2928_CLKSEL_CON(31), 13, 2, MFLAGS, 8, 5, DFLAGS,
RK2928_CLKGATE_CON(1), 4, GFLAGS),
MUX(0, "sclk_rga_src", mux_pll_src_4plls_p, 0,
RK2928_CLKSEL_CON(33), 13, 2, MFLAGS),
- COMPOSITE_NOMUX(0, "aclk_rga_pre", "sclk_rga_src", 0,
+ COMPOSITE_NOMUX(ACLK_RGA_PRE, "aclk_rga_pre", "sclk_rga_src", 0,
RK2928_CLKSEL_CON(33), 8, 5, DFLAGS,
RK2928_CLKGATE_CON(1), 2, GFLAGS),
- COMPOSITE(0, "sclk_rga", mux_sclk_rga_p, 0,
+ COMPOSITE(SCLK_RGA, "sclk_rga", mux_sclk_rga_p, 0,
RK2928_CLKSEL_CON(22), 5, 2, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(3), 6, GFLAGS),
- COMPOSITE(0, "aclk_vop_pre", mux_pll_src_4plls_p, 0,
+ COMPOSITE(ACLK_VOP_PRE, "aclk_vop_pre", mux_pll_src_4plls_p, 0,
RK2928_CLKSEL_CON(33), 5, 2, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(1), 1, GFLAGS),
- COMPOSITE(0, "sclk_hdcp", mux_pll_src_3plls_p, 0,
+ COMPOSITE(SCLK_HDCP, "sclk_hdcp", mux_pll_src_3plls_p, 0,
RK2928_CLKSEL_CON(23), 14, 2, MFLAGS, 8, 6, DFLAGS,
RK2928_CLKGATE_CON(3), 5, GFLAGS),
GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 0,
RK2928_CLKGATE_CON(3), 7, GFLAGS),
- COMPOSITE(0, "sclk_hdmi_cec", mux_sclk_hdmi_cec_p, 0,
+ COMPOSITE(SCLK_HDMI_CEC, "sclk_hdmi_cec", mux_sclk_hdmi_cec_p, 0,
RK2928_CLKSEL_CON(21), 14, 2, MFLAGS, 0, 14, DFLAGS,
RK2928_CLKGATE_CON(3), 8, GFLAGS),
@@ -354,18 +372,18 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(SCLK_TIMER5, "sclk_timer5", "xin24m", 0,
RK2928_CLKGATE_CON(6), 10, GFLAGS),
- COMPOSITE(0, "sclk_crypto", mux_pll_src_2plls_p, 0,
+ COMPOSITE(SCLK_CRYPTO, "sclk_crypto", mux_pll_src_2plls_p, 0,
RK2928_CLKSEL_CON(24), 5, 1, MFLAGS, 0, 5, DFLAGS,
RK2928_CLKGATE_CON(2), 7, GFLAGS),
- COMPOSITE(0, "sclk_tsp", mux_pll_src_2plls_p, 0,
+ COMPOSITE(SCLK_TSP, "sclk_tsp", mux_pll_src_2plls_p, 0,
RK2928_CLKSEL_CON(22), 15, 1, MFLAGS, 8, 5, DFLAGS,
RK2928_CLKGATE_CON(2), 6, GFLAGS),
- GATE(0, "sclk_hsadc", "ext_hsadc", 0,
+ GATE(SCLK_HSADC, "sclk_hsadc", "ext_hsadc", 0,
RK2928_CLKGATE_CON(10), 12, GFLAGS),
- COMPOSITE(0, "sclk_wifi", mux_pll_src_cpll_gpll_usb480m_p, 0,
+ COMPOSITE(SCLK_WIFI, "sclk_wifi", mux_pll_src_cpll_gpll_usb480m_p, 0,
RK2928_CLKSEL_CON(23), 5, 2, MFLAGS, 0, 6, DFLAGS,
RK2928_CLKGATE_CON(2), 15, GFLAGS),
@@ -445,12 +463,12 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(2), 12, GFLAGS,
&rk3228_spdif_fracmux),
- GATE(0, "jtag", "ext_jtag", 0,
+ GATE(0, "jtag", "ext_jtag", CLK_IGNORE_UNUSED,
RK2928_CLKGATE_CON(1), 3, GFLAGS),
- GATE(0, "sclk_otgphy0", "xin24m", 0,
+ GATE(SCLK_OTGPHY0, "sclk_otgphy0", "xin24m", 0,
RK2928_CLKGATE_CON(1), 5, GFLAGS),
- GATE(0, "sclk_otgphy1", "xin24m", 0,
+ GATE(SCLK_OTGPHY1, "sclk_otgphy1", "xin24m", 0,
RK2928_CLKGATE_CON(1), 6, GFLAGS),
COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin24m", 0,
@@ -526,28 +544,28 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
*/
/* PD_VOP */
- GATE(0, "aclk_rga", "aclk_rga_pre", 0, RK2928_CLKGATE_CON(13), 0, GFLAGS),
+ GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 0, RK2928_CLKGATE_CON(13), 0, GFLAGS),
GATE(0, "aclk_rga_noc", "aclk_rga_pre", 0, RK2928_CLKGATE_CON(13), 11, GFLAGS),
- GATE(0, "aclk_iep", "aclk_iep_pre", 0, RK2928_CLKGATE_CON(13), 2, GFLAGS),
+ GATE(ACLK_IEP, "aclk_iep", "aclk_iep_pre", 0, RK2928_CLKGATE_CON(13), 2, GFLAGS),
GATE(0, "aclk_iep_noc", "aclk_iep_pre", 0, RK2928_CLKGATE_CON(13), 9, GFLAGS),
GATE(ACLK_VOP, "aclk_vop", "aclk_vop_pre", 0, RK2928_CLKGATE_CON(13), 5, GFLAGS),
GATE(0, "aclk_vop_noc", "aclk_vop_pre", 0, RK2928_CLKGATE_CON(13), 12, GFLAGS),
- GATE(0, "aclk_hdcp", "aclk_hdcp_pre", 0, RK2928_CLKGATE_CON(14), 10, GFLAGS),
+ GATE(ACLK_HDCP, "aclk_hdcp", "aclk_hdcp_pre", 0, RK2928_CLKGATE_CON(14), 10, GFLAGS),
GATE(0, "aclk_hdcp_noc", "aclk_hdcp_pre", 0, RK2928_CLKGATE_CON(13), 10, GFLAGS),
- GATE(0, "hclk_rga", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 1, GFLAGS),
- GATE(0, "hclk_iep", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 3, GFLAGS),
+ GATE(HCLK_RGA, "hclk_rga", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 1, GFLAGS),
+ GATE(HCLK_IEP, "hclk_iep", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 3, GFLAGS),
GATE(HCLK_VOP, "hclk_vop", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 6, GFLAGS),
GATE(0, "hclk_vio_ahb_arbi", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 7, GFLAGS),
GATE(0, "hclk_vio_noc", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 8, GFLAGS),
GATE(0, "hclk_vop_noc", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(13), 13, GFLAGS),
- GATE(0, "hclk_vio_h2p", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 7, GFLAGS),
- GATE(0, "hclk_hdcp_mmu", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 12, GFLAGS),
+ GATE(HCLK_VIO_H2P, "hclk_vio_h2p", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 7, GFLAGS),
+ GATE(HCLK_HDCP_MMU, "hclk_hdcp_mmu", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 12, GFLAGS),
GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 6, GFLAGS),
- GATE(0, "pclk_vio_h2p", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 8, GFLAGS),
- GATE(0, "pclk_hdcp", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 11, GFLAGS),
+ GATE(PCLK_VIO_H2P, "pclk_vio_h2p", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 8, GFLAGS),
+ GATE(PCLK_HDCP, "pclk_hdcp", "hclk_vio_pre", 0, RK2928_CLKGATE_CON(14), 11, GFLAGS),
/* PD_PERI */
GATE(0, "aclk_peri_noc", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 0, GFLAGS),
@@ -557,12 +575,12 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 1, GFLAGS),
GATE(HCLK_EMMC, "hclk_emmc", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 2, GFLAGS),
GATE(HCLK_NANDC, "hclk_nandc", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 3, GFLAGS),
- GATE(0, "hclk_host0", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 6, GFLAGS),
+ GATE(HCLK_HOST0, "hclk_host0", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 6, GFLAGS),
GATE(0, "hclk_host0_arb", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 7, GFLAGS),
- GATE(0, "hclk_host1", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 8, GFLAGS),
+ GATE(HCLK_HOST1, "hclk_host1", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 8, GFLAGS),
GATE(0, "hclk_host1_arb", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 9, GFLAGS),
- GATE(0, "hclk_host2", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 10, GFLAGS),
- GATE(0, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 12, GFLAGS),
+ GATE(HCLK_HOST2, "hclk_host2", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 10, GFLAGS),
+ GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 12, GFLAGS),
GATE(0, "hclk_otg_pmu", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 13, GFLAGS),
GATE(0, "hclk_host2_arb", "hclk_peri", 0, RK2928_CLKGATE_CON(11), 14, GFLAGS),
GATE(0, "hclk_peri_noc", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 1, GFLAGS),
@@ -571,7 +589,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(0, "pclk_peri_noc", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(12), 2, GFLAGS),
/* PD_GPU */
- GATE(0, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 14, GFLAGS),
+ GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 14, GFLAGS),
GATE(0, "aclk_gpu_noc", "aclk_gpu_pre", 0, RK2928_CLKGATE_CON(13), 15, GFLAGS),
/* PD_BUS */
@@ -585,16 +603,16 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(HCLK_I2S1_8CH, "hclk_i2s1_8ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 8, GFLAGS),
GATE(HCLK_I2S2_2CH, "hclk_i2s2_2ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 9, GFLAGS),
GATE(HCLK_SPDIF_8CH, "hclk_spdif_8ch", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 10, GFLAGS),
- GATE(0, "hclk_tsp", "hclk_cpu", 0, RK2928_CLKGATE_CON(10), 11, GFLAGS),
- GATE(0, "hclk_crypto_mst", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 11, GFLAGS),
- GATE(0, "hclk_crypto_slv", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 12, GFLAGS),
+ GATE(HCLK_TSP, "hclk_tsp", "hclk_cpu", 0, RK2928_CLKGATE_CON(10), 11, GFLAGS),
+ GATE(HCLK_M_CRYPTO, "hclk_crypto_mst", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 11, GFLAGS),
+ GATE(HCLK_S_CRYPTO, "hclk_crypto_slv", "hclk_cpu", 0, RK2928_CLKGATE_CON(8), 12, GFLAGS),
GATE(0, "pclk_ddrupctl", "pclk_ddr_pre", 0, RK2928_CLKGATE_CON(8), 4, GFLAGS),
GATE(0, "pclk_ddrmon", "pclk_ddr_pre", 0, RK2928_CLKGATE_CON(8), 6, GFLAGS),
GATE(0, "pclk_msch_noc", "pclk_ddr_pre", 0, RK2928_CLKGATE_CON(10), 2, GFLAGS),
- GATE(0, "pclk_efuse_1024", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 13, GFLAGS),
- GATE(0, "pclk_efuse_256", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 14, GFLAGS),
+ GATE(PCLK_EFUSE_1024, "pclk_efuse_1024", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 13, GFLAGS),
+ GATE(PCLK_EFUSE_256, "pclk_efuse_256", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 14, GFLAGS),
GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 15, GFLAGS),
GATE(PCLK_I2C1, "pclk_i2c1", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 0, GFLAGS),
GATE(PCLK_I2C2, "pclk_i2c2", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 1, GFLAGS),
@@ -622,13 +640,13 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
GATE(0, "pclk_vdacphy", "pclk_phy_pre", 0, RK2928_CLKGATE_CON(10), 8, GFLAGS),
GATE(0, "pclk_phy_noc", "pclk_phy_pre", 0, RK2928_CLKGATE_CON(10), 9, GFLAGS),
- GATE(0, "aclk_vpu", "aclk_vpu_pre", 0, RK2928_CLKGATE_CON(15), 0, GFLAGS),
+ GATE(ACLK_VPU, "aclk_vpu", "aclk_vpu_pre", 0, RK2928_CLKGATE_CON(15), 0, GFLAGS),
GATE(0, "aclk_vpu_noc", "aclk_vpu_pre", 0, RK2928_CLKGATE_CON(15), 4, GFLAGS),
- GATE(0, "aclk_rkvdec", "aclk_rkvdec_pre", 0, RK2928_CLKGATE_CON(15), 2, GFLAGS),
+ GATE(ACLK_RKVDEC, "aclk_rkvdec", "aclk_rkvdec_pre", 0, RK2928_CLKGATE_CON(15), 2, GFLAGS),
GATE(0, "aclk_rkvdec_noc", "aclk_rkvdec_pre", 0, RK2928_CLKGATE_CON(15), 6, GFLAGS),
- GATE(0, "hclk_vpu", "hclk_vpu_pre", 0, RK2928_CLKGATE_CON(15), 1, GFLAGS),
+ GATE(HCLK_VPU, "hclk_vpu", "hclk_vpu_pre", 0, RK2928_CLKGATE_CON(15), 1, GFLAGS),
GATE(0, "hclk_vpu_noc", "hclk_vpu_pre", 0, RK2928_CLKGATE_CON(15), 5, GFLAGS),
- GATE(0, "hclk_rkvdec", "hclk_rkvdec_pre", 0, RK2928_CLKGATE_CON(15), 3, GFLAGS),
+ GATE(HCLK_RKVDEC, "hclk_rkvdec", "hclk_rkvdec_pre", 0, RK2928_CLKGATE_CON(15), 3, GFLAGS),
GATE(0, "hclk_rkvdec_noc", "hclk_rkvdec_pre", 0, RK2928_CLKGATE_CON(15), 7, GFLAGS),
/* PD_MMC */
@@ -644,9 +662,37 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
static const char *const rk3228_critical_clocks[] __initconst = {
"aclk_cpu",
+ "pclk_cpu",
+ "hclk_cpu",
"aclk_peri",
"hclk_peri",
"pclk_peri",
+ "aclk_rga_noc",
+ "aclk_iep_noc",
+ "aclk_vop_noc",
+ "aclk_hdcp_noc",
+ "hclk_vio_ahb_arbi",
+ "hclk_vio_noc",
+ "hclk_vop_noc",
+ "hclk_host0_arb",
+ "hclk_host1_arb",
+ "hclk_host2_arb",
+ "hclk_otg_pmu",
+ "aclk_gpu_noc",
+ "sclk_initmem_mbist",
+ "aclk_initmem",
+ "hclk_rom",
+ "pclk_ddrupctl",
+ "pclk_ddrmon",
+ "pclk_msch_noc",
+ "pclk_stimer",
+ "pclk_ddrphy",
+ "pclk_acodecphy",
+ "pclk_phy_noc",
+ "aclk_vpu_noc",
+ "aclk_rkvdec_noc",
+ "hclk_vpu_noc",
+ "hclk_rkvdec_noc",
};
static void __init rk3228_clk_init(struct device_node *np)
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 68ba7d4105e7..450de24a1b42 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -292,13 +292,13 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(0), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3288_CLKGATE_CON(12), 6, GFLAGS),
- COMPOSITE_NOMUX(0, "atclk", "armclk", 0,
+ COMPOSITE_NOMUX(0, "atclk", "armclk", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(37), 4, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3288_CLKGATE_CON(12), 7, GFLAGS),
COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", CLK_IGNORE_UNUSED,
RK3288_CLKSEL_CON(37), 9, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3288_CLKGATE_CON(12), 8, GFLAGS),
- GATE(0, "pclk_dbg", "pclk_dbg_pre", 0,
+ GATE(0, "pclk_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(12), 9, GFLAGS),
GATE(0, "cs_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(12), 10, GFLAGS),
@@ -626,7 +626,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
INVERTER(SCLK_HSADC, "sclk_hsadc", "sclk_hsadc_out",
RK3288_CLKSEL_CON(22), 7, IFLAGS),
- GATE(0, "jtag", "ext_jtag", 0,
+ GATE(0, "jtag", "ext_jtag", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(4), 14, GFLAGS),
COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0,
@@ -635,7 +635,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
RK3288_CLKSEL_CON(29), 0, 2, MFLAGS,
RK3288_CLKGATE_CON(3), 6, GFLAGS),
- GATE(0, "hsicphy12m_xin12m", "xin12m", 0,
+ GATE(0, "hsicphy12m_xin12m", "xin12m", CLK_IGNORE_UNUSED,
RK3288_CLKGATE_CON(13), 9, GFLAGS),
DIV(0, "hsicphy12m_usbphy", "sclk_hsicphy480m", 0,
RK3288_CLKSEL_CON(11), 8, 6, DFLAGS),
@@ -816,6 +816,12 @@ static const char *const rk3288_critical_clocks[] __initconst = {
"pclk_alive_niu",
"pclk_pd_pmu",
"pclk_pmu_niu",
+ "pclk_core_niu",
+ "pclk_ddrupctl0",
+ "pclk_publ0",
+ "pclk_ddrupctl1",
+ "pclk_publ1",
+ "pmu_hclk_otg0",
};
static void __iomem *rk3288_cru_base;
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 024762d3214d..fc56565379dd 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -638,7 +638,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
GATE(SCLK_MAC_TX, "sclk_mac_tx", "mac_clk", 0,
RK3368_CLKGATE_CON(7), 5, GFLAGS),
- GATE(0, "jtag", "ext_jtag", 0,
+ GATE(0, "jtag", "ext_jtag", CLK_IGNORE_UNUSED,
RK3368_CLKGATE_CON(7), 0, GFLAGS),
COMPOSITE_NODIV(0, "hsic_usbphy_480m", mux_hsic_usbphy480m_p, 0,
@@ -861,6 +861,9 @@ static const char *const rk3368_critical_clocks[] __initconst = {
"pclk_pd_alive",
"pclk_peri",
"hclk_peri",
+ "pclk_ddrphy",
+ "pclk_ddrupctl",
+ "pmu_hclk_otg0",
};
static void __init rk3368_clk_init(struct device_node *np)
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index fa3cbef08776..6847120b61cd 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -1066,13 +1066,13 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
/* cif_testout */
MUX(0, "clk_testout1_pll_src", mux_pll_src_cpll_gpll_npll_p, 0,
RK3399_CLKSEL_CON(38), 6, 2, MFLAGS),
- COMPOSITE(0, "clk_testout1", mux_clk_testout1_p, 0,
+ COMPOSITE(SCLK_TESTCLKOUT1, "clk_testout1", mux_clk_testout1_p, 0,
RK3399_CLKSEL_CON(38), 5, 1, MFLAGS, 0, 5, DFLAGS,
RK3399_CLKGATE_CON(13), 14, GFLAGS),
MUX(0, "clk_testout2_pll_src", mux_pll_src_cpll_gpll_npll_p, 0,
RK3399_CLKSEL_CON(38), 14, 2, MFLAGS),
- COMPOSITE(0, "clk_testout2", mux_clk_testout2_p, 0,
+ COMPOSITE(SCLK_TESTCLKOUT2, "clk_testout2", mux_clk_testout2_p, 0,
RK3399_CLKSEL_CON(38), 13, 1, MFLAGS, 8, 5, DFLAGS,
RK3399_CLKGATE_CON(13), 15, GFLAGS),
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 8bf7e805fd34..6686e8ba61f9 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -410,7 +410,7 @@ int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
{
struct exynos_cpuclk *cpuclk;
struct clk_init_data init;
- struct clk *clk;
+ struct clk *parent_clk;
int ret = 0;
cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
@@ -440,15 +440,15 @@ int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
goto free_cpuclk;
}
- clk = __clk_lookup(parent);
- if (!clk) {
+ parent_clk = __clk_lookup(parent);
+ if (!parent_clk) {
pr_err("%s: could not lookup parent clock %s\n",
__func__, parent);
ret = -EINVAL;
goto free_cpuclk;
}
- ret = clk_notifier_register(clk, &cpuclk->clk_nb);
+ ret = clk_notifier_register(parent_clk, &cpuclk->clk_nb);
if (ret) {
pr_err("%s: failed to register clock notifier for %s\n",
__func__, name);
@@ -463,20 +463,19 @@ int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
goto unregister_clk_nb;
}
- clk = clk_register(NULL, &cpuclk->hw);
- if (IS_ERR(clk)) {
+ ret = clk_hw_register(NULL, &cpuclk->hw);
+ if (ret) {
pr_err("%s: could not register cpuclk %s\n", __func__, name);
- ret = PTR_ERR(clk);
goto free_cpuclk_data;
}
- samsung_clk_add_lookup(ctx, clk, lookup_id);
+ samsung_clk_add_lookup(ctx, &cpuclk->hw, lookup_id);
return 0;
free_cpuclk_data:
kfree(cpuclk->cfg);
unregister_clk_nb:
- clk_notifier_unregister(__clk_lookup(parent), &cpuclk->clk_nb);
+ clk_notifier_unregister(parent_clk, &cpuclk->clk_nb);
free_cpuclk:
kfree(cpuclk);
return ret;
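The clk-cpu.c conversion above is the standard clk to clk_hw provider migration: clk_hw_register() operates on the clk_hw embedded in the driver's own structure and returns 0 or a negative errno, so the provider code never juggles struct clk pointers and the lookup table stores clk_hw pointers instead. A minimal sketch of that calling convention, using a hypothetical my_pll wrapper rather than anything from this patch:

#include <linux/clk-provider.h>
#include <linux/slab.h>

struct my_pll {
	struct clk_hw hw;	/* embedded; registered by address */
};

static int my_pll_register(const struct clk_init_data *init)
{
	struct my_pll *pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	int ret;

	if (!pll)
		return -ENOMEM;

	pll->hw.init = init;
	ret = clk_hw_register(NULL, &pll->hw);	/* 0 on success, -errno on failure */
	if (ret)
		kfree(pll);
	return ret;
}

Consumers are unaffected: they still receive a struct clk, either via the clk_hw's ->clk member or through the of_clk_hw_onecell_get() providers set up in the clk-exynos-audss.c and clk.c hunks below.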
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index cb7df358a27d..1fab56f396d4 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -22,9 +22,8 @@
#include <dt-bindings/clock/exynos-audss-clk.h>
static DEFINE_SPINLOCK(lock);
-static struct clk **clk_table;
static void __iomem *reg_base;
-static struct clk_onecell_data clk_data;
+static struct clk_hw_onecell_data *clk_data;
/*
* On Exynos5420 this will be a clock which has to be enabled before any
* access to audss registers. Typically a child of EPLL.
@@ -74,6 +73,7 @@ struct exynos_audss_clk_drvdata {
static const struct exynos_audss_clk_drvdata exynos4210_drvdata = {
.num_clks = EXYNOS_AUDSS_MAX_CLKS - 1,
+ .enable_epll = 1,
};
static const struct exynos_audss_clk_drvdata exynos5410_drvdata = {
@@ -110,18 +110,18 @@ static void exynos_audss_clk_teardown(void)
int i;
for (i = EXYNOS_MOUT_AUDSS; i < EXYNOS_DOUT_SRP; i++) {
- if (!IS_ERR(clk_table[i]))
- clk_unregister_mux(clk_table[i]);
+ if (!IS_ERR(clk_data->hws[i]))
+ clk_hw_unregister_mux(clk_data->hws[i]);
}
for (; i < EXYNOS_SRP_CLK; i++) {
- if (!IS_ERR(clk_table[i]))
- clk_unregister_divider(clk_table[i]);
+ if (!IS_ERR(clk_data->hws[i]))
+ clk_hw_unregister_divider(clk_data->hws[i]);
}
- for (; i < clk_data.clk_num; i++) {
- if (!IS_ERR(clk_table[i]))
- clk_unregister_gate(clk_table[i]);
+ for (; i < clk_data->num; i++) {
+ if (!IS_ERR(clk_data->hws[i]))
+ clk_hw_unregister_gate(clk_data->hws[i]);
}
}
@@ -133,6 +133,7 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
const char *sclk_pcm_p = "sclk_pcm0";
struct clk *pll_ref, *pll_in, *cdclk, *sclk_audio, *sclk_pcm_in;
const struct exynos_audss_clk_drvdata *variant;
+ struct clk_hw **clk_table;
struct resource *res;
int i, ret = 0;
@@ -149,14 +150,15 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
epll = ERR_PTR(-ENODEV);
- clk_table = devm_kzalloc(&pdev->dev,
- sizeof(struct clk *) * EXYNOS_AUDSS_MAX_CLKS,
+ clk_data = devm_kzalloc(&pdev->dev,
+ sizeof(*clk_data) +
+ sizeof(*clk_data->hws) * EXYNOS_AUDSS_MAX_CLKS,
GFP_KERNEL);
- if (!clk_table)
+ if (!clk_data)
return -ENOMEM;
- clk_data.clks = clk_table;
- clk_data.clk_num = variant->num_clks;
+ clk_data->num = variant->num_clks;
+ clk_table = clk_data->hws;
pll_ref = devm_clk_get(&pdev->dev, "pll_ref");
pll_in = devm_clk_get(&pdev->dev, "pll_in");
@@ -176,7 +178,7 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
}
}
}
- clk_table[EXYNOS_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss",
+ clk_table[EXYNOS_MOUT_AUDSS] = clk_hw_register_mux(NULL, "mout_audss",
mout_audss_p, ARRAY_SIZE(mout_audss_p),
CLK_SET_RATE_NO_REPARENT,
reg_base + ASS_CLK_SRC, 0, 1, 0, &lock);
@@ -187,53 +189,53 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
mout_i2s_p[1] = __clk_get_name(cdclk);
if (!IS_ERR(sclk_audio))
mout_i2s_p[2] = __clk_get_name(sclk_audio);
- clk_table[EXYNOS_MOUT_I2S] = clk_register_mux(NULL, "mout_i2s",
+ clk_table[EXYNOS_MOUT_I2S] = clk_hw_register_mux(NULL, "mout_i2s",
mout_i2s_p, ARRAY_SIZE(mout_i2s_p),
CLK_SET_RATE_NO_REPARENT,
reg_base + ASS_CLK_SRC, 2, 2, 0, &lock);
- clk_table[EXYNOS_DOUT_SRP] = clk_register_divider(NULL, "dout_srp",
+ clk_table[EXYNOS_DOUT_SRP] = clk_hw_register_divider(NULL, "dout_srp",
"mout_audss", 0, reg_base + ASS_CLK_DIV, 0, 4,
0, &lock);
- clk_table[EXYNOS_DOUT_AUD_BUS] = clk_register_divider(NULL,
+ clk_table[EXYNOS_DOUT_AUD_BUS] = clk_hw_register_divider(NULL,
"dout_aud_bus", "dout_srp", 0,
reg_base + ASS_CLK_DIV, 4, 4, 0, &lock);
- clk_table[EXYNOS_DOUT_I2S] = clk_register_divider(NULL, "dout_i2s",
+ clk_table[EXYNOS_DOUT_I2S] = clk_hw_register_divider(NULL, "dout_i2s",
"mout_i2s", 0, reg_base + ASS_CLK_DIV, 8, 4, 0,
&lock);
- clk_table[EXYNOS_SRP_CLK] = clk_register_gate(NULL, "srp_clk",
+ clk_table[EXYNOS_SRP_CLK] = clk_hw_register_gate(NULL, "srp_clk",
"dout_srp", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 0, 0, &lock);
- clk_table[EXYNOS_I2S_BUS] = clk_register_gate(NULL, "i2s_bus",
+ clk_table[EXYNOS_I2S_BUS] = clk_hw_register_gate(NULL, "i2s_bus",
"dout_aud_bus", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 2, 0, &lock);
- clk_table[EXYNOS_SCLK_I2S] = clk_register_gate(NULL, "sclk_i2s",
+ clk_table[EXYNOS_SCLK_I2S] = clk_hw_register_gate(NULL, "sclk_i2s",
"dout_i2s", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 3, 0, &lock);
- clk_table[EXYNOS_PCM_BUS] = clk_register_gate(NULL, "pcm_bus",
+ clk_table[EXYNOS_PCM_BUS] = clk_hw_register_gate(NULL, "pcm_bus",
"sclk_pcm", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 4, 0, &lock);
sclk_pcm_in = devm_clk_get(&pdev->dev, "sclk_pcm_in");
if (!IS_ERR(sclk_pcm_in))
sclk_pcm_p = __clk_get_name(sclk_pcm_in);
- clk_table[EXYNOS_SCLK_PCM] = clk_register_gate(NULL, "sclk_pcm",
+ clk_table[EXYNOS_SCLK_PCM] = clk_hw_register_gate(NULL, "sclk_pcm",
sclk_pcm_p, CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 5, 0, &lock);
if (variant->has_adma_clk) {
- clk_table[EXYNOS_ADMA] = clk_register_gate(NULL, "adma",
+ clk_table[EXYNOS_ADMA] = clk_hw_register_gate(NULL, "adma",
"dout_srp", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 9, 0, &lock);
}
- for (i = 0; i < clk_data.clk_num; i++) {
+ for (i = 0; i < clk_data->num; i++) {
if (IS_ERR(clk_table[i])) {
dev_err(&pdev->dev, "failed to register clock %d\n", i);
ret = PTR_ERR(clk_table[i]);
@@ -241,8 +243,8 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
}
}
- ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
- &clk_data);
+ ret = of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get,
+ clk_data);
if (ret) {
dev_err(&pdev->dev, "failed to add clock provider\n");
goto unregister;
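Switching the provider to of_clk_add_hw_provider()/of_clk_hw_onecell_get() is invisible to consumers: a device tree clock specifier still indexes the same table, now clk_data->hws[]. A hypothetical consumer of this audss provider (the device node and clock-names strings are illustrative, only the binding header is real):

#include <linux/clk.h>
#include <dt-bindings/clock/exynos-audss-clk.h>

/* DT:  clocks = <&clock_audss EXYNOS_SCLK_I2S>;  clock-names = "i2s"; */
static int enable_audss_i2s(struct device *dev)
{
	struct clk *sclk_i2s = devm_clk_get(dev, "i2s");

	if (IS_ERR(sclk_i2s))
		return PTR_ERR(sclk_i2s);

	/* resolved by of_clk_hw_onecell_get() to clk_data->hws[EXYNOS_SCLK_I2S]->clk */
	return clk_prepare_enable(sclk_i2s);
}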
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 6c6afb87b4ce..a21aea062bae 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -29,10 +29,9 @@ struct exynos_clkout {
struct clk_gate gate;
struct clk_mux mux;
spinlock_t slock;
- struct clk_onecell_data data;
- struct clk *clk_table[EXYNOS_CLKOUT_NR_CLKS];
void __iomem *reg;
u32 pmu_debug_save;
+ struct clk_hw_onecell_data data;
};
static struct exynos_clkout *clkout;
@@ -62,7 +61,9 @@ static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
int ret;
int i;
- clkout = kzalloc(sizeof(*clkout), GFP_KERNEL);
+ clkout = kzalloc(sizeof(*clkout) +
+ sizeof(*clkout->data.hws) * EXYNOS_CLKOUT_NR_CLKS,
+ GFP_KERNEL);
if (!clkout)
return;
@@ -100,17 +101,16 @@ static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
clkout->mux.shift = EXYNOS_CLKOUT_MUX_SHIFT;
clkout->mux.lock = &clkout->slock;
- clkout->clk_table[0] = clk_register_composite(NULL, "clkout",
+ clkout->data.hws[0] = clk_hw_register_composite(NULL, "clkout",
parent_names, parent_count, &clkout->mux.hw,
&clk_mux_ops, NULL, NULL, &clkout->gate.hw,
&clk_gate_ops, CLK_SET_RATE_PARENT
| CLK_SET_RATE_NO_REPARENT);
- if (IS_ERR(clkout->clk_table[0]))
+ if (IS_ERR(clkout->data.hws[0]))
goto err_unmap;
- clkout->data.clks = clkout->clk_table;
- clkout->data.clk_num = EXYNOS_CLKOUT_NR_CLKS;
- ret = of_clk_add_provider(node, of_clk_src_onecell_get, &clkout->data);
+ clkout->data.num = EXYNOS_CLKOUT_NR_CLKS;
+ ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, &clkout->data);
if (ret)
goto err_clk_unreg;
@@ -119,7 +119,7 @@ static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
return;
err_clk_unreg:
- clk_unregister(clkout->clk_table[0]);
+ clk_hw_unregister(clkout->data.hws[0]);
err_unmap:
iounmap(clkout->reg);
clks_put:
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index cdc092a1d9ef..0748a0b333c5 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -487,6 +487,7 @@ PNAME(mout_group12_5800_p) = { "dout_aclkfl1_550_cam", "dout_sclk_sw" };
PNAME(mout_group13_5800_p) = { "dout_osc_div", "mout_sw_aclkfl1_550_cam" };
PNAME(mout_group14_5800_p) = { "dout_aclk550_cam", "dout_sclk_sw" };
PNAME(mout_group15_5800_p) = { "dout_osc_div", "mout_sw_aclk550_cam" };
+PNAME(mout_group16_5800_p) = { "dout_osc_div", "mout_mau_epll_clk" };
/* fixed rate clocks generated outside the soc */
static struct samsung_fixed_rate_clock
@@ -536,8 +537,8 @@ static const struct samsung_mux_clock exynos5800_mux_clks[] __initconst = {
MUX(CLK_MOUT_MX_MSPLL_CCORE, "mout_mx_mspll_ccore",
mout_mx_mspll_ccore_p, SRC_TOP7, 16, 2),
- MUX(0, "mout_mau_epll_clk", mout_mau_epll_clk_5800_p, SRC_TOP7,
- 20, 2),
+ MUX(CLK_MOUT_MAU_EPLL, "mout_mau_epll_clk", mout_mau_epll_clk_5800_p,
+ SRC_TOP7, 20, 2),
MUX(0, "sclk_bpll", mout_bpll_p, SRC_TOP7, 24, 1),
MUX(0, "mout_epll2", mout_epll2_5800_p, SRC_TOP7, 28, 1),
@@ -546,6 +547,8 @@ static const struct samsung_mux_clock exynos5800_mux_clks[] __initconst = {
MUX(0, "mout_aclk432_cam", mout_group6_5800_p, SRC_TOP8, 24, 2),
MUX(0, "mout_aclk432_scaler", mout_group6_5800_p, SRC_TOP8, 28, 2),
+ MUX(CLK_MOUT_USER_MAU_EPLL, "mout_user_mau_epll", mout_group16_5800_p,
+ SRC_TOP9, 8, 1),
MUX(0, "mout_user_aclk550_cam", mout_group15_5800_p,
SRC_TOP9, 16, 1),
MUX(0, "mout_user_aclkfl1_550_cam", mout_group13_5800_p,
@@ -703,7 +706,7 @@ static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
MUX(0, "mout_sclk_spll", mout_spll_p, SRC_TOP6, 8, 1),
MUX(0, "mout_sclk_ipll", mout_ipll_p, SRC_TOP6, 12, 1),
MUX(0, "mout_sclk_rpll", mout_rpll_p, SRC_TOP6, 16, 1),
- MUX(0, "mout_sclk_epll", mout_epll_p, SRC_TOP6, 20, 1),
+ MUX(CLK_MOUT_EPLL, "mout_sclk_epll", mout_epll_p, SRC_TOP6, 20, 1),
MUX(0, "mout_sclk_dpll", mout_dpll_p, SRC_TOP6, 24, 1),
MUX(0, "mout_sclk_cpll", mout_cpll_p, SRC_TOP6, 28, 1),
@@ -1277,6 +1280,21 @@ static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __ini
PLL_35XX_RATE(200000000, 200, 3, 3),
};
+static const struct samsung_pll_rate_table exynos5420_epll_24mhz_tbl[] = {
+ PLL_36XX_RATE(600000000U, 100, 2, 1, 0),
+ PLL_36XX_RATE(400000000U, 200, 3, 2, 0),
+ PLL_36XX_RATE(393216000U, 197, 3, 2, -25690),
+ PLL_36XX_RATE(361267200U, 301, 5, 2, 3671),
+ PLL_36XX_RATE(200000000U, 200, 3, 3, 0),
+ PLL_36XX_RATE(196608000U, 197, 3, 3, -25690),
+ PLL_36XX_RATE(180633600U, 301, 5, 3, 3671),
+ PLL_36XX_RATE(131072000U, 131, 3, 3, 4719),
+ PLL_36XX_RATE(100000000U, 200, 3, 4, 0),
+ PLL_36XX_RATE(65536000U, 131, 3, 4, 4719),
+ PLL_36XX_RATE(49152000U, 197, 3, 5, -25690),
+ PLL_36XX_RATE(32768000U, 131, 3, 5, 4719),
+};
+
static struct samsung_pll_clock exynos5x_plls[nr_plls] __initdata = {
[apll] = PLL(pll_2550, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK,
APLL_CON0, NULL),
@@ -1284,7 +1302,7 @@ static struct samsung_pll_clock exynos5x_plls[nr_plls] __initdata = {
CPLL_CON0, NULL),
[dpll] = PLL(pll_2550, CLK_FOUT_DPLL, "fout_dpll", "fin_pll", DPLL_LOCK,
DPLL_CON0, NULL),
- [epll] = PLL(pll_2650, CLK_FOUT_EPLL, "fout_epll", "fin_pll", EPLL_LOCK,
+ [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll", EPLL_LOCK,
EPLL_CON0, NULL),
[rpll] = PLL(pll_2650, CLK_FOUT_RPLL, "fout_rpll", "fin_pll", RPLL_LOCK,
RPLL_CON0, NULL),
@@ -1399,6 +1417,7 @@ static void __init exynos5x_clk_init(struct device_node *np,
if (_get_rate("fin_pll") == 24 * MHZ) {
exynos5x_plls[apll].rate_table = exynos5420_pll2550x_24mhz_tbl;
+ exynos5x_plls[epll].rate_table = exynos5420_epll_24mhz_tbl;
exynos5x_plls[kpll].rate_table = exynos5420_pll2550x_24mhz_tbl;
exynos5x_plls[bpll].rate_table = exynos5420_pll2550x_24mhz_tbl;
}
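The new 24 MHz EPLL table follows the relation that samsung_pll36xx_recalc_rate() implements:

	FOUT = FIN * (MDIV + KDIV / 65536) / (PDIV * 2^SDIV)

For example, the 180633600 row gives 24 MHz * (301 + 3671/65536) / (5 * 2^3) = 24 MHz * 301.056 / 40 ≈ 180.6336 MHz, and the 49152000 row relies on the negative KDIV: 24 MHz * (197 - 25690/65536) / (3 * 2^5) = 24 MHz * 196.608 / 96 ≈ 49.152 MHz.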
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 52290894857a..037c61484098 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -23,6 +23,10 @@ struct samsung_clk_pll {
struct clk_hw hw;
void __iomem *lock_reg;
void __iomem *con_reg;
+ /* PLL enable control bit offset in @con_reg register */
+ unsigned short enable_offs;
+ /* PLL lock status bit offset in @con_reg register */
+ unsigned short lock_offs;
enum samsung_pll_type type;
unsigned int rate_count;
const struct samsung_pll_rate_table *rate_table;
@@ -61,6 +65,34 @@ static long samsung_pll_round_rate(struct clk_hw *hw,
return rate_table[i - 1].rate;
}
+static int samsung_pll3xxx_enable(struct clk_hw *hw)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 tmp;
+
+ tmp = readl_relaxed(pll->con_reg);
+ tmp |= BIT(pll->enable_offs);
+ writel_relaxed(tmp, pll->con_reg);
+
+ /* wait lock time */
+ do {
+ cpu_relax();
+ tmp = readl_relaxed(pll->con_reg);
+ } while (!(tmp & BIT(pll->lock_offs)));
+
+ return 0;
+}
+
+static void samsung_pll3xxx_disable(struct clk_hw *hw)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 tmp;
+
+ tmp = readl_relaxed(pll->con_reg);
+ tmp &= ~BIT(pll->enable_offs);
+ writel_relaxed(tmp, pll->con_reg);
+}
+
/*
* PLL2126 Clock Type
*/
@@ -142,34 +174,6 @@ static const struct clk_ops samsung_pll3000_clk_ops = {
#define PLL35XX_LOCK_STAT_SHIFT (29)
#define PLL35XX_ENABLE_SHIFT (31)
-static int samsung_pll35xx_enable(struct clk_hw *hw)
-{
- struct samsung_clk_pll *pll = to_clk_pll(hw);
- u32 tmp;
-
- tmp = readl_relaxed(pll->con_reg);
- tmp |= BIT(PLL35XX_ENABLE_SHIFT);
- writel_relaxed(tmp, pll->con_reg);
-
- /* wait_lock_time */
- do {
- cpu_relax();
- tmp = readl_relaxed(pll->con_reg);
- } while (!(tmp & BIT(PLL35XX_LOCK_STAT_SHIFT)));
-
- return 0;
-}
-
-static void samsung_pll35xx_disable(struct clk_hw *hw)
-{
- struct samsung_clk_pll *pll = to_clk_pll(hw);
- u32 tmp;
-
- tmp = readl_relaxed(pll->con_reg);
- tmp &= ~BIT(PLL35XX_ENABLE_SHIFT);
- writel_relaxed(tmp, pll->con_reg);
-}
-
static unsigned long samsung_pll35xx_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -238,12 +242,12 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
(rate->sdiv << PLL35XX_SDIV_SHIFT);
writel_relaxed(tmp, pll->con_reg);
- /* wait_lock_time if enabled */
- if (tmp & BIT(PLL35XX_ENABLE_SHIFT)) {
+ /* Wait until the PLL is locked if it is enabled. */
+ if (tmp & BIT(pll->enable_offs)) {
do {
cpu_relax();
tmp = readl_relaxed(pll->con_reg);
- } while (!(tmp & BIT(PLL35XX_LOCK_STAT_SHIFT)));
+ } while (!(tmp & BIT(pll->lock_offs)));
}
return 0;
}
@@ -252,8 +256,8 @@ static const struct clk_ops samsung_pll35xx_clk_ops = {
.recalc_rate = samsung_pll35xx_recalc_rate,
.round_rate = samsung_pll_round_rate,
.set_rate = samsung_pll35xx_set_rate,
- .enable = samsung_pll35xx_enable,
- .disable = samsung_pll35xx_disable,
+ .enable = samsung_pll3xxx_enable,
+ .disable = samsung_pll3xxx_disable,
};
static const struct clk_ops samsung_pll35xx_clk_min_ops = {
@@ -275,6 +279,7 @@ static const struct clk_ops samsung_pll35xx_clk_min_ops = {
#define PLL36XX_SDIV_SHIFT (0)
#define PLL36XX_KDIV_SHIFT (0)
#define PLL36XX_LOCK_STAT_SHIFT (29)
+#define PLL36XX_ENABLE_SHIFT (31)
static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
@@ -354,10 +359,12 @@ static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate,
writel_relaxed(pll_con1, pll->con_reg + 4);
/* wait_lock_time */
- do {
- cpu_relax();
- tmp = readl_relaxed(pll->con_reg);
- } while (!(tmp & (1 << PLL36XX_LOCK_STAT_SHIFT)));
+ if (pll_con0 & BIT(pll->enable_offs)) {
+ do {
+ cpu_relax();
+ tmp = readl_relaxed(pll->con_reg);
+ } while (!(tmp & BIT(pll->lock_offs)));
+ }
return 0;
}
@@ -366,6 +373,8 @@ static const struct clk_ops samsung_pll36xx_clk_ops = {
.recalc_rate = samsung_pll36xx_recalc_rate,
.set_rate = samsung_pll36xx_set_rate,
.round_rate = samsung_pll_round_rate,
+ .enable = samsung_pll3xxx_enable,
+ .disable = samsung_pll3xxx_disable,
};
static const struct clk_ops samsung_pll36xx_clk_min_ops = {
@@ -1244,7 +1253,6 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
void __iomem *base)
{
struct samsung_clk_pll *pll;
- struct clk *clk;
struct clk_init_data init;
int ret, len;
@@ -1288,6 +1296,8 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
case pll_1450x:
case pll_1451x:
case pll_1452x:
+ pll->enable_offs = PLL35XX_ENABLE_SHIFT;
+ pll->lock_offs = PLL35XX_LOCK_STAT_SHIFT;
if (!pll->rate_table)
init.ops = &samsung_pll35xx_clk_min_ops;
else
@@ -1306,6 +1316,8 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
/* clk_ops for 36xx and 2650 are similar */
case pll_36xx:
case pll_2650:
+ pll->enable_offs = PLL36XX_ENABLE_SHIFT;
+ pll->lock_offs = PLL36XX_LOCK_STAT_SHIFT;
if (!pll->rate_table)
init.ops = &samsung_pll36xx_clk_min_ops;
else
@@ -1376,20 +1388,21 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
pll->lock_reg = base + pll_clk->lock_offset;
pll->con_reg = base + pll_clk->con_offset;
- clk = clk_register(NULL, &pll->hw);
- if (IS_ERR(clk)) {
- pr_err("%s: failed to register pll clock %s : %ld\n",
- __func__, pll_clk->name, PTR_ERR(clk));
+ ret = clk_hw_register(NULL, &pll->hw);
+ if (ret) {
+ pr_err("%s: failed to register pll clock %s : %d\n",
+ __func__, pll_clk->name, ret);
kfree(pll);
return;
}
- samsung_clk_add_lookup(ctx, clk, pll_clk->id);
+ samsung_clk_add_lookup(ctx, &pll->hw, pll_clk->id);
if (!pll_clk->alias)
return;
- ret = clk_register_clkdev(clk, pll_clk->alias, pll_clk->dev_name);
+ ret = clk_hw_register_clkdev(&pll->hw, pll_clk->alias,
+ pll_clk->dev_name);
if (ret)
pr_err("%s: failed to register lookup for %s : %d",
__func__, pll_clk->name, ret);
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index a1ca0233cb4b..61eb8abbfd9c 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -103,8 +103,4 @@ struct samsung_pll_rate_table {
unsigned int vsel;
};
-extern struct clk * __init samsung_clk_register_pll2550x(const char *name,
- const char *pname, const void __iomem *reg_base,
- const unsigned long offset);
-
#endif /* __SAMSUNG_CLK_PLL_H */
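The samsung_pll3xxx_enable() helper added in clk-pll.c above covers both the 35xx and 36xx families only because the enable and lock-status bit positions now live in the per-PLL enable_offs/lock_offs fields; the helper itself busy-waits on the lock bit with cpu_relax(). Purely as an illustration of the same idiom with a bound on the wait (the driver does not use readl_relaxed_poll_timeout(), and the 1 ms limit here is arbitrary):

#include <linux/iopoll.h>

static int pll3xxx_enable_bounded(struct samsung_clk_pll *pll)
{
	u32 con = readl_relaxed(pll->con_reg);

	writel_relaxed(con | BIT(pll->enable_offs), pll->con_reg);

	/* poll the lock-status bit, give up after 1 ms */
	return readl_relaxed_poll_timeout(pll->con_reg, con,
					  con & BIT(pll->lock_offs),
					  0, 1000);
}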
diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c
index ae9a595c72d0..077df3e539a7 100644
--- a/drivers/clk/samsung/clk-s3c2410-dclk.c
+++ b/drivers/clk/samsung/clk-s3c2410-dclk.c
@@ -90,13 +90,13 @@ static const struct clk_ops s3c24xx_clkout_ops = {
.determine_rate = __clk_mux_determine_rate,
};
-static struct clk *s3c24xx_register_clkout(struct device *dev, const char *name,
- const char **parent_names, u8 num_parents,
+static struct clk_hw *s3c24xx_register_clkout(struct device *dev,
+ const char *name, const char **parent_names, u8 num_parents,
u8 shift, u32 mask)
{
struct s3c24xx_clkout *clkout;
- struct clk *clk;
struct clk_init_data init;
+ int ret;
/* allocate the clkout */
clkout = kzalloc(sizeof(*clkout), GFP_KERNEL);
@@ -113,9 +113,11 @@ static struct clk *s3c24xx_register_clkout(struct device *dev, const char *name,
clkout->mask = mask;
clkout->hw.init = &init;
- clk = clk_register(dev, &clkout->hw);
+ ret = clk_hw_register(dev, &clkout->hw);
+ if (ret)
+ return ERR_PTR(ret);
- return clk;
+ return &clkout->hw;
}
/*
@@ -125,11 +127,12 @@ static struct clk *s3c24xx_register_clkout(struct device *dev, const char *name,
struct s3c24xx_dclk {
struct device *dev;
void __iomem *base;
- struct clk_onecell_data clk_data;
struct notifier_block dclk0_div_change_nb;
struct notifier_block dclk1_div_change_nb;
spinlock_t dclk_lock;
unsigned long reg_save;
+ /* clk_data must be the last entry in the structure */
+ struct clk_hw_onecell_data clk_data;
};
#define to_s3c24xx_dclk0(x) \
@@ -240,28 +243,23 @@ static int s3c24xx_dclk_probe(struct platform_device *pdev)
{
struct s3c24xx_dclk *s3c24xx_dclk;
struct resource *mem;
- struct clk **clk_table;
struct s3c24xx_dclk_drv_data *dclk_variant;
+ struct clk_hw **clk_table;
int ret, i;
- s3c24xx_dclk = devm_kzalloc(&pdev->dev, sizeof(*s3c24xx_dclk),
- GFP_KERNEL);
+ s3c24xx_dclk = devm_kzalloc(&pdev->dev, sizeof(*s3c24xx_dclk) +
+ sizeof(*s3c24xx_dclk->clk_data.hws) * DCLK_MAX_CLKS,
+ GFP_KERNEL);
if (!s3c24xx_dclk)
return -ENOMEM;
+ clk_table = s3c24xx_dclk->clk_data.hws;
+
s3c24xx_dclk->dev = &pdev->dev;
+ s3c24xx_dclk->clk_data.num = DCLK_MAX_CLKS;
platform_set_drvdata(pdev, s3c24xx_dclk);
spin_lock_init(&s3c24xx_dclk->dclk_lock);
- clk_table = devm_kzalloc(&pdev->dev,
- sizeof(struct clk *) * DCLK_MAX_CLKS,
- GFP_KERNEL);
- if (!clk_table)
- return -ENOMEM;
-
- s3c24xx_dclk->clk_data.clks = clk_table;
- s3c24xx_dclk->clk_data.clk_num = DCLK_MAX_CLKS;
-
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
s3c24xx_dclk->base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(s3c24xx_dclk->base))
@@ -271,29 +269,29 @@ static int s3c24xx_dclk_probe(struct platform_device *pdev)
platform_get_device_id(pdev)->driver_data;
- clk_table[MUX_DCLK0] = clk_register_mux(&pdev->dev, "mux_dclk0",
+ clk_table[MUX_DCLK0] = clk_hw_register_mux(&pdev->dev, "mux_dclk0",
dclk_variant->mux_parent_names,
dclk_variant->mux_num_parents, 0,
s3c24xx_dclk->base, 1, 1, 0,
&s3c24xx_dclk->dclk_lock);
- clk_table[MUX_DCLK1] = clk_register_mux(&pdev->dev, "mux_dclk1",
+ clk_table[MUX_DCLK1] = clk_hw_register_mux(&pdev->dev, "mux_dclk1",
dclk_variant->mux_parent_names,
dclk_variant->mux_num_parents, 0,
s3c24xx_dclk->base, 17, 1, 0,
&s3c24xx_dclk->dclk_lock);
- clk_table[DIV_DCLK0] = clk_register_divider(&pdev->dev, "div_dclk0",
+ clk_table[DIV_DCLK0] = clk_hw_register_divider(&pdev->dev, "div_dclk0",
"mux_dclk0", 0, s3c24xx_dclk->base,
4, 4, 0, &s3c24xx_dclk->dclk_lock);
- clk_table[DIV_DCLK1] = clk_register_divider(&pdev->dev, "div_dclk1",
+ clk_table[DIV_DCLK1] = clk_hw_register_divider(&pdev->dev, "div_dclk1",
"mux_dclk1", 0, s3c24xx_dclk->base,
20, 4, 0, &s3c24xx_dclk->dclk_lock);
- clk_table[GATE_DCLK0] = clk_register_gate(&pdev->dev, "gate_dclk0",
+ clk_table[GATE_DCLK0] = clk_hw_register_gate(&pdev->dev, "gate_dclk0",
"div_dclk0", CLK_SET_RATE_PARENT,
s3c24xx_dclk->base, 0, 0,
&s3c24xx_dclk->dclk_lock);
- clk_table[GATE_DCLK1] = clk_register_gate(&pdev->dev, "gate_dclk1",
+ clk_table[GATE_DCLK1] = clk_hw_register_gate(&pdev->dev, "gate_dclk1",
"div_dclk1", CLK_SET_RATE_PARENT,
s3c24xx_dclk->base, 16, 0,
&s3c24xx_dclk->dclk_lock);
@@ -312,15 +310,16 @@ static int s3c24xx_dclk_probe(struct platform_device *pdev)
goto err_clk_register;
}
- ret = clk_register_clkdev(clk_table[MUX_DCLK0], "dclk0", NULL);
+ ret = clk_hw_register_clkdev(clk_table[MUX_DCLK0], "dclk0", NULL);
if (!ret)
- ret = clk_register_clkdev(clk_table[MUX_DCLK1], "dclk1", NULL);
+ ret = clk_hw_register_clkdev(clk_table[MUX_DCLK1], "dclk1",
+ NULL);
if (!ret)
- ret = clk_register_clkdev(clk_table[MUX_CLKOUT0],
- "clkout0", NULL);
+ ret = clk_hw_register_clkdev(clk_table[MUX_CLKOUT0],
+ "clkout0", NULL);
if (!ret)
- ret = clk_register_clkdev(clk_table[MUX_CLKOUT1],
- "clkout1", NULL);
+ ret = clk_hw_register_clkdev(clk_table[MUX_CLKOUT1],
+ "clkout1", NULL);
if (ret) {
dev_err(&pdev->dev, "failed to register aliases, %d\n", ret);
goto err_clk_register;
@@ -332,12 +331,12 @@ static int s3c24xx_dclk_probe(struct platform_device *pdev)
s3c24xx_dclk->dclk1_div_change_nb.notifier_call =
s3c24xx_dclk1_div_notify;
- ret = clk_notifier_register(clk_table[DIV_DCLK0],
+ ret = clk_notifier_register(clk_table[DIV_DCLK0]->clk,
&s3c24xx_dclk->dclk0_div_change_nb);
if (ret)
goto err_clk_register;
- ret = clk_notifier_register(clk_table[DIV_DCLK1],
+ ret = clk_notifier_register(clk_table[DIV_DCLK1]->clk,
&s3c24xx_dclk->dclk1_div_change_nb);
if (ret)
goto err_dclk_notify;
@@ -345,12 +344,12 @@ static int s3c24xx_dclk_probe(struct platform_device *pdev)
return 0;
err_dclk_notify:
- clk_notifier_unregister(clk_table[DIV_DCLK0],
+ clk_notifier_unregister(clk_table[DIV_DCLK0]->clk,
&s3c24xx_dclk->dclk0_div_change_nb);
err_clk_register:
for (i = 0; i < DCLK_MAX_CLKS; i++)
if (clk_table[i] && !IS_ERR(clk_table[i]))
- clk_unregister(clk_table[i]);
+ clk_hw_unregister(clk_table[i]);
return ret;
}
@@ -358,16 +357,16 @@ err_clk_register:
static int s3c24xx_dclk_remove(struct platform_device *pdev)
{
struct s3c24xx_dclk *s3c24xx_dclk = platform_get_drvdata(pdev);
- struct clk **clk_table = s3c24xx_dclk->clk_data.clks;
+ struct clk_hw **clk_table = s3c24xx_dclk->clk_data.hws;
int i;
- clk_notifier_unregister(clk_table[DIV_DCLK1],
+ clk_notifier_unregister(clk_table[DIV_DCLK1]->clk,
&s3c24xx_dclk->dclk1_div_change_nb);
- clk_notifier_unregister(clk_table[DIV_DCLK0],
+ clk_notifier_unregister(clk_table[DIV_DCLK0]->clk,
&s3c24xx_dclk->dclk0_div_change_nb);
for (i = 0; i < DCLK_MAX_CLKS; i++)
- clk_unregister(clk_table[i]);
+ clk_hw_unregister(clk_table[i]);
return 0;
}
diff --git a/drivers/clk/samsung/clk-s5pv210-audss.c b/drivers/clk/samsung/clk-s5pv210-audss.c
index c66ed2d1450e..b9641414ddc6 100644
--- a/drivers/clk/samsung/clk-s5pv210-audss.c
+++ b/drivers/clk/samsung/clk-s5pv210-audss.c
@@ -24,9 +24,8 @@
#include <dt-bindings/clock/s5pv210-audss.h>
static DEFINE_SPINLOCK(lock);
-static struct clk **clk_table;
static void __iomem *reg_base;
-static struct clk_onecell_data clk_data;
+static struct clk_hw_onecell_data *clk_data;
#define ASS_CLK_SRC 0x0
#define ASS_CLK_DIV 0x4
@@ -71,6 +70,7 @@ static int s5pv210_audss_clk_probe(struct platform_device *pdev)
const char *mout_audss_p[2];
const char *mout_i2s_p[3];
const char *hclk_p;
+ struct clk_hw **clk_table;
struct clk *hclk, *pll_ref, *pll_in, *cdclk, *sclk_audio;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -80,14 +80,16 @@ static int s5pv210_audss_clk_probe(struct platform_device *pdev)
return PTR_ERR(reg_base);
}
- clk_table = devm_kzalloc(&pdev->dev,
- sizeof(struct clk *) * AUDSS_MAX_CLKS,
+ clk_data = devm_kzalloc(&pdev->dev,
+ sizeof(*clk_data) +
+ sizeof(*clk_data->hws) * AUDSS_MAX_CLKS,
GFP_KERNEL);
- if (!clk_table)
+
+ if (!clk_data)
return -ENOMEM;
- clk_data.clks = clk_table;
- clk_data.clk_num = AUDSS_MAX_CLKS;
+ clk_data->num = AUDSS_MAX_CLKS;
+ clk_table = clk_data->hws;
hclk = devm_clk_get(&pdev->dev, "hclk");
if (IS_ERR(hclk)) {
@@ -116,7 +118,7 @@ static int s5pv210_audss_clk_probe(struct platform_device *pdev)
else
mout_audss_p[0] = "xxti";
mout_audss_p[1] = __clk_get_name(pll_in);
- clk_table[CLK_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss",
+ clk_table[CLK_MOUT_AUDSS] = clk_hw_register_mux(NULL, "mout_audss",
mout_audss_p, ARRAY_SIZE(mout_audss_p),
CLK_SET_RATE_NO_REPARENT,
reg_base + ASS_CLK_SRC, 0, 1, 0, &lock);
@@ -127,44 +129,44 @@ static int s5pv210_audss_clk_probe(struct platform_device *pdev)
else
mout_i2s_p[1] = "iiscdclk0";
mout_i2s_p[2] = __clk_get_name(sclk_audio);
- clk_table[CLK_MOUT_I2S_A] = clk_register_mux(NULL, "mout_i2s_audss",
+ clk_table[CLK_MOUT_I2S_A] = clk_hw_register_mux(NULL, "mout_i2s_audss",
mout_i2s_p, ARRAY_SIZE(mout_i2s_p),
CLK_SET_RATE_NO_REPARENT,
reg_base + ASS_CLK_SRC, 2, 2, 0, &lock);
- clk_table[CLK_DOUT_AUD_BUS] = clk_register_divider(NULL,
+ clk_table[CLK_DOUT_AUD_BUS] = clk_hw_register_divider(NULL,
"dout_aud_bus", "mout_audss", 0,
reg_base + ASS_CLK_DIV, 0, 4, 0, &lock);
- clk_table[CLK_DOUT_I2S_A] = clk_register_divider(NULL, "dout_i2s_audss",
- "mout_i2s_audss", 0, reg_base + ASS_CLK_DIV,
- 4, 4, 0, &lock);
+ clk_table[CLK_DOUT_I2S_A] = clk_hw_register_divider(NULL,
+ "dout_i2s_audss", "mout_i2s_audss", 0,
+ reg_base + ASS_CLK_DIV, 4, 4, 0, &lock);
- clk_table[CLK_I2S] = clk_register_gate(NULL, "i2s_audss",
+ clk_table[CLK_I2S] = clk_hw_register_gate(NULL, "i2s_audss",
"dout_i2s_audss", CLK_SET_RATE_PARENT,
reg_base + ASS_CLK_GATE, 6, 0, &lock);
hclk_p = __clk_get_name(hclk);
- clk_table[CLK_HCLK_I2S] = clk_register_gate(NULL, "hclk_i2s_audss",
+ clk_table[CLK_HCLK_I2S] = clk_hw_register_gate(NULL, "hclk_i2s_audss",
hclk_p, CLK_IGNORE_UNUSED,
reg_base + ASS_CLK_GATE, 5, 0, &lock);
- clk_table[CLK_HCLK_UART] = clk_register_gate(NULL, "hclk_uart_audss",
+ clk_table[CLK_HCLK_UART] = clk_hw_register_gate(NULL, "hclk_uart_audss",
hclk_p, CLK_IGNORE_UNUSED,
reg_base + ASS_CLK_GATE, 4, 0, &lock);
- clk_table[CLK_HCLK_HWA] = clk_register_gate(NULL, "hclk_hwa_audss",
+ clk_table[CLK_HCLK_HWA] = clk_hw_register_gate(NULL, "hclk_hwa_audss",
hclk_p, CLK_IGNORE_UNUSED,
reg_base + ASS_CLK_GATE, 3, 0, &lock);
- clk_table[CLK_HCLK_DMA] = clk_register_gate(NULL, "hclk_dma_audss",
+ clk_table[CLK_HCLK_DMA] = clk_hw_register_gate(NULL, "hclk_dma_audss",
hclk_p, CLK_IGNORE_UNUSED,
reg_base + ASS_CLK_GATE, 2, 0, &lock);
- clk_table[CLK_HCLK_BUF] = clk_register_gate(NULL, "hclk_buf_audss",
+ clk_table[CLK_HCLK_BUF] = clk_hw_register_gate(NULL, "hclk_buf_audss",
hclk_p, CLK_IGNORE_UNUSED,
reg_base + ASS_CLK_GATE, 1, 0, &lock);
- clk_table[CLK_HCLK_RP] = clk_register_gate(NULL, "hclk_rp_audss",
+ clk_table[CLK_HCLK_RP] = clk_hw_register_gate(NULL, "hclk_rp_audss",
hclk_p, CLK_IGNORE_UNUSED,
reg_base + ASS_CLK_GATE, 0, 0, &lock);
- for (i = 0; i < clk_data.clk_num; i++) {
+ for (i = 0; i < clk_data->num; i++) {
if (IS_ERR(clk_table[i])) {
dev_err(&pdev->dev, "failed to register clock %d\n", i);
ret = PTR_ERR(clk_table[i]);
@@ -172,8 +174,8 @@ static int s5pv210_audss_clk_probe(struct platform_device *pdev)
}
}
- ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
- &clk_data);
+ ret = of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get,
+ clk_data);
if (ret) {
dev_err(&pdev->dev, "failed to add clock provider\n");
goto unregister;
@@ -186,9 +188,9 @@ static int s5pv210_audss_clk_probe(struct platform_device *pdev)
return 0;
unregister:
- for (i = 0; i < clk_data.clk_num; i++) {
+ for (i = 0; i < clk_data->num; i++) {
if (!IS_ERR(clk_table[i]))
- clk_unregister(clk_table[i]);
+ clk_hw_unregister(clk_table[i]);
}
return ret;
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index b7d87d6db9dc..7ce0fa86c5ff 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -60,23 +60,18 @@ struct samsung_clk_provider *__init samsung_clk_init(struct device_node *np,
void __iomem *base, unsigned long nr_clks)
{
struct samsung_clk_provider *ctx;
- struct clk **clk_table;
int i;
- ctx = kzalloc(sizeof(struct samsung_clk_provider), GFP_KERNEL);
+ ctx = kzalloc(sizeof(struct samsung_clk_provider) +
+ sizeof(*ctx->clk_data.hws) * nr_clks, GFP_KERNEL);
if (!ctx)
panic("could not allocate clock provider context.\n");
- clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
- if (!clk_table)
- panic("could not allocate clock lookup table\n");
-
for (i = 0; i < nr_clks; ++i)
- clk_table[i] = ERR_PTR(-ENOENT);
+ ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
ctx->reg_base = base;
- ctx->clk_data.clks = clk_table;
- ctx->clk_data.clk_num = nr_clks;
+ ctx->clk_data.num = nr_clks;
spin_lock_init(&ctx->lock);
return ctx;
@@ -86,18 +81,18 @@ void __init samsung_clk_of_add_provider(struct device_node *np,
struct samsung_clk_provider *ctx)
{
if (np) {
- if (of_clk_add_provider(np, of_clk_src_onecell_get,
+ if (of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
&ctx->clk_data))
panic("could not register clk provider\n");
}
}
/* add a clock instance to the clock lookup table used for dt based lookup */
-void samsung_clk_add_lookup(struct samsung_clk_provider *ctx, struct clk *clk,
- unsigned int id)
+void samsung_clk_add_lookup(struct samsung_clk_provider *ctx,
+ struct clk_hw *clk_hw, unsigned int id)
{
- if (ctx->clk_data.clks && id)
- ctx->clk_data.clks[id] = clk;
+ if (id)
+ ctx->clk_data.hws[id] = clk_hw;
}
/* register a list of aliases */
@@ -105,14 +100,9 @@ void __init samsung_clk_register_alias(struct samsung_clk_provider *ctx,
const struct samsung_clock_alias *list,
unsigned int nr_clk)
{
- struct clk *clk;
+ struct clk_hw *clk_hw;
unsigned int idx, ret;
- if (!ctx->clk_data.clks) {
- pr_err("%s: clock table missing\n", __func__);
- return;
- }
-
for (idx = 0; idx < nr_clk; idx++, list++) {
if (!list->id) {
pr_err("%s: clock id missing for index %d\n", __func__,
@@ -120,14 +110,15 @@ void __init samsung_clk_register_alias(struct samsung_clk_provider *ctx,
continue;
}
- clk = ctx->clk_data.clks[list->id];
- if (!clk) {
+ clk_hw = ctx->clk_data.hws[list->id];
+ if (!clk_hw) {
pr_err("%s: failed to find clock %d\n", __func__,
list->id);
continue;
}
- ret = clk_register_clkdev(clk, list->alias, list->dev_name);
+ ret = clk_hw_register_clkdev(clk_hw, list->alias,
+ list->dev_name);
if (ret)
pr_err("%s: failed to register lookup %s\n",
__func__, list->alias);
@@ -139,25 +130,25 @@ void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx,
const struct samsung_fixed_rate_clock *list,
unsigned int nr_clk)
{
- struct clk *clk;
+ struct clk_hw *clk_hw;
unsigned int idx, ret;
for (idx = 0; idx < nr_clk; idx++, list++) {
- clk = clk_register_fixed_rate(NULL, list->name,
+ clk_hw = clk_hw_register_fixed_rate(NULL, list->name,
list->parent_name, list->flags, list->fixed_rate);
- if (IS_ERR(clk)) {
+ if (IS_ERR(clk_hw)) {
pr_err("%s: failed to register clock %s\n", __func__,
list->name);
continue;
}
- samsung_clk_add_lookup(ctx, clk, list->id);
+ samsung_clk_add_lookup(ctx, clk_hw, list->id);
/*
* Unconditionally add a clock lookup for the fixed rate clocks.
* There are not many of these on any of Samsung platforms.
*/
- ret = clk_register_clkdev(clk, list->name, NULL);
+ ret = clk_hw_register_clkdev(clk_hw, list->name, NULL);
if (ret)
pr_err("%s: failed to register clock lookup for %s",
__func__, list->name);
@@ -168,19 +159,19 @@ void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx,
void __init samsung_clk_register_fixed_factor(struct samsung_clk_provider *ctx,
const struct samsung_fixed_factor_clock *list, unsigned int nr_clk)
{
- struct clk *clk;
+ struct clk_hw *clk_hw;
unsigned int idx;
for (idx = 0; idx < nr_clk; idx++, list++) {
- clk = clk_register_fixed_factor(NULL, list->name,
+ clk_hw = clk_hw_register_fixed_factor(NULL, list->name,
list->parent_name, list->flags, list->mult, list->div);
- if (IS_ERR(clk)) {
+ if (IS_ERR(clk_hw)) {
pr_err("%s: failed to register clock %s\n", __func__,
list->name);
continue;
}
- samsung_clk_add_lookup(ctx, clk, list->id);
+ samsung_clk_add_lookup(ctx, clk_hw, list->id);
}
}
@@ -189,25 +180,25 @@ void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx,
const struct samsung_mux_clock *list,
unsigned int nr_clk)
{
- struct clk *clk;
+ struct clk_hw *clk_hw;
unsigned int idx, ret;
for (idx = 0; idx < nr_clk; idx++, list++) {
- clk = clk_register_mux(NULL, list->name, list->parent_names,
- list->num_parents, list->flags,
+ clk_hw = clk_hw_register_mux(NULL, list->name,
+ list->parent_names, list->num_parents, list->flags,
ctx->reg_base + list->offset,
list->shift, list->width, list->mux_flags, &ctx->lock);
- if (IS_ERR(clk)) {
+ if (IS_ERR(clk_hw)) {
pr_err("%s: failed to register clock %s\n", __func__,
list->name);
continue;
}
- samsung_clk_add_lookup(ctx, clk, list->id);
+ samsung_clk_add_lookup(ctx, clk_hw, list->id);
/* register a clock lookup only if a clock alias is specified */
if (list->alias) {
- ret = clk_register_clkdev(clk, list->alias,
+ ret = clk_hw_register_clkdev(clk_hw, list->alias,
list->dev_name);
if (ret)
pr_err("%s: failed to register lookup %s\n",
@@ -221,32 +212,32 @@ void __init samsung_clk_register_div(struct samsung_clk_provider *ctx,
const struct samsung_div_clock *list,
unsigned int nr_clk)
{
- struct clk *clk;
+ struct clk_hw *clk_hw;
unsigned int idx, ret;
for (idx = 0; idx < nr_clk; idx++, list++) {
if (list->table)
- clk = clk_register_divider_table(NULL, list->name,
- list->parent_name, list->flags,
+ clk_hw = clk_hw_register_divider_table(NULL,
+ list->name, list->parent_name, list->flags,
ctx->reg_base + list->offset,
list->shift, list->width, list->div_flags,
list->table, &ctx->lock);
else
- clk = clk_register_divider(NULL, list->name,
+ clk_hw = clk_hw_register_divider(NULL, list->name,
list->parent_name, list->flags,
ctx->reg_base + list->offset, list->shift,
list->width, list->div_flags, &ctx->lock);
- if (IS_ERR(clk)) {
+ if (IS_ERR(clk_hw)) {
pr_err("%s: failed to register clock %s\n", __func__,
list->name);
continue;
}
- samsung_clk_add_lookup(ctx, clk, list->id);
+ samsung_clk_add_lookup(ctx, clk_hw, list->id);
/* register a clock lookup only if a clock alias is specified */
if (list->alias) {
- ret = clk_register_clkdev(clk, list->alias,
+ ret = clk_hw_register_clkdev(clk_hw, list->alias,
list->dev_name);
if (ret)
pr_err("%s: failed to register lookup %s\n",
@@ -260,14 +251,14 @@ void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
const struct samsung_gate_clock *list,
unsigned int nr_clk)
{
- struct clk *clk;
+ struct clk_hw *clk_hw;
unsigned int idx, ret;
for (idx = 0; idx < nr_clk; idx++, list++) {
- clk = clk_register_gate(NULL, list->name, list->parent_name,
+ clk_hw = clk_hw_register_gate(NULL, list->name, list->parent_name,
list->flags, ctx->reg_base + list->offset,
list->bit_idx, list->gate_flags, &ctx->lock);
- if (IS_ERR(clk)) {
+ if (IS_ERR(clk_hw)) {
pr_err("%s: failed to register clock %s\n", __func__,
list->name);
continue;
@@ -275,14 +266,14 @@ void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
/* register a clock lookup only if a clock alias is specified */
if (list->alias) {
- ret = clk_register_clkdev(clk, list->alias,
+ ret = clk_hw_register_clkdev(clk_hw, list->alias,
list->dev_name);
if (ret)
pr_err("%s: failed to register lookup %s\n",
__func__, list->alias);
}
- samsung_clk_add_lookup(ctx, clk, list->id);
+ samsung_clk_add_lookup(ctx, clk_hw, list->id);
}
}
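
The hunks above switch the Samsung helpers from the clk_register_*() consumer-facing API to the clk_hw_register_*() provider API, and route lookups through clk_hw_register_clkdev(). A minimal, hedged sketch of the same pattern outside this driver; my_register_gate, my_lock, the clock names and the 0x10 offset are hypothetical and only illustrate the call shape:

	#include <linux/clk-provider.h>
	#include <linux/clkdev.h>
	#include <linux/err.h>
	#include <linux/printk.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(my_lock);		/* hypothetical provider lock */

	static struct clk_hw *my_register_gate(void __iomem *reg_base)
	{
		struct clk_hw *hw;
		int ret;

		/* Provider-side registration returns a clk_hw, not a clk */
		hw = clk_hw_register_gate(NULL, "my-gate", "my-parent", 0,
					  reg_base + 0x10, 5, 0, &my_lock);
		if (IS_ERR(hw)) {
			pr_err("%s: failed to register clock my-gate\n", __func__);
			return hw;
		}

		/* Legacy con_id/dev_id lookups now take the clk_hw directly */
		ret = clk_hw_register_clkdev(hw, "my-alias", NULL);
		if (ret)
			pr_err("%s: failed to register lookup my-alias\n", __func__);

		return hw;
	}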
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index da3bdebabf1e..b8ca0dd3a38b 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -16,18 +16,17 @@
#include <linux/clk-provider.h>
#include "clk-pll.h"
-struct clk;
-
/**
* struct samsung_clk_provider: information about clock provider
* @reg_base: virtual address for the register base.
- * @clk_data: holds clock related data like clk* and number of clocks.
* @lock: maintains exclusion between callbacks for a given clock-provider.
+ * @clk_data: holds clock related data like clk_hw* and number of clocks.
*/
struct samsung_clk_provider {
void __iomem *reg_base;
- struct clk_onecell_data clk_data;
spinlock_t lock;
+ /* clk_data must be the last entry due to variable length 'hws' array */
+ struct clk_hw_onecell_data clk_data;
};
/**
@@ -367,7 +366,7 @@ extern void __init samsung_clk_of_register_fixed_ext(
const struct of_device_id *clk_matches);
extern void samsung_clk_add_lookup(struct samsung_clk_provider *ctx,
- struct clk *clk, unsigned int id);
+ struct clk_hw *clk_hw, unsigned int id);
extern void __init samsung_clk_register_alias(struct samsung_clk_provider *ctx,
const struct samsung_clock_alias *list,
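
The clk.h hunk embeds a struct clk_hw_onecell_data, whose hws[] member is a flexible array, which is why the comment insists it stay the last field. A hedged sketch of why the allocation must account for that layout; my_provider and my_provider_alloc are generic illustrations, not the driver's exact allocator:

	#include <linux/clk-provider.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct my_provider {				/* hypothetical provider */
		void __iomem *reg_base;
		spinlock_t lock;
		/* must be last: clk_data.hws[] is a flexible array member */
		struct clk_hw_onecell_data clk_data;
	};

	static struct my_provider *my_provider_alloc(unsigned long nr_clks)
	{
		struct my_provider *ctx;

		/* One allocation covers the fixed part plus nr_clks hw pointers */
		ctx = kzalloc(sizeof(*ctx) +
			      sizeof(*ctx->clk_data.hws) * nr_clks, GFP_KERNEL);
		if (!ctx)
			return NULL;

		ctx->clk_data.num = nr_clks;
		return ctx;
	}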
diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c
index c2d572748167..36376c542055 100644
--- a/drivers/clk/socfpga/clk-gate-a10.c
+++ b/drivers/clk/socfpga/clk-gate-a10.c
@@ -86,7 +86,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
}
}
- hs_timing = SYSMGR_SDMMC_CTRL_SET(clk_phase[0], clk_phase[1]);
+ hs_timing = SYSMGR_SDMMC_CTRL_SET_AS10(clk_phase[0], clk_phase[1]);
if (!IS_ERR(socfpgaclk->sys_mgr_base_addr))
regmap_write(socfpgaclk->sys_mgr_base_addr,
SYSMGR_SDMMCGRP_CTRL_OFFSET, hs_timing);
diff --git a/drivers/clk/socfpga/clk.h b/drivers/clk/socfpga/clk.h
index 814c7247bf73..9cf1230115b1 100644
--- a/drivers/clk/socfpga/clk.h
+++ b/drivers/clk/socfpga/clk.h
@@ -32,6 +32,9 @@
#define SYSMGR_SDMMC_CTRL_SET(smplsel, drvsel) \
((((smplsel) & 0x7) << 3) | (((drvsel) & 0x7) << 0))
+#define SYSMGR_SDMMC_CTRL_SET_AS10(smplsel, drvsel) \
+ ((((smplsel) & 0x7) << 4) | (((drvsel) & 0x7) << 0))
+
extern void __iomem *clk_mgr_base_addr;
extern void __iomem *clk_mgr_a10_base_addr;
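
The Arria 10 register layout places smplsel one bit higher than the Cyclone V/Arria V layout, hence the new _AS10 macro. A small compile-time check of the two encodings; the values follow directly from the macros in the hunk above, with smplsel = 3 and drvsel = 1 as an invented example:

	/* Copied from drivers/clk/socfpga/clk.h above */
	#define SYSMGR_SDMMC_CTRL_SET(smplsel, drvsel) \
		((((smplsel) & 0x7) << 3) | (((drvsel) & 0x7) << 0))
	#define SYSMGR_SDMMC_CTRL_SET_AS10(smplsel, drvsel) \
		((((smplsel) & 0x7) << 4) | (((drvsel) & 0x7) << 0))

	/* Worked example: smplsel = 3, drvsel = 1 */
	_Static_assert(SYSMGR_SDMMC_CTRL_SET(3, 1) == 0x19,
		       "Cyclone V/Arria V: smplsel in bits [5:3], drvsel in [2:0]");
	_Static_assert(SYSMGR_SDMMC_CTRL_SET_AS10(3, 1) == 0x31,
		       "Arria 10: smplsel in bits [6:4], drvsel in [2:0]");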
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index eb89c7801f00..7342928c35cd 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -6,157 +6,55 @@ config SUNXI_CCU
if SUNXI_CCU
-# Base clock types
-
-config SUNXI_CCU_DIV
- bool
- select SUNXI_CCU_MUX
-
-config SUNXI_CCU_FRAC
- bool
-
-config SUNXI_CCU_GATE
- def_bool y
-
-config SUNXI_CCU_MUX
- bool
-
-config SUNXI_CCU_MULT
- bool
- select SUNXI_CCU_MUX
-
-config SUNXI_CCU_PHASE
- bool
-
-# Multi-factor clocks
-
-config SUNXI_CCU_NK
- bool
- select SUNXI_CCU_GATE
-
-config SUNXI_CCU_NKM
- bool
- select SUNXI_CCU_GATE
-
-config SUNXI_CCU_NKMP
- bool
- select SUNXI_CCU_GATE
-
-config SUNXI_CCU_NM
- bool
- select SUNXI_CCU_FRAC
- select SUNXI_CCU_GATE
-
-config SUNXI_CCU_MP
- bool
- select SUNXI_CCU_GATE
- select SUNXI_CCU_MUX
-
-# SoC Drivers
-
config SUN50I_A64_CCU
bool "Support for the Allwinner A64 CCU"
- select SUNXI_CCU_DIV
- select SUNXI_CCU_NK
- select SUNXI_CCU_NKM
- select SUNXI_CCU_NKMP
- select SUNXI_CCU_NM
- select SUNXI_CCU_MP
- select SUNXI_CCU_PHASE
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
config SUN5I_CCU
bool "Support for the Allwinner sun5i family CCM"
- select SUNXI_CCU_DIV
- select SUNXI_CCU_MULT
- select SUNXI_CCU_NK
- select SUNXI_CCU_NKM
- select SUNXI_CCU_NM
- select SUNXI_CCU_MP
- select SUNXI_CCU_PHASE
default MACH_SUN5I
depends on MACH_SUN5I || COMPILE_TEST
config SUN6I_A31_CCU
bool "Support for the Allwinner A31/A31s CCU"
- select SUNXI_CCU_DIV
- select SUNXI_CCU_NK
- select SUNXI_CCU_NKM
- select SUNXI_CCU_NKMP
- select SUNXI_CCU_NM
- select SUNXI_CCU_MP
- select SUNXI_CCU_PHASE
default MACH_SUN6I
depends on MACH_SUN6I || COMPILE_TEST
config SUN8I_A23_CCU
bool "Support for the Allwinner A23 CCU"
- select SUNXI_CCU_DIV
- select SUNXI_CCU_MULT
- select SUNXI_CCU_NK
- select SUNXI_CCU_NKM
- select SUNXI_CCU_NKMP
- select SUNXI_CCU_NM
- select SUNXI_CCU_MP
- select SUNXI_CCU_PHASE
default MACH_SUN8I
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_A33_CCU
bool "Support for the Allwinner A33 CCU"
- select SUNXI_CCU_DIV
- select SUNXI_CCU_MULT
- select SUNXI_CCU_NK
- select SUNXI_CCU_NKM
- select SUNXI_CCU_NKMP
- select SUNXI_CCU_NM
- select SUNXI_CCU_MP
- select SUNXI_CCU_PHASE
default MACH_SUN8I
depends on MACH_SUN8I || COMPILE_TEST
+config SUN8I_A83T_CCU
+ bool "Support for the Allwinner A83T CCU"
+ default MACH_SUN8I
+
config SUN8I_H3_CCU
bool "Support for the Allwinner H3 CCU"
- select SUNXI_CCU_DIV
- select SUNXI_CCU_NK
- select SUNXI_CCU_NKM
- select SUNXI_CCU_NKMP
- select SUNXI_CCU_NM
- select SUNXI_CCU_MP
- select SUNXI_CCU_PHASE
default MACH_SUN8I || (ARM64 && ARCH_SUNXI)
depends on MACH_SUN8I || (ARM64 && ARCH_SUNXI) || COMPILE_TEST
config SUN8I_V3S_CCU
bool "Support for the Allwinner V3s CCU"
- select SUNXI_CCU_DIV
- select SUNXI_CCU_NK
- select SUNXI_CCU_NKM
- select SUNXI_CCU_NKMP
- select SUNXI_CCU_NM
- select SUNXI_CCU_MP
- select SUNXI_CCU_PHASE
default MACH_SUN8I
depends on MACH_SUN8I || COMPILE_TEST
+config SUN8I_DE2_CCU
+ bool "Support for the Allwinner SoCs DE2 CCU"
+
config SUN9I_A80_CCU
bool "Support for the Allwinner A80 CCU"
- select SUNXI_CCU_DIV
- select SUNXI_CCU_MULT
- select SUNXI_CCU_GATE
- select SUNXI_CCU_NKMP
- select SUNXI_CCU_NM
- select SUNXI_CCU_MP
- select SUNXI_CCU_PHASE
default MACH_SUN9I
depends on MACH_SUN9I || COMPILE_TEST
config SUN8I_R_CCU
bool "Support for Allwinner SoCs' PRCM CCUs"
- select SUNXI_CCU_DIV
- select SUNXI_CCU_GATE
- select SUNXI_CCU_MP
default MACH_SUN8I || (ARCH_SUNXI && ARM64)
endif
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 0ec02fe14c50..0c45fa50283d 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -1,21 +1,21 @@
# Common objects
-obj-$(CONFIG_SUNXI_CCU) += ccu_common.o
-obj-$(CONFIG_SUNXI_CCU) += ccu_reset.o
+lib-$(CONFIG_SUNXI_CCU) += ccu_common.o
+lib-$(CONFIG_SUNXI_CCU) += ccu_reset.o
# Base clock types
-obj-$(CONFIG_SUNXI_CCU_DIV) += ccu_div.o
-obj-$(CONFIG_SUNXI_CCU_FRAC) += ccu_frac.o
-obj-$(CONFIG_SUNXI_CCU_GATE) += ccu_gate.o
-obj-$(CONFIG_SUNXI_CCU_MUX) += ccu_mux.o
-obj-$(CONFIG_SUNXI_CCU_MULT) += ccu_mult.o
-obj-$(CONFIG_SUNXI_CCU_PHASE) += ccu_phase.o
+lib-$(CONFIG_SUNXI_CCU) += ccu_div.o
+lib-$(CONFIG_SUNXI_CCU) += ccu_frac.o
+lib-$(CONFIG_SUNXI_CCU) += ccu_gate.o
+lib-$(CONFIG_SUNXI_CCU) += ccu_mux.o
+lib-$(CONFIG_SUNXI_CCU) += ccu_mult.o
+lib-$(CONFIG_SUNXI_CCU) += ccu_phase.o
# Multi-factor clocks
-obj-$(CONFIG_SUNXI_CCU_NK) += ccu_nk.o
-obj-$(CONFIG_SUNXI_CCU_NKM) += ccu_nkm.o
-obj-$(CONFIG_SUNXI_CCU_NKMP) += ccu_nkmp.o
-obj-$(CONFIG_SUNXI_CCU_NM) += ccu_nm.o
-obj-$(CONFIG_SUNXI_CCU_MP) += ccu_mp.o
+lib-$(CONFIG_SUNXI_CCU) += ccu_nk.o
+lib-$(CONFIG_SUNXI_CCU) += ccu_nkm.o
+lib-$(CONFIG_SUNXI_CCU) += ccu_nkmp.o
+lib-$(CONFIG_SUNXI_CCU) += ccu_nm.o
+lib-$(CONFIG_SUNXI_CCU) += ccu_mp.o
# SoC support
obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
@@ -23,9 +23,20 @@ obj-$(CONFIG_SUN5I_CCU) += ccu-sun5i.o
obj-$(CONFIG_SUN6I_A31_CCU) += ccu-sun6i-a31.o
obj-$(CONFIG_SUN8I_A23_CCU) += ccu-sun8i-a23.o
obj-$(CONFIG_SUN8I_A33_CCU) += ccu-sun8i-a33.o
+obj-$(CONFIG_SUN8I_A83T_CCU) += ccu-sun8i-a83t.o
obj-$(CONFIG_SUN8I_H3_CCU) += ccu-sun8i-h3.o
obj-$(CONFIG_SUN8I_V3S_CCU) += ccu-sun8i-v3s.o
+obj-$(CONFIG_SUN8I_DE2_CCU) += ccu-sun8i-de2.o
obj-$(CONFIG_SUN8I_R_CCU) += ccu-sun8i-r.o
obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o
obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o
obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o
+
+# The lib-y file goal is supposed to work only in arch/*/lib or lib/. In our
+# case, we want to use that goal, but even though lib.a will be properly
+# generated, it will not be linked in, eventually resulting in a linker error
+# for missing symbols.
+#
+# We can work around that by explicitly adding lib.a to the obj-y goal. This is
+# an undocumented behaviour, but works well for now.
+obj-$(CONFIG_SUNXI_CCU) += lib.a
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index f54114c607df..2bb4cabf802f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -211,6 +211,9 @@ static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x050, 0, 2, 0);
static const char * const ahb1_parents[] = { "osc32k", "osc24M",
"axi", "pll-periph0" };
+static const struct ccu_mux_var_prediv ahb1_predivs[] = {
+ { .index = 3, .shift = 6, .width = 2 },
+};
static struct ccu_div ahb1_clk = {
.div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
@@ -218,11 +221,8 @@ static struct ccu_div ahb1_clk = {
.shift = 12,
.width = 2,
- .variable_prediv = {
- .index = 3,
- .shift = 6,
- .width = 2,
- },
+ .var_predivs = ahb1_predivs,
+ .n_var_predivs = ARRAY_SIZE(ahb1_predivs),
},
.common = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.h b/drivers/clk/sunxi-ng/ccu-sun5i.h
index 8144487eb7ca..93a275fbd9a9 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.h
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.h
@@ -28,15 +28,17 @@
#define CLK_PLL_AUDIO_4X 6
#define CLK_PLL_AUDIO_8X 7
#define CLK_PLL_VIDEO0 8
-#define CLK_PLL_VIDEO0_2X 9
+
+/* The PLL_VIDEO0_2X is exported for HDMI */
+
#define CLK_PLL_VE 10
#define CLK_PLL_DDR_BASE 11
#define CLK_PLL_DDR 12
#define CLK_PLL_DDR_OTHER 13
#define CLK_PLL_PERIPH 14
#define CLK_PLL_VIDEO1 15
-#define CLK_PLL_VIDEO1_2X 16
+/* The PLL_VIDEO1_2X is exported for HDMI */
/* The CPU clock is exported */
#define CLK_AXI 18
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index df97e25aec76..4d6078fca9ac 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -195,6 +195,9 @@ static SUNXI_CCU_DIV_TABLE(axi_clk, "axi", "cpu",
static const char * const ahb1_parents[] = { "osc32k", "osc24M",
"axi", "pll-periph" };
+static const struct ccu_mux_var_prediv ahb1_predivs[] = {
+ { .index = 3, .shift = 6, .width = 2 },
+};
static struct ccu_div ahb1_clk = {
.div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
@@ -203,11 +206,8 @@ static struct ccu_div ahb1_clk = {
.shift = 12,
.width = 2,
- .variable_prediv = {
- .index = 3,
- .shift = 6,
- .width = 2,
- },
+ .var_predivs = ahb1_predivs,
+ .n_var_predivs = ARRAY_SIZE(ahb1_predivs),
},
.common = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index 5c6d37bdf247..8a753ed0426d 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -169,6 +169,9 @@ static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x050, 0, 2, 0);
static const char * const ahb1_parents[] = { "osc32k", "osc24M",
"axi" , "pll-periph" };
+static const struct ccu_mux_var_prediv ahb1_predivs[] = {
+ { .index = 3, .shift = 6, .width = 2 },
+};
static struct ccu_div ahb1_clk = {
.div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
@@ -176,11 +179,8 @@ static struct ccu_div ahb1_clk = {
.shift = 12,
.width = 2,
- .variable_prediv = {
- .index = 3,
- .shift = 6,
- .width = 2,
- },
+ .var_predivs = ahb1_predivs,
+ .n_var_predivs = ARRAY_SIZE(ahb1_predivs),
},
.common = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index 8d38e6510e29..10b38dc46f75 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -180,6 +180,9 @@ static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x050, 0, 2, 0);
static const char * const ahb1_parents[] = { "osc32k", "osc24M",
"axi" , "pll-periph" };
+static const struct ccu_mux_var_prediv ahb1_predivs[] = {
+ { .index = 3, .shift = 6, .width = 2 },
+};
static struct ccu_div ahb1_clk = {
.div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
@@ -187,11 +190,8 @@ static struct ccu_div ahb1_clk = {
.shift = 12,
.width = 2,
- .variable_prediv = {
- .index = 3,
- .shift = 6,
- .width = 2,
- },
+ .var_predivs = ahb1_predivs,
+ .n_var_predivs = ARRAY_SIZE(ahb1_predivs),
},
.common = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
new file mode 100644
index 000000000000..947f9f6e05d2
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
@@ -0,0 +1,922 @@
+/*
+ * Copyright (c) 2017 Chen-Yu Tsai. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mux.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+#include "ccu_phase.h"
+
+#include "ccu-sun8i-a83t.h"
+
+#define CCU_SUN8I_A83T_LOCK_REG 0x20c
+
+/*
+ * The CPU PLLs are actually NP clocks, with P being /1 or /4. However
+ * P should only be used for output frequencies lower than 288 MHz.
+ * Neither mainline Linux, U-boot, nor the vendor BSPs use these.
+ *
+ * For now we can just model it as a multiplier clock, and force P to /1.
+ */
+#define SUN8I_A83T_PLL_C0CPUX_REG 0x000
+#define SUN8I_A83T_PLL_C1CPUX_REG 0x004
+
+static struct ccu_mult pll_c0cpux_clk = {
+ .enable = BIT(31),
+ .lock = BIT(0),
+ .mult = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0),
+ .common = {
+ .reg = SUN8I_A83T_PLL_C0CPUX_REG,
+ .lock_reg = CCU_SUN8I_A83T_LOCK_REG,
+ .features = CCU_FEATURE_LOCK_REG,
+ .hw.init = CLK_HW_INIT("pll-c0cpux", "osc24M",
+ &ccu_mult_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static struct ccu_mult pll_c1cpux_clk = {
+ .enable = BIT(31),
+ .lock = BIT(1),
+ .mult = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0),
+ .common = {
+ .reg = SUN8I_A83T_PLL_C1CPUX_REG,
+ .lock_reg = CCU_SUN8I_A83T_LOCK_REG,
+ .features = CCU_FEATURE_LOCK_REG,
+ .hw.init = CLK_HW_INIT("pll-c1cpux", "osc24M",
+ &ccu_mult_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/*
+ * The Audio PLL has d1, d2 dividers in addition to the usual N, M
+ * factors. Since we only need 2 frequencies from this PLL: 22.5792 MHz
+ * and 24.576 MHz, ignore them for now. Enforce the default for them,
+ * which is d1 = 0, d2 = 1.
+ */
+#define SUN8I_A83T_PLL_AUDIO_REG 0x008
+
+static struct ccu_nm pll_audio_clk = {
+ .enable = BIT(31),
+ .lock = BIT(2),
+ .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0),
+ .m = _SUNXI_CCU_DIV(0, 6),
+ .common = {
+ .reg = SUN8I_A83T_PLL_AUDIO_REG,
+ .lock_reg = CCU_SUN8I_A83T_LOCK_REG,
+ .features = CCU_FEATURE_LOCK_REG,
+ .hw.init = CLK_HW_INIT("pll-audio", "osc24M",
+ &ccu_nm_ops, CLK_SET_RATE_UNGATE),
+ },
+};
+
+/* Some PLLs are input * N / div1 / P. Model them as NKMP with no K */
+static struct ccu_nkmp pll_video0_clk = {
+ .enable = BIT(31),
+ .lock = BIT(3),
+ .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0),
+ .m = _SUNXI_CCU_DIV(16, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 2), /* output divider */
+ .common = {
+ .reg = 0x010,
+ .lock_reg = CCU_SUN8I_A83T_LOCK_REG,
+ .features = CCU_FEATURE_LOCK_REG,
+ .hw.init = CLK_HW_INIT("pll-video0", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static struct ccu_nkmp pll_ve_clk = {
+ .enable = BIT(31),
+ .lock = BIT(4),
+ .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0),
+ .m = _SUNXI_CCU_DIV(16, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(18, 1), /* output divider */
+ .common = {
+ .reg = 0x018,
+ .lock_reg = CCU_SUN8I_A83T_LOCK_REG,
+ .features = CCU_FEATURE_LOCK_REG,
+ .hw.init = CLK_HW_INIT("pll-ve", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static struct ccu_nkmp pll_ddr_clk = {
+ .enable = BIT(31),
+ .lock = BIT(5),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(16, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(18, 1), /* output divider */
+ .common = {
+ .reg = 0x020,
+ .lock_reg = CCU_SUN8I_A83T_LOCK_REG,
+ .features = CCU_FEATURE_LOCK_REG,
+ .hw.init = CLK_HW_INIT("pll-ddr", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static struct ccu_nkmp pll_periph_clk = {
+ .enable = BIT(31),
+ .lock = BIT(6),
+ .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0),
+ .m = _SUNXI_CCU_DIV(16, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(18, 1), /* output divider */
+ .common = {
+ .reg = 0x028,
+ .lock_reg = CCU_SUN8I_A83T_LOCK_REG,
+ .features = CCU_FEATURE_LOCK_REG,
+ .hw.init = CLK_HW_INIT("pll-periph", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static struct ccu_nkmp pll_gpu_clk = {
+ .enable = BIT(31),
+ .lock = BIT(7),
+ .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0),
+ .m = _SUNXI_CCU_DIV(16, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(18, 1), /* output divider */
+ .common = {
+ .reg = 0x038,
+ .lock_reg = CCU_SUN8I_A83T_LOCK_REG,
+ .features = CCU_FEATURE_LOCK_REG,
+ .hw.init = CLK_HW_INIT("pll-gpu", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static struct ccu_nkmp pll_hsic_clk = {
+ .enable = BIT(31),
+ .lock = BIT(8),
+ .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0),
+ .m = _SUNXI_CCU_DIV(16, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(18, 1), /* output divider */
+ .common = {
+ .reg = 0x044,
+ .lock_reg = CCU_SUN8I_A83T_LOCK_REG,
+ .features = CCU_FEATURE_LOCK_REG,
+ .hw.init = CLK_HW_INIT("pll-hsic", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static struct ccu_nkmp pll_de_clk = {
+ .enable = BIT(31),
+ .lock = BIT(9),
+ .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0),
+ .m = _SUNXI_CCU_DIV(16, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(18, 1), /* output divider */
+ .common = {
+ .reg = 0x048,
+ .lock_reg = CCU_SUN8I_A83T_LOCK_REG,
+ .features = CCU_FEATURE_LOCK_REG,
+ .hw.init = CLK_HW_INIT("pll-de", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static struct ccu_nkmp pll_video1_clk = {
+ .enable = BIT(31),
+ .lock = BIT(10),
+ .n = _SUNXI_CCU_MULT_OFFSET_MIN_MAX(8, 8, 0, 12, 0),
+ .m = _SUNXI_CCU_DIV(16, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 2), /* external divider p */
+ .common = {
+ .reg = 0x04c,
+ .lock_reg = CCU_SUN8I_A83T_LOCK_REG,
+ .features = CCU_FEATURE_LOCK_REG,
+ .hw.init = CLK_HW_INIT("pll-video1", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static const char * const c0cpux_parents[] = { "osc24M", "pll-c0cpux" };
+static SUNXI_CCU_MUX(c0cpux_clk, "c0cpux", c0cpux_parents,
+ 0x50, 12, 1, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+
+static const char * const c1cpux_parents[] = { "osc24M", "pll-c1cpux" };
+static SUNXI_CCU_MUX(c1cpux_clk, "c1cpux", c1cpux_parents,
+ 0x50, 28, 1, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+
+static SUNXI_CCU_M(axi0_clk, "axi0", "c0cpux", 0x050, 0, 2, 0);
+static SUNXI_CCU_M(axi1_clk, "axi1", "c1cpux", 0x050, 16, 2, 0);
+
+static const char * const ahb1_parents[] = { "osc16M-d512", "osc24M",
+ "pll-periph",
+ "pll-periph" };
+static const struct ccu_mux_var_prediv ahb1_predivs[] = {
+ { .index = 2, .shift = 6, .width = 2 },
+ { .index = 3, .shift = 6, .width = 2 },
+};
+static struct ccu_div ahb1_clk = {
+ .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
+ .mux = {
+ .shift = 12,
+ .width = 2,
+
+ .var_predivs = ahb1_predivs,
+ .n_var_predivs = ARRAY_SIZE(ahb1_predivs),
+ },
+ .common = {
+ .reg = 0x054,
+ .hw.init = CLK_HW_INIT_PARENTS("ahb1",
+ ahb1_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_M(apb1_clk, "apb1", "ahb1", 0x054, 8, 2, 0);
+
+static const char * const apb2_parents[] = { "osc16M-d512", "osc24M",
+ "pll-periph", "pll-periph" };
+
+static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", apb2_parents, 0x058,
+ 0, 5, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static const char * const ahb2_parents[] = { "ahb1", "pll-periph" };
+static const struct ccu_mux_fixed_prediv ahb2_prediv = {
+ .index = 1, .div = 2
+};
+static struct ccu_mux ahb2_clk = {
+ .mux = {
+ .shift = 0,
+ .width = 2,
+ .fixed_predivs = &ahb2_prediv,
+ .n_predivs = 1,
+ },
+ .common = {
+ .reg = 0x05c,
+ .hw.init = CLK_HW_INIT_PARENTS("ahb2",
+ ahb2_parents,
+ &ccu_mux_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE(bus_mipi_dsi_clk, "bus-mipi-dsi", "ahb1",
+ 0x060, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_ss_clk, "bus-ss", "ahb1",
+ 0x060, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "ahb1",
+ 0x060, BIT(6), 0);
+static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb1",
+ 0x060, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb1",
+ 0x060, BIT(9), 0);
+static SUNXI_CCU_GATE(bus_mmc2_clk, "bus-mmc2", "ahb1",
+ 0x060, BIT(10), 0);
+static SUNXI_CCU_GATE(bus_nand_clk, "bus-nand", "ahb1",
+ 0x060, BIT(13), 0);
+static SUNXI_CCU_GATE(bus_dram_clk, "bus-dram", "ahb1",
+ 0x060, BIT(14), 0);
+static SUNXI_CCU_GATE(bus_emac_clk, "bus-emac", "ahb2",
+ 0x060, BIT(17), 0);
+static SUNXI_CCU_GATE(bus_hstimer_clk, "bus-hstimer", "ahb1",
+ 0x060, BIT(19), 0);
+static SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb1",
+ 0x060, BIT(20), 0);
+static SUNXI_CCU_GATE(bus_spi1_clk, "bus-spi1", "ahb1",
+ 0x060, BIT(21), 0);
+static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb1",
+ 0x060, BIT(24), 0);
+static SUNXI_CCU_GATE(bus_ehci0_clk, "bus-ehci0", "ahb2",
+ 0x060, BIT(26), 0);
+static SUNXI_CCU_GATE(bus_ehci1_clk, "bus-ehci1", "ahb2",
+ 0x060, BIT(27), 0);
+static SUNXI_CCU_GATE(bus_ohci0_clk, "bus-ohci0", "ahb2",
+ 0x060, BIT(29), 0);
+
+static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "ahb1",
+ 0x064, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_tcon0_clk, "bus-tcon0", "ahb1",
+ 0x064, BIT(4), 0);
+static SUNXI_CCU_GATE(bus_tcon1_clk, "bus-tcon1", "ahb1",
+ 0x064, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_csi_clk, "bus-csi", "ahb1",
+ 0x064, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_hdmi_clk, "bus-hdmi", "ahb1",
+ 0x064, BIT(11), 0);
+static SUNXI_CCU_GATE(bus_de_clk, "bus-de", "ahb1",
+ 0x064, BIT(12), 0);
+static SUNXI_CCU_GATE(bus_gpu_clk, "bus-gpu", "ahb1",
+ 0x064, BIT(20), 0);
+static SUNXI_CCU_GATE(bus_msgbox_clk, "bus-msgbox", "ahb1",
+ 0x064, BIT(21), 0);
+static SUNXI_CCU_GATE(bus_spinlock_clk, "bus-spinlock", "ahb1",
+ 0x064, BIT(22), 0);
+
+static SUNXI_CCU_GATE(bus_spdif_clk, "bus-spdif", "apb1",
+ 0x068, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_pio_clk, "bus-pio", "apb1",
+ 0x068, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_i2s0_clk, "bus-i2s0", "apb1",
+ 0x068, BIT(12), 0);
+static SUNXI_CCU_GATE(bus_i2s1_clk, "bus-i2s1", "apb1",
+ 0x068, BIT(13), 0);
+static SUNXI_CCU_GATE(bus_i2s2_clk, "bus-i2s2", "apb1",
+ 0x068, BIT(14), 0);
+static SUNXI_CCU_GATE(bus_tdm_clk, "bus-tdm", "apb1",
+ 0x068, BIT(15), 0);
+
+static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2",
+ 0x06c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2",
+ 0x06c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2",
+ 0x06c, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2",
+ 0x06c, BIT(16), 0);
+static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2",
+ 0x06c, BIT(17), 0);
+static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb2",
+ 0x06c, BIT(18), 0);
+static SUNXI_CCU_GATE(bus_uart3_clk, "bus-uart3", "apb2",
+ 0x06c, BIT(19), 0);
+static SUNXI_CCU_GATE(bus_uart4_clk, "bus-uart4", "apb2",
+ 0x06c, BIT(20), 0);
+
+static const char * const cci400_parents[] = { "osc24M", "pll-periph",
+ "pll-hsic" };
+static struct ccu_div cci400_clk = {
+ .div = _SUNXI_CCU_DIV_FLAGS(0, 2, 0),
+ .mux = _SUNXI_CCU_MUX(24, 2),
+ .common = {
+ .reg = 0x078,
+ .hw.init = CLK_HW_INIT_PARENTS("cci400",
+ cci400_parents,
+ &ccu_div_ops,
+ CLK_IS_CRITICAL),
+ },
+};
+
+static const char * const mod0_default_parents[] = { "osc24M", "pll-periph" };
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(nand_clk, "nand", mod0_default_parents,
+ 0x080,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mod0_default_parents,
+ 0x088,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_PHASE(mmc0_sample_clk, "mmc0-sample", "mmc0",
+ 0x088, 20, 3, 0);
+static SUNXI_CCU_PHASE(mmc0_output_clk, "mmc0-output", "mmc0",
+ 0x088, 8, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mod0_default_parents,
+ 0x08c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_PHASE(mmc1_sample_clk, "mmc1-sample", "mmc1",
+ 0x08c, 20, 3, 0);
+static SUNXI_CCU_PHASE(mmc1_output_clk, "mmc1-output", "mmc1",
+ 0x08c, 8, 3, 0);
+
+/* TODO Support MMC2 clock's new timing mode. */
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc2_clk, "mmc2", mod0_default_parents,
+ 0x090,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_PHASE(mmc2_sample_clk, "mmc2-sample", "mmc2",
+ 0x090, 20, 3, 0);
+static SUNXI_CCU_PHASE(mmc2_output_clk, "mmc2-output", "mmc2",
+ 0x090, 8, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(ss_clk, "ss", mod0_default_parents,
+ 0x09c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi0_clk, "spi0", mod0_default_parents,
+ 0x0a0,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 4, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents,
+ 0x0a4,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 4, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_M_WITH_GATE(i2s0_clk, "i2s0", "pll-audio",
+ 0x0b0, 0, 4, BIT(31), CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M_WITH_GATE(i2s1_clk, "i2s1", "pll-audio",
+ 0x0b4, 0, 4, BIT(31), CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M_WITH_GATE(i2s2_clk, "i2s2", "pll-audio",
+ 0x0b8, 0, 4, BIT(31), CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M_WITH_GATE(tdm_clk, "tdm", "pll-audio",
+ 0x0bc, 0, 4, BIT(31), CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M_WITH_GATE(spdif_clk, "spdif", "pll-audio",
+ 0x0c0, 0, 4, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
+ 0x0cc, BIT(8), 0);
+static SUNXI_CCU_GATE(usb_phy1_clk, "usb-phy1", "osc24M",
+ 0x0cc, BIT(9), 0);
+static SUNXI_CCU_GATE(usb_hsic_clk, "usb-hsic", "pll-hsic",
+ 0x0cc, BIT(10), 0);
+static struct ccu_gate usb_hsic_12m_clk = {
+ .enable = BIT(11),
+ .common = {
+ .reg = 0x0cc,
+ .prediv = 2,
+ .features = CCU_FEATURE_ALL_PREDIV,
+ .hw.init = CLK_HW_INIT("usb-hsic-12m", "osc24M",
+ &ccu_gate_ops, 0),
+ }
+};
+static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc24M",
+ 0x0cc, BIT(16), 0);
+
+/* TODO divider has minimum of 2 */
+static SUNXI_CCU_M(dram_clk, "dram", "pll-ddr", 0x0f4, 0, 4, CLK_IS_CRITICAL);
+
+static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "dram",
+ 0x100, BIT(0), 0);
+static SUNXI_CCU_GATE(dram_csi_clk, "dram-csi", "dram",
+ 0x100, BIT(1), 0);
+
+static const char * const tcon0_parents[] = { "pll-video0" };
+static SUNXI_CCU_MUX_WITH_GATE(tcon0_clk, "tcon0", tcon0_parents,
+ 0x118, 24, 3, BIT(31), CLK_SET_RATE_PARENT);
+
+static const char * const tcon1_parents[] = { "pll-video1" };
+static SUNXI_CCU_MUX_WITH_GATE(tcon1_clk, "tcon1", tcon1_parents,
+ 0x11c, 24, 3, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(csi_misc_clk, "csi-misc", "osc24M", 0x130, BIT(16), 0);
+
+static SUNXI_CCU_GATE(mipi_csi_clk, "mipi-csi", "osc24M", 0x130, BIT(31), 0);
+
+static const char * const csi_mclk_parents[] = { "pll-de", "osc24M" };
+static const u8 csi_mclk_table[] = { 3, 5 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi_mclk_clk, "csi-mclk",
+ csi_mclk_parents, csi_mclk_table,
+ 0x134,
+ 0, 5, /* M */
+ 10, 3, /* mux */
+ BIT(15), /* gate */
+ 0);
+
+static const char * const csi_sclk_parents[] = { "pll-periph", "pll-ve" };
+static const u8 csi_sclk_table[] = { 0, 5 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi_sclk_clk, "csi-sclk",
+ csi_sclk_parents, csi_sclk_table,
+ 0x134,
+ 16, 4, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve", 0x13c,
+ 16, 3, BIT(31), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M", 0x144, BIT(31), 0);
+
+static const char * const hdmi_parents[] = { "pll-video1" };
+static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", hdmi_parents,
+ 0x150,
+ 0, 4, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE(hdmi_slow_clk, "hdmi-slow", "osc24M", 0x154, BIT(31), 0);
+
+static const char * const mbus_parents[] = { "osc24M", "pll-periph",
+ "pll-ddr" };
+static SUNXI_CCU_M_WITH_MUX_GATE(mbus_clk, "mbus", mbus_parents,
+ 0x15c,
+ 0, 3, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ CLK_IS_CRITICAL);
+
+static const char * const mipi_dsi0_parents[] = { "pll-video0" };
+static const u8 mipi_dsi0_table[] = { 8 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(mipi_dsi0_clk, "mipi-dsi0",
+ mipi_dsi0_parents, mipi_dsi0_table,
+ 0x168,
+ 0, 4, /* M */
+ 24, 4, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static const char * const mipi_dsi1_parents[] = { "osc24M", "pll-video0" };
+static const u8 mipi_dsi1_table[] = { 0, 9 };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(mipi_dsi1_clk, "mipi-dsi1",
+ mipi_dsi1_parents, mipi_dsi1_table,
+ 0x16c,
+ 0, 4, /* M */
+ 24, 4, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_WITH_GATE(gpu_core_clk, "gpu-core", "pll-gpu", 0x1a0,
+ 0, 3, BIT(31), CLK_SET_RATE_PARENT);
+
+static const char * const gpu_memory_parents[] = { "pll-gpu", "pll-ddr" };
+static SUNXI_CCU_M_WITH_MUX_GATE(gpu_memory_clk, "gpu-memory",
+ gpu_memory_parents,
+ 0x1a4,
+ 0, 3, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M_WITH_GATE(gpu_hyd_clk, "gpu-hyd", "pll-gpu", 0x1a8,
+ 0, 3, BIT(31), CLK_SET_RATE_PARENT);
+
+static struct ccu_common *sun8i_a83t_ccu_clks[] = {
+ &pll_c0cpux_clk.common,
+ &pll_c1cpux_clk.common,
+ &pll_audio_clk.common,
+ &pll_video0_clk.common,
+ &pll_ve_clk.common,
+ &pll_ddr_clk.common,
+ &pll_periph_clk.common,
+ &pll_gpu_clk.common,
+ &pll_hsic_clk.common,
+ &pll_de_clk.common,
+ &pll_video1_clk.common,
+ &c0cpux_clk.common,
+ &c1cpux_clk.common,
+ &axi0_clk.common,
+ &axi1_clk.common,
+ &ahb1_clk.common,
+ &ahb2_clk.common,
+ &apb1_clk.common,
+ &apb2_clk.common,
+ &bus_mipi_dsi_clk.common,
+ &bus_ss_clk.common,
+ &bus_dma_clk.common,
+ &bus_mmc0_clk.common,
+ &bus_mmc1_clk.common,
+ &bus_mmc2_clk.common,
+ &bus_nand_clk.common,
+ &bus_dram_clk.common,
+ &bus_emac_clk.common,
+ &bus_hstimer_clk.common,
+ &bus_spi0_clk.common,
+ &bus_spi1_clk.common,
+ &bus_otg_clk.common,
+ &bus_ehci0_clk.common,
+ &bus_ehci1_clk.common,
+ &bus_ohci0_clk.common,
+ &bus_ve_clk.common,
+ &bus_tcon0_clk.common,
+ &bus_tcon1_clk.common,
+ &bus_csi_clk.common,
+ &bus_hdmi_clk.common,
+ &bus_de_clk.common,
+ &bus_gpu_clk.common,
+ &bus_msgbox_clk.common,
+ &bus_spinlock_clk.common,
+ &bus_spdif_clk.common,
+ &bus_pio_clk.common,
+ &bus_i2s0_clk.common,
+ &bus_i2s1_clk.common,
+ &bus_i2s2_clk.common,
+ &bus_tdm_clk.common,
+ &bus_i2c0_clk.common,
+ &bus_i2c1_clk.common,
+ &bus_i2c2_clk.common,
+ &bus_uart0_clk.common,
+ &bus_uart1_clk.common,
+ &bus_uart2_clk.common,
+ &bus_uart3_clk.common,
+ &bus_uart4_clk.common,
+ &cci400_clk.common,
+ &nand_clk.common,
+ &mmc0_clk.common,
+ &mmc0_sample_clk.common,
+ &mmc0_output_clk.common,
+ &mmc1_clk.common,
+ &mmc1_sample_clk.common,
+ &mmc1_output_clk.common,
+ &mmc2_clk.common,
+ &mmc2_sample_clk.common,
+ &mmc2_output_clk.common,
+ &ss_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &i2s0_clk.common,
+ &i2s1_clk.common,
+ &i2s2_clk.common,
+ &tdm_clk.common,
+ &spdif_clk.common,
+ &usb_phy0_clk.common,
+ &usb_phy1_clk.common,
+ &usb_hsic_clk.common,
+ &usb_hsic_12m_clk.common,
+ &usb_ohci0_clk.common,
+ &dram_clk.common,
+ &dram_ve_clk.common,
+ &dram_csi_clk.common,
+ &tcon0_clk.common,
+ &tcon1_clk.common,
+ &csi_misc_clk.common,
+ &mipi_csi_clk.common,
+ &csi_mclk_clk.common,
+ &csi_sclk_clk.common,
+ &ve_clk.common,
+ &avs_clk.common,
+ &hdmi_clk.common,
+ &hdmi_slow_clk.common,
+ &mbus_clk.common,
+ &mipi_dsi0_clk.common,
+ &mipi_dsi1_clk.common,
+ &gpu_core_clk.common,
+ &gpu_memory_clk.common,
+ &gpu_hyd_clk.common,
+};
+
+static struct clk_hw_onecell_data sun8i_a83t_hw_clks = {
+ .hws = {
+ [CLK_PLL_C0CPUX] = &pll_c0cpux_clk.common.hw,
+ [CLK_PLL_C1CPUX] = &pll_c1cpux_clk.common.hw,
+ [CLK_PLL_AUDIO] = &pll_audio_clk.common.hw,
+ [CLK_PLL_VIDEO0] = &pll_video0_clk.common.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+ [CLK_PLL_DDR] = &pll_ddr_clk.common.hw,
+ [CLK_PLL_PERIPH] = &pll_periph_clk.common.hw,
+ [CLK_PLL_GPU] = &pll_gpu_clk.common.hw,
+ [CLK_PLL_HSIC] = &pll_hsic_clk.common.hw,
+ [CLK_PLL_DE] = &pll_de_clk.common.hw,
+ [CLK_PLL_VIDEO1] = &pll_video1_clk.common.hw,
+ [CLK_C0CPUX] = &c0cpux_clk.common.hw,
+ [CLK_C1CPUX] = &c1cpux_clk.common.hw,
+ [CLK_AXI0] = &axi0_clk.common.hw,
+ [CLK_AXI1] = &axi1_clk.common.hw,
+ [CLK_AHB1] = &ahb1_clk.common.hw,
+ [CLK_AHB2] = &ahb2_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_APB2] = &apb2_clk.common.hw,
+ [CLK_BUS_MIPI_DSI] = &bus_mipi_dsi_clk.common.hw,
+ [CLK_BUS_SS] = &bus_ss_clk.common.hw,
+ [CLK_BUS_DMA] = &bus_dma_clk.common.hw,
+ [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw,
+ [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw,
+ [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw,
+ [CLK_BUS_NAND] = &bus_nand_clk.common.hw,
+ [CLK_BUS_DRAM] = &bus_dram_clk.common.hw,
+ [CLK_BUS_EMAC] = &bus_emac_clk.common.hw,
+ [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw,
+ [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
+ [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw,
+ [CLK_BUS_OTG] = &bus_otg_clk.common.hw,
+ [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw,
+ [CLK_BUS_EHCI1] = &bus_ehci1_clk.common.hw,
+ [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw,
+ [CLK_BUS_VE] = &bus_ve_clk.common.hw,
+ [CLK_BUS_TCON0] = &bus_tcon0_clk.common.hw,
+ [CLK_BUS_TCON1] = &bus_tcon1_clk.common.hw,
+ [CLK_BUS_CSI] = &bus_csi_clk.common.hw,
+ [CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw,
+ [CLK_BUS_DE] = &bus_de_clk.common.hw,
+ [CLK_BUS_GPU] = &bus_gpu_clk.common.hw,
+ [CLK_BUS_MSGBOX] = &bus_msgbox_clk.common.hw,
+ [CLK_BUS_SPINLOCK] = &bus_spinlock_clk.common.hw,
+ [CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw,
+ [CLK_BUS_PIO] = &bus_pio_clk.common.hw,
+ [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw,
+ [CLK_BUS_I2S1] = &bus_i2s1_clk.common.hw,
+ [CLK_BUS_I2S2] = &bus_i2s2_clk.common.hw,
+ [CLK_BUS_TDM] = &bus_tdm_clk.common.hw,
+ [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw,
+ [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
+ [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
+ [CLK_BUS_UART0] = &bus_uart0_clk.common.hw,
+ [CLK_BUS_UART1] = &bus_uart1_clk.common.hw,
+ [CLK_BUS_UART2] = &bus_uart2_clk.common.hw,
+ [CLK_BUS_UART3] = &bus_uart3_clk.common.hw,
+ [CLK_BUS_UART4] = &bus_uart4_clk.common.hw,
+ [CLK_CCI400] = &cci400_clk.common.hw,
+ [CLK_NAND] = &nand_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC0_SAMPLE] = &mmc0_sample_clk.common.hw,
+ [CLK_MMC0_OUTPUT] = &mmc0_output_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw,
+ [CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_MMC2_SAMPLE] = &mmc2_sample_clk.common.hw,
+ [CLK_MMC2_OUTPUT] = &mmc2_output_clk.common.hw,
+ [CLK_SS] = &ss_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_I2S0] = &i2s0_clk.common.hw,
+ [CLK_I2S1] = &i2s1_clk.common.hw,
+ [CLK_I2S2] = &i2s2_clk.common.hw,
+ [CLK_TDM] = &tdm_clk.common.hw,
+ [CLK_SPDIF] = &spdif_clk.common.hw,
+ [CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
+ [CLK_USB_PHY1] = &usb_phy1_clk.common.hw,
+ [CLK_USB_HSIC] = &usb_hsic_clk.common.hw,
+ [CLK_USB_HSIC_12M] = &usb_hsic_12m_clk.common.hw,
+ [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw,
+ [CLK_DRAM] = &dram_clk.common.hw,
+ [CLK_DRAM_VE] = &dram_ve_clk.common.hw,
+ [CLK_DRAM_CSI] = &dram_csi_clk.common.hw,
+ [CLK_TCON0] = &tcon0_clk.common.hw,
+ [CLK_TCON1] = &tcon1_clk.common.hw,
+ [CLK_CSI_MISC] = &csi_misc_clk.common.hw,
+ [CLK_MIPI_CSI] = &mipi_csi_clk.common.hw,
+ [CLK_CSI_MCLK] = &csi_mclk_clk.common.hw,
+ [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_AVS] = &avs_clk.common.hw,
+ [CLK_HDMI] = &hdmi_clk.common.hw,
+ [CLK_HDMI_SLOW] = &hdmi_slow_clk.common.hw,
+ [CLK_MBUS] = &mbus_clk.common.hw,
+ [CLK_MIPI_DSI0] = &mipi_dsi0_clk.common.hw,
+ [CLK_MIPI_DSI1] = &mipi_dsi1_clk.common.hw,
+ [CLK_GPU_CORE] = &gpu_core_clk.common.hw,
+ [CLK_GPU_MEMORY] = &gpu_memory_clk.common.hw,
+ [CLK_GPU_HYD] = &gpu_hyd_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
+static struct ccu_reset_map sun8i_a83t_ccu_resets[] = {
+ [RST_USB_PHY0] = { 0x0cc, BIT(0) },
+ [RST_USB_PHY1] = { 0x0cc, BIT(1) },
+ [RST_USB_HSIC] = { 0x0cc, BIT(2) },
+ [RST_DRAM] = { 0x0f4, BIT(31) },
+ [RST_MBUS] = { 0x0fc, BIT(31) },
+ [RST_BUS_MIPI_DSI] = { 0x2c0, BIT(1) },
+ [RST_BUS_SS] = { 0x2c0, BIT(5) },
+ [RST_BUS_DMA] = { 0x2c0, BIT(6) },
+ [RST_BUS_MMC0] = { 0x2c0, BIT(8) },
+ [RST_BUS_MMC1] = { 0x2c0, BIT(9) },
+ [RST_BUS_MMC2] = { 0x2c0, BIT(10) },
+ [RST_BUS_NAND] = { 0x2c0, BIT(13) },
+ [RST_BUS_DRAM] = { 0x2c0, BIT(14) },
+ [RST_BUS_EMAC] = { 0x2c0, BIT(17) },
+ [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) },
+ [RST_BUS_SPI0] = { 0x2c0, BIT(20) },
+ [RST_BUS_SPI1] = { 0x2c0, BIT(21) },
+ [RST_BUS_OTG] = { 0x2c0, BIT(24) },
+ [RST_BUS_EHCI0] = { 0x2c0, BIT(26) },
+ [RST_BUS_EHCI1] = { 0x2c0, BIT(27) },
+ [RST_BUS_OHCI0] = { 0x2c0, BIT(29) },
+ [RST_BUS_VE] = { 0x2c4, BIT(0) },
+ [RST_BUS_TCON0] = { 0x2c4, BIT(4) },
+ [RST_BUS_TCON1] = { 0x2c4, BIT(5) },
+ [RST_BUS_CSI] = { 0x2c4, BIT(8) },
+ [RST_BUS_HDMI0] = { 0x2c4, BIT(10) },
+ [RST_BUS_HDMI1] = { 0x2c4, BIT(11) },
+ [RST_BUS_DE] = { 0x2c4, BIT(12) },
+ [RST_BUS_GPU] = { 0x2c4, BIT(20) },
+ [RST_BUS_MSGBOX] = { 0x2c4, BIT(21) },
+ [RST_BUS_SPINLOCK] = { 0x2c4, BIT(22) },
+ [RST_BUS_LVDS] = { 0x2c8, BIT(0) },
+ [RST_BUS_SPDIF] = { 0x2d0, BIT(1) },
+ [RST_BUS_I2S0] = { 0x2d0, BIT(12) },
+ [RST_BUS_I2S1] = { 0x2d0, BIT(13) },
+ [RST_BUS_I2S2] = { 0x2d0, BIT(14) },
+ [RST_BUS_TDM] = { 0x2d0, BIT(15) },
+ [RST_BUS_I2C0] = { 0x2d8, BIT(0) },
+ [RST_BUS_I2C1] = { 0x2d8, BIT(1) },
+ [RST_BUS_I2C2] = { 0x2d8, BIT(2) },
+ [RST_BUS_UART0] = { 0x2d8, BIT(16) },
+ [RST_BUS_UART1] = { 0x2d8, BIT(17) },
+ [RST_BUS_UART2] = { 0x2d8, BIT(18) },
+ [RST_BUS_UART3] = { 0x2d8, BIT(19) },
+ [RST_BUS_UART4] = { 0x2d8, BIT(20) },
+};
+
+static const struct sunxi_ccu_desc sun8i_a83t_ccu_desc = {
+ .ccu_clks = sun8i_a83t_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_a83t_ccu_clks),
+
+ .hw_clks = &sun8i_a83t_hw_clks,
+
+ .resets = sun8i_a83t_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun8i_a83t_ccu_resets),
+};
+
+#define SUN8I_A83T_PLL_P_SHIFT 16
+#define SUN8I_A83T_PLL_N_SHIFT 8
+#define SUN8I_A83T_PLL_N_WIDTH 8
+
+static void sun8i_a83t_cpu_pll_fixup(void __iomem *reg)
+{
+ u32 val = readl(reg);
+
+ /* bail out if P divider is not used */
+ if (!(val & BIT(SUN8I_A83T_PLL_P_SHIFT)))
+ return;
+
+ /*
+ * If P is used, output should be less than 288 MHz. When we
+ * set P to 1, we should also decrease the multiplier so the
+ * output doesn't go out of range, but not so much that the
+ * multiplier drops below 12, the minimum operating value.
+ *
+ * To keep it simple, set the multiplier to 17, the reset value.
+ */
+ val &= ~GENMASK(SUN8I_A83T_PLL_N_SHIFT + SUN8I_A83T_PLL_N_WIDTH - 1,
+ SUN8I_A83T_PLL_N_SHIFT);
+ val |= 17 << SUN8I_A83T_PLL_N_SHIFT;
+
+ /* And clear P */
+ val &= ~BIT(SUN8I_A83T_PLL_P_SHIFT);
+
+ writel(val, reg);
+}
+
+static int sun8i_a83t_ccu_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ void __iomem *reg;
+ u32 val;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ /* Enforce d1 = 0, d2 = 0 for Audio PLL */
+ val = readl(reg + SUN8I_A83T_PLL_AUDIO_REG);
+ val &= ~(BIT(16) | BIT(18));
+ writel(val, reg + SUN8I_A83T_PLL_AUDIO_REG);
+
+ /* Enforce P = 1 for both CPU cluster PLLs */
+ sun8i_a83t_cpu_pll_fixup(reg + SUN8I_A83T_PLL_C0CPUX_REG);
+ sun8i_a83t_cpu_pll_fixup(reg + SUN8I_A83T_PLL_C1CPUX_REG);
+
+ return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun8i_a83t_ccu_desc);
+}
+
+static const struct of_device_id sun8i_a83t_ccu_ids[] = {
+ { .compatible = "allwinner,sun8i-a83t-ccu" },
+ { }
+};
+
+static struct platform_driver sun8i_a83t_ccu_driver = {
+ .probe = sun8i_a83t_ccu_probe,
+ .driver = {
+ .name = "sun8i-a83t-ccu",
+ .of_match_table = sun8i_a83t_ccu_ids,
+ },
+};
+builtin_platform_driver(sun8i_a83t_ccu_driver);
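
sun8i_a83t_cpu_pll_fixup() above rewrites the N field and clears the P bit in one read-modify-write. A hedged, userspace restatement of just that bit arithmetic; BIT/GENMASK are simplified 32-bit stand-ins for the kernel macros, and 0x80012000 is an invented example register value with the enable bit, the P bit and N = 0x20 set:

	#define BIT(n)		(1U << (n))
	#define GENMASK(h, l)	((~0U << (l)) & (~0U >> (31 - (h))))

	#define PLL_P_SHIFT	16
	#define PLL_N_SHIFT	8
	#define PLL_N_WIDTH	8
	#define PLL_N_MASK	GENMASK(PLL_N_SHIFT + PLL_N_WIDTH - 1, PLL_N_SHIFT)

	/*
	 * Same transform the fixup applies once it has decided P is in use:
	 * clear the N field, force N = 17, then clear the P bit.
	 */
	#define PLL_FIXUP(val) \
		((((val) & ~PLL_N_MASK) | (17U << PLL_N_SHIFT)) & ~BIT(PLL_P_SHIFT))

	_Static_assert(PLL_FIXUP(0x80012000U) == 0x80001100U,
		       "N forced to 17 (0x11 in bits [15:8]), P divider bit cleared");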
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.h b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.h
new file mode 100644
index 000000000000..d67edaf76748
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2016 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CCU_SUN8I_A83T_H_
+#define _CCU_SUN8I_A83T_H_
+
+#include <dt-bindings/clock/sun8i-a83t-ccu.h>
+#include <dt-bindings/reset/sun8i-a83t-ccu.h>
+
+#define CLK_PLL_C0CPUX 0
+#define CLK_PLL_C1CPUX 1
+#define CLK_PLL_AUDIO 2
+#define CLK_PLL_VIDEO0 3
+#define CLK_PLL_VE 4
+#define CLK_PLL_DDR 5
+
+/* pll-periph is exported to the PRCM block */
+
+#define CLK_PLL_GPU 7
+#define CLK_PLL_HSIC 8
+
+/* pll-de is exported for the display engine */
+
+#define CLK_PLL_VIDEO1 10
+
+/* The CPUX clocks are exported */
+
+#define CLK_AXI0 13
+#define CLK_AXI1 14
+#define CLK_AHB1 15
+#define CLK_AHB2 16
+#define CLK_APB1 17
+#define CLK_APB2 18
+
+/* bus gates exported */
+
+#define CLK_CCI400 58
+
+/* module and usb clocks exported */
+
+#define CLK_DRAM 82
+
+/* dram gates and more module clocks exported */
+
+#define CLK_MBUS 95
+
+/* more module clocks exported */
+
+#define CLK_NUMBER (CLK_GPU_HYD + 1)
+
+#endif /* _CCU_SUN8I_A83T_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
new file mode 100644
index 000000000000..5cdaf52669e4
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2017 Icenowy Zheng <[email protected]>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "ccu_common.h"
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_reset.h"
+
+#include "ccu-sun8i-de2.h"
+
+static SUNXI_CCU_GATE(bus_mixer0_clk, "bus-mixer0", "bus-de",
+ 0x04, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_mixer1_clk, "bus-mixer1", "bus-de",
+ 0x04, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_wb_clk, "bus-wb", "bus-de",
+ 0x04, BIT(2), 0);
+
+static SUNXI_CCU_GATE(mixer0_clk, "mixer0", "mixer0-div",
+ 0x00, BIT(0), CLK_SET_RATE_PARENT);
+static SUNXI_CCU_GATE(mixer1_clk, "mixer1", "mixer1-div",
+ 0x00, BIT(1), CLK_SET_RATE_PARENT);
+static SUNXI_CCU_GATE(wb_clk, "wb", "wb-div",
+ 0x00, BIT(2), CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_M(mixer0_div_clk, "mixer0-div", "de", 0x0c, 0, 4,
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M(mixer1_div_clk, "mixer1-div", "de", 0x0c, 4, 4,
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M(wb_div_clk, "wb-div", "de", 0x0c, 8, 4,
+ CLK_SET_RATE_PARENT);
+
+static struct ccu_common *sun8i_a83t_de2_clks[] = {
+ &mixer0_clk.common,
+ &mixer1_clk.common,
+ &wb_clk.common,
+
+ &bus_mixer0_clk.common,
+ &bus_mixer1_clk.common,
+ &bus_wb_clk.common,
+
+ &mixer0_div_clk.common,
+ &mixer1_div_clk.common,
+ &wb_div_clk.common,
+};
+
+static struct ccu_common *sun8i_v3s_de2_clks[] = {
+ &mixer0_clk.common,
+ &wb_clk.common,
+
+ &bus_mixer0_clk.common,
+ &bus_wb_clk.common,
+
+ &mixer0_div_clk.common,
+ &wb_div_clk.common,
+};
+
+static struct clk_hw_onecell_data sun8i_a83t_de2_hw_clks = {
+ .hws = {
+ [CLK_MIXER0] = &mixer0_clk.common.hw,
+ [CLK_MIXER1] = &mixer1_clk.common.hw,
+ [CLK_WB] = &wb_clk.common.hw,
+
+ [CLK_BUS_MIXER0] = &bus_mixer0_clk.common.hw,
+ [CLK_BUS_MIXER1] = &bus_mixer1_clk.common.hw,
+ [CLK_BUS_WB] = &bus_wb_clk.common.hw,
+
+ [CLK_MIXER0_DIV] = &mixer0_div_clk.common.hw,
+ [CLK_MIXER1_DIV] = &mixer1_div_clk.common.hw,
+ [CLK_WB_DIV] = &wb_div_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
+static struct clk_hw_onecell_data sun8i_v3s_de2_hw_clks = {
+ .hws = {
+ [CLK_MIXER0] = &mixer0_clk.common.hw,
+ [CLK_WB] = &wb_clk.common.hw,
+
+ [CLK_BUS_MIXER0] = &bus_mixer0_clk.common.hw,
+ [CLK_BUS_WB] = &bus_wb_clk.common.hw,
+
+ [CLK_MIXER0_DIV] = &mixer0_div_clk.common.hw,
+ [CLK_WB_DIV] = &wb_div_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
+static struct ccu_reset_map sun8i_a83t_de2_resets[] = {
+ [RST_MIXER0] = { 0x08, BIT(0) },
+ /*
+ * For A83T, H3 and R40, mixer1 reset line is shared with wb, so
+ * only RST_WB is exported here.
+ * For V3s there's just no mixer1, so it also shares this struct.
+ */
+ [RST_WB] = { 0x08, BIT(2) },
+};
+
+static struct ccu_reset_map sun50i_a64_de2_resets[] = {
+ [RST_MIXER0] = { 0x08, BIT(0) },
+ [RST_MIXER1] = { 0x08, BIT(1) },
+ [RST_WB] = { 0x08, BIT(2) },
+};
+
+static const struct sunxi_ccu_desc sun8i_a83t_de2_clk_desc = {
+ .ccu_clks = sun8i_a83t_de2_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_a83t_de2_clks),
+
+ .hw_clks = &sun8i_a83t_de2_hw_clks,
+
+ .resets = sun8i_a83t_de2_resets,
+ .num_resets = ARRAY_SIZE(sun8i_a83t_de2_resets),
+};
+
+static const struct sunxi_ccu_desc sun50i_a64_de2_clk_desc = {
+ .ccu_clks = sun8i_a83t_de2_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_a83t_de2_clks),
+
+ .hw_clks = &sun8i_a83t_de2_hw_clks,
+
+ .resets = sun50i_a64_de2_resets,
+ .num_resets = ARRAY_SIZE(sun50i_a64_de2_resets),
+};
+
+static const struct sunxi_ccu_desc sun8i_v3s_de2_clk_desc = {
+ .ccu_clks = sun8i_v3s_de2_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_v3s_de2_clks),
+
+ .hw_clks = &sun8i_v3s_de2_hw_clks,
+
+ .resets = sun8i_a83t_de2_resets,
+ .num_resets = ARRAY_SIZE(sun8i_a83t_de2_resets),
+};
+
+static int sunxi_de2_clk_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct clk *bus_clk, *mod_clk;
+ struct reset_control *rstc;
+ void __iomem *reg;
+ const struct sunxi_ccu_desc *ccu_desc;
+ int ret;
+
+ ccu_desc = of_device_get_match_data(&pdev->dev);
+ if (!ccu_desc)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ bus_clk = devm_clk_get(&pdev->dev, "bus");
+ if (IS_ERR(bus_clk)) {
+ ret = PTR_ERR(bus_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Couldn't get bus clk: %d\n", ret);
+ return ret;
+ }
+
+ mod_clk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(mod_clk)) {
+ ret = PTR_ERR(mod_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Couldn't get mod clk: %d\n", ret);
+ return ret;
+ }
+
+ rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(rstc)) {
+ ret = PTR_ERR(rstc);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Couldn't get reset control: %d\n", ret);
+ return ret;
+ }
+
+ /* The clocks need to be enabled for us to access the registers */
+ ret = clk_prepare_enable(bus_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't enable bus clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(mod_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't enable mod clk: %d\n", ret);
+ goto err_disable_bus_clk;
+ }
+
+ /* The reset control needs to be deasserted for the controls to work */
+ ret = reset_control_deassert(rstc);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Couldn't deassert reset control: %d\n", ret);
+ goto err_disable_mod_clk;
+ }
+
+ ret = sunxi_ccu_probe(pdev->dev.of_node, reg, ccu_desc);
+ if (ret)
+ goto err_assert_reset;
+
+ return 0;
+
+err_assert_reset:
+ reset_control_assert(rstc);
+err_disable_mod_clk:
+ clk_disable_unprepare(mod_clk);
+err_disable_bus_clk:
+ clk_disable_unprepare(bus_clk);
+ return ret;
+}
+
+static const struct of_device_id sunxi_de2_clk_ids[] = {
+ {
+ .compatible = "allwinner,sun8i-a83t-de2-clk",
+ .data = &sun8i_a83t_de2_clk_desc,
+ },
+ {
+ .compatible = "allwinner,sun8i-v3s-de2-clk",
+ .data = &sun8i_v3s_de2_clk_desc,
+ },
+ {
+ .compatible = "allwinner,sun50i-h5-de2-clk",
+ .data = &sun50i_a64_de2_clk_desc,
+ },
+ /*
+ * The Allwinner A64 SoC needs a bit to be poked in the syscon to make
+ * the DE2 actually work.
+ * So there's currently no A64 compatible here.
+ * H5 shares the same reset line with A64, so here H5 is using the
+ * clock description of A64.
+ */
+ { }
+};
+
+static struct platform_driver sunxi_de2_clk_driver = {
+ .probe = sunxi_de2_clk_probe,
+ .driver = {
+ .name = "sunxi-de2-clks",
+ .of_match_table = sunxi_de2_clk_ids,
+ },
+};
+builtin_platform_driver(sunxi_de2_clk_driver);
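
The DE2 driver above selects one of three sunxi_ccu_desc tables by compatible string through of_device_get_match_data(). A minimal sketch of that match-data idiom; vendor,foo-clk, my_desc and the nr_clks values are made-up illustrations:

	#include <linux/errno.h>
	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	struct my_desc {			/* hypothetical per-compatible data */
		unsigned int nr_clks;
	};

	static const struct my_desc foo_desc = { .nr_clks = 9 };
	static const struct my_desc bar_desc = { .nr_clks = 6 };

	static const struct of_device_id my_ids[] = {
		{ .compatible = "vendor,foo-clk", .data = &foo_desc },
		{ .compatible = "vendor,bar-clk", .data = &bar_desc },
		{ /* sentinel */ }
	};

	static int my_probe(struct platform_device *pdev)
	{
		const struct my_desc *desc;

		/* Returns the .data of the of_device_id entry that matched */
		desc = of_device_get_match_data(&pdev->dev);
		if (!desc)
			return -EINVAL;

		/* ... register desc->nr_clks clocks ... */
		return 0;
	}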
diff --git a/drivers/media/platform/vimc/vimc-capture.h b/drivers/clk/sunxi-ng/ccu-sun8i-de2.h
index 581a813abdf1..530c006e0ae9 100644
--- a/drivers/media/platform/vimc/vimc-capture.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.h
@@ -1,7 +1,5 @@
/*
- * vimc-capture.h Virtual Media Controller Driver
- *
- * Copyright (C) 2015-2017 Helen Koike <[email protected]>
+ * Copyright 2016 Icenowy Zheng <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,17 +10,19 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
*/
-#ifndef _VIMC_CAPTURE_H_
-#define _VIMC_CAPTURE_H_
+#ifndef _CCU_SUN8I_DE2_H_
+#define _CCU_SUN8I_DE2_H_
+
+#include <dt-bindings/clock/sun8i-de2.h>
+#include <dt-bindings/reset/sun8i-de2.h>
-#include "vimc-core.h"
+/* Intermediary clock dividers are not exported */
+#define CLK_MIXER0_DIV 3
+#define CLK_MIXER1_DIV 4
+#define CLK_WB_DIV 5
-struct vimc_ent_device *vimc_cap_create(struct v4l2_device *v4l2_dev,
- const char *const name,
- u16 num_pads,
- const unsigned long *pads_flag);
+#define CLK_NUMBER (CLK_WB + 1)
-#endif
+#endif /* _CCU_SUN8I_DE2_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 4cbc1b701b7c..62e4f0d2b2fc 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -141,6 +141,9 @@ static SUNXI_CCU_M(axi_clk, "axi", "cpux", 0x050, 0, 2, 0);
static const char * const ahb1_parents[] = { "osc32k", "osc24M",
"axi" , "pll-periph0" };
+static const struct ccu_mux_var_prediv ahb1_predivs[] = {
+ { .index = 3, .shift = 6, .width = 2 },
+};
static struct ccu_div ahb1_clk = {
.div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
@@ -148,11 +151,8 @@ static struct ccu_div ahb1_clk = {
.shift = 12,
.width = 2,
- .variable_prediv = {
- .index = 3,
- .shift = 6,
- .width = 2,
- },
+ .var_predivs = ahb1_predivs,
+ .n_var_predivs = ARRAY_SIZE(ahb1_predivs),
},
.common = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
index 119f47b568ea..e54816ec1dbe 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
@@ -27,6 +27,11 @@
static const char * const ar100_parents[] = { "osc32k", "osc24M",
"pll-periph0", "iosc" };
+static const char * const a83t_ar100_parents[] = { "osc16M-d512", "osc24M",
+ "pll-periph0", "iosc" };
+static const struct ccu_mux_var_prediv ar100_predivs[] = {
+ { .index = 2, .shift = 8, .width = 5 },
+};
static struct ccu_div ar100_clk = {
.div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
@@ -35,11 +40,8 @@ static struct ccu_div ar100_clk = {
.shift = 16,
.width = 2,
- .variable_prediv = {
- .index = 2,
- .shift = 8,
- .width = 5,
- },
+ .var_predivs = ar100_predivs,
+ .n_var_predivs = ARRAY_SIZE(ar100_predivs),
},
.common = {
@@ -52,6 +54,27 @@ static struct ccu_div ar100_clk = {
},
};
+static struct ccu_div a83t_ar100_clk = {
+ .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
+
+ .mux = {
+ .shift = 16,
+ .width = 2,
+
+ .var_predivs = ar100_predivs,
+ .n_var_predivs = ARRAY_SIZE(ar100_predivs),
+ },
+
+ .common = {
+ .reg = 0x00,
+ .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("ar100",
+ a83t_ar100_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
static CLK_FIXED_FACTOR(ahb0_clk, "ahb0", "ar100", 1, 1, 0);
static struct ccu_div apb0_clk = {
@@ -66,6 +89,8 @@ static struct ccu_div apb0_clk = {
},
};
+static SUNXI_CCU_M(a83t_apb0_clk, "apb0", "ahb0", 0x0c, 0, 2, 0);
+
static SUNXI_CCU_GATE(apb0_pio_clk, "apb0-pio", "apb0",
0x28, BIT(0), 0);
static SUNXI_CCU_GATE(apb0_ir_clk, "apb0-ir", "apb0",
@@ -90,6 +115,46 @@ static SUNXI_CCU_MP_WITH_MUX_GATE(ir_clk, "ir",
BIT(31), /* gate */
0);
+static const char *const a83t_r_mod0_parents[] = { "osc16M", "osc24M" };
+static const struct ccu_mux_fixed_prediv a83t_ir_predivs[] = {
+ { .index = 0, .div = 16 },
+};
+static struct ccu_mp a83t_ir_clk = {
+ .enable = BIT(31),
+
+ .m = _SUNXI_CCU_DIV(0, 4),
+ .p = _SUNXI_CCU_DIV(16, 2),
+
+ .mux = {
+ .shift = 24,
+ .width = 2,
+ .fixed_predivs = a83t_ir_predivs,
+ .n_predivs = ARRAY_SIZE(a83t_ir_predivs),
+ },
+
+ .common = {
+ .reg = 0x54,
+ .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("ir",
+ a83t_r_mod0_parents,
+ &ccu_mp_ops,
+ 0),
+ },
+};
+
+static struct ccu_common *sun8i_a83t_r_ccu_clks[] = {
+ &a83t_ar100_clk.common,
+ &a83t_apb0_clk.common,
+ &apb0_pio_clk.common,
+ &apb0_ir_clk.common,
+ &apb0_timer_clk.common,
+ &apb0_rsb_clk.common,
+ &apb0_uart_clk.common,
+ &apb0_i2c_clk.common,
+ &apb0_twd_clk.common,
+ &a83t_ir_clk.common,
+};
+
static struct ccu_common *sun8i_h3_r_ccu_clks[] = {
&ar100_clk.common,
&apb0_clk.common,
@@ -115,6 +180,23 @@ static struct ccu_common *sun50i_a64_r_ccu_clks[] = {
&ir_clk.common,
};
+static struct clk_hw_onecell_data sun8i_a83t_r_hw_clks = {
+ .hws = {
+ [CLK_AR100] = &a83t_ar100_clk.common.hw,
+ [CLK_AHB0] = &ahb0_clk.hw,
+ [CLK_APB0] = &a83t_apb0_clk.common.hw,
+ [CLK_APB0_PIO] = &apb0_pio_clk.common.hw,
+ [CLK_APB0_IR] = &apb0_ir_clk.common.hw,
+ [CLK_APB0_TIMER] = &apb0_timer_clk.common.hw,
+ [CLK_APB0_RSB] = &apb0_rsb_clk.common.hw,
+ [CLK_APB0_UART] = &apb0_uart_clk.common.hw,
+ [CLK_APB0_I2C] = &apb0_i2c_clk.common.hw,
+ [CLK_APB0_TWD] = &apb0_twd_clk.common.hw,
+ [CLK_IR] = &a83t_ir_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
static struct clk_hw_onecell_data sun8i_h3_r_hw_clks = {
.hws = {
[CLK_AR100] = &ar100_clk.common.hw,
@@ -148,6 +230,14 @@ static struct clk_hw_onecell_data sun50i_a64_r_hw_clks = {
.num = CLK_NUMBER,
};
+static struct ccu_reset_map sun8i_a83t_r_ccu_resets[] = {
+ [RST_APB0_IR] = { 0xb0, BIT(1) },
+ [RST_APB0_TIMER] = { 0xb0, BIT(2) },
+ [RST_APB0_RSB] = { 0xb0, BIT(3) },
+ [RST_APB0_UART] = { 0xb0, BIT(4) },
+ [RST_APB0_I2C] = { 0xb0, BIT(6) },
+};
+
static struct ccu_reset_map sun8i_h3_r_ccu_resets[] = {
[RST_APB0_IR] = { 0xb0, BIT(1) },
[RST_APB0_TIMER] = { 0xb0, BIT(2) },
@@ -163,6 +253,16 @@ static struct ccu_reset_map sun50i_a64_r_ccu_resets[] = {
[RST_APB0_I2C] = { 0xb0, BIT(6) },
};
+static const struct sunxi_ccu_desc sun8i_a83t_r_ccu_desc = {
+ .ccu_clks = sun8i_a83t_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun8i_a83t_r_ccu_clks),
+
+ .hw_clks = &sun8i_a83t_r_hw_clks,
+
+ .resets = sun8i_a83t_r_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun8i_a83t_r_ccu_resets),
+};
+
static const struct sunxi_ccu_desc sun8i_h3_r_ccu_desc = {
.ccu_clks = sun8i_h3_r_ccu_clks,
.num_ccu_clks = ARRAY_SIZE(sun8i_h3_r_ccu_clks),
@@ -198,6 +298,13 @@ static void __init sunxi_r_ccu_init(struct device_node *node,
sunxi_ccu_probe(node, reg, desc);
}
+static void __init sun8i_a83t_r_ccu_setup(struct device_node *node)
+{
+ sunxi_r_ccu_init(node, &sun8i_a83t_r_ccu_desc);
+}
+CLK_OF_DECLARE(sun8i_a83t_r_ccu, "allwinner,sun8i-a83t-r-ccu",
+ sun8i_a83t_r_ccu_setup);
+
static void __init sun8i_h3_r_ccu_setup(struct device_node *node)
{
sunxi_r_ccu_init(node, &sun8i_h3_r_ccu_desc);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index 6297add857b5..a34a78d7fb28 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -132,6 +132,9 @@ static SUNXI_CCU_M(axi_clk, "axi", "cpu", 0x050, 0, 2, 0);
static const char * const ahb1_parents[] = { "osc32k", "osc24M",
"axi", "pll-periph0" };
+static const struct ccu_mux_var_prediv ahb1_predivs[] = {
+ { .index = 3, .shift = 6, .width = 2 },
+};
static struct ccu_div ahb1_clk = {
.div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
@@ -139,11 +142,8 @@ static struct ccu_div ahb1_clk = {
.shift = 12,
.width = 2,
- .variable_prediv = {
- .index = 3,
- .shift = 6,
- .width = 2,
- },
+ .var_predivs = ahb1_predivs,
+ .n_var_predivs = ARRAY_SIZE(ahb1_predivs),
},
.common = {
diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c
index 4057e6021aa9..c0e5c10d0091 100644
--- a/drivers/clk/sunxi-ng/ccu_div.c
+++ b/drivers/clk/sunxi-ng/ccu_div.c
@@ -14,23 +14,17 @@
#include "ccu_div.h"
static unsigned long ccu_div_round_rate(struct ccu_mux_internal *mux,
- unsigned long parent_rate,
+ struct clk_hw *parent,
+ unsigned long *parent_rate,
unsigned long rate,
void *data)
{
struct ccu_div *cd = data;
- unsigned long val;
-
- /*
- * We can't use divider_round_rate that assumes that there's
- * several parents, while we might be called to evaluate
- * several different parents.
- */
- val = divider_get_val(rate, parent_rate, cd->div.table, cd->div.width,
- cd->div.flags);
- return divider_recalc_rate(&cd->common.hw, parent_rate, val,
- cd->div.table, cd->div.flags);
+ return divider_round_rate_parent(&cd->common.hw, parent,
+ rate, parent_rate,
+ cd->div.table, cd->div.width,
+ cd->div.flags);
}
static void ccu_div_disable(struct clk_hw *hw)
@@ -65,8 +59,8 @@ static unsigned long ccu_div_recalc_rate(struct clk_hw *hw,
val = reg >> cd->div.shift;
val &= (1 << cd->div.width) - 1;
- ccu_mux_helper_adjust_parent_for_prediv(&cd->common, &cd->mux, -1,
- &parent_rate);
+ parent_rate = ccu_mux_helper_apply_prediv(&cd->common, &cd->mux, -1,
+ parent_rate);
return divider_recalc_rate(hw, parent_rate, val, cd->div.table,
cd->div.flags);
@@ -77,18 +71,6 @@ static int ccu_div_determine_rate(struct clk_hw *hw,
{
struct ccu_div *cd = hw_to_ccu_div(hw);
- if (clk_hw_get_num_parents(hw) == 1) {
- req->rate = divider_round_rate(hw, req->rate,
- &req->best_parent_rate,
- cd->div.table,
- cd->div.width,
- cd->div.flags);
-
- req->best_parent_hw = clk_hw_get_parent(hw);
-
- return 0;
- }
-
return ccu_mux_helper_determine_rate(&cd->common, &cd->mux,
req, ccu_div_round_rate, cd);
}
@@ -101,8 +83,8 @@ static int ccu_div_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long val;
u32 reg;
- ccu_mux_helper_adjust_parent_for_prediv(&cd->common, &cd->mux, -1,
- &parent_rate);
+ parent_rate = ccu_mux_helper_apply_prediv(&cd->common, &cd->mux, -1,
+ parent_rate);
val = divider_get_val(rate, parent_rate, cd->div.table, cd->div.width,
cd->div.flags);
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index b583f186a804..b917ad7a386c 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -41,7 +41,8 @@ static void ccu_mp_find_best(unsigned long parent, unsigned long rate,
}
static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
- unsigned long parent_rate,
+ struct clk_hw *hw,
+ unsigned long *parent_rate,
unsigned long rate,
void *data)
{
@@ -52,9 +53,9 @@ static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
max_m = cmp->m.max ?: 1 << cmp->m.width;
max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
- ccu_mp_find_best(parent_rate, rate, max_m, max_p, &m, &p);
+ ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p);
- return parent_rate / p / m;
+ return *parent_rate / p / m;
}
static void ccu_mp_disable(struct clk_hw *hw)
@@ -86,8 +87,8 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
u32 reg;
/* Adjust parent_rate according to pre-dividers */
- ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
- -1, &parent_rate);
+ parent_rate = ccu_mux_helper_apply_prediv(&cmp->common, &cmp->mux, -1,
+ parent_rate);
reg = readl(cmp->common.base + cmp->common.reg);
@@ -122,8 +123,8 @@ static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
u32 reg;
/* Adjust parent_rate according to pre-dividers */
- ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux,
- -1, &parent_rate);
+ parent_rate = ccu_mux_helper_apply_prediv(&cmp->common, &cmp->mux, -1,
+ parent_rate);
max_m = cmp->m.max ?: 1 << cmp->m.width;
max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
diff --git a/drivers/clk/sunxi-ng/ccu_mult.c b/drivers/clk/sunxi-ng/ccu_mult.c
index 671141359895..20d0300867f2 100644
--- a/drivers/clk/sunxi-ng/ccu_mult.c
+++ b/drivers/clk/sunxi-ng/ccu_mult.c
@@ -33,9 +33,10 @@ static void ccu_mult_find_best(unsigned long parent, unsigned long rate,
}
static unsigned long ccu_mult_round_rate(struct ccu_mux_internal *mux,
- unsigned long parent_rate,
- unsigned long rate,
- void *data)
+ struct clk_hw *parent,
+ unsigned long *parent_rate,
+ unsigned long rate,
+ void *data)
{
struct ccu_mult *cm = data;
struct _ccu_mult _cm;
@@ -47,9 +48,9 @@ static unsigned long ccu_mult_round_rate(struct ccu_mux_internal *mux,
else
_cm.max = (1 << cm->mult.width) + cm->mult.offset - 1;
- ccu_mult_find_best(parent_rate, rate, &_cm);
+ ccu_mult_find_best(*parent_rate, rate, &_cm);
- return parent_rate * _cm.mult;
+ return *parent_rate * _cm.mult;
}
static void ccu_mult_disable(struct clk_hw *hw)
@@ -87,8 +88,8 @@ static unsigned long ccu_mult_recalc_rate(struct clk_hw *hw,
val = reg >> cm->mult.shift;
val &= (1 << cm->mult.width) - 1;
- ccu_mux_helper_adjust_parent_for_prediv(&cm->common, &cm->mux, -1,
- &parent_rate);
+ parent_rate = ccu_mux_helper_apply_prediv(&cm->common, &cm->mux, -1,
+ parent_rate);
return parent_rate * (val + cm->mult.offset);
}
@@ -115,8 +116,8 @@ static int ccu_mult_set_rate(struct clk_hw *hw, unsigned long rate,
else
ccu_frac_helper_disable(&cm->common, &cm->frac);
- ccu_mux_helper_adjust_parent_for_prediv(&cm->common, &cm->mux, -1,
- &parent_rate);
+ parent_rate = ccu_mux_helper_apply_prediv(&cm->common, &cm->mux, -1,
+ parent_rate);
_cm.min = cm->mult.min;
diff --git a/drivers/clk/sunxi-ng/ccu_mux.c b/drivers/clk/sunxi-ng/ccu_mux.c
index c6bb1f523232..312664155a54 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.c
+++ b/drivers/clk/sunxi-ng/ccu_mux.c
@@ -15,24 +15,20 @@
#include "ccu_gate.h"
#include "ccu_mux.h"
-void ccu_mux_helper_adjust_parent_for_prediv(struct ccu_common *common,
- struct ccu_mux_internal *cm,
- int parent_index,
- unsigned long *parent_rate)
+static u16 ccu_mux_get_prediv(struct ccu_common *common,
+ struct ccu_mux_internal *cm,
+ int parent_index)
{
u16 prediv = 1;
u32 reg;
- int i;
if (!((common->features & CCU_FEATURE_FIXED_PREDIV) ||
(common->features & CCU_FEATURE_VARIABLE_PREDIV) ||
(common->features & CCU_FEATURE_ALL_PREDIV)))
- return;
+ return 1;
- if (common->features & CCU_FEATURE_ALL_PREDIV) {
- *parent_rate = *parent_rate / common->prediv;
- return;
- }
+ if (common->features & CCU_FEATURE_ALL_PREDIV)
+ return common->prediv;
reg = readl(common->base + common->reg);
if (parent_index < 0) {
@@ -40,28 +36,52 @@ void ccu_mux_helper_adjust_parent_for_prediv(struct ccu_common *common,
parent_index &= (1 << cm->width) - 1;
}
- if (common->features & CCU_FEATURE_FIXED_PREDIV)
+ if (common->features & CCU_FEATURE_FIXED_PREDIV) {
+ int i;
+
for (i = 0; i < cm->n_predivs; i++)
if (parent_index == cm->fixed_predivs[i].index)
prediv = cm->fixed_predivs[i].div;
+ }
- if (common->features & CCU_FEATURE_VARIABLE_PREDIV)
- if (parent_index == cm->variable_prediv.index) {
- u8 div;
+ if (common->features & CCU_FEATURE_VARIABLE_PREDIV) {
+ int i;
- div = reg >> cm->variable_prediv.shift;
- div &= (1 << cm->variable_prediv.width) - 1;
- prediv = div + 1;
- }
+ for (i = 0; i < cm->n_var_predivs; i++)
+ if (parent_index == cm->var_predivs[i].index) {
+ u8 div;
+
+ div = reg >> cm->var_predivs[i].shift;
+ div &= (1 << cm->var_predivs[i].width) - 1;
+ prediv = div + 1;
+ }
+ }
+
+ return prediv;
+}
- *parent_rate = *parent_rate / prediv;
+unsigned long ccu_mux_helper_apply_prediv(struct ccu_common *common,
+ struct ccu_mux_internal *cm,
+ int parent_index,
+ unsigned long parent_rate)
+{
+ return parent_rate / ccu_mux_get_prediv(common, cm, parent_index);
+}
+
+static unsigned long ccu_mux_helper_unapply_prediv(struct ccu_common *common,
+ struct ccu_mux_internal *cm,
+ int parent_index,
+ unsigned long parent_rate)
+{
+ return parent_rate * ccu_mux_get_prediv(common, cm, parent_index);
}
int ccu_mux_helper_determine_rate(struct ccu_common *common,
struct ccu_mux_internal *cm,
struct clk_rate_request *req,
unsigned long (*round)(struct ccu_mux_internal *,
- unsigned long,
+ struct clk_hw *,
+ unsigned long *,
unsigned long,
void *),
void *data)
@@ -75,41 +95,43 @@ int ccu_mux_helper_determine_rate(struct ccu_common *common,
best_parent = clk_hw_get_parent(hw);
best_parent_rate = clk_hw_get_rate(best_parent);
+ adj_parent_rate = ccu_mux_helper_apply_prediv(common, cm, -1,
+ best_parent_rate);
- adj_parent_rate = best_parent_rate;
- ccu_mux_helper_adjust_parent_for_prediv(common, cm, -1,
- &adj_parent_rate);
+ best_rate = round(cm, best_parent, &adj_parent_rate,
+ req->rate, data);
- best_rate = round(cm, adj_parent_rate, req->rate, data);
+ /*
+ * adj_parent_rate might have been modified by our clock.
+ * Unapply the pre-divider if there's one, and give
+ * the actual frequency the parent needs to run at.
+ */
+ best_parent_rate = ccu_mux_helper_unapply_prediv(common, cm, -1,
+ adj_parent_rate);
goto out;
}
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
- unsigned long tmp_rate, parent_rate, adj_parent_rate;
+ unsigned long tmp_rate, parent_rate;
struct clk_hw *parent;
parent = clk_hw_get_parent_by_index(hw, i);
if (!parent)
continue;
- if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
- struct clk_rate_request parent_req = *req;
- int ret = __clk_determine_rate(parent, &parent_req);
-
- if (ret)
- continue;
+ parent_rate = ccu_mux_helper_apply_prediv(common, cm, i,
+ clk_hw_get_rate(parent));
- parent_rate = parent_req.rate;
- } else {
- parent_rate = clk_hw_get_rate(parent);
- }
+ tmp_rate = round(cm, parent, &parent_rate, req->rate, data);
- adj_parent_rate = parent_rate;
- ccu_mux_helper_adjust_parent_for_prediv(common, cm, i,
- &adj_parent_rate);
-
- tmp_rate = round(cm, adj_parent_rate, req->rate, data);
+ /*
+ * parent_rate might have been modified by our clock.
+ * Unapply the pre-divider if there's one, and give
+ * the actual frequency the parent needs to run at.
+ */
+ parent_rate = ccu_mux_helper_unapply_prediv(common, cm, i,
+ parent_rate);
if (tmp_rate == req->rate) {
best_parent = parent;
best_parent_rate = parent_rate;
@@ -217,10 +239,8 @@ static unsigned long ccu_mux_recalc_rate(struct clk_hw *hw,
{
struct ccu_mux *cm = hw_to_ccu_mux(hw);
- ccu_mux_helper_adjust_parent_for_prediv(&cm->common, &cm->mux, -1,
- &parent_rate);
-
- return parent_rate;
+ return ccu_mux_helper_apply_prediv(&cm->common, &cm->mux, -1,
+ parent_rate);
}
const struct clk_ops ccu_mux_ops = {
diff --git a/drivers/clk/sunxi-ng/ccu_mux.h b/drivers/clk/sunxi-ng/ccu_mux.h
index 47aba3a48245..f20c0bd62a47 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.h
+++ b/drivers/clk/sunxi-ng/ccu_mux.h
@@ -10,6 +10,12 @@ struct ccu_mux_fixed_prediv {
u16 div;
};
+struct ccu_mux_var_prediv {
+ u8 index;
+ u8 shift;
+ u8 width;
+};
+
struct ccu_mux_internal {
u8 shift;
u8 width;
@@ -18,11 +24,8 @@ struct ccu_mux_internal {
const struct ccu_mux_fixed_prediv *fixed_predivs;
u8 n_predivs;
- struct {
- u8 index;
- u8 shift;
- u8 width;
- } variable_prediv;
+ const struct ccu_mux_var_prediv *var_predivs;
+ u8 n_var_predivs;
};
#define _SUNXI_CCU_MUX_TABLE(_shift, _width, _table) \
@@ -78,15 +81,16 @@ static inline struct ccu_mux *hw_to_ccu_mux(struct clk_hw *hw)
extern const struct clk_ops ccu_mux_ops;
-void ccu_mux_helper_adjust_parent_for_prediv(struct ccu_common *common,
- struct ccu_mux_internal *cm,
- int parent_index,
- unsigned long *parent_rate);
+unsigned long ccu_mux_helper_apply_prediv(struct ccu_common *common,
+ struct ccu_mux_internal *cm,
+ int parent_index,
+ unsigned long parent_rate);
int ccu_mux_helper_determine_rate(struct ccu_common *common,
struct ccu_mux_internal *cm,
struct clk_rate_request *req,
unsigned long (*round)(struct ccu_mux_internal *,
- unsigned long,
+ struct clk_hw *,
+ unsigned long *,
unsigned long,
void *),
void *data);
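
The ccu_mux.h hunk above replaces the single embedded variable_prediv field with a table of struct ccu_mux_var_prediv entries, so one mux can describe a variable pre-divider for more than one parent; ccu_mux_get_prediv() now walks var_predivs and applies whichever entry matches the selected parent. As a rough sketch only (not part of the patch: the example_* names, parent list, register offset and bit positions are invented, while the types, macros and CCU_FEATURE_VARIABLE_PREDIV flag are the ones used in the hunks above, inside a sunxi-ng ccu-*.c driver), a divider with two such parents could now look like this:

        static const char * const example_parents[] = { "osc32k", "osc24M",
                                                        "axi", "pll-periph0" };

        /* Parents 2 and 3 each carry their own pre-divider field in the register. */
        static const struct ccu_mux_var_prediv example_predivs[] = {
                { .index = 2, .shift = 8, .width = 2 },
                { .index = 3, .shift = 6, .width = 2 },
        };

        static struct ccu_div example_clk = {
                .div    = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),

                .mux    = {
                        .shift          = 12,
                        .width          = 2,
                        .var_predivs    = example_predivs,
                        .n_var_predivs  = ARRAY_SIZE(example_predivs),
                },

                .common = {
                        .reg            = 0x054,
                        .features       = CCU_FEATURE_VARIABLE_PREDIV,
                        .hw.init        = CLK_HW_INIT_PARENTS("example",
                                                              example_parents,
                                                              &ccu_div_ops,
                                                              0),
                },
        };

Under the old layout only a single parent could have a variable pre-divider; the ahb1_clk conversion in ccu-sun8i-v3s.c above shows the same pattern with one table entry.
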
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c
index cba84afe1cf1..44b16dc8fea6 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.c
+++ b/drivers/clk/sunxi-ng/ccu_nkm.c
@@ -102,7 +102,8 @@ static unsigned long ccu_nkm_recalc_rate(struct clk_hw *hw,
}
static unsigned long ccu_nkm_round_rate(struct ccu_mux_internal *mux,
- unsigned long parent_rate,
+ struct clk_hw *hw,
+ unsigned long *parent_rate,
unsigned long rate,
void *data)
{
@@ -116,9 +117,9 @@ static unsigned long ccu_nkm_round_rate(struct ccu_mux_internal *mux,
_nkm.min_m = 1;
_nkm.max_m = nkm->m.max ?: 1 << nkm->m.width;
- ccu_nkm_find_best(parent_rate, rate, &_nkm);
+ ccu_nkm_find_best(*parent_rate, rate, &_nkm);
- return parent_rate * _nkm.n * _nkm.k / _nkm.m;
+ return *parent_rate * _nkm.n * _nkm.k / _nkm.m;
}
static int ccu_nkm_determine_rate(struct clk_hw *hw,
diff --git a/drivers/clk/sunxi-ng/ccu_reset.h b/drivers/clk/sunxi-ng/ccu_reset.h
index 36a4679210bd..ff8f5ebca435 100644
--- a/drivers/clk/sunxi-ng/ccu_reset.h
+++ b/drivers/clk/sunxi-ng/ccu_reset.h
@@ -15,6 +15,7 @@
#define _CCU_RESET_H_
#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
struct ccu_reset_map {
u16 reg;
diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile
index 0deac9821039..edb9f471e525 100644
--- a/drivers/clk/ti/Makefile
+++ b/drivers/clk/ti/Makefile
@@ -3,7 +3,8 @@ ifeq ($(CONFIG_ARCH_OMAP2PLUS), y)
obj-y += clk.o autoidle.o clockdomain.o
clk-common = dpll.o composite.o divider.o gate.o \
fixed-factor.o mux.o apll.o \
- clkt_dpll.o clkt_iclk.o clkt_dflt.o
+ clkt_dpll.o clkt_iclk.o clkt_dflt.o \
+ clkctrl.o
obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o dpll3xxx.o
obj-$(CONFIG_SOC_TI81XX) += $(clk-common) fapll.o clk-814x.o clk-816x.o
obj-$(CONFIG_ARCH_OMAP2) += $(clk-common) interface.o clk-2xxx.o
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index 1c8bb83003bf..2005f032c02f 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk/ti.h>
+#include <dt-bindings/clock/omap4.h>
#include "clock.h"
@@ -33,6 +34,668 @@
*/
#define OMAP4_DPLL_USB_DEFFREQ 960000000
+static const struct omap_clkctrl_reg_data omap4_mpuss_clkctrl_regs[] __initconst = {
+ { OMAP4_MPU_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_mpu_m2_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_tesla_clkctrl_regs[] __initconst = {
+ { OMAP4_DSP_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_m4x2_ck" },
+ { 0 },
+};
+
+static const char * const omap4_aess_fclk_parents[] __initconst = {
+ "abe_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data omap4_aess_fclk_data __initconst = {
+ .max_div = 2,
+};
+
+static const struct omap_clkctrl_bit_data omap4_aess_bit_data[] __initconst = {
+ { 24, TI_CLK_DIVIDER, omap4_aess_fclk_parents, &omap4_aess_fclk_data },
+ { 0 },
+};
+
+static const char * const omap4_func_dmic_abe_gfclk_parents[] __initconst = {
+ "dmic_sync_mux_ck",
+ "pad_clks_ck",
+ "slimbus_clk",
+ NULL,
+};
+
+static const char * const omap4_dmic_sync_mux_ck_parents[] __initconst = {
+ "abe_24m_fclk",
+ "syc_clk_div_ck",
+ "func_24m_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_dmic_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_func_dmic_abe_gfclk_parents, NULL },
+ { 26, TI_CLK_MUX, omap4_dmic_sync_mux_ck_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_func_mcasp_abe_gfclk_parents[] __initconst = {
+ "mcasp_sync_mux_ck",
+ "pad_clks_ck",
+ "slimbus_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_mcasp_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_func_mcasp_abe_gfclk_parents, NULL },
+ { 26, TI_CLK_MUX, omap4_dmic_sync_mux_ck_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_func_mcbsp1_gfclk_parents[] __initconst = {
+ "mcbsp1_sync_mux_ck",
+ "pad_clks_ck",
+ "slimbus_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_mcbsp1_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_func_mcbsp1_gfclk_parents, NULL },
+ { 26, TI_CLK_MUX, omap4_dmic_sync_mux_ck_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_func_mcbsp2_gfclk_parents[] __initconst = {
+ "mcbsp2_sync_mux_ck",
+ "pad_clks_ck",
+ "slimbus_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_mcbsp2_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_func_mcbsp2_gfclk_parents, NULL },
+ { 26, TI_CLK_MUX, omap4_dmic_sync_mux_ck_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_func_mcbsp3_gfclk_parents[] __initconst = {
+ "mcbsp3_sync_mux_ck",
+ "pad_clks_ck",
+ "slimbus_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_mcbsp3_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_func_mcbsp3_gfclk_parents, NULL },
+ { 26, TI_CLK_MUX, omap4_dmic_sync_mux_ck_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_slimbus1_fclk_0_parents[] __initconst = {
+ "abe_24m_fclk",
+ NULL,
+};
+
+static const char * const omap4_slimbus1_fclk_1_parents[] __initconst = {
+ "func_24m_clk",
+ NULL,
+};
+
+static const char * const omap4_slimbus1_fclk_2_parents[] __initconst = {
+ "pad_clks_ck",
+ NULL,
+};
+
+static const char * const omap4_slimbus1_slimbus_clk_parents[] __initconst = {
+ "slimbus_clk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_slimbus1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_slimbus1_fclk_0_parents, NULL },
+ { 9, TI_CLK_GATE, omap4_slimbus1_fclk_1_parents, NULL },
+ { 10, TI_CLK_GATE, omap4_slimbus1_fclk_2_parents, NULL },
+ { 11, TI_CLK_GATE, omap4_slimbus1_slimbus_clk_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_timer5_sync_mux_parents[] __initconst = {
+ "syc_clk_div_ck",
+ "sys_32k_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer5_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_timer5_sync_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer6_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_timer5_sync_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer7_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_timer5_sync_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer8_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_timer5_sync_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_abe_clkctrl_regs[] __initconst = {
+ { OMAP4_L4_ABE_CLKCTRL, NULL, 0, "ocp_abe_iclk" },
+ { OMAP4_AESS_CLKCTRL, omap4_aess_bit_data, CLKF_SW_SUP, "aess_fclk" },
+ { OMAP4_MCPDM_CLKCTRL, NULL, CLKF_SW_SUP, "pad_clks_ck" },
+ { OMAP4_DMIC_CLKCTRL, omap4_dmic_bit_data, CLKF_SW_SUP, "func_dmic_abe_gfclk" },
+ { OMAP4_MCASP_CLKCTRL, omap4_mcasp_bit_data, CLKF_SW_SUP, "func_mcasp_abe_gfclk" },
+ { OMAP4_MCBSP1_CLKCTRL, omap4_mcbsp1_bit_data, CLKF_SW_SUP, "func_mcbsp1_gfclk" },
+ { OMAP4_MCBSP2_CLKCTRL, omap4_mcbsp2_bit_data, CLKF_SW_SUP, "func_mcbsp2_gfclk" },
+ { OMAP4_MCBSP3_CLKCTRL, omap4_mcbsp3_bit_data, CLKF_SW_SUP, "func_mcbsp3_gfclk" },
+ { OMAP4_SLIMBUS1_CLKCTRL, omap4_slimbus1_bit_data, CLKF_SW_SUP, "slimbus1_fclk_0" },
+ { OMAP4_TIMER5_CLKCTRL, omap4_timer5_bit_data, CLKF_SW_SUP, "timer5_sync_mux" },
+ { OMAP4_TIMER6_CLKCTRL, omap4_timer6_bit_data, CLKF_SW_SUP, "timer6_sync_mux" },
+ { OMAP4_TIMER7_CLKCTRL, omap4_timer7_bit_data, CLKF_SW_SUP, "timer7_sync_mux" },
+ { OMAP4_TIMER8_CLKCTRL, omap4_timer8_bit_data, CLKF_SW_SUP, "timer8_sync_mux" },
+ { OMAP4_WD_TIMER3_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l4_ao_clkctrl_regs[] __initconst = {
+ { OMAP4_SMARTREFLEX_MPU_CLKCTRL, NULL, CLKF_SW_SUP, "l4_wkup_clk_mux_ck" },
+ { OMAP4_SMARTREFLEX_IVA_CLKCTRL, NULL, CLKF_SW_SUP, "l4_wkup_clk_mux_ck" },
+ { OMAP4_SMARTREFLEX_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "l4_wkup_clk_mux_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l3_1_clkctrl_regs[] __initconst = {
+ { OMAP4_L3_MAIN_1_CLKCTRL, NULL, 0, "l3_div_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l3_2_clkctrl_regs[] __initconst = {
+ { OMAP4_L3_MAIN_2_CLKCTRL, NULL, 0, "l3_div_ck" },
+ { OMAP4_GPMC_CLKCTRL, NULL, CLKF_HW_SUP, "l3_div_ck" },
+ { OMAP4_OCMC_RAM_CLKCTRL, NULL, 0, "l3_div_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_ducati_clkctrl_regs[] __initconst = {
+ { OMAP4_IPU_CLKCTRL, NULL, CLKF_HW_SUP, "ducati_clk_mux_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l3_dma_clkctrl_regs[] __initconst = {
+ { OMAP4_DMA_SYSTEM_CLKCTRL, NULL, 0, "l3_div_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l3_emif_clkctrl_regs[] __initconst = {
+ { OMAP4_DMM_CLKCTRL, NULL, 0, "l3_div_ck" },
+ { OMAP4_EMIF1_CLKCTRL, NULL, CLKF_HW_SUP, "ddrphy_ck" },
+ { OMAP4_EMIF2_CLKCTRL, NULL, CLKF_HW_SUP, "ddrphy_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_d2d_clkctrl_regs[] __initconst = {
+ { OMAP4_C2C_CLKCTRL, NULL, 0, "div_core_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l4_cfg_clkctrl_regs[] __initconst = {
+ { OMAP4_L4_CFG_CLKCTRL, NULL, 0, "l4_div_ck" },
+ { OMAP4_SPINLOCK_CLKCTRL, NULL, 0, "l4_div_ck" },
+ { OMAP4_MAILBOX_CLKCTRL, NULL, 0, "l4_div_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l3_instr_clkctrl_regs[] __initconst = {
+ { OMAP4_L3_MAIN_3_CLKCTRL, NULL, CLKF_HW_SUP, "l3_div_ck" },
+ { OMAP4_L3_INSTR_CLKCTRL, NULL, CLKF_HW_SUP, "l3_div_ck" },
+ { OMAP4_OCP_WP_NOC_CLKCTRL, NULL, CLKF_HW_SUP, "l3_div_ck" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_ivahd_clkctrl_regs[] __initconst = {
+ { OMAP4_IVA_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_m5x2_ck" },
+ { OMAP4_SL2IF_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_m5x2_ck" },
+ { 0 },
+};
+
+static const char * const omap4_iss_ctrlclk_parents[] __initconst = {
+ "func_96m_fclk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_iss_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_iss_ctrlclk_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_fdif_fck_parents[] __initconst = {
+ "dpll_per_m4x2_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data omap4_fdif_fck_data __initconst = {
+ .max_div = 4,
+};
+
+static const struct omap_clkctrl_bit_data omap4_fdif_bit_data[] __initconst = {
+ { 24, TI_CLK_DIVIDER, omap4_fdif_fck_parents, &omap4_fdif_fck_data },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_iss_clkctrl_regs[] __initconst = {
+ { OMAP4_ISS_CLKCTRL, omap4_iss_bit_data, CLKF_SW_SUP, "ducati_clk_mux_ck" },
+ { OMAP4_FDIF_CLKCTRL, omap4_fdif_bit_data, CLKF_SW_SUP, "fdif_fck" },
+ { 0 },
+};
+
+static const char * const omap4_dss_dss_clk_parents[] __initconst = {
+ "dpll_per_m5x2_ck",
+ NULL,
+};
+
+static const char * const omap4_dss_48mhz_clk_parents[] __initconst = {
+ "func_48mc_fclk",
+ NULL,
+};
+
+static const char * const omap4_dss_sys_clk_parents[] __initconst = {
+ "syc_clk_div_ck",
+ NULL,
+};
+
+static const char * const omap4_dss_tv_clk_parents[] __initconst = {
+ "extalt_clkin_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_dss_core_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_dss_dss_clk_parents, NULL },
+ { 9, TI_CLK_GATE, omap4_dss_48mhz_clk_parents, NULL },
+ { 10, TI_CLK_GATE, omap4_dss_sys_clk_parents, NULL },
+ { 11, TI_CLK_GATE, omap4_dss_tv_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l3_dss_clkctrl_regs[] __initconst = {
+ { OMAP4_DSS_CORE_CLKCTRL, omap4_dss_core_bit_data, CLKF_SW_SUP, "dss_dss_clk" },
+ { 0 },
+};
+
+static const char * const omap4_sgx_clk_mux_parents[] __initconst = {
+ "dpll_core_m7x2_ck",
+ "dpll_per_m7x2_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_gpu_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_sgx_clk_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l3_gfx_clkctrl_regs[] __initconst = {
+ { OMAP4_GPU_CLKCTRL, omap4_gpu_bit_data, CLKF_SW_SUP, "sgx_clk_mux" },
+ { 0 },
+};
+
+static const char * const omap4_hsmmc1_fclk_parents[] __initconst = {
+ "func_64m_fclk",
+ "func_96m_fclk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_mmc1_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_hsmmc1_fclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_mmc2_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_hsmmc1_fclk_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_hsi_fck_parents[] __initconst = {
+ "dpll_per_m2x2_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data omap4_hsi_fck_data __initconst = {
+ .max_div = 4,
+};
+
+static const struct omap_clkctrl_bit_data omap4_hsi_bit_data[] __initconst = {
+ { 24, TI_CLK_DIVIDER, omap4_hsi_fck_parents, &omap4_hsi_fck_data },
+ { 0 },
+};
+
+static const char * const omap4_usb_host_hs_utmi_p1_clk_parents[] __initconst = {
+ "utmi_p1_gfclk",
+ NULL,
+};
+
+static const char * const omap4_usb_host_hs_utmi_p2_clk_parents[] __initconst = {
+ "utmi_p2_gfclk",
+ NULL,
+};
+
+static const char * const omap4_usb_host_hs_utmi_p3_clk_parents[] __initconst = {
+ "init_60m_fclk",
+ NULL,
+};
+
+static const char * const omap4_usb_host_hs_hsic480m_p1_clk_parents[] __initconst = {
+ "dpll_usb_m2_ck",
+ NULL,
+};
+
+static const char * const omap4_utmi_p1_gfclk_parents[] __initconst = {
+ "init_60m_fclk",
+ "xclk60mhsp1_ck",
+ NULL,
+};
+
+static const char * const omap4_utmi_p2_gfclk_parents[] __initconst = {
+ "init_60m_fclk",
+ "xclk60mhsp2_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_usb_host_hs_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_usb_host_hs_utmi_p1_clk_parents, NULL },
+ { 9, TI_CLK_GATE, omap4_usb_host_hs_utmi_p2_clk_parents, NULL },
+ { 10, TI_CLK_GATE, omap4_usb_host_hs_utmi_p3_clk_parents, NULL },
+ { 11, TI_CLK_GATE, omap4_usb_host_hs_utmi_p3_clk_parents, NULL },
+ { 12, TI_CLK_GATE, omap4_usb_host_hs_utmi_p3_clk_parents, NULL },
+ { 13, TI_CLK_GATE, omap4_usb_host_hs_hsic480m_p1_clk_parents, NULL },
+ { 14, TI_CLK_GATE, omap4_usb_host_hs_hsic480m_p1_clk_parents, NULL },
+ { 15, TI_CLK_GATE, omap4_dss_48mhz_clk_parents, NULL },
+ { 24, TI_CLK_MUX, omap4_utmi_p1_gfclk_parents, NULL },
+ { 25, TI_CLK_MUX, omap4_utmi_p2_gfclk_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_usb_otg_hs_xclk_parents[] __initconst = {
+ "otg_60m_gfclk",
+ NULL,
+};
+
+static const char * const omap4_otg_60m_gfclk_parents[] __initconst = {
+ "utmi_phy_clkout_ck",
+ "xclk60motg_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_usb_otg_hs_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_usb_otg_hs_xclk_parents, NULL },
+ { 24, TI_CLK_MUX, omap4_otg_60m_gfclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_usb_tll_hs_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_usb_host_hs_utmi_p3_clk_parents, NULL },
+ { 9, TI_CLK_GATE, omap4_usb_host_hs_utmi_p3_clk_parents, NULL },
+ { 10, TI_CLK_GATE, omap4_usb_host_hs_utmi_p3_clk_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_ocp2scp_usb_phy_phy_48m_parents[] __initconst = {
+ "func_48m_fclk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_ocp2scp_usb_phy_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_ocp2scp_usb_phy_phy_48m_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l3_init_clkctrl_regs[] __initconst = {
+ { OMAP4_MMC1_CLKCTRL, omap4_mmc1_bit_data, CLKF_SW_SUP, "hsmmc1_fclk" },
+ { OMAP4_MMC2_CLKCTRL, omap4_mmc2_bit_data, CLKF_SW_SUP, "hsmmc2_fclk" },
+ { OMAP4_HSI_CLKCTRL, omap4_hsi_bit_data, CLKF_HW_SUP, "hsi_fck" },
+ { OMAP4_USB_HOST_HS_CLKCTRL, omap4_usb_host_hs_bit_data, CLKF_SW_SUP, "init_60m_fclk" },
+ { OMAP4_USB_OTG_HS_CLKCTRL, omap4_usb_otg_hs_bit_data, CLKF_HW_SUP, "l3_div_ck" },
+ { OMAP4_USB_TLL_HS_CLKCTRL, omap4_usb_tll_hs_bit_data, CLKF_HW_SUP, "l4_div_ck" },
+ { OMAP4_USB_HOST_FS_CLKCTRL, NULL, CLKF_SW_SUP, "func_48mc_fclk" },
+ { OMAP4_OCP2SCP_USB_PHY_CLKCTRL, omap4_ocp2scp_usb_phy_bit_data, CLKF_HW_SUP, "ocp2scp_usb_phy_phy_48m" },
+ { 0 },
+};
+
+static const char * const omap4_cm2_dm10_mux_parents[] __initconst = {
+ "sys_clkin_ck",
+ "sys_32k_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer10_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_cm2_dm10_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer11_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_cm2_dm10_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer2_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_cm2_dm10_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer3_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_cm2_dm10_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer4_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_cm2_dm10_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer9_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_cm2_dm10_mux_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_gpio2_dbclk_parents[] __initconst = {
+ "sys_32k_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_gpio2_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_gpio3_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_gpio4_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_gpio5_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_gpio6_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_per_mcbsp4_gfclk_parents[] __initconst = {
+ "mcbsp4_sync_mux_ck",
+ "pad_clks_ck",
+ NULL,
+};
+
+static const char * const omap4_mcbsp4_sync_mux_ck_parents[] __initconst = {
+ "func_96m_fclk",
+ "per_abe_nc_fclk",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_mcbsp4_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_per_mcbsp4_gfclk_parents, NULL },
+ { 25, TI_CLK_MUX, omap4_mcbsp4_sync_mux_ck_parents, NULL },
+ { 0 },
+};
+
+static const char * const omap4_slimbus2_fclk_0_parents[] __initconst = {
+ "func_24mc_fclk",
+ NULL,
+};
+
+static const char * const omap4_slimbus2_fclk_1_parents[] __initconst = {
+ "per_abe_24m_fclk",
+ NULL,
+};
+
+static const char * const omap4_slimbus2_slimbus_clk_parents[] __initconst = {
+ "pad_slimbus_core_clks_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_bit_data omap4_slimbus2_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_slimbus2_fclk_0_parents, NULL },
+ { 9, TI_CLK_GATE, omap4_slimbus2_fclk_1_parents, NULL },
+ { 10, TI_CLK_GATE, omap4_slimbus2_slimbus_clk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l4_per_clkctrl_regs[] __initconst = {
+ { OMAP4_TIMER10_CLKCTRL, omap4_timer10_bit_data, CLKF_SW_SUP, "cm2_dm10_mux" },
+ { OMAP4_TIMER11_CLKCTRL, omap4_timer11_bit_data, CLKF_SW_SUP, "cm2_dm11_mux" },
+ { OMAP4_TIMER2_CLKCTRL, omap4_timer2_bit_data, CLKF_SW_SUP, "cm2_dm2_mux" },
+ { OMAP4_TIMER3_CLKCTRL, omap4_timer3_bit_data, CLKF_SW_SUP, "cm2_dm3_mux" },
+ { OMAP4_TIMER4_CLKCTRL, omap4_timer4_bit_data, CLKF_SW_SUP, "cm2_dm4_mux" },
+ { OMAP4_TIMER9_CLKCTRL, omap4_timer9_bit_data, CLKF_SW_SUP, "cm2_dm9_mux" },
+ { OMAP4_ELM_CLKCTRL, NULL, 0, "l4_div_ck" },
+ { OMAP4_GPIO2_CLKCTRL, omap4_gpio2_bit_data, CLKF_HW_SUP, "l4_div_ck" },
+ { OMAP4_GPIO3_CLKCTRL, omap4_gpio3_bit_data, CLKF_HW_SUP, "l4_div_ck" },
+ { OMAP4_GPIO4_CLKCTRL, omap4_gpio4_bit_data, CLKF_HW_SUP, "l4_div_ck" },
+ { OMAP4_GPIO5_CLKCTRL, omap4_gpio5_bit_data, CLKF_HW_SUP, "l4_div_ck" },
+ { OMAP4_GPIO6_CLKCTRL, omap4_gpio6_bit_data, CLKF_HW_SUP, "l4_div_ck" },
+ { OMAP4_HDQ1W_CLKCTRL, NULL, CLKF_SW_SUP, "func_12m_fclk" },
+ { OMAP4_I2C1_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { OMAP4_I2C2_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { OMAP4_I2C3_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { OMAP4_I2C4_CLKCTRL, NULL, CLKF_SW_SUP, "func_96m_fclk" },
+ { OMAP4_L4_PER_CLKCTRL, NULL, 0, "l4_div_ck" },
+ { OMAP4_MCBSP4_CLKCTRL, omap4_mcbsp4_bit_data, CLKF_SW_SUP, "per_mcbsp4_gfclk" },
+ { OMAP4_MCSPI1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_MCSPI2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_MCSPI3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_MCSPI4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_MMC3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_MMC4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_SLIMBUS2_CLKCTRL, omap4_slimbus2_bit_data, CLKF_SW_SUP, "slimbus2_fclk_0" },
+ { OMAP4_UART1_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_UART2_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_UART3_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_UART4_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { OMAP4_MMC5_CLKCTRL, NULL, CLKF_SW_SUP, "func_48m_fclk" },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_gpio1_bit_data[] __initconst = {
+ { 8, TI_CLK_GATE, omap4_gpio2_dbclk_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_bit_data omap4_timer1_bit_data[] __initconst = {
+ { 24, TI_CLK_MUX, omap4_cm2_dm10_mux_parents, NULL },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_l4_wkup_clkctrl_regs[] __initconst = {
+ { OMAP4_L4_WKUP_CLKCTRL, NULL, 0, "l4_wkup_clk_mux_ck" },
+ { OMAP4_WD_TIMER2_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
+ { OMAP4_GPIO1_CLKCTRL, omap4_gpio1_bit_data, CLKF_HW_SUP, "l4_wkup_clk_mux_ck" },
+ { OMAP4_TIMER1_CLKCTRL, omap4_timer1_bit_data, CLKF_SW_SUP, "dmt1_clk_mux" },
+ { OMAP4_COUNTER_32K_CLKCTRL, NULL, 0, "sys_32k_ck" },
+ { OMAP4_KBD_CLKCTRL, NULL, CLKF_SW_SUP, "sys_32k_ck" },
+ { 0 },
+};
+
+static const char * const omap4_pmd_stm_clock_mux_ck_parents[] __initconst = {
+ "sys_clkin_ck",
+ "dpll_core_m6x2_ck",
+ "tie_low_clock_ck",
+ NULL,
+};
+
+static const char * const omap4_trace_clk_div_div_ck_parents[] __initconst = {
+ "pmd_trace_clk_mux_ck",
+ NULL,
+};
+
+static const int omap4_trace_clk_div_div_ck_divs[] __initconst = {
+ 0,
+ 1,
+ 2,
+ 0,
+ 4,
+ -1,
+};
+
+static const struct omap_clkctrl_div_data omap4_trace_clk_div_div_ck_data __initconst = {
+ .dividers = omap4_trace_clk_div_div_ck_divs,
+};
+
+static const char * const omap4_stm_clk_div_ck_parents[] __initconst = {
+ "pmd_stm_clock_mux_ck",
+ NULL,
+};
+
+static const struct omap_clkctrl_div_data omap4_stm_clk_div_ck_data __initconst = {
+ .max_div = 64,
+};
+
+static const struct omap_clkctrl_bit_data omap4_debugss_bit_data[] __initconst = {
+ { 20, TI_CLK_MUX, omap4_pmd_stm_clock_mux_ck_parents, NULL },
+ { 22, TI_CLK_MUX, omap4_pmd_stm_clock_mux_ck_parents, NULL },
+ { 24, TI_CLK_DIVIDER, omap4_trace_clk_div_div_ck_parents, &omap4_trace_clk_div_div_ck_data },
+ { 27, TI_CLK_DIVIDER, omap4_stm_clk_div_ck_parents, &omap4_stm_clk_div_ck_data },
+ { 0 },
+};
+
+static const struct omap_clkctrl_reg_data omap4_emu_sys_clkctrl_regs[] __initconst = {
+ { OMAP4_DEBUGSS_CLKCTRL, omap4_debugss_bit_data, 0, "trace_clk_div_ck" },
+ { 0 },
+};
+
+const struct omap_clkctrl_data omap4_clkctrl_data[] __initconst = {
+ { 0x4a004320, omap4_mpuss_clkctrl_regs },
+ { 0x4a004420, omap4_tesla_clkctrl_regs },
+ { 0x4a004520, omap4_abe_clkctrl_regs },
+ { 0x4a008620, omap4_l4_ao_clkctrl_regs },
+ { 0x4a008720, omap4_l3_1_clkctrl_regs },
+ { 0x4a008820, omap4_l3_2_clkctrl_regs },
+ { 0x4a008920, omap4_ducati_clkctrl_regs },
+ { 0x4a008a20, omap4_l3_dma_clkctrl_regs },
+ { 0x4a008b20, omap4_l3_emif_clkctrl_regs },
+ { 0x4a008c20, omap4_d2d_clkctrl_regs },
+ { 0x4a008d20, omap4_l4_cfg_clkctrl_regs },
+ { 0x4a008e20, omap4_l3_instr_clkctrl_regs },
+ { 0x4a008f20, omap4_ivahd_clkctrl_regs },
+ { 0x4a009020, omap4_iss_clkctrl_regs },
+ { 0x4a009120, omap4_l3_dss_clkctrl_regs },
+ { 0x4a009220, omap4_l3_gfx_clkctrl_regs },
+ { 0x4a009320, omap4_l3_init_clkctrl_regs },
+ { 0x4a009420, omap4_l4_per_clkctrl_regs },
+ { 0x4a307820, omap4_l4_wkup_clkctrl_regs },
+ { 0x4a307a20, omap4_emu_sys_clkctrl_regs },
+ { 0 },
+};
+
static struct ti_dt_clk omap44xx_clks[] = {
DT_CLK("smp_twd", NULL, "mpu_periphclk"),
DT_CLK("omapdss_dss", "ick", "dss_fck"),
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
new file mode 100644
index 000000000000..53e71d0503ec
--- /dev/null
+++ b/drivers/clk/ti/clkctrl.c
@@ -0,0 +1,497 @@
+/*
+ * OMAP clkctrl clock support
+ *
+ * Copyright (C) 2017 Texas Instruments, Inc.
+ *
+ * Tero Kristo <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+#include <linux/delay.h>
+#include "clock.h"
+
+#define NO_IDLEST 0x1
+
+#define OMAP4_MODULEMODE_MASK 0x3
+
+#define MODULEMODE_HWCTRL 0x1
+#define MODULEMODE_SWCTRL 0x2
+
+#define OMAP4_IDLEST_MASK (0x3 << 16)
+#define OMAP4_IDLEST_SHIFT 16
+
+#define CLKCTRL_IDLEST_FUNCTIONAL 0x0
+#define CLKCTRL_IDLEST_INTERFACE_IDLE 0x2
+#define CLKCTRL_IDLEST_DISABLED 0x3
+
+/* These timeouts are in us */
+#define OMAP4_MAX_MODULE_READY_TIME 2000
+#define OMAP4_MAX_MODULE_DISABLE_TIME 5000
+
+static bool _early_timeout = true;
+
+struct omap_clkctrl_provider {
+ void __iomem *base;
+ struct list_head clocks;
+};
+
+struct omap_clkctrl_clk {
+ struct clk_hw *clk;
+ u16 reg_offset;
+ int bit_offset;
+ struct list_head node;
+};
+
+union omap4_timeout {
+ u32 cycles;
+ ktime_t start;
+};
+
+static const struct omap_clkctrl_data default_clkctrl_data[] __initconst = {
+ { 0 },
+};
+
+static u32 _omap4_idlest(u32 val)
+{
+ val &= OMAP4_IDLEST_MASK;
+ val >>= OMAP4_IDLEST_SHIFT;
+
+ return val;
+}
+
+static bool _omap4_is_idle(u32 val)
+{
+ val = _omap4_idlest(val);
+
+ return val == CLKCTRL_IDLEST_DISABLED;
+}
+
+static bool _omap4_is_ready(u32 val)
+{
+ val = _omap4_idlest(val);
+
+ return val == CLKCTRL_IDLEST_FUNCTIONAL ||
+ val == CLKCTRL_IDLEST_INTERFACE_IDLE;
+}
+
+static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
+{
+ if (unlikely(_early_timeout)) {
+ if (time->cycles++ < timeout) {
+ udelay(1);
+ return false;
+ }
+ } else {
+ if (!ktime_to_ns(time->start)) {
+ time->start = ktime_get();
+ return false;
+ }
+
+ if (ktime_us_delta(ktime_get(), time->start) < timeout) {
+ cpu_relax();
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int __init _omap4_disable_early_timeout(void)
+{
+ _early_timeout = false;
+
+ return 0;
+}
+arch_initcall(_omap4_disable_early_timeout);
+
+static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ u32 val;
+ int ret;
+ union omap4_timeout timeout = { 0 };
+
+ if (!clk->enable_bit)
+ return 0;
+
+ if (clk->clkdm) {
+ ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
+ if (ret) {
+ WARN(1,
+ "%s: could not enable %s's clockdomain %s: %d\n",
+ __func__, clk_hw_get_name(hw),
+ clk->clkdm_name, ret);
+ return ret;
+ }
+ }
+
+ val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
+
+ val &= ~OMAP4_MODULEMODE_MASK;
+ val |= clk->enable_bit;
+
+ ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);
+
+ if (clk->flags & NO_IDLEST)
+ return 0;
+
+ /* Wait until module is enabled */
+ while (!_omap4_is_ready(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
+ if (_omap4_is_timeout(&timeout, OMAP4_MAX_MODULE_READY_TIME)) {
+ pr_err("%s: failed to enable\n", clk_hw_get_name(hw));
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ u32 val;
+ union omap4_timeout timeout = { 0 };
+
+ if (!clk->enable_bit)
+ return;
+
+ val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
+
+ val &= ~OMAP4_MODULEMODE_MASK;
+
+ ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);
+
+ if (clk->flags & NO_IDLEST)
+ goto exit;
+
+ /* Wait until module is disabled */
+ while (!_omap4_is_idle(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
+ if (_omap4_is_timeout(&timeout,
+ OMAP4_MAX_MODULE_DISABLE_TIME)) {
+ pr_err("%s: failed to disable\n", clk_hw_get_name(hw));
+ break;
+ }
+ }
+
+exit:
+ if (clk->clkdm)
+ ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
+}
+
+static int _omap4_clkctrl_clk_is_enabled(struct clk_hw *hw)
+{
+ struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+ u32 val;
+
+ val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
+
+ if (val & clk->enable_bit)
+ return 1;
+
+ return 0;
+}
+
+static const struct clk_ops omap4_clkctrl_clk_ops = {
+ .enable = _omap4_clkctrl_clk_enable,
+ .disable = _omap4_clkctrl_clk_disable,
+ .is_enabled = _omap4_clkctrl_clk_is_enabled,
+};
+
+static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct omap_clkctrl_provider *provider = data;
+ struct omap_clkctrl_clk *entry;
+ bool found = false;
+
+ if (clkspec->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
+ pr_debug("%s: looking for %x:%x\n", __func__,
+ clkspec->args[0], clkspec->args[1]);
+
+ list_for_each_entry(entry, &provider->clocks, node) {
+ if (entry->reg_offset == clkspec->args[0] &&
+ entry->bit_offset == clkspec->args[1]) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return ERR_PTR(-EINVAL);
+
+ return entry->clk;
+}
+
+static int __init
+_ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
+ struct device_node *node, struct clk_hw *clk_hw,
+ u16 offset, u8 bit, const char * const *parents,
+ int num_parents, const struct clk_ops *ops)
+{
+ struct clk_init_data init = { NULL };
+ struct clk *clk;
+ struct omap_clkctrl_clk *clkctrl_clk;
+ int ret = 0;
+
+ init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d", node->parent->name,
+ node->name, offset, bit);
+ clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
+ if (!init.name || !clkctrl_clk) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ clk_hw->init = &init;
+ init.parent_names = parents;
+ init.num_parents = num_parents;
+ init.ops = ops;
+ init.flags = CLK_IS_BASIC;
+
+ clk = ti_clk_register(NULL, clk_hw, init.name);
+ if (IS_ERR_OR_NULL(clk)) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ clkctrl_clk->reg_offset = offset;
+ clkctrl_clk->bit_offset = bit;
+ clkctrl_clk->clk = clk_hw;
+
+ list_add(&clkctrl_clk->node, &provider->clocks);
+
+ return 0;
+
+cleanup:
+ kfree(init.name);
+ kfree(clkctrl_clk);
+ return ret;
+}
+
+static void __init
+_ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
+ struct device_node *node, u16 offset,
+ const struct omap_clkctrl_bit_data *data,
+ void __iomem *reg)
+{
+ struct clk_hw_omap *clk_hw;
+
+ clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+ if (!clk_hw)
+ return;
+
+ clk_hw->enable_bit = data->bit;
+ clk_hw->enable_reg.ptr = reg;
+
+ if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset,
+ data->bit, data->parents, 1,
+ &omap_gate_clk_ops))
+ kfree(clk_hw);
+}
+
+static void __init
+_ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
+ struct device_node *node, u16 offset,
+ const struct omap_clkctrl_bit_data *data,
+ void __iomem *reg)
+{
+ struct clk_omap_mux *mux;
+ int num_parents = 0;
+ const char * const *pname;
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return;
+
+ pname = data->parents;
+ while (*pname) {
+ num_parents++;
+ pname++;
+ }
+
+ mux->mask = num_parents;
+ mux->mask = (1 << fls(mux->mask)) - 1;
+
+ mux->shift = data->bit;
+ mux->reg.ptr = reg;
+
+ if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset,
+ data->bit, data->parents, num_parents,
+ &ti_clk_mux_ops))
+ kfree(mux);
+}
+
+static void __init
+_ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
+ struct device_node *node, u16 offset,
+ const struct omap_clkctrl_bit_data *data,
+ void __iomem *reg)
+{
+ struct clk_omap_divider *div;
+ const struct omap_clkctrl_div_data *div_data = data->data;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return;
+
+ div->reg.ptr = reg;
+ div->shift = data->bit;
+
+ if (ti_clk_parse_divider_data((int *)div_data->dividers,
+ div_data->max_div, 0, 0,
+ &div->width, &div->table)) {
+ pr_err("%s: Data parsing for %s:%04x:%d failed\n", __func__,
+ node->name, offset, data->bit);
+ kfree(div);
+ return;
+ }
+
+ if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset,
+ data->bit, data->parents, 1,
+ &ti_clk_divider_ops))
+ kfree(div);
+}
+
+static void __init
+_ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
+ struct device_node *node,
+ const struct omap_clkctrl_reg_data *data,
+ void __iomem *reg)
+{
+ const struct omap_clkctrl_bit_data *bits = data->bit_data;
+
+ if (!bits)
+ return;
+
+ while (bits->bit) {
+ switch (bits->type) {
+ case TI_CLK_GATE:
+ _ti_clkctrl_setup_gate(provider, node, data->offset,
+ bits, reg);
+ break;
+
+ case TI_CLK_DIVIDER:
+ _ti_clkctrl_setup_div(provider, node, data->offset,
+ bits, reg);
+ break;
+
+ case TI_CLK_MUX:
+ _ti_clkctrl_setup_mux(provider, node, data->offset,
+ bits, reg);
+ break;
+
+ default:
+ pr_err("%s: bad subclk type: %d\n", __func__,
+ bits->type);
+ return;
+ }
+ bits++;
+ }
+}
+
+static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
+{
+ struct omap_clkctrl_provider *provider;
+ const struct omap_clkctrl_data *data = default_clkctrl_data;
+ const struct omap_clkctrl_reg_data *reg_data;
+ struct clk_init_data init = { NULL };
+ struct clk_hw_omap *hw;
+ struct clk *clk;
+ struct omap_clkctrl_clk *clkctrl_clk;
+ const __be32 *addrp;
+ u32 addr;
+
+ addrp = of_get_address(node, 0, NULL, NULL);
+ addr = (u32)of_translate_address(node, addrp);
+
+#ifdef CONFIG_ARCH_OMAP4
+ if (of_machine_is_compatible("ti,omap4"))
+ data = omap4_clkctrl_data;
+#endif
+
+ while (data->addr) {
+ if (addr == data->addr)
+ break;
+
+ data++;
+ }
+
+ if (!data->addr) {
+ pr_err("%s not found from clkctrl data.\n", node->name);
+ return;
+ }
+
+ provider = kzalloc(sizeof(*provider), GFP_KERNEL);
+ if (!provider)
+ return;
+
+ provider->base = of_iomap(node, 0);
+
+ INIT_LIST_HEAD(&provider->clocks);
+
+ /* Generate clocks */
+ reg_data = data->regs;
+
+ while (reg_data->parent) {
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return;
+
+ hw->enable_reg.ptr = provider->base + reg_data->offset;
+
+ _ti_clkctrl_setup_subclks(provider, node, reg_data,
+ hw->enable_reg.ptr);
+
+ if (reg_data->flags & CLKF_SW_SUP)
+ hw->enable_bit = MODULEMODE_SWCTRL;
+ if (reg_data->flags & CLKF_HW_SUP)
+ hw->enable_bit = MODULEMODE_HWCTRL;
+ if (reg_data->flags & CLKF_NO_IDLEST)
+ hw->flags |= NO_IDLEST;
+
+ init.parent_names = &reg_data->parent;
+ init.num_parents = 1;
+ init.flags = 0;
+ init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d",
+ node->parent->name, node->name,
+ reg_data->offset, 0);
+ clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
+ if (!init.name || !clkctrl_clk)
+ goto cleanup;
+
+ init.ops = &omap4_clkctrl_clk_ops;
+ hw->hw.init = &init;
+
+ clk = ti_clk_register(NULL, &hw->hw, init.name);
+ if (IS_ERR_OR_NULL(clk))
+ goto cleanup;
+
+ clkctrl_clk->reg_offset = reg_data->offset;
+ clkctrl_clk->clk = &hw->hw;
+
+ list_add(&clkctrl_clk->node, &provider->clocks);
+
+ reg_data++;
+ }
+
+ of_clk_add_hw_provider(node, _ti_omap4_clkctrl_xlate, provider);
+ return;
+
+cleanup:
+ kfree(hw);
+ kfree(init.name);
+ kfree(clkctrl_clk);
+}
+CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
+ _ti_omap4_clkctrl_setup);
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index 3f7b26540be8..561dbe99ced7 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -203,6 +203,37 @@ struct ti_dt_clk {
.node_name = name, \
}
+/* CLKCTRL type definitions */
+struct omap_clkctrl_div_data {
+ const int *dividers;
+ int max_div;
+};
+
+struct omap_clkctrl_bit_data {
+ u8 bit;
+ u8 type;
+ const char * const *parents;
+ const void *data;
+};
+
+struct omap_clkctrl_reg_data {
+ u16 offset;
+ const struct omap_clkctrl_bit_data *bit_data;
+ u16 flags;
+ const char *parent;
+};
+
+struct omap_clkctrl_data {
+ u32 addr;
+ const struct omap_clkctrl_reg_data *regs;
+};
+
+extern const struct omap_clkctrl_data omap4_clkctrl_data[];
+
+#define CLKF_SW_SUP BIT(0)
+#define CLKF_HW_SUP BIT(1)
+#define CLKF_NO_IDLEST BIT(2)
+
typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
struct clk *ti_clk_register_gate(struct ti_clk *setup);
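
The clock.h additions above define the static data the new clkctrl driver consumes: per-domain arrays of omap_clkctrl_reg_data, collected into an address-keyed omap_clkctrl_data table that _ti_omap4_clkctrl_setup() walks after matching the SoC compatible. A sketch only (not from the patch: the example_* names, the 0x20 offset, the base address and the parent clock name are invented, and a real port would also need its own compatible check next to the CONFIG_ARCH_OMAP4 one in clkctrl.c plus an extern declaration here), another SoC would plug in along these lines:

        static const struct omap_clkctrl_reg_data example_l4_per_clkctrl_regs[] __initconst = {
                /* offset, optional bit data, modulemode flags, main parent clock */
                { 0x20, NULL, CLKF_SW_SUP, "func_48m_fclk" },
                { 0 },                          /* table terminator */
        };

        const struct omap_clkctrl_data example_clkctrl_data[] __initconst = {
                /* physical address of the domain's first CLKCTRL register */
                { 0x12345020, example_l4_per_clkctrl_regs },
                { 0 },
        };

The omap4_clkctrl_data table in clk-44xx.c above is the real instance of this pattern, with one entry per CM instance register block.
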
diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
index c8027d909429..ad0218182a9f 100644
--- a/drivers/clk/uniphier/clk-uniphier-sys.c
+++ b/drivers/clk/uniphier/clk-uniphier-sys.c
@@ -29,11 +29,18 @@
UNIPHIER_CLK_FACTOR("sd-200m", -1, "spll", 1, 10), \
UNIPHIER_CLK_FACTOR("sd-133m", -1, "spll", 1, 15)
+/* Denali driver requires clk_x rate (clk: 50MHz, clk_x & ecc_clk: 200MHz) */
#define UNIPHIER_SLD3_SYS_CLK_NAND(idx) \
- UNIPHIER_CLK_GATE("nand", (idx), NULL, 0x2104, 2)
+ UNIPHIER_CLK_FACTOR("nand-200m", -1, "spll", 1, 8), \
+ UNIPHIER_CLK_GATE("nand", (idx), "nand-200m", 0x2104, 2)
+
+#define UNIPHIER_PRO5_SYS_CLK_NAND(idx) \
+ UNIPHIER_CLK_FACTOR("nand-200m", -1, "spll", 1, 12), \
+ UNIPHIER_CLK_GATE("nand", (idx), "nand-200m", 0x2104, 2)
#define UNIPHIER_LD11_SYS_CLK_NAND(idx) \
- UNIPHIER_CLK_GATE("nand", (idx), NULL, 0x210c, 0)
+ UNIPHIER_CLK_FACTOR("nand-200m", -1, "spll", 1, 10), \
+ UNIPHIER_CLK_GATE("nand", (idx), "nand-200m", 0x210c, 0)
#define UNIPHIER_LD11_SYS_CLK_EMMC(idx) \
UNIPHIER_CLK_GATE("emmc", (idx), NULL, 0x210c, 2)
@@ -114,7 +121,7 @@ const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("dapll2", -1, "ref", 144, 125), /* 2949.12 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "dapll2", 1, 40),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
- UNIPHIER_SLD3_SYS_CLK_NAND(2),
+ UNIPHIER_PRO5_SYS_CLK_NAND(2),
UNIPHIER_PRO5_SYS_CLK_SD,
UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* HSC */
UNIPHIER_PRO4_SYS_CLK_GIO(12), /* PCIe, USB3 */
@@ -127,7 +134,7 @@ const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[] = {
UNIPHIER_CLK_FACTOR("spll", -1, "ref", 96, 1), /* 2400 MHz */
UNIPHIER_CLK_FACTOR("uart", 0, "spll", 1, 27),
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
- UNIPHIER_SLD3_SYS_CLK_NAND(2),
+ UNIPHIER_PRO5_SYS_CLK_NAND(2),
UNIPHIER_PRO5_SYS_CLK_SD,
UNIPHIER_SLD3_SYS_CLK_STDMAC(8), /* HSC, RLE */
/* GIO is always clock-enabled: no function for 0x2104 bit6 */
diff --git a/drivers/clk/versatile/Makefile b/drivers/clk/versatile/Makefile
index 794130402c8d..58b54b814a6d 100644
--- a/drivers/clk/versatile/Makefile
+++ b/drivers/clk/versatile/Makefile
@@ -1,6 +1,5 @@
# Makefile for Versatile-specific clocks
obj-$(CONFIG_ICST) += icst.o clk-icst.o clk-versatile.o
obj-$(CONFIG_INTEGRATOR_IMPD1) += clk-impd1.o
-obj-$(CONFIG_ARCH_REALVIEW) += clk-realview.o
obj-$(CONFIG_CLK_SP810) += clk-sp810.o
obj-$(CONFIG_CLK_VEXPRESS_OSC) += clk-vexpress-osc.o
diff --git a/drivers/clk/versatile/clk-realview.c b/drivers/clk/versatile/clk-realview.c
deleted file mode 100644
index 6fdfee3232f4..000000000000
--- a/drivers/clk/versatile/clk-realview.c
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Clock driver for the ARM RealView boards
- * Copyright (C) 2012 Linus Walleij
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/clkdev.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/clk-provider.h>
-
-#include "icst.h"
-#include "clk-icst.h"
-
-#define REALVIEW_SYS_OSC0_OFFSET 0x0C
-#define REALVIEW_SYS_OSC1_OFFSET 0x10
-#define REALVIEW_SYS_OSC2_OFFSET 0x14
-#define REALVIEW_SYS_OSC3_OFFSET 0x18
-#define REALVIEW_SYS_OSC4_OFFSET 0x1C /* OSC1 for RealView/AB */
-#define REALVIEW_SYS_LOCK_OFFSET 0x20
-
-/*
- * Implementation of the ARM RealView clock trees.
- */
-
-static const struct icst_params realview_oscvco_params = {
- .ref = 24000000,
- .vco_max = ICST307_VCO_MAX,
- .vco_min = ICST307_VCO_MIN,
- .vd_min = 4 + 8,
- .vd_max = 511 + 8,
- .rd_min = 1 + 2,
- .rd_max = 127 + 2,
- .s2div = icst307_s2div,
- .idx2s = icst307_idx2s,
-};
-
-static const struct clk_icst_desc realview_osc0_desc __initconst = {
- .params = &realview_oscvco_params,
- .vco_offset = REALVIEW_SYS_OSC0_OFFSET,
- .lock_offset = REALVIEW_SYS_LOCK_OFFSET,
-};
-
-static const struct clk_icst_desc realview_osc4_desc __initconst = {
- .params = &realview_oscvco_params,
- .vco_offset = REALVIEW_SYS_OSC4_OFFSET,
- .lock_offset = REALVIEW_SYS_LOCK_OFFSET,
-};
-
-/*
- * realview_clk_init() - set up the RealView clock tree
- */
-void __init realview_clk_init(void __iomem *sysbase, bool is_pb1176)
-{
- struct clk *clk;
-
- /* APB clock dummy */
- clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, 0, 0);
- clk_register_clkdev(clk, "apb_pclk", NULL);
-
- /* 24 MHz clock */
- clk = clk_register_fixed_rate(NULL, "clk24mhz", NULL, 0, 24000000);
- clk_register_clkdev(clk, NULL, "dev:uart0");
- clk_register_clkdev(clk, NULL, "dev:uart1");
- clk_register_clkdev(clk, NULL, "dev:uart2");
- clk_register_clkdev(clk, NULL, "fpga:kmi0");
- clk_register_clkdev(clk, NULL, "fpga:kmi1");
- clk_register_clkdev(clk, NULL, "fpga:mmc0");
- clk_register_clkdev(clk, NULL, "dev:ssp0");
- if (is_pb1176) {
- /*
- * UART3 is on the dev chip in PB1176
- * UART4 only exists in PB1176
- */
- clk_register_clkdev(clk, NULL, "dev:uart3");
- clk_register_clkdev(clk, NULL, "dev:uart4");
- } else
- clk_register_clkdev(clk, NULL, "fpga:uart3");
-
-
- /* 1 MHz clock */
- clk = clk_register_fixed_rate(NULL, "clk1mhz", NULL, 0, 1000000);
- clk_register_clkdev(clk, NULL, "sp804");
-
- /* ICST VCO clock */
- if (is_pb1176)
- clk = icst_clk_register(NULL, &realview_osc0_desc,
- "osc0", NULL, sysbase);
- else
- clk = icst_clk_register(NULL, &realview_osc4_desc,
- "osc4", NULL, sysbase);
-
- clk_register_clkdev(clk, NULL, "dev:clcd");
- clk_register_clkdev(clk, NULL, "issp:clcd");
-}
diff --git a/drivers/clk/zte/clk-zx296718.c b/drivers/clk/zte/clk-zx296718.c
index a10962988ba8..27f853d4c76b 100644
--- a/drivers/clk/zte/clk-zx296718.c
+++ b/drivers/clk/zte/clk-zx296718.c
@@ -932,10 +932,10 @@ PNAME(audio_timer_p) = {
};
static struct zx_clk_mux audio_mux_clk[] = {
- MUX(0, "i2s0_wclk_mux", audio_wclk_common_p, AUDIO_I2S0_CLK, 0, 1),
- MUX(0, "i2s1_wclk_mux", audio_wclk_common_p, AUDIO_I2S1_CLK, 0, 1),
- MUX(0, "i2s2_wclk_mux", audio_wclk_common_p, AUDIO_I2S2_CLK, 0, 1),
- MUX(0, "i2s3_wclk_mux", audio_wclk_common_p, AUDIO_I2S3_CLK, 0, 1),
+ MUX(I2S0_WCLK_MUX, "i2s0_wclk_mux", audio_wclk_common_p, AUDIO_I2S0_CLK, 0, 1),
+ MUX(I2S1_WCLK_MUX, "i2s1_wclk_mux", audio_wclk_common_p, AUDIO_I2S1_CLK, 0, 1),
+ MUX(I2S2_WCLK_MUX, "i2s2_wclk_mux", audio_wclk_common_p, AUDIO_I2S2_CLK, 0, 1),
+ MUX(I2S3_WCLK_MUX, "i2s3_wclk_mux", audio_wclk_common_p, AUDIO_I2S3_CLK, 0, 1),
MUX(0, "i2c0_wclk_mux", audio_wclk_common_p, AUDIO_I2C0_CLK, 0, 1),
MUX(0, "spdif0_wclk_mux", audio_wclk_common_p, AUDIO_SPDIF0_CLK, 0, 1),
MUX(0, "spdif1_wclk_mux", audio_wclk_common_p, AUDIO_SPDIF1_CLK, 0, 1),
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 12409a519cc5..37b0698b7193 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -32,18 +32,18 @@ static struct cpuidle_driver powernv_idle_driver = {
.owner = THIS_MODULE,
};
-static int max_idle_state;
-static struct cpuidle_state *cpuidle_state_table;
+static int max_idle_state __read_mostly;
+static struct cpuidle_state *cpuidle_state_table __read_mostly;
struct stop_psscr_table {
u64 val;
u64 mask;
};
-static struct stop_psscr_table stop_psscr_table[CPUIDLE_STATE_MAX];
+static struct stop_psscr_table stop_psscr_table[CPUIDLE_STATE_MAX] __read_mostly;
-static u64 snooze_timeout;
-static bool snooze_timeout_en;
+static u64 snooze_timeout __read_mostly;
+static bool snooze_timeout_en __read_mostly;
static int snooze_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
@@ -51,21 +51,30 @@ static int snooze_loop(struct cpuidle_device *dev,
{
u64 snooze_exit_time;
- local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG);
+ local_irq_enable();
+
snooze_exit_time = get_tb() + snooze_timeout;
ppc64_runlatch_off();
HMT_very_low();
while (!need_resched()) {
- if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time)
+ if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time) {
+ /*
+ * Task has not woken up but we are exiting the polling
+ * loop anyway. Require a barrier after polling is
+ * cleared to order subsequent test of need_resched().
+ */
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ smp_mb();
break;
+ }
}
HMT_medium();
ppc64_runlatch_on();
clear_thread_flag(TIF_POLLING_NRFLAG);
- smp_mb();
+
return index;
}
@@ -73,9 +82,8 @@ static int nap_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- ppc64_runlatch_off();
- power7_idle();
- ppc64_runlatch_on();
+ power7_idle_type(PNV_THREAD_NAP);
+
return index;
}
@@ -98,7 +106,8 @@ static int fastsleep_loop(struct cpuidle_device *dev,
new_lpcr &= ~LPCR_PECE1;
mtspr(SPRN_LPCR, new_lpcr);
- power7_sleep();
+
+ power7_idle_type(PNV_THREAD_SLEEP);
mtspr(SPRN_LPCR, old_lpcr);
@@ -110,10 +119,8 @@ static int stop_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
- ppc64_runlatch_off();
- power9_idle_stop(stop_psscr_table[index].val,
+ power9_idle_type(stop_psscr_table[index].val,
stop_psscr_table[index].mask);
- ppc64_runlatch_on();
return index;
}
@@ -354,6 +361,7 @@ static int powernv_add_idle_states(void)
for (i = 0; i < dt_idle_states; i++) {
unsigned int exit_latency, target_residency;
+ bool stops_timebase = false;
/*
* If an idle state has exit latency beyond
* POWERNV_THRESHOLD_LATENCY_NS then don't use it
@@ -381,6 +389,9 @@ static int powernv_add_idle_states(void)
}
}
+ if (flags[i] & OPAL_PM_TIMEBASE_STOP)
+ stops_timebase = true;
+
/*
* For nap and fastsleep, use default target_residency
* values if f/w does not expose it.
@@ -392,8 +403,7 @@ static int powernv_add_idle_states(void)
add_powernv_state(nr_idle_states, "Nap",
CPUIDLE_FLAG_NONE, nap_loop,
target_residency, exit_latency, 0, 0);
- } else if ((flags[i] & OPAL_PM_STOP_INST_FAST) &&
- !(flags[i] & OPAL_PM_TIMEBASE_STOP)) {
+ } else if (has_stop_states && !stops_timebase) {
add_powernv_state(nr_idle_states, names[i],
CPUIDLE_FLAG_NONE, stop_loop,
target_residency, exit_latency,
@@ -405,8 +415,8 @@ static int powernv_add_idle_states(void)
* within this config dependency check.
*/
#ifdef CONFIG_TICK_ONESHOT
- if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
- flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
+ else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
+ flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
if (!rc)
target_residency = 300000;
/* Add FASTSLEEP state */
@@ -414,14 +424,15 @@ static int powernv_add_idle_states(void)
CPUIDLE_FLAG_TIMER_STOP,
fastsleep_loop,
target_residency, exit_latency, 0, 0);
- } else if ((flags[i] & OPAL_PM_STOP_INST_DEEP) &&
- (flags[i] & OPAL_PM_TIMEBASE_STOP)) {
+ } else if (has_stop_states && stops_timebase) {
add_powernv_state(nr_idle_states, names[i],
CPUIDLE_FLAG_TIMER_STOP, stop_loop,
target_residency, exit_latency,
psscr_val[i], psscr_mask[i]);
}
#endif
+ else
+ continue;
nr_idle_states++;
}
out:
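
Editor's aside: the snooze-loop hunks above (and the matching pseries change further down) rest on one ordering rule: clear the polling flag and issue a full barrier before the final need_resched() test, otherwise a waker that only sets the resched flag (trusting the polling flag to avoid an IPI) can be missed. Below is a minimal standalone C sketch of that rule, not part of the patch; the atomic_bool flags and helper names are userspace stand-ins for TIF_POLLING_NRFLAG, TIF_NEED_RESCHED and smp_mb(), not kernel APIs.

/*
 * Illustrative userspace analogue of the snooze-loop ordering fix.
 * The flags and helpers are stand-ins, not kernel interfaces.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool polling;       /* models TIF_POLLING_NRFLAG */
static atomic_bool need_resched;  /* models TIF_NEED_RESCHED */

/* Waker side: only send an expensive IPI if the target is not polling. */
static void wake_target(void)
{
	atomic_store(&need_resched, true);
	if (!atomic_load(&polling))
		puts("send IPI");  /* polling CPUs pick the flag up themselves */
}

/* Idle side: leave the poll loop on timeout without losing a wakeup. */
static void snooze(int budget)
{
	atomic_store(&polling, true);
	while (!atomic_load(&need_resched)) {
		if (budget-- <= 0) {
			/*
			 * Clear polling *before* the last check and order the
			 * two with a full fence, mirroring clear_thread_flag()
			 * followed by smp_mb() in the patch (seq_cst stores
			 * already give this in the C11 model; the fence is
			 * kept to make the intent explicit).
			 */
			atomic_store(&polling, false);
			atomic_thread_fence(memory_order_seq_cst);
			break;
		}
	}
	atomic_store(&polling, false);
	printf("left snooze, need_resched=%d\n", atomic_load(&need_resched));
}

int main(void)
{
	snooze(3);
	wake_target();
	return 0;
}
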
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 166ccd711ec9..e9b3853d93ea 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -25,10 +25,10 @@ struct cpuidle_driver pseries_idle_driver = {
.owner = THIS_MODULE,
};
-static int max_idle_state;
-static struct cpuidle_state *cpuidle_state_table;
-static u64 snooze_timeout;
-static bool snooze_timeout_en;
+static int max_idle_state __read_mostly;
+static struct cpuidle_state *cpuidle_state_table __read_mostly;
+static u64 snooze_timeout __read_mostly;
+static bool snooze_timeout_en __read_mostly;
static inline void idle_loop_prolog(unsigned long *in_purr)
{
@@ -62,21 +62,29 @@ static int snooze_loop(struct cpuidle_device *dev,
unsigned long in_purr;
u64 snooze_exit_time;
+ set_thread_flag(TIF_POLLING_NRFLAG);
+
idle_loop_prolog(&in_purr);
local_irq_enable();
- set_thread_flag(TIF_POLLING_NRFLAG);
snooze_exit_time = get_tb() + snooze_timeout;
while (!need_resched()) {
HMT_low();
HMT_very_low();
- if (snooze_timeout_en && get_tb() > snooze_exit_time)
+ if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time) {
+ /*
+ * Task has not woken up but we are exiting the polling
+ * loop anyway. Require a barrier after polling is
+ * cleared to order subsequent test of need_resched().
+ */
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ smp_mb();
break;
+ }
}
HMT_medium();
clear_thread_flag(TIF_POLLING_NRFLAG);
- smp_mb();
idle_loop_epilog(in_purr);
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 5b5efcc52cb5..baffae817259 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -686,7 +686,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, blp)))
- goto err;
+ goto err_in;
for_each_sg(sgl, sg, n, i) {
int y = sg_nctr;
@@ -699,7 +699,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
DMA_BIDIRECTIONAL);
bufl->bufers[y].len = sg->length;
if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
- goto err;
+ goto err_in;
sg_nctr++;
}
bufl->num_bufs = sg_nctr;
@@ -717,10 +717,10 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
buflout = kzalloc_node(sz_out, GFP_ATOMIC,
dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!buflout))
- goto err;
+ goto err_in;
bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, bloutp)))
- goto err;
+ goto err_out;
bufers = buflout->bufers;
for_each_sg(sglout, sg, n, i) {
int y = sg_nctr;
@@ -732,7 +732,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
sg->length,
DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
- goto err;
+ goto err_out;
bufers[y].len = sg->length;
sg_nctr++;
}
@@ -747,9 +747,20 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
qat_req->buf.sz_out = 0;
}
return 0;
-err:
- dev_err(dev, "Failed to map buf for dma\n");
- sg_nctr = 0;
+
+err_out:
+ n = sg_nents(sglout);
+ for (i = 0; i < n; i++)
+ if (!dma_mapping_error(dev, buflout->bufers[i].addr))
+ dma_unmap_single(dev, buflout->bufers[i].addr,
+ buflout->bufers[i].len,
+ DMA_BIDIRECTIONAL);
+ if (!dma_mapping_error(dev, bloutp))
+ dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
+ kfree(buflout);
+
+err_in:
+ n = sg_nents(sgl);
for (i = 0; i < n; i++)
if (!dma_mapping_error(dev, bufl->bufers[i].addr))
dma_unmap_single(dev, bufl->bufers[i].addr,
@@ -759,17 +770,8 @@ err:
if (!dma_mapping_error(dev, blp))
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
kfree(bufl);
- if (sgl != sglout && buflout) {
- n = sg_nents(sglout);
- for (i = 0; i < n; i++)
- if (!dma_mapping_error(dev, buflout->bufers[i].addr))
- dma_unmap_single(dev, buflout->bufers[i].addr,
- buflout->bufers[i].len,
- DMA_BIDIRECTIONAL);
- if (!dma_mapping_error(dev, bloutp))
- dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
- kfree(buflout);
- }
+
+ dev_err(dev, "Failed to map buf for dma\n");
return -ENOMEM;
}
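
Editor's aside: the qat_alg_sgl_to_bufl() rework replaces one catch-all err: label with staged err_in/err_out labels, so each failure point unwinds only what has actually been set up. A generic sketch of that pattern follows; the acquire()/setup() helpers and resources a/b/c are hypothetical and only illustrate the label ordering.

/*
 * Minimal sketch of staged error unwinding with goto labels,
 * using invented acquire()/setup() helpers.
 */
#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *what, int fail)
{
	if (fail) {
		fprintf(stderr, "failed to acquire %s\n", what);
		return NULL;
	}
	return malloc(1);
}

static int setup(int fail_at)
{
	void *a, *b, *c;

	a = acquire("a", fail_at == 1);
	if (!a)
		return -1;      /* nothing to undo yet */

	b = acquire("b", fail_at == 2);
	if (!b)
		goto err_a;     /* undo only what already exists */

	c = acquire("c", fail_at == 3);
	if (!c)
		goto err_b;

	free(c);
	free(b);
	free(a);
	return 0;

err_b:  /* later failures fall through into progressively older state */
	free(b);
err_a:
	free(a);
	return -1;
}

int main(void)
{
	return setup(3) == -1 ? 0 : 1;
}
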
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 006e657dfcb9..12943d19bfc4 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -499,6 +499,7 @@ static int dax_open(struct inode *inode, struct file *filp)
inode->i_mapping = __dax_inode->i_mapping;
inode->i_mapping->host = __dax_inode;
filp->f_mapping = inode->i_mapping;
+ filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
filp->private_data = dev_dax;
inode->i_flags = S_DAX;
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 922d0823f8ec..ce9e563e6e1d 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -18,6 +18,7 @@
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
+#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>
@@ -115,13 +116,20 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize)
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
#endif
+enum dax_device_flags {
+ /* !alive + rcu grace period == no new operations / mappings */
+ DAXDEV_ALIVE,
+ /* gate whether dax_flush() calls the low level flush routine */
+ DAXDEV_WRITE_CACHE,
+};
+
/**
* struct dax_device - anchor object for dax services
* @inode: core vfs
* @cdev: optional character interface for "device dax"
* @host: optional name for lookups where the device path is not available
* @private: dax driver private data
- * @alive: !alive + rcu grace period == no new operations / mappings
+ * @flags: state and boolean properties
*/
struct dax_device {
struct hlist_node list;
@@ -129,10 +137,75 @@ struct dax_device {
struct cdev cdev;
const char *host;
void *private;
- bool alive;
+ unsigned long flags;
const struct dax_operations *ops;
};
+static ssize_t write_cache_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
+ ssize_t rc;
+
+ WARN_ON_ONCE(!dax_dev);
+ if (!dax_dev)
+ return -ENXIO;
+
+ rc = sprintf(buf, "%d\n", !!test_bit(DAXDEV_WRITE_CACHE,
+ &dax_dev->flags));
+ put_dax(dax_dev);
+ return rc;
+}
+
+static ssize_t write_cache_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ bool write_cache;
+ int rc = strtobool(buf, &write_cache);
+ struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
+
+ WARN_ON_ONCE(!dax_dev);
+ if (!dax_dev)
+ return -ENXIO;
+
+ if (rc)
+ len = rc;
+ else if (write_cache)
+ set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
+ else
+ clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
+
+ put_dax(dax_dev);
+ return len;
+}
+static DEVICE_ATTR_RW(write_cache);
+
+static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, typeof(*dev), kobj);
+ struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
+
+ WARN_ON_ONCE(!dax_dev);
+ if (!dax_dev)
+ return 0;
+
+ if (a == &dev_attr_write_cache.attr && !dax_dev->ops->flush)
+ return 0;
+ return a->mode;
+}
+
+static struct attribute *dax_attributes[] = {
+ &dev_attr_write_cache.attr,
+ NULL,
+};
+
+struct attribute_group dax_attribute_group = {
+ .name = "dax",
+ .attrs = dax_attributes,
+ .is_visible = dax_visible,
+};
+EXPORT_SYMBOL_GPL(dax_attribute_group);
+
/**
* dax_direct_access() - translate a device pgoff to an absolute pfn
* @dax_dev: a dax_device instance representing the logical memory range
@@ -172,10 +245,43 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
}
EXPORT_SYMBOL_GPL(dax_direct_access);
+size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+ size_t bytes, struct iov_iter *i)
+{
+ if (!dax_alive(dax_dev))
+ return 0;
+
+ return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
+}
+EXPORT_SYMBOL_GPL(dax_copy_from_iter);
+
+void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+ size_t size)
+{
+ if (!dax_alive(dax_dev))
+ return;
+
+ if (!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags))
+ return;
+
+ if (dax_dev->ops->flush)
+ dax_dev->ops->flush(dax_dev, pgoff, addr, size);
+}
+EXPORT_SYMBOL_GPL(dax_flush);
+
+void dax_write_cache(struct dax_device *dax_dev, bool wc)
+{
+ if (wc)
+ set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
+ else
+ clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(dax_write_cache);
+
bool dax_alive(struct dax_device *dax_dev)
{
lockdep_assert_held(&dax_srcu);
- return dax_dev->alive;
+ return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);
@@ -195,7 +301,7 @@ void kill_dax(struct dax_device *dax_dev)
if (!dax_dev)
return;
- dax_dev->alive = false;
+ clear_bit(DAXDEV_ALIVE, &dax_dev->flags);
synchronize_srcu(&dax_srcu);
@@ -239,7 +345,7 @@ static void dax_destroy_inode(struct inode *inode)
{
struct dax_device *dax_dev = to_dax_dev(inode);
- WARN_ONCE(dax_dev->alive,
+ WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
"kill_dax() must be called before final iput()\n");
call_rcu(&inode->i_rcu, dax_i_callback);
}
@@ -291,7 +397,7 @@ static struct dax_device *dax_dev_get(dev_t devt)
dax_dev = to_dax_dev(inode);
if (inode->i_state & I_NEW) {
- dax_dev->alive = true;
+ set_bit(DAXDEV_ALIVE, &dax_dev->flags);
inode->i_cdev = &dax_dev->cdev;
inode->i_mode = S_IFCHR;
inode->i_flags = S_DAX;
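
Editor's aside: the dax_device change above swaps a lone bool for an unsigned long flags word so several independent properties (DAXDEV_ALIVE, DAXDEV_WRITE_CACHE) can be tested and flipped per bit. The standalone C sketch below shows the same idiom; the __atomic builtins and DEV_* names are stand-ins for the kernel's set_bit()/clear_bit()/test_bit() and the DAXDEV_* bits, not the actual API.

/*
 * Sketch of per-bit device flags in one word, assuming __atomic
 * builtins as stand-ins for set_bit()/clear_bit()/test_bit().
 */
#include <stdbool.h>
#include <stdio.h>

enum dev_flags {
	DEV_ALIVE,        /* mirrors DAXDEV_ALIVE */
	DEV_WRITE_CACHE,  /* mirrors DAXDEV_WRITE_CACHE */
};

struct dev {
	unsigned long flags;  /* one word, many independent booleans */
};

static void flag_set(struct dev *d, int bit)
{
	__atomic_fetch_or(&d->flags, 1UL << bit, __ATOMIC_RELAXED);
}

static void flag_clear(struct dev *d, int bit)
{
	__atomic_fetch_and(&d->flags, ~(1UL << bit), __ATOMIC_RELAXED);
}

static bool flag_test(struct dev *d, int bit)
{
	return __atomic_load_n(&d->flags, __ATOMIC_RELAXED) & (1UL << bit);
}

int main(void)
{
	struct dev d = { 0 };

	flag_set(&d, DEV_ALIVE);
	flag_set(&d, DEV_WRITE_CACHE);
	flag_clear(&d, DEV_WRITE_CACHE);  /* like write_cache_store("0") */

	printf("alive=%d write_cache=%d\n",
	       flag_test(&d, DEV_ALIVE), flag_test(&d, DEV_WRITE_CACHE));
	return 0;
}
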
diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c
index 830184529109..0d0677f23916 100644
--- a/drivers/dio/dio.c
+++ b/drivers/dio/dio.c
@@ -116,7 +116,6 @@ int __init dio_find(int deviceid)
*/
int scode, id;
u_char prid, secid, i;
- mm_segment_t fs;
for (scode = 0; scode < DIO_SCMAX; scode++) {
void *va;
@@ -135,17 +134,12 @@ int __init dio_find(int deviceid)
else
va = ioremap(pa, PAGE_SIZE);
- fs = get_fs();
- set_fs(KERNEL_DS);
-
- if (get_user(i, (unsigned char *)va + DIO_IDOFF)) {
- set_fs(fs);
+ if (probe_kernel_read(&i, (unsigned char *)va + DIO_IDOFF, 1)) {
if (scode >= DIOII_SCBASE)
iounmap(va);
continue; /* no board present at that select code */
}
- set_fs(fs);
prid = DIO_ID(va);
if (DIO_NEEDSSECID(prid)) {
@@ -170,7 +164,6 @@ int __init dio_find(int deviceid)
static int __init dio_init(void)
{
int scode;
- mm_segment_t fs;
int i;
struct dio_dev *dev;
int error;
@@ -214,18 +207,12 @@ static int __init dio_init(void)
else
va = ioremap(pa, PAGE_SIZE);
- fs = get_fs();
- set_fs(KERNEL_DS);
-
- if (get_user(i, (unsigned char *)va + DIO_IDOFF)) {
- set_fs(fs);
+ if (probe_kernel_read(&i, (unsigned char *)va + DIO_IDOFF, 1)) {
if (scode >= DIOII_SCBASE)
iounmap(va);
continue; /* no board present at that select code */
}
- set_fs(fs);
-
/* Found a board, allocate it an entry in the list */
dev = kzalloc(sizeof(struct dio_dev), GFP_KERNEL);
if (!dev)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 24e8597b2c3e..fa8f9c07ce73 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -62,8 +62,10 @@ config AMBA_PL08X
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
- Platform has a PL08x DMAC device
- which can provide DMA engine support
+ Say yes if your platform has a PL08x DMAC device which can
+ provide DMA engine support. This includes the original ARM
+ PL080 and PL081, Samsung's PL080 derivative and Faraday
+ Technology's FTDMAC020 PL080 derivative.
config AMCC_PPC440SPE_ADMA
tristate "AMCC PPC440SPe ADMA support"
@@ -99,6 +101,21 @@ config AXI_DMAC
controller is often used in Analog Device's reference designs for FPGA
platforms.
+config BCM_SBA_RAID
+ tristate "Broadcom SBA RAID engine support"
+ depends on ARM64 || COMPILE_TEST
+ depends on MAILBOX && RAID6_PQ
+ select DMA_ENGINE
+ select DMA_ENGINE_RAID
+ select ASYNC_TX_DISABLE_XOR_VAL_DMA
+ select ASYNC_TX_DISABLE_PQ_VAL_DMA
+ default ARCH_BCM_IPROC
+ help
+ Enable support for Broadcom SBA RAID Engine. The SBA RAID
+ engine is available on most of the Broadcom iProc SoCs. It
+ has the capability to offload memcpy, xor and pq computation
+ for raid5/6.
+
config COH901318
bool "ST-Ericsson COH901318 DMA support"
select DMA_ENGINE
@@ -354,13 +371,12 @@ config MV_XOR_V2
config MXS_DMA
bool "MXS DMA support"
- depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q || SOC_IMX6UL
+ depends on ARCH_MXS || ARCH_MXC || COMPILE_TEST
select STMP_DEVICE
select DMA_ENGINE
help
Support the MXS DMA engine. This engine including APBH-DMA
- and APBX-DMA is integrated into Freescale
- i.MX23/28/MX6Q/MX6DL/MX6UL chips.
+ and APBX-DMA is integrated into some Freescale chips.
config MX3_IPU
bool "MX3x Image Processing Unit support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 0b723e94d9e6..d12ab2985ed1 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
+obj-$(CONFIG_BCM_SBA_RAID) += bcm-sba-raid.o
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 6bb8813ca275..13cc95c0474c 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1,9 +1,10 @@
/*
* Copyright (c) 2006 ARM Ltd.
* Copyright (c) 2010 ST-Ericsson SA
+ * Copyright (c) 2017 Linaro Ltd.
*
* Author: Peter Pearse <[email protected]>
- * Author: Linus Walleij <[email protected]>
+ * Author: Linus Walleij <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -110,11 +111,12 @@ struct pl08x_driver_data;
* @channels: the number of channels available in this variant
* @signals: the number of request signals available from the hardware
* @dualmaster: whether this version supports dual AHB masters or not.
- * @nomadik: whether the channels have Nomadik security extension bits
- * that need to be checked for permission before use and some registers are
- * missing
- * @pl080s: whether this version is a PL080S, which has separate register and
- * LLI word for transfer size.
+ * @nomadik: whether this variant is a ST Microelectronics Nomadik, where the
+ * channels have Nomadik security extension bits that need to be checked
+ * for permission before use and some registers are missing
+ * @pl080s: whether this variant is a Samsung PL080S, which has separate
+ * register and LLI word for transfer size.
+ * @ftdmac020: whether this variant is a Faraday Technology FTDMAC020
* @max_transfer_size: the maximum single element transfer size for this
* PL08x variant.
*/
@@ -125,6 +127,7 @@ struct vendor_data {
bool dualmaster;
bool nomadik;
bool pl080s;
+ bool ftdmac020;
u32 max_transfer_size;
};
@@ -148,19 +151,34 @@ struct pl08x_bus_data {
* @id: physical index to this channel
* @base: memory base address for this physical channel
* @reg_config: configuration address for this physical channel
+ * @reg_control: control address for this physical channel
+ * @reg_src: transfer source address register
+ * @reg_dst: transfer destination address register
+ * @reg_lli: transfer LLI address register
+ * @reg_busy: if the variant has a special per-channel busy register,
+ * this contains a pointer to it
* @lock: a lock to use when altering an instance of this struct
* @serving: the virtual channel currently being served by this physical
* channel
* @locked: channel unavailable for the system, e.g. dedicated to secure
* world
+ * @ftdmac020: channel is on an FTDMAC020
+ * @pl080s: channel is on a PL080S
*/
struct pl08x_phy_chan {
unsigned int id;
void __iomem *base;
void __iomem *reg_config;
+ void __iomem *reg_control;
+ void __iomem *reg_src;
+ void __iomem *reg_dst;
+ void __iomem *reg_lli;
+ void __iomem *reg_busy;
spinlock_t lock;
struct pl08x_dma_chan *serving;
bool locked;
+ bool ftdmac020;
+ bool pl080s;
};
/**
@@ -253,8 +271,9 @@ struct pl08x_dma_chan {
/**
* struct pl08x_driver_data - the local state holder for the PL08x
- * @slave: slave engine for this instance
+ * @slave: optional slave engine for this instance
* @memcpy: memcpy engine for this instance
+ * @has_slave: the PL08x has a slave engine (routed signals)
* @base: virtual memory base (remapped) for the PL08x
* @adev: the corresponding AMBA (PrimeCell) bus entry
* @vd: vendor data for this PL08x variant
@@ -269,6 +288,7 @@ struct pl08x_dma_chan {
struct pl08x_driver_data {
struct dma_device slave;
struct dma_device memcpy;
+ bool has_slave;
void __iomem *base;
struct amba_device *adev;
const struct vendor_data *vd;
@@ -360,10 +380,24 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
unsigned int val;
+ /* If we have a special busy register, take a shortcut */
+ if (ch->reg_busy) {
+ val = readl(ch->reg_busy);
+ return !!(val & BIT(ch->id));
+ }
val = readl(ch->reg_config);
return val & PL080_CONFIG_ACTIVE;
}
+/*
+ * pl08x_write_lli() - Write an LLI into the DMA controller.
+ *
+ * The PL08x derivatives support linked lists, but the first item of the
+ * list containing the source, destination, control word and next LLI is
+ * ignored. Instead the driver has to write those values directly into the
+ * SRC, DST, LLI and control registers. On the FTDMAC020 the SIZE
+ * register also needs to be set up for the first transfer.
+ */
static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
{
@@ -381,11 +415,112 @@ static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);
- writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR);
- writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR);
- writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI);
- writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL);
+ writel_relaxed(lli[PL080_LLI_SRC], phychan->reg_src);
+ writel_relaxed(lli[PL080_LLI_DST], phychan->reg_dst);
+ writel_relaxed(lli[PL080_LLI_LLI], phychan->reg_lli);
+
+ /*
+ * The FTMAC020 has a different layout in the CCTL word of the LLI
+ * and the CCTL register which is split in CSR and SIZE registers.
+ * Convert the LLI item CCTL into the proper values to write into
+ * the CSR and SIZE registers.
+ */
+ if (phychan->ftdmac020) {
+ u32 llictl = lli[PL080_LLI_CCTL];
+ u32 val = 0;
+
+ /* Write the transfer size (12 bits) to the size register */
+ writel_relaxed(llictl & FTDMAC020_LLI_TRANSFER_SIZE_MASK,
+ phychan->base + FTDMAC020_CH_SIZE);
+ /*
+ * Then write the control bits 28..16 to the control register
+ * by shuffleing the bits around to where they are in the
+ * main register. The mapping is as follows:
+ * Bit 28: TC_MSK - mask on all except last LLI
+ * Bit 27..25: SRC_WIDTH
+ * Bit 24..22: DST_WIDTH
+ * Bit 21..20: SRCAD_CTRL
+ * Bit 19..17: DSTAD_CTRL
+ * Bit 17: SRC_SEL
+ * Bit 16: DST_SEL
+ */
+ if (llictl & FTDMAC020_LLI_TC_MSK)
+ val |= FTDMAC020_CH_CSR_TC_MSK;
+ val |= ((llictl & FTDMAC020_LLI_SRC_WIDTH_MSK) >>
+ (FTDMAC020_LLI_SRC_WIDTH_SHIFT -
+ FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT));
+ val |= ((llictl & FTDMAC020_LLI_DST_WIDTH_MSK) >>
+ (FTDMAC020_LLI_DST_WIDTH_SHIFT -
+ FTDMAC020_CH_CSR_DST_WIDTH_SHIFT));
+ val |= ((llictl & FTDMAC020_LLI_SRCAD_CTL_MSK) >>
+ (FTDMAC020_LLI_SRCAD_CTL_SHIFT -
+ FTDMAC020_CH_CSR_SRCAD_CTL_SHIFT));
+ val |= ((llictl & FTDMAC020_LLI_DSTAD_CTL_MSK) >>
+ (FTDMAC020_LLI_DSTAD_CTL_SHIFT -
+ FTDMAC020_CH_CSR_DSTAD_CTL_SHIFT));
+ if (llictl & FTDMAC020_LLI_SRC_SEL)
+ val |= FTDMAC020_CH_CSR_SRC_SEL;
+ if (llictl & FTDMAC020_LLI_DST_SEL)
+ val |= FTDMAC020_CH_CSR_DST_SEL;
+
+ /*
+ * Set up the bits that exist in the CSR but are not
+ * part of the LLI, i.e. they only get written to the control
+ * register right here.
+ *
+ * FIXME: do not just handle memcpy, also handle slave DMA.
+ */
+ switch (pl08x->pd->memcpy_burst_size) {
+ default:
+ case PL08X_BURST_SZ_1:
+ val |= PL080_BSIZE_1 <<
+ FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
+ break;
+ case PL08X_BURST_SZ_4:
+ val |= PL080_BSIZE_4 <<
+ FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
+ break;
+ case PL08X_BURST_SZ_8:
+ val |= PL080_BSIZE_8 <<
+ FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
+ break;
+ case PL08X_BURST_SZ_16:
+ val |= PL080_BSIZE_16 <<
+ FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
+ break;
+ case PL08X_BURST_SZ_32:
+ val |= PL080_BSIZE_32 <<
+ FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
+ break;
+ case PL08X_BURST_SZ_64:
+ val |= PL080_BSIZE_64 <<
+ FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
+ break;
+ case PL08X_BURST_SZ_128:
+ val |= PL080_BSIZE_128 <<
+ FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
+ break;
+ case PL08X_BURST_SZ_256:
+ val |= PL080_BSIZE_256 <<
+ FTDMAC020_CH_CSR_SRC_SIZE_SHIFT;
+ break;
+ }
+
+ /* Protection flags */
+ if (pl08x->pd->memcpy_prot_buff)
+ val |= FTDMAC020_CH_CSR_PROT2;
+ if (pl08x->pd->memcpy_prot_cache)
+ val |= FTDMAC020_CH_CSR_PROT3;
+ /* We are the kernel, so we are in privileged mode */
+ val |= FTDMAC020_CH_CSR_PROT1;
+
+ writel_relaxed(val, phychan->reg_control);
+ } else {
+ /* Bits are just identical */
+ writel_relaxed(lli[PL080_LLI_CCTL], phychan->reg_control);
+ }
+ /* Second control word on the PL080s */
if (pl08x->vd->pl080s)
writel_relaxed(lli[PL080S_LLI_CCTL2],
phychan->base + PL080S_CH_CONTROL2);
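
Editor's aside: the FTDMAC020 branch of pl08x_write_lli() above relocates each field by masking it in the LLI word and shifting by the difference between the two layouts' field offsets. The standalone sketch below shows that relocation trick with invented field positions; the real FTDMAC020_*_SHIFT/_MSK values live in the pl080 header and are not reproduced here.

/*
 * Sketch of moving a bit field from one register layout to another,
 * using invented LLI/CSR offsets rather than the real FTDMAC020 ones.
 */
#include <stdio.h>
#include <stdint.h>

#define LLI_WIDTH_SHIFT 25u                      /* field position in the LLI word */
#define LLI_WIDTH_MSK   (0x7u << LLI_WIDTH_SHIFT)
#define CSR_WIDTH_SHIFT 8u                       /* field position in the CSR */

int main(void)
{
	uint32_t lli = 0x3u << LLI_WIDTH_SHIFT;  /* pretend SRC_WIDTH = 3 */
	uint32_t csr = 0;

	/*
	 * Same pattern as the driver: mask the field in place, then shift
	 * right by (source shift - destination shift) to drop it into the
	 * target layout without decoding it first.
	 */
	csr |= (lli & LLI_WIDTH_MSK) >> (LLI_WIDTH_SHIFT - CSR_WIDTH_SHIFT);

	printf("field value %u now at CSR bits: 0x%08x\n",
	       (csr >> CSR_WIDTH_SHIFT) & 0x7u, csr);
	return 0;
}
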
@@ -423,11 +558,25 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
cpu_relax();
/* Do not access config register until channel shows as inactive */
- val = readl(phychan->reg_config);
- while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
+ if (phychan->ftdmac020) {
+ val = readl(phychan->reg_config);
+ while (val & FTDMAC020_CH_CFG_BUSY)
+ val = readl(phychan->reg_config);
+
+ val = readl(phychan->reg_control);
+ while (val & FTDMAC020_CH_CSR_EN)
+ val = readl(phychan->reg_control);
+
+ writel(val | FTDMAC020_CH_CSR_EN,
+ phychan->reg_control);
+ } else {
val = readl(phychan->reg_config);
+ while ((val & PL080_CONFIG_ACTIVE) ||
+ (val & PL080_CONFIG_ENABLE))
+ val = readl(phychan->reg_config);
- writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
+ writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
+ }
}
/*
@@ -445,6 +594,14 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
u32 val;
int timeout;
+ if (ch->ftdmac020) {
+ /* Use the enable bit on the FTDMAC020 */
+ val = readl(ch->reg_control);
+ val &= ~FTDMAC020_CH_CSR_EN;
+ writel(val, ch->reg_control);
+ return;
+ }
+
/* Set the HALT bit and wait for the FIFO to drain */
val = readl(ch->reg_config);
val |= PL080_CONFIG_HALT;
@@ -464,6 +621,14 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
u32 val;
+ /* Use the enable bit on the FTDMAC020 */
+ if (ch->ftdmac020) {
+ val = readl(ch->reg_control);
+ val |= FTDMAC020_CH_CSR_EN;
+ writel(val, ch->reg_control);
+ return;
+ }
+
/* Clear the HALT bit */
val = readl(ch->reg_config);
val &= ~PL080_CONFIG_HALT;
@@ -479,25 +644,68 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
struct pl08x_phy_chan *ch)
{
- u32 val = readl(ch->reg_config);
+ u32 val;
+ /* The layout for the FTDMAC020 is different */
+ if (ch->ftdmac020) {
+ /* Disable all interrupts */
+ val = readl(ch->reg_config);
+ val |= (FTDMAC020_CH_CFG_INT_ABT_MASK |
+ FTDMAC020_CH_CFG_INT_ERR_MASK |
+ FTDMAC020_CH_CFG_INT_TC_MASK);
+ writel(val, ch->reg_config);
+
+ /* Abort and disable channel */
+ val = readl(ch->reg_control);
+ val &= ~FTDMAC020_CH_CSR_EN;
+ val |= FTDMAC020_CH_CSR_ABT;
+ writel(val, ch->reg_control);
+
+ /* Clear ABT and ERR interrupt flags */
+ writel(BIT(ch->id) | BIT(ch->id + 16),
+ pl08x->base + PL080_ERR_CLEAR);
+ writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR);
+
+ return;
+ }
+
+ val = readl(ch->reg_config);
val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
PL080_CONFIG_TC_IRQ_MASK);
-
writel(val, ch->reg_config);
writel(BIT(ch->id), pl08x->base + PL080_ERR_CLEAR);
writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR);
}
-static inline u32 get_bytes_in_cctl(u32 cctl)
+static u32 get_bytes_in_phy_channel(struct pl08x_phy_chan *ch)
{
- /* The source width defines the number of bytes */
- u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
+ u32 val;
+ u32 bytes;
+
+ if (ch->ftdmac020) {
+ bytes = readl(ch->base + FTDMAC020_CH_SIZE);
- cctl &= PL080_CONTROL_SWIDTH_MASK;
+ val = readl(ch->reg_control);
+ val &= FTDMAC020_CH_CSR_SRC_WIDTH_MSK;
+ val >>= FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT;
+ } else if (ch->pl080s) {
+ val = readl(ch->base + PL080S_CH_CONTROL2);
+ bytes = val & PL080S_CONTROL_TRANSFER_SIZE_MASK;
- switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
+ val = readl(ch->reg_control);
+ val &= PL080_CONTROL_SWIDTH_MASK;
+ val >>= PL080_CONTROL_SWIDTH_SHIFT;
+ } else {
+ /* Plain PL08x */
+ val = readl(ch->reg_control);
+ bytes = val & PL080_CONTROL_TRANSFER_SIZE_MASK;
+
+ val &= PL080_CONTROL_SWIDTH_MASK;
+ val >>= PL080_CONTROL_SWIDTH_SHIFT;
+ }
+
+ switch (val) {
case PL080_WIDTH_8BIT:
break;
case PL080_WIDTH_16BIT:
@@ -510,14 +718,35 @@ static inline u32 get_bytes_in_cctl(u32 cctl)
return bytes;
}
-static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1)
+static u32 get_bytes_in_lli(struct pl08x_phy_chan *ch, const u32 *llis_va)
{
- /* The source width defines the number of bytes */
- u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK;
+ u32 val;
+ u32 bytes;
+
+ if (ch->ftdmac020) {
+ val = llis_va[PL080_LLI_CCTL];
+ bytes = val & FTDMAC020_LLI_TRANSFER_SIZE_MASK;
+
+ val = llis_va[PL080_LLI_CCTL];
+ val &= FTDMAC020_LLI_SRC_WIDTH_MSK;
+ val >>= FTDMAC020_LLI_SRC_WIDTH_SHIFT;
+ } else if (ch->pl080s) {
+ val = llis_va[PL080S_LLI_CCTL2];
+ bytes = val & PL080S_CONTROL_TRANSFER_SIZE_MASK;
+
+ val = llis_va[PL080_LLI_CCTL];
+ val &= PL080_CONTROL_SWIDTH_MASK;
+ val >>= PL080_CONTROL_SWIDTH_SHIFT;
+ } else {
+ /* Plain PL08x */
+ val = llis_va[PL080_LLI_CCTL];
+ bytes = val & PL080_CONTROL_TRANSFER_SIZE_MASK;
- cctl &= PL080_CONTROL_SWIDTH_MASK;
+ val &= PL080_CONTROL_SWIDTH_MASK;
+ val >>= PL080_CONTROL_SWIDTH_SHIFT;
+ }
- switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
+ switch (val) {
case PL080_WIDTH_8BIT:
break;
case PL080_WIDTH_16BIT:
@@ -552,15 +781,10 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
* Follow the LLIs to get the number of remaining
* bytes in the currently active transaction.
*/
- clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
+ clli = readl(ch->reg_lli) & ~PL080_LLI_LM_AHB2;
/* First get the remaining bytes in the active transfer */
- if (pl08x->vd->pl080s)
- bytes = get_bytes_in_cctl_pl080s(
- readl(ch->base + PL080_CH_CONTROL),
- readl(ch->base + PL080S_CH_CONTROL2));
- else
- bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
+ bytes = get_bytes_in_phy_channel(ch);
if (!clli)
return bytes;
@@ -581,12 +805,7 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
llis_va_limit = llis_va + llis_max_words;
for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
- if (pl08x->vd->pl080s)
- bytes += get_bytes_in_cctl_pl080s(
- llis_va[PL080_LLI_CCTL],
- llis_va[PL080S_LLI_CCTL2]);
- else
- bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]);
+ bytes += get_bytes_in_lli(ch, llis_va);
/*
* A LLI pointer going backward terminates the LLI list
@@ -705,7 +924,7 @@ static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
break;
}
- if (!next) {
+ if (!next && pl08x->has_slave) {
list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
if (p->state == PL08X_CHAN_WAITING) {
next = p;
@@ -746,9 +965,30 @@ static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
* LLI handling
*/
-static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
+static inline unsigned int
+pl08x_get_bytes_for_lli(struct pl08x_driver_data *pl08x,
+ u32 cctl,
+ bool source)
{
- switch (coded) {
+ u32 val;
+
+ if (pl08x->vd->ftdmac020) {
+ if (source)
+ val = (cctl & FTDMAC020_LLI_SRC_WIDTH_MSK) >>
+ FTDMAC020_LLI_SRC_WIDTH_SHIFT;
+ else
+ val = (cctl & FTDMAC020_LLI_DST_WIDTH_MSK) >>
+ FTDMAC020_LLI_DST_WIDTH_SHIFT;
+ } else {
+ if (source)
+ val = (cctl & PL080_CONTROL_SWIDTH_MASK) >>
+ PL080_CONTROL_SWIDTH_SHIFT;
+ else
+ val = (cctl & PL080_CONTROL_DWIDTH_MASK) >>
+ PL080_CONTROL_DWIDTH_SHIFT;
+ }
+
+ switch (val) {
case PL080_WIDTH_8BIT:
return 1;
case PL080_WIDTH_16BIT:
@@ -762,49 +1002,106 @@ static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
return 0;
}
-static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
- size_t tsize)
+static inline u32 pl08x_lli_control_bits(struct pl08x_driver_data *pl08x,
+ u32 cctl,
+ u8 srcwidth, u8 dstwidth,
+ size_t tsize)
{
u32 retbits = cctl;
- /* Remove all src, dst and transfer size bits */
- retbits &= ~PL080_CONTROL_DWIDTH_MASK;
- retbits &= ~PL080_CONTROL_SWIDTH_MASK;
- retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
+ /*
+ * Remove all src, dst and transfer size bits, then set the
+ * width and size according to the parameters. The bit offsets
+ * are different in the FTDMAC020 so we need to account for this.
+ */
+ if (pl08x->vd->ftdmac020) {
+ retbits &= ~FTDMAC020_LLI_DST_WIDTH_MSK;
+ retbits &= ~FTDMAC020_LLI_SRC_WIDTH_MSK;
+ retbits &= ~FTDMAC020_LLI_TRANSFER_SIZE_MASK;
+
+ switch (srcwidth) {
+ case 1:
+ retbits |= PL080_WIDTH_8BIT <<
+ FTDMAC020_LLI_SRC_WIDTH_SHIFT;
+ break;
+ case 2:
+ retbits |= PL080_WIDTH_16BIT <<
+ FTDMAC020_LLI_SRC_WIDTH_SHIFT;
+ break;
+ case 4:
+ retbits |= PL080_WIDTH_32BIT <<
+ FTDMAC020_LLI_SRC_WIDTH_SHIFT;
+ break;
+ default:
+ BUG();
+ break;
+ }
- /* Then set the bits according to the parameters */
- switch (srcwidth) {
- case 1:
- retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
- break;
- case 2:
- retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
- break;
- case 4:
- retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
- break;
- default:
- BUG();
- break;
- }
+ switch (dstwidth) {
+ case 1:
+ retbits |= PL080_WIDTH_8BIT <<
+ FTDMAC020_LLI_DST_WIDTH_SHIFT;
+ break;
+ case 2:
+ retbits |= PL080_WIDTH_16BIT <<
+ FTDMAC020_LLI_DST_WIDTH_SHIFT;
+ break;
+ case 4:
+ retbits |= PL080_WIDTH_32BIT <<
+ FTDMAC020_LLI_DST_WIDTH_SHIFT;
+ break;
+ default:
+ BUG();
+ break;
+ }
- switch (dstwidth) {
- case 1:
- retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
- break;
- case 2:
- retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
- break;
- case 4:
- retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
- break;
- default:
- BUG();
- break;
+ tsize &= FTDMAC020_LLI_TRANSFER_SIZE_MASK;
+ retbits |= tsize << FTDMAC020_LLI_TRANSFER_SIZE_SHIFT;
+ } else {
+ retbits &= ~PL080_CONTROL_DWIDTH_MASK;
+ retbits &= ~PL080_CONTROL_SWIDTH_MASK;
+ retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;
+
+ switch (srcwidth) {
+ case 1:
+ retbits |= PL080_WIDTH_8BIT <<
+ PL080_CONTROL_SWIDTH_SHIFT;
+ break;
+ case 2:
+ retbits |= PL080_WIDTH_16BIT <<
+ PL080_CONTROL_SWIDTH_SHIFT;
+ break;
+ case 4:
+ retbits |= PL080_WIDTH_32BIT <<
+ PL080_CONTROL_SWIDTH_SHIFT;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ switch (dstwidth) {
+ case 1:
+ retbits |= PL080_WIDTH_8BIT <<
+ PL080_CONTROL_DWIDTH_SHIFT;
+ break;
+ case 2:
+ retbits |= PL080_WIDTH_16BIT <<
+ PL080_CONTROL_DWIDTH_SHIFT;
+ break;
+ case 4:
+ retbits |= PL080_WIDTH_32BIT <<
+ PL080_CONTROL_DWIDTH_SHIFT;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
+ retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
}
- tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
- retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
return retbits;
}
@@ -825,13 +1122,35 @@ struct pl08x_lli_build_data {
* - prefers the destination bus if both available
* - prefers bus with fixed address (i.e. peripheral)
*/
-static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
- struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
+static void pl08x_choose_master_bus(struct pl08x_driver_data *pl08x,
+ struct pl08x_lli_build_data *bd,
+ struct pl08x_bus_data **mbus,
+ struct pl08x_bus_data **sbus,
+ u32 cctl)
{
- if (!(cctl & PL080_CONTROL_DST_INCR)) {
+ bool dst_incr;
+ bool src_incr;
+
+ /*
+ * The FTDMAC020 only supports memory-to-memory transfers, so
+ * source and destination always increase.
+ */
+ if (pl08x->vd->ftdmac020) {
+ dst_incr = true;
+ src_incr = true;
+ } else {
+ dst_incr = !!(cctl & PL080_CONTROL_DST_INCR);
+ src_incr = !!(cctl & PL080_CONTROL_SRC_INCR);
+ }
+
+ /*
+ * If either bus is not advancing, i.e. it is a peripheral, that
+ * one becomes master
+ */
+ if (!dst_incr) {
*mbus = &bd->dstbus;
*sbus = &bd->srcbus;
- } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
+ } else if (!src_incr) {
*mbus = &bd->srcbus;
*sbus = &bd->dstbus;
} else {
@@ -869,10 +1188,16 @@ static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
if (pl08x->vd->pl080s)
llis_va[PL080S_LLI_CCTL2] = cctl2;
- if (cctl & PL080_CONTROL_SRC_INCR)
+ if (pl08x->vd->ftdmac020) {
+ /* FIXME: only memcpy so far so both increase */
bd->srcbus.addr += len;
- if (cctl & PL080_CONTROL_DST_INCR)
bd->dstbus.addr += len;
+ } else {
+ if (cctl & PL080_CONTROL_SRC_INCR)
+ bd->srcbus.addr += len;
+ if (cctl & PL080_CONTROL_DST_INCR)
+ bd->dstbus.addr += len;
+ }
BUG_ON(bd->remainder < len);
@@ -883,12 +1208,12 @@ static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
int num_llis, size_t *total_bytes)
{
- *cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
+ *cctl = pl08x_lli_control_bits(pl08x, *cctl, 1, 1, len);
pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
(*total_bytes) += len;
}
-#ifdef VERBOSE_DEBUG
+#if 1
static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
const u32 *llis_va, int num_llis)
{
@@ -953,14 +1278,10 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
cctl = txd->cctl;
/* Find maximum width of the source bus */
- bd.srcbus.maxwidth =
- pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
- PL080_CONTROL_SWIDTH_SHIFT);
+ bd.srcbus.maxwidth = pl08x_get_bytes_for_lli(pl08x, cctl, true);
/* Find maximum width of the destination bus */
- bd.dstbus.maxwidth =
- pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
- PL080_CONTROL_DWIDTH_SHIFT);
+ bd.dstbus.maxwidth = pl08x_get_bytes_for_lli(pl08x, cctl, false);
list_for_each_entry(dsg, &txd->dsg_list, node) {
total_bytes = 0;
@@ -972,7 +1293,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
bd.srcbus.buswidth = bd.srcbus.maxwidth;
bd.dstbus.buswidth = bd.dstbus.maxwidth;
- pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
+ pl08x_choose_master_bus(pl08x, &bd, &mbus, &sbus, cctl);
dev_vdbg(&pl08x->adev->dev,
"src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n",
@@ -1009,8 +1330,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
* supported. Thus, we can't have scattered addresses.
*/
if (!bd.remainder) {
- u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
- PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ u32 fc;
+
+ /* FTDMAC020 only does memory-to-memory */
+ if (pl08x->vd->ftdmac020)
+ fc = PL080_FLOW_MEM2MEM;
+ else
+ fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
+ PL080_CONFIG_FLOW_CONTROL_SHIFT;
if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
(fc <= PL080_FLOW_SRC2DST_SRC))) {
dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
@@ -1027,8 +1354,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
return 0;
}
- cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
- bd.dstbus.buswidth, 0);
+ cctl = pl08x_lli_control_bits(pl08x, cctl,
+ bd.srcbus.buswidth, bd.dstbus.buswidth,
+ 0);
pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
0, cctl, 0);
break;
@@ -1107,8 +1435,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
"size 0x%08zx (remainder 0x%08zx)\n",
__func__, lli_len, bd.remainder);
- cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
- bd.dstbus.buswidth, tsize);
+ cctl = pl08x_lli_control_bits(pl08x, cctl,
+ bd.srcbus.buswidth, bd.dstbus.buswidth,
+ tsize);
pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
lli_len, cctl, tsize);
total_bytes += lli_len;
@@ -1151,7 +1480,10 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
/* The final LLI terminates the LLI. */
last_lli[PL080_LLI_LLI] = 0;
/* The final LLI element shall also fire an interrupt. */
- last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
+ if (pl08x->vd->ftdmac020)
+ last_lli[PL080_LLI_CCTL] &= ~FTDMAC020_LLI_TC_MSK;
+ else
+ last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
}
pl08x_dump_lli(pl08x, llis_va, num_llis);
@@ -1317,14 +1649,25 @@ static const struct burst_table burst_sizes[] = {
* will be routed to each port. We try to have source and destination
* on separate ports, but always respect the allowable settings.
*/
-static u32 pl08x_select_bus(u8 src, u8 dst)
+static u32 pl08x_select_bus(bool ftdmac020, u8 src, u8 dst)
{
u32 cctl = 0;
+ u32 dst_ahb2;
+ u32 src_ahb2;
+
+ /* The FTDMAC020 uses different bits to indicate src/dst bus */
+ if (ftdmac020) {
+ dst_ahb2 = FTDMAC020_LLI_DST_SEL;
+ src_ahb2 = FTDMAC020_LLI_SRC_SEL;
+ } else {
+ dst_ahb2 = PL080_CONTROL_DST_AHB2;
+ src_ahb2 = PL080_CONTROL_SRC_AHB2;
+ }
if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
- cctl |= PL080_CONTROL_DST_AHB2;
+ cctl |= dst_ahb2;
if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
- cctl |= PL080_CONTROL_SRC_AHB2;
+ cctl |= src_ahb2;
return cctl;
}
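
Editor's aside: pl08x_select_bus() now takes a flag selecting the PL080 or FTDMAC020 bit names, but the routing rule is unchanged: a side that cannot use AHB1 is forced onto AHB2, and otherwise source and destination are split across the two masters where the capability masks allow it. The sketch below restates that rule outside the driver; the AHB1/AHB2 capability bits and the CCTL_* positions are stand-in values for the demo.

/*
 * Sketch of the AHB master selection rule; the capability bits and
 * the returned "use AHB2" flags are invented demo values.
 */
#include <stdio.h>
#include <stdint.h>

#define AHB1          (1u << 0)   /* endpoint may use master 1 */
#define AHB2          (1u << 1)   /* endpoint may use master 2 */
#define CCTL_DST_AHB2 (1u << 25)  /* invented positions for the demo */
#define CCTL_SRC_AHB2 (1u << 24)

static uint32_t select_bus(uint32_t src, uint32_t dst)
{
	uint32_t cctl = 0;

	/* Destination uses AHB2 if AHB1 is not allowed, or to keep it
	 * apart from a source that can sit on AHB1. */
	if (!(dst & AHB1) || ((dst & AHB2) && (src & AHB1)))
		cctl |= CCTL_DST_AHB2;
	/* Source uses AHB2 if AHB1 is not allowed, or if the destination
	 * cannot take AHB2 anyway. */
	if (!(src & AHB1) || ((src & AHB2) && !(dst & AHB2)))
		cctl |= CCTL_SRC_AHB2;

	return cctl;
}

int main(void)
{
	uint32_t cctl = select_bus(AHB1 | AHB2, AHB1 | AHB2);

	printf("dst on AHB2: %d, src on AHB2: %d\n",
	       !!(cctl & CCTL_DST_AHB2), !!(cctl & CCTL_SRC_AHB2));
	return 0;
}
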
@@ -1412,14 +1755,134 @@ static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan)
{
struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
- if (txd) {
+ if (txd)
INIT_LIST_HEAD(&txd->dsg_list);
+ return txd;
+}
- /* Always enable error and terminal interrupts */
- txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
- PL080_CONFIG_TC_IRQ_MASK;
+static u32 pl08x_memcpy_cctl(struct pl08x_driver_data *pl08x)
+{
+ u32 cctl = 0;
+
+ /* Conjure cctl */
+ switch (pl08x->pd->memcpy_burst_size) {
+ default:
+ dev_err(&pl08x->adev->dev,
+ "illegal burst size for memcpy, set to 1\n");
+ /* Fall through */
+ case PL08X_BURST_SZ_1:
+ cctl |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT |
+ PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT;
+ break;
+ case PL08X_BURST_SZ_4:
+ cctl |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
+ PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT;
+ break;
+ case PL08X_BURST_SZ_8:
+ cctl |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT |
+ PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT;
+ break;
+ case PL08X_BURST_SZ_16:
+ cctl |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT |
+ PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT;
+ break;
+ case PL08X_BURST_SZ_32:
+ cctl |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT |
+ PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT;
+ break;
+ case PL08X_BURST_SZ_64:
+ cctl |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT |
+ PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT;
+ break;
+ case PL08X_BURST_SZ_128:
+ cctl |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT |
+ PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT;
+ break;
+ case PL08X_BURST_SZ_256:
+ cctl |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT |
+ PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT;
+ break;
}
- return txd;
+
+ switch (pl08x->pd->memcpy_bus_width) {
+ default:
+ dev_err(&pl08x->adev->dev,
+ "illegal bus width for memcpy, set to 8 bits\n");
+ /* Fall through */
+ case PL08X_BUS_WIDTH_8_BITS:
+ cctl |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT |
+ PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
+ break;
+ case PL08X_BUS_WIDTH_16_BITS:
+ cctl |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT |
+ PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
+ break;
+ case PL08X_BUS_WIDTH_32_BITS:
+ cctl |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
+ PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
+ break;
+ }
+
+ /* Protection flags */
+ if (pl08x->pd->memcpy_prot_buff)
+ cctl |= PL080_CONTROL_PROT_BUFF;
+ if (pl08x->pd->memcpy_prot_cache)
+ cctl |= PL080_CONTROL_PROT_CACHE;
+
+ /* We are the kernel, so we are in privileged mode */
+ cctl |= PL080_CONTROL_PROT_SYS;
+
+ /* Both to be incremented or the code will break */
+ cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
+
+ if (pl08x->vd->dualmaster)
+ cctl |= pl08x_select_bus(false,
+ pl08x->mem_buses,
+ pl08x->mem_buses);
+
+ return cctl;
+}
+
+static u32 pl08x_ftdmac020_memcpy_cctl(struct pl08x_driver_data *pl08x)
+{
+ u32 cctl = 0;
+
+ /* Conjure cctl */
+ switch (pl08x->pd->memcpy_bus_width) {
+ default:
+ dev_err(&pl08x->adev->dev,
+ "illegal bus width for memcpy, set to 8 bits\n");
+ /* Fall through */
+ case PL08X_BUS_WIDTH_8_BITS:
+ cctl |= PL080_WIDTH_8BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT |
+ PL080_WIDTH_8BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT;
+ break;
+ case PL08X_BUS_WIDTH_16_BITS:
+ cctl |= PL080_WIDTH_16BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT |
+ PL080_WIDTH_16BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT;
+ break;
+ case PL08X_BUS_WIDTH_32_BITS:
+ cctl |= PL080_WIDTH_32BIT << FTDMAC020_LLI_SRC_WIDTH_SHIFT |
+ PL080_WIDTH_32BIT << FTDMAC020_LLI_DST_WIDTH_SHIFT;
+ break;
+ }
+
+ /*
+ * By default mask the TC IRQ on all LLIs; it will be unmasked on
+ * the last LLI item by other code.
+ */
+ cctl |= FTDMAC020_LLI_TC_MSK;
+
+ /*
+ * Both to be incremented so leave bits FTDMAC020_LLI_SRCAD_CTL
+ * and FTDMAC020_LLI_DSTAD_CTL as zero
+ */
+ if (pl08x->vd->dualmaster)
+ cctl |= pl08x_select_bus(true,
+ pl08x->mem_buses,
+ pl08x->mem_buses);
+
+ return cctl;
}
/*
@@ -1452,18 +1915,16 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
dsg->src_addr = src;
dsg->dst_addr = dest;
dsg->len = len;
-
- /* Set platform data for m2m */
- txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
- txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
- ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);
-
- /* Both to be incremented or the code will break */
- txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
-
- if (pl08x->vd->dualmaster)
- txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
- pl08x->mem_buses);
+ if (pl08x->vd->ftdmac020) {
+ /* Writing CCFG zero ENABLES all interrupts */
+ txd->ccfg = 0;
+ txd->cctl = pl08x_ftdmac020_memcpy_cctl(pl08x);
+ } else {
+ txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
+ PL080_CONFIG_TC_IRQ_MASK |
+ PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ txd->cctl = pl08x_memcpy_cctl(pl08x);
+ }
ret = pl08x_fill_llis_for_desc(plchan->host, txd);
if (!ret) {
@@ -1527,7 +1988,7 @@ static struct pl08x_txd *pl08x_init_txd(
return NULL;
}
- txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);
+ txd->cctl = cctl | pl08x_select_bus(false, src_buses, dst_buses);
if (plchan->cfg.device_fc)
tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
@@ -1536,7 +1997,9 @@ static struct pl08x_txd *pl08x_init_txd(
tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
PL080_FLOW_PER2MEM;
- txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
+ PL080_CONFIG_TC_IRQ_MASK |
+ tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
ret = pl08x_request_mux(plchan);
if (ret < 0) {
@@ -1813,6 +2276,11 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
/* The Nomadik variant does not have the config register */
if (pl08x->vd->nomadik)
return;
+ /* The FTDMAC020 variant does this in another register */
+ if (pl08x->vd->ftdmac020) {
+ writel(PL080_CONFIG_ENABLE, pl08x->base + FTDMAC020_CSR);
+ return;
+ }
writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}
@@ -1925,9 +2393,16 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
chan->signal = i;
pl08x_dma_slave_init(chan);
} else {
- chan->cd = &pl08x->pd->memcpy_channel;
+ chan->cd = kzalloc(sizeof(*chan->cd), GFP_KERNEL);
+ if (!chan->cd) {
+ kfree(chan);
+ return -ENOMEM;
+ }
+ chan->cd->bus_id = "memcpy";
+ chan->cd->periph_buses = pl08x->pd->mem_buses;
chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
if (!chan->name) {
+ kfree(chan->cd);
kfree(chan);
return -ENOMEM;
}
@@ -2009,12 +2484,15 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
pl08x_state_str(chan->state));
}
- seq_printf(s, "\nPL08x virtual slave channels:\n");
- seq_printf(s, "CHANNEL:\tSTATE:\n");
- seq_printf(s, "--------\t------\n");
- list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
- seq_printf(s, "%s\t\t%s\n", chan->name,
- pl08x_state_str(chan->state));
+ if (pl08x->has_slave) {
+ seq_printf(s, "\nPL08x virtual slave channels:\n");
+ seq_printf(s, "CHANNEL:\tSTATE:\n");
+ seq_printf(s, "--------\t------\n");
+ list_for_each_entry(chan, &pl08x->slave.channels,
+ vc.chan.device_node) {
+ seq_printf(s, "%s\t\t%s\n", chan->name,
+ pl08x_state_str(chan->state));
+ }
}
return 0;
@@ -2052,6 +2530,10 @@ static struct dma_chan *pl08x_find_chan_id(struct pl08x_driver_data *pl08x,
{
struct pl08x_dma_chan *chan;
+ /* Trying to get a slave channel from something with no slave support */
+ if (!pl08x->has_slave)
+ return NULL;
+
list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
if (chan->signal == id)
return &chan->vc.chan;
@@ -2099,7 +2581,6 @@ static int pl08x_of_probe(struct amba_device *adev,
{
struct pl08x_platform_data *pd;
struct pl08x_channel_data *chanp = NULL;
- u32 cctl_memcpy = 0;
u32 val;
int ret;
int i;
@@ -2139,36 +2620,28 @@ static int pl08x_of_probe(struct amba_device *adev,
dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n");
/* Fall through */
case 1:
- cctl_memcpy |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT |
- PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT;
+ pd->memcpy_burst_size = PL08X_BURST_SZ_1;
break;
case 4:
- cctl_memcpy |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
- PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT;
+ pd->memcpy_burst_size = PL08X_BURST_SZ_4;
break;
case 8:
- cctl_memcpy |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT |
- PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT;
+ pd->memcpy_burst_size = PL08X_BURST_SZ_8;
break;
case 16:
- cctl_memcpy |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT |
- PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT;
+ pd->memcpy_burst_size = PL08X_BURST_SZ_16;
break;
case 32:
- cctl_memcpy |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT |
- PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT;
+ pd->memcpy_burst_size = PL08X_BURST_SZ_32;
break;
case 64:
- cctl_memcpy |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT |
- PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT;
+ pd->memcpy_burst_size = PL08X_BURST_SZ_64;
break;
case 128:
- cctl_memcpy |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT |
- PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT;
+ pd->memcpy_burst_size = PL08X_BURST_SZ_128;
break;
case 256:
- cctl_memcpy |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT |
- PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT;
+ pd->memcpy_burst_size = PL08X_BURST_SZ_256;
break;
}
@@ -2182,48 +2655,40 @@ static int pl08x_of_probe(struct amba_device *adev,
dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n");
/* Fall through */
case 8:
- cctl_memcpy |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT |
- PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
+ pd->memcpy_bus_width = PL08X_BUS_WIDTH_8_BITS;
break;
case 16:
- cctl_memcpy |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT |
- PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
+ pd->memcpy_bus_width = PL08X_BUS_WIDTH_16_BITS;
break;
case 32:
- cctl_memcpy |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
- PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
+ pd->memcpy_bus_width = PL08X_BUS_WIDTH_32_BITS;
break;
}
- /* This is currently the only thing making sense */
- cctl_memcpy |= PL080_CONTROL_PROT_SYS;
-
- /* Set up memcpy channel */
- pd->memcpy_channel.bus_id = "memcpy";
- pd->memcpy_channel.cctl_memcpy = cctl_memcpy;
- /* Use the buses that can access memory, obviously */
- pd->memcpy_channel.periph_buses = pd->mem_buses;
-
/*
* Allocate channel data for all possible slave channels (one
* for each possible signal), channels will then be allocated
* for a device and have it's AHB interfaces set up at
* translation time.
*/
- chanp = devm_kcalloc(&adev->dev,
- pl08x->vd->signals,
- sizeof(struct pl08x_channel_data),
- GFP_KERNEL);
- if (!chanp)
- return -ENOMEM;
+ if (pl08x->vd->signals) {
+ chanp = devm_kcalloc(&adev->dev,
+ pl08x->vd->signals,
+ sizeof(struct pl08x_channel_data),
+ GFP_KERNEL);
+ if (!chanp)
+ return -ENOMEM;
- pd->slave_channels = chanp;
- for (i = 0; i < pl08x->vd->signals; i++) {
- /* chanp->periph_buses will be assigned at translation */
- chanp->bus_id = kasprintf(GFP_KERNEL, "slave%d", i);
- chanp++;
+ pd->slave_channels = chanp;
+ for (i = 0; i < pl08x->vd->signals; i++) {
+ /*
+ * chanp->periph_buses will be assigned at translation
+ */
+ chanp->bus_id = kasprintf(GFP_KERNEL, "slave%d", i);
+ chanp++;
+ }
+ pd->num_slave_channels = pl08x->vd->signals;
}
- pd->num_slave_channels = pl08x->vd->signals;
pl08x->pd = pd;
@@ -2242,7 +2707,7 @@ static inline int pl08x_of_probe(struct amba_device *adev,
static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
struct pl08x_driver_data *pl08x;
- const struct vendor_data *vd = id->data;
+ struct vendor_data *vd = id->data;
struct device_node *np = adev->dev.of_node;
u32 tsfr_size;
int ret = 0;
@@ -2268,6 +2733,34 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->adev = adev;
pl08x->vd = vd;
+ pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
+ if (!pl08x->base) {
+ ret = -ENOMEM;
+ goto out_no_ioremap;
+ }
+
+ if (vd->ftdmac020) {
+ u32 val;
+
+ val = readl(pl08x->base + FTDMAC020_REVISION);
+ dev_info(&pl08x->adev->dev, "FTDMAC020 %d.%d rel %d\n",
+ (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
+ val = readl(pl08x->base + FTDMAC020_FEATURE);
+ dev_info(&pl08x->adev->dev, "FTDMAC020 %d channels, "
+ "%s built-in bridge, %s, %s linked lists\n",
+ (val >> 12) & 0x0f,
+ (val & BIT(10)) ? "no" : "has",
+ (val & BIT(9)) ? "AHB0 and AHB1" : "AHB0",
+ (val & BIT(8)) ? "supports" : "does not support");
+
+ /* Vendor data from feature register */
+ if (!(val & BIT(8)))
+ dev_warn(&pl08x->adev->dev,
+ "linked lists not supported, required\n");
+ vd->channels = (val >> 12) & 0x0f;
+ vd->dualmaster = !!(val & BIT(9));
+ }
+
/* Initialize memcpy engine */
dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
pl08x->memcpy.dev = &adev->dev;
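
Editor's aside: on the FTDMAC020 the probe path above fills in the vendor data at runtime by decoding the FEATURE register: channel count in bits 15:12, dual-AHB capability in bit 9, linked-list support in bit 8, built-in bridge absence in bit 10, matching the dev_info() format string. The sketch below decodes a made-up sample word the same way; it is an illustration, not driver code.

/* Decode a sample FTDMAC020 FEATURE word the way the probe path does. */
#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

int main(void)
{
	uint32_t val = 0x00008300;  /* made-up sample: 8 channels, dual AHB, LLI */

	unsigned int channels = (val >> 12) & 0x0f;
	int dualmaster        = !!(val & BIT(9));
	int has_linked_lists  = !!(val & BIT(8));

	printf("%u channels, %s built-in bridge, %s, %s linked lists\n",
	       channels,
	       (val & BIT(10)) ? "no" : "has",
	       dualmaster ? "AHB0 and AHB1" : "AHB0",
	       has_linked_lists ? "supports" : "does not support");
	return 0;
}
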
@@ -2284,25 +2777,38 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
pl08x->memcpy.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ if (vd->ftdmac020)
+ pl08x->memcpy.copy_align = DMAENGINE_ALIGN_4_BYTES;
- /* Initialize slave engine */
- dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
- dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
- pl08x->slave.dev = &adev->dev;
- pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
- pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
- pl08x->slave.device_tx_status = pl08x_dma_tx_status;
- pl08x->slave.device_issue_pending = pl08x_issue_pending;
- pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
- pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
- pl08x->slave.device_config = pl08x_config;
- pl08x->slave.device_pause = pl08x_pause;
- pl08x->slave.device_resume = pl08x_resume;
- pl08x->slave.device_terminate_all = pl08x_terminate_all;
- pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
- pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
- pl08x->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
- pl08x->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+
+ /*
+ * Initialize the slave engine. If the block has no signals, that means
+ * we have no slave support.
+ */
+ if (vd->signals) {
+ pl08x->has_slave = true;
+ dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
+ pl08x->slave.dev = &adev->dev;
+ pl08x->slave.device_free_chan_resources =
+ pl08x_free_chan_resources;
+ pl08x->slave.device_prep_dma_interrupt =
+ pl08x_prep_dma_interrupt;
+ pl08x->slave.device_tx_status = pl08x_dma_tx_status;
+ pl08x->slave.device_issue_pending = pl08x_issue_pending;
+ pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
+ pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
+ pl08x->slave.device_config = pl08x_config;
+ pl08x->slave.device_pause = pl08x_pause;
+ pl08x->slave.device_resume = pl08x_resume;
+ pl08x->slave.device_terminate_all = pl08x_terminate_all;
+ pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
+ pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
+ pl08x->slave.directions =
+ BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ pl08x->slave.residue_granularity =
+ DMA_RESIDUE_GRANULARITY_SEGMENT;
+ }
/* Get the platform data */
pl08x->pd = dev_get_platdata(&adev->dev);
@@ -2344,19 +2850,18 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
goto out_no_lli_pool;
}
- pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
- if (!pl08x->base) {
- ret = -ENOMEM;
- goto out_no_ioremap;
- }
-
/* Turn on the PL08x */
pl08x_ensure_on(pl08x);
- /* Attach the interrupt handler */
- writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
+ /* Clear any pending interrupts */
+ if (vd->ftdmac020)
+ /* This variant has error IRQs in bits 16-19 */
+ writel(0x0000FFFF, pl08x->base + PL080_ERR_CLEAR);
+ else
+ writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
+ /* Attach the interrupt handler */
ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
if (ret) {
dev_err(&adev->dev, "%s failed to request interrupt %d\n",
@@ -2377,7 +2882,25 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
ch->id = i;
ch->base = pl08x->base + PL080_Cx_BASE(i);
- ch->reg_config = ch->base + vd->config_offset;
+ if (vd->ftdmac020) {
+ /* The FTDMAC020 has a special channel busy register */
+ ch->reg_busy = ch->base + FTDMAC020_CH_BUSY;
+ ch->reg_config = ch->base + FTDMAC020_CH_CFG;
+ ch->reg_control = ch->base + FTDMAC020_CH_CSR;
+ ch->reg_src = ch->base + FTDMAC020_CH_SRC_ADDR;
+ ch->reg_dst = ch->base + FTDMAC020_CH_DST_ADDR;
+ ch->reg_lli = ch->base + FTDMAC020_CH_LLP;
+ ch->ftdmac020 = true;
+ } else {
+ ch->reg_config = ch->base + vd->config_offset;
+ ch->reg_control = ch->base + PL080_CH_CONTROL;
+ ch->reg_src = ch->base + PL080_CH_SRC_ADDR;
+ ch->reg_dst = ch->base + PL080_CH_DST_ADDR;
+ ch->reg_lli = ch->base + PL080_CH_LLI;
+ }
+ if (vd->pl080s)
+ ch->pl080s = true;
+
spin_lock_init(&ch->lock);
/*
@@ -2410,13 +2933,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
}
/* Register slave channels */
- ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
- pl08x->pd->num_slave_channels, true);
- if (ret < 0) {
- dev_warn(&pl08x->adev->dev,
- "%s failed to enumerate slave channels - %d\n",
- __func__, ret);
- goto out_no_slave;
+ if (pl08x->has_slave) {
+ ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
+ pl08x->pd->num_slave_channels, true);
+ if (ret < 0) {
+ dev_warn(&pl08x->adev->dev,
+ "%s failed to enumerate slave channels - %d\n",
+ __func__, ret);
+ goto out_no_slave;
+ }
}
ret = dma_async_device_register(&pl08x->memcpy);
@@ -2427,12 +2952,14 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
goto out_no_memcpy_reg;
}
- ret = dma_async_device_register(&pl08x->slave);
- if (ret) {
- dev_warn(&pl08x->adev->dev,
+ if (pl08x->has_slave) {
+ ret = dma_async_device_register(&pl08x->slave);
+ if (ret) {
+ dev_warn(&pl08x->adev->dev,
"%s failed to register slave as an async device - %d\n",
__func__, ret);
- goto out_no_slave_reg;
+ goto out_no_slave_reg;
+ }
}
amba_set_drvdata(adev, pl08x);
@@ -2446,7 +2973,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
out_no_slave_reg:
dma_async_device_unregister(&pl08x->memcpy);
out_no_memcpy_reg:
- pl08x_free_virtual_channels(&pl08x->slave);
+ if (pl08x->has_slave)
+ pl08x_free_virtual_channels(&pl08x->slave);
out_no_slave:
pl08x_free_virtual_channels(&pl08x->memcpy);
out_no_memcpy:
@@ -2454,11 +2982,11 @@ out_no_memcpy:
out_no_phychans:
free_irq(adev->irq[0], pl08x);
out_no_irq:
- iounmap(pl08x->base);
-out_no_ioremap:
dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
+ iounmap(pl08x->base);
+out_no_ioremap:
kfree(pl08x);
out_no_pl08x:
amba_release_regions(adev);
@@ -2499,6 +3027,12 @@ static struct vendor_data vendor_pl081 = {
.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
+static struct vendor_data vendor_ftdmac020 = {
+ .config_offset = PL080_CH_CONFIG,
+ .ftdmac020 = true,
+ .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
+};
+
static struct amba_id pl08x_ids[] = {
/* Samsung PL080S variant */
{
@@ -2524,6 +3058,12 @@ static struct amba_id pl08x_ids[] = {
.mask = 0x00ffffff,
.data = &vendor_nomadik,
},
+ /* Faraday Technology FTDMAC020 */
+ {
+ .id = 0x0003b080,
+ .mask = 0x000fffff,
+ .data = &vendor_ftdmac020,
+ },
{ 0, 0 },
};
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
new file mode 100644
index 000000000000..e41bbc7cb094
--- /dev/null
+++ b/drivers/dma/bcm-sba-raid.c
@@ -0,0 +1,1785 @@
+/*
+ * Copyright (C) 2017 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Broadcom SBA RAID Driver
+ *
+ * The Broadcom stream buffer accelerator (SBA) provides offloading
+ * capabilities for RAID operations. The SBA offload engine is accessible
+ * via the Broadcom SoC specific ring manager. Two or more offload engines
+ * can share the same ring manager, which is why the ring manager driver is
+ * implemented as a mailbox controller driver and the offload engine
+ * drivers are implemented as mailbox clients.
+ *
+ * Typically, the Broadcom SoC specific ring manager exposes a large
+ * number of hardware rings over one or more SBA hardware devices. By
+ * design, the internal buffer size of an SBA hardware device is limited,
+ * but every offload operation supported by SBA can be broken down into
+ * multiple small requests and executed in parallel on multiple SBA
+ * hardware devices to achieve high throughput.
+ *
+ * The Broadcom SBA RAID driver does not require any register programming
+ * except for submitting requests to the SBA hardware via mailbox channels.
+ * This driver implements a DMA device with one DMA channel using a set
+ * of mailbox channels provided by the Broadcom SoC specific ring manager
+ * driver. To exploit parallelism (as described above), all DMA requests
+ * arriving on the SBA RAID DMA channel are broken down into smaller
+ * requests and submitted to multiple mailbox channels in a round-robin
+ * fashion. To get more SBA DMA channels, more SBA device nodes can be
+ * created in the Broadcom SoC specific DTS, based on the number of
+ * hardware rings supported by the Broadcom SoC ring manager.
+ */
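For readers coming from the dmaengine client side, the sketch below shows roughly how a generic kernel consumer could exercise the DMA_MEMCPY capability this driver registers. It is an illustration only, not part of the patch: the function name and buffers are made up, mapping error handling is trimmed, and a real client would normally complete through a callback instead of polling with dma_sync_wait().

/* Illustrative dmaengine client sketch, not part of this patch. */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static int example_offload_copy(void *dst, void *src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dst_dma, src_dma;
	dma_cookie_t cookie;
	int ret = 0;

	/* Ask the dmaengine core for any channel advertising DMA_MEMCPY */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	src_dma = dma_map_single(chan->device->dev, src, len, DMA_TO_DEVICE);
	dst_dma = dma_map_single(chan->device->dev, dst, len, DMA_FROM_DEVICE);

	/* The SBA driver internally splits this into hw_buf_size sized requests */
	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
				       DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -ENOMEM;
		goto out;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* Poll for completion; completion is reported via the mailbox rx path */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -EIO;

out:
	dma_unmap_single(chan->device->dev, dst_dma, len, DMA_FROM_DEVICE);
	dma_unmap_single(chan->device->dev, src_dma, len, DMA_TO_DEVICE);
	dma_release_channel(chan);
	return ret;
}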
+
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/list.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/brcm-message.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/raid/pq.h>
+
+#include "dmaengine.h"
+
+/* SBA command related defines */
+#define SBA_TYPE_SHIFT 48
+#define SBA_TYPE_MASK GENMASK(1, 0)
+#define SBA_TYPE_A 0x0
+#define SBA_TYPE_B 0x2
+#define SBA_TYPE_C 0x3
+#define SBA_USER_DEF_SHIFT 32
+#define SBA_USER_DEF_MASK GENMASK(15, 0)
+#define SBA_R_MDATA_SHIFT 24
+#define SBA_R_MDATA_MASK GENMASK(7, 0)
+#define SBA_C_MDATA_MS_SHIFT 18
+#define SBA_C_MDATA_MS_MASK GENMASK(1, 0)
+#define SBA_INT_SHIFT 17
+#define SBA_INT_MASK BIT(0)
+#define SBA_RESP_SHIFT 16
+#define SBA_RESP_MASK BIT(0)
+#define SBA_C_MDATA_SHIFT 8
+#define SBA_C_MDATA_MASK GENMASK(7, 0)
+#define SBA_C_MDATA_BNUMx_SHIFT(__bnum) (2 * (__bnum))
+#define SBA_C_MDATA_BNUMx_MASK GENMASK(1, 0)
+#define SBA_C_MDATA_DNUM_SHIFT 5
+#define SBA_C_MDATA_DNUM_MASK GENMASK(4, 0)
+#define SBA_C_MDATA_LS(__v) ((__v) & 0xff)
+#define SBA_C_MDATA_MS(__v) (((__v) >> 8) & 0x3)
+#define SBA_CMD_SHIFT 0
+#define SBA_CMD_MASK GENMASK(3, 0)
+#define SBA_CMD_ZERO_BUFFER 0x4
+#define SBA_CMD_ZERO_ALL_BUFFERS 0x8
+#define SBA_CMD_LOAD_BUFFER 0x9
+#define SBA_CMD_XOR 0xa
+#define SBA_CMD_GALOIS_XOR 0xb
+#define SBA_CMD_WRITE_BUFFER 0xc
+#define SBA_CMD_GALOIS 0xe
+
+/* Driver helper macros */
+#define to_sba_request(tx) \
+ container_of(tx, struct sba_request, tx)
+#define to_sba_device(dchan) \
+ container_of(dchan, struct sba_device, dma_chan)
+
+enum sba_request_state {
+ SBA_REQUEST_STATE_FREE = 1,
+ SBA_REQUEST_STATE_ALLOCED = 2,
+ SBA_REQUEST_STATE_PENDING = 3,
+ SBA_REQUEST_STATE_ACTIVE = 4,
+ SBA_REQUEST_STATE_RECEIVED = 5,
+ SBA_REQUEST_STATE_COMPLETED = 6,
+ SBA_REQUEST_STATE_ABORTED = 7,
+};
+
+struct sba_request {
+ /* Global state */
+ struct list_head node;
+ struct sba_device *sba;
+ enum sba_request_state state;
+ bool fence;
+ /* Chained requests management */
+ struct sba_request *first;
+ struct list_head next;
+ unsigned int next_count;
+ atomic_t next_pending_count;
+ /* BRCM message data */
+ void *resp;
+ dma_addr_t resp_dma;
+ struct brcm_sba_command *cmds;
+ struct brcm_message msg;
+ struct dma_async_tx_descriptor tx;
+};
+
+enum sba_version {
+ SBA_VER_1 = 0,
+ SBA_VER_2
+};
+
+struct sba_device {
+ /* Underlying device */
+ struct device *dev;
+ /* DT configuration parameters */
+ enum sba_version ver;
+ /* Derived configuration parameters */
+ u32 max_req;
+ u32 hw_buf_size;
+ u32 hw_resp_size;
+ u32 max_pq_coefs;
+ u32 max_pq_srcs;
+ u32 max_cmd_per_req;
+ u32 max_xor_srcs;
+ u32 max_resp_pool_size;
+ u32 max_cmds_pool_size;
+ /* Mailbox client and mailbox channels */
+ struct mbox_client client;
+ int mchans_count;
+ atomic_t mchans_current;
+ struct mbox_chan **mchans;
+ struct device *mbox_dev;
+ /* DMA device and DMA channel */
+ struct dma_device dma_dev;
+ struct dma_chan dma_chan;
+ /* DMA channel resources */
+ void *resp_base;
+ dma_addr_t resp_dma_base;
+ void *cmds_base;
+ dma_addr_t cmds_dma_base;
+ spinlock_t reqs_lock;
+ struct sba_request *reqs;
+ bool reqs_fence;
+ struct list_head reqs_alloc_list;
+ struct list_head reqs_pending_list;
+ struct list_head reqs_active_list;
+ struct list_head reqs_received_list;
+ struct list_head reqs_completed_list;
+ struct list_head reqs_aborted_list;
+ struct list_head reqs_free_list;
+ int reqs_free_count;
+};
+
+/* ====== SBA command helper routines ===== */
+
+static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
+{
+ cmd &= ~((u64)mask << shift);
+ cmd |= ((u64)(val & mask) << shift);
+ return cmd;
+}
+
+static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
+{
+ return b0 & SBA_C_MDATA_BNUMx_MASK;
+}
+
+static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
+{
+ return b0 & SBA_C_MDATA_BNUMx_MASK;
+}
+
+static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
+{
+ return (b0 & SBA_C_MDATA_BNUMx_MASK) |
+ ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
+}
+
+static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
+{
+ return (b0 & SBA_C_MDATA_BNUMx_MASK) |
+ ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
+ ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
+}
+
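As a worked example of the command-word layout defined above, the standalone program below packs a Type-B LOAD_BUFFER command the same way sba_cmd_enc() does. The constants are copied from the defines above; the 4096-byte length and buffer index 0 are arbitrary example values, and this is plain userspace C rather than kernel code.

/* Standalone illustration of the SBA command-word packing. */
#include <stdint.h>
#include <stdio.h>

#define SBA_TYPE_SHIFT       48
#define SBA_TYPE_MASK        0x3ULL     /* GENMASK(1, 0) */
#define SBA_TYPE_B           0x2
#define SBA_USER_DEF_SHIFT   32
#define SBA_USER_DEF_MASK    0xffffULL  /* GENMASK(15, 0) */
#define SBA_C_MDATA_SHIFT    8
#define SBA_C_MDATA_MASK     0xffULL    /* GENMASK(7, 0) */
#define SBA_CMD_SHIFT        0
#define SBA_CMD_MASK         0xfULL     /* GENMASK(3, 0) */
#define SBA_CMD_LOAD_BUFFER  0x9

/* Same scheme as sba_cmd_enc(): clear the field, then OR in the new value */
static uint64_t cmd_enc(uint64_t cmd, uint32_t val, unsigned int shift,
			uint64_t mask)
{
	cmd &= ~(mask << shift);
	cmd |= ((uint64_t)val & mask) << shift;
	return cmd;
}

int main(void)
{
	uint64_t cmd = 0;

	/* Type-B LOAD_BUFFER of 4096 bytes into buffer 0 */
	cmd = cmd_enc(cmd, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = cmd_enc(cmd, 4096, SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = cmd_enc(cmd, 0, SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, SBA_CMD_SHIFT, SBA_CMD_MASK);

	/* Prints: command word: 0x0002100000000009 */
	printf("command word: 0x%016llx\n", (unsigned long long)cmd);
	return 0;
}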
+/* ====== Channel resource management routines ===== */
+
+static struct sba_request *sba_alloc_request(struct sba_device *sba)
+{
+ unsigned long flags;
+ struct sba_request *req = NULL;
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ req = list_first_entry_or_null(&sba->reqs_free_list,
+ struct sba_request, node);
+ if (req) {
+ list_move_tail(&req->node, &sba->reqs_alloc_list);
+ req->state = SBA_REQUEST_STATE_ALLOCED;
+ req->fence = false;
+ req->first = req;
+ INIT_LIST_HEAD(&req->next);
+ req->next_count = 1;
+ atomic_set(&req->next_pending_count, 1);
+
+ sba->reqs_free_count--;
+
+ dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
+ }
+
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+
+ return req;
+}
+
+/* Note: Must be called with sba->reqs_lock held */
+static void _sba_pending_request(struct sba_device *sba,
+ struct sba_request *req)
+{
+ lockdep_assert_held(&sba->reqs_lock);
+ req->state = SBA_REQUEST_STATE_PENDING;
+ list_move_tail(&req->node, &sba->reqs_pending_list);
+ if (list_empty(&sba->reqs_active_list))
+ sba->reqs_fence = false;
+}
+
+/* Note: Must be called with sba->reqs_lock held */
+static bool _sba_active_request(struct sba_device *sba,
+ struct sba_request *req)
+{
+ lockdep_assert_held(&sba->reqs_lock);
+ if (list_empty(&sba->reqs_active_list))
+ sba->reqs_fence = false;
+ if (sba->reqs_fence)
+ return false;
+ req->state = SBA_REQUEST_STATE_ACTIVE;
+ list_move_tail(&req->node, &sba->reqs_active_list);
+ if (req->fence)
+ sba->reqs_fence = true;
+ return true;
+}
+
+/* Note: Must be called with sba->reqs_lock held */
+static void _sba_abort_request(struct sba_device *sba,
+ struct sba_request *req)
+{
+ lockdep_assert_held(&sba->reqs_lock);
+ req->state = SBA_REQUEST_STATE_ABORTED;
+ list_move_tail(&req->node, &sba->reqs_aborted_list);
+ if (list_empty(&sba->reqs_active_list))
+ sba->reqs_fence = false;
+}
+
+/* Note: Must be called with sba->reqs_lock held */
+static void _sba_free_request(struct sba_device *sba,
+ struct sba_request *req)
+{
+ lockdep_assert_held(&sba->reqs_lock);
+ req->state = SBA_REQUEST_STATE_FREE;
+ list_move_tail(&req->node, &sba->reqs_free_list);
+ if (list_empty(&sba->reqs_active_list))
+ sba->reqs_fence = false;
+ sba->reqs_free_count++;
+}
+
+static void sba_received_request(struct sba_request *req)
+{
+ unsigned long flags;
+ struct sba_device *sba = req->sba;
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+ req->state = SBA_REQUEST_STATE_RECEIVED;
+ list_move_tail(&req->node, &sba->reqs_received_list);
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+}
+
+static void sba_complete_chained_requests(struct sba_request *req)
+{
+ unsigned long flags;
+ struct sba_request *nreq;
+ struct sba_device *sba = req->sba;
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ req->state = SBA_REQUEST_STATE_COMPLETED;
+ list_move_tail(&req->node, &sba->reqs_completed_list);
+ list_for_each_entry(nreq, &req->next, next) {
+ nreq->state = SBA_REQUEST_STATE_COMPLETED;
+ list_move_tail(&nreq->node, &sba->reqs_completed_list);
+ }
+ if (list_empty(&sba->reqs_active_list))
+ sba->reqs_fence = false;
+
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+}
+
+static void sba_free_chained_requests(struct sba_request *req)
+{
+ unsigned long flags;
+ struct sba_request *nreq;
+ struct sba_device *sba = req->sba;
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ _sba_free_request(sba, req);
+ list_for_each_entry(nreq, &req->next, next)
+ _sba_free_request(sba, nreq);
+
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+}
+
+static void sba_chain_request(struct sba_request *first,
+ struct sba_request *req)
+{
+ unsigned long flags;
+ struct sba_device *sba = req->sba;
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ list_add_tail(&req->next, &first->next);
+ req->first = first;
+ first->next_count++;
+ atomic_set(&first->next_pending_count, first->next_count);
+
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+}
+
+static void sba_cleanup_nonpending_requests(struct sba_device *sba)
+{
+ unsigned long flags;
+ struct sba_request *req, *req1;
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ /* Free up all allocated requests */
+ list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
+ _sba_free_request(sba, req);
+
+ /* Free up all received requests */
+ list_for_each_entry_safe(req, req1, &sba->reqs_received_list, node)
+ _sba_free_request(sba, req);
+
+ /* Free up all completed requests */
+ list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node)
+ _sba_free_request(sba, req);
+
+ /* Set all active requests as aborted */
+ list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
+ _sba_abort_request(sba, req);
+
+ /*
+ * Note: We expect that aborted requests will eventually be
+ * freed by sba_receive_message()
+ */
+
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+}
+
+static void sba_cleanup_pending_requests(struct sba_device *sba)
+{
+ unsigned long flags;
+ struct sba_request *req, *req1;
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ /* Free up all pending requests */
+ list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
+ _sba_free_request(sba, req);
+
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+}
+
+/* ====== DMAENGINE callbacks ===== */
+
+static void sba_free_chan_resources(struct dma_chan *dchan)
+{
+ /*
+ * Channel resources are pre-allocated, so we just free up
+ * whatever we can and reuse the pre-allocated channel
+ * resources next time.
+ */
+ sba_cleanup_nonpending_requests(to_sba_device(dchan));
+}
+
+static int sba_device_terminate_all(struct dma_chan *dchan)
+{
+ /* Cleanup all pending requests */
+ sba_cleanup_pending_requests(to_sba_device(dchan));
+
+ return 0;
+}
+
+static int sba_send_mbox_request(struct sba_device *sba,
+ struct sba_request *req)
+{
+ int mchans_idx, ret = 0;
+
+ /* Select mailbox channel in round-robin fashion */
+ mchans_idx = atomic_inc_return(&sba->mchans_current);
+ mchans_idx = mchans_idx % sba->mchans_count;
+
+ /* Send message for the request */
+ req->msg.error = 0;
+ ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg);
+ if (ret < 0) {
+ dev_err(sba->dev, "send message failed with error %d", ret);
+ return ret;
+ }
+ ret = req->msg.error;
+ if (ret < 0) {
+ dev_err(sba->dev, "message error %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sba_issue_pending(struct dma_chan *dchan)
+{
+ int ret;
+ unsigned long flags;
+ struct sba_request *req, *req1;
+ struct sba_device *sba = to_sba_device(dchan);
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ /* Process all pending requests */
+ list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) {
+ /* Try to make request active */
+ if (!_sba_active_request(sba, req))
+ break;
+
+ /* Send request to mailbox channel */
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+ ret = sba_send_mbox_request(sba, req);
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ /* If something went wrong then keep request pending */
+ if (ret < 0) {
+ _sba_pending_request(sba, req);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+}
+
+static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ unsigned long flags;
+ dma_cookie_t cookie;
+ struct sba_device *sba;
+ struct sba_request *req, *nreq;
+
+ if (unlikely(!tx))
+ return -EINVAL;
+
+ sba = to_sba_device(tx->chan);
+ req = to_sba_request(tx);
+
+ /* Assign cookie and mark all chained requests pending */
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+ cookie = dma_cookie_assign(tx);
+ _sba_pending_request(sba, req);
+ list_for_each_entry(nreq, &req->next, next)
+ _sba_pending_request(sba, nreq);
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+
+ return cookie;
+}
+
+static enum dma_status sba_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ int mchan_idx;
+ struct sba_device *sba = to_sba_device(dchan);
+
+ for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
+ mbox_client_peek_data(sba->mchans[mchan_idx]);
+
+ return dma_cookie_status(dchan, cookie, txstate);
+}
+
+static void sba_fillup_interrupt_msg(struct sba_request *req,
+ struct brcm_sba_command *cmds,
+ struct brcm_message *msg)
+{
+ u64 cmd;
+ u32 c_mdata;
+ struct brcm_sba_command *cmdsp = cmds;
+
+ /* Type-B command to load dummy data into buf0 */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_load_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = req->resp_dma;
+ cmdsp->data_len = req->sba->hw_resp_size;
+ cmdsp++;
+
+ /* Type-A command to write buf0 to dummy location */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ cmd = sba_cmd_enc(cmd, 0x1,
+ SBA_RESP_SHIFT, SBA_RESP_MASK);
+ c_mdata = sba_cmd_write_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ if (req->sba->hw_resp_size) {
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
+ cmdsp->resp = req->resp_dma;
+ cmdsp->resp_len = req->sba->hw_resp_size;
+ }
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
+ cmdsp->data = req->resp_dma;
+ cmdsp->data_len = req->sba->hw_resp_size;
+ cmdsp++;
+
+ /* Fill in the brcm_message */
+ msg->type = BRCM_MESSAGE_SBA;
+ msg->sba.cmds = cmds;
+ msg->sba.cmds_count = cmdsp - cmds;
+ msg->ctx = req;
+ msg->error = 0;
+}
+
+static struct dma_async_tx_descriptor *
+sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
+{
+ struct sba_request *req = NULL;
+ struct sba_device *sba = to_sba_device(dchan);
+
+ /* Alloc new request */
+ req = sba_alloc_request(sba);
+ if (!req)
+ return NULL;
+
+ /*
+ * Force fence so that no requests are submitted
+ * until DMA callback for this request is invoked.
+ */
+ req->fence = true;
+
+ /* Fill in the request message */
+ sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
+
+ /* Init async_tx descriptor */
+ req->tx.flags = flags;
+ req->tx.cookie = -EBUSY;
+
+ return &req->tx;
+}
+
+static void sba_fillup_memcpy_msg(struct sba_request *req,
+ struct brcm_sba_command *cmds,
+ struct brcm_message *msg,
+ dma_addr_t msg_offset, size_t msg_len,
+ dma_addr_t dst, dma_addr_t src)
+{
+ u64 cmd;
+ u32 c_mdata;
+ struct brcm_sba_command *cmdsp = cmds;
+
+ /* Type-B command to load data into buf0 */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_load_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = src + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+
+ /* Type-A command to write buf0 */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ cmd = sba_cmd_enc(cmd, 0x1,
+ SBA_RESP_SHIFT, SBA_RESP_MASK);
+ c_mdata = sba_cmd_write_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ if (req->sba->hw_resp_size) {
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
+ cmdsp->resp = req->resp_dma;
+ cmdsp->resp_len = req->sba->hw_resp_size;
+ }
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
+ cmdsp->data = dst + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+
+ /* Fill in the brcm_message */
+ msg->type = BRCM_MESSAGE_SBA;
+ msg->sba.cmds = cmds;
+ msg->sba.cmds_count = cmdsp - cmds;
+ msg->ctx = req;
+ msg->error = 0;
+}
+
+static struct sba_request *
+sba_prep_dma_memcpy_req(struct sba_device *sba,
+ dma_addr_t off, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct sba_request *req = NULL;
+
+ /* Alloc new request */
+ req = sba_alloc_request(sba);
+ if (!req)
+ return NULL;
+ req->fence = (flags & DMA_PREP_FENCE) ? true : false;
+
+ /* Fill in the request message */
+ sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
+ off, len, dst, src);
+
+ /* Init async_tx descriptor */
+ req->tx.flags = flags;
+ req->tx.cookie = -EBUSY;
+
+ return req;
+}
+
+static struct dma_async_tx_descriptor *
+sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ size_t req_len;
+ dma_addr_t off = 0;
+ struct sba_device *sba = to_sba_device(dchan);
+ struct sba_request *first = NULL, *req;
+
+ /* Create chained requests where each request is up to hw_buf_size */
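+ /*
+ * For example, with hw_buf_size of 4096 a 10000 byte copy becomes
+ * three chained requests covering 4096, 4096 and 1808 bytes.
+ */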
+ while (len) {
+ req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
+
+ req = sba_prep_dma_memcpy_req(sba, off, dst, src,
+ req_len, flags);
+ if (!req) {
+ if (first)
+ sba_free_chained_requests(first);
+ return NULL;
+ }
+
+ if (first)
+ sba_chain_request(first, req);
+ else
+ first = req;
+
+ off += req_len;
+ len -= req_len;
+ }
+
+ return (first) ? &first->tx : NULL;
+}
+
+static void sba_fillup_xor_msg(struct sba_request *req,
+ struct brcm_sba_command *cmds,
+ struct brcm_message *msg,
+ dma_addr_t msg_offset, size_t msg_len,
+ dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
+{
+ u64 cmd;
+ u32 c_mdata;
+ unsigned int i;
+ struct brcm_sba_command *cmdsp = cmds;
+
+ /* Type-B command to load data into buf0 */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_load_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = src[0] + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+
+ /* Type-B commands to xor data with buf0 and put it back in buf0 */
+ for (i = 1; i < src_cnt; i++) {
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_xor_c_mdata(0, 0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = src[i] + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+ }
+
+ /* Type-A command to write buf0 */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ cmd = sba_cmd_enc(cmd, 0x1,
+ SBA_RESP_SHIFT, SBA_RESP_MASK);
+ c_mdata = sba_cmd_write_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ if (req->sba->hw_resp_size) {
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
+ cmdsp->resp = req->resp_dma;
+ cmdsp->resp_len = req->sba->hw_resp_size;
+ }
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
+ cmdsp->data = dst + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+
+ /* Fill in the brcm_message */
+ msg->type = BRCM_MESSAGE_SBA;
+ msg->sba.cmds = cmds;
+ msg->sba.cmds_count = cmdsp - cmds;
+ msg->ctx = req;
+ msg->error = 0;
+}
+
+static struct sba_request *
+sba_prep_dma_xor_req(struct sba_device *sba,
+ dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
+ u32 src_cnt, size_t len, unsigned long flags)
+{
+ struct sba_request *req = NULL;
+
+ /* Alloc new request */
+ req = sba_alloc_request(sba);
+ if (!req)
+ return NULL;
+ req->fence = (flags & DMA_PREP_FENCE) ? true : false;
+
+ /* Fill in the request message */
+ sba_fillup_xor_msg(req, req->cmds, &req->msg,
+ off, len, dst, src, src_cnt);
+
+ /* Init async_tx descriptor */
+ req->tx.flags = flags;
+ req->tx.cookie = -EBUSY;
+
+ return req;
+}
+
+static struct dma_async_tx_descriptor *
+sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
+ u32 src_cnt, size_t len, unsigned long flags)
+{
+ size_t req_len;
+ dma_addr_t off = 0;
+ struct sba_device *sba = to_sba_device(dchan);
+ struct sba_request *first = NULL, *req;
+
+ /* Sanity checks */
+ if (unlikely(src_cnt > sba->max_xor_srcs))
+ return NULL;
+
+ /* Create chained requests where each request is up to hw_buf_size */
+ while (len) {
+ req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
+
+ req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
+ req_len, flags);
+ if (!req) {
+ if (first)
+ sba_free_chained_requests(first);
+ return NULL;
+ }
+
+ if (first)
+ sba_chain_request(first, req);
+ else
+ first = req;
+
+ off += req_len;
+ len -= req_len;
+ }
+
+ return (first) ? &first->tx : NULL;
+}
+
+static void sba_fillup_pq_msg(struct sba_request *req,
+ bool pq_continue,
+ struct brcm_sba_command *cmds,
+ struct brcm_message *msg,
+ dma_addr_t msg_offset, size_t msg_len,
+ dma_addr_t *dst_p, dma_addr_t *dst_q,
+ const u8 *scf, dma_addr_t *src, u32 src_cnt)
+{
+ u64 cmd;
+ u32 c_mdata;
+ unsigned int i;
+ struct brcm_sba_command *cmdsp = cmds;
+
+ if (pq_continue) {
+ /* Type-B command to load old P into buf0 */
+ if (dst_p) {
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_load_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = *dst_p + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+ }
+
+ /* Type-B command to load old Q into buf1 */
+ if (dst_q) {
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_load_c_mdata(1);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = *dst_q + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+ }
+ } else {
+ /* Type-A command to zero all buffers */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ cmdsp++;
+ }
+
+ /* Type-B commands to generate P into buf0 and Q into buf1 */
+ for (i = 0; i < src_cnt; i++) {
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
+ SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = src[i] + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+ }
+
+ /* Type-A command to write buf0 */
+ if (dst_p) {
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ cmd = sba_cmd_enc(cmd, 0x1,
+ SBA_RESP_SHIFT, SBA_RESP_MASK);
+ c_mdata = sba_cmd_write_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ if (req->sba->hw_resp_size) {
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
+ cmdsp->resp = req->resp_dma;
+ cmdsp->resp_len = req->sba->hw_resp_size;
+ }
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
+ cmdsp->data = *dst_p + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+ }
+
+ /* Type-A command to write buf1 */
+ if (dst_q) {
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ cmd = sba_cmd_enc(cmd, 0x1,
+ SBA_RESP_SHIFT, SBA_RESP_MASK);
+ c_mdata = sba_cmd_write_c_mdata(1);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ if (req->sba->hw_resp_size) {
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
+ cmdsp->resp = req->resp_dma;
+ cmdsp->resp_len = req->sba->hw_resp_size;
+ }
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
+ cmdsp->data = *dst_q + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+ }
+
+ /* Fill in the brcm_message */
+ msg->type = BRCM_MESSAGE_SBA;
+ msg->sba.cmds = cmds;
+ msg->sba.cmds_count = cmdsp - cmds;
+ msg->ctx = req;
+ msg->error = 0;
+}
+
+static struct sba_request *
+sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
+ dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
+ u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
+{
+ struct sba_request *req = NULL;
+
+ /* Alloc new request */
+ req = sba_alloc_request(sba);
+ if (!req)
+ return NULL;
+ req->fence = (flags & DMA_PREP_FENCE) ? true : false;
+
+ /* Fill in the request message */
+ sba_fillup_pq_msg(req, dmaf_continue(flags),
+ req->cmds, &req->msg,
+ off, len, dst_p, dst_q, scf, src, src_cnt);
+
+ /* Init async_tx descriptor */
+ req->tx.flags = flags;
+ req->tx.cookie = -EBUSY;
+
+ return req;
+}
+
+static void sba_fillup_pq_single_msg(struct sba_request *req,
+ bool pq_continue,
+ struct brcm_sba_command *cmds,
+ struct brcm_message *msg,
+ dma_addr_t msg_offset, size_t msg_len,
+ dma_addr_t *dst_p, dma_addr_t *dst_q,
+ dma_addr_t src, u8 scf)
+{
+ u64 cmd;
+ u32 c_mdata;
+ u8 pos, dpos = raid6_gflog[scf];
+ struct brcm_sba_command *cmdsp = cmds;
+
+ if (!dst_p)
+ goto skip_p;
+
+ if (pq_continue) {
+ /* Type-B command to load old P into buf0 */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_load_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = *dst_p + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+
+ /*
+ * Type-B command to XOR the source data with buf0
+ * and put it back in buf0
+ */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_xor_c_mdata(0, 0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = src + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+ } else {
+ /* Type-B command to load the source data into buf0 */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_load_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = src + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+ }
+
+ /* Type-A command to write buf0 */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ cmd = sba_cmd_enc(cmd, 0x1,
+ SBA_RESP_SHIFT, SBA_RESP_MASK);
+ c_mdata = sba_cmd_write_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ if (req->sba->hw_resp_size) {
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
+ cmdsp->resp = req->resp_dma;
+ cmdsp->resp_len = req->sba->hw_resp_size;
+ }
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
+ cmdsp->data = *dst_p + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+
+skip_p:
+ if (!dst_q)
+ goto skip_q;
+
+ /* Type-A command to zero all buffers */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ cmdsp++;
+
+ if (dpos == 255)
+ goto skip_q_computation;
+ pos = (dpos < req->sba->max_pq_coefs) ?
+ dpos : (req->sba->max_pq_coefs - 1);
+
+ /*
+ * Type-B command to generate initial Q from data
+ * and store output into buf0
+ */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
+ SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = src + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+
+ dpos -= pos;
+
+ /* Multiple Type-A commands to generate the final Q */
+ while (dpos) {
+ pos = (dpos < req->sba->max_pq_coefs) ?
+ dpos : (req->sba->max_pq_coefs - 1);
+
+ /*
+ * Type-A command to generate Q from buf0 and
+ * buf1, storing the result in buf0
+ */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
+ SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ cmdsp++;
+
+ dpos -= pos;
+ }
+
+skip_q_computation:
+ if (pq_continue) {
+ /*
+ * Type-B command to XOR previous output with
+ * buf0 and write it into buf0
+ */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ c_mdata = sba_cmd_xor_c_mdata(0, 0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
+ cmdsp->data = *dst_q + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+ }
+
+ /* Type-A command to write buf0 */
+ cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
+ SBA_TYPE_SHIFT, SBA_TYPE_MASK);
+ cmd = sba_cmd_enc(cmd, msg_len,
+ SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
+ cmd = sba_cmd_enc(cmd, 0x1,
+ SBA_RESP_SHIFT, SBA_RESP_MASK);
+ c_mdata = sba_cmd_write_c_mdata(0);
+ cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
+ SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
+ cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
+ SBA_CMD_SHIFT, SBA_CMD_MASK);
+ cmdsp->cmd = cmd;
+ *cmdsp->cmd_dma = cpu_to_le64(cmd);
+ cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
+ if (req->sba->hw_resp_size) {
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
+ cmdsp->resp = req->resp_dma;
+ cmdsp->resp_len = req->sba->hw_resp_size;
+ }
+ cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
+ cmdsp->data = *dst_q + msg_offset;
+ cmdsp->data_len = msg_len;
+ cmdsp++;
+
+skip_q:
+ /* Fill in the brcm_message */
+ msg->type = BRCM_MESSAGE_SBA;
+ msg->sba.cmds = cmds;
+ msg->sba.cmds_count = cmdsp - cmds;
+ msg->ctx = req;
+ msg->error = 0;
+}
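The exponent handling above deserves a worked example. The coefficient multiplication is done in the log domain: dpos = raid6_gflog[scf] is split into per-command exponents of at most max_pq_coefs - 1, and one GALOIS command is issued per chunk. The toy program below reproduces only that splitting loop; max_pq_coefs = 30 matches the SBA_VER_2 value set in sba_probe() further down, and dpos = 75 is an arbitrary example.

/* Illustration only: how the single-source PQ path splits a Galois-field
 * exponent across several GALOIS commands. */
#include <stdio.h>

int main(void)
{
	unsigned int max_pq_coefs = 30;	/* SBA_VER_2 value from sba_probe() */
	unsigned int dpos = 75;		/* example raid6_gflog[scf] */
	unsigned int pos;

	/* Same splitting rule as sba_fillup_pq_single_msg() above */
	while (dpos) {
		pos = (dpos < max_pq_coefs) ? dpos : (max_pq_coefs - 1);
		printf("GALOIS command with exponent %u\n", pos); /* 29, 29, 17 */
		dpos -= pos;
	}
	return 0;
}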
+
+static struct sba_request *
+sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
+ dma_addr_t *dst_p, dma_addr_t *dst_q,
+ dma_addr_t src, u8 scf, size_t len,
+ unsigned long flags)
+{
+ struct sba_request *req = NULL;
+
+ /* Alloc new request */
+ req = sba_alloc_request(sba);
+ if (!req)
+ return NULL;
+ req->fence = (flags & DMA_PREP_FENCE) ? true : false;
+
+ /* Fill in the request message */
+ sba_fillup_pq_single_msg(req, dmaf_continue(flags),
+ req->cmds, &req->msg, off, len,
+ dst_p, dst_q, src, scf);
+
+ /* Init async_tx descriptor */
+ req->tx.flags = flags;
+ req->tx.cookie = -EBUSY;
+
+ return req;
+}
+
+static struct dma_async_tx_descriptor *
+sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
+ u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
+{
+ u32 i, dst_q_index;
+ size_t req_len;
+ bool slow = false;
+ dma_addr_t off = 0;
+ dma_addr_t *dst_p = NULL, *dst_q = NULL;
+ struct sba_device *sba = to_sba_device(dchan);
+ struct sba_request *first = NULL, *req;
+
+ /* Sanity checks */
+ if (unlikely(src_cnt > sba->max_pq_srcs))
+ return NULL;
+ for (i = 0; i < src_cnt; i++)
+ if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
+ slow = true;
+
+ /* Figure-out P and Q destination addresses */
+ if (!(flags & DMA_PREP_PQ_DISABLE_P))
+ dst_p = &dst[0];
+ if (!(flags & DMA_PREP_PQ_DISABLE_Q))
+ dst_q = &dst[1];
+
+ /* Create chained requests where each request is up to hw_buf_size */
+ while (len) {
+ req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
+
+ if (slow) {
+ dst_q_index = src_cnt;
+
+ if (dst_q) {
+ for (i = 0; i < src_cnt; i++) {
+ if (*dst_q == src[i]) {
+ dst_q_index = i;
+ break;
+ }
+ }
+ }
+
+ if (dst_q_index < src_cnt) {
+ i = dst_q_index;
+ req = sba_prep_dma_pq_single_req(sba,
+ off, dst_p, dst_q, src[i], scf[i],
+ req_len, flags | DMA_PREP_FENCE);
+ if (!req)
+ goto fail;
+
+ if (first)
+ sba_chain_request(first, req);
+ else
+ first = req;
+
+ flags |= DMA_PREP_CONTINUE;
+ }
+
+ for (i = 0; i < src_cnt; i++) {
+ if (dst_q_index == i)
+ continue;
+
+ req = sba_prep_dma_pq_single_req(sba,
+ off, dst_p, dst_q, src[i], scf[i],
+ req_len, flags | DMA_PREP_FENCE);
+ if (!req)
+ goto fail;
+
+ if (first)
+ sba_chain_request(first, req);
+ else
+ first = req;
+
+ flags |= DMA_PREP_CONTINUE;
+ }
+ } else {
+ req = sba_prep_dma_pq_req(sba, off,
+ dst_p, dst_q, src, src_cnt,
+ scf, req_len, flags);
+ if (!req)
+ goto fail;
+
+ if (first)
+ sba_chain_request(first, req);
+ else
+ first = req;
+ }
+
+ off += req_len;
+ len -= req_len;
+ }
+
+ return (first) ? &first->tx : NULL;
+
+fail:
+ if (first)
+ sba_free_chained_requests(first);
+ return NULL;
+}
+
+/* ====== Mailbox callbacks ===== */
+
+static void sba_dma_tx_actions(struct sba_request *req)
+{
+ struct dma_async_tx_descriptor *tx = &req->tx;
+
+ WARN_ON(tx->cookie < 0);
+
+ if (tx->cookie > 0) {
+ dma_cookie_complete(tx);
+
+ /*
+ * Call the callback (must not sleep or submit new
+ * operations to this channel)
+ */
+ if (tx->callback)
+ tx->callback(tx->callback_param);
+
+ dma_descriptor_unmap(tx);
+ }
+
+ /* Run dependent operations */
+ dma_run_dependencies(tx);
+
+ /* If waiting for 'ack' then move to completed list */
+ if (!async_tx_test_ack(&req->tx))
+ sba_complete_chained_requests(req);
+ else
+ sba_free_chained_requests(req);
+}
+
+static void sba_receive_message(struct mbox_client *cl, void *msg)
+{
+ unsigned long flags;
+ struct brcm_message *m = msg;
+ struct sba_request *req = m->ctx, *req1;
+ struct sba_device *sba = req->sba;
+
+ /* Report an error if the message carries one */
+ if (m->error < 0)
+ dev_err(sba->dev, "%s got message with error %d",
+ dma_chan_name(&sba->dma_chan), m->error);
+
+ /* Mark request as received */
+ sba_received_request(req);
+
+ /* Only proceed once all chained requests have been received */
+ if (atomic_dec_return(&req->first->next_pending_count))
+ goto done;
+
+ /* Point to first request */
+ req = req->first;
+
+ /* Update request */
+ if (req->state == SBA_REQUEST_STATE_RECEIVED)
+ sba_dma_tx_actions(req);
+ else
+ sba_free_chained_requests(req);
+
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+
+ /* Re-check all completed requests waiting for 'ack' */
+ list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node) {
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+ sba_dma_tx_actions(req);
+ spin_lock_irqsave(&sba->reqs_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&sba->reqs_lock, flags);
+
+done:
+ /* Try to submit pending requests */
+ sba_issue_pending(&sba->dma_chan);
+}
+
+/* ====== Platform driver routines ===== */
+
+static int sba_prealloc_channel_resources(struct sba_device *sba)
+{
+ int i, j, p, ret = 0;
+ struct sba_request *req = NULL;
+
+ sba->resp_base = dma_alloc_coherent(sba->dma_dev.dev,
+ sba->max_resp_pool_size,
+ &sba->resp_dma_base, GFP_KERNEL);
+ if (!sba->resp_base)
+ return -ENOMEM;
+
+ sba->cmds_base = dma_alloc_coherent(sba->dma_dev.dev,
+ sba->max_cmds_pool_size,
+ &sba->cmds_dma_base, GFP_KERNEL);
+ if (!sba->cmds_base) {
+ ret = -ENOMEM;
+ goto fail_free_resp_pool;
+ }
+
+ spin_lock_init(&sba->reqs_lock);
+ sba->reqs_fence = false;
+ INIT_LIST_HEAD(&sba->reqs_alloc_list);
+ INIT_LIST_HEAD(&sba->reqs_pending_list);
+ INIT_LIST_HEAD(&sba->reqs_active_list);
+ INIT_LIST_HEAD(&sba->reqs_received_list);
+ INIT_LIST_HEAD(&sba->reqs_completed_list);
+ INIT_LIST_HEAD(&sba->reqs_aborted_list);
+ INIT_LIST_HEAD(&sba->reqs_free_list);
+
+ sba->reqs = devm_kcalloc(sba->dev, sba->max_req,
+ sizeof(*req), GFP_KERNEL);
+ if (!sba->reqs) {
+ ret = -ENOMEM;
+ goto fail_free_cmds_pool;
+ }
+
+ for (i = 0, p = 0; i < sba->max_req; i++) {
+ req = &sba->reqs[i];
+ INIT_LIST_HEAD(&req->node);
+ req->sba = sba;
+ req->state = SBA_REQUEST_STATE_FREE;
+ INIT_LIST_HEAD(&req->next);
+ req->next_count = 1;
+ atomic_set(&req->next_pending_count, 0);
+ req->fence = false;
+ req->resp = sba->resp_base + p;
+ req->resp_dma = sba->resp_dma_base + p;
+ p += sba->hw_resp_size;
+ req->cmds = devm_kcalloc(sba->dev, sba->max_cmd_per_req,
+ sizeof(*req->cmds), GFP_KERNEL);
+ if (!req->cmds) {
+ ret = -ENOMEM;
+ goto fail_free_cmds_pool;
+ }
+ for (j = 0; j < sba->max_cmd_per_req; j++) {
+ req->cmds[j].cmd = 0;
+ req->cmds[j].cmd_dma = sba->cmds_base +
+ (i * sba->max_cmd_per_req + j) * sizeof(u64);
+ req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
+ (i * sba->max_cmd_per_req + j) * sizeof(u64);
+ req->cmds[j].flags = 0;
+ }
+ memset(&req->msg, 0, sizeof(req->msg));
+ dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
+ req->tx.tx_submit = sba_tx_submit;
+ req->tx.phys = req->resp_dma;
+ list_add_tail(&req->node, &sba->reqs_free_list);
+ }
+
+ sba->reqs_free_count = sba->max_req;
+
+ return 0;
+
+fail_free_cmds_pool:
+ dma_free_coherent(sba->dma_dev.dev,
+ sba->max_cmds_pool_size,
+ sba->cmds_base, sba->cmds_dma_base);
+fail_free_resp_pool:
+ dma_free_coherent(sba->dma_dev.dev,
+ sba->max_resp_pool_size,
+ sba->resp_base, sba->resp_dma_base);
+ return ret;
+}
+
+static void sba_freeup_channel_resources(struct sba_device *sba)
+{
+ dmaengine_terminate_all(&sba->dma_chan);
+ dma_free_coherent(sba->dma_dev.dev, sba->max_cmds_pool_size,
+ sba->cmds_base, sba->cmds_dma_base);
+ dma_free_coherent(sba->dma_dev.dev, sba->max_resp_pool_size,
+ sba->resp_base, sba->resp_dma_base);
+ sba->resp_base = NULL;
+ sba->resp_dma_base = 0;
+}
+
+static int sba_async_register(struct sba_device *sba)
+{
+ int ret;
+ struct dma_device *dma_dev = &sba->dma_dev;
+
+ /* Initialize DMA channel cookie */
+ sba->dma_chan.device = dma_dev;
+ dma_cookie_init(&sba->dma_chan);
+
+ /* Initialize DMA device capability mask */
+ dma_cap_zero(dma_dev->cap_mask);
+ dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_XOR, dma_dev->cap_mask);
+ dma_cap_set(DMA_PQ, dma_dev->cap_mask);
+
+ /*
+ * Set the mailbox channel device as the base device of
+ * our dma_device because the actual memory accesses
+ * will be done by the mailbox controller.
+ */
+ dma_dev->dev = sba->mbox_dev;
+
+ /* Set base prep routines */
+ dma_dev->device_free_chan_resources = sba_free_chan_resources;
+ dma_dev->device_terminate_all = sba_device_terminate_all;
+ dma_dev->device_issue_pending = sba_issue_pending;
+ dma_dev->device_tx_status = sba_tx_status;
+
+ /* Set interrupt routine */
+ if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
+ dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;
+
+ /* Set memcpy routine */
+ if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
+ dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;
+
+ /* Set xor routine and capability */
+ if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
+ dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
+ dma_dev->max_xor = sba->max_xor_srcs;
+ }
+
+ /* Set pq routine and capability */
+ if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
+ dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
+ dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
+ }
+
+ /* Initialize DMA device channel list */
+ INIT_LIST_HEAD(&dma_dev->channels);
+ list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);
+
+ /* Register with the Linux async DMA framework */
+ ret = dma_async_device_register(dma_dev);
+ if (ret) {
+ dev_err(sba->dev, "async device register error %d", ret);
+ return ret;
+ }
+
+ dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
+ dma_chan_name(&sba->dma_chan),
+ dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
+ dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
+ dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
+ dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");
+
+ return 0;
+}
+
+static int sba_probe(struct platform_device *pdev)
+{
+ int i, ret = 0, mchans_count;
+ struct sba_device *sba;
+ struct platform_device *mbox_pdev;
+ struct of_phandle_args args;
+
+ /* Allocate main SBA struct */
+ sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
+ if (!sba)
+ return -ENOMEM;
+
+ sba->dev = &pdev->dev;
+ platform_set_drvdata(pdev, sba);
+
+ /* Determine SBA version from DT compatible string */
+ if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
+ sba->ver = SBA_VER_1;
+ else if (of_device_is_compatible(sba->dev->of_node,
+ "brcm,iproc-sba-v2"))
+ sba->ver = SBA_VER_2;
+ else
+ return -ENODEV;
+
+ /* Derived Configuration parameters */
+ switch (sba->ver) {
+ case SBA_VER_1:
+ sba->max_req = 1024;
+ sba->hw_buf_size = 4096;
+ sba->hw_resp_size = 8;
+ sba->max_pq_coefs = 6;
+ sba->max_pq_srcs = 6;
+ break;
+ case SBA_VER_2:
+ sba->max_req = 1024;
+ sba->hw_buf_size = 4096;
+ sba->hw_resp_size = 8;
+ sba->max_pq_coefs = 30;
+ /*
+ * We cannot support max_pq_srcs == max_pq_coefs because
+ * we are limited by the number of SBA commands that we
+ * can fit in one message for the underlying ring manager HW.
+ */
+ sba->max_pq_srcs = 12;
+ break;
+ default:
+ return -EINVAL;
+ }
+ sba->max_cmd_per_req = sba->max_pq_srcs + 3;
+ sba->max_xor_srcs = sba->max_cmd_per_req - 1;
+ sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
+ sba->max_cmds_pool_size = sba->max_req *
+ sba->max_cmd_per_req * sizeof(u64);
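+ /*
+ * For SBA_VER_2 the above works out to max_cmd_per_req = 15,
+ * max_xor_srcs = 14, an 8 KiB response pool and a 120 KiB
+ * command pool (1024 * 15 * 8 bytes).
+ */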
+
+ /* Setup mailbox client */
+ sba->client.dev = &pdev->dev;
+ sba->client.rx_callback = sba_receive_message;
+ sba->client.tx_block = false;
+ sba->client.knows_txdone = false;
+ sba->client.tx_tout = 0;
+
+ /* The number of mailbox channels used equals the number of "mboxes" phandles */
+ ret = of_count_phandle_with_args(pdev->dev.of_node,
+ "mboxes", "#mbox-cells");
+ if (ret <= 0)
+ return -ENODEV;
+ mchans_count = ret;
+ sba->mchans_count = 0;
+ atomic_set(&sba->mchans_current, 0);
+
+ /* Allocate mailbox channel array */
+ sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
+ sizeof(*sba->mchans), GFP_KERNEL);
+ if (!sba->mchans)
+ return -ENOMEM;
+
+ /* Request mailbox channels */
+ for (i = 0; i < mchans_count; i++) {
+ sba->mchans[i] = mbox_request_channel(&sba->client, i);
+ if (IS_ERR(sba->mchans[i])) {
+ ret = PTR_ERR(sba->mchans[i]);
+ goto fail_free_mchans;
+ }
+ sba->mchans_count++;
+ }
+
+ /* Find out the underlying mailbox device */
+ ret = of_parse_phandle_with_args(pdev->dev.of_node,
+ "mboxes", "#mbox-cells", 0, &args);
+ if (ret)
+ goto fail_free_mchans;
+ mbox_pdev = of_find_device_by_node(args.np);
+ of_node_put(args.np);
+ if (!mbox_pdev) {
+ ret = -ENODEV;
+ goto fail_free_mchans;
+ }
+ sba->mbox_dev = &mbox_pdev->dev;
+
+ /* All mailbox channels should belong to the same ring manager device */
+ for (i = 1; i < mchans_count; i++) {
+ ret = of_parse_phandle_with_args(pdev->dev.of_node,
+ "mboxes", "#mbox-cells", i, &args);
+ if (ret)
+ goto fail_free_mchans;
+ mbox_pdev = of_find_device_by_node(args.np);
+ of_node_put(args.np);
+ if (sba->mbox_dev != &mbox_pdev->dev) {
+ ret = -EINVAL;
+ goto fail_free_mchans;
+ }
+ }
+
+ /* Register the DMA device with the Linux async framework */
+ ret = sba_async_register(sba);
+ if (ret)
+ goto fail_free_mchans;
+
+ /* Preallocate channel resources */
+ ret = sba_prealloc_channel_resources(sba);
+ if (ret)
+ goto fail_async_dev_unreg;
+
+ /* Print device info */
+ dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
+ dma_chan_name(&sba->dma_chan), sba->ver+1,
+ sba->mchans_count);
+
+ return 0;
+
+fail_async_dev_unreg:
+ dma_async_device_unregister(&sba->dma_dev);
+fail_free_mchans:
+ for (i = 0; i < sba->mchans_count; i++)
+ mbox_free_channel(sba->mchans[i]);
+ return ret;
+}
+
+static int sba_remove(struct platform_device *pdev)
+{
+ int i;
+ struct sba_device *sba = platform_get_drvdata(pdev);
+
+ sba_freeup_channel_resources(sba);
+
+ dma_async_device_unregister(&sba->dma_dev);
+
+ for (i = 0; i < sba->mchans_count; i++)
+ mbox_free_channel(sba->mchans[i]);
+
+ return 0;
+}
+
+static const struct of_device_id sba_of_match[] = {
+ { .compatible = "brcm,iproc-sba", },
+ { .compatible = "brcm,iproc-sba-v2", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sba_of_match);
+
+static struct platform_driver sba_driver = {
+ .probe = sba_probe,
+ .remove = sba_remove,
+ .driver = {
+ .name = "bcm-sba-raid",
+ .of_match_table = sba_of_match,
+ },
+};
+module_platform_driver(sba_driver);
+
+MODULE_DESCRIPTION("Broadcom SBA RAID driver");
+MODULE_AUTHOR("Anup Patel <[email protected]>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index 5a37b9fcf40d..04b9728c1d26 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -6,17 +6,12 @@ config DW_DMAC_CORE
tristate
select DMA_ENGINE
-config DW_DMAC_BIG_ENDIAN_IO
- bool
-
config DW_DMAC
tristate "Synopsys DesignWare AHB DMA platform driver"
select DW_DMAC_CORE
- select DW_DMAC_BIG_ENDIAN_IO if AVR32
- default y if CPU_AT32AP7000
help
Support the Synopsys DesignWare AHB DMA controller. This
- can be integrated in chips such as the Atmel AT32ap7000.
+ can be integrated in chips such as the Intel Cherrytrail.
config DW_DMAC_PCI
tristate "Synopsys DesignWare AHB DMA PCI driver"
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index e500950dad82..f43e6dafe446 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -561,92 +561,14 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
dwc_descriptor_complete(dwc, bad_desc, true);
}
-/* --------------------- Cyclic DMA API extensions -------------------- */
-
-dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
-{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- return channel_readl(dwc, SAR);
-}
-EXPORT_SYMBOL(dw_dma_get_src_addr);
-
-dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
-{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- return channel_readl(dwc, DAR);
-}
-EXPORT_SYMBOL(dw_dma_get_dst_addr);
-
-/* Called with dwc->lock held and all DMAC interrupts disabled */
-static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
- u32 status_block, u32 status_err, u32 status_xfer)
-{
- unsigned long flags;
-
- if (status_block & dwc->mask) {
- void (*callback)(void *param);
- void *callback_param;
-
- dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
- channel_readl(dwc, LLP));
- dma_writel(dw, CLEAR.BLOCK, dwc->mask);
-
- callback = dwc->cdesc->period_callback;
- callback_param = dwc->cdesc->period_callback_param;
-
- if (callback)
- callback(callback_param);
- }
-
- /*
- * Error and transfer complete are highly unlikely, and will most
- * likely be due to a configuration error by the user.
- */
- if (unlikely(status_err & dwc->mask) ||
- unlikely(status_xfer & dwc->mask)) {
- unsigned int i;
-
- dev_err(chan2dev(&dwc->chan),
- "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
- status_xfer ? "xfer" : "error");
-
- spin_lock_irqsave(&dwc->lock, flags);
-
- dwc_dump_chan_regs(dwc);
-
- dwc_chan_disable(dw, dwc);
-
- /* Make sure DMA does not restart by loading a new list */
- channel_writel(dwc, LLP, 0);
- channel_writel(dwc, CTL_LO, 0);
- channel_writel(dwc, CTL_HI, 0);
-
- dma_writel(dw, CLEAR.BLOCK, dwc->mask);
- dma_writel(dw, CLEAR.ERROR, dwc->mask);
- dma_writel(dw, CLEAR.XFER, dwc->mask);
-
- for (i = 0; i < dwc->cdesc->periods; i++)
- dwc_dump_lli(dwc, dwc->cdesc->desc[i]);
-
- spin_unlock_irqrestore(&dwc->lock, flags);
- }
-
- /* Re-enable interrupts */
- channel_set_bit(dw, MASK.BLOCK, dwc->mask);
-}
-
-/* ------------------------------------------------------------------------- */
-
static void dw_dma_tasklet(unsigned long data)
{
struct dw_dma *dw = (struct dw_dma *)data;
struct dw_dma_chan *dwc;
- u32 status_block;
u32 status_xfer;
u32 status_err;
unsigned int i;
- status_block = dma_readl(dw, RAW.BLOCK);
status_xfer = dma_readl(dw, RAW.XFER);
status_err = dma_readl(dw, RAW.ERROR);
@@ -655,8 +577,7 @@ static void dw_dma_tasklet(unsigned long data)
for (i = 0; i < dw->dma.chancnt; i++) {
dwc = &dw->chan[i];
if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
- dwc_handle_cyclic(dw, dwc, status_block, status_err,
- status_xfer);
+ dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n");
else if (status_err & (1 << i))
dwc_handle_error(dw, dwc);
else if (status_xfer & (1 << i))
@@ -1264,255 +1185,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
-/* --------------------- Cyclic DMA API extensions -------------------- */
-
-/**
- * dw_dma_cyclic_start - start the cyclic DMA transfer
- * @chan: the DMA channel to start
- *
- * Must be called with soft interrupts disabled. Returns zero on success or
- * -errno on failure.
- */
-int dw_dma_cyclic_start(struct dma_chan *chan)
-{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma *dw = to_dw_dma(chan->device);
- unsigned long flags;
-
- if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
- dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
- return -ENODEV;
- }
-
- spin_lock_irqsave(&dwc->lock, flags);
-
- /* Enable interrupts to perform cyclic transfer */
- channel_set_bit(dw, MASK.BLOCK, dwc->mask);
-
- dwc_dostart(dwc, dwc->cdesc->desc[0]);
-
- spin_unlock_irqrestore(&dwc->lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(dw_dma_cyclic_start);
-
-/**
- * dw_dma_cyclic_stop - stop the cyclic DMA transfer
- * @chan: the DMA channel to stop
- *
- * Must be called with soft interrupts disabled.
- */
-void dw_dma_cyclic_stop(struct dma_chan *chan)
-{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- unsigned long flags;
-
- spin_lock_irqsave(&dwc->lock, flags);
-
- dwc_chan_disable(dw, dwc);
-
- spin_unlock_irqrestore(&dwc->lock, flags);
-}
-EXPORT_SYMBOL(dw_dma_cyclic_stop);
-
-/**
- * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
- * @chan: the DMA channel to prepare
- * @buf_addr: physical DMA address where the buffer starts
- * @buf_len: total number of bytes for the entire buffer
- * @period_len: number of bytes for each period
- * @direction: transfer direction, to or from device
- *
- * Must be called before trying to start the transfer. Returns a valid struct
- * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
- */
-struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
- dma_addr_t buf_addr, size_t buf_len, size_t period_len,
- enum dma_transfer_direction direction)
-{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dma_slave_config *sconfig = &dwc->dma_sconfig;
- struct dw_cyclic_desc *cdesc;
- struct dw_cyclic_desc *retval = NULL;
- struct dw_desc *desc;
- struct dw_desc *last = NULL;
- u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
- unsigned long was_cyclic;
- unsigned int reg_width;
- unsigned int periods;
- unsigned int i;
- unsigned long flags;
-
- spin_lock_irqsave(&dwc->lock, flags);
- if (dwc->nollp) {
- spin_unlock_irqrestore(&dwc->lock, flags);
- dev_dbg(chan2dev(&dwc->chan),
- "channel doesn't support LLP transfers\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
- spin_unlock_irqrestore(&dwc->lock, flags);
- dev_dbg(chan2dev(&dwc->chan),
- "queue and/or active list are not empty\n");
- return ERR_PTR(-EBUSY);
- }
-
- was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
- spin_unlock_irqrestore(&dwc->lock, flags);
- if (was_cyclic) {
- dev_dbg(chan2dev(&dwc->chan),
- "channel already prepared for cyclic DMA\n");
- return ERR_PTR(-EBUSY);
- }
-
- retval = ERR_PTR(-EINVAL);
-
- if (unlikely(!is_slave_direction(direction)))
- goto out_err;
-
- dwc->direction = direction;
-
- if (direction == DMA_MEM_TO_DEV)
- reg_width = __ffs(sconfig->dst_addr_width);
- else
- reg_width = __ffs(sconfig->src_addr_width);
-
- periods = buf_len / period_len;
-
- /* Check for too big/unaligned periods and unaligned DMA buffer. */
- if (period_len > (dwc->block_size << reg_width))
- goto out_err;
- if (unlikely(period_len & ((1 << reg_width) - 1)))
- goto out_err;
- if (unlikely(buf_addr & ((1 << reg_width) - 1)))
- goto out_err;
-
- retval = ERR_PTR(-ENOMEM);
-
- cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
- if (!cdesc)
- goto out_err;
-
- cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
- if (!cdesc->desc)
- goto out_err_alloc;
-
- for (i = 0; i < periods; i++) {
- desc = dwc_desc_get(dwc);
- if (!desc)
- goto out_err_desc_get;
-
- switch (direction) {
- case DMA_MEM_TO_DEV:
- lli_write(desc, dar, sconfig->dst_addr);
- lli_write(desc, sar, buf_addr + period_len * i);
- lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
- | DWC_CTLL_DST_WIDTH(reg_width)
- | DWC_CTLL_SRC_WIDTH(reg_width)
- | DWC_CTLL_DST_FIX
- | DWC_CTLL_SRC_INC
- | DWC_CTLL_INT_EN));
-
- lli_set(desc, ctllo, sconfig->device_fc ?
- DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
- DWC_CTLL_FC(DW_DMA_FC_D_M2P));
-
- break;
- case DMA_DEV_TO_MEM:
- lli_write(desc, dar, buf_addr + period_len * i);
- lli_write(desc, sar, sconfig->src_addr);
- lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
- | DWC_CTLL_SRC_WIDTH(reg_width)
- | DWC_CTLL_DST_WIDTH(reg_width)
- | DWC_CTLL_DST_INC
- | DWC_CTLL_SRC_FIX
- | DWC_CTLL_INT_EN));
-
- lli_set(desc, ctllo, sconfig->device_fc ?
- DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
- DWC_CTLL_FC(DW_DMA_FC_D_P2M));
-
- break;
- default:
- break;
- }
-
- lli_write(desc, ctlhi, period_len >> reg_width);
- cdesc->desc[i] = desc;
-
- if (last)
- lli_write(last, llp, desc->txd.phys | lms);
-
- last = desc;
- }
-
- /* Let's make a cyclic list */
- lli_write(last, llp, cdesc->desc[0]->txd.phys | lms);
-
- dev_dbg(chan2dev(&dwc->chan),
- "cyclic prepared buf %pad len %zu period %zu periods %d\n",
- &buf_addr, buf_len, period_len, periods);
-
- cdesc->periods = periods;
- dwc->cdesc = cdesc;
-
- return cdesc;
-
-out_err_desc_get:
- while (i--)
- dwc_desc_put(dwc, cdesc->desc[i]);
-out_err_alloc:
- kfree(cdesc);
-out_err:
- clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
- return (struct dw_cyclic_desc *)retval;
-}
-EXPORT_SYMBOL(dw_dma_cyclic_prep);
-
-/**
- * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
- * @chan: the DMA channel to free
- */
-void dw_dma_cyclic_free(struct dma_chan *chan)
-{
- struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
- struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- struct dw_cyclic_desc *cdesc = dwc->cdesc;
- unsigned int i;
- unsigned long flags;
-
- dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
-
- if (!cdesc)
- return;
-
- spin_lock_irqsave(&dwc->lock, flags);
-
- dwc_chan_disable(dw, dwc);
-
- dma_writel(dw, CLEAR.BLOCK, dwc->mask);
- dma_writel(dw, CLEAR.ERROR, dwc->mask);
- dma_writel(dw, CLEAR.XFER, dwc->mask);
-
- spin_unlock_irqrestore(&dwc->lock, flags);
-
- for (i = 0; i < cdesc->periods; i++)
- dwc_desc_put(dwc, cdesc->desc[i]);
-
- kfree(cdesc->desc);
- kfree(cdesc);
-
- dwc->cdesc = NULL;
-
- clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
-}
-EXPORT_SYMBOL(dw_dma_cyclic_free);
-
-/*----------------------------------------------------------------------*/
-
int dw_dma_probe(struct dw_dma_chip *chip)
{
struct dw_dma_platform_data *pdata;
@@ -1642,7 +1314,7 @@ int dw_dma_probe(struct dw_dma_chip *chip)
if (autocfg) {
unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
- unsigned int dwc_params = dma_readl_native(addr);
+ unsigned int dwc_params = readl(addr);
dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
dwc_params);
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index c639c60b825a..bc31fe802061 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -306,8 +306,12 @@ static int dw_resume_early(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_prepare_enable(chip->clk);
+ if (ret)
+ return ret;
- clk_prepare_enable(chip->clk);
return dw_dma_enable(chip);
}
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 32a328721c88..09e7dfdbb790 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -116,20 +116,6 @@ struct dw_dma_regs {
DW_REG(GLOBAL_CFG);
};
-/*
- * Big endian I/O access when reading and writing to the DMA controller
- * registers. This is needed on some platforms, like the Atmel AVR32
- * architecture.
- */
-
-#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
-#define dma_readl_native ioread32be
-#define dma_writel_native iowrite32be
-#else
-#define dma_readl_native readl
-#define dma_writel_native writel
-#endif
-
/* Bitfields in DW_PARAMS */
#define DW_PARAMS_NR_CHAN 8 /* number of channels */
#define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */
@@ -280,7 +266,6 @@ struct dw_dma_chan {
unsigned long flags;
struct list_head active_list;
struct list_head queue;
- struct dw_cyclic_desc *cdesc;
unsigned int descs_allocated;
@@ -302,9 +287,9 @@ __dwc_regs(struct dw_dma_chan *dwc)
}
#define channel_readl(dwc, name) \
- dma_readl_native(&(__dwc_regs(dwc)->name))
+ readl(&(__dwc_regs(dwc)->name))
#define channel_writel(dwc, name, val) \
- dma_writel_native((val), &(__dwc_regs(dwc)->name))
+ writel((val), &(__dwc_regs(dwc)->name))
static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
{
@@ -333,9 +318,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
}
#define dma_readl(dw, name) \
- dma_readl_native(&(__dw_regs(dw)->name))
+ readl(&(__dw_regs(dw)->name))
#define dma_writel(dw, name, val) \
- dma_writel_native((val), &(__dw_regs(dw)->name))
+ writel((val), &(__dw_regs(dw)->name))
#define idma32_readq(dw, name) \
hi_lo_readq(&(__dw_regs(dw)->name))
@@ -352,43 +337,30 @@ static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
return container_of(ddev, struct dw_dma, dma);
}
-#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
-typedef __be32 __dw32;
-#else
-typedef __le32 __dw32;
-#endif
-
/* LLI == Linked List Item; a.k.a. DMA block descriptor */
struct dw_lli {
/* values that are not changed by hardware */
- __dw32 sar;
- __dw32 dar;
- __dw32 llp; /* chain to next lli */
- __dw32 ctllo;
+ __le32 sar;
+ __le32 dar;
+ __le32 llp; /* chain to next lli */
+ __le32 ctllo;
/* values that may get written back: */
- __dw32 ctlhi;
+ __le32 ctlhi;
/* sstat and dstat can snapshot peripheral register state.
* silicon config may discard either or both...
*/
- __dw32 sstat;
- __dw32 dstat;
+ __le32 sstat;
+ __le32 dstat;
};
struct dw_desc {
/* FIRST values the hardware uses */
struct dw_lli lli;
-#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
-#define lli_set(d, reg, v) ((d)->lli.reg |= cpu_to_be32(v))
-#define lli_clear(d, reg, v) ((d)->lli.reg &= ~cpu_to_be32(v))
-#define lli_read(d, reg) be32_to_cpu((d)->lli.reg)
-#define lli_write(d, reg, v) ((d)->lli.reg = cpu_to_be32(v))
-#else
#define lli_set(d, reg, v) ((d)->lli.reg |= cpu_to_le32(v))
#define lli_clear(d, reg, v) ((d)->lli.reg &= ~cpu_to_le32(v))
#define lli_read(d, reg) le32_to_cpu((d)->lli.reg)
#define lli_write(d, reg, v) ((d)->lli.reg = cpu_to_le32(v))
-#endif
/* THEN values for driver housekeeping */
struct list_head desc_node;
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index 90d29f90acfb..493dc6c59d1d 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -877,7 +877,7 @@ static int fsl_re_remove(struct platform_device *ofdev)
return 0;
}
-static struct of_device_id fsl_re_ids[] = {
+static const struct of_device_id fsl_re_ids[] = {
{ .compatible = "fsl,raideng-v1.0", },
{}
};
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 51c75bf2b9b6..3b8b752ede2d 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -269,6 +269,7 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
case 2:
case 4:
case 8:
+ mode &= ~FSL_DMA_MR_SAHTS_MASK;
mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
break;
}
@@ -301,6 +302,7 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
case 2:
case 4:
case 8:
+ mode &= ~FSL_DMA_MR_DAHTS_MASK;
mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
break;
}
@@ -327,7 +329,8 @@ static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
BUG_ON(size > 1024);
mode = get_mr(chan);
- mode |= (__ilog2(size) << 24) & 0x0f000000;
+ mode &= ~FSL_DMA_MR_BWC_MASK;
+ mode |= (__ilog2(size) << 24) & FSL_DMA_MR_BWC_MASK;
set_mr(chan, mode);
}
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 31bffccdcc75..4787d485dd76 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -36,6 +36,10 @@
#define FSL_DMA_MR_DAHE 0x00002000
#define FSL_DMA_MR_SAHE 0x00001000
+#define FSL_DMA_MR_SAHTS_MASK 0x0000C000
+#define FSL_DMA_MR_DAHTS_MASK 0x00030000
+#define FSL_DMA_MR_BWC_MASK 0x0f000000
+
/*
* Bandwidth/pause control determines how many bytes a given
* channel is allowed to transfer before the DMA engine pauses
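The fsldma hunks above add SAHTS/DAHTS/BWC masks so that each helper clears its register field before writing the new value; without the clear, bits from an earlier, larger setting could survive a later, smaller one. A minimal sketch of that read-modify-write step, modelled on fsl_chan_set_request_count() (the mask value is repeated here only to keep the sketch self-contained):

#include <linux/log2.h>
#include <linux/types.h>

#define EXAMPLE_MR_BWC_MASK	0x0f000000	/* mirrors FSL_DMA_MR_BWC_MASK */

/* Encode a new bandwidth-control request count into the mode register value. */
static u32 example_set_request_count(u32 mode, int size)
{
	mode &= ~EXAMPLE_MR_BWC_MASK;				/* drop the old field value */
	mode |= (ilog2(size) << 24) & EXAMPLE_MR_BWC_MASK;	/* set the new one */
	return mode;
}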
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index ab0fb804fb1e..f681df8f0ed3 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -888,7 +888,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
sg_init_table(imxdmac->sg_list, periods);
for (i = 0; i < periods; i++) {
- imxdmac->sg_list[i].page_link = 0;
+ sg_assign_page(&imxdmac->sg_list[i], NULL);
imxdmac->sg_list[i].offset = 0;
imxdmac->sg_list[i].dma_address = dma_addr;
sg_dma_len(&imxdmac->sg_list[i]) = period_len;
@@ -896,10 +896,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
}
/* close the loop */
- imxdmac->sg_list[periods].offset = 0;
- sg_dma_len(&imxdmac->sg_list[periods]) = 0;
- imxdmac->sg_list[periods].page_link =
- ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
+ sg_chain(imxdmac->sg_list, periods + 1, imxdmac->sg_list);
desc->type = IMXDMA_DESC_CYCLIC;
desc->sg = imxdmac->sg_list;
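The imx-dma change above swaps hand-rolled page_link bit twiddling for sg_chain(), which turns the extra scatterlist entry into a link back to the start of the table and closes the cyclic loop. A minimal sketch of building such a list, assuming the caller provides an array with room for periods + 1 entries plus the DMA address and period length of the cyclic buffer:

#include <linux/scatterlist.h>
#include <linux/types.h>

/*
 * Build a cyclic scatterlist of 'periods' equal chunks. 'sgl' must have
 * room for periods + 1 entries; the last entry becomes a chain link back
 * to the first, as the driver now does with sg_chain().
 */
static void example_build_cyclic_sg(struct scatterlist *sgl,
				    unsigned int periods,
				    dma_addr_t dma_addr, size_t period_len)
{
	unsigned int i;

	sg_init_table(sgl, periods);
	for (i = 0; i < periods; i++) {
		sg_assign_page(&sgl[i], NULL);
		sgl[i].offset = 0;
		sgl[i].dma_address = dma_addr + i * period_len;
		sg_dma_len(&sgl[i]) = period_len;
	}

	/* Entry 'periods' is rewritten as a chain entry pointing at entry 0. */
	sg_chain(sgl, periods + 1, sgl);
}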
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 085993cb2ccc..a67ec1bdc4e0 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1323,7 +1323,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
}
if (period_len > 0xffff) {
- dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n",
+ dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
channel, period_len, 0xffff);
goto err_out;
}
@@ -1347,7 +1347,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
if (i + 1 == num_periods)
param |= BD_WRAP;
- dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
+ dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
i, period_len, (u64)dma_addr,
param & BD_WRAP ? "wrap" : "",
param & BD_INTR ? " intr" : "");
@@ -1755,19 +1755,26 @@ static int sdma_probe(struct platform_device *pdev)
if (IS_ERR(sdma->clk_ahb))
return PTR_ERR(sdma->clk_ahb);
- clk_prepare(sdma->clk_ipg);
- clk_prepare(sdma->clk_ahb);
+ ret = clk_prepare(sdma->clk_ipg);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare(sdma->clk_ahb);
+ if (ret)
+ goto err_clk;
ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
sdma);
if (ret)
- return ret;
+ goto err_irq;
sdma->irq = irq;
sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
- if (!sdma->script_addrs)
- return -ENOMEM;
+ if (!sdma->script_addrs) {
+ ret = -ENOMEM;
+ goto err_irq;
+ }
/* initially no scripts available */
saddr_arr = (s32 *)sdma->script_addrs;
@@ -1882,6 +1889,10 @@ err_register:
dma_async_device_unregister(&sdma->dma_device);
err_init:
kfree(sdma->script_addrs);
+err_irq:
+ clk_unprepare(sdma->clk_ahb);
+err_clk:
+ clk_unprepare(sdma->clk_ipg);
return ret;
}
@@ -1893,6 +1904,8 @@ static int sdma_remove(struct platform_device *pdev)
devm_free_irq(&pdev->dev, sdma->irq, sdma);
dma_async_device_unregister(&sdma->dma_device);
kfree(sdma->script_addrs);
+ clk_unprepare(sdma->clk_ahb);
+ clk_unprepare(sdma->clk_ipg);
/* Kill the tasklet */
for (i = 0; i < MAX_DMA_CHANNELS; i++) {
struct sdma_channel *sdmac = &sdma->channel[i];
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 0b9b6b07db9e..eab2fdda29ec 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -336,10 +336,10 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
}
if (dca3_tag_map_invalid(ioatdca->tag_map)) {
- WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
- "%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
- dev_driver_string(&pdev->dev),
- dev_name(&pdev->dev));
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+ pr_warn_once("%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
+ dev_driver_string(&pdev->dev),
+ dev_name(&pdev->dev));
free_dca_provider(dca);
return NULL;
}
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 6ad4384b3fa8..ed8ed1192775 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -839,8 +839,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
goto free_resources;
}
- for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
- dma_srcs[i] = DMA_ERROR_CODE;
for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
@@ -910,8 +908,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
xor_val_result = 1;
- for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
- dma_srcs[i] = DMA_ERROR_CODE;
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
@@ -965,8 +961,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
op = IOAT_OP_XOR_VAL;
xor_val_result = 0;
- for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
- dma_srcs[i] = DMA_ERROR_CODE;
for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
DMA_TO_DEVICE);
@@ -1017,18 +1011,14 @@ static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
goto free_resources;
dma_unmap:
if (op == IOAT_OP_XOR) {
- if (dest_dma != DMA_ERROR_CODE)
- dma_unmap_page(dev, dest_dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
- for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
- if (dma_srcs[i] != DMA_ERROR_CODE)
- dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
- DMA_TO_DEVICE);
+ while (--i >= 0)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
+ dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
} else if (op == IOAT_OP_XOR_VAL) {
- for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
- if (dma_srcs[i] != DMA_ERROR_CODE)
- dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
- DMA_TO_DEVICE);
+ while (--i >= 0)
+ dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
}
free_resources:
dma->device_free_chan_resources(dma_chan);
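With DMA_ERROR_CODE gone, the ioat self-test above no longer pre-fills the address array with a sentinel; it relies on the loop index to unmap exactly the pages that were mapped before the failure. A minimal sketch of that map-and-unwind pattern, with the page array, count and direction as placeholder parameters:

#include <linux/dma-mapping.h>

/*
 * Map an array of pages for DMA; on a mapping error, unmap only the
 * entries mapped so far. 'i' must be signed so "while (--i >= 0)"
 * terminates correctly.
 */
static int example_map_pages(struct device *dev, struct page **pages,
			     int count, dma_addr_t *dma_addrs)
{
	int i;

	for (i = 0; i < count; i++) {
		dma_addrs[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
					    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addrs[i]))
			goto err_unmap;
	}
	return 0;

err_unmap:
	while (--i >= 0)
		dma_unmap_page(dev, dma_addrs[i], PAGE_SIZE, DMA_TO_DEVICE);
	return -ENOMEM;
}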
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index f3e211f8f6c5..f652a0e0f5a2 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -42,6 +42,7 @@
#define MV_XOR_V2_DMA_IMSG_THRD_OFF 0x018
#define MV_XOR_V2_DMA_IMSG_THRD_MASK 0x7FFF
#define MV_XOR_V2_DMA_IMSG_THRD_SHIFT 0x0
+#define MV_XOR_V2_DMA_IMSG_TIMER_EN BIT(18)
#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF 0x01C
/* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
#define MV_XOR_V2_DMA_DESQ_ALLOC_OFF 0x04C
@@ -55,6 +56,9 @@
#define MV_XOR_V2_DMA_DESQ_STOP_OFF 0x800
#define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF 0x804
#define MV_XOR_V2_DMA_DESQ_ADD_OFF 0x808
+#define MV_XOR_V2_DMA_IMSG_TMOT 0x810
+#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK 0x1FFF
+#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT 0
/* XOR Global registers */
#define MV_XOR_V2_GLOB_BW_CTRL 0x4
@@ -90,6 +94,13 @@
*/
#define MV_XOR_V2_DESC_NUM 1024
+/*
+ * Threshold values for descriptors and timeout, determined by
+ * experimentation as giving a good level of performance.
+ */
+#define MV_XOR_V2_DONE_IMSG_THRD 0x14
+#define MV_XOR_V2_TIMER_THRD 0xB0
+
/**
* struct mv_xor_v2_descriptor - DMA HW descriptor
* @desc_id: used by S/W and is not affected by H/W.
@@ -246,6 +257,29 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
return MV_XOR_V2_EXT_DESC_SIZE;
}
+/*
+ * Set the IMSG threshold
+ */
+static inline
+void mv_xor_v2_enable_imsg_thrd(struct mv_xor_v2_device *xor_dev)
+{
+ u32 reg;
+
+ /* Configure threshold of number of descriptors, and enable timer */
+ reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
+ reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+ reg |= (MV_XOR_V2_DONE_IMSG_THRD << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
+ reg |= MV_XOR_V2_DMA_IMSG_TIMER_EN;
+ writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
+
+ /* Configure Timer Threshold */
+ reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
+ reg &= (~MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK <<
+ MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
+ reg |= (MV_XOR_V2_TIMER_THRD << MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
+ writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
+}
+
static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
{
struct mv_xor_v2_device *xor_dev = data;
@@ -501,9 +535,6 @@ static void mv_xor_v2_issue_pending(struct dma_chan *chan)
mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
xor_dev->npendings = 0;
- /* Activate the channel */
- writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
-
spin_unlock_bh(&xor_dev->lock);
}
@@ -665,6 +696,27 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
return 0;
}
+static int mv_xor_v2_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);
+
+ /* Write 1 to the DESQ stop register to disable the XOR unit. */
+ writel(0x1, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
+
+ return 0;
+}
+
+static int mv_xor_v2_resume(struct platform_device *dev)
+{
+ struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);
+
+ mv_xor_v2_set_desc_size(xor_dev);
+ mv_xor_v2_enable_imsg_thrd(xor_dev);
+ mv_xor_v2_descq_init(xor_dev);
+
+ return 0;
+}
+
static int mv_xor_v2_probe(struct platform_device *pdev)
{
struct mv_xor_v2_device *xor_dev;
@@ -795,6 +847,8 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
list_add_tail(&xor_dev->dmachan.device_node,
&dma_dev->channels);
+ mv_xor_v2_enable_imsg_thrd(xor_dev);
+
mv_xor_v2_descq_init(xor_dev);
ret = dma_async_device_register(dma_dev);
@@ -844,6 +898,8 @@ MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids);
static struct platform_driver mv_xor_v2_driver = {
.probe = mv_xor_v2_probe,
+ .suspend = mv_xor_v2_suspend,
+ .resume = mv_xor_v2_resume,
.remove = mv_xor_v2_remove,
.driver = {
.name = "mv_xor_v2",
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index e217268c7098..41d167921fab 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -617,7 +617,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
if (period_len > MAX_XFER_BYTES) {
dev_err(mxs_dma->dma_device.dev,
- "maximum period size exceeded: %d > %d\n",
+ "maximum period size exceeded: %zu > %d\n",
period_len, MAX_XFER_BYTES);
goto err_out;
}
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index e90a7a0d760a..b19ee04567b5 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -443,7 +443,10 @@ struct dma_pl330_chan {
/* For D-to-M and M-to-D channels */
int burst_sz; /* the peripheral fifo width */
int burst_len; /* the number of burst */
- dma_addr_t fifo_addr;
+ phys_addr_t fifo_addr;
+ /* DMA-mapped view of the FIFO; may differ if an IOMMU is present */
+ dma_addr_t fifo_dma;
+ enum dma_data_direction dir;
/* for cyclic capability */
bool cyclic;
@@ -538,11 +541,6 @@ struct _xfer_spec {
struct dma_pl330_desc *desc;
};
-static inline bool _queue_empty(struct pl330_thread *thrd)
-{
- return thrd->req[0].desc == NULL && thrd->req[1].desc == NULL;
-}
-
static inline bool _queue_full(struct pl330_thread *thrd)
{
return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL;
@@ -564,23 +562,6 @@ static inline u32 get_revision(u32 periph_id)
return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}
-static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
- enum pl330_dst da, u16 val)
-{
- if (dry_run)
- return SZ_DMAADDH;
-
- buf[0] = CMD_DMAADDH;
- buf[0] |= (da << 1);
- buf[1] = val;
- buf[2] = val >> 8;
-
- PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
- da == 1 ? "DA" : "SA", val);
-
- return SZ_DMAADDH;
-}
-
static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
if (dry_run)
@@ -738,18 +719,6 @@ static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
return SZ_DMAMOV;
}
-static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
-{
- if (dry_run)
- return SZ_DMANOP;
-
- buf[0] = CMD_DMANOP;
-
- PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");
-
- return SZ_DMANOP;
-}
-
static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
if (dry_run)
@@ -817,39 +786,6 @@ static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
return SZ_DMASTP;
}
-static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
-{
- if (dry_run)
- return SZ_DMASTZ;
-
- buf[0] = CMD_DMASTZ;
-
- PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");
-
- return SZ_DMASTZ;
-}
-
-static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
- unsigned invalidate)
-{
- if (dry_run)
- return SZ_DMAWFE;
-
- buf[0] = CMD_DMAWFE;
-
- ev &= 0x1f;
- ev <<= 3;
- buf[1] = ev;
-
- if (invalidate)
- buf[1] |= (1 << 1);
-
- PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
- ev >> 3, invalidate ? ", I" : "");
-
- return SZ_DMAWFE;
-}
-
static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
enum pl330_cond cond, u8 peri)
{
@@ -2120,11 +2056,60 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
return 1;
}
+/*
+ * We need the data direction between the DMAC (the dma-mapping "device") and
+ * the FIFO (the dmaengine "dev"), from the FIFO's point of view. Confusing!
+ */
+static enum dma_data_direction
+pl330_dma_slave_map_dir(enum dma_transfer_direction dir)
+{
+ switch (dir) {
+ case DMA_MEM_TO_DEV:
+ return DMA_FROM_DEVICE;
+ case DMA_DEV_TO_MEM:
+ return DMA_TO_DEVICE;
+ case DMA_DEV_TO_DEV:
+ return DMA_BIDIRECTIONAL;
+ default:
+ return DMA_NONE;
+ }
+}
+
+static void pl330_unprep_slave_fifo(struct dma_pl330_chan *pch)
+{
+ if (pch->dir != DMA_NONE)
+ dma_unmap_resource(pch->chan.device->dev, pch->fifo_dma,
+ 1 << pch->burst_sz, pch->dir, 0);
+ pch->dir = DMA_NONE;
+}
+
+
+static bool pl330_prep_slave_fifo(struct dma_pl330_chan *pch,
+ enum dma_transfer_direction dir)
+{
+ struct device *dev = pch->chan.device->dev;
+ enum dma_data_direction dma_dir = pl330_dma_slave_map_dir(dir);
+
+ /* Already mapped for this config? */
+ if (pch->dir == dma_dir)
+ return true;
+
+ pl330_unprep_slave_fifo(pch);
+ pch->fifo_dma = dma_map_resource(dev, pch->fifo_addr,
+ 1 << pch->burst_sz, dma_dir, 0);
+ if (dma_mapping_error(dev, pch->fifo_dma))
+ return false;
+
+ pch->dir = dma_dir;
+ return true;
+}
+
static int pl330_config(struct dma_chan *chan,
struct dma_slave_config *slave_config)
{
struct dma_pl330_chan *pch = to_pchan(chan);
+ pl330_unprep_slave_fifo(pch);
if (slave_config->direction == DMA_MEM_TO_DEV) {
if (slave_config->dst_addr)
pch->fifo_addr = slave_config->dst_addr;
@@ -2235,6 +2220,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
spin_unlock_irqrestore(&pl330->lock, flags);
pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
+ pl330_unprep_slave_fifo(pch);
}
static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
@@ -2564,6 +2550,9 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
return NULL;
}
+ if (!pl330_prep_slave_fifo(pch, direction))
+ return NULL;
+
for (i = 0; i < len / period_len; i++) {
desc = pl330_get_desc(pch);
if (!desc) {
@@ -2593,12 +2582,12 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
src = dma_addr;
- dst = pch->fifo_addr;
+ dst = pch->fifo_dma;
break;
case DMA_DEV_TO_MEM:
desc->rqcfg.src_inc = 0;
desc->rqcfg.dst_inc = 1;
- src = pch->fifo_addr;
+ src = pch->fifo_dma;
dst = dma_addr;
break;
default:
@@ -2711,12 +2700,12 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct dma_pl330_chan *pch = to_pchan(chan);
struct scatterlist *sg;
int i;
- dma_addr_t addr;
if (unlikely(!pch || !sgl || !sg_len))
return NULL;
- addr = pch->fifo_addr;
+ if (!pl330_prep_slave_fifo(pch, direction))
+ return NULL;
first = NULL;
@@ -2742,13 +2731,13 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (direction == DMA_MEM_TO_DEV) {
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
- fill_px(&desc->px,
- addr, sg_dma_address(sg), sg_dma_len(sg));
+ fill_px(&desc->px, pch->fifo_dma, sg_dma_address(sg),
+ sg_dma_len(sg));
} else {
desc->rqcfg.src_inc = 0;
desc->rqcfg.dst_inc = 1;
- fill_px(&desc->px,
- sg_dma_address(sg), addr, sg_dma_len(sg));
+ fill_px(&desc->px, sg_dma_address(sg), pch->fifo_dma,
+ sg_dma_len(sg));
}
desc->rqcfg.brst_size = pch->burst_sz;
@@ -2906,6 +2895,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pch->thread = NULL;
pch->chan.device = pd;
pch->dmac = pl330;
+ pch->dir = DMA_NONE;
/* Add the channel to the DMAC list */
list_add_tail(&pch->chan.device_node, &pd->channels);
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 5072a7d306d4..34fb6afd229b 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -1,7 +1,7 @@
/*
* Qualcomm Technologies HIDMA DMA engine interface
*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -210,6 +210,7 @@ static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
INIT_LIST_HEAD(&mchan->prepared);
INIT_LIST_HEAD(&mchan->active);
INIT_LIST_HEAD(&mchan->completed);
+ INIT_LIST_HEAD(&mchan->queued);
spin_lock_init(&mchan->lock);
list_add_tail(&mchan->chan.device_node, &ddev->channels);
@@ -230,9 +231,15 @@ static void hidma_issue_pending(struct dma_chan *dmach)
struct hidma_chan *mchan = to_hidma_chan(dmach);
struct hidma_dev *dmadev = mchan->dmadev;
unsigned long flags;
+ struct hidma_desc *qdesc, *next;
int status;
spin_lock_irqsave(&mchan->lock, flags);
+ list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
+ hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
+ list_move_tail(&qdesc->node, &mchan->active);
+ }
+
if (!mchan->running) {
struct hidma_desc *desc = list_first_entry(&mchan->active,
struct hidma_desc,
@@ -315,17 +322,18 @@ static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
pm_runtime_put_autosuspend(dmadev->ddev.dev);
return -ENODEV;
}
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
mdesc = container_of(txd, struct hidma_desc, desc);
spin_lock_irqsave(&mchan->lock, irqflags);
- /* Move descriptor to active */
- list_move_tail(&mdesc->node, &mchan->active);
+ /* Move descriptor to queued */
+ list_move_tail(&mdesc->node, &mchan->queued);
/* Update cookie */
cookie = dma_cookie_assign(txd);
- hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
spin_unlock_irqrestore(&mchan->lock, irqflags);
return cookie;
@@ -431,6 +439,7 @@ static int hidma_terminate_channel(struct dma_chan *chan)
list_splice_init(&mchan->active, &list);
list_splice_init(&mchan->prepared, &list);
list_splice_init(&mchan->completed, &list);
+ list_splice_init(&mchan->queued, &list);
spin_unlock_irqrestore(&mchan->lock, irqflags);
/* this suspends the existing transfer */
@@ -795,8 +804,11 @@ static int hidma_probe(struct platform_device *pdev)
device_property_read_u32(&pdev->dev, "desc-count",
&dmadev->nr_descriptors);
- if (!dmadev->nr_descriptors && nr_desc_prm)
+ if (nr_desc_prm) {
+ dev_info(&pdev->dev, "overriding number of descriptors as %d\n",
+ nr_desc_prm);
dmadev->nr_descriptors = nr_desc_prm;
+ }
if (!dmadev->nr_descriptors)
dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index c7d014235c32..41e0aa283828 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -104,6 +104,7 @@ struct hidma_chan {
struct dma_chan chan;
struct list_head free;
struct list_head prepared;
+ struct list_head queued;
struct list_head active;
struct list_head completed;
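Taken together, the hidma.c and hidma.h hunks above defer the hardware hand-off: tx_submit() now only assigns a cookie and parks the descriptor on the new "queued" list, and issue_pending() later pushes everything queued onto the hardware and moves it to "active". A condensed sketch of that split under the channel lock; example_chan, example_desc and hw_queue_request() are stand-ins for the driver's own types and hidma_ll_queue_request():

#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include "dmaengine.h"		/* drivers/dma private header: dma_cookie_assign() */

struct example_desc {
	struct dma_async_tx_descriptor desc;
	struct list_head node;
};

struct example_chan {
	struct dma_chan chan;
	spinlock_t lock;
	struct list_head queued;	/* submitted but not yet on hardware */
	struct list_head active;	/* handed to hardware */
};

static dma_cookie_t example_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct example_chan *mchan =
		container_of(txd->chan, struct example_chan, chan);
	struct example_desc *mdesc =
		container_of(txd, struct example_desc, desc);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&mchan->lock, flags);
	/* Only book-keeping here: queue in software and hand out a cookie. */
	list_move_tail(&mdesc->node, &mchan->queued);
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}

static void example_issue_pending(struct dma_chan *dmach)
{
	struct example_chan *mchan =
		container_of(dmach, struct example_chan, chan);
	struct example_desc *qdesc, *next;
	unsigned long flags;

	spin_lock_irqsave(&mchan->lock, flags);
	list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
		hw_queue_request(qdesc);	/* hypothetical hardware hand-off */
		list_move_tail(&qdesc->node, &mchan->active);
	}
	spin_unlock_irqrestore(&mchan->lock, flags);
}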
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index f847d32cc4b5..5a0991bc4787 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -1,7 +1,7 @@
/*
* Qualcomm Technologies HIDMA DMA engine Management interface
*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -49,6 +49,26 @@
#define HIDMA_AUTOSUSPEND_TIMEOUT 2000
#define HIDMA_MAX_CHANNEL_WEIGHT 15
+static unsigned int max_write_request;
+module_param(max_write_request, uint, 0644);
+MODULE_PARM_DESC(max_write_request,
+ "maximum write burst (default: ACPI/DT value)");
+
+static unsigned int max_read_request;
+module_param(max_read_request, uint, 0644);
+MODULE_PARM_DESC(max_read_request,
+ "maximum read burst (default: ACPI/DT value)");
+
+static unsigned int max_wr_xactions;
+module_param(max_wr_xactions, uint, 0644);
+MODULE_PARM_DESC(max_wr_xactions,
+ "maximum number of write transactions (default: ACPI/DT value)");
+
+static unsigned int max_rd_xactions;
+module_param(max_rd_xactions, uint, 0644);
+MODULE_PARM_DESC(max_rd_xactions,
+ "maximum number of read transactions (default: ACPI/DT value)");
+
int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev)
{
unsigned int i;
@@ -207,12 +227,25 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
goto out;
}
+ if (max_write_request) {
+ dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n",
+ max_write_request);
+ mgmtdev->max_write_request = max_write_request;
+ } else
+ max_write_request = mgmtdev->max_write_request;
+
rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes",
&mgmtdev->max_read_request);
if (rc) {
dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
goto out;
}
+ if (max_read_request) {
+ dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n",
+ max_read_request);
+ mgmtdev->max_read_request = max_read_request;
+ } else
+ max_read_request = mgmtdev->max_read_request;
rc = device_property_read_u32(&pdev->dev, "max-write-transactions",
&mgmtdev->max_wr_xactions);
@@ -220,6 +253,12 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "max-write-transactions missing\n");
goto out;
}
+ if (max_wr_xactions) {
+ dev_info(&pdev->dev, "overriding max-write-transactions: %d\n",
+ max_wr_xactions);
+ mgmtdev->max_wr_xactions = max_wr_xactions;
+ } else
+ max_wr_xactions = mgmtdev->max_wr_xactions;
rc = device_property_read_u32(&pdev->dev, "max-read-transactions",
&mgmtdev->max_rd_xactions);
@@ -227,6 +266,12 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "max-read-transactions missing\n");
goto out;
}
+ if (max_rd_xactions) {
+ dev_info(&pdev->dev, "overriding max-read-transactions: %d\n",
+ max_rd_xactions);
+ mgmtdev->max_rd_xactions = max_rd_xactions;
+ } else
+ max_rd_xactions = mgmtdev->max_rd_xactions;
mgmtdev->priority = devm_kcalloc(&pdev->dev,
mgmtdev->dma_channels,
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index bd261c9e9664..ffcadca53243 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -144,6 +144,7 @@ struct rcar_dmac_chan_map {
* @chan: base DMA channel object
* @iomem: channel I/O memory base
* @index: index of this channel in the controller
+ * @irq: channel IRQ
* @src: slave memory address and size on the source side
* @dst: slave memory address and size on the destination side
* @mid_rid: hardware MID/RID for the DMA client using this channel
@@ -161,6 +162,7 @@ struct rcar_dmac_chan {
struct dma_chan chan;
void __iomem *iomem;
unsigned int index;
+ int irq;
struct rcar_dmac_chan_slave src;
struct rcar_dmac_chan_slave dst;
@@ -1008,7 +1010,11 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
rcar_dmac_chan_halt(rchan);
spin_unlock_irq(&rchan->lock);
- /* Now no new interrupts will occur */
+ /*
+ * Now no new interrupts will occur, but one might already be
+ * running. Wait for it to finish before freeing resources.
+ */
+ synchronize_irq(rchan->irq);
if (rchan->mid_rid >= 0) {
/* The caller is holding dma_list_mutex */
@@ -1366,6 +1372,13 @@ done:
spin_unlock_irqrestore(&rchan->lock, flags);
}
+static void rcar_dmac_device_synchronize(struct dma_chan *chan)
+{
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+
+ synchronize_irq(rchan->irq);
+}
+
/* -----------------------------------------------------------------------------
* IRQ handling
*/
@@ -1650,7 +1663,6 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
struct dma_chan *chan = &rchan->chan;
char pdev_irqname[5];
char *irqname;
- int irq;
int ret;
rchan->index = index;
@@ -1667,8 +1679,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
/* Request the channel interrupt. */
sprintf(pdev_irqname, "ch%u", index);
- irq = platform_get_irq_byname(pdev, pdev_irqname);
- if (irq < 0) {
+ rchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
+ if (rchan->irq < 0) {
dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
return -ENODEV;
}
@@ -1678,11 +1690,13 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
if (!irqname)
return -ENOMEM;
- ret = devm_request_threaded_irq(dmac->dev, irq, rcar_dmac_isr_channel,
+ ret = devm_request_threaded_irq(dmac->dev, rchan->irq,
+ rcar_dmac_isr_channel,
rcar_dmac_isr_channel_thread, 0,
irqname, rchan);
if (ret) {
- dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret);
+ dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
+ rchan->irq, ret);
return ret;
}
@@ -1846,6 +1860,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
engine->device_terminate_all = rcar_dmac_chan_terminate_all;
engine->device_tx_status = rcar_dmac_tx_status;
engine->device_issue_pending = rcar_dmac_issue_pending;
+ engine->device_synchronize = rcar_dmac_device_synchronize;
ret = dma_async_device_register(engine);
if (ret < 0)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index a6620b671d1d..c3052fbfd092 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2528,10 +2528,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
dma_addr += period_len;
}
- sg[periods].offset = 0;
- sg_dma_len(&sg[periods]) = 0;
- sg[periods].page_link =
- ((unsigned long)sg | 0x01) & ~0x02;
+ sg_chain(sg, periods + 1, sg);
txd = d40_prep_sg(chan, sg, sg, periods, direction,
DMA_PREP_INTERRUPT);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 3722b9d8d9fe..b9d75a54c896 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1494,35 +1494,7 @@ static int tegra_dma_remove(struct platform_device *pdev)
static int tegra_dma_runtime_suspend(struct device *dev)
{
struct tegra_dma *tdma = dev_get_drvdata(dev);
-
- clk_disable_unprepare(tdma->dma_clk);
- return 0;
-}
-
-static int tegra_dma_runtime_resume(struct device *dev)
-{
- struct tegra_dma *tdma = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_prepare_enable(tdma->dma_clk);
- if (ret < 0) {
- dev_err(dev, "clk_enable failed: %d\n", ret);
- return ret;
- }
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int tegra_dma_pm_suspend(struct device *dev)
-{
- struct tegra_dma *tdma = dev_get_drvdata(dev);
int i;
- int ret;
-
- /* Enable clock before accessing register */
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
- return ret;
tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
for (i = 0; i < tdma->chip_data->nr_channels; i++) {
@@ -1543,21 +1515,21 @@ static int tegra_dma_pm_suspend(struct device *dev)
TEGRA_APBDMA_CHAN_WCOUNT);
}
- /* Disable clock */
- pm_runtime_put(dev);
+ clk_disable_unprepare(tdma->dma_clk);
+
return 0;
}
-static int tegra_dma_pm_resume(struct device *dev)
+static int tegra_dma_runtime_resume(struct device *dev)
{
struct tegra_dma *tdma = dev_get_drvdata(dev);
- int i;
- int ret;
+ int i, ret;
- /* Enable clock before accessing register */
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
+ ret = clk_prepare_enable(tdma->dma_clk);
+ if (ret < 0) {
+ dev_err(dev, "clk_enable failed: %d\n", ret);
return ret;
+ }
tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
@@ -1582,16 +1554,14 @@ static int tegra_dma_pm_resume(struct device *dev)
(ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
}
- /* Disable clock */
- pm_runtime_put(dev);
return 0;
}
-#endif
static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
NULL)
- SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const struct of_device_id tegra_dma_of_match[] = {
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 6d221e5c72ee..47f64192d2fd 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -794,9 +794,6 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
chan = to_chan(dchan);
- if (len > ZYNQMP_DMA_MAX_TRANS_LEN)
- return NULL;
-
desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN);
spin_lock_bh(&chan->lock);
diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c
index 29ecfd815320..a01461d63f68 100644
--- a/drivers/firmware/tegra/ivc.c
+++ b/drivers/firmware/tegra/ivc.c
@@ -646,12 +646,12 @@ int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
if (peer) {
ivc->rx.phys = dma_map_single(peer, rx, queue_size,
DMA_BIDIRECTIONAL);
- if (ivc->rx.phys == DMA_ERROR_CODE)
+ if (dma_mapping_error(peer, ivc->rx.phys))
return -ENOMEM;
ivc->tx.phys = dma_map_single(peer, tx, queue_size,
DMA_BIDIRECTIONAL);
- if (ivc->tx.phys == DMA_ERROR_CODE) {
+ if (dma_mapping_error(peer, ivc->tx.phys)) {
dma_unmap_single(peer, ivc->rx.phys, queue_size,
DMA_BIDIRECTIONAL);
return -ENOMEM;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 23ca51ee6b28..f235eae04c16 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -242,6 +242,17 @@ config GPIO_ICH
If unsure, say N.
+config GPIO_INGENIC
+ tristate "Ingenic JZ47xx SoCs GPIO support"
+ depends on OF
+ depends on MACH_INGENIC || COMPILE_TEST
+ select GPIOLIB_IRQCHIP
+ help
+ Say yes here to support the GPIO functionality present on the
+ JZ4740 and JZ4780 SoCs from Ingenic.
+
+ If unsure, say N.
+
config GPIO_IOP
tristate "Intel IOP GPIO"
depends on ARCH_IOP32X || ARCH_IOP33X || COMPILE_TEST
@@ -326,9 +337,10 @@ config GPIO_MPC8XXX
config GPIO_MVEBU
def_bool y
- depends on PLAT_ORION
+ depends on PLAT_ORION || ARCH_MVEBU
depends on OF_GPIO
select GENERIC_IRQ_CHIP
+ select REGMAP_MMIO
config GPIO_MXC
def_bool y
@@ -504,12 +516,13 @@ config GPIO_XILINX
config GPIO_XLP
tristate "Netlogic XLP GPIO support"
- depends on OF_GPIO && (CPU_XLP || ARCH_VULCAN || ARCH_THUNDER2 || COMPILE_TEST)
+ depends on OF_GPIO && (CPU_XLP || ARCH_THUNDER2 || COMPILE_TEST)
select GPIOLIB_IRQCHIP
help
This driver provides support for GPIO interface on Netlogic XLP MIPS64
SoCs. Currently supported XLP variants are XLP8XX, XLP3XX, XLP2XX,
- XLP9XX and XLP5XX.
+ XLP9XX and XLP5XX. The same GPIO controller block is also present in
+ Cavium's ThunderX2 CN99XX SoCs.
If unsure, say N.
@@ -952,6 +965,16 @@ config GPIO_LP873X
This driver can also be built as a module. If so, the module will be
called gpio-lp873x.
+config GPIO_LP87565
+ tristate "TI LP87565 GPIO"
+ depends on MFD_TI_LP87565
+ help
+ This driver supports the GPIO on TI LP87565 PMICs. 3 GPIOs are present
+ on LP87565 PMICs.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-lp87565.
+
config GPIO_MAX77620
tristate "GPIO support for PMIC MAX77620 and MAX20024"
depends on MFD_MAX77620
@@ -1225,22 +1248,11 @@ config GPIO_PISOSR
GPIO driver for SPI compatible parallel-in/serial-out shift
registers. These are input only devices.
-endmenu
-
-menu "SPI or I2C GPIO expanders"
- depends on (SPI_MASTER && !I2C) || I2C
-
-config GPIO_MCP23S08
- tristate "Microchip MCP23xxx I/O expander"
- depends on OF_GPIO
- select GPIOLIB_IRQCHIP
- select REGMAP_I2C if I2C
- select REGMAP if SPI_MASTER
+config GPIO_XRA1403
+ tristate "EXAR XRA1403 16-bit GPIO expander"
+ select REGMAP_SPI
help
- SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
- I/O expanders.
- This provides a GPIO interface supporting inputs and outputs.
- The I2C versions of the chips can be used as interrupt-controller.
+ GPIO driver for EXAR XRA1403 16-bit SPI-based GPIO expander.
endmenu
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 68b96277d9fa..a9fda6c55113 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_GPIO_GPIO_MM) += gpio-gpio-mm.o
obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o
obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
+obj-$(CONFIG_GPIO_INGENIC) += gpio-ingenic.o
obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
@@ -66,6 +67,7 @@ obj-$(CONFIG_GPIO_LP3943) += gpio-lp3943.o
obj-$(CONFIG_GPIO_LPC18XX) += gpio-lpc18xx.o
obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o
+obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o
obj-$(CONFIG_GPIO_LYNXPOINT) += gpio-lynxpoint.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
@@ -77,7 +79,6 @@ obj-$(CONFIG_GPIO_MENZ127) += gpio-menz127.o
obj-$(CONFIG_GPIO_MERRIFIELD) += gpio-merrifield.o
obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o
-obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o
obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
@@ -141,6 +142,7 @@ obj-$(CONFIG_GPIO_XGENE) += gpio-xgene.o
obj-$(CONFIG_GPIO_XGENE_SB) += gpio-xgene-sb.o
obj-$(CONFIG_GPIO_XILINX) += gpio-xilinx.o
obj-$(CONFIG_GPIO_XLP) += gpio-xlp.o
+obj-$(CONFIG_GPIO_XRA1403) += gpio-xra1403.o
obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o
obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o
obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index c0f718b12317..e717f8dc3966 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -16,7 +16,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/i2c/adp5588.h>
+#include <linux/platform_data/adp5588.h>
#define DRV_NAME "adp5588-gpio"
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index cd23fd727f95..d4e6ba0301bc 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -33,9 +33,23 @@ static int arizona_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
{
struct arizona_gpio *arizona_gpio = gpiochip_get_data(chip);
struct arizona *arizona = arizona_gpio->arizona;
+ bool persistent = gpiochip_line_is_persistent(chip, offset);
+ bool change;
+ int ret;
- return regmap_update_bits(arizona->regmap, ARIZONA_GPIO1_CTRL + offset,
- ARIZONA_GPN_DIR, ARIZONA_GPN_DIR);
+ ret = regmap_update_bits_check(arizona->regmap,
+ ARIZONA_GPIO1_CTRL + offset,
+ ARIZONA_GPN_DIR, ARIZONA_GPN_DIR,
+ &change);
+ if (ret < 0)
+ return ret;
+
+ if (change && persistent) {
+ pm_runtime_mark_last_busy(chip->parent);
+ pm_runtime_put_autosuspend(chip->parent);
+ }
+
+ return 0;
}
static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -85,6 +99,21 @@ static int arizona_gpio_direction_out(struct gpio_chip *chip,
{
struct arizona_gpio *arizona_gpio = gpiochip_get_data(chip);
struct arizona *arizona = arizona_gpio->arizona;
+ bool persistent = gpiochip_line_is_persistent(chip, offset);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(arizona->regmap, ARIZONA_GPIO1_CTRL + offset, &val);
+ if (ret < 0)
+ return ret;
+
+ if ((val & ARIZONA_GPN_DIR) && persistent) {
+ ret = pm_runtime_get_sync(chip->parent);
+ if (ret < 0) {
+ dev_err(chip->parent, "Failed to resume: %d\n", ret);
+ return ret;
+ }
+ }
if (value)
value = ARIZONA_GPN_LVL;
@@ -158,6 +187,8 @@ static int arizona_gpio_probe(struct platform_device *pdev)
else
arizona_gpio->gpio_chip.base = -1;
+ pm_runtime_enable(&pdev->dev);
+
ret = devm_gpiochip_add_data(&pdev->dev, &arizona_gpio->gpio_chip,
arizona_gpio);
if (ret < 0) {
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index ac173575d3f6..65cb359308e3 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -437,6 +437,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
{
unsigned gpio, bank;
int irq;
+ int ret;
struct clk *clk;
u32 binten = 0;
unsigned ngpio, bank_irq;
@@ -480,12 +481,15 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
PTR_ERR(clk));
return PTR_ERR(clk);
}
- clk_prepare_enable(clk);
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
if (!pdata->gpio_unbanked) {
irq = devm_irq_alloc_descs(dev, -1, 0, ngpio, 0);
if (irq < 0) {
dev_err(dev, "Couldn't allocate IRQ numbers\n");
+ clk_disable_unprepare(clk);
return irq;
}
@@ -494,6 +498,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
chips);
if (!irq_domain) {
dev_err(dev, "Couldn't register an IRQ domain\n");
+ clk_disable_unprepare(clk);
return -ENODEV;
}
}
@@ -562,8 +567,10 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
sizeof(struct
davinci_gpio_irq_data),
GFP_KERNEL);
- if (!irqdata)
+ if (!irqdata) {
+ clk_disable_unprepare(clk);
return -ENOMEM;
+ }
irqdata->regs = g;
irqdata->bank_num = bank;
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index f051c4552af5..c07ada9c7af6 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -288,7 +288,8 @@ static int dwapb_irq_set_type(struct irq_data *d, u32 type)
irq_setup_alt_chip(d, type);
dwapb_write(gpio, GPIO_INTTYPE_LEVEL, level);
- dwapb_write(gpio, GPIO_INT_POLARITY, polarity);
+ if (type != IRQ_TYPE_EDGE_BOTH)
+ dwapb_write(gpio, GPIO_INT_POLARITY, polarity);
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index 081076771217..fb8d304cfa17 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -31,6 +31,7 @@ struct exar_gpio_chip {
int index;
void __iomem *regs;
char name[20];
+ unsigned int first_pin;
};
static void exar_update(struct gpio_chip *chip, unsigned int reg, int val,
@@ -51,11 +52,12 @@ static void exar_update(struct gpio_chip *chip, unsigned int reg, int val,
static int exar_set_direction(struct gpio_chip *chip, int direction,
unsigned int offset)
{
- unsigned int bank = offset / 8;
- unsigned int addr;
+ struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
+ unsigned int addr = (offset + exar_gpio->first_pin) / 8 ?
+ EXAR_OFFSET_MPIOSEL_HI : EXAR_OFFSET_MPIOSEL_LO;
+ unsigned int bit = (offset + exar_gpio->first_pin) % 8;
- addr = bank ? EXAR_OFFSET_MPIOSEL_HI : EXAR_OFFSET_MPIOSEL_LO;
- exar_update(chip, addr, direction, offset % 8);
+ exar_update(chip, addr, direction, bit);
return 0;
}
@@ -68,41 +70,38 @@ static int exar_get(struct gpio_chip *chip, unsigned int reg)
value = readb(exar_gpio->regs + reg);
mutex_unlock(&exar_gpio->lock);
- return !!value;
+ return value;
}
static int exar_get_direction(struct gpio_chip *chip, unsigned int offset)
{
- unsigned int bank = offset / 8;
- unsigned int addr;
- int val;
-
- addr = bank ? EXAR_OFFSET_MPIOSEL_HI : EXAR_OFFSET_MPIOSEL_LO;
- val = exar_get(chip, addr) >> (offset % 8);
+ struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
+ unsigned int addr = (offset + exar_gpio->first_pin) / 8 ?
+ EXAR_OFFSET_MPIOSEL_HI : EXAR_OFFSET_MPIOSEL_LO;
+ unsigned int bit = (offset + exar_gpio->first_pin) % 8;
- return !!val;
+ return !!(exar_get(chip, addr) & BIT(bit));
}
static int exar_get_value(struct gpio_chip *chip, unsigned int offset)
{
- unsigned int bank = offset / 8;
- unsigned int addr;
- int val;
-
- addr = bank ? EXAR_OFFSET_MPIOLVL_LO : EXAR_OFFSET_MPIOLVL_HI;
- val = exar_get(chip, addr) >> (offset % 8);
+ struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
+ unsigned int addr = (offset + exar_gpio->first_pin) / 8 ?
+ EXAR_OFFSET_MPIOLVL_HI : EXAR_OFFSET_MPIOLVL_LO;
+ unsigned int bit = (offset + exar_gpio->first_pin) % 8;
- return !!val;
+ return !!(exar_get(chip, addr) & BIT(bit));
}
static void exar_set_value(struct gpio_chip *chip, unsigned int offset,
int value)
{
- unsigned int bank = offset / 8;
- unsigned int addr;
+ struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
+ unsigned int addr = (offset + exar_gpio->first_pin) / 8 ?
+ EXAR_OFFSET_MPIOLVL_HI : EXAR_OFFSET_MPIOLVL_LO;
+ unsigned int bit = (offset + exar_gpio->first_pin) % 8;
- addr = bank ? EXAR_OFFSET_MPIOLVL_HI : EXAR_OFFSET_MPIOLVL_LO;
- exar_update(chip, addr, value, offset % 8);
+ exar_update(chip, addr, value, bit);
}
static int exar_direction_output(struct gpio_chip *chip, unsigned int offset,
@@ -119,27 +118,30 @@ static int exar_direction_input(struct gpio_chip *chip, unsigned int offset)
static int gpio_exar_probe(struct platform_device *pdev)
{
- struct pci_dev *pcidev = platform_get_drvdata(pdev);
+ struct pci_dev *pcidev = to_pci_dev(pdev->dev.parent);
struct exar_gpio_chip *exar_gpio;
+ u32 first_pin, ngpios;
void __iomem *p;
int index, ret;
- if (pcidev->vendor != PCI_VENDOR_ID_EXAR)
- return -ENODEV;
-
/*
- * Map the pci device to get the register addresses.
- * We will need to read and write those registers to control
- * the GPIO pins.
- * Using managed functions will save us from unmaping on exit.
- * As the device is enabled using managed functions by the
- * UART driver we can also use managed functions here.
+ * The UART driver must have mapped region 0 prior to registering this
+ * device - use it.
*/
- p = pcim_iomap(pcidev, 0, 0);
+ p = pcim_iomap_table(pcidev)[0];
if (!p)
return -ENOMEM;
- exar_gpio = devm_kzalloc(&pcidev->dev, sizeof(*exar_gpio), GFP_KERNEL);
+ ret = device_property_read_u32(&pdev->dev, "linux,first-pin",
+ &first_pin);
+ if (ret)
+ return ret;
+
+ ret = device_property_read_u32(&pdev->dev, "ngpios", &ngpios);
+ if (ret)
+ return ret;
+
+ exar_gpio = devm_kzalloc(&pdev->dev, sizeof(*exar_gpio), GFP_KERNEL);
if (!exar_gpio)
return -ENOMEM;
@@ -149,18 +151,19 @@ static int gpio_exar_probe(struct platform_device *pdev)
sprintf(exar_gpio->name, "exar_gpio%d", index);
exar_gpio->gpio_chip.label = exar_gpio->name;
- exar_gpio->gpio_chip.parent = &pcidev->dev;
+ exar_gpio->gpio_chip.parent = &pdev->dev;
exar_gpio->gpio_chip.direction_output = exar_direction_output;
exar_gpio->gpio_chip.direction_input = exar_direction_input;
exar_gpio->gpio_chip.get_direction = exar_get_direction;
exar_gpio->gpio_chip.get = exar_get_value;
exar_gpio->gpio_chip.set = exar_set_value;
exar_gpio->gpio_chip.base = -1;
- exar_gpio->gpio_chip.ngpio = 16;
+ exar_gpio->gpio_chip.ngpio = ngpios;
exar_gpio->regs = p;
exar_gpio->index = index;
+ exar_gpio->first_pin = first_pin;
- ret = devm_gpiochip_add_data(&pcidev->dev,
+ ret = devm_gpiochip_add_data(&pdev->dev,
&exar_gpio->gpio_chip, exar_gpio);
if (ret)
goto err_destroy;
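Each accessor above now adds the new first_pin offset before splitting a GPIO number into a register bank and a bit: pins 0-7 of the device land in the LO register and pins 8-15 in the HI register. A small sketch of that mapping, returning a bank index rather than a concrete register address:

/*
 * Split a chip-relative GPIO offset into the MPIO register bank
 * (0 = LO, pins 0-7; 1 = HI, pins 8-15) and the bit within that bank,
 * as the accessors above do after adding first_pin. For example,
 * first_pin = 8 and offset = 3 selects bit 3 of the HI register.
 */
static void example_exar_pin_to_bank_bit(unsigned int offset,
					 unsigned int first_pin,
					 unsigned int *bank, unsigned int *bit)
{
	unsigned int pin = offset + first_pin;

	*bank = pin / 8;
	*bit = pin % 8;
}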
diff --git a/drivers/gpio/gpio-ingenic.c b/drivers/gpio/gpio-ingenic.c
new file mode 100644
index 000000000000..254780730b95
--- /dev/null
+++ b/drivers/gpio/gpio-ingenic.c
@@ -0,0 +1,394 @@
+/*
+ * Ingenic JZ47xx GPIO driver
+ *
+ * Copyright (c) 2017 Paul Cercueil <[email protected]>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regmap.h>
+
+#define GPIO_PIN 0x00
+#define GPIO_MSK 0x20
+
+#define JZ4740_GPIO_DATA 0x10
+#define JZ4740_GPIO_SELECT 0x50
+#define JZ4740_GPIO_DIR 0x60
+#define JZ4740_GPIO_TRIG 0x70
+#define JZ4740_GPIO_FLAG 0x80
+
+#define JZ4770_GPIO_INT 0x10
+#define JZ4770_GPIO_PAT1 0x30
+#define JZ4770_GPIO_PAT0 0x40
+#define JZ4770_GPIO_FLAG 0x50
+
+#define REG_SET(x) ((x) + 0x4)
+#define REG_CLEAR(x) ((x) + 0x8)
+
+enum jz_version {
+ ID_JZ4740,
+ ID_JZ4770,
+ ID_JZ4780,
+};
+
+struct ingenic_gpio_chip {
+ struct regmap *map;
+ struct gpio_chip gc;
+ struct irq_chip irq_chip;
+ unsigned int irq, reg_base;
+ enum jz_version version;
+};
+
+static u32 gpio_ingenic_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg)
+{
+ unsigned int val;
+
+ regmap_read(jzgc->map, jzgc->reg_base + reg, &val);
+
+ return (u32) val;
+}
+
+static void gpio_ingenic_set_bit(struct ingenic_gpio_chip *jzgc,
+ u8 reg, u8 offset, bool set)
+{
+ if (set)
+ reg = REG_SET(reg);
+ else
+ reg = REG_CLEAR(reg);
+
+ regmap_write(jzgc->map, jzgc->reg_base + reg, BIT(offset));
+}
+
+static inline bool gpio_get_value(struct ingenic_gpio_chip *jzgc, u8 offset)
+{
+ unsigned int val = gpio_ingenic_read_reg(jzgc, GPIO_PIN);
+
+ return !!(val & BIT(offset));
+}
+
+static void gpio_set_value(struct ingenic_gpio_chip *jzgc, u8 offset, int value)
+{
+ if (jzgc->version >= ID_JZ4770)
+ gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value);
+ else
+ gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
+}
+
+static void irq_set_type(struct ingenic_gpio_chip *jzgc,
+ u8 offset, unsigned int type)
+{
+ u8 reg1, reg2;
+
+ if (jzgc->version >= ID_JZ4770) {
+ reg1 = JZ4770_GPIO_PAT1;
+ reg2 = JZ4770_GPIO_PAT0;
+ } else {
+ reg1 = JZ4740_GPIO_TRIG;
+ reg2 = JZ4740_GPIO_DIR;
+ }
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ gpio_ingenic_set_bit(jzgc, reg2, offset, true);
+ gpio_ingenic_set_bit(jzgc, reg1, offset, true);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ gpio_ingenic_set_bit(jzgc, reg2, offset, false);
+ gpio_ingenic_set_bit(jzgc, reg1, offset, true);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ gpio_ingenic_set_bit(jzgc, reg2, offset, true);
+ gpio_ingenic_set_bit(jzgc, reg1, offset, false);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ default:
+ gpio_ingenic_set_bit(jzgc, reg2, offset, false);
+ gpio_ingenic_set_bit(jzgc, reg1, offset, false);
+ break;
+ }
+}
+
+static void ingenic_gpio_irq_mask(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+
+ gpio_ingenic_set_bit(jzgc, GPIO_MSK, irqd->hwirq, true);
+}
+
+static void ingenic_gpio_irq_unmask(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+
+ gpio_ingenic_set_bit(jzgc, GPIO_MSK, irqd->hwirq, false);
+}
+
+static void ingenic_gpio_irq_enable(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+ int irq = irqd->hwirq;
+
+ if (jzgc->version >= ID_JZ4770)
+ gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
+ else
+ gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
+
+ ingenic_gpio_irq_unmask(irqd);
+}
+
+static void ingenic_gpio_irq_disable(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+ int irq = irqd->hwirq;
+
+ ingenic_gpio_irq_mask(irqd);
+
+ if (jzgc->version >= ID_JZ4770)
+ gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_INT, irq, false);
+ else
+ gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
+}
+
+static void ingenic_gpio_irq_ack(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+ int irq = irqd->hwirq;
+ bool high;
+
+ if (irqd_get_trigger_type(irqd) == IRQ_TYPE_EDGE_BOTH) {
+ /*
+ * Switch to an interrupt for the opposite edge to the one that
+ * triggered the interrupt being ACKed.
+ */
+ high = gpio_get_value(jzgc, irq);
+ if (high)
+ irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_FALLING);
+ else
+ irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_RISING);
+ }
+
+ if (jzgc->version >= ID_JZ4770)
+ gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false);
+ else
+ gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
+}
+
+static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_set_handler_locked(irqd, handle_edge_irq);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_set_handler_locked(irqd, handle_level_irq);
+ break;
+ default:
+ irq_set_handler_locked(irqd, handle_bad_irq);
+ }
+
+ if (type == IRQ_TYPE_EDGE_BOTH) {
+ /*
+ * The hardware does not support interrupts on both edges. The
+ * best we can do is to set up a single-edge interrupt and then
+ * switch to the opposing edge when ACKing the interrupt.
+ */
+ bool high = gpio_get_value(jzgc, irqd->hwirq);
+
+ type = high ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING;
+ }
+
+ irq_set_type(jzgc, irqd->hwirq, type);
+ return 0;
+}
+
+static int ingenic_gpio_irq_set_wake(struct irq_data *irqd, unsigned int on)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+
+ return irq_set_irq_wake(jzgc->irq, on);
+}
+
+static void ingenic_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+ struct irq_chip *irq_chip = irq_data_get_irq_chip(&desc->irq_data);
+ unsigned long flag, i;
+
+ chained_irq_enter(irq_chip, desc);
+
+ if (jzgc->version >= ID_JZ4770)
+ flag = gpio_ingenic_read_reg(jzgc, JZ4770_GPIO_FLAG);
+ else
+ flag = gpio_ingenic_read_reg(jzgc, JZ4740_GPIO_FLAG);
+
+ for_each_set_bit(i, &flag, 32)
+ generic_handle_irq(irq_linear_revmap(gc->irqdomain, i));
+ chained_irq_exit(irq_chip, desc);
+}
+
+static void ingenic_gpio_set(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+
+ gpio_set_value(jzgc, offset, value);
+}
+
+static int ingenic_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
+
+ return (int) gpio_get_value(jzgc, offset);
+}
+
+static int ingenic_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ return pinctrl_gpio_direction_input(gc->base + offset);
+}
+
+static int ingenic_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ ingenic_gpio_set(gc, offset, value);
+ return pinctrl_gpio_direction_output(gc->base + offset);
+}
+
+static const struct of_device_id ingenic_gpio_of_match[] = {
+ { .compatible = "ingenic,jz4740-gpio", .data = (void *)ID_JZ4740 },
+ { .compatible = "ingenic,jz4770-gpio", .data = (void *)ID_JZ4770 },
+ { .compatible = "ingenic,jz4780-gpio", .data = (void *)ID_JZ4780 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ingenic_gpio_of_match);
+
+static int ingenic_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id = of_match_device(
+ ingenic_gpio_of_match, dev);
+ struct ingenic_gpio_chip *jzgc;
+ u32 bank;
+ int err;
+
+ jzgc = devm_kzalloc(dev, sizeof(*jzgc), GFP_KERNEL);
+ if (!jzgc)
+ return -ENOMEM;
+
+ jzgc->map = dev_get_drvdata(dev->parent);
+ if (!jzgc->map) {
+ dev_err(dev, "Cannot get parent regmap\n");
+ return -ENXIO;
+ }
+
+ err = of_property_read_u32(dev->of_node, "reg", &bank);
+ if (err) {
+ dev_err(dev, "Cannot read \"reg\" property: %i\n", err);
+ return err;
+ }
+
+ jzgc->reg_base = bank * 0x100;
+
+ jzgc->gc.label = devm_kasprintf(dev, GFP_KERNEL, "GPIO%c", 'A' + bank);
+ if (!jzgc->gc.label)
+ return -ENOMEM;
+
+ /* DO NOT EXPAND THIS: FOR BACKWARD GPIO NUMBERSPACE COMPATIBILITY
+ * ONLY: WORK TO TRANSITION CONSUMERS TO USE THE GPIO DESCRIPTOR API IN
+ * <linux/gpio/consumer.h> INSTEAD.
+ */
+ jzgc->gc.base = bank * 32;
+
+ jzgc->gc.ngpio = 32;
+ jzgc->gc.parent = dev;
+ jzgc->gc.of_node = dev->of_node;
+ jzgc->gc.owner = THIS_MODULE;
+ jzgc->version = (enum jz_version)of_id->data;
+
+ jzgc->gc.set = ingenic_gpio_set;
+ jzgc->gc.get = ingenic_gpio_get;
+ jzgc->gc.direction_input = ingenic_gpio_direction_input;
+ jzgc->gc.direction_output = ingenic_gpio_direction_output;
+
+ if (of_property_read_bool(dev->of_node, "gpio-ranges")) {
+ jzgc->gc.request = gpiochip_generic_request;
+ jzgc->gc.free = gpiochip_generic_free;
+ }
+
+ err = devm_gpiochip_add_data(dev, &jzgc->gc, jzgc);
+ if (err)
+ return err;
+
+ jzgc->irq = irq_of_parse_and_map(dev->of_node, 0);
+ if (!jzgc->irq)
+ return -EINVAL;
+
+ jzgc->irq_chip.name = jzgc->gc.label;
+ jzgc->irq_chip.irq_enable = ingenic_gpio_irq_enable;
+ jzgc->irq_chip.irq_disable = ingenic_gpio_irq_disable;
+ jzgc->irq_chip.irq_unmask = ingenic_gpio_irq_unmask;
+ jzgc->irq_chip.irq_mask = ingenic_gpio_irq_mask;
+ jzgc->irq_chip.irq_ack = ingenic_gpio_irq_ack;
+ jzgc->irq_chip.irq_set_type = ingenic_gpio_irq_set_type;
+ jzgc->irq_chip.irq_set_wake = ingenic_gpio_irq_set_wake;
+ jzgc->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
+ err = gpiochip_irqchip_add(&jzgc->gc, &jzgc->irq_chip, 0,
+ handle_level_irq, IRQ_TYPE_NONE);
+ if (err)
+ return err;
+
+ gpiochip_set_chained_irqchip(&jzgc->gc, &jzgc->irq_chip,
+ jzgc->irq, ingenic_gpio_irq_handler);
+ return 0;
+}
+
+static int ingenic_gpio_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver ingenic_gpio_driver = {
+ .driver = {
+ .name = "gpio-ingenic",
+ .of_match_table = of_match_ptr(ingenic_gpio_of_match),
+ },
+ .probe = ingenic_gpio_probe,
+ .remove = ingenic_gpio_remove,
+};
+
+static int __init ingenic_gpio_drv_register(void)
+{
+ return platform_driver_register(&ingenic_gpio_driver);
+}
+subsys_initcall(ingenic_gpio_drv_register);
+
+static void __exit ingenic_gpio_drv_unregister(void)
+{
+ platform_driver_unregister(&ingenic_gpio_driver);
+}
+module_exit(ingenic_gpio_drv_unregister);
+
+MODULE_AUTHOR("Paul Cercueil <[email protected]>");
+MODULE_DESCRIPTION("Ingenic JZ47xx GPIO driver");
+MODULE_LICENSE("GPL");
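
The irq_ack/irq_set_type pair above emulates IRQ_TYPE_EDGE_BOTH in software: arm a single-edge trigger and flip it to the opposite edge on every ACK, based on the current pin level. A standalone sketch of that decision, with the pin level faked rather than read from the PIN register:

    #include <stdbool.h>
    #include <stdio.h>

    enum trigger { EDGE_RISING, EDGE_FALLING };

    static enum trigger next_trigger(bool pin_is_high)
    {
        /* A high line can only fall next; a low line can only rise. */
        return pin_is_high ? EDGE_FALLING : EDGE_RISING;
    }

    int main(void)
    {
        bool levels[] = { true, false, false, true };

        for (unsigned int i = 0; i < 4; i++)
            printf("level=%d -> arm %s edge\n", (int)levels[i],
                   next_trigger(levels[i]) == EDGE_FALLING ?
                   "falling" : "rising");
        return 0;
    }
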
diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c
new file mode 100644
index 000000000000..6313c50bb91b
--- /dev/null
+++ b/drivers/gpio/gpio-lp87565.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ * Keerthy <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ *
+ * Based on the LP873X driver
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/lp87565.h>
+
+struct lp87565_gpio {
+ struct gpio_chip chip;
+ struct regmap *map;
+};
+
+static int lp87565_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct lp87565_gpio *gpio = gpiochip_get_data(chip);
+ int ret, val;
+
+ ret = regmap_read(gpio->map, LP87565_REG_GPIO_CONFIG, &val);
+ if (ret < 0)
+ return ret;
+
+ return !(val & BIT(offset));
+}
+
+static int lp87565_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct lp87565_gpio *gpio = gpiochip_get_data(chip);
+
+ return regmap_update_bits(gpio->map,
+ LP87565_REG_GPIO_CONFIG,
+ BIT(offset), 0);
+}
+
+static int lp87565_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ struct lp87565_gpio *gpio = gpiochip_get_data(chip);
+
+ /* Set the desired level first, then switch the line to output */
+ regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT,
+ BIT(offset), value ? BIT(offset) : 0);
+
+ return regmap_update_bits(gpio->map,
+ LP87565_REG_GPIO_CONFIG,
+ BIT(offset), BIT(offset));
+}
+
+static int lp87565_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct lp87565_gpio *gpio = gpiochip_get_data(chip);
+ int ret, val;
+
+ ret = regmap_read(gpio->map, LP87565_REG_GPIO_IN, &val);
+ if (ret < 0)
+ return ret;
+
+ return !!(val & BIT(offset));
+}
+
+static void lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct lp87565_gpio *gpio = gpiochip_get_data(chip);
+
+ regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT,
+ BIT(offset), value ? BIT(offset) : 0);
+}
+
+static int lp87565_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+ struct lp87565_gpio *gpio = gpiochip_get_data(gc);
+ int ret;
+
+ switch (offset) {
+ case 0:
+ case 1:
+ case 2:
+ /*
+ * The MUX can program the pin to be either in EN1/2/3 pin mode
+ * or in GPIO1/2/3 mode.
+ * Set up the GPIO*_SEL MUX to GPIO mode.
+ */
+ ret = regmap_update_bits(gpio->map,
+ LP87565_REG_PIN_FUNCTION,
+ BIT(offset), BIT(offset));
+ if (ret)
+ return ret;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int lp87565_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
+{
+ struct lp87565_gpio *gpio = gpiochip_get_data(gc);
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ return regmap_update_bits(gpio->map,
+ LP87565_REG_GPIO_CONFIG,
+ BIT(offset +
+ __ffs(LP87565_GOIO1_OD)),
+ BIT(offset +
+ __ffs(LP87565_GOIO1_OD)));
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ return regmap_update_bits(gpio->map,
+ LP87565_REG_GPIO_CONFIG,
+ BIT(offset +
+ __ffs(LP87565_GOIO1_OD)), 0);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static const struct gpio_chip template_chip = {
+ .label = "lp87565-gpio",
+ .owner = THIS_MODULE,
+ .request = lp87565_gpio_request,
+ .get_direction = lp87565_gpio_get_direction,
+ .direction_input = lp87565_gpio_direction_input,
+ .direction_output = lp87565_gpio_direction_output,
+ .get = lp87565_gpio_get,
+ .set = lp87565_gpio_set,
+ .set_config = lp87565_gpio_set_config,
+ .base = -1,
+ .ngpio = 3,
+ .can_sleep = true,
+};
+
+static int lp87565_gpio_probe(struct platform_device *pdev)
+{
+ struct lp87565_gpio *gpio;
+ struct lp87565 *lp87565;
+ int ret;
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ lp87565 = dev_get_drvdata(pdev->dev.parent);
+ gpio->chip = template_chip;
+ gpio->chip.parent = lp87565->dev;
+ gpio->map = lp87565->regmap;
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id lp87565_gpio_id_table[] = {
+ { "lp87565-q1-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, lp87565_gpio_id_table);
+
+static struct platform_driver lp87565_gpio_driver = {
+ .driver = {
+ .name = "lp87565-gpio",
+ },
+ .probe = lp87565_gpio_probe,
+ .id_table = lp87565_gpio_id_table,
+};
+module_platform_driver(lp87565_gpio_driver);
+
+MODULE_AUTHOR("Keerthy <[email protected]>");
+MODULE_DESCRIPTION("LP87565 GPIO driver");
+MODULE_LICENSE("GPL v2");
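
A standalone sketch of the per-line open-drain bit computation used in lp87565_gpio_set_config(): the line's OD bit sits at offset plus the position of the first OD bit in the config register. The 0x04 mask below is a placeholder, not the real LP87565_GOIO1_OD value, and __builtin_ctz() stands in for the kernel's __ffs().

    #include <stdio.h>

    #define OD_FIELD_FIRST_BIT 0x04 /* placeholder for LP87565_GOIO1_OD */

    int main(void)
    {
        for (unsigned int offset = 0; offset < 3; offset++) {
            unsigned int bit = offset + __builtin_ctz(OD_FIELD_FIRST_BIT);

            printf("GPIO%u open-drain bit %u (mask 0x%02x)\n",
                   offset + 1, bit, 1u << bit);
        }
        return 0;
    }
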
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 4ea4c6a1313b..7f4d26ce5f23 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -20,7 +20,7 @@
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/i2c/max732x.h>
+#include <linux/platform_data/max732x.h>
#include <linux/of.h>
diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c
index 9dbdc3672f5e..ec8560298805 100644
--- a/drivers/gpio/gpio-merrifield.c
+++ b/drivers/gpio/gpio-merrifield.c
@@ -11,7 +11,6 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
-#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 78896a869fd9..74fdce096c26 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -385,14 +385,18 @@ static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
return ret;
}
-static void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
- unsigned int irq_start, unsigned int num)
+static int ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
+ unsigned int irq_start,
+ unsigned int num)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
gc = irq_alloc_generic_chip("ioh_gpio", 1, irq_start, chip->base,
handle_simple_irq);
+ if (!gc)
+ return -ENOMEM;
+
gc->private = chip;
ct = gc->chip_types;
@@ -404,6 +408,8 @@ static void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+
+ return 0;
}
static int ioh_gpio_probe(struct pci_dev *pdev,
@@ -468,7 +474,11 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
goto err_gpiochip_add;
}
chip->irq_base = irq_base;
- ioh_gpio_alloc_generic_chip(chip, irq_base, num_ports[j]);
+
+ ret = ioh_gpio_alloc_generic_chip(chip,
+ irq_base, num_ports[j]);
+ if (ret)
+ goto err_gpiochip_add;
}
chip = chip_save;
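
The gpio-ml-ioh change (and the similar gpio-pch and gpio-sta2x11 changes further down) follows one pattern: the generic-chip allocation helper now reports -ENOMEM instead of continuing with a NULL pointer, and the caller bails out through its existing error label. A standalone sketch of that pattern, with a stub standing in for irq_alloc_generic_chip():

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *irq_alloc_generic_chip_stub(int fail)
    {
        return fail ? NULL : malloc(16);
    }

    static int alloc_generic_chip(int fail)
    {
        void *gc = irq_alloc_generic_chip_stub(fail);

        if (!gc)
            return -ENOMEM; /* propagated to the caller's error path */

        free(gc);
        return 0;
    }

    int main(void)
    {
        printf("success path: %d\n", alloc_generic_chip(0));
        printf("failure path: %d\n", alloc_generic_chip(1));
        return 0;
    }
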
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index c6dadac70593..a6565e128f9e 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2014 Kamlakant Patel <[email protected]>
* Copyright (C) 2015-2016 Bamvor Jian Zhang <[email protected]>
+ * Copyright (C) 2017 Bartosz Golaszewski <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -27,10 +28,15 @@
#define GPIO_MOCKUP_NAME "gpio-mockup"
#define GPIO_MOCKUP_MAX_GC 10
+/*
+ * We're storing two values per chip: the GPIO base and the number
+ * of GPIO lines.
+ */
+#define GPIO_MOCKUP_MAX_RANGES (GPIO_MOCKUP_MAX_GC * 2)
enum {
- DIR_IN = 0,
- DIR_OUT,
+ GPIO_MOCKUP_DIR_OUT = 0,
+ GPIO_MOCKUP_DIR_IN = 1,
};
/*
@@ -41,6 +47,7 @@ enum {
struct gpio_mockup_line_status {
int dir;
bool value;
+ bool irq_enabled;
};
struct gpio_mockup_irq_context {
@@ -61,7 +68,7 @@ struct gpio_mockup_dbgfs_private {
int offset;
};
-static int gpio_mockup_ranges[GPIO_MOCKUP_MAX_GC << 1];
+static int gpio_mockup_ranges[GPIO_MOCKUP_MAX_RANGES];
static int gpio_mockup_params_nr;
module_param_array(gpio_mockup_ranges, int, &gpio_mockup_params_nr, 0400);
@@ -93,7 +100,7 @@ static int gpio_mockup_dirout(struct gpio_chip *gc, unsigned int offset,
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
gpio_mockup_set(gc, offset, value);
- chip->lines[offset].dir = DIR_OUT;
+ chip->lines[offset].dir = GPIO_MOCKUP_DIR_OUT;
return 0;
}
@@ -102,7 +109,7 @@ static int gpio_mockup_dirin(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
- chip->lines[offset].dir = DIR_IN;
+ chip->lines[offset].dir = GPIO_MOCKUP_DIR_IN;
return 0;
}
@@ -121,7 +128,7 @@ static int gpio_mockup_name_lines(struct device *dev,
char **names;
int i;
- names = devm_kzalloc(dev, sizeof(char *) * gc->ngpio, GFP_KERNEL);
+ names = devm_kcalloc(dev, gc->ngpio, sizeof(char *), GFP_KERNEL);
if (!names)
return -ENOMEM;
@@ -142,12 +149,21 @@ static int gpio_mockup_to_irq(struct gpio_chip *chip, unsigned int offset)
return chip->irq_base + offset;
}
-/*
- * While we should generally support irqmask and irqunmask, this driver is
- * for testing purposes only so we don't care.
- */
-static void gpio_mockup_irqmask(struct irq_data *d) { }
-static void gpio_mockup_irqunmask(struct irq_data *d) { }
+static void gpio_mockup_irqmask(struct irq_data *data)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
+
+ chip->lines[data->irq - gc->irq_base].irq_enabled = false;
+}
+
+static void gpio_mockup_irqunmask(struct irq_data *data)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
+
+ chip->lines[data->irq - gc->irq_base].irq_enabled = true;
+}
static struct irq_chip gpio_mockup_irqchip = {
.name = GPIO_MOCKUP_NAME,
@@ -178,6 +194,7 @@ static int gpio_mockup_irqchip_setup(struct device *dev,
for (i = 0; i < gc->ngpio; i++) {
irq_set_chip(irq_base + i, gc->irqchip);
+ irq_set_chip_data(irq_base + i, gc);
irq_set_handler(irq_base + i, &handle_simple_irq);
irq_modify_status(irq_base + i,
IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
@@ -197,8 +214,13 @@ static ssize_t gpio_mockup_event_write(struct file *file,
struct seq_file *sfile;
struct gpio_desc *desc;
struct gpio_chip *gc;
- int val;
- char buf;
+ int rv, val;
+
+ rv = kstrtoint_from_user(usr_buf, size, 0, &val);
+ if (rv)
+ return rv;
+ if (val != 0 && val != 1)
+ return -EINVAL;
sfile = file->private_data;
priv = sfile->private;
@@ -206,19 +228,11 @@ static ssize_t gpio_mockup_event_write(struct file *file,
chip = priv->chip;
gc = &chip->gc;
- if (copy_from_user(&buf, usr_buf, 1))
- return -EFAULT;
-
- if (buf == '0')
- val = 0;
- else if (buf == '1')
- val = 1;
- else
- return -EINVAL;
-
- gpiod_set_value_cansleep(desc, val);
- priv->chip->irq_ctx.irq = gc->irq_base + priv->offset;
- irq_work_queue(&priv->chip->irq_ctx.work);
+ if (chip->lines[priv->offset].irq_enabled) {
+ gpiod_set_value_cansleep(desc, val);
+ priv->chip->irq_ctx.irq = gc->irq_base + priv->offset;
+ irq_work_queue(&priv->chip->irq_ctx.work);
+ }
return size;
}
@@ -294,8 +308,8 @@ static int gpio_mockup_add(struct device *dev,
gc->get_direction = gpio_mockup_get_direction;
gc->to_irq = gpio_mockup_to_irq;
- chip->lines = devm_kzalloc(dev, sizeof(*chip->lines) * gc->ngpio,
- GFP_KERNEL);
+ chip->lines = devm_kcalloc(dev, gc->ngpio,
+ sizeof(*chip->lines), GFP_KERNEL);
if (!chip->lines)
return -ENOMEM;
@@ -321,23 +335,24 @@ static int gpio_mockup_add(struct device *dev,
static int gpio_mockup_probe(struct platform_device *pdev)
{
- struct gpio_mockup_chip *chips;
+ int ret, i, base, ngpio, num_chips;
struct device *dev = &pdev->dev;
- int ret, i, base, ngpio;
+ struct gpio_mockup_chip *chips;
char *chip_name;
- if (gpio_mockup_params_nr < 2)
+ if (gpio_mockup_params_nr < 2 || (gpio_mockup_params_nr % 2))
return -EINVAL;
- chips = devm_kzalloc(dev,
- sizeof(*chips) * (gpio_mockup_params_nr >> 1),
- GFP_KERNEL);
+ /* Each chip is described by two values. */
+ num_chips = gpio_mockup_params_nr / 2;
+
+ chips = devm_kcalloc(dev, num_chips, sizeof(*chips), GFP_KERNEL);
if (!chips)
return -ENOMEM;
platform_set_drvdata(pdev, chips);
- for (i = 0; i < gpio_mockup_params_nr >> 1; i++) {
+ for (i = 0; i < num_chips; i++) {
base = gpio_mockup_ranges[i * 2];
if (base == -1)
@@ -355,18 +370,16 @@ static int gpio_mockup_probe(struct platform_device *pdev)
ret = gpio_mockup_add(dev, &chips[i],
chip_name, base, ngpio);
} else {
- ret = -1;
+ ret = -EINVAL;
}
if (ret) {
- dev_err(dev, "gpio<%d..%d> add failed\n",
- base, base < 0 ? ngpio : base + ngpio);
+ dev_err(dev,
+ "adding gpiochip failed: %d (base: %d, ngpio: %d)\n",
+ ret, base, base < 0 ? ngpio : base + ngpio);
return ret;
}
-
- dev_info(dev, "gpio<%d..%d> add successful!",
- base, base + ngpio);
}
return 0;
@@ -420,5 +433,6 @@ module_exit(mock_device_exit);
MODULE_AUTHOR("Kamlakant Patel <[email protected]>");
MODULE_AUTHOR("Bamvor Jian Zhang <[email protected]>");
+MODULE_AUTHOR("Bartosz Golaszewski <[email protected]>");
MODULE_DESCRIPTION("GPIO Testing driver");
MODULE_LICENSE("GPL v2");
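
A standalone sketch of the module-parameter layout the reworked gpio-mockup probe() enforces: gpio_mockup_ranges carries <base, ngpio> pairs, so the element count must be even, each pair describes one chip, and a base of -1 requests a dynamically allocated base. The sample values below are arbitrary.

    #include <stdio.h>

    static int check_params(int nr)
    {
        if (nr < 2 || (nr % 2))
            return -1; /* the driver returns -EINVAL here */
        return nr / 2; /* number of chips to create */
    }

    int main(void)
    {
        int ranges[] = { -1, 32, 512, 16 }; /* arbitrary sample values */
        int num_chips = check_params(sizeof(ranges) / sizeof(ranges[0]));

        for (int i = 0; i < num_chips; i++)
            printf("chip %d: base=%d ngpio=%d\n", i,
                   ranges[i * 2], ranges[i * 2 + 1]);
        return 0;
    }
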
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index c83ea68be792..e338c3743562 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -33,21 +33,23 @@
* interrupts.
*/
+#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/slab.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
-#include <linux/io.h>
-#include <linux/of_irq.h>
+#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
-#include <linux/pwm.h>
-#include <linux/clk.h>
+#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/irqchip/chained_irq.h>
#include <linux/platform_device.h>
-#include <linux/bitops.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
#include "gpiolib.h"
@@ -87,6 +89,7 @@
#define MVEBU_GPIO_SOC_VARIANT_ORION 0x1
#define MVEBU_GPIO_SOC_VARIANT_MV78200 0x2
#define MVEBU_GPIO_SOC_VARIANT_ARMADAXP 0x3
+#define MVEBU_GPIO_SOC_VARIANT_A8K 0x4
#define MVEBU_MAX_GPIO_PER_BANK 32
@@ -106,9 +109,9 @@ struct mvebu_pwm {
struct mvebu_gpio_chip {
struct gpio_chip chip;
- spinlock_t lock;
- void __iomem *membase;
- void __iomem *percpu_membase;
+ struct regmap *regs;
+ u32 offset;
+ struct regmap *percpu_regs;
int irqbase;
struct irq_domain *domain;
int soc_variant;
@@ -130,92 +133,152 @@ struct mvebu_gpio_chip {
* Functions returning addresses of individual registers for a given
* GPIO controller.
*/
-static void __iomem *mvebu_gpioreg_out(struct mvebu_gpio_chip *mvchip)
-{
- return mvchip->membase + GPIO_OUT_OFF;
-}
-static void __iomem *mvebu_gpioreg_blink(struct mvebu_gpio_chip *mvchip)
+static void mvebu_gpioreg_edge_cause(struct mvebu_gpio_chip *mvchip,
+ struct regmap **map, unsigned int *offset)
{
- return mvchip->membase + GPIO_BLINK_EN_OFF;
-}
+ int cpu;
-static void __iomem *mvebu_gpioreg_blink_counter_select(struct mvebu_gpio_chip
- *mvchip)
-{
- return mvchip->membase + GPIO_BLINK_CNT_SELECT_OFF;
+ switch (mvchip->soc_variant) {
+ case MVEBU_GPIO_SOC_VARIANT_ORION:
+ case MVEBU_GPIO_SOC_VARIANT_MV78200:
+ case MVEBU_GPIO_SOC_VARIANT_A8K:
+ *map = mvchip->regs;
+ *offset = GPIO_EDGE_CAUSE_OFF + mvchip->offset;
+ break;
+ case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
+ cpu = smp_processor_id();
+ *map = mvchip->percpu_regs;
+ *offset = GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu);
+ break;
+ default:
+ BUG();
+ }
}
-static void __iomem *mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip)
+static u32
+mvebu_gpio_read_edge_cause(struct mvebu_gpio_chip *mvchip)
{
- return mvchip->membase + GPIO_IO_CONF_OFF;
-}
+ struct regmap *map;
+ unsigned int offset;
+ u32 val;
-static void __iomem *mvebu_gpioreg_in_pol(struct mvebu_gpio_chip *mvchip)
-{
- return mvchip->membase + GPIO_IN_POL_OFF;
+ mvebu_gpioreg_edge_cause(mvchip, &map, &offset);
+ regmap_read(map, offset, &val);
+
+ return val;
}
-static void __iomem *mvebu_gpioreg_data_in(struct mvebu_gpio_chip *mvchip)
+static void
+mvebu_gpio_write_edge_cause(struct mvebu_gpio_chip *mvchip, u32 val)
{
- return mvchip->membase + GPIO_DATA_IN_OFF;
+ struct regmap *map;
+ unsigned int offset;
+
+ mvebu_gpioreg_edge_cause(mvchip, &map, &offset);
+ regmap_write(map, offset, val);
}
-static void __iomem *mvebu_gpioreg_edge_cause(struct mvebu_gpio_chip *mvchip)
+static inline void
+mvebu_gpioreg_edge_mask(struct mvebu_gpio_chip *mvchip,
+ struct regmap **map, unsigned int *offset)
{
int cpu;
switch (mvchip->soc_variant) {
case MVEBU_GPIO_SOC_VARIANT_ORION:
+ case MVEBU_GPIO_SOC_VARIANT_A8K:
+ *map = mvchip->regs;
+ *offset = GPIO_EDGE_MASK_OFF + mvchip->offset;
+ break;
case MVEBU_GPIO_SOC_VARIANT_MV78200:
- return mvchip->membase + GPIO_EDGE_CAUSE_OFF;
+ cpu = smp_processor_id();
+ *map = mvchip->regs;
+ *offset = GPIO_EDGE_MASK_MV78200_OFF(cpu);
+ break;
case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
cpu = smp_processor_id();
- return mvchip->percpu_membase +
- GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu);
+ *map = mvchip->percpu_regs;
+ *offset = GPIO_EDGE_MASK_ARMADAXP_OFF(cpu);
+ break;
default:
BUG();
}
}
-static void __iomem *mvebu_gpioreg_edge_mask(struct mvebu_gpio_chip *mvchip)
+static u32
+mvebu_gpio_read_edge_mask(struct mvebu_gpio_chip *mvchip)
{
- int cpu;
+ struct regmap *map;
+ unsigned int offset;
+ u32 val;
- switch (mvchip->soc_variant) {
- case MVEBU_GPIO_SOC_VARIANT_ORION:
- return mvchip->membase + GPIO_EDGE_MASK_OFF;
- case MVEBU_GPIO_SOC_VARIANT_MV78200:
- cpu = smp_processor_id();
- return mvchip->membase + GPIO_EDGE_MASK_MV78200_OFF(cpu);
- case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
- cpu = smp_processor_id();
- return mvchip->percpu_membase +
- GPIO_EDGE_MASK_ARMADAXP_OFF(cpu);
- default:
- BUG();
- }
+ mvebu_gpioreg_edge_mask(mvchip, &map, &offset);
+ regmap_read(map, offset, &val);
+
+ return val;
}
-static void __iomem *mvebu_gpioreg_level_mask(struct mvebu_gpio_chip *mvchip)
+static void
+mvebu_gpio_write_edge_mask(struct mvebu_gpio_chip *mvchip, u32 val)
+{
+ struct regmap *map;
+ unsigned int offset;
+
+ mvebu_gpioreg_edge_mask(mvchip, &map, &offset);
+ regmap_write(map, offset, val);
+}
+
+static void
+mvebu_gpioreg_level_mask(struct mvebu_gpio_chip *mvchip,
+ struct regmap **map, unsigned int *offset)
{
int cpu;
switch (mvchip->soc_variant) {
case MVEBU_GPIO_SOC_VARIANT_ORION:
- return mvchip->membase + GPIO_LEVEL_MASK_OFF;
+ case MVEBU_GPIO_SOC_VARIANT_A8K:
+ *map = mvchip->regs;
+ *offset = GPIO_LEVEL_MASK_OFF + mvchip->offset;
+ break;
case MVEBU_GPIO_SOC_VARIANT_MV78200:
cpu = smp_processor_id();
- return mvchip->membase + GPIO_LEVEL_MASK_MV78200_OFF(cpu);
+ *map = mvchip->regs;
+ *offset = GPIO_LEVEL_MASK_MV78200_OFF(cpu);
+ break;
case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
cpu = smp_processor_id();
- return mvchip->percpu_membase +
- GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu);
+ *map = mvchip->percpu_regs;
+ *offset = GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu);
+ break;
default:
BUG();
}
}
+static u32
+mvebu_gpio_read_level_mask(struct mvebu_gpio_chip *mvchip)
+{
+ struct regmap *map;
+ unsigned int offset;
+ u32 val;
+
+ mvebu_gpioreg_level_mask(mvchip, &map, &offset);
+ regmap_read(map, offset, &val);
+
+ return val;
+}
+
+static void
+mvebu_gpio_write_level_mask(struct mvebu_gpio_chip *mvchip, u32 val)
+{
+ struct regmap *map;
+ unsigned int offset;
+
+ mvebu_gpioreg_level_mask(mvchip, &map, &offset);
+ regmap_write(map, offset, val);
+}
+
/*
* Functions returning addresses of individual registers for a given
* PWM controller.
@@ -236,17 +299,9 @@ static void __iomem *mvebu_pwmreg_blink_off_duration(struct mvebu_pwm *mvpwm)
static void mvebu_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
{
struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip);
- unsigned long flags;
- u32 u;
- spin_lock_irqsave(&mvchip->lock, flags);
- u = readl_relaxed(mvebu_gpioreg_out(mvchip));
- if (value)
- u |= BIT(pin);
- else
- u &= ~BIT(pin);
- writel_relaxed(u, mvebu_gpioreg_out(mvchip));
- spin_unlock_irqrestore(&mvchip->lock, flags);
+ regmap_update_bits(mvchip->regs, GPIO_OUT_OFF + mvchip->offset,
+ BIT(pin), value ? BIT(pin) : 0);
}
static int mvebu_gpio_get(struct gpio_chip *chip, unsigned int pin)
@@ -254,11 +309,18 @@ static int mvebu_gpio_get(struct gpio_chip *chip, unsigned int pin)
struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip);
u32 u;
- if (readl_relaxed(mvebu_gpioreg_io_conf(mvchip)) & BIT(pin)) {
- u = readl_relaxed(mvebu_gpioreg_data_in(mvchip)) ^
- readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
+ regmap_read(mvchip->regs, GPIO_IO_CONF_OFF + mvchip->offset, &u);
+
+ if (u & BIT(pin)) {
+ u32 data_in, in_pol;
+
+ regmap_read(mvchip->regs, GPIO_DATA_IN_OFF + mvchip->offset,
+ &data_in);
+ regmap_read(mvchip->regs, GPIO_IN_POL_OFF + mvchip->offset,
+ &in_pol);
+ u = data_in ^ in_pol;
} else {
- u = readl_relaxed(mvebu_gpioreg_out(mvchip));
+ regmap_read(mvchip->regs, GPIO_OUT_OFF + mvchip->offset, &u);
}
return (u >> pin) & 1;
@@ -268,25 +330,15 @@ static void mvebu_gpio_blink(struct gpio_chip *chip, unsigned int pin,
int value)
{
struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip);
- unsigned long flags;
- u32 u;
- spin_lock_irqsave(&mvchip->lock, flags);
- u = readl_relaxed(mvebu_gpioreg_blink(mvchip));
- if (value)
- u |= BIT(pin);
- else
- u &= ~BIT(pin);
- writel_relaxed(u, mvebu_gpioreg_blink(mvchip));
- spin_unlock_irqrestore(&mvchip->lock, flags);
+ regmap_update_bits(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset,
+ BIT(pin), value ? BIT(pin) : 0);
}
static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned int pin)
{
struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip);
- unsigned long flags;
int ret;
- u32 u;
/*
* Check with the pinctrl driver whether this pin is usable as
@@ -296,11 +348,8 @@ static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned int pin)
if (ret)
return ret;
- spin_lock_irqsave(&mvchip->lock, flags);
- u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip));
- u |= BIT(pin);
- writel_relaxed(u, mvebu_gpioreg_io_conf(mvchip));
- spin_unlock_irqrestore(&mvchip->lock, flags);
+ regmap_update_bits(mvchip->regs, GPIO_IO_CONF_OFF + mvchip->offset,
+ BIT(pin), BIT(pin));
return 0;
}
@@ -309,9 +358,7 @@ static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned int pin,
int value)
{
struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip);
- unsigned long flags;
int ret;
- u32 u;
/*
* Check with the pinctrl driver whether this pin is usable as
@@ -324,11 +371,8 @@ static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned int pin,
mvebu_gpio_blink(chip, pin, 0);
mvebu_gpio_set(chip, pin, value);
- spin_lock_irqsave(&mvchip->lock, flags);
- u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip));
- u &= ~BIT(pin);
- writel_relaxed(u, mvebu_gpioreg_io_conf(mvchip));
- spin_unlock_irqrestore(&mvchip->lock, flags);
+ regmap_update_bits(mvchip->regs, GPIO_IO_CONF_OFF + mvchip->offset,
+ BIT(pin), 0);
return 0;
}
@@ -350,7 +394,7 @@ static void mvebu_gpio_irq_ack(struct irq_data *d)
u32 mask = d->mask;
irq_gc_lock(gc);
- writel_relaxed(~mask, mvebu_gpioreg_edge_cause(mvchip));
+ mvebu_gpio_write_edge_cause(mvchip, ~mask);
irq_gc_unlock(gc);
}
@@ -363,8 +407,7 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
irq_gc_lock(gc);
ct->mask_cache_priv &= ~mask;
-
- writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
+ mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv);
irq_gc_unlock(gc);
}
@@ -377,7 +420,7 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
irq_gc_lock(gc);
ct->mask_cache_priv |= mask;
- writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
+ mvebu_gpio_write_edge_mask(mvchip, ct->mask_cache_priv);
irq_gc_unlock(gc);
}
@@ -390,7 +433,7 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
irq_gc_lock(gc);
ct->mask_cache_priv &= ~mask;
- writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
+ mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv);
irq_gc_unlock(gc);
}
@@ -403,7 +446,7 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
irq_gc_lock(gc);
ct->mask_cache_priv |= mask;
- writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
+ mvebu_gpio_write_level_mask(mvchip, ct->mask_cache_priv);
irq_gc_unlock(gc);
}
@@ -443,8 +486,8 @@ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
pin = d->hwirq;
- u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip)) & BIT(pin);
- if (!u)
+ regmap_read(mvchip->regs, GPIO_IO_CONF_OFF + mvchip->offset, &u);
+ if ((u & BIT(pin)) == 0)
return -EINVAL;
type &= IRQ_TYPE_SENSE_MASK;
@@ -462,31 +505,35 @@ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
switch (type) {
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_LEVEL_HIGH:
- u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
- u &= ~BIT(pin);
- writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
+ regmap_update_bits(mvchip->regs,
+ GPIO_IN_POL_OFF + mvchip->offset,
+ BIT(pin), 0);
break;
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_LEVEL_LOW:
- u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
- u |= BIT(pin);
- writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
+ regmap_update_bits(mvchip->regs,
+ GPIO_IN_POL_OFF + mvchip->offset,
+ BIT(pin), BIT(pin));
break;
case IRQ_TYPE_EDGE_BOTH: {
- u32 v;
+ u32 data_in, in_pol, val;
- v = readl_relaxed(mvebu_gpioreg_in_pol(mvchip)) ^
- readl_relaxed(mvebu_gpioreg_data_in(mvchip));
+ regmap_read(mvchip->regs,
+ GPIO_IN_POL_OFF + mvchip->offset, &in_pol);
+ regmap_read(mvchip->regs,
+ GPIO_DATA_IN_OFF + mvchip->offset, &data_in);
/*
* set initial polarity based on current input level
*/
- u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
- if (v & BIT(pin))
- u |= BIT(pin); /* falling */
+ if ((data_in ^ in_pol) & BIT(pin))
+ val = BIT(pin); /* falling */
else
- u &= ~BIT(pin); /* rising */
- writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
+ val = 0; /* rising */
+
+ regmap_update_bits(mvchip->regs,
+ GPIO_IN_POL_OFF + mvchip->offset,
+ BIT(pin), val);
break;
}
}
@@ -497,7 +544,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
{
struct mvebu_gpio_chip *mvchip = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- u32 cause, type;
+ u32 cause, type, data_in, level_mask, edge_cause, edge_mask;
int i;
if (mvchip == NULL)
@@ -505,10 +552,12 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
chained_irq_enter(chip, desc);
- cause = readl_relaxed(mvebu_gpioreg_data_in(mvchip)) &
- readl_relaxed(mvebu_gpioreg_level_mask(mvchip));
- cause |= readl_relaxed(mvebu_gpioreg_edge_cause(mvchip)) &
- readl_relaxed(mvebu_gpioreg_edge_mask(mvchip));
+ regmap_read(mvchip->regs, GPIO_DATA_IN_OFF + mvchip->offset, &data_in);
+ level_mask = mvebu_gpio_read_level_mask(mvchip);
+ edge_cause = mvebu_gpio_read_edge_cause(mvchip);
+ edge_mask = mvebu_gpio_read_edge_mask(mvchip);
+
+ cause = (data_in & level_mask) | (edge_cause & edge_mask);
for (i = 0; i < mvchip->chip.ngpio; i++) {
int irq;
@@ -523,9 +572,13 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
/* Swap polarity (race with GPIO line) */
u32 polarity;
- polarity = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
+ regmap_read(mvchip->regs,
+ GPIO_IN_POL_OFF + mvchip->offset,
+ &polarity);
polarity ^= BIT(i);
- writel_relaxed(polarity, mvebu_gpioreg_in_pol(mvchip));
+ regmap_write(mvchip->regs,
+ GPIO_IN_POL_OFF + mvchip->offset,
+ polarity);
}
generic_handle_irq(irq);
@@ -628,7 +681,7 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
state->period = 1;
}
- u = readl_relaxed(mvebu_gpioreg_blink(mvchip));
+ regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u);
if (u)
state->enabled = true;
else
@@ -691,8 +744,8 @@ static void __maybe_unused mvebu_pwm_suspend(struct mvebu_gpio_chip *mvchip)
{
struct mvebu_pwm *mvpwm = mvchip->mvpwm;
- mvpwm->blink_select =
- readl_relaxed(mvebu_gpioreg_blink_counter_select(mvchip));
+ regmap_read(mvchip->regs, GPIO_BLINK_CNT_SELECT_OFF + mvchip->offset,
+ &mvpwm->blink_select);
mvpwm->blink_on_duration =
readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm));
mvpwm->blink_off_duration =
@@ -703,8 +756,8 @@ static void __maybe_unused mvebu_pwm_resume(struct mvebu_gpio_chip *mvchip)
{
struct mvebu_pwm *mvpwm = mvchip->mvpwm;
- writel_relaxed(mvpwm->blink_select,
- mvebu_gpioreg_blink_counter_select(mvchip));
+ regmap_write(mvchip->regs, GPIO_BLINK_CNT_SELECT_OFF + mvchip->offset,
+ mvpwm->blink_select);
writel_relaxed(mvpwm->blink_on_duration,
mvebu_pwmreg_blink_on_duration(mvpwm));
writel_relaxed(mvpwm->blink_off_duration,
@@ -747,7 +800,8 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
set = U32_MAX;
else
return -EINVAL;
- writel_relaxed(set, mvebu_gpioreg_blink_counter_select(mvchip));
+ regmap_write(mvchip->regs,
+ GPIO_BLINK_CNT_SELECT_OFF + mvchip->offset, set);
mvpwm = devm_kzalloc(dev, sizeof(struct mvebu_pwm), GFP_KERNEL);
if (!mvpwm)
@@ -790,14 +844,14 @@ static void mvebu_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
u32 out, io_conf, blink, in_pol, data_in, cause, edg_msk, lvl_msk;
int i;
- out = readl_relaxed(mvebu_gpioreg_out(mvchip));
- io_conf = readl_relaxed(mvebu_gpioreg_io_conf(mvchip));
- blink = readl_relaxed(mvebu_gpioreg_blink(mvchip));
- in_pol = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
- data_in = readl_relaxed(mvebu_gpioreg_data_in(mvchip));
- cause = readl_relaxed(mvebu_gpioreg_edge_cause(mvchip));
- edg_msk = readl_relaxed(mvebu_gpioreg_edge_mask(mvchip));
- lvl_msk = readl_relaxed(mvebu_gpioreg_level_mask(mvchip));
+ regmap_read(mvchip->regs, GPIO_OUT_OFF + mvchip->offset, &out);
+ regmap_read(mvchip->regs, GPIO_IO_CONF_OFF + mvchip->offset, &io_conf);
+ regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &blink);
+ regmap_read(mvchip->regs, GPIO_IN_POL_OFF + mvchip->offset, &in_pol);
+ regmap_read(mvchip->regs, GPIO_DATA_IN_OFF + mvchip->offset, &data_in);
+ cause = mvebu_gpio_read_edge_cause(mvchip);
+ edg_msk = mvebu_gpio_read_edge_mask(mvchip);
+ lvl_msk = mvebu_gpio_read_level_mask(mvchip);
for (i = 0; i < chip->ngpio; i++) {
const char *label;
@@ -856,6 +910,10 @@ static const struct of_device_id mvebu_gpio_of_match[] = {
.data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
},
{
+ .compatible = "marvell,armada-8k-gpio",
+ .data = (void *) MVEBU_GPIO_SOC_VARIANT_A8K,
+ },
+ {
/* sentinel */
},
};
@@ -865,36 +923,41 @@ static int mvebu_gpio_suspend(struct platform_device *pdev, pm_message_t state)
struct mvebu_gpio_chip *mvchip = platform_get_drvdata(pdev);
int i;
- mvchip->out_reg = readl(mvebu_gpioreg_out(mvchip));
- mvchip->io_conf_reg = readl(mvebu_gpioreg_io_conf(mvchip));
- mvchip->blink_en_reg = readl(mvebu_gpioreg_blink(mvchip));
- mvchip->in_pol_reg = readl(mvebu_gpioreg_in_pol(mvchip));
+ regmap_read(mvchip->regs, GPIO_OUT_OFF + mvchip->offset,
+ &mvchip->out_reg);
+ regmap_read(mvchip->regs, GPIO_IO_CONF_OFF + mvchip->offset,
+ &mvchip->io_conf_reg);
+ regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset,
+ &mvchip->blink_en_reg);
+ regmap_read(mvchip->regs, GPIO_IN_POL_OFF + mvchip->offset,
+ &mvchip->in_pol_reg);
switch (mvchip->soc_variant) {
case MVEBU_GPIO_SOC_VARIANT_ORION:
- mvchip->edge_mask_regs[0] =
- readl(mvchip->membase + GPIO_EDGE_MASK_OFF);
- mvchip->level_mask_regs[0] =
- readl(mvchip->membase + GPIO_LEVEL_MASK_OFF);
+ case MVEBU_GPIO_SOC_VARIANT_A8K:
+ regmap_read(mvchip->regs, GPIO_EDGE_MASK_OFF + mvchip->offset,
+ &mvchip->edge_mask_regs[0]);
+ regmap_read(mvchip->regs, GPIO_LEVEL_MASK_OFF + mvchip->offset,
+ &mvchip->level_mask_regs[0]);
break;
case MVEBU_GPIO_SOC_VARIANT_MV78200:
for (i = 0; i < 2; i++) {
- mvchip->edge_mask_regs[i] =
- readl(mvchip->membase +
- GPIO_EDGE_MASK_MV78200_OFF(i));
- mvchip->level_mask_regs[i] =
- readl(mvchip->membase +
- GPIO_LEVEL_MASK_MV78200_OFF(i));
+ regmap_read(mvchip->regs,
+ GPIO_EDGE_MASK_MV78200_OFF(i),
+ &mvchip->edge_mask_regs[i]);
+ regmap_read(mvchip->regs,
+ GPIO_LEVEL_MASK_MV78200_OFF(i),
+ &mvchip->level_mask_regs[i]);
}
break;
case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
for (i = 0; i < 4; i++) {
- mvchip->edge_mask_regs[i] =
- readl(mvchip->membase +
- GPIO_EDGE_MASK_ARMADAXP_OFF(i));
- mvchip->level_mask_regs[i] =
- readl(mvchip->membase +
- GPIO_LEVEL_MASK_ARMADAXP_OFF(i));
+ regmap_read(mvchip->regs,
+ GPIO_EDGE_MASK_ARMADAXP_OFF(i),
+ &mvchip->edge_mask_regs[i]);
+ regmap_read(mvchip->regs,
+ GPIO_LEVEL_MASK_ARMADAXP_OFF(i),
+ &mvchip->level_mask_regs[i]);
}
break;
default:
@@ -912,35 +975,41 @@ static int mvebu_gpio_resume(struct platform_device *pdev)
struct mvebu_gpio_chip *mvchip = platform_get_drvdata(pdev);
int i;
- writel(mvchip->out_reg, mvebu_gpioreg_out(mvchip));
- writel(mvchip->io_conf_reg, mvebu_gpioreg_io_conf(mvchip));
- writel(mvchip->blink_en_reg, mvebu_gpioreg_blink(mvchip));
- writel(mvchip->in_pol_reg, mvebu_gpioreg_in_pol(mvchip));
+ regmap_write(mvchip->regs, GPIO_OUT_OFF + mvchip->offset,
+ mvchip->out_reg);
+ regmap_write(mvchip->regs, GPIO_IO_CONF_OFF + mvchip->offset,
+ mvchip->io_conf_reg);
+ regmap_write(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset,
+ mvchip->blink_en_reg);
+ regmap_write(mvchip->regs, GPIO_IN_POL_OFF + mvchip->offset,
+ mvchip->in_pol_reg);
switch (mvchip->soc_variant) {
case MVEBU_GPIO_SOC_VARIANT_ORION:
- writel(mvchip->edge_mask_regs[0],
- mvchip->membase + GPIO_EDGE_MASK_OFF);
- writel(mvchip->level_mask_regs[0],
- mvchip->membase + GPIO_LEVEL_MASK_OFF);
+ case MVEBU_GPIO_SOC_VARIANT_A8K:
+ regmap_write(mvchip->regs, GPIO_EDGE_MASK_OFF + mvchip->offset,
+ mvchip->edge_mask_regs[0]);
+ regmap_write(mvchip->regs, GPIO_LEVEL_MASK_OFF + mvchip->offset,
+ mvchip->level_mask_regs[0]);
break;
case MVEBU_GPIO_SOC_VARIANT_MV78200:
for (i = 0; i < 2; i++) {
- writel(mvchip->edge_mask_regs[i],
- mvchip->membase + GPIO_EDGE_MASK_MV78200_OFF(i));
- writel(mvchip->level_mask_regs[i],
- mvchip->membase +
- GPIO_LEVEL_MASK_MV78200_OFF(i));
+ regmap_write(mvchip->regs,
+ GPIO_EDGE_MASK_MV78200_OFF(i),
+ mvchip->edge_mask_regs[i]);
+ regmap_write(mvchip->regs,
+ GPIO_LEVEL_MASK_MV78200_OFF(i),
+ mvchip->level_mask_regs[i]);
}
break;
case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
for (i = 0; i < 4; i++) {
- writel(mvchip->edge_mask_regs[i],
- mvchip->membase +
- GPIO_EDGE_MASK_ARMADAXP_OFF(i));
- writel(mvchip->level_mask_regs[i],
- mvchip->membase +
- GPIO_LEVEL_MASK_ARMADAXP_OFF(i));
+ regmap_write(mvchip->regs,
+ GPIO_EDGE_MASK_ARMADAXP_OFF(i),
+ mvchip->edge_mask_regs[i]);
+ regmap_write(mvchip->regs,
+ GPIO_LEVEL_MASK_ARMADAXP_OFF(i),
+ mvchip->level_mask_regs[i]);
}
break;
default:
@@ -953,12 +1022,73 @@ static int mvebu_gpio_resume(struct platform_device *pdev)
return 0;
}
+static const struct regmap_config mvebu_gpio_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
+static int mvebu_gpio_probe_raw(struct platform_device *pdev,
+ struct mvebu_gpio_chip *mvchip)
+{
+ struct resource *res;
+ void __iomem *base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ mvchip->regs = devm_regmap_init_mmio(&pdev->dev, base,
+ &mvebu_gpio_regmap_config);
+ if (IS_ERR(mvchip->regs))
+ return PTR_ERR(mvchip->regs);
+
+ /*
+ * For the legacy SoCs, the regmap directly maps to the GPIO
+ * registers, so no offset is needed.
+ */
+ mvchip->offset = 0;
+
+ /*
+ * The Armada XP has a second range of registers for the
+ * per-CPU registers
+ */
+ if (mvchip->soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ mvchip->percpu_regs =
+ devm_regmap_init_mmio(&pdev->dev, base,
+ &mvebu_gpio_regmap_config);
+ if (IS_ERR(mvchip->percpu_regs))
+ return PTR_ERR(mvchip->percpu_regs);
+ }
+
+ return 0;
+}
+
+static int mvebu_gpio_probe_syscon(struct platform_device *pdev,
+ struct mvebu_gpio_chip *mvchip)
+{
+ mvchip->regs = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(mvchip->regs))
+ return PTR_ERR(mvchip->regs);
+
+ if (of_property_read_u32(pdev->dev.of_node, "offset", &mvchip->offset))
+ return -EINVAL;
+
+ return 0;
+}
+
static int mvebu_gpio_probe(struct platform_device *pdev)
{
struct mvebu_gpio_chip *mvchip;
const struct of_device_id *match;
struct device_node *np = pdev->dev.of_node;
- struct resource *res;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
unsigned int ngpios;
@@ -1016,53 +1146,47 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
mvchip->chip.of_node = np;
mvchip->chip.dbg_show = mvebu_gpio_dbg_show;
- spin_lock_init(&mvchip->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mvchip->membase = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mvchip->membase))
- return PTR_ERR(mvchip->membase);
+ if (soc_variant == MVEBU_GPIO_SOC_VARIANT_A8K)
+ err = mvebu_gpio_probe_syscon(pdev, mvchip);
+ else
+ err = mvebu_gpio_probe_raw(pdev, mvchip);
- /*
- * The Armada XP has a second range of registers for the
- * per-CPU registers
- */
- if (soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- mvchip->percpu_membase = devm_ioremap_resource(&pdev->dev,
- res);
- if (IS_ERR(mvchip->percpu_membase))
- return PTR_ERR(mvchip->percpu_membase);
- }
+ if (err)
+ return err;
/*
* Mask and clear GPIO interrupts.
*/
switch (soc_variant) {
case MVEBU_GPIO_SOC_VARIANT_ORION:
- writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF);
- writel_relaxed(0, mvchip->membase + GPIO_EDGE_MASK_OFF);
- writel_relaxed(0, mvchip->membase + GPIO_LEVEL_MASK_OFF);
+ case MVEBU_GPIO_SOC_VARIANT_A8K:
+ regmap_write(mvchip->regs,
+ GPIO_EDGE_CAUSE_OFF + mvchip->offset, 0);
+ regmap_write(mvchip->regs,
+ GPIO_EDGE_MASK_OFF + mvchip->offset, 0);
+ regmap_write(mvchip->regs,
+ GPIO_LEVEL_MASK_OFF + mvchip->offset, 0);
break;
case MVEBU_GPIO_SOC_VARIANT_MV78200:
- writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF);
+ regmap_write(mvchip->regs, GPIO_EDGE_CAUSE_OFF, 0);
for (cpu = 0; cpu < 2; cpu++) {
- writel_relaxed(0, mvchip->membase +
- GPIO_EDGE_MASK_MV78200_OFF(cpu));
- writel_relaxed(0, mvchip->membase +
- GPIO_LEVEL_MASK_MV78200_OFF(cpu));
+ regmap_write(mvchip->regs,
+ GPIO_EDGE_MASK_MV78200_OFF(cpu), 0);
+ regmap_write(mvchip->regs,
+ GPIO_LEVEL_MASK_MV78200_OFF(cpu), 0);
}
break;
case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
- writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF);
- writel_relaxed(0, mvchip->membase + GPIO_EDGE_MASK_OFF);
- writel_relaxed(0, mvchip->membase + GPIO_LEVEL_MASK_OFF);
+ regmap_write(mvchip->regs, GPIO_EDGE_CAUSE_OFF, 0);
+ regmap_write(mvchip->regs, GPIO_EDGE_MASK_OFF, 0);
+ regmap_write(mvchip->regs, GPIO_LEVEL_MASK_OFF, 0);
for (cpu = 0; cpu < 4; cpu++) {
- writel_relaxed(0, mvchip->percpu_membase +
- GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu));
- writel_relaxed(0, mvchip->percpu_membase +
- GPIO_EDGE_MASK_ARMADAXP_OFF(cpu));
- writel_relaxed(0, mvchip->percpu_membase +
- GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu));
+ regmap_write(mvchip->percpu_regs,
+ GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu), 0);
+ regmap_write(mvchip->percpu_regs,
+ GPIO_EDGE_MASK_ARMADAXP_OFF(cpu), 0);
+ regmap_write(mvchip->percpu_regs,
+ GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu), 0);
}
break;
default:
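
Most of the mvebu conversion above replaces an open-coded spinlock plus readl_relaxed/writel_relaxed read-modify-write with regmap_update_bits(), which performs the same masked update under the regmap's own lock. A standalone sketch of that operation, with a plain variable standing in for the memory-mapped register:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_reg;

    static void update_bits(uint32_t *reg, uint32_t mask, uint32_t val)
    {
        /* In the kernel, regmap_update_bits() does this under its lock. */
        *reg = (*reg & ~mask) | (val & mask);
    }

    int main(void)
    {
        update_bits(&fake_reg, 1u << 3, 1u << 3); /* drive pin 3 high */
        update_bits(&fake_reg, 1u << 5, 0);       /* drive pin 5 low */
        printf("register now 0x%08x\n", (unsigned int)fake_reg);
        return 0;
    }
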
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 8ddf9302ce3b..a4fd78b9c0e4 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -20,7 +20,7 @@
#include <linux/gpio.h>
#include <linux/i2c.h>
-#include <linux/i2c/pcf857x.h>
+#include <linux/platform_data/pcf857x.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 71bc6da11337..f6600f8ada52 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -331,14 +331,18 @@ static irqreturn_t pch_gpio_handler(int irq, void *dev_id)
return ret;
}
-static void pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
- unsigned int irq_start, unsigned int num)
+static int pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
+ unsigned int irq_start,
+ unsigned int num)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
gc = irq_alloc_generic_chip("pch_gpio", 1, irq_start, chip->base,
handle_simple_irq);
+ if (!gc)
+ return -ENOMEM;
+
gc->private = chip;
ct = gc->chip_types;
@@ -349,6 +353,8 @@ static void pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+
+ return 0;
}
static int pch_gpio_probe(struct pci_dev *pdev,
@@ -425,7 +431,10 @@ static int pch_gpio_probe(struct pci_dev *pdev,
goto err_request_irq;
}
- pch_gpio_alloc_generic_chip(chip, irq_base, gpio_pins[chip->ioh]);
+ ret = pch_gpio_alloc_generic_chip(chip, irq_base,
+ gpio_pins[chip->ioh]);
+ if (ret)
+ goto err_request_irq;
end:
return 0;
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 31ad288846af..4a1536a050bc 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -344,6 +344,10 @@ static const struct gpio_rcar_info gpio_rcar_info_gen2 = {
static const struct of_device_id gpio_rcar_of_table[] = {
{
+ .compatible = "renesas,gpio-r8a7743",
+ /* RZ/G1 GPIO is identical to R-Car Gen2. */
+ .data = &gpio_rcar_info_gen2,
+ }, {
.compatible = "renesas,gpio-r8a7790",
.data = &gpio_rcar_info_gen2,
}, {
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 39df0620fa38..9e705162da8d 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -320,13 +320,16 @@ static irqreturn_t gsta_gpio_handler(int irq, void *dev_id)
return ret;
}
-static void gsta_alloc_irq_chip(struct gsta_gpio *chip)
+static int gsta_alloc_irq_chip(struct gsta_gpio *chip)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
gc = irq_alloc_generic_chip(KBUILD_MODNAME, 1, chip->irq_base,
chip->reg_base, handle_simple_irq);
+ if (!gc)
+ return -ENOMEM;
+
gc->private = chip;
ct = gc->chip_types;
@@ -350,6 +353,8 @@ static void gsta_alloc_irq_chip(struct gsta_gpio *chip)
}
gc->irq_cnt = i - gc->irq_base;
}
+
+ return 0;
}
/* The platform device used here is instantiated by the MFD device */
@@ -400,7 +405,10 @@ static int gsta_probe(struct platform_device *dev)
return err;
}
chip->irq_base = err;
- gsta_alloc_irq_chip(chip);
+
+ err = gsta_alloc_irq_chip(chip);
+ if (err)
+ return err;
err = devm_request_irq(&dev->dev, pdev->irq, gsta_gpio_handler,
IRQF_SHARED, KBUILD_MODNAME, chip);
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index 7b1bc20be209..85341eab795d 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -108,19 +108,14 @@ struct wcove_gpio {
static inline unsigned int to_reg(int gpio, enum ctrl_register reg_type)
{
unsigned int reg;
- int bank;
- if (gpio < BANK0_NR_PINS)
- bank = 0;
- else if (gpio < BANK0_NR_PINS + BANK1_NR_PINS)
- bank = 1;
- else
- bank = 2;
+ if (gpio >= WCOVE_GPIO_NUM)
+ return -EOPNOTSUPP;
if (reg_type == CTRL_IN)
- reg = GPIO_IN_CTRL_BASE + bank;
+ reg = GPIO_IN_CTRL_BASE + gpio;
else
- reg = GPIO_OUT_CTRL_BASE + bank;
+ reg = GPIO_OUT_CTRL_BASE + gpio;
return reg;
}
@@ -145,7 +140,10 @@ static void wcove_update_irq_mask(struct wcove_gpio *wg, int gpio)
static void wcove_update_irq_ctrl(struct wcove_gpio *wg, int gpio)
{
- unsigned int reg = to_reg(gpio, CTRL_IN);
+ int reg = to_reg(gpio, CTRL_IN);
+
+ if (reg < 0)
+ return;
regmap_update_bits(wg->regmap, reg, CTLI_INTCNT_BE, wg->intcnt);
}
@@ -153,27 +151,36 @@ static void wcove_update_irq_ctrl(struct wcove_gpio *wg, int gpio)
static int wcove_gpio_dir_in(struct gpio_chip *chip, unsigned int gpio)
{
struct wcove_gpio *wg = gpiochip_get_data(chip);
+ int reg = to_reg(gpio, CTRL_OUT);
+
+ if (reg < 0)
+ return 0;
- return regmap_write(wg->regmap, to_reg(gpio, CTRL_OUT),
- CTLO_INPUT_SET);
+ return regmap_write(wg->regmap, reg, CTLO_INPUT_SET);
}
static int wcove_gpio_dir_out(struct gpio_chip *chip, unsigned int gpio,
int value)
{
struct wcove_gpio *wg = gpiochip_get_data(chip);
+ int reg = to_reg(gpio, CTRL_OUT);
- return regmap_write(wg->regmap, to_reg(gpio, CTRL_OUT),
- CTLO_OUTPUT_SET | value);
+ if (reg < 0)
+ return 0;
+
+ return regmap_write(wg->regmap, reg, CTLO_OUTPUT_SET | value);
}
static int wcove_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
{
struct wcove_gpio *wg = gpiochip_get_data(chip);
unsigned int val;
- int ret;
+ int ret, reg = to_reg(gpio, CTRL_OUT);
+
+ if (reg < 0)
+ return 0;
- ret = regmap_read(wg->regmap, to_reg(gpio, CTRL_OUT), &val);
+ ret = regmap_read(wg->regmap, reg, &val);
if (ret)
return ret;
@@ -184,9 +191,12 @@ static int wcove_gpio_get(struct gpio_chip *chip, unsigned int gpio)
{
struct wcove_gpio *wg = gpiochip_get_data(chip);
unsigned int val;
- int ret;
+ int ret, reg = to_reg(gpio, CTRL_IN);
- ret = regmap_read(wg->regmap, to_reg(gpio, CTRL_IN), &val);
+ if (reg < 0)
+ return 0;
+
+ ret = regmap_read(wg->regmap, reg, &val);
if (ret)
return ret;
@@ -197,25 +207,33 @@ static void wcove_gpio_set(struct gpio_chip *chip,
unsigned int gpio, int value)
{
struct wcove_gpio *wg = gpiochip_get_data(chip);
+ int reg = to_reg(gpio, CTRL_OUT);
+
+ if (reg < 0)
+ return;
if (value)
- regmap_update_bits(wg->regmap, to_reg(gpio, CTRL_OUT), 1, 1);
+ regmap_update_bits(wg->regmap, reg, 1, 1);
else
- regmap_update_bits(wg->regmap, to_reg(gpio, CTRL_OUT), 1, 0);
+ regmap_update_bits(wg->regmap, reg, 1, 0);
}
static int wcove_gpio_set_config(struct gpio_chip *chip, unsigned int gpio,
unsigned long config)
{
struct wcove_gpio *wg = gpiochip_get_data(chip);
+ int reg = to_reg(gpio, CTRL_OUT);
+
+ if (reg < 0)
+ return 0;
switch (pinconf_to_config_param(config)) {
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- return regmap_update_bits(wg->regmap, to_reg(gpio, CTRL_OUT),
- CTLO_DRV_MASK, CTLO_DRV_OD);
+ return regmap_update_bits(wg->regmap, reg, CTLO_DRV_MASK,
+ CTLO_DRV_OD);
case PIN_CONFIG_DRIVE_PUSH_PULL:
- return regmap_update_bits(wg->regmap, to_reg(gpio, CTRL_OUT),
- CTLO_DRV_MASK, CTLO_DRV_CMOS);
+ return regmap_update_bits(wg->regmap, reg, CTLO_DRV_MASK,
+ CTLO_DRV_CMOS);
default:
break;
}
@@ -228,6 +246,9 @@ static int wcove_irq_type(struct irq_data *data, unsigned int type)
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct wcove_gpio *wg = gpiochip_get_data(chip);
+ if (data->hwirq >= WCOVE_GPIO_NUM)
+ return 0;
+
switch (type) {
case IRQ_TYPE_NONE:
wg->intcnt = CTLI_INTCNT_DIS;
@@ -278,6 +299,9 @@ static void wcove_irq_unmask(struct irq_data *data)
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct wcove_gpio *wg = gpiochip_get_data(chip);
+ if (data->hwirq >= WCOVE_GPIO_NUM)
+ return;
+
wg->set_irq_mask = false;
wg->update |= UPDATE_IRQ_MASK;
}
@@ -287,6 +311,9 @@ static void wcove_irq_mask(struct irq_data *data)
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct wcove_gpio *wg = gpiochip_get_data(chip);
+ if (data->hwirq >= WCOVE_GPIO_NUM)
+ return;
+
wg->set_irq_mask = true;
wg->update |= UPDATE_IRQ_MASK;
}
@@ -401,7 +428,7 @@ static int wcove_gpio_probe(struct platform_device *pdev)
if (!wg)
return -ENOMEM;
- wg->regmap_irq_chip = pmic->irq_chip_data_level2;
+ wg->regmap_irq_chip = pmic->irq_chip_data;
platform_set_drvdata(pdev, wg);
@@ -449,6 +476,18 @@ static int wcove_gpio_probe(struct platform_device *pdev)
gpiochip_set_nested_irqchip(&wg->chip, &wcove_irqchip, virq);
+ /* Enable GPIO0 interrupts */
+ ret = regmap_update_bits(wg->regmap, IRQ_MASK_BASE, GPIO_IRQ0_MASK,
+ 0x00);
+ if (ret)
+ return ret;
+
+ /* Enable GPIO1 interrupts */
+ ret = regmap_update_bits(wg->regmap, IRQ_MASK_BASE + 1, GPIO_IRQ1_MASK,
+ 0x00);
+ if (ret)
+ return ret;
+
return 0;
}
diff --git a/drivers/gpio/gpio-xra1403.c b/drivers/gpio/gpio-xra1403.c
new file mode 100644
index 000000000000..0230e4b7a2fb
--- /dev/null
+++ b/drivers/gpio/gpio-xra1403.c
@@ -0,0 +1,237 @@
+/*
+ * GPIO driver for EXAR XRA1403 16-bit GPIO expander
+ *
+ * Copyright (c) 2017, General Electric Company
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/seq_file.h>
+#include <linux/spi/spi.h>
+#include <linux/regmap.h>
+
+/* XRA1403 registers */
+#define XRA_GSR 0x00 /* GPIO State */
+#define XRA_OCR 0x02 /* Output Control */
+#define XRA_PIR 0x04 /* Input Polarity Inversion */
+#define XRA_GCR 0x06 /* GPIO Configuration */
+#define XRA_PUR 0x08 /* Input Internal Pull-up Resistor Enable/Disable */
+#define XRA_IER 0x0A /* Input Interrupt Enable */
+#define XRA_TSCR 0x0C /* Output Three-State Control */
+#define XRA_ISR 0x0E /* Input Interrupt Status */
+#define XRA_REIR 0x10 /* Input Rising Edge Interrupt Enable */
+#define XRA_FEIR 0x12 /* Input Falling Edge Interrupt Enable */
+#define XRA_IFR 0x14 /* Input Filter Enable/Disable */
+
+struct xra1403 {
+ struct gpio_chip chip;
+ struct regmap *regmap;
+};
+
+static const struct regmap_config xra1403_regmap_cfg = {
+ .reg_bits = 7,
+ .pad_bits = 1,
+ .val_bits = 8,
+
+ .max_register = XRA_IFR | 0x01,
+};
+
+static unsigned int to_reg(unsigned int reg, unsigned int offset)
+{
+ return reg + (offset > 7);
+}
+
+static int xra1403_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ struct xra1403 *xra = gpiochip_get_data(chip);
+
+ return regmap_update_bits(xra->regmap, to_reg(XRA_GCR, offset),
+ BIT(offset % 8), BIT(offset % 8));
+}
+
+static int xra1403_direction_output(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ int ret;
+ struct xra1403 *xra = gpiochip_get_data(chip);
+
+ ret = regmap_update_bits(xra->regmap, to_reg(XRA_GCR, offset),
+ BIT(offset % 8), 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(xra->regmap, to_reg(XRA_OCR, offset),
+ BIT(offset % 8), value ? BIT(offset % 8) : 0);
+
+ return ret;
+}
+
+static int xra1403_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ int ret;
+ unsigned int val;
+ struct xra1403 *xra = gpiochip_get_data(chip);
+
+ ret = regmap_read(xra->regmap, to_reg(XRA_GCR, offset), &val);
+ if (ret)
+ return ret;
+
+ return !!(val & BIT(offset % 8));
+}
+
+static int xra1403_get(struct gpio_chip *chip, unsigned int offset)
+{
+ int ret;
+ unsigned int val;
+ struct xra1403 *xra = gpiochip_get_data(chip);
+
+ ret = regmap_read(xra->regmap, to_reg(XRA_GSR, offset), &val);
+ if (ret)
+ return ret;
+
+ return !!(val & BIT(offset % 8));
+}
+
+static void xra1403_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ int ret;
+ struct xra1403 *xra = gpiochip_get_data(chip);
+
+ ret = regmap_update_bits(xra->regmap, to_reg(XRA_OCR, offset),
+ BIT(offset % 8), value ? BIT(offset % 8) : 0);
+ if (ret)
+ dev_err(chip->parent, "Failed to set pin: %d, ret: %d\n",
+ offset, ret);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void xra1403_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ int reg;
+ struct xra1403 *xra = gpiochip_get_data(chip);
+ int value[xra1403_regmap_cfg.max_register];
+ int i;
+ unsigned int gcr;
+ unsigned int gsr;
+
+ seq_puts(s, "xra reg:");
+ for (reg = 0; reg <= xra1403_regmap_cfg.max_register; reg++)
+ seq_printf(s, " %2.2x", reg);
+ seq_puts(s, "\n value:");
+ for (reg = 0; reg < xra1403_regmap_cfg.max_register; reg++) {
+ regmap_read(xra->regmap, reg, &value[reg]);
+ seq_printf(s, " %2.2x", value[reg]);
+ }
+ seq_puts(s, "\n");
+
+ gcr = value[XRA_GCR + 1] << 8 | value[XRA_GCR];
+ gsr = value[XRA_GSR + 1] << 8 | value[XRA_GSR];
+ for (i = 0; i < chip->ngpio; i++) {
+ const char *label = gpiochip_is_requested(chip, i);
+
+ if (!label)
+ continue;
+
+ seq_printf(s, " gpio-%-3d (%-12s) %s %s\n",
+ chip->base + i, label,
+ (gcr & BIT(i)) ? "in" : "out",
+ (gsr & BIT(i)) ? "hi" : "lo");
+ }
+}
+#else
+#define xra1403_dbg_show NULL
+#endif
+
+static int xra1403_probe(struct spi_device *spi)
+{
+ struct xra1403 *xra;
+ struct gpio_desc *reset_gpio;
+ int ret;
+
+ xra = devm_kzalloc(&spi->dev, sizeof(*xra), GFP_KERNEL);
+ if (!xra)
+ return -ENOMEM;
+
+ /* Bring the chip out of reset if a reset pin is provided */
+ reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(reset_gpio))
+ dev_warn(&spi->dev, "Could not get reset-gpios\n");
+
+ xra->chip.direction_input = xra1403_direction_input;
+ xra->chip.direction_output = xra1403_direction_output;
+ xra->chip.get_direction = xra1403_get_direction;
+ xra->chip.get = xra1403_get;
+ xra->chip.set = xra1403_set;
+
+ xra->chip.dbg_show = xra1403_dbg_show;
+
+ xra->chip.ngpio = 16;
+ xra->chip.label = "xra1403";
+
+ xra->chip.base = -1;
+ xra->chip.can_sleep = true;
+ xra->chip.parent = &spi->dev;
+ xra->chip.owner = THIS_MODULE;
+
+ xra->regmap = devm_regmap_init_spi(spi, &xra1403_regmap_cfg);
+ if (IS_ERR(xra->regmap)) {
+ ret = PTR_ERR(xra->regmap);
+ dev_err(&spi->dev, "Failed to allocate regmap: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_gpiochip_add_data(&spi->dev, &xra->chip, xra);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Unable to register gpiochip\n");
+ return ret;
+ }
+
+ spi_set_drvdata(spi, xra);
+
+ return 0;
+}
+
+static const struct spi_device_id xra1403_ids[] = {
+ { "xra1403" },
+ {},
+};
+MODULE_DEVICE_TABLE(spi, xra1403_ids);
+
+static const struct of_device_id xra1403_spi_of_match[] = {
+ { .compatible = "exar,xra1403" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xra1403_spi_of_match);
+
+static struct spi_driver xra1403_driver = {
+ .probe = xra1403_probe,
+ .id_table = xra1403_ids,
+ .driver = {
+ .name = "xra1403",
+ .of_match_table = of_match_ptr(xra1403_spi_of_match),
+ },
+};
+
+module_spi_driver(xra1403_driver);
+
+MODULE_AUTHOR("Nandor Han <[email protected]>");
+MODULE_AUTHOR("Semi Malinen <[email protected]>");
+MODULE_DESCRIPTION("GPIO expander driver for EXAR XRA1403");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 6b4d10d6e10f..df0851464006 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -96,8 +96,8 @@
/* GPIO upper 16 bit mask */
#define ZYNQ_GPIO_UPPER_MASK 0xFFFF0000
-/* For GPIO quirks */
-#define ZYNQ_GPIO_QUIRK_FOO BIT(0)
+/* Set to differentiate Zynq from ZynqMP: 0 = ZynqMP, 1 = Zynq */
+#define ZYNQ_GPIO_QUIRK_IS_ZYNQ BIT(0)
/**
* struct zynq_gpio - gpio device private data structure
@@ -136,6 +136,17 @@ static struct irq_chip zynq_gpio_level_irqchip;
static struct irq_chip zynq_gpio_edge_irqchip;
/**
+ * zynq_gpio_is_zynq - test if HW is zynq or zynqmp
+ * @gpio: Pointer to driver data struct
+ *
+ * Return: 0 if zynqmp, 1 if zynq.
+ */
+static int zynq_gpio_is_zynq(struct zynq_gpio *gpio)
+{
+ return !!(gpio->p_data->quirks & ZYNQ_GPIO_QUIRK_IS_ZYNQ);
+}
+
+/**
* zynq_gpio_get_bank_pin - Get the bank number and pin number within that bank
* for a given pin in the GPIO device
* @pin_num: gpio pin number within the device
@@ -242,18 +253,16 @@ static void zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
static int zynq_gpio_dir_in(struct gpio_chip *chip, unsigned int pin)
{
u32 reg;
- bool is_zynq_gpio;
unsigned int bank_num, bank_pin_num;
struct zynq_gpio *gpio = gpiochip_get_data(chip);
- is_zynq_gpio = gpio->p_data->quirks & ZYNQ_GPIO_QUIRK_FOO;
zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio);
/*
* On zynq bank 0 pins 7 and 8 are special and cannot be used
* as inputs.
*/
- if (is_zynq_gpio && bank_num == 0 &&
+ if (zynq_gpio_is_zynq(gpio) && bank_num == 0 &&
(bank_pin_num == 7 || bank_pin_num == 8))
return -EINVAL;
@@ -637,7 +646,7 @@ static const struct zynq_platform_data zynqmp_gpio_def = {
static const struct zynq_platform_data zynq_gpio_def = {
.label = "zynq_gpio",
- .quirks = ZYNQ_GPIO_QUIRK_FOO,
+ .quirks = ZYNQ_GPIO_QUIRK_IS_ZYNQ,
.ngpio = ZYNQ_GPIO_NR_GPIOS,
.max_bank = ZYNQ_GPIO_MAX_BANK,
.bank_min[0] = ZYNQ_GPIO_BANK0_PIN_MIN(),
@@ -651,9 +660,8 @@ static const struct zynq_platform_data zynq_gpio_def = {
};
static const struct of_device_id zynq_gpio_of_match[] = {
- { .compatible = "xlnx,zynq-gpio-1.0", .data = (void *)&zynq_gpio_def },
- { .compatible = "xlnx,zynqmp-gpio-1.0",
- .data = (void *)&zynqmp_gpio_def },
+ { .compatible = "xlnx,zynq-gpio-1.0", .data = &zynq_gpio_def },
+ { .compatible = "xlnx,zynqmp-gpio-1.0", .data = &zynqmp_gpio_def },
{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, zynq_gpio_of_match);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 8fa5fcd00e9a..c9b42dd12dfa 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -165,6 +165,23 @@ static void acpi_gpio_chip_dh(acpi_handle handle, void *data)
/* The address of this function is used as a key. */
}
+bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio)
+{
+ struct acpi_resource_gpio *gpio;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
+ return false;
+
+ gpio = &ares->data.gpio;
+ if (gpio->connection_type != ACPI_RESOURCE_GPIO_TYPE_INT)
+ return false;
+
+ *agpio = gpio;
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource);
+
static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
void *context)
{
@@ -178,11 +195,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
unsigned long irqflags;
int ret, pin, irq;
- if (ares->type != ACPI_RESOURCE_TYPE_GPIO)
- return AE_OK;
-
- agpio = &ares->data.gpio;
- if (agpio->connection_type != ACPI_RESOURCE_GPIO_TYPE_INT)
+ if (!acpi_gpio_get_irq_resource(ares, &agpio))
return AE_OK;
handle = ACPI_HANDLE(chip->parent);
@@ -423,6 +436,59 @@ static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
return false;
}
+static enum gpiod_flags
+acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio)
+{
+ bool pull_up = agpio->pin_config == ACPI_PIN_CONFIG_PULLUP;
+
+ switch (agpio->io_restriction) {
+ case ACPI_IO_RESTRICT_INPUT:
+ return GPIOD_IN;
+ case ACPI_IO_RESTRICT_OUTPUT:
+ /*
+ * ACPI GPIO resources don't contain an initial value for the
+ * GPIO. Therefore we deduce that value from the pull field
+ * instead. If the pin is pulled up we assume the default to be
+ * high, otherwise low.
+ */
+ return pull_up ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ default:
+ /*
+ * Assume that the BIOS has configured the direction and pull
+ * accordingly.
+ */
+ return GPIOD_ASIS;
+ }
+}
+
+int
+acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
+{
+ int ret = 0;
+
+ /*
+ * Check if the BIOS has IoRestriction with explicitly set direction
+ * and update @flags accordingly. Otherwise use whatever caller asked
+ * for.
+ */
+ if (update & GPIOD_FLAGS_BIT_DIR_SET) {
+ enum gpiod_flags diff = *flags ^ update;
+
+ /*
+ * Check if caller supplied incompatible GPIO initialization
+ * flags.
+ *
+ * Return %-EINVAL to notify that firmware has different
+ * settings and we are going to use them.
+ */
+ if (((*flags & GPIOD_FLAGS_BIT_DIR_SET) && (diff & GPIOD_FLAGS_BIT_DIR_OUT)) ||
+ ((*flags & GPIOD_FLAGS_BIT_DIR_OUT) && (diff & GPIOD_FLAGS_BIT_DIR_VAL)))
+ ret = -EINVAL;
+ *flags = update;
+ }
+ return ret;
+}
+
struct acpi_gpio_lookup {
struct acpi_gpio_info info;
int index;
@@ -460,8 +526,11 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
* - ACPI_ACTIVE_HIGH == GPIO_ACTIVE_HIGH
*/
if (lookup->info.gpioint) {
+ lookup->info.flags = GPIOD_IN;
lookup->info.polarity = agpio->polarity;
lookup->info.triggering = agpio->triggering;
+ } else {
+ lookup->info.flags = acpi_gpio_to_gpiod_flags(agpio);
}
}
@@ -588,18 +657,19 @@ static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
struct gpio_desc *acpi_find_gpio(struct device *dev,
const char *con_id,
unsigned int idx,
- enum gpiod_flags flags,
+ enum gpiod_flags *dflags,
enum gpio_lookup_flags *lookupflags)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
struct acpi_gpio_info info;
struct gpio_desc *desc;
char propname[32];
+ int err;
int i;
/* Try first from _DSD */
for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
- if (con_id && strcmp(con_id, "gpios")) {
+ if (con_id) {
snprintf(propname, sizeof(propname), "%s-%s",
con_id, gpio_suffixes[i]);
} else {
@@ -622,17 +692,21 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info);
if (IS_ERR(desc))
return desc;
+ }
- if ((flags == GPIOD_OUT_LOW || flags == GPIOD_OUT_HIGH) &&
- info.gpioint) {
- dev_dbg(dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n");
- return ERR_PTR(-ENOENT);
- }
+ if (info.gpioint &&
+ (*dflags == GPIOD_OUT_LOW || *dflags == GPIOD_OUT_HIGH)) {
+ dev_dbg(dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n");
+ return ERR_PTR(-ENOENT);
}
if (info.polarity == GPIO_ACTIVE_LOW)
*lookupflags |= GPIO_ACTIVE_LOW;
+ err = acpi_gpio_update_gpiod_flags(dflags, info.flags);
+ if (err)
+ dev_dbg(dev, "Override GPIO initialization flags\n");
+
return desc;
}
@@ -686,12 +760,16 @@ struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
* used to translate from the GPIO offset in the resource to the Linux IRQ
* number.
*
+ * The function is idempotent, though each time it runs it will configure GPIO
+ * pin direction according to the flags in the GpioInt resource.
+ *
* Return: Linux IRQ number (>%0) on success, negative errno on failure.
*/
int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
int idx, i;
unsigned int irq_flags;
+ int ret;
for (i = 0, idx = 0; idx <= index; i++) {
struct acpi_gpio_info info;
@@ -704,6 +782,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
return PTR_ERR(desc);
if (info.gpioint && idx++ == index) {
+ char label[32];
int irq;
if (IS_ERR(desc))
@@ -713,6 +792,11 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
if (irq < 0)
return irq;
+ snprintf(label, sizeof(label), "GpioInt() %d", index);
+ ret = gpiod_configure_flags(desc, label, 0, info.flags);
+ if (ret < 0)
+ return ret;
+
irq_flags = acpi_dev_get_irq_type(info.triggering,
info.polarity);
@@ -740,7 +824,6 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
struct acpi_resource *ares;
int pin_index = (int)address;
acpi_status status;
- bool pull_up;
int length;
int i;
@@ -755,7 +838,6 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
}
agpio = &ares->data.gpio;
- pull_up = agpio->pin_config == ACPI_PIN_CONFIG_PULLUP;
if (WARN_ON(agpio->io_restriction == ACPI_IO_RESTRICT_INPUT &&
function == ACPI_WRITE)) {
@@ -806,35 +888,23 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
}
if (!found) {
- desc = gpiochip_request_own_desc(chip, pin,
- "ACPI:OpRegion");
+ enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio);
+ const char *label = "ACPI:OpRegion";
+ int err;
+
+ desc = gpiochip_request_own_desc(chip, pin, label);
if (IS_ERR(desc)) {
status = AE_ERROR;
mutex_unlock(&achip->conn_lock);
goto out;
}
- switch (agpio->io_restriction) {
- case ACPI_IO_RESTRICT_INPUT:
- gpiod_direction_input(desc);
- break;
- case ACPI_IO_RESTRICT_OUTPUT:
- /*
- * ACPI GPIO resources don't contain an
- * initial value for the GPIO. Therefore we
- * deduce that value from the pull field
- * instead. If the pin is pulled up we
- * assume default to be high, otherwise
- * low.
- */
- gpiod_direction_output(desc, pull_up);
- break;
- default:
- /*
- * Assume that the BIOS has configured the
- * direction and pull accordingly.
- */
- break;
+ err = gpiod_configure_flags(desc, label, 0, flags);
+ if (err < 0) {
+ status = AE_NOT_CONFIGURED;
+ gpiochip_free_own_desc(desc);
+ mutex_unlock(&achip->conn_lock);
+ goto out;
}
conn = kzalloc(sizeof(*conn), GFP_KERNEL);
@@ -1089,7 +1159,7 @@ int acpi_gpio_count(struct device *dev, const char *con_id)
/* Try first from _DSD */
for (i = 0; i < ARRAY_SIZE(gpio_suffixes); i++) {
- if (con_id && strcmp(con_id, "gpios"))
+ if (con_id)
snprintf(propname, sizeof(propname), "%s-%s",
con_id, gpio_suffixes[i]);
else
@@ -1119,6 +1189,9 @@ int acpi_gpio_count(struct device *dev, const char *con_id)
struct list_head resource_list;
unsigned int crs_count = 0;
+ if (!acpi_can_fallback_to_crs(adev, con_id))
+ return count;
+
INIT_LIST_HEAD(&resource_list);
acpi_dev_get_resources(adev, &resource_list,
acpi_find_gpio_count, &crs_count);
@@ -1129,45 +1202,11 @@ int acpi_gpio_count(struct device *dev, const char *con_id)
return count ? count : -ENOENT;
}
-struct acpi_crs_lookup {
- struct list_head node;
- struct acpi_device *adev;
- const char *con_id;
-};
-
-static DEFINE_MUTEX(acpi_crs_lookup_lock);
-static LIST_HEAD(acpi_crs_lookup_list);
-
bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
{
- struct acpi_crs_lookup *l, *lookup = NULL;
-
/* Never allow fallback if the device has properties */
if (adev->data.properties || adev->driver_gpios)
return false;
- mutex_lock(&acpi_crs_lookup_lock);
-
- list_for_each_entry(l, &acpi_crs_lookup_list, node) {
- if (l->adev == adev) {
- lookup = l;
- break;
- }
- }
-
- if (!lookup) {
- lookup = kmalloc(sizeof(*lookup), GFP_KERNEL);
- if (lookup) {
- lookup->adev = adev;
- lookup->con_id = kstrdup(con_id, GFP_KERNEL);
- list_add_tail(&lookup->node, &acpi_crs_lookup_list);
- }
- }
-
- mutex_unlock(&acpi_crs_lookup_lock);
-
- return lookup &&
- ((!lookup->con_id && !con_id) ||
- (lookup->con_id && con_id &&
- strcmp(lookup->con_id, con_id) == 0));
+ return con_id == NULL;
}
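acpi_gpio_update_gpiod_flags() lets an explicit IoRestriction direction from the firmware override whatever the driver passed to gpiod_get(), returning -EINVAL only to signal that the caller's request was changed. A self-contained sketch of that rule (illustrative; the flag bit layout below mirrors include/linux/gpio/consumer.h and is an assumption of this sketch, not taken from the patch):

#include <errno.h>
#include <stdio.h>

enum gpiod_flags {
	GPIOD_FLAGS_BIT_DIR_SET = 1 << 0,
	GPIOD_FLAGS_BIT_DIR_OUT = 1 << 1,
	GPIOD_FLAGS_BIT_DIR_VAL = 1 << 2,
	GPIOD_ASIS     = 0,
	GPIOD_IN       = GPIOD_FLAGS_BIT_DIR_SET,
	GPIOD_OUT_LOW  = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT,
	GPIOD_OUT_HIGH = GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_DIR_VAL,
};

/* Same decision as acpi_gpio_update_gpiod_flags() in the patch above. */
static int update_flags(enum gpiod_flags *flags, enum gpiod_flags update)
{
	int ret = 0;

	if (update & GPIOD_FLAGS_BIT_DIR_SET) {
		enum gpiod_flags diff = *flags ^ update;

		/* Caller asked for something the firmware contradicts. */
		if (((*flags & GPIOD_FLAGS_BIT_DIR_SET) &&
		     (diff & GPIOD_FLAGS_BIT_DIR_OUT)) ||
		    ((*flags & GPIOD_FLAGS_BIT_DIR_OUT) &&
		     (diff & GPIOD_FLAGS_BIT_DIR_VAL)))
			ret = -EINVAL;
		*flags = update;	/* firmware settings win */
	}
	return ret;
}

int main(void)
{
	enum gpiod_flags f = GPIOD_OUT_HIGH;	/* what the driver asked for */

	/* Firmware describes the pin as input-only: the request is overridden. */
	printf("ret=%d flags=%d\n", update_flags(&f, GPIOD_IN), f);
	return 0;
}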
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index b13b7c7c335f..54ce8dc58ad0 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -153,6 +153,9 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
*flags |= GPIO_OPEN_SOURCE;
}
+ if (of_flags & OF_GPIO_SLEEP_MAY_LOOSE_VALUE)
+ *flags |= GPIO_SLEEP_MAY_LOOSE_VALUE;
+
return desc;
}
@@ -236,7 +239,7 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
*
* This is only used by of_gpiochip_add to request/set GPIO initial
* configuration.
- * It retures error if it fails otherwise 0 on success.
+ * It returns an error if it fails, otherwise 0 on success.
*/
static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
{
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a42a1eea5714..9568708a550b 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1,4 +1,4 @@
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -1472,8 +1472,6 @@ static struct gpio_chip *find_chip_by_name(const char *name)
static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip)
{
- int i;
-
if (!gpiochip->irq_need_valid_mask)
return 0;
@@ -1483,8 +1481,7 @@ static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip)
return -ENOMEM;
/* Assume by default all GPIOs are valid */
- for (i = 0; i < gpiochip->ngpio; i++)
- set_bit(i, gpiochip->irq_valid_mask);
+ bitmap_fill(gpiochip->irq_valid_mask, gpiochip->ngpio);
return 0;
}
@@ -2870,6 +2867,16 @@ bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset)
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_open_source);
+bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset)
+{
+ if (offset >= chip->ngpio)
+ return false;
+
+ return !test_bit(FLAG_SLEEP_MAY_LOOSE_VALUE,
+ &chip->gpiodev->descs[offset].flags);
+}
+EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent);
+
/**
* gpiod_get_raw_value_cansleep() - return a gpio's raw value
* @desc: gpio whose value will be returned
@@ -3009,6 +3016,7 @@ void gpiod_add_lookup_table(struct gpiod_lookup_table *table)
mutex_unlock(&gpio_lookup_lock);
}
+EXPORT_SYMBOL_GPL(gpiod_add_lookup_table);
/**
* gpiod_remove_lookup_table() - unregister GPIO device consumers
@@ -3022,6 +3030,7 @@ void gpiod_remove_lookup_table(struct gpiod_lookup_table *table)
mutex_unlock(&gpio_lookup_lock);
}
+EXPORT_SYMBOL_GPL(gpiod_remove_lookup_table);
static struct gpiod_lookup_table *gpiod_find_lookup_table(struct device *dev)
{
@@ -3213,7 +3222,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_optional);
* requested function and/or index, or another IS_ERR() code if an error
* occurred while trying to acquire the GPIO.
*/
-static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
+int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
unsigned long lflags, enum gpiod_flags dflags)
{
int status;
@@ -3224,6 +3233,8 @@ static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
set_bit(FLAG_OPEN_DRAIN, &desc->flags);
if (lflags & GPIO_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+ if (lflags & GPIO_SLEEP_MAY_LOOSE_VALUE)
+ set_bit(FLAG_SLEEP_MAY_LOOSE_VALUE, &desc->flags);
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
@@ -3273,7 +3284,7 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
desc = of_find_gpio(dev, con_id, idx, &lookupflags);
} else if (ACPI_COMPANION(dev)) {
dev_dbg(dev, "using ACPI for GPIO lookup\n");
- desc = acpi_find_gpio(dev, con_id, idx, flags, &lookupflags);
+ desc = acpi_find_gpio(dev, con_id, idx, &flags, &lookupflags);
}
}
@@ -3354,8 +3365,12 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
struct acpi_gpio_info info;
desc = acpi_node_get_gpiod(fwnode, propname, index, &info);
- if (!IS_ERR(desc))
+ if (!IS_ERR(desc)) {
active_low = info.polarity == GPIO_ACTIVE_LOW;
+ ret = acpi_gpio_update_gpiod_flags(&dflags, info.flags);
+ if (ret)
+ pr_debug("Override GPIO initialization flags\n");
+ }
}
if (IS_ERR(desc))
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 2495b7ee1b42..a8be286eff86 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -75,11 +75,13 @@ struct gpio_device {
/**
* struct acpi_gpio_info - ACPI GPIO specific information
+ * @flags: GPIO initialization flags
* @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo
* @polarity: interrupt polarity as provided by ACPI
* @triggering: triggering type as provided by ACPI
*/
struct acpi_gpio_info {
+ enum gpiod_flags flags;
bool gpioint;
int polarity;
int triggering;
@@ -121,10 +123,13 @@ void acpi_gpiochip_remove(struct gpio_chip *chip);
void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
+int acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags,
+ enum gpiod_flags update);
+
struct gpio_desc *acpi_find_gpio(struct device *dev,
const char *con_id,
unsigned int idx,
- enum gpiod_flags flags,
+ enum gpiod_flags *dflags,
enum gpio_lookup_flags *lookupflags);
struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
const char *propname, int index,
@@ -143,9 +148,15 @@ acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { }
static inline void
acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { }
+static inline int
+acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
+{
+ return 0;
+}
+
static inline struct gpio_desc *
acpi_find_gpio(struct device *dev, const char *con_id,
- unsigned int idx, enum gpiod_flags flags,
+ unsigned int idx, enum gpiod_flags *dflags,
enum gpio_lookup_flags *lookupflags)
{
return ERR_PTR(-ENOENT);
@@ -190,6 +201,7 @@ struct gpio_desc {
#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
#define FLAG_IS_HOGGED 11 /* GPIO is hogged */
+#define FLAG_SLEEP_MAY_LOOSE_VALUE 12 /* GPIO may lose its value in sleep */
/* Connection label */
const char *label;
@@ -199,6 +211,8 @@ struct gpio_desc {
int gpiod_request(struct gpio_desc *desc, const char *label);
void gpiod_free(struct gpio_desc *desc);
+int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
+ unsigned long lflags, enum gpiod_flags dflags);
int gpiod_hog(struct gpio_desc *desc, const char *name,
unsigned long lflags, enum gpiod_flags dflags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 43ca16b6eee2..bbac5d5d1fcf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1152,16 +1152,12 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
return;
if (state == VGA_SWITCHEROO_ON) {
- unsigned d3_delay = dev->pdev->d3_delay;
-
pr_info("amdgpu: switched on\n");
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
amdgpu_device_resume(dev, true, true);
- dev->pdev->d3_delay = d3_delay;
-
dev->switch_power_state = DRM_SWITCH_POWER_ON;
drm_kms_helper_poll_enable(dev);
} else {
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index 2a7eb6817c36..92e6b08ea64a 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -133,7 +133,7 @@ static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
}
/* Framebuffer objects must have a valid device address for scanout */
- if (obj->dev_addr == DMA_ERROR_CODE) {
+ if (!obj->mapped) {
ret = -EINVAL;
goto err_unref;
}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index d6c2a5d190eb..a76ca21d063b 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -175,6 +175,7 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
obj->phys_addr = obj->linear->start;
obj->dev_addr = obj->linear->start;
+ obj->mapped = true;
}
DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
@@ -205,7 +206,6 @@ armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
return NULL;
drm_gem_private_object_init(dev, &obj->obj, size);
- obj->dev_addr = DMA_ERROR_CODE;
DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
@@ -229,8 +229,6 @@ static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
return NULL;
}
- obj->dev_addr = DMA_ERROR_CODE;
-
mapping = obj->obj.filp->f_mapping;
mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
@@ -610,5 +608,6 @@ int armada_gem_map_import(struct armada_gem_object *dobj)
return -EINVAL;
}
dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
+ dobj->mapped = true;
return 0;
}
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
index b88d2b9853c7..6e524e0676bb 100644
--- a/drivers/gpu/drm/armada/armada_gem.h
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -16,6 +16,7 @@ struct armada_gem_object {
void *addr;
phys_addr_t phys_addr;
resource_size_t dev_addr;
+ bool mapped;
struct drm_mm_node *linear; /* for linear backed */
struct page *page; /* for page backed */
struct sg_table *sgt; /* for imported */
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index cf92ebfe6ab7..67469c26bae8 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -11,6 +11,7 @@
#include <sound/hdmi-codec.h>
#include <sound/pcm.h>
#include <sound/soc.h>
+#include <linux/of_graph.h>
#include "adv7511.h"
@@ -182,10 +183,31 @@ static void audio_shutdown(struct device *dev, void *data)
{
}
+static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
+ struct device_node *endpoint)
+{
+ struct of_endpoint of_ep;
+ int ret;
+
+ ret = of_graph_parse_endpoint(endpoint, &of_ep);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * HDMI sound is expected at the endpoint with reg = <2>,
+ * which corresponds to sound port 0.
+ */
+ if (of_ep.port == 2)
+ return 0;
+
+ return -EINVAL;
+}
+
static const struct hdmi_codec_ops adv7511_codec_ops = {
.hw_params = adv7511_hdmi_hw_params,
.audio_shutdown = audio_shutdown,
.audio_startup = audio_startup,
+ .get_dai_id = adv7511_hdmi_i2s_get_dai_id,
};
static struct hdmi_codec_pdata codec_data = {
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index aaf287d2e91d..b2cf59f54c88 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -82,9 +82,30 @@ static void dw_hdmi_i2s_audio_shutdown(struct device *dev, void *data)
hdmi_write(audio, HDMI_AUD_CONF0_SW_RESET, HDMI_AUD_CONF0);
}
+static int dw_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
+ struct device_node *endpoint)
+{
+ struct of_endpoint of_ep;
+ int ret;
+
+ ret = of_graph_parse_endpoint(endpoint, &of_ep);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * HDMI sound is expected at the endpoint with reg = <2>,
+ * which corresponds to sound port 0.
+ */
+ if (of_ep.port == 2)
+ return 0;
+
+ return -EINVAL;
+}
+
static struct hdmi_codec_ops dw_hdmi_i2s_ops = {
.hw_params = dw_hdmi_i2s_hw_params,
.audio_shutdown = dw_hdmi_i2s_audio_shutdown,
+ .get_dai_id = dw_hdmi_i2s_get_dai_id,
};
static int snd_dw_hdmi_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index adb1dd7fde5f..1ee84dd802d4 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -1258,11 +1258,11 @@ int drm_legacy_addbufs(struct drm_device *dev, void *data,
* lock, preventing allocation of more buffers after this call. Information
* about each requested buffer is then copied into user space.
*/
-int drm_legacy_infobufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int __drm_legacy_infobufs(struct drm_device *dev,
+ void *data, int *p,
+ int (*f)(void *, int, struct drm_buf_entry *))
{
struct drm_device_dma *dma = dev->dma;
- struct drm_buf_info *request = data;
int i;
int count;
@@ -1290,26 +1290,12 @@ int drm_legacy_infobufs(struct drm_device *dev, void *data,
DRM_DEBUG("count = %d\n", count);
- if (request->count >= count) {
+ if (*p >= count) {
for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
- if (dma->bufs[i].buf_count) {
- struct drm_buf_desc __user *to =
- &request->list[count];
- struct drm_buf_entry *from = &dma->bufs[i];
- if (copy_to_user(&to->count,
- &from->buf_count,
- sizeof(from->buf_count)) ||
- copy_to_user(&to->size,
- &from->buf_size,
- sizeof(from->buf_size)) ||
- copy_to_user(&to->low_mark,
- &from->low_mark,
- sizeof(from->low_mark)) ||
- copy_to_user(&to->high_mark,
- &from->high_mark,
- sizeof(from->high_mark)))
+ struct drm_buf_entry *from = &dma->bufs[i];
+ if (from->buf_count) {
+ if (f(data, count, from) < 0)
return -EFAULT;
-
DRM_DEBUG("%d %d %d %d %d\n",
i,
dma->bufs[i].buf_count,
@@ -1320,11 +1306,29 @@ int drm_legacy_infobufs(struct drm_device *dev, void *data,
}
}
}
- request->count = count;
+ *p = count;
return 0;
}
+static int copy_one_buf(void *data, int count, struct drm_buf_entry *from)
+{
+ struct drm_buf_info *request = data;
+ struct drm_buf_desc __user *to = &request->list[count];
+ struct drm_buf_desc v = {.count = from->buf_count,
+ .size = from->buf_size,
+ .low_mark = from->low_mark,
+ .high_mark = from->high_mark};
+ return copy_to_user(to, &v, offsetof(struct drm_buf_desc, flags));
+}
+
+int drm_legacy_infobufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_buf_info *request = data;
+ return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
+}
+
/**
* Specifies a low and high water mark for buffer allocation
*
@@ -1439,15 +1443,15 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
* offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
* drm_mmap_dma().
*/
-int drm_legacy_mapbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
+ void __user **v,
+ int (*f)(void *, int, unsigned long,
+ struct drm_buf *),
+ struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int retcode = 0;
- const int zero = 0;
unsigned long virtual;
- unsigned long address;
- struct drm_buf_map *request = data;
int i;
if (!drm_core_check_feature(dev, DRIVER_LEGACY))
@@ -1467,7 +1471,7 @@ int drm_legacy_mapbufs(struct drm_device *dev, void *data,
dev->buf_use++; /* Can't allocate more after this call */
spin_unlock(&dev->buf_lock);
- if (request->count >= dma->buf_count) {
+ if (*p >= dma->buf_count) {
if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
|| (drm_core_check_feature(dev, DRIVER_SG)
&& (dma->flags & _DRM_DMA_USE_SG))) {
@@ -1492,41 +1496,51 @@ int drm_legacy_mapbufs(struct drm_device *dev, void *data,
retcode = (signed long)virtual;
goto done;
}
- request->virtual = (void __user *)virtual;
+ *v = (void __user *)virtual;
for (i = 0; i < dma->buf_count; i++) {
- if (copy_to_user(&request->list[i].idx,
- &dma->buflist[i]->idx,
- sizeof(request->list[0].idx))) {
- retcode = -EFAULT;
- goto done;
- }
- if (copy_to_user(&request->list[i].total,
- &dma->buflist[i]->total,
- sizeof(request->list[0].total))) {
- retcode = -EFAULT;
- goto done;
- }
- if (copy_to_user(&request->list[i].used,
- &zero, sizeof(zero))) {
- retcode = -EFAULT;
- goto done;
- }
- address = virtual + dma->buflist[i]->offset; /* *** */
- if (copy_to_user(&request->list[i].address,
- &address, sizeof(address))) {
+ if (f(data, i, virtual, dma->buflist[i]) < 0) {
retcode = -EFAULT;
goto done;
}
}
}
done:
- request->count = dma->buf_count;
- DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
+ *p = dma->buf_count;
+ DRM_DEBUG("%d buffers, retcode = %d\n", *p, retcode);
return retcode;
}
+static int map_one_buf(void *data, int idx, unsigned long virtual,
+ struct drm_buf *buf)
+{
+ struct drm_buf_map *request = data;
+ unsigned long address = virtual + buf->offset; /* *** */
+
+ if (copy_to_user(&request->list[idx].idx, &buf->idx,
+ sizeof(request->list[0].idx)))
+ return -EFAULT;
+ if (copy_to_user(&request->list[idx].total, &buf->total,
+ sizeof(request->list[0].total)))
+ return -EFAULT;
+ if (clear_user(&request->list[idx].used, sizeof(int)))
+ return -EFAULT;
+ if (copy_to_user(&request->list[idx].address, &address,
+ sizeof(address)))
+ return -EFAULT;
+ return 0;
+}
+
+int drm_legacy_mapbufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_buf_map *request = data;
+ return __drm_legacy_mapbufs(dev, data, &request->count,
+ &request->virtual, map_one_buf,
+ file_priv);
+}
+
int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 3d8e8f878924..14dfa9c83d1d 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -143,3 +143,6 @@ static inline int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
return 0;
}
#endif
+drm_ioctl_t drm_version;
+drm_ioctl_t drm_getunique;
+drm_ioctl_t drm_getclient;
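The drm_ioc32.c changes that follow all apply one pattern: instead of rebuilding a 64-bit copy of the user's struct with compat_alloc_user_space(), each compat handler converts the 32-bit layout on the kernel stack and calls the ioctl implementation directly through drm_ioctl_kernel(). A sketch of that shape (hypothetical "foo" ioctl; drm_foo32_t, struct drm_foo and drm_foo_ioctl are made-up names used only to show the pattern):

static int compat_drm_foo(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	drm_foo32_t f32;
	struct drm_foo f;
	int err;

	/* Pull the 32-bit layout in from userspace. */
	if (copy_from_user(&f32, (void __user *)arg, sizeof(f32)))
		return -EFAULT;

	/* Convert on the kernel stack; compat_ptr() widens 32-bit pointers. */
	f.handle = compat_ptr(f32.handle);
	f.count = f32.count;

	/* Call the native implementation without another user copy. */
	err = drm_ioctl_kernel(file, drm_foo_ioctl, &f, DRM_AUTH);
	if (err)
		return err;

	/* Copy any results back into the 32-bit layout. */
	f32.count = f.count;
	if (copy_to_user((void __user *)arg, &f32, sizeof(f32)))
		return -EFAULT;
	return 0;
}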
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index ae386783e3ea..0b2d8c4a2fa5 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -32,6 +32,9 @@
#include <linux/export.h>
#include <drm/drmP.h>
+#include "drm_legacy.h"
+#include "drm_internal.h"
+#include "drm_crtc_internal.h"
#define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t)
#define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t)
@@ -87,39 +90,28 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_version32_t v32;
- struct drm_version __user *version;
+ struct drm_version v;
int err;
if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
return -EFAULT;
- version = compat_alloc_user_space(sizeof(*version));
- if (!version)
- return -EFAULT;
- if (__put_user(v32.name_len, &version->name_len)
- || __put_user((void __user *)(unsigned long)v32.name,
- &version->name)
- || __put_user(v32.date_len, &version->date_len)
- || __put_user((void __user *)(unsigned long)v32.date,
- &version->date)
- || __put_user(v32.desc_len, &version->desc_len)
- || __put_user((void __user *)(unsigned long)v32.desc,
- &version->desc))
- return -EFAULT;
-
- err = drm_ioctl(file,
- DRM_IOCTL_VERSION, (unsigned long)version);
+ v = (struct drm_version) {
+ .name_len = v32.name_len,
+ .name = compat_ptr(v32.name),
+ .date_len = v32.date_len,
+ .date = compat_ptr(v32.date),
+ .desc_len = v32.desc_len,
+ .desc = compat_ptr(v32.desc),
+ };
+ err = drm_ioctl_kernel(file, drm_version, &v,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW);
if (err)
return err;
- if (__get_user(v32.version_major, &version->version_major)
- || __get_user(v32.version_minor, &version->version_minor)
- || __get_user(v32.version_patchlevel, &version->version_patchlevel)
- || __get_user(v32.name_len, &version->name_len)
- || __get_user(v32.date_len, &version->date_len)
- || __get_user(v32.desc_len, &version->desc_len))
- return -EFAULT;
-
+ v32.version_major = v.version_major;
+ v32.version_minor = v.version_minor;
+ v32.version_patchlevel = v.version_patchlevel;
if (copy_to_user((void __user *)arg, &v32, sizeof(v32)))
return -EFAULT;
return 0;
@@ -134,26 +126,21 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_unique32_t uq32;
- struct drm_unique __user *u;
+ struct drm_unique uq;
int err;
if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
return -EFAULT;
+ uq = (struct drm_unique){
+ .unique_len = uq32.unique_len,
+ .unique = compat_ptr(uq32.unique),
+ };
- u = compat_alloc_user_space(sizeof(*u));
- if (!u)
- return -EFAULT;
- if (__put_user(uq32.unique_len, &u->unique_len)
- || __put_user((void __user *)(unsigned long)uq32.unique,
- &u->unique))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
+ err = drm_ioctl_kernel(file, drm_getunique, &uq, DRM_UNLOCKED);
if (err)
return err;
- if (__get_user(uq32.unique_len, &u->unique_len))
- return -EFAULT;
+ uq32.unique_len = uq.unique_len;
if (copy_to_user((void __user *)arg, &uq32, sizeof(uq32)))
return -EFAULT;
return 0;
@@ -162,21 +149,8 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
static int compat_drm_setunique(struct file *file, unsigned int cmd,
unsigned long arg)
{
- drm_unique32_t uq32;
- struct drm_unique __user *u;
-
- if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
- return -EFAULT;
-
- u = compat_alloc_user_space(sizeof(*u));
- if (!u)
- return -EFAULT;
- if (__put_user(uq32.unique_len, &u->unique_len)
- || __put_user((void __user *)(unsigned long)uq32.unique,
- &u->unique))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
+ /* it's dead */
+ return -EINVAL;
}
typedef struct drm_map32 {
@@ -193,32 +167,23 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
{
drm_map32_t __user *argp = (void __user *)arg;
drm_map32_t m32;
- struct drm_map __user *map;
- int idx, err;
- void *handle;
-
- if (get_user(idx, &argp->offset))
- return -EFAULT;
+ struct drm_map map;
+ int err;
- map = compat_alloc_user_space(sizeof(*map));
- if (!map)
- return -EFAULT;
- if (__put_user(idx, &map->offset))
+ if (copy_from_user(&m32, argp, sizeof(m32)))
return -EFAULT;
- err = drm_ioctl(file, DRM_IOCTL_GET_MAP, (unsigned long)map);
+ map.offset = m32.offset;
+ err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, DRM_UNLOCKED);
if (err)
return err;
- if (__get_user(m32.offset, &map->offset)
- || __get_user(m32.size, &map->size)
- || __get_user(m32.type, &map->type)
- || __get_user(m32.flags, &map->flags)
- || __get_user(handle, &map->handle)
- || __get_user(m32.mtrr, &map->mtrr))
- return -EFAULT;
-
- m32.handle = (unsigned long)handle;
+ m32.offset = map.offset;
+ m32.size = map.size;
+ m32.type = map.type;
+ m32.flags = map.flags;
+ m32.handle = ptr_to_compat(map.handle);
+ m32.mtrr = map.mtrr;
if (copy_to_user(argp, &m32, sizeof(m32)))
return -EFAULT;
return 0;
@@ -230,35 +195,28 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
{
drm_map32_t __user *argp = (void __user *)arg;
drm_map32_t m32;
- struct drm_map __user *map;
+ struct drm_map map;
int err;
- void *handle;
if (copy_from_user(&m32, argp, sizeof(m32)))
return -EFAULT;
- map = compat_alloc_user_space(sizeof(*map));
- if (!map)
- return -EFAULT;
- if (__put_user(m32.offset, &map->offset)
- || __put_user(m32.size, &map->size)
- || __put_user(m32.type, &map->type)
- || __put_user(m32.flags, &map->flags))
- return -EFAULT;
+ map.offset = m32.offset;
+ map.size = m32.size;
+ map.type = m32.type;
+ map.flags = m32.flags;
- err = drm_ioctl(file, DRM_IOCTL_ADD_MAP, (unsigned long)map);
+ err = drm_ioctl_kernel(file, drm_legacy_addmap_ioctl, &map,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
if (err)
return err;
- if (__get_user(m32.offset, &map->offset)
- || __get_user(m32.mtrr, &map->mtrr)
- || __get_user(handle, &map->handle))
- return -EFAULT;
-
- m32.handle = (unsigned long)handle;
- if (m32.handle != (unsigned long)handle)
+ m32.offset = map.offset;
+ m32.mtrr = map.mtrr;
+ m32.handle = ptr_to_compat(map.handle);
+ if (map.handle != compat_ptr(m32.handle))
pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n",
- handle, m32.type, m32.offset);
+ map.handle, m32.type, m32.offset);
if (copy_to_user(argp, &m32, sizeof(m32)))
return -EFAULT;
@@ -270,19 +228,13 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_map32_t __user *argp = (void __user *)arg;
- struct drm_map __user *map;
+ struct drm_map map;
u32 handle;
if (get_user(handle, &argp->handle))
return -EFAULT;
-
- map = compat_alloc_user_space(sizeof(*map));
- if (!map)
- return -EFAULT;
- if (__put_user((void *)(unsigned long)handle, &map->handle))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RM_MAP, (unsigned long)map);
+ map.handle = compat_ptr(handle);
+ return drm_ioctl_kernel(file, drm_legacy_rmmap_ioctl, &map, DRM_AUTH);
}
typedef struct drm_client32 {
@@ -299,29 +251,24 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
{
drm_client32_t c32;
drm_client32_t __user *argp = (void __user *)arg;
- struct drm_client __user *client;
- int idx, err;
+ struct drm_client client;
+ int err;
- if (get_user(idx, &argp->idx))
+ if (copy_from_user(&c32, argp, sizeof(c32)))
return -EFAULT;
- client = compat_alloc_user_space(sizeof(*client));
- if (!client)
- return -EFAULT;
- if (__put_user(idx, &client->idx))
- return -EFAULT;
+ client.idx = c32.idx;
- err = drm_ioctl(file, DRM_IOCTL_GET_CLIENT, (unsigned long)client);
+ err = drm_ioctl_kernel(file, drm_getclient, &client, DRM_UNLOCKED);
if (err)
return err;
- if (__get_user(c32.idx, &client->idx)
- || __get_user(c32.auth, &client->auth)
- || __get_user(c32.pid, &client->pid)
- || __get_user(c32.uid, &client->uid)
- || __get_user(c32.magic, &client->magic)
- || __get_user(c32.iocs, &client->iocs))
- return -EFAULT;
+ c32.idx = client.idx;
+ c32.auth = client.auth;
+ c32.pid = client.pid;
+ c32.uid = client.uid;
+ c32.magic = client.magic;
+ c32.iocs = client.iocs;
if (copy_to_user(argp, &c32, sizeof(c32)))
return -EFAULT;
@@ -339,28 +286,14 @@ typedef struct drm_stats32 {
static int compat_drm_getstats(struct file *file, unsigned int cmd,
unsigned long arg)
{
- drm_stats32_t s32;
drm_stats32_t __user *argp = (void __user *)arg;
- struct drm_stats __user *stats;
- int i, err;
-
- memset(&s32, 0, sizeof(drm_stats32_t));
- stats = compat_alloc_user_space(sizeof(*stats));
- if (!stats)
- return -EFAULT;
+ int err;
- err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
+ err = drm_ioctl_kernel(file, drm_noop, NULL, DRM_UNLOCKED);
if (err)
return err;
- if (__get_user(s32.count, &stats->count))
- return -EFAULT;
- for (i = 0; i < 15; ++i)
- if (__get_user(s32.data[i].value, &stats->data[i].value)
- || __get_user(s32.data[i].type, &stats->data[i].type))
- return -EFAULT;
-
- if (copy_to_user(argp, &s32, sizeof(s32)))
+ if (clear_user(argp, sizeof(drm_stats32_t)))
return -EFAULT;
return 0;
}
@@ -378,26 +311,28 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_desc32_t __user *argp = (void __user *)arg;
- struct drm_buf_desc __user *buf;
+ drm_buf_desc32_t desc32;
+ struct drm_buf_desc desc;
int err;
- unsigned long agp_start;
- buf = compat_alloc_user_space(sizeof(*buf));
- if (!buf || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
+ if (copy_from_user(&desc32, argp, sizeof(drm_buf_desc32_t)))
return -EFAULT;
- if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start))
- || __get_user(agp_start, &argp->agp_start)
- || __put_user(agp_start, &buf->agp_start))
- return -EFAULT;
+ desc = (struct drm_buf_desc){
+ desc32.count, desc32.size, desc32.low_mark, desc32.high_mark,
+ desc32.flags, desc32.agp_start
+ };
- err = drm_ioctl(file, DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
+ err = drm_ioctl_kernel(file, drm_legacy_addbufs, &desc,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
if (err)
return err;
- if (__copy_in_user(argp, buf, offsetof(drm_buf_desc32_t, agp_start))
- || __get_user(agp_start, &buf->agp_start)
- || __put_user(agp_start, &argp->agp_start))
+ desc32 = (drm_buf_desc32_t){
+ desc.count, desc.size, desc.low_mark, desc.high_mark,
+ desc.flags, desc.agp_start
+ };
+ if (copy_to_user(argp, &desc32, sizeof(drm_buf_desc32_t)))
return -EFAULT;
return 0;
@@ -408,21 +343,17 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd,
{
drm_buf_desc32_t b32;
drm_buf_desc32_t __user *argp = (void __user *)arg;
- struct drm_buf_desc __user *buf;
+ struct drm_buf_desc buf;
if (copy_from_user(&b32, argp, sizeof(b32)))
return -EFAULT;
- buf = compat_alloc_user_space(sizeof(*buf));
- if (!buf)
- return -EFAULT;
-
- if (__put_user(b32.size, &buf->size)
- || __put_user(b32.low_mark, &buf->low_mark)
- || __put_user(b32.high_mark, &buf->high_mark))
- return -EFAULT;
+ buf.size = b32.size;
+ buf.low_mark = b32.low_mark;
+ buf.high_mark = b32.high_mark;
- return drm_ioctl(file, DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
+ return drm_ioctl_kernel(file, drm_legacy_markbufs, &buf,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
typedef struct drm_buf_info32 {
@@ -430,52 +361,42 @@ typedef struct drm_buf_info32 {
u32 list;
} drm_buf_info32_t;
+static int copy_one_buf32(void *data, int count, struct drm_buf_entry *from)
+{
+ drm_buf_info32_t *request = data;
+ drm_buf_desc32_t __user *to = compat_ptr(request->list);
+ drm_buf_desc32_t v = {.count = from->buf_count,
+ .size = from->buf_size,
+ .low_mark = from->low_mark,
+ .high_mark = from->high_mark};
+ return copy_to_user(to + count, &v, offsetof(drm_buf_desc32_t, flags));
+}
+
+static int drm_legacy_infobufs32(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_buf_info32_t *request = data;
+ return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf32);
+}
+
static int compat_drm_infobufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_info32_t req32;
drm_buf_info32_t __user *argp = (void __user *)arg;
- drm_buf_desc32_t __user *to;
- struct drm_buf_info __user *request;
- struct drm_buf_desc __user *list;
- size_t nbytes;
- int i, err;
- int count, actual;
+ int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
- count = req32.count;
- to = (drm_buf_desc32_t __user *) (unsigned long)req32.list;
- if (count < 0)
- count = 0;
- if (count > 0
- && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t)))
- return -EFAULT;
-
- nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
- request = compat_alloc_user_space(nbytes);
- if (!request)
- return -EFAULT;
- list = (struct drm_buf_desc *) (request + 1);
-
- if (__put_user(count, &request->count)
- || __put_user(list, &request->list))
- return -EFAULT;
+ if (req32.count < 0)
+ req32.count = 0;
- err = drm_ioctl(file, DRM_IOCTL_INFO_BUFS, (unsigned long)request);
+ err = drm_ioctl_kernel(file, drm_legacy_infobufs32, &req32, DRM_AUTH);
if (err)
return err;
- if (__get_user(actual, &request->count))
- return -EFAULT;
- if (count >= actual)
- for (i = 0; i < actual; ++i)
- if (__copy_in_user(&to[i], &list[i],
- offsetof(struct drm_buf_desc, flags)))
- return -EFAULT;
-
- if (__put_user(actual, &argp->count))
+ if (put_user(req32.count, &argp->count))
return -EFAULT;
return 0;
@@ -494,54 +415,52 @@ typedef struct drm_buf_map32 {
u32 list; /**< Buffer information */
} drm_buf_map32_t;
+static int map_one_buf32(void *data, int idx, unsigned long virtual,
+ struct drm_buf *buf)
+{
+ drm_buf_map32_t *request = data;
+ drm_buf_pub32_t __user *to = compat_ptr(request->list) + idx;
+ drm_buf_pub32_t v;
+
+ v.idx = buf->idx;
+ v.total = buf->total;
+ v.used = 0;
+ v.address = virtual + buf->offset;
+ if (copy_to_user(to, &v, sizeof(v)))
+ return -EFAULT;
+ return 0;
+}
+
+static int drm_legacy_mapbufs32(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ drm_buf_map32_t *request = data;
+ void __user *v;
+ int err = __drm_legacy_mapbufs(dev, data, &request->count,
+ &v, map_one_buf32,
+ file_priv);
+ request->virtual = ptr_to_compat(v);
+ return err;
+}
+
static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_map32_t __user *argp = (void __user *)arg;
drm_buf_map32_t req32;
- drm_buf_pub32_t __user *list32;
- struct drm_buf_map __user *request;
- struct drm_buf_pub __user *list;
- int i, err;
- int count, actual;
- size_t nbytes;
- void __user *addr;
+ int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
- count = req32.count;
- list32 = (void __user *)(unsigned long)req32.list;
-
- if (count < 0)
+ if (req32.count < 0)
return -EINVAL;
- nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
- request = compat_alloc_user_space(nbytes);
- if (!request)
- return -EFAULT;
- list = (struct drm_buf_pub *) (request + 1);
-
- if (__put_user(count, &request->count)
- || __put_user(list, &request->list))
- return -EFAULT;
- err = drm_ioctl(file, DRM_IOCTL_MAP_BUFS, (unsigned long)request);
+ err = drm_ioctl_kernel(file, drm_legacy_mapbufs32, &req32, DRM_AUTH);
if (err)
return err;
- if (__get_user(actual, &request->count))
- return -EFAULT;
- if (count >= actual)
- for (i = 0; i < actual; ++i)
- if (__copy_in_user(&list32[i], &list[i],
- offsetof(struct drm_buf_pub, address))
- || __get_user(addr, &list[i].address)
- || __put_user((unsigned long)addr,
- &list32[i].address))
- return -EFAULT;
-
- if (__put_user(actual, &argp->count)
- || __get_user(addr, &request->virtual)
- || __put_user((unsigned long)addr, &argp->virtual))
+ if (put_user(req32.count, &argp->count)
+ || put_user(req32.virtual, &argp->virtual))
return -EFAULT;
return 0;
@@ -556,21 +475,15 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_free32_t req32;
- struct drm_buf_free __user *request;
+ struct drm_buf_free request;
drm_buf_free32_t __user *argp = (void __user *)arg;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request)
- return -EFAULT;
- if (__put_user(req32.count, &request->count)
- || __put_user((int __user *)(unsigned long)req32.list,
- &request->list))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_FREE_BUFS, (unsigned long)request);
+ request.count = req32.count;
+ request.list = compat_ptr(req32.list);
+ return drm_ioctl_kernel(file, drm_legacy_freebufs, &request, DRM_AUTH);
}
typedef struct drm_ctx_priv_map32 {
@@ -582,48 +495,36 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_ctx_priv_map32_t req32;
- struct drm_ctx_priv_map __user *request;
+ struct drm_ctx_priv_map request;
drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request)
- return -EFAULT;
- if (__put_user(req32.ctx_id, &request->ctx_id)
- || __put_user((void *)(unsigned long)req32.handle,
- &request->handle))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
+ request.ctx_id = req32.ctx_id;
+ request.handle = compat_ptr(req32.handle);
+ return drm_ioctl_kernel(file, drm_legacy_setsareactx, &request,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct drm_ctx_priv_map __user *request;
+ struct drm_ctx_priv_map req;
+ drm_ctx_priv_map32_t req32;
drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
int err;
- unsigned int ctx_id;
- void *handle;
-
- if (!access_ok(VERIFY_WRITE, argp, sizeof(*argp))
- || __get_user(ctx_id, &argp->ctx_id))
- return -EFAULT;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request)
- return -EFAULT;
- if (__put_user(ctx_id, &request->ctx_id))
+ if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
- err = drm_ioctl(file, DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
+ req.ctx_id = req32.ctx_id;
+ err = drm_ioctl_kernel(file, drm_legacy_getsareactx, &req, DRM_AUTH);
if (err)
return err;
- if (__get_user(handle, &request->handle)
- || __put_user((unsigned long)handle, &argp->handle))
+ req32.handle = ptr_to_compat(req.handle);
+ if (copy_to_user(argp, &req32, sizeof(req32)))
return -EFAULT;
return 0;
@@ -639,26 +540,20 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd,
{
drm_ctx_res32_t __user *argp = (void __user *)arg;
drm_ctx_res32_t res32;
- struct drm_ctx_res __user *res;
+ struct drm_ctx_res res;
int err;
if (copy_from_user(&res32, argp, sizeof(res32)))
return -EFAULT;
- res = compat_alloc_user_space(sizeof(*res));
- if (!res)
- return -EFAULT;
- if (__put_user(res32.count, &res->count)
- || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
- &res->contexts))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_RES_CTX, (unsigned long)res);
+ res.count = res32.count;
+ res.contexts = compat_ptr(res32.contexts);
+ err = drm_ioctl_kernel(file, drm_legacy_resctx, &res, DRM_AUTH);
if (err)
return err;
- if (__get_user(res32.count, &res->count)
- || __put_user(res32.count, &argp->count))
+ res32.count = res.count;
+ if (copy_to_user(argp, &res32, sizeof(res32)))
return -EFAULT;
return 0;
@@ -682,38 +577,26 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
{
drm_dma32_t d32;
drm_dma32_t __user *argp = (void __user *)arg;
- struct drm_dma __user *d;
+ struct drm_dma d;
int err;
if (copy_from_user(&d32, argp, sizeof(d32)))
return -EFAULT;
- d = compat_alloc_user_space(sizeof(*d));
- if (!d)
- return -EFAULT;
-
- if (__put_user(d32.context, &d->context)
- || __put_user(d32.send_count, &d->send_count)
- || __put_user((int __user *)(unsigned long)d32.send_indices,
- &d->send_indices)
- || __put_user((int __user *)(unsigned long)d32.send_sizes,
- &d->send_sizes)
- || __put_user(d32.flags, &d->flags)
- || __put_user(d32.request_count, &d->request_count)
- || __put_user((int __user *)(unsigned long)d32.request_indices,
- &d->request_indices)
- || __put_user((int __user *)(unsigned long)d32.request_sizes,
- &d->request_sizes))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_DMA, (unsigned long)d);
+ d.context = d32.context;
+ d.send_count = d32.send_count;
+ d.send_indices = compat_ptr(d32.send_indices);
+ d.send_sizes = compat_ptr(d32.send_sizes);
+ d.flags = d32.flags;
+ d.request_count = d32.request_count;
+ d.request_indices = compat_ptr(d32.request_indices);
+ d.request_sizes = compat_ptr(d32.request_sizes);
+ err = drm_ioctl_kernel(file, drm_legacy_dma_ioctl, &d, DRM_AUTH);
if (err)
return err;
- if (__get_user(d32.request_size, &d->request_size)
- || __get_user(d32.granted_count, &d->granted_count)
- || __put_user(d32.request_size, &argp->request_size)
- || __put_user(d32.granted_count, &argp->granted_count))
+ if (put_user(d.request_size, &argp->request_size)
+ || put_user(d.granted_count, &argp->granted_count))
return -EFAULT;
return 0;
@@ -728,17 +611,13 @@ static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_mode32_t __user *argp = (void __user *)arg;
- drm_agp_mode32_t m32;
- struct drm_agp_mode __user *mode;
-
- if (get_user(m32.mode, &argp->mode))
- return -EFAULT;
+ struct drm_agp_mode mode;
- mode = compat_alloc_user_space(sizeof(*mode));
- if (put_user(m32.mode, &mode->mode))
+ if (get_user(mode.mode, &argp->mode))
return -EFAULT;
- return drm_ioctl(file, DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
+ return drm_ioctl_kernel(file, drm_agp_enable_ioctl, &mode,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
typedef struct drm_agp_info32 {
@@ -760,28 +639,22 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd,
{
drm_agp_info32_t __user *argp = (void __user *)arg;
drm_agp_info32_t i32;
- struct drm_agp_info __user *info;
+ struct drm_agp_info info;
int err;
- info = compat_alloc_user_space(sizeof(*info));
- if (!info)
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
+ err = drm_ioctl_kernel(file, drm_agp_info_ioctl, &info, DRM_AUTH);
if (err)
return err;
- if (__get_user(i32.agp_version_major, &info->agp_version_major)
- || __get_user(i32.agp_version_minor, &info->agp_version_minor)
- || __get_user(i32.mode, &info->mode)
- || __get_user(i32.aperture_base, &info->aperture_base)
- || __get_user(i32.aperture_size, &info->aperture_size)
- || __get_user(i32.memory_allowed, &info->memory_allowed)
- || __get_user(i32.memory_used, &info->memory_used)
- || __get_user(i32.id_vendor, &info->id_vendor)
- || __get_user(i32.id_device, &info->id_device))
- return -EFAULT;
-
+ i32.agp_version_major = info.agp_version_major;
+ i32.agp_version_minor = info.agp_version_minor;
+ i32.mode = info.mode;
+ i32.aperture_base = info.aperture_base;
+ i32.aperture_size = info.aperture_size;
+ i32.memory_allowed = info.memory_allowed;
+ i32.memory_used = info.memory_used;
+ i32.id_vendor = info.id_vendor;
+ i32.id_device = info.id_device;
if (copy_to_user(argp, &i32, sizeof(i32)))
return -EFAULT;
@@ -800,26 +673,24 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
{
drm_agp_buffer32_t __user *argp = (void __user *)arg;
drm_agp_buffer32_t req32;
- struct drm_agp_buffer __user *request;
+ struct drm_agp_buffer request;
int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request
- || __put_user(req32.size, &request->size)
- || __put_user(req32.type, &request->type))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
+ request.size = req32.size;
+ request.type = req32.type;
+ err = drm_ioctl_kernel(file, drm_agp_alloc_ioctl, &request,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
if (err)
return err;
- if (__get_user(req32.handle, &request->handle)
- || __get_user(req32.physical, &request->physical)
- || copy_to_user(argp, &req32, sizeof(req32))) {
- drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
+ req32.handle = request.handle;
+ req32.physical = request.physical;
+ if (copy_to_user(argp, &req32, sizeof(req32))) {
+ drm_ioctl_kernel(file, drm_agp_free_ioctl, &request,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
return -EFAULT;
}
@@ -830,16 +701,13 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_buffer32_t __user *argp = (void __user *)arg;
- struct drm_agp_buffer __user *request;
- u32 handle;
+ struct drm_agp_buffer request;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request
- || get_user(handle, &argp->handle)
- || __put_user(handle, &request->handle))
+ if (get_user(request.handle, &argp->handle))
return -EFAULT;
- return drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
+ return drm_ioctl_kernel(file, drm_agp_free_ioctl, &request,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
typedef struct drm_agp_binding32 {
@@ -852,34 +720,28 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
{
drm_agp_binding32_t __user *argp = (void __user *)arg;
drm_agp_binding32_t req32;
- struct drm_agp_binding __user *request;
+ struct drm_agp_binding request;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request
- || __put_user(req32.handle, &request->handle)
- || __put_user(req32.offset, &request->offset))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_AGP_BIND, (unsigned long)request);
+ request.handle = req32.handle;
+ request.offset = req32.offset;
+ return drm_ioctl_kernel(file, drm_agp_bind_ioctl, &request,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_binding32_t __user *argp = (void __user *)arg;
- struct drm_agp_binding __user *request;
- u32 handle;
+ struct drm_agp_binding request;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request
- || get_user(handle, &argp->handle)
- || __put_user(handle, &request->handle))
+ if (get_user(request.handle, &argp->handle))
return -EFAULT;
- return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
+ return drm_ioctl_kernel(file, drm_agp_unbind_ioctl, &request,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
#endif /* CONFIG_AGP */
@@ -892,23 +754,19 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_scatter_gather32_t __user *argp = (void __user *)arg;
- struct drm_scatter_gather __user *request;
+ struct drm_scatter_gather request;
int err;
- unsigned long x;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
- || __get_user(x, &argp->size)
- || __put_user(x, &request->size))
+ if (get_user(request.size, &argp->size))
return -EFAULT;
- err = drm_ioctl(file, DRM_IOCTL_SG_ALLOC, (unsigned long)request);
+ err = drm_ioctl_kernel(file, drm_legacy_sg_alloc, &request,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
if (err)
return err;
/* XXX not sure about the handle conversion here... */
- if (__get_user(x, &request->handle)
- || __put_user(x >> PAGE_SHIFT, &argp->handle))
+ if (put_user(request.handle >> PAGE_SHIFT, &argp->handle))
return -EFAULT;
return 0;
@@ -918,19 +776,17 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_scatter_gather32_t __user *argp = (void __user *)arg;
- struct drm_scatter_gather __user *request;
+ struct drm_scatter_gather request;
unsigned long x;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
- || __get_user(x, &argp->handle)
- || __put_user(x << PAGE_SHIFT, &request->handle))
+ if (get_user(x, &argp->handle))
return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_SG_FREE, (unsigned long)request);
+ request.handle = x << PAGE_SHIFT;
+ return drm_ioctl_kernel(file, drm_legacy_sg_free, &request,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+#if defined(CONFIG_X86)
typedef struct drm_update_draw32 {
drm_drawable_t handle;
unsigned int type;
@@ -943,22 +799,11 @@ static int compat_drm_update_draw(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_update_draw32_t update32;
- struct drm_update_draw __user *request;
- int err;
-
if (copy_from_user(&update32, (void __user *)arg, sizeof(update32)))
return -EFAULT;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request ||
- __put_user(update32.handle, &request->handle) ||
- __put_user(update32.type, &request->type) ||
- __put_user(update32.num, &request->num) ||
- __put_user(update32.data, &request->data))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
- return err;
+ return drm_ioctl_kernel(file, drm_noop, NULL,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
#endif
@@ -985,36 +830,30 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
{
drm_wait_vblank32_t __user *argp = (void __user *)arg;
drm_wait_vblank32_t req32;
- union drm_wait_vblank __user *request;
+ union drm_wait_vblank req;
int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
- request = compat_alloc_user_space(sizeof(*request));
- if (!request
- || __put_user(req32.request.type, &request->request.type)
- || __put_user(req32.request.sequence, &request->request.sequence)
- || __put_user(req32.request.signal, &request->request.signal))
- return -EFAULT;
-
- err = drm_ioctl(file, DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
+ req.request.type = req32.request.type;
+ req.request.sequence = req32.request.sequence;
+ req.request.signal = req32.request.signal;
+ err = drm_ioctl_kernel(file, drm_wait_vblank, &req, DRM_UNLOCKED);
if (err)
return err;
- if (__get_user(req32.reply.type, &request->reply.type)
- || __get_user(req32.reply.sequence, &request->reply.sequence)
- || __get_user(req32.reply.tval_sec, &request->reply.tval_sec)
- || __get_user(req32.reply.tval_usec, &request->reply.tval_usec))
- return -EFAULT;
-
+ req32.reply.type = req.reply.type;
+ req32.reply.sequence = req.reply.sequence;
+ req32.reply.tval_sec = req.reply.tval_sec;
+ req32.reply.tval_usec = req.reply.tval_usec;
if (copy_to_user(argp, &req32, sizeof(req32)))
return -EFAULT;
return 0;
}
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+#if defined(CONFIG_X86)
typedef struct drm_mode_fb_cmd232 {
u32 fb_id;
u32 width;
@@ -1031,82 +870,67 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg;
- struct drm_mode_fb_cmd232 req32;
- struct drm_mode_fb_cmd2 __user *req64;
- int i;
+ struct drm_mode_fb_cmd2 req64;
int err;
- if (copy_from_user(&req32, argp, sizeof(req32)))
+ if (copy_from_user(&req64, argp,
+ offsetof(drm_mode_fb_cmd232_t, modifier)))
return -EFAULT;
- req64 = compat_alloc_user_space(sizeof(*req64));
-
- if (!access_ok(VERIFY_WRITE, req64, sizeof(*req64))
- || __put_user(req32.width, &req64->width)
- || __put_user(req32.height, &req64->height)
- || __put_user(req32.pixel_format, &req64->pixel_format)
- || __put_user(req32.flags, &req64->flags))
+ if (copy_from_user(&req64.modifier, &argp->modifier,
+ sizeof(req64.modifier)))
return -EFAULT;
- for (i = 0; i < 4; i++) {
- if (__put_user(req32.handles[i], &req64->handles[i]))
- return -EFAULT;
- if (__put_user(req32.pitches[i], &req64->pitches[i]))
- return -EFAULT;
- if (__put_user(req32.offsets[i], &req64->offsets[i]))
- return -EFAULT;
- if (__put_user(req32.modifier[i], &req64->modifier[i]))
- return -EFAULT;
- }
-
- err = drm_ioctl(file, DRM_IOCTL_MODE_ADDFB2, (unsigned long)req64);
+ err = drm_ioctl_kernel(file, drm_mode_addfb2, &req64,
+ DRM_CONTROL_ALLOW|DRM_UNLOCKED);
if (err)
return err;
- if (__get_user(req32.fb_id, &req64->fb_id))
- return -EFAULT;
-
- if (copy_to_user(argp, &req32, sizeof(req32)))
+ if (put_user(req64.fb_id, &argp->fb_id))
return -EFAULT;
return 0;
}
#endif
-static drm_ioctl_compat_t *drm_compat_ioctls[] = {
- [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
- [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
- [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
- [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT32)] = compat_drm_getclient,
- [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS32)] = compat_drm_getstats,
- [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE32)] = compat_drm_setunique,
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP32)] = compat_drm_addmap,
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS32)] = compat_drm_addbufs,
- [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS32)] = compat_drm_markbufs,
- [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS32)] = compat_drm_infobufs,
- [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS32)] = compat_drm_mapbufs,
- [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS32)] = compat_drm_freebufs,
- [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP32)] = compat_drm_rmmap,
- [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX32)] = compat_drm_setsareactx,
- [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx,
- [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx,
- [DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma,
+static struct {
+ drm_ioctl_compat_t *fn;
+ char *name;
+} drm_compat_ioctls[] = {
+#define DRM_IOCTL32_DEF(n, f) [DRM_IOCTL_NR(n##32)] = {.fn = f, .name = #n}
+ DRM_IOCTL32_DEF(DRM_IOCTL_VERSION, compat_drm_version),
+ DRM_IOCTL32_DEF(DRM_IOCTL_GET_UNIQUE, compat_drm_getunique),
+ DRM_IOCTL32_DEF(DRM_IOCTL_GET_MAP, compat_drm_getmap),
+ DRM_IOCTL32_DEF(DRM_IOCTL_GET_CLIENT, compat_drm_getclient),
+ DRM_IOCTL32_DEF(DRM_IOCTL_GET_STATS, compat_drm_getstats),
+ DRM_IOCTL32_DEF(DRM_IOCTL_SET_UNIQUE, compat_drm_setunique),
+ DRM_IOCTL32_DEF(DRM_IOCTL_ADD_MAP, compat_drm_addmap),
+ DRM_IOCTL32_DEF(DRM_IOCTL_ADD_BUFS, compat_drm_addbufs),
+ DRM_IOCTL32_DEF(DRM_IOCTL_MARK_BUFS, compat_drm_markbufs),
+ DRM_IOCTL32_DEF(DRM_IOCTL_INFO_BUFS, compat_drm_infobufs),
+ DRM_IOCTL32_DEF(DRM_IOCTL_MAP_BUFS, compat_drm_mapbufs),
+ DRM_IOCTL32_DEF(DRM_IOCTL_FREE_BUFS, compat_drm_freebufs),
+ DRM_IOCTL32_DEF(DRM_IOCTL_RM_MAP, compat_drm_rmmap),
+ DRM_IOCTL32_DEF(DRM_IOCTL_SET_SAREA_CTX, compat_drm_setsareactx),
+ DRM_IOCTL32_DEF(DRM_IOCTL_GET_SAREA_CTX, compat_drm_getsareactx),
+ DRM_IOCTL32_DEF(DRM_IOCTL_RES_CTX, compat_drm_resctx),
+ DRM_IOCTL32_DEF(DRM_IOCTL_DMA, compat_drm_dma),
#if IS_ENABLED(CONFIG_AGP)
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable,
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = compat_drm_agp_info,
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc,
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE32)] = compat_drm_agp_free,
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND32)] = compat_drm_agp_bind,
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND32)] = compat_drm_agp_unbind,
+ DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ENABLE, compat_drm_agp_enable),
+ DRM_IOCTL32_DEF(DRM_IOCTL_AGP_INFO, compat_drm_agp_info),
+ DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ALLOC, compat_drm_agp_alloc),
+ DRM_IOCTL32_DEF(DRM_IOCTL_AGP_FREE, compat_drm_agp_free),
+ DRM_IOCTL32_DEF(DRM_IOCTL_AGP_BIND, compat_drm_agp_bind),
+ DRM_IOCTL32_DEF(DRM_IOCTL_AGP_UNBIND, compat_drm_agp_unbind),
#endif
- [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc,
- [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free,
+ DRM_IOCTL32_DEF(DRM_IOCTL_SG_ALLOC, compat_drm_sg_alloc),
+ DRM_IOCTL32_DEF(DRM_IOCTL_SG_FREE, compat_drm_sg_free),
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
- [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
+ DRM_IOCTL32_DEF(DRM_IOCTL_UPDATE_DRAW, compat_drm_update_draw),
#endif
- [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+ DRM_IOCTL32_DEF(DRM_IOCTL_WAIT_VBLANK, compat_drm_wait_vblank),
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
- [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
+ DRM_IOCTL32_DEF(DRM_IOCTL_MODE_ADDFB2, compat_drm_mode_addfb2),
#endif
};
@@ -1127,6 +951,7 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
+ struct drm_file *file_priv = filp->private_data;
drm_ioctl_compat_t *fn;
int ret;
@@ -1137,13 +962,18 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr >= ARRAY_SIZE(drm_compat_ioctls))
return drm_ioctl(filp, cmd, arg);
- fn = drm_compat_ioctls[nr];
-
- if (fn != NULL)
- ret = (*fn) (filp, cmd, arg);
- else
- ret = drm_ioctl(filp, cmd, arg);
+ fn = drm_compat_ioctls[nr].fn;
+ if (!fn)
+ return drm_ioctl(filp, cmd, arg);
+ DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated,
+ drm_compat_ioctls[nr].name);
+ ret = (*fn)(filp, cmd, arg);
+ if (ret)
+ DRM_DEBUG("ret = %d\n", ret);
return ret;
}
EXPORT_SYMBOL(drm_compat_ioctl);
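The table entries above are generated by the DRM_IOCTL32_DEF macro defined in this hunk; as a rough illustration of what one entry expands to (names taken from the hunk itself, expansion written out here for clarity only):

/*
 *   DRM_IOCTL32_DEF(DRM_IOCTL_VERSION, compat_drm_version)
 *
 * expands to
 *
 *   [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = {
 *           .fn   = compat_drm_version,
 *           .name = "DRM_IOCTL_VERSION",
 *   },
 *
 * so drm_compat_ioctl() can index drm_compat_ioctls[] by ioctl number
 * and print the .name field in its DRM_DEBUG trace.
 */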
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 865e3ee4d743..335821153776 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -107,7 +107,7 @@
*
* Copies the bus id from drm_device::unique into user space.
*/
-static int drm_getunique(struct drm_device *dev, void *data,
+int drm_getunique(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_unique *u = data;
@@ -172,7 +172,7 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
* Searches for the client with the specified index and copies its information
* into userspace
*/
-static int drm_getclient(struct drm_device *dev, void *data,
+int drm_getclient(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_client *client = data;
@@ -461,7 +461,7 @@ static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
*
* Fills in the version information in \p arg.
*/
-static int drm_version(struct drm_device *dev, void *data,
+int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_version *version = data;
@@ -694,6 +694,33 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
* structure.
*/
+long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
+ u32 flags)
+{
+ struct drm_file *file_priv = file->private_data;
+ struct drm_device *dev = file_priv->minor->dev;
+ int retcode;
+
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
+ retcode = drm_ioctl_permit(flags, file_priv);
+ if (unlikely(retcode))
+ return retcode;
+
+ /* Enforce sane locking for modern driver ioctls. */
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY) ||
+ (flags & DRM_UNLOCKED))
+ retcode = func(dev, kdata, file_priv);
+ else {
+ mutex_lock(&drm_global_mutex);
+ retcode = func(dev, kdata, file_priv);
+ mutex_unlock(&drm_global_mutex);
+ }
+ return retcode;
+}
+EXPORT_SYMBOL(drm_ioctl_kernel);
+
/**
* drm_ioctl - ioctl callback implementation for DRM drivers
* @filp: file this ioctl is called on
@@ -762,10 +789,6 @@ long drm_ioctl(struct file *filp,
goto err_i1;
}
- retcode = drm_ioctl_permit(ioctl->flags, file_priv);
- if (unlikely(retcode))
- goto err_i1;
-
if (ksize <= sizeof(stack_kdata)) {
kdata = stack_kdata;
} else {
@@ -784,16 +807,7 @@ long drm_ioctl(struct file *filp,
if (ksize > in_size)
memset(kdata + in_size, 0, ksize - in_size);
- /* Enforce sane locking for modern driver ioctls. */
- if (!drm_core_check_feature(dev, DRIVER_LEGACY) ||
- (ioctl->flags & DRM_UNLOCKED))
- retcode = func(dev, kdata, file_priv);
- else {
- mutex_lock(&drm_global_mutex);
- retcode = func(dev, kdata, file_priv);
- mutex_unlock(&drm_global_mutex);
- }
-
+ retcode = drm_ioctl_kernel(filp, func, kdata, ioctl->flags);
if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
retcode = -EFAULT;
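drm_ioctl_kernel() above is what lets the 32-bit wrappers in drm_ioc32.c do their conversion on the stack and hand a kernel-space struct straight to the core. A minimal, hypothetical wrapper following that pattern could look like the sketch below; drm_example32_t, struct drm_example and drm_example_ioctl are placeholders, while the calling convention is the one used throughout the hunks above.

/* Sketch only: convert a 32-bit layout on the stack, then pass the
 * kernel-space copy to the core through drm_ioctl_kernel(). */
static int compat_drm_example(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	drm_example32_t req32;		/* hypothetical packed 32-bit layout */
	struct drm_example req;		/* hypothetical native layout */

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	req.count = req32.count;
	req.list = compat_ptr(req32.list);	/* widen the 32-bit pointer */

	/* drm_example_ioctl is a drm_ioctl_t; flags mirror the drm_ioctls[] entry */
	return drm_ioctl_kernel(file, drm_example_ioctl, &req, DRM_AUTH);
}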
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index e4bb5ad747c8..280fbeb846ff 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -74,6 +74,13 @@ int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f);
+int __drm_legacy_infobufs(struct drm_device *, void *, int *,
+ int (*)(void *, int, struct drm_buf_entry *));
+int __drm_legacy_mapbufs(struct drm_device *, void *, int *,
+ void __user **,
+ int (*)(void *, int, unsigned long, struct drm_buf *),
+ struct drm_file *);
+
#ifdef CONFIG_DRM_VM
void drm_legacy_vma_flush(struct drm_device *d);
#else
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index c77a5aced81a..d48fd7c918f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -181,8 +181,8 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
- if (index >= MAX_FB_BUFFER)
- return DMA_ERROR_CODE;
+ if (WARN_ON_ONCE(index >= MAX_FB_BUFFER))
+ return 0;
return exynos_fb->dma_addr[index];
}
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index 45cf363d25ad..a45bb22275a7 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -159,6 +159,8 @@ extern int mga_dma_bootstrap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mga_dma_init(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int mga_getparam(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern int mga_dma_flush(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mga_dma_reset(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 729bfd56b55f..245fb2e359cf 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -61,46 +61,25 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_mga_init32_t init32;
- drm_mga_init_t __user *init;
- int err = 0, i;
+ drm_mga_init_t init;
if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
return -EFAULT;
- init = compat_alloc_user_space(sizeof(*init));
- if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
- || __put_user(init32.func, &init->func)
- || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
- || __put_user(init32.chipset, &init->chipset)
- || __put_user(init32.sgram, &init->sgram)
- || __put_user(init32.maccess, &init->maccess)
- || __put_user(init32.fb_cpp, &init->fb_cpp)
- || __put_user(init32.front_offset, &init->front_offset)
- || __put_user(init32.front_pitch, &init->front_pitch)
- || __put_user(init32.back_offset, &init->back_offset)
- || __put_user(init32.back_pitch, &init->back_pitch)
- || __put_user(init32.depth_cpp, &init->depth_cpp)
- || __put_user(init32.depth_offset, &init->depth_offset)
- || __put_user(init32.depth_pitch, &init->depth_pitch)
- || __put_user(init32.fb_offset, &init->fb_offset)
- || __put_user(init32.mmio_offset, &init->mmio_offset)
- || __put_user(init32.status_offset, &init->status_offset)
- || __put_user(init32.warp_offset, &init->warp_offset)
- || __put_user(init32.primary_offset, &init->primary_offset)
- || __put_user(init32.buffers_offset, &init->buffers_offset))
- return -EFAULT;
-
- for (i = 0; i < MGA_NR_TEX_HEAPS; i++) {
- err |=
- __put_user(init32.texture_offset[i],
- &init->texture_offset[i]);
- err |=
- __put_user(init32.texture_size[i], &init->texture_size[i]);
- }
- if (err)
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_MGA_INIT, (unsigned long)init);
+ init.func = init32.func;
+ init.sarea_priv_offset = init32.sarea_priv_offset;
+ memcpy(&init.chipset, &init32.chipset,
+ offsetof(drm_mga_init_t, fb_offset) -
+ offsetof(drm_mga_init_t, chipset));
+ init.fb_offset = init32.fb_offset;
+ init.mmio_offset = init32.mmio_offset;
+ init.status_offset = init32.status_offset;
+ init.warp_offset = init32.warp_offset;
+ init.primary_offset = init32.primary_offset;
+ init.buffers_offset = init32.buffers_offset;
+
+ return drm_ioctl_kernel(file, mga_dma_init, &init,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
typedef struct drm_mga_getparam32 {
@@ -112,19 +91,14 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_mga_getparam32_t getparam32;
- drm_mga_getparam_t __user *getparam;
+ drm_mga_getparam_t getparam;
if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
return -EFAULT;
- getparam = compat_alloc_user_space(sizeof(*getparam));
- if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam))
- || __put_user(getparam32.param, &getparam->param)
- || __put_user((void __user *)(unsigned long)getparam32.value,
- &getparam->value))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
+ getparam.param = getparam32.param;
+ getparam.value = compat_ptr(getparam32.value);
+ return drm_ioctl_kernel(file, mga_getparam, &getparam, DRM_AUTH);
}
typedef struct drm_mga_drm_bootstrap32 {
@@ -141,48 +115,33 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_mga_dma_bootstrap32_t dma_bootstrap32;
- drm_mga_dma_bootstrap_t __user *dma_bootstrap;
+ drm_mga_dma_bootstrap_t dma_bootstrap;
int err;
if (copy_from_user(&dma_bootstrap32, (void __user *)arg,
sizeof(dma_bootstrap32)))
return -EFAULT;
- dma_bootstrap = compat_alloc_user_space(sizeof(*dma_bootstrap));
- if (!access_ok(VERIFY_WRITE, dma_bootstrap, sizeof(*dma_bootstrap))
- || __put_user(dma_bootstrap32.texture_handle,
- &dma_bootstrap->texture_handle)
- || __put_user(dma_bootstrap32.texture_size,
- &dma_bootstrap->texture_size)
- || __put_user(dma_bootstrap32.primary_size,
- &dma_bootstrap->primary_size)
- || __put_user(dma_bootstrap32.secondary_bin_count,
- &dma_bootstrap->secondary_bin_count)
- || __put_user(dma_bootstrap32.secondary_bin_size,
- &dma_bootstrap->secondary_bin_size)
- || __put_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode)
- || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
- return -EFAULT;
+ dma_bootstrap.texture_handle = dma_bootstrap32.texture_handle;
+ dma_bootstrap.texture_size = dma_bootstrap32.texture_size;
+ dma_bootstrap.primary_size = dma_bootstrap32.primary_size;
+ dma_bootstrap.secondary_bin_count = dma_bootstrap32.secondary_bin_count;
+ dma_bootstrap.secondary_bin_size = dma_bootstrap32.secondary_bin_size;
+ dma_bootstrap.agp_mode = dma_bootstrap32.agp_mode;
+ dma_bootstrap.agp_size = dma_bootstrap32.agp_size;
- err = drm_ioctl(file, DRM_IOCTL_MGA_DMA_BOOTSTRAP,
- (unsigned long)dma_bootstrap);
+ err = drm_ioctl_kernel(file, mga_dma_bootstrap, &dma_bootstrap,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
if (err)
return err;
- if (__get_user(dma_bootstrap32.texture_handle,
- &dma_bootstrap->texture_handle)
- || __get_user(dma_bootstrap32.texture_size,
- &dma_bootstrap->texture_size)
- || __get_user(dma_bootstrap32.primary_size,
- &dma_bootstrap->primary_size)
- || __get_user(dma_bootstrap32.secondary_bin_count,
- &dma_bootstrap->secondary_bin_count)
- || __get_user(dma_bootstrap32.secondary_bin_size,
- &dma_bootstrap->secondary_bin_size)
- || __get_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode)
- || __get_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
- return -EFAULT;
-
+ dma_bootstrap32.texture_handle = dma_bootstrap.texture_handle;
+ dma_bootstrap32.texture_size = dma_bootstrap.texture_size;
+ dma_bootstrap32.primary_size = dma_bootstrap.primary_size;
+ dma_bootstrap32.secondary_bin_count = dma_bootstrap.secondary_bin_count;
+ dma_bootstrap32.secondary_bin_size = dma_bootstrap.secondary_bin_size;
+ dma_bootstrap32.agp_mode = dma_bootstrap.agp_mode;
+ dma_bootstrap32.agp_size = dma_bootstrap.agp_size;
if (copy_to_user((void __user *)arg, &dma_bootstrap32,
sizeof(dma_bootstrap32)))
return -EFAULT;
@@ -190,10 +149,14 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
return 0;
}
-drm_ioctl_compat_t *mga_compat_ioctls[] = {
- [DRM_MGA_INIT] = compat_mga_init,
- [DRM_MGA_GETPARAM] = compat_mga_getparam,
- [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
+static struct {
+ drm_ioctl_compat_t *fn;
+ char *name;
+} mga_compat_ioctls[] = {
+#define DRM_IOCTL32_DEF(n, f) [DRM_##n] = {.fn = f, .name = #n}
+ DRM_IOCTL32_DEF(MGA_INIT, compat_mga_init),
+ DRM_IOCTL32_DEF(MGA_GETPARAM, compat_mga_getparam),
+ DRM_IOCTL32_DEF(MGA_DMA_BOOTSTRAP, compat_mga_dma_bootstrap),
};
/**
@@ -208,19 +171,27 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
+ struct drm_file *file_priv = filp->private_data;
drm_ioctl_compat_t *fn = NULL;
int ret;
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
-
- if (fn != NULL)
- ret = (*fn) (filp, cmd, arg);
- else
- ret = drm_ioctl(filp, cmd, arg);
-
+ if (nr >= DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
+ return drm_ioctl(filp, cmd, arg);
+
+ fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE].fn;
+ if (!fn)
+ return drm_ioctl(filp, cmd, arg);
+
+ DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated,
+ mga_compat_ioctls[nr - DRM_COMMAND_BASE].name);
+ ret = (*fn) (filp, cmd, arg);
+ if (ret)
+ DRM_DEBUG("ret = %d\n", ret);
return ret;
}
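Note that the mga copy of DRM_IOCTL32_DEF indexes by driver command number rather than by ioctl number, which is why the lookup above subtracts DRM_COMMAND_BASE. An illustration of the expansion (written out here for clarity only):

/*
 *   DRM_IOCTL32_DEF(MGA_INIT, compat_mga_init)
 *
 * expands to
 *
 *   [DRM_MGA_INIT] = { .fn = compat_mga_init, .name = "MGA_INIT" },
 *
 * so mga_compat_ioctl() indexes the table with (nr - DRM_COMMAND_BASE).
 */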
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index 792f924496fc..e5f6b735f575 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1005,7 +1005,7 @@ static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *fil
return 0;
}
-static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_getparam_t *param = data;
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 08bd17d3925c..640f8b3f9443 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -105,7 +105,6 @@ radeon-y += \
vce_v2_0.o \
radeon_kfd.o
-radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
radeon-$(CONFIG_ACPI) += radeon_acpi.o
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0a6444d72000..997131d58c7f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -113,7 +113,6 @@ static inline bool radeon_is_atpx_hybrid(void) { return false; }
#endif
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
-#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
struct radeon_px_quirk {
u32 chip_vendor;
@@ -140,8 +139,6 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
* https://bugs.freedesktop.org/show_bug.cgi?id=101491
*/
{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
- /* macbook pro 8.2 */
- { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
{ 0, 0, 0, 0, 0 },
};
@@ -1245,25 +1242,17 @@ static void radeon_check_arguments(struct radeon_device *rdev)
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct radeon_device *rdev = dev->dev_private;
if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
- unsigned d3_delay = dev->pdev->d3_delay;
-
pr_info("radeon: switched on\n");
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
- dev->pdev->d3_delay = 20;
-
radeon_resume_kms(dev, true, true);
- dev->pdev->d3_delay = d3_delay;
-
dev->switch_power_state = DRM_SWITCH_POWER_ON;
drm_kms_helper_poll_enable(dev);
} else {
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 93d45aa5c3d4..e25cb51ce0ca 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -38,6 +38,7 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
+#include <linux/compat.h>
#include <drm/drm_gem.h>
#include <drm/drm_fb_helper.h>
@@ -154,8 +155,6 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *);
void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
@@ -505,6 +504,21 @@ long radeon_drm_ioctl(struct file *filp,
return ret;
}
+#ifdef CONFIG_COMPAT
+static long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+ int ret;
+
+ if (nr < DRM_COMMAND_BASE)
+ return drm_compat_ioctl(filp, cmd, arg);
+
+ ret = radeon_drm_ioctl(filp, cmd, arg);
+
+ return ret;
+}
+#endif
+
static const struct dev_pm_ops radeon_pm_ops = {
.suspend = radeon_pmops_suspend,
.resume = radeon_pmops_resume,
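The compat handler is now private to radeon_drv.c; it is presumably still wired up through the KMS file_operations in the usual DRM way, roughly as sketched below (the initializer name and the surrounding fields are assumptions, only the two ioctl entry points appear in the hunks above):

static const struct file_operations radeon_driver_kms_fops = {
	/* ... other fops elided in this sketch ... */
	.unlocked_ioctl = radeon_drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = radeon_kms_compat_ioctl,	/* now local to this file */
#endif
};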
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
deleted file mode 100644
index 0b98ea134579..000000000000
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ /dev/null
@@ -1,424 +0,0 @@
-/**
- * \file radeon_ioc32.c
- *
- * 32-bit ioctl compatibility routines for the Radeon DRM.
- *
- * \author Paul Mackerras <[email protected]>
- *
- * Copyright (C) Paul Mackerras 2005
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-#include <linux/compat.h>
-
-#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
-#include "radeon_drv.h"
-
-typedef struct drm_radeon_init32 {
- int func;
- u32 sarea_priv_offset;
- int is_pci;
- int cp_mode;
- int gart_size;
- int ring_size;
- int usec_timeout;
-
- unsigned int fb_bpp;
- unsigned int front_offset, front_pitch;
- unsigned int back_offset, back_pitch;
- unsigned int depth_bpp;
- unsigned int depth_offset, depth_pitch;
-
- u32 fb_offset;
- u32 mmio_offset;
- u32 ring_offset;
- u32 ring_rptr_offset;
- u32 buffers_offset;
- u32 gart_textures_offset;
-} drm_radeon_init32_t;
-
-static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_init32_t init32;
- drm_radeon_init_t __user *init;
-
- if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
- return -EFAULT;
-
- init = compat_alloc_user_space(sizeof(*init));
- if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
- || __put_user(init32.func, &init->func)
- || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
- || __put_user(init32.is_pci, &init->is_pci)
- || __put_user(init32.cp_mode, &init->cp_mode)
- || __put_user(init32.gart_size, &init->gart_size)
- || __put_user(init32.ring_size, &init->ring_size)
- || __put_user(init32.usec_timeout, &init->usec_timeout)
- || __put_user(init32.fb_bpp, &init->fb_bpp)
- || __put_user(init32.front_offset, &init->front_offset)
- || __put_user(init32.front_pitch, &init->front_pitch)
- || __put_user(init32.back_offset, &init->back_offset)
- || __put_user(init32.back_pitch, &init->back_pitch)
- || __put_user(init32.depth_bpp, &init->depth_bpp)
- || __put_user(init32.depth_offset, &init->depth_offset)
- || __put_user(init32.depth_pitch, &init->depth_pitch)
- || __put_user(init32.fb_offset, &init->fb_offset)
- || __put_user(init32.mmio_offset, &init->mmio_offset)
- || __put_user(init32.ring_offset, &init->ring_offset)
- || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
- || __put_user(init32.buffers_offset, &init->buffers_offset)
- || __put_user(init32.gart_textures_offset,
- &init->gart_textures_offset))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
-}
-
-typedef struct drm_radeon_clear32 {
- unsigned int flags;
- unsigned int clear_color;
- unsigned int clear_depth;
- unsigned int color_mask;
- unsigned int depth_mask; /* misnamed field: should be stencil */
- u32 depth_boxes;
-} drm_radeon_clear32_t;
-
-static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_clear32_t clr32;
- drm_radeon_clear_t __user *clr;
-
- if (copy_from_user(&clr32, (void __user *)arg, sizeof(clr32)))
- return -EFAULT;
-
- clr = compat_alloc_user_space(sizeof(*clr));
- if (!access_ok(VERIFY_WRITE, clr, sizeof(*clr))
- || __put_user(clr32.flags, &clr->flags)
- || __put_user(clr32.clear_color, &clr->clear_color)
- || __put_user(clr32.clear_depth, &clr->clear_depth)
- || __put_user(clr32.color_mask, &clr->color_mask)
- || __put_user(clr32.depth_mask, &clr->depth_mask)
- || __put_user((void __user *)(unsigned long)clr32.depth_boxes,
- &clr->depth_boxes))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
-}
-
-typedef struct drm_radeon_stipple32 {
- u32 mask;
-} drm_radeon_stipple32_t;
-
-static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_stipple32_t __user *argp = (void __user *)arg;
- drm_radeon_stipple_t __user *request;
- u32 mask;
-
- if (get_user(mask, &argp->mask))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user((unsigned int __user *)(unsigned long)mask,
- &request->mask))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
-}
-
-typedef struct drm_radeon_tex_image32 {
- unsigned int x, y; /* Blit coordinates */
- unsigned int width, height;
- u32 data;
-} drm_radeon_tex_image32_t;
-
-typedef struct drm_radeon_texture32 {
- unsigned int offset;
- int pitch;
- int format;
- int width; /* Texture image coordinates */
- int height;
- u32 image;
-} drm_radeon_texture32_t;
-
-static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_texture32_t req32;
- drm_radeon_texture_t __user *request;
- drm_radeon_tex_image32_t img32;
- drm_radeon_tex_image_t __user *image;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
- if (req32.image == 0)
- return -EINVAL;
- if (copy_from_user(&img32, (void __user *)(unsigned long)req32.image,
- sizeof(img32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request) + sizeof(*image));
- if (!access_ok(VERIFY_WRITE, request,
- sizeof(*request) + sizeof(*image)))
- return -EFAULT;
- image = (drm_radeon_tex_image_t __user *) (request + 1);
-
- if (__put_user(req32.offset, &request->offset)
- || __put_user(req32.pitch, &request->pitch)
- || __put_user(req32.format, &request->format)
- || __put_user(req32.width, &request->width)
- || __put_user(req32.height, &request->height)
- || __put_user(image, &request->image)
- || __put_user(img32.x, &image->x)
- || __put_user(img32.y, &image->y)
- || __put_user(img32.width, &image->width)
- || __put_user(img32.height, &image->height)
- || __put_user((const void __user *)(unsigned long)img32.data,
- &image->data))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
-}
-
-typedef struct drm_radeon_vertex2_32 {
- int idx; /* Index of vertex buffer */
- int discard; /* Client finished with buffer? */
- int nr_states;
- u32 state;
- int nr_prims;
- u32 prim;
-} drm_radeon_vertex2_32_t;
-
-static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_vertex2_32_t req32;
- drm_radeon_vertex2_t __user *request;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.idx, &request->idx)
- || __put_user(req32.discard, &request->discard)
- || __put_user(req32.nr_states, &request->nr_states)
- || __put_user((void __user *)(unsigned long)req32.state,
- &request->state)
- || __put_user(req32.nr_prims, &request->nr_prims)
- || __put_user((void __user *)(unsigned long)req32.prim,
- &request->prim))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
-}
-
-typedef struct drm_radeon_cmd_buffer32 {
- int bufsz;
- u32 buf;
- int nbox;
- u32 boxes;
-} drm_radeon_cmd_buffer32_t;
-
-static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_cmd_buffer32_t req32;
- drm_radeon_cmd_buffer_t __user *request;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.bufsz, &request->bufsz)
- || __put_user((void __user *)(unsigned long)req32.buf,
- &request->buf)
- || __put_user(req32.nbox, &request->nbox)
- || __put_user((void __user *)(unsigned long)req32.boxes,
- &request->boxes))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
-}
-
-typedef struct drm_radeon_getparam32 {
- int param;
- u32 value;
-} drm_radeon_getparam32_t;
-
-static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_getparam32_t req32;
- drm_radeon_getparam_t __user *request;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.param, &request->param)
- || __put_user((void __user *)(unsigned long)req32.value,
- &request->value))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
-}
-
-typedef struct drm_radeon_mem_alloc32 {
- int region;
- int alignment;
- int size;
- u32 region_offset; /* offset from start of fb or GART */
-} drm_radeon_mem_alloc32_t;
-
-static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_mem_alloc32_t req32;
- drm_radeon_mem_alloc_t __user *request;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.region, &request->region)
- || __put_user(req32.alignment, &request->alignment)
- || __put_user(req32.size, &request->size)
- || __put_user((int __user *)(unsigned long)req32.region_offset,
- &request->region_offset))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
-}
-
-typedef struct drm_radeon_irq_emit32 {
- u32 irq_seq;
-} drm_radeon_irq_emit32_t;
-
-static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_irq_emit32_t req32;
- drm_radeon_irq_emit_t __user *request;
-
- if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user((int __user *)(unsigned long)req32.irq_seq,
- &request->irq_seq))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
-}
-
-/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
-#if defined (CONFIG_X86_64) || defined(CONFIG_IA64)
-typedef struct drm_radeon_setparam32 {
- int param;
- u64 value;
-} __attribute__((packed)) drm_radeon_setparam32_t;
-
-static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_radeon_setparam32_t req32;
- drm_radeon_setparam_t __user *request;
-
- if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
- return -EFAULT;
-
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
- || __put_user(req32.param, &request->param)
- || __put_user((void __user *)(unsigned long)req32.value,
- &request->value))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
-}
-#else
-#define compat_radeon_cp_setparam NULL
-#endif /* X86_64 || IA64 */
-
-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
- [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
- [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
- [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
- [DRM_RADEON_TEXTURE] = compat_radeon_cp_texture,
- [DRM_RADEON_VERTEX2] = compat_radeon_cp_vertex2,
- [DRM_RADEON_CMDBUF] = compat_radeon_cp_cmdbuf,
- [DRM_RADEON_GETPARAM] = compat_radeon_cp_getparam,
- [DRM_RADEON_SETPARAM] = compat_radeon_cp_setparam,
- [DRM_RADEON_ALLOC] = compat_radeon_mem_alloc,
- [DRM_RADEON_IRQ_EMIT] = compat_radeon_irq_emit,
-};
-
-/**
- * Called whenever a 32-bit process running under a 64-bit kernel
- * performs an ioctl on /dev/dri/card<n>.
- *
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
- */
-long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- unsigned int nr = DRM_IOCTL_NR(cmd);
- drm_ioctl_compat_t *fn = NULL;
- int ret;
-
- if (nr < DRM_COMMAND_BASE)
- return drm_compat_ioctl(filp, cmd, arg);
-
- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(radeon_compat_ioctls))
- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
-
- if (fn != NULL)
- ret = (*fn) (filp, cmd, arg);
- else
- ret = drm_ioctl(filp, cmd, arg);
-
- return ret;
-}
-
-long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- unsigned int nr = DRM_IOCTL_NR(cmd);
- int ret;
-
- if (nr < DRM_COMMAND_BASE)
- return drm_compat_ioctl(filp, cmd, arg);
-
- ret = radeon_drm_ioctl(filp, cmd, arg);
-
- return ret;
-}
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 0fabd410efd9..4bbb8dea4727 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -332,7 +332,6 @@ static struct vmbus_channel *alloc_channel(void)
if (!channel)
return NULL;
- spin_lock_init(&channel->inbound_lock);
spin_lock_init(&channel->lock);
INIT_LIST_HEAD(&channel->sc_list);
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
index 73a401662853..6e0a5539a9ea 100644
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -2,16 +2,13 @@
# Generic HWSPINLOCK framework
#
-# HWSPINLOCK always gets selected by whoever wants it.
-config HWSPINLOCK
- tristate
-
-menu "Hardware Spinlock drivers"
+menuconfig HWSPINLOCK
+ tristate "Hardware Spinlock drivers"
config HWSPINLOCK_OMAP
tristate "OMAP Hardware Spinlock device"
+ depends on HWSPINLOCK
depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX || SOC_AM33XX || SOC_AM43XX
- select HWSPINLOCK
help
Say y here to support the OMAP Hardware Spinlock device (firstly
introduced in OMAP4).
@@ -20,8 +17,8 @@ config HWSPINLOCK_OMAP
config HWSPINLOCK_QCOM
tristate "Qualcomm Hardware Spinlock device"
+ depends on HWSPINLOCK
depends on ARCH_QCOM
- select HWSPINLOCK
select MFD_SYSCON
help
Say y here to support the Qualcomm Hardware Mutex functionality, which
@@ -32,8 +29,8 @@ config HWSPINLOCK_QCOM
config HWSPINLOCK_SIRF
tristate "SIRF Hardware Spinlock device"
+ depends on HWSPINLOCK
depends on ARCH_SIRF
- select HWSPINLOCK
help
Say y here to support the SIRF Hardware Spinlock device, which
provides a synchronisation mechanism for the various processors
@@ -42,15 +39,22 @@ config HWSPINLOCK_SIRF
It's safe to say n here if you're not interested in SIRF hardware
spinlock or just want a bare minimum kernel.
+config HWSPINLOCK_SPRD
+ tristate "SPRD Hardware Spinlock device"
+ depends on ARCH_SPRD
+ depends on HWSPINLOCK
+ help
+ Say y here to support the SPRD Hardware Spinlock device.
+
+ If unsure, say N.
+
config HSEM_U8500
tristate "STE Hardware Semaphore functionality"
+ depends on HWSPINLOCK
depends on ARCH_U8500
- select HWSPINLOCK
help
Say y here to support the STE Hardware Semaphore functionality, which
provides a synchronisation mechanism for the various processor on the
SoC.
If unsure, say N.
-
-endmenu
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
index 6b59cb5a4f3a..14928aa7cc5a 100644
--- a/drivers/hwspinlock/Makefile
+++ b/drivers/hwspinlock/Makefile
@@ -6,4 +6,5 @@ obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o
obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o
obj-$(CONFIG_HWSPINLOCK_QCOM) += qcom_hwspinlock.o
obj-$(CONFIG_HWSPINLOCK_SIRF) += sirf_hwspinlock.o
+obj-$(CONFIG_HWSPINLOCK_SPRD) += sprd_hwspinlock.o
obj-$(CONFIG_HSEM_U8500) += u8500_hsem.o
diff --git a/drivers/hwspinlock/sprd_hwspinlock.c b/drivers/hwspinlock/sprd_hwspinlock.c
new file mode 100644
index 000000000000..638e64ac18f5
--- /dev/null
+++ b/drivers/hwspinlock/sprd_hwspinlock.c
@@ -0,0 +1,183 @@
+/*
+ * Spreadtrum hardware spinlock driver
+ * Copyright (C) 2017 Spreadtrum - http://www.spreadtrum.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "hwspinlock_internal.h"
+
+/* hwspinlock registers definition */
+#define HWSPINLOCK_RECCTRL 0x4
+#define HWSPINLOCK_MASTERID(_X_) (0x80 + 0x4 * (_X_))
+#define HWSPINLOCK_TOKEN(_X_) (0x800 + 0x4 * (_X_))
+
+/* unlocked value */
+#define HWSPINLOCK_NOTTAKEN 0x55aa10c5
+/* bits definition of RECCTRL reg */
+#define HWSPINLOCK_USER_BITS 0x1
+
+/* number of hardware spinlocks provided by the controller */
+#define SPRD_HWLOCKS_NUM 32
+
+struct sprd_hwspinlock_dev {
+ void __iomem *base;
+ struct clk *clk;
+ struct hwspinlock_device bank;
+};
+
+/* try to lock the hardware spinlock */
+static int sprd_hwspinlock_trylock(struct hwspinlock *lock)
+{
+ struct sprd_hwspinlock_dev *sprd_hwlock =
+ dev_get_drvdata(lock->bank->dev);
+ void __iomem *addr = lock->priv;
+ int user_id, lock_id;
+
+ if (!readl(addr))
+ return 1;
+
+ lock_id = hwlock_to_id(lock);
+ /* get the hardware spinlock master/user id */
+ user_id = readl(sprd_hwlock->base + HWSPINLOCK_MASTERID(lock_id));
+ dev_warn(sprd_hwlock->bank.dev,
+ "hwspinlock [%d] lock failed and master/user id = %d!\n",
+ lock_id, user_id);
+ return 0;
+}
+
+/* unlock the hardware spinlock */
+static void sprd_hwspinlock_unlock(struct hwspinlock *lock)
+{
+ void __iomem *lock_addr = lock->priv;
+
+ writel(HWSPINLOCK_NOTTAKEN, lock_addr);
+}
+
+/* The hardware spec recommends the delay below between lock retries */
+static void sprd_hwspinlock_relax(struct hwspinlock *lock)
+{
+ ndelay(10);
+}
+
+static const struct hwspinlock_ops sprd_hwspinlock_ops = {
+ .trylock = sprd_hwspinlock_trylock,
+ .unlock = sprd_hwspinlock_unlock,
+ .relax = sprd_hwspinlock_relax,
+};
+
+static int sprd_hwspinlock_probe(struct platform_device *pdev)
+{
+ struct sprd_hwspinlock_dev *sprd_hwlock;
+ struct hwspinlock *lock;
+ struct resource *res;
+ int i, ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ sprd_hwlock = devm_kzalloc(&pdev->dev,
+ sizeof(struct sprd_hwspinlock_dev) +
+ SPRD_HWLOCKS_NUM * sizeof(*lock),
+ GFP_KERNEL);
+ if (!sprd_hwlock)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sprd_hwlock->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sprd_hwlock->base))
+ return PTR_ERR(sprd_hwlock->base);
+
+ sprd_hwlock->clk = devm_clk_get(&pdev->dev, "enable");
+ if (IS_ERR(sprd_hwlock->clk)) {
+ dev_err(&pdev->dev, "get hwspinlock clock failed!\n");
+ return PTR_ERR(sprd_hwlock->clk);
+ }
+
+ clk_prepare_enable(sprd_hwlock->clk);
+
+	/* record the user/master id in hardware so the lock owner can be identified */
+ writel(HWSPINLOCK_USER_BITS, sprd_hwlock->base + HWSPINLOCK_RECCTRL);
+
+ for (i = 0; i < SPRD_HWLOCKS_NUM; i++) {
+ lock = &sprd_hwlock->bank.lock[i];
+ lock->priv = sprd_hwlock->base + HWSPINLOCK_TOKEN(i);
+ }
+
+ platform_set_drvdata(pdev, sprd_hwlock);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = hwspin_lock_register(&sprd_hwlock->bank, &pdev->dev,
+ &sprd_hwspinlock_ops, 0, SPRD_HWLOCKS_NUM);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(sprd_hwlock->clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sprd_hwspinlock_remove(struct platform_device *pdev)
+{
+ struct sprd_hwspinlock_dev *sprd_hwlock = platform_get_drvdata(pdev);
+
+ hwspin_lock_unregister(&sprd_hwlock->bank);
+ pm_runtime_disable(&pdev->dev);
+ clk_disable_unprepare(sprd_hwlock->clk);
+ return 0;
+}
+
+static const struct of_device_id sprd_hwspinlock_of_match[] = {
+ { .compatible = "sprd,hwspinlock-r3p0", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sprd_hwspinlock_of_match);
+
+static struct platform_driver sprd_hwspinlock_driver = {
+ .probe = sprd_hwspinlock_probe,
+ .remove = sprd_hwspinlock_remove,
+ .driver = {
+ .name = "sprd_hwspinlock",
+ .of_match_table = of_match_ptr(sprd_hwspinlock_of_match),
+ },
+};
+
+static int __init sprd_hwspinlock_init(void)
+{
+ return platform_driver_register(&sprd_hwspinlock_driver);
+}
+postcore_initcall(sprd_hwspinlock_init);
+
+static void __exit sprd_hwspinlock_exit(void)
+{
+ platform_driver_unregister(&sprd_hwspinlock_driver);
+}
+module_exit(sprd_hwspinlock_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Hardware spinlock driver for Spreadtrum");
+MODULE_AUTHOR("Baolin Wang <[email protected]>");
+MODULE_AUTHOR("Lanqing Liu <[email protected]>");
+MODULE_AUTHOR("Long Cheng <[email protected]>");
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 631eaa9daf65..a5dfab6adf49 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -376,7 +376,8 @@ static void ib_policy_change_task(struct work_struct *work)
WARN_ONCE(ret,
"ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
ret);
- ib_security_cache_change(dev, i, sp);
+ if (!ret)
+ ib_security_cache_change(dev, i, sp);
}
}
up_read(&lists_rwsem);
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 3e8c38953912..70ad19c4c73e 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -120,21 +120,25 @@ static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
return 0;
if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
- get_pkey_and_subnet_prefix(&pps->main,
- &pkey,
- &subnet_prefix);
+ ret = get_pkey_and_subnet_prefix(&pps->main,
+ &pkey,
+ &subnet_prefix);
+ if (ret)
+ return ret;
ret = enforce_qp_pkey_security(pkey,
subnet_prefix,
sec);
+ if (ret)
+ return ret;
}
- if (ret)
- return ret;
if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
- get_pkey_and_subnet_prefix(&pps->alt,
- &pkey,
- &subnet_prefix);
+ ret = get_pkey_and_subnet_prefix(&pps->alt,
+ &pkey,
+ &subnet_prefix);
+ if (ret)
+ return ret;
ret = enforce_qp_pkey_security(pkey,
subnet_prefix,
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 0ad3b05405d8..8ba9bfb073d1 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1935,6 +1935,11 @@ static int modify_qp(struct ib_uverbs_file *file,
goto out;
}
+ if (!rdma_is_port_valid(qp->device, cmd->base.port_num)) {
+ ret = -EINVAL;
+ goto release_qp;
+ }
+
attr->qp_state = cmd->base.qp_state;
attr->cur_qp_state = cmd->base.cur_qp_state;
attr->path_mtu = cmd->base.path_mtu;
@@ -2548,6 +2553,9 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
+ return -EINVAL;
+
INIT_UDATA(&udata, buf + sizeof(cmd),
(unsigned long)cmd.response + sizeof(resp),
in_len - sizeof(cmd), out_len - sizeof(resp));
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 90e7b77d68e8..2d19f9bb434d 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1779,7 +1779,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
ibdev->alloc_hw_stats = alloc_hw_stats;
ibdev->get_hw_stats = get_hw_stats;
ibdev->alloc_rdma_netdev = hfi1_vnic_alloc_rn;
- ibdev->free_rdma_netdev = hfi1_vnic_free_rn;
/* keep process mad in the driver */
ibdev->process_mad = hfi1_process_mad;
diff --git a/drivers/infiniband/hw/hfi1/vnic.h b/drivers/infiniband/hw/hfi1/vnic.h
index e2c455299b53..4a621cde4abb 100644
--- a/drivers/infiniband/hw/hfi1/vnic.h
+++ b/drivers/infiniband/hw/hfi1/vnic.h
@@ -176,7 +176,6 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *));
-void hfi1_vnic_free_rn(struct net_device *netdev);
int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
struct hfi1_vnic_vport_info *vinfo,
struct sk_buff *skb, u64 pbc, u8 plen);
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index b601c2929f8f..339f0cdd56d6 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -833,6 +833,15 @@ static const struct net_device_ops hfi1_netdev_ops = {
.ndo_get_stats64 = hfi1_vnic_get_stats64,
};
+static void hfi1_vnic_free_rn(struct net_device *netdev)
+{
+ struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
+
+ hfi1_vnic_deinit(vinfo);
+ mutex_destroy(&vinfo->lock);
+ free_netdev(netdev);
+}
+
struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
u8 port_num,
enum rdma_netdev_t type,
@@ -864,6 +873,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
vinfo->num_tx_q = dd->chip_sdma_engines;
vinfo->num_rx_q = HFI1_NUM_VNIC_CTXT;
vinfo->netdev = netdev;
+ rn->free_rdma_netdev = hfi1_vnic_free_rn;
rn->set_id = hfi1_vnic_set_vesw_id;
netdev->features = NETIF_F_HIGHDMA | NETIF_F_SG;
@@ -892,12 +902,3 @@ init_fail:
free_netdev(netdev);
return ERR_PTR(rc);
}
-
-void hfi1_vnic_free_rn(struct net_device *netdev)
-{
- struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
-
- hfi1_vnic_deinit(vinfo);
- mutex_destroy(&vinfo->lock);
- free_netdev(netdev);
-}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index dc2f59e33971..a7f2e60085c4 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3528,6 +3528,11 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
return num_counters;
}
+static void mlx5_ib_free_rdma_netdev(struct net_device *netdev)
+{
+ return mlx5_rdma_netdev_free(netdev);
+}
+
static struct net_device*
mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
u8 port_num,
@@ -3536,16 +3541,19 @@ mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
unsigned char name_assign_type,
void (*setup)(struct net_device *))
{
+ struct net_device *netdev;
+ struct rdma_netdev *rn;
+
if (type != RDMA_NETDEV_IPOIB)
return ERR_PTR(-EOPNOTSUPP);
- return mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca,
- name, setup);
-}
-
-static void mlx5_ib_free_rdma_netdev(struct net_device *netdev)
-{
- return mlx5_rdma_netdev_free(netdev);
+ netdev = mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca,
+ name, setup);
+ if (likely(!IS_ERR_OR_NULL(netdev))) {
+ rn = netdev_priv(netdev);
+ rn->free_rdma_netdev = mlx5_ib_free_rdma_netdev;
+ }
+ return netdev;
}
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
@@ -3678,10 +3686,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
dev->ib_dev.get_port_immutable = mlx5_port_immutable;
dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
- if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
+ if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev;
- dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev;
- }
+
if (mlx5_core_is_pf(mdev)) {
dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index d129625af0a7..6e86eeee370e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1893,6 +1893,7 @@ static struct net_device
rn->send = ipoib_send;
rn->attach_mcast = ipoib_mcast_attach;
rn->detach_mcast = ipoib_mcast_detach;
+ rn->free_rdma_netdev = free_netdev;
rn->hca = hca;
dev->netdev_ops = &ipoib_netdev_default_pf;
@@ -2288,6 +2289,8 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
return;
list_for_each_entry_safe(priv, tmp, dev_list, list) {
+ struct rdma_netdev *rn = netdev_priv(priv->dev);
+
ib_unregister_event_handler(&priv->event_handler);
flush_workqueue(ipoib_workqueue);
@@ -2304,10 +2307,7 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
flush_workqueue(priv->wq);
unregister_netdev(priv->dev);
- if (device->free_rdma_netdev)
- device->free_rdma_netdev(priv->dev);
- else
- free_netdev(priv->dev);
+ rn->free_rdma_netdev(priv->dev);
list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
kfree(cpriv);
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
index d66540e24885..62390e9e0023 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
@@ -146,15 +146,15 @@ static void vnic_get_ethtool_stats(struct net_device *netdev,
int i;
memset(&vstats, 0, sizeof(vstats));
- mutex_lock(&adapter->stats_lock);
+ spin_lock(&adapter->stats_lock);
adapter->rn_ops->ndo_get_stats64(netdev, &vstats.netstats);
+ spin_unlock(&adapter->stats_lock);
for (i = 0; i < VNIC_STATS_LEN; i++) {
char *p = (char *)&vstats + vnic_gstrings_stats[i].stat_offset;
data[i] = (vnic_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- mutex_unlock(&adapter->stats_lock);
}
/* vnic_get_strings - get strings */
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
index 6bba886bec1f..ca29e6d5aedc 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h
@@ -214,7 +214,7 @@ struct opa_vnic_adapter {
struct mutex mactbl_lock;
/* Lock used to protect access to vnic counters */
- struct mutex stats_lock;
+ spinlock_t stats_lock;
u8 flow_tbl[OPA_VNIC_FLOW_TBL_SIZE];
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index fcf75323d62a..1a3c25364b64 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -69,9 +69,9 @@ static void opa_vnic_get_stats64(struct net_device *netdev,
struct opa_vnic_stats vstats;
memset(&vstats, 0, sizeof(vstats));
- mutex_lock(&adapter->stats_lock);
+ spin_lock(&adapter->stats_lock);
adapter->rn_ops->ndo_get_stats64(netdev, &vstats.netstats);
- mutex_unlock(&adapter->stats_lock);
+ spin_unlock(&adapter->stats_lock);
memcpy(stats, &vstats.netstats, sizeof(*stats));
}
@@ -323,13 +323,13 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
else if (IS_ERR(netdev))
return ERR_CAST(netdev);
+ rn = netdev_priv(netdev);
adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
if (!adapter) {
rc = -ENOMEM;
goto adapter_err;
}
- rn = netdev_priv(netdev);
rn->clnt_priv = adapter;
rn->hca = ibdev;
rn->port_num = port_num;
@@ -344,7 +344,7 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
netdev->hard_header_len += OPA_VNIC_SKB_HEADROOM;
mutex_init(&adapter->lock);
mutex_init(&adapter->mactbl_lock);
- mutex_init(&adapter->stats_lock);
+ spin_lock_init(&adapter->stats_lock);
SET_NETDEV_DEV(netdev, ibdev->dev.parent);
@@ -364,10 +364,9 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
netdev_err:
mutex_destroy(&adapter->lock);
mutex_destroy(&adapter->mactbl_lock);
- mutex_destroy(&adapter->stats_lock);
kfree(adapter);
adapter_err:
- ibdev->free_rdma_netdev(netdev);
+ rn->free_rdma_netdev(netdev);
return ERR_PTR(rc);
}
@@ -376,14 +375,13 @@ adapter_err:
void opa_vnic_rem_netdev(struct opa_vnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct ib_device *ibdev = adapter->ibdev;
+ struct rdma_netdev *rn = netdev_priv(netdev);
v_info("removing\n");
unregister_netdev(netdev);
opa_vnic_release_mac_tbl(adapter);
mutex_destroy(&adapter->lock);
mutex_destroy(&adapter->mactbl_lock);
- mutex_destroy(&adapter->stats_lock);
kfree(adapter);
- ibdev->free_rdma_netdev(netdev);
+ rn->free_rdma_netdev(netdev);
}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 875694f9a7f9..cf768dd78d1b 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -794,7 +794,7 @@ void opa_vnic_vema_send_trap(struct opa_vnic_adapter *adapter,
send_buf = ib_create_send_mad(port->mad_agent, 1, pkey_idx, 0,
IB_MGMT_VENDOR_HDR, IB_MGMT_MAD_DATA,
- GFP_KERNEL, OPA_MGMT_BASE_VERSION);
+ GFP_ATOMIC, OPA_MGMT_BASE_VERSION);
if (IS_ERR(send_buf)) {
c_err("%s:Couldn't allocate send buf\n", __func__);
goto err_sndbuf;
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
index a51bf977f4d6..c2733964379c 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c
@@ -89,9 +89,9 @@ void opa_vnic_get_summary_counters(struct opa_vnic_adapter *adapter,
u64 *src;
memset(&vstats, 0, sizeof(vstats));
- mutex_lock(&adapter->stats_lock);
+ spin_lock(&adapter->stats_lock);
adapter->rn_ops->ndo_get_stats64(adapter->netdev, &vstats.netstats);
- mutex_unlock(&adapter->stats_lock);
+ spin_unlock(&adapter->stats_lock);
cntrs->vp_instance = cpu_to_be16(adapter->vport_num);
cntrs->vesw_id = cpu_to_be16(adapter->info.vesw.vesw_id);
@@ -128,9 +128,9 @@ void opa_vnic_get_error_counters(struct opa_vnic_adapter *adapter,
struct opa_vnic_stats vstats;
memset(&vstats, 0, sizeof(vstats));
- mutex_lock(&adapter->stats_lock);
+ spin_lock(&adapter->stats_lock);
adapter->rn_ops->ndo_get_stats64(adapter->netdev, &vstats.netstats);
- mutex_unlock(&adapter->stats_lock);
+ spin_unlock(&adapter->stats_lock);
cntrs->vp_instance = cpu_to_be16(adapter->vport_num);
cntrs->vesw_id = cpu_to_be16(adapter->info.vesw.vesw_id);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 067d648028a2..7e6842bd525c 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -481,7 +481,7 @@ EXPORT_SYMBOL(input_inject_event);
void input_alloc_absinfo(struct input_dev *dev)
{
if (!dev->absinfo)
- dev->absinfo = kcalloc(ABS_CNT, sizeof(struct input_absinfo),
+ dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo),
GFP_KERNEL);
WARN(!dev->absinfo, "%s(): kcalloc() failed?\n", __func__);
@@ -1126,7 +1126,7 @@ static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
* If no output was produced print a single 0.
*/
if (skip_empty)
- seq_puts(seq, "0");
+ seq_putc(seq, '0');
seq_putc(seq, '\n');
}
@@ -1144,7 +1144,7 @@ static int input_devices_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : "");
seq_printf(seq, "S: Sysfs=%s\n", path ? path : "");
seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : "");
- seq_printf(seq, "H: Handlers=");
+ seq_puts(seq, "H: Handlers=");
list_for_each_entry(handle, &dev->h_list, d_node)
seq_printf(seq, "%s ", handle->name);
@@ -1783,7 +1783,7 @@ struct input_dev *input_allocate_device(void)
static atomic_t input_no = ATOMIC_INIT(-1);
struct input_dev *dev;
- dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev) {
dev->dev.type = &input_dev_type;
dev->dev.class = &input_class;
@@ -1849,7 +1849,7 @@ struct input_dev *devm_input_allocate_device(struct device *dev)
struct input_devres *devres;
devres = devres_alloc(devm_input_device_release,
- sizeof(struct input_devres), GFP_KERNEL);
+ sizeof(*devres), GFP_KERNEL);
if (!devres)
return NULL;
@@ -2099,7 +2099,7 @@ int input_register_device(struct input_dev *dev)
if (dev->devres_managed) {
devres = devres_alloc(devm_input_device_unregister,
- sizeof(struct input_devres), GFP_KERNEL);
+ sizeof(*devres), GFP_KERNEL);
if (!devres)
return -ENOMEM;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index def96cd2479b..298a6ba51411 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -126,13 +126,18 @@ static const struct xpad_device {
u8 mapping;
u8 xtype;
} xpad_device[] = {
+ { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
+ { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
+ { 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
{ 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
{ 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX },
{ 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },
+ { 0x045e, 0x0288, "Microsoft Xbox Controller S v2", 0, XTYPE_XBOX },
{ 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", 0, XTYPE_XBOX },
{ 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
+ { 0x045e, 0x028f, "Microsoft X-Box 360 pad v2", 0, XTYPE_XBOX360 },
{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
@@ -145,28 +150,52 @@ static const struct xpad_device {
{ 0x046d, 0xc242, "Logitech Chillstream Controller", 0, XTYPE_XBOX360 },
{ 0x046d, 0xca84, "Logitech Xbox Cordless Controller", 0, XTYPE_XBOX },
{ 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX },
+ { 0x046d, 0xca8a, "Logitech Precision Vibration Feedback Wheel", 0, XTYPE_XBOX },
+ { 0x046d, 0xcaa3, "Logitech DriveFx Racing Wheel", 0, XTYPE_XBOX360 },
{ 0x056e, 0x2004, "Elecom JC-U3613M", 0, XTYPE_XBOX360 },
{ 0x05fd, 0x1007, "Mad Catz Controller (unverified)", 0, XTYPE_XBOX },
{ 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX },
+ { 0x05fe, 0x3030, "Chic Controller", 0, XTYPE_XBOX },
+ { 0x05fe, 0x3031, "Chic Controller", 0, XTYPE_XBOX },
+ { 0x062a, 0x0020, "Logic3 Xbox GamePad", 0, XTYPE_XBOX },
+ { 0x062a, 0x0033, "Competition Pro Steering Wheel", 0, XTYPE_XBOX },
+ { 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX },
+ { 0x06a3, 0x0201, "Saitek Adrenalin", 0, XTYPE_XBOX },
+ { 0x06a3, 0xf51a, "Saitek P3600", 0, XTYPE_XBOX360 },
+ { 0x0738, 0x4506, "Mad Catz 4506 Wireless Controller", 0, XTYPE_XBOX },
{ 0x0738, 0x4516, "Mad Catz Control Pad", 0, XTYPE_XBOX },
+ { 0x0738, 0x4520, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX },
{ 0x0738, 0x4522, "Mad Catz LumiCON", 0, XTYPE_XBOX },
{ 0x0738, 0x4526, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX },
+ { 0x0738, 0x4530, "Mad Catz Universal MC2 Racing Wheel and Pedals", 0, XTYPE_XBOX },
{ 0x0738, 0x4536, "Mad Catz MicroCON", 0, XTYPE_XBOX },
{ 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
+ { 0x0738, 0x4586, "Mad Catz MicroCon Wireless Controller", 0, XTYPE_XBOX },
+ { 0x0738, 0x4588, "Mad Catz Blaster", 0, XTYPE_XBOX },
+ { 0x0738, 0x45ff, "Mad Catz Beat Pad (w/ Handle)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
{ 0x0738, 0x4718, "Mad Catz Street Fighter IV FightStick SE", 0, XTYPE_XBOX360 },
{ 0x0738, 0x4726, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0738, 0x4736, "Mad Catz MicroCon Gamepad", 0, XTYPE_XBOX360 },
{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 },
+ { 0x0738, 0x4743, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ { 0x0738, 0x4758, "Mad Catz Arcade Game Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0738, 0x4a01, "Mad Catz FightStick TE 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ { 0x0738, 0x9871, "Mad Catz Portable Drum", 0, XTYPE_XBOX360 },
{ 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
+ { 0x0738, 0xb738, "Mad Catz MVC2TE Stick 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
{ 0x0738, 0xcb02, "Saitek Cyborg Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0738, 0xcb03, "Saitek P3200 Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0738, 0xcb29, "Saitek Aviator Stick AV8R02", 0, XTYPE_XBOX360 },
{ 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 },
+ { 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 },
+ { 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX },
+ { 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX },
{ 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
{ 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", DANCEPAD_MAP_CONFIG, XTYPE_XBOX },
{ 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
@@ -174,37 +203,63 @@ static const struct xpad_device {
{ 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
{ 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
+ { 0x0e4c, 0x1103, "Radica Gamester Reflex", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX },
{ 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
+ { 0x0e4c, 0x3510, "Radica Gamester", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0005, "Eclipse wireless Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX },
+ { 0x0e6f, 0x0008, "After Glow Pro Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0e6f, 0x0113, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x011f, "Rock Candy Gamepad Wired Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0131, "PDP EA Sports Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0133, "Xbox 360 Wired Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0139, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x013a, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0164, "PDP Battlefield One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0165, "PDP Titanfall 2", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0413, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0501, "PDP Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0xf900, "PDP Afterglow AX.1", 0, XTYPE_XBOX360 },
{ 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX },
{ 0x0e8f, 0x3008, "Generic xbox control (dealextreme)", 0, XTYPE_XBOX },
{ 0x0f0d, 0x000a, "Hori Co. DOA4 FightStick", 0, XTYPE_XBOX360 },
+ { 0x0f0d, 0x000c, "Hori PadEX Turbo", 0, XTYPE_XBOX360 },
{ 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x001b, "Hori Real Arcade Pro VX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x0063, "Hori Real Arcade Pro Hayabusa (USA) Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE },
+ { 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ { 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
+ { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
+ { 0x12ab, 0x0303, "Mortal Kombat Klassic FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 },
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ { 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
{ 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ { 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
@@ -215,22 +270,44 @@ static const struct xpad_device {
{ 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xf018, "Mad Catz Street Fighter IV SE Fighting Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0xf019, "Mad Catz Brawlstick for Xbox 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0xf021, "Mad Cats Ghost Recon FS GamePad", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xf023, "MLG Pro Circuit Controller (Xbox)", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf025, "Mad Catz Call Of Duty", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf027, "Mad Catz FPS Pro", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xf028, "Street Fighter IV FightPad", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xf02e, "Mad Catz Fightpad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf030, "Mad Catz Xbox 360 MC2 MicroCon Racing Wheel", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf036, "Mad Catz MicroCon GamePad Pro", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xf038, "Street Fighter IV FightStick TE", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf039, "Mad Catz MvC2 TE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0xf03a, "Mad Catz SFxT Fightstick Pro", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf03d, "Street Fighter IV Arcade Stick TE - Chun Li", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf03e, "Mad Catz MLG FightStick TE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf03f, "Mad Catz FightStick SoulCaliber", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf042, "Mad Catz FightStick TES+", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf080, "Mad Catz FightStick TE2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf501, "HoriPad EX2 Turbo", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf502, "Hori Real Arcade Pro.VX SA", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf503, "Hori Fighting Stick VX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf504, "Hori Real Arcade Pro. EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf505, "Hori Fighting Stick EX2B", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf506, "Hori Real Arcade Pro.EX Premium VLX", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xf900, "Harmonix Xbox 360 Controller", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xf901, "Gamestop Xbox 360 Controller", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xf903, "Tron Xbox 360 controller", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf904, "PDP Versus Fighting Pad", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf906, "MortalKombat FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x530a, "Xbox 360 Pro EX Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x531a, "PowerA Pro Ex", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5397, "FUS1ON Tournament Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x541a, "PowerA Xbox One Mini Wired Controller", 0, XTYPE_XBOXONE },
@@ -238,12 +315,19 @@ static const struct xpad_device {
{ 0x24c6, 0x543a, "PowerA Xbox One wired controller", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x5500, "Hori XBOX 360 EX 2 with Turbo", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5501, "Hori Real Arcade Pro VX-SA", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5502, "Hori Fighting Stick VX Alt", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5503, "Hori Fighting Edge", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x550d, "Hori GEM Xbox controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x550e, "Hori Real Arcade Pro V Kai 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x561a, "PowerA FUSION Controller", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x5b00, "ThrustMaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5b02, "Thrustmaster, Inc. GPX Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
};
@@ -331,13 +415,16 @@ static struct usb_device_id xpad_table[] = {
XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */
XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
XPAD_XBOX360_VENDOR(0x056e), /* Elecom JC-U3613M */
+ XPAD_XBOX360_VENDOR(0x06a3), /* Saitek P3600 */
XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
{ USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
+ XPAD_XBOX360_VENDOR(0x07ff), /* Mad Catz GamePad */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
+ XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 97acd6524ad7..4c4ab1ced235 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -178,6 +178,17 @@ config KEYBOARD_CLPS711X
To compile this driver as a module, choose M here: the
module will be called clps711x-keypad.
+config KEYBOARD_DLINK_DIR685
+ tristate "D-Link DIR-685 touchkeys support"
+ depends on I2C
+ default ARCH_GEMINI
+ help
+ If you say yes here you get support for the D-Link DIR-685
+ touchkeys.
+
+ To compile this driver as a module, choose M here: the
+ module will be called dlink-dir685-touchkeys.
+
config KEYBOARD_LKKBD
tristate "DECstation/VAXstation LK201/LK401 keyboard"
select SERIO
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 7d9acff819a7..d2338bacdad1 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_KEYBOARD_CAP11XX) += cap11xx.o
obj-$(CONFIG_KEYBOARD_CLPS711X) += clps711x-keypad.o
obj-$(CONFIG_KEYBOARD_CROS_EC) += cros_ec_keyb.o
obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
+obj-$(CONFIG_KEYBOARD_DLINK_DIR685) += dlink-dir685-touchkeys.o
obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
obj-$(CONFIG_KEYBOARD_GOLDFISH_EVENTS) += goldfish_events.o
obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 53fe9a3fb620..f9d273c8b306 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -20,7 +20,7 @@
#include <linux/gpio.h>
#include <linux/slab.h>
-#include <linux/i2c/adp5588.h>
+#include <linux/platform_data/adp5588.h>
/* Key Event Register xy */
#define KEY_EV_PRESSED (1 << 7)
diff --git a/drivers/input/keyboard/dlink-dir685-touchkeys.c b/drivers/input/keyboard/dlink-dir685-touchkeys.c
new file mode 100644
index 000000000000..88e321b76397
--- /dev/null
+++ b/drivers/input/keyboard/dlink-dir685-touchkeys.c
@@ -0,0 +1,155 @@
+/*
+ * D-Link DIR-685 router I2C-based Touchkeys input driver
+ * Copyright (C) 2017 Linus Walleij <[email protected]>
+ *
+ * This is a one-off touchkey controller based on the Cypress Semiconductor
+ * CY8C214 MCU with some firmware in its internal 8KB flash. The circuit
+ * board inside the router is named E119921
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+
+struct dir685_touchkeys {
+ struct device *dev;
+ struct i2c_client *client;
+ struct input_dev *input;
+ unsigned long cur_key;
+ u16 codes[7];
+};
+
+static irqreturn_t dir685_tk_irq_thread(int irq, void *data)
+{
+ struct dir685_touchkeys *tk = data;
+ const int num_bits = min_t(int, ARRAY_SIZE(tk->codes), 16);
+ unsigned long changed;
+ u8 buf[6];
+ unsigned long key;
+ int i;
+ int err;
+
+ memset(buf, 0, sizeof(buf));
+ err = i2c_master_recv(tk->client, buf, sizeof(buf));
+ if (err != sizeof(buf)) {
+ dev_err(tk->dev, "short read %d\n", err);
+ return IRQ_HANDLED;
+ }
+
+ dev_dbg(tk->dev, "IN: %*ph\n", (int)sizeof(buf), buf);
+ key = be16_to_cpup((__be16 *) &buf[4]);
+
+ /* Figure out if any bits went high or low since last message */
+ changed = tk->cur_key ^ key;
+ for_each_set_bit(i, &changed, num_bits) {
+ dev_dbg(tk->dev, "key %d is %s\n", i,
+ test_bit(i, &key) ? "down" : "up");
+ input_report_key(tk->input, tk->codes[i], test_bit(i, &key));
+ }
+
+ /* Store currently down keys */
+ tk->cur_key = key;
+ input_sync(tk->input);
+
+ return IRQ_HANDLED;
+}
+
+static int dir685_tk_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct dir685_touchkeys *tk;
+ struct device *dev = &client->dev;
+ u8 bl_data[] = { 0xa7, 0x40 };
+ int err;
+ int i;
+
+ tk = devm_kzalloc(&client->dev, sizeof(*tk), GFP_KERNEL);
+ if (!tk)
+ return -ENOMEM;
+
+ tk->input = devm_input_allocate_device(dev);
+ if (!tk->input)
+ return -ENOMEM;
+
+ tk->client = client;
+ tk->dev = dev;
+
+ tk->input->keycodesize = sizeof(u16);
+ tk->input->keycodemax = ARRAY_SIZE(tk->codes);
+ tk->input->keycode = tk->codes;
+ tk->codes[0] = KEY_UP;
+ tk->codes[1] = KEY_DOWN;
+ tk->codes[2] = KEY_LEFT;
+ tk->codes[3] = KEY_RIGHT;
+ tk->codes[4] = KEY_ENTER;
+ tk->codes[5] = KEY_WPS_BUTTON;
+ /*
+ * This key appears in the vendor driver, but I have
+ * not been able to activate it.
+ */
+ tk->codes[6] = KEY_RESERVED;
+
+ __set_bit(EV_KEY, tk->input->evbit);
+ for (i = 0; i < ARRAY_SIZE(tk->codes); i++)
+ __set_bit(tk->codes[i], tk->input->keybit);
+ __clear_bit(KEY_RESERVED, tk->input->keybit);
+
+ tk->input->name = "D-Link DIR-685 touchkeys";
+ tk->input->id.bustype = BUS_I2C;
+
+ err = input_register_device(tk->input);
+ if (err)
+ return err;
+
+ /* Set the brightness to max level */
+ err = i2c_master_send(client, bl_data, sizeof(bl_data));
+ if (err != sizeof(bl_data))
+ dev_warn(tk->dev, "error setting brightness level\n");
+
+ if (!client->irq) {
+ dev_err(dev, "no IRQ on the I2C device\n");
+ return -ENODEV;
+ }
+ err = devm_request_threaded_irq(dev, client->irq,
+ NULL, dir685_tk_irq_thread,
+ IRQF_ONESHOT,
+ "dir685-tk", tk);
+ if (err) {
+ dev_err(dev, "can't request IRQ\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id dir685_tk_id[] = {
+ { "dir685tk", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, dir685_tk_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id dir685_tk_of_match[] = {
+ { .compatible = "dlink,dir685-touchkeys" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dir685_tk_of_match);
+#endif
+
+static struct i2c_driver dir685_tk_i2c_driver = {
+ .driver = {
+ .name = "dlin-dir685-touchkeys",
+ .of_match_table = of_match_ptr(dir685_tk_of_match),
+ },
+ .probe = dir685_tk_probe,
+ .id_table = dir685_tk_id,
+};
+module_i2c_driver(dir685_tk_i2c_driver);
+
+MODULE_AUTHOR("Linus Walleij <[email protected]>");
+MODULE_DESCRIPTION("D-Link DIR-685 touchkeys driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 21bea52d4365..04a5d7e134d7 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -30,8 +30,8 @@
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/leds.h>
+#include <linux/platform_data/lm8323.h>
#include <linux/pm.h>
-#include <linux/i2c/lm8323.h>
#include <linux/slab.h>
/* Commands to send to the chip. */
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index 31090d71a685..be56d4f262a7 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -13,11 +13,11 @@
#include <linux/module.h>
#include <linux/i2c.h>
-#include <linux/i2c/mcs.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/irq.h>
#include <linux/slab.h>
+#include <linux/platform_data/mcs.h>
#include <linux/pm.h>
/* MCS5000 Touchkey */
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 400869e61a06..38c79ebff033 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -253,6 +253,9 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
return error;
}
+ if (axp20x_pek->axp20x->variant == AXP288_ID)
+ enable_irq_wake(axp20x_pek->irq_dbr);
+
return 0;
}
@@ -331,10 +334,35 @@ static int axp20x_pek_probe(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused axp20x_pek_resume_noirq(struct device *dev)
+{
+ struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
+
+ if (axp20x_pek->axp20x->variant != AXP288_ID)
+ return 0;
+
+ /*
+ * Clear interrupts from button presses during suspend, to avoid
+ * a wakeup power-button press getting reported to userspace.
+ */
+ regmap_write(axp20x_pek->axp20x->regmap,
+ AXP20X_IRQ1_STATE + AXP288_IRQ_POKN / 8,
+ BIT(AXP288_IRQ_POKN % 8));
+
+ return 0;
+}
+
+static const struct dev_pm_ops axp20x_pek_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .resume_noirq = axp20x_pek_resume_noirq,
+#endif
+};
+
static struct platform_driver axp20x_pek_driver = {
.probe = axp20x_pek_probe,
.driver = {
.name = "axp20x-pek",
+ .pm = &axp20x_pek_pm_ops,
},
};
module_platform_driver(axp20x_pek_driver);
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 690148f9940e..eb770613a9bd 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -135,14 +135,17 @@ static int xenkbd_probe(struct xenbus_device *dev,
goto error_nomem;
/* Set input abs params to match backend screen res */
- abs = xenbus_read_unsigned(dev->otherend, "feature-abs-pointer", 0);
- ptr_size[KPARAM_X] = xenbus_read_unsigned(dev->otherend, "width",
+ abs = xenbus_read_unsigned(dev->otherend,
+ XENKBD_FIELD_FEAT_ABS_POINTER, 0);
+ ptr_size[KPARAM_X] = xenbus_read_unsigned(dev->otherend,
+ XENKBD_FIELD_WIDTH,
ptr_size[KPARAM_X]);
- ptr_size[KPARAM_Y] = xenbus_read_unsigned(dev->otherend, "height",
+ ptr_size[KPARAM_Y] = xenbus_read_unsigned(dev->otherend,
+ XENKBD_FIELD_HEIGHT,
ptr_size[KPARAM_Y]);
if (abs) {
ret = xenbus_write(XBT_NIL, dev->nodename,
- "request-abs-pointer", "1");
+ XENKBD_FIELD_REQ_ABS_POINTER, "1");
if (ret) {
pr_warn("xenkbd: can't request abs-pointer\n");
abs = 0;
@@ -271,14 +274,15 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
xenbus_dev_fatal(dev, ret, "starting transaction");
goto error_irqh;
}
- ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
+ ret = xenbus_printf(xbt, dev->nodename, XENKBD_FIELD_RING_REF, "%lu",
virt_to_gfn(info->page));
if (ret)
goto error_xenbus;
- ret = xenbus_printf(xbt, dev->nodename, "page-gref", "%u", info->gref);
+ ret = xenbus_printf(xbt, dev->nodename, XENKBD_FIELD_RING_GREF,
+ "%u", info->gref);
if (ret)
goto error_xenbus;
- ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
+ ret = xenbus_printf(xbt, dev->nodename, XENKBD_FIELD_EVT_CHANNEL, "%u",
evtchn);
if (ret)
goto error_xenbus;
@@ -353,7 +357,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev,
}
static const struct xenbus_device_id xenkbd_ids[] = {
- { "vkbd" },
+ { XENKBD_DRIVER_NAME },
{ "" }
};
@@ -390,4 +394,4 @@ module_exit(xenkbd_cleanup);
MODULE_DESCRIPTION("Xen virtual keyboard/pointer device frontend");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("xen:vkbd");
+MODULE_ALIAS("xen:" XENKBD_DRIVER_NAME);
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index c0ec26118732..61c202436250 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -58,7 +58,7 @@ struct elan_transport_ops {
int (*get_version)(struct i2c_client *client, bool iap, u8 *version);
int (*get_sm_version)(struct i2c_client *client,
- u8* ic_type, u8 *version);
+ u16 *ic_type, u8 *version);
int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum);
int (*get_product_id)(struct i2c_client *client, u16 *id);
@@ -82,6 +82,7 @@ struct elan_transport_ops {
int (*get_report)(struct i2c_client *client, u8 *report);
int (*get_pressure_adjustment)(struct i2c_client *client,
int *adjustment);
+ int (*get_pattern)(struct i2c_client *client, u8 *pattern);
};
extern const struct elan_transport_ops elan_smbus_ops, elan_i2c_ops;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index d5ab9ddef3e3..3b616cb7c67f 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -5,7 +5,7 @@
*
* Author: 林政維 (Duson Lin) <[email protected]>
* Author: KT Liao <[email protected]>
- * Version: 1.6.2
+ * Version: 1.6.3
*
* Based on cyapa driver:
* copyright (c) 2011-2012 Cypress Semiconductor, Inc.
@@ -41,7 +41,7 @@
#include "elan_i2c.h"
#define DRIVER_NAME "elan_i2c"
-#define ELAN_DRIVER_VERSION "1.6.2"
+#define ELAN_DRIVER_VERSION "1.6.3"
#define ELAN_VENDOR_ID 0x04f3
#define ETP_MAX_PRESSURE 255
#define ETP_FWIDTH_REDUCE 90
@@ -78,6 +78,7 @@ struct elan_tp_data {
unsigned int x_res;
unsigned int y_res;
+ u8 pattern;
u16 product_id;
u8 fw_version;
u8 sm_version;
@@ -85,7 +86,7 @@ struct elan_tp_data {
u16 fw_checksum;
int pressure_adjustment;
u8 mode;
- u8 ic_type;
+ u16 ic_type;
u16 fw_validpage_count;
u16 fw_signature_address;
@@ -96,10 +97,10 @@ struct elan_tp_data {
bool baseline_ready;
};
-static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count,
+static int elan_get_fwinfo(u16 ic_type, u16 *validpage_count,
u16 *signature_address)
{
- switch (iap_version) {
+ switch (ic_type) {
case 0x00:
case 0x06:
case 0x08:
@@ -119,6 +120,9 @@ static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count,
case 0x0E:
*validpage_count = 640;
break;
+ case 0x10:
+ *validpage_count = 1024;
+ break;
default:
/* unknown ic type clear value */
*validpage_count = 0;
@@ -305,6 +309,7 @@ static int elan_initialize(struct elan_tp_data *data)
static int elan_query_device_info(struct elan_tp_data *data)
{
int error;
+ u16 ic_type;
error = data->ops->get_version(data->client, false, &data->fw_version);
if (error)
@@ -324,7 +329,16 @@ static int elan_query_device_info(struct elan_tp_data *data)
if (error)
return error;
- error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count,
+ error = data->ops->get_pattern(data->client, &data->pattern);
+ if (error)
+ return error;
+
+ if (data->pattern == 0x01)
+ ic_type = data->ic_type;
+ else
+ ic_type = data->iap_version;
+
+ error = elan_get_fwinfo(ic_type, &data->fw_validpage_count,
&data->fw_signature_address);
if (error)
dev_warn(&data->client->dev,
@@ -1077,6 +1091,13 @@ static int elan_probe(struct i2c_client *client,
return error;
}
+ /* Make sure there is something at this address */
+ error = i2c_smbus_read_byte(client);
+ if (error < 0) {
+ dev_dbg(&client->dev, "nothing at this address: %d\n", error);
+ return -ENXIO;
+ }
+
/* Initialize the touchpad. */
error = elan_initialize(data);
if (error)
@@ -1101,10 +1122,13 @@ static int elan_probe(struct i2c_client *client,
"Elan Touchpad Extra Information:\n"
" Max ABS X,Y: %d,%d\n"
" Width X,Y: %d,%d\n"
- " Resolution X,Y: %d,%d (dots/mm)\n",
+ " Resolution X,Y: %d,%d (dots/mm)\n"
+ " ic type: 0x%x\n"
+ " info pattern: 0x%x\n",
data->max_x, data->max_y,
data->width_x, data->width_y,
- data->x_res, data->y_res);
+ data->x_res, data->y_res,
+ data->ic_type, data->pattern);
/* Set up input device properties based on queried parameters. */
error = elan_setup_input_device(data);
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index f431da07f861..80172f25974d 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -34,9 +34,12 @@
#define ETP_I2C_DESC_CMD 0x0001
#define ETP_I2C_REPORT_DESC_CMD 0x0002
#define ETP_I2C_STAND_CMD 0x0005
+#define ETP_I2C_PATTERN_CMD 0x0100
#define ETP_I2C_UNIQUEID_CMD 0x0101
#define ETP_I2C_FW_VERSION_CMD 0x0102
-#define ETP_I2C_SM_VERSION_CMD 0x0103
+#define ETP_I2C_IC_TYPE_CMD 0x0103
+#define ETP_I2C_OSM_VERSION_CMD 0x0103
+#define ETP_I2C_NSM_VERSION_CMD 0x0104
#define ETP_I2C_XY_TRACENUM_CMD 0x0105
#define ETP_I2C_MAX_X_AXIS_CMD 0x0106
#define ETP_I2C_MAX_Y_AXIS_CMD 0x0107
@@ -239,12 +242,34 @@ static int elan_i2c_get_baseline_data(struct i2c_client *client,
return 0;
}
+static int elan_i2c_get_pattern(struct i2c_client *client, u8 *pattern)
+{
+ int error;
+ u8 val[3];
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_PATTERN_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get pattern: %d\n", error);
+ return error;
+ }
+ *pattern = val[1];
+
+ return 0;
+}
+
static int elan_i2c_get_version(struct i2c_client *client,
bool iap, u8 *version)
{
int error;
+ u8 pattern_ver;
u8 val[3];
+ error = elan_i2c_get_pattern(client, &pattern_ver);
+ if (error) {
+ dev_err(&client->dev, "failed to get pattern version\n");
+ return error;
+ }
+
error = elan_i2c_read_cmd(client,
iap ? ETP_I2C_IAP_VERSION_CMD :
ETP_I2C_FW_VERSION_CMD,
@@ -255,24 +280,54 @@ static int elan_i2c_get_version(struct i2c_client *client,
return error;
}
- *version = val[0];
+ if (pattern_ver == 0x01)
+ *version = iap ? val[1] : val[0];
+ else
+ *version = val[0];
return 0;
}
static int elan_i2c_get_sm_version(struct i2c_client *client,
- u8 *ic_type, u8 *version)
+ u16 *ic_type, u8 *version)
{
int error;
+ u8 pattern_ver;
u8 val[3];
- error = elan_i2c_read_cmd(client, ETP_I2C_SM_VERSION_CMD, val);
+ error = elan_i2c_get_pattern(client, &pattern_ver);
if (error) {
- dev_err(&client->dev, "failed to get SM version: %d\n", error);
+ dev_err(&client->dev, "failed to get pattern version\n");
return error;
}
- *version = val[0];
- *ic_type = val[1];
+ if (pattern_ver == 0x01) {
+ error = elan_i2c_read_cmd(client, ETP_I2C_IC_TYPE_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get ic type: %d\n",
+ error);
+ return error;
+ }
+ *ic_type = be16_to_cpup((__be16 *)val);
+
+ error = elan_i2c_read_cmd(client, ETP_I2C_NSM_VERSION_CMD,
+ val);
+ if (error) {
+ dev_err(&client->dev, "failed to get SM version: %d\n",
+ error);
+ return error;
+ }
+ *version = val[1];
+ } else {
+ error = elan_i2c_read_cmd(client, ETP_I2C_OSM_VERSION_CMD, val);
+ if (error) {
+ dev_err(&client->dev, "failed to get SM version: %d\n",
+ error);
+ return error;
+ }
+ *version = val[0];
+ *ic_type = val[1];
+ }
+
return 0;
}
@@ -641,5 +696,7 @@ const struct elan_transport_ops elan_i2c_ops = {
.write_fw_block = elan_i2c_write_fw_block,
.finish_fw_update = elan_i2c_finish_fw_update,
+ .get_pattern = elan_i2c_get_pattern,
+
.get_report = elan_i2c_get_report,
};
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index e23b2495d52e..df7a57ca7331 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -166,7 +166,7 @@ static int elan_smbus_get_version(struct i2c_client *client,
}
static int elan_smbus_get_sm_version(struct i2c_client *client,
- u8 *ic_type, u8 *version)
+ u16 *ic_type, u8 *version)
{
int error;
u8 val[3];
@@ -495,6 +495,12 @@ static int elan_smbus_finish_fw_update(struct i2c_client *client,
return 0;
}
+static int elan_smbus_get_pattern(struct i2c_client *client, u8 *pattern)
+{
+ *pattern = 0;
+ return 0;
+}
+
const struct elan_transport_ops elan_smbus_ops = {
.initialize = elan_smbus_initialize,
.sleep_control = elan_smbus_sleep_control,
@@ -524,4 +530,5 @@ const struct elan_transport_ops elan_smbus_ops = {
.finish_fw_update = elan_smbus_finish_fw_update,
.get_report = elan_smbus_get_report,
+ .get_pattern = elan_smbus_get_pattern,
};
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index f1fa1f172107..791993215ea3 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1711,6 +1711,17 @@ int elantech_init(struct psmouse *psmouse)
etd->samples[0], etd->samples[1], etd->samples[2]);
}
+ if (etd->samples[1] == 0x74 && etd->hw_version == 0x03) {
+ /*
+ * This module has a bug which makes absolute mode
+ * unusable, so let's abort so we'll be using standard
+ * PS/2 protocol.
+ */
+ psmouse_info(psmouse,
+ "absolute mode broken, forcing standard PS/2 protocol\n");
+ goto init_fail;
+ }
+
if (elantech_set_absolute_mode(psmouse)) {
psmouse_err(psmouse,
"failed to put touchpad into absolute mode.\n");
diff --git a/drivers/input/rmi4/rmi_f34v7.c b/drivers/input/rmi4/rmi_f34v7.c
index 10c0d11b72c9..3991d2943660 100644
--- a/drivers/input/rmi4/rmi_f34v7.c
+++ b/drivers/input/rmi4/rmi_f34v7.c
@@ -9,13 +9,14 @@
* the Free Software Foundation.
*/
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/rmi.h>
#include <linux/firmware.h>
-#include <asm/unaligned.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
+#include <asm/unaligned.h>
#include "rmi_driver.h"
#include "rmi_f34.h"
@@ -464,7 +465,7 @@ static int rmi_f34v7_read_queries_bl_version(struct f34_data *f34)
static int rmi_f34v7_read_queries(struct f34_data *f34)
{
int ret;
- int i, j;
+ int i;
u8 base;
int offset;
u8 *ptable;
@@ -518,10 +519,7 @@ static int rmi_f34v7_read_queries(struct f34_data *f34)
query_1_7.partition_support[1] & HAS_GUEST_CODE;
if (query_0 & HAS_CONFIG_ID) {
- char f34_ctrl[CONFIG_ID_SIZE];
- int i = 0;
- u8 *p = f34->configuration_id;
- *p = '\0';
+ u8 f34_ctrl[CONFIG_ID_SIZE];
ret = rmi_read_block(f34->fn->rmi_dev,
f34->fn->fd.control_base_addr,
@@ -531,13 +529,11 @@ static int rmi_f34v7_read_queries(struct f34_data *f34)
return ret;
/* Eat leading zeros */
- while (i < sizeof(f34_ctrl) && !f34_ctrl[i])
- i++;
+ for (i = 0; i < sizeof(f34_ctrl) - 1 && !f34_ctrl[i]; i++)
+ /* Empty */;
- for (; i < sizeof(f34_ctrl); i++)
- p += snprintf(p, f34->configuration_id
- + sizeof(f34->configuration_id) - p,
- "%02X", f34_ctrl[i]);
+ snprintf(f34->configuration_id, sizeof(f34->configuration_id),
+ "%*phN", (int)sizeof(f34_ctrl) - i, f34_ctrl + i);
rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "Configuration ID: %s\n",
f34->configuration_id);
@@ -545,9 +541,7 @@ static int rmi_f34v7_read_queries(struct f34_data *f34)
f34->v7.partitions = 0;
for (i = 0; i < sizeof(query_1_7.partition_support); i++)
- for (j = 0; j < 8; j++)
- if (query_1_7.partition_support[i] & (1 << j))
- f34->v7.partitions++;
+ f34->v7.partitions += hweight8(query_1_7.partition_support[i]);
rmi_dbg(RMI_DEBUG_FN, &f34->fn->dev, "%s: Supported partitions: %*ph\n",
__func__, sizeof(query_1_7.partition_support),
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 559c99ca6592..1bfdae4b0d99 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -1001,7 +1001,6 @@ static int __init hp_sdc_register(void)
uint8_t tq_init_seq[5];
struct semaphore tq_init_sem;
#if defined(__mc68000__)
- mm_segment_t fs;
unsigned char i;
#endif
@@ -1026,11 +1025,8 @@ static int __init hp_sdc_register(void)
hp_sdc.base_io = (unsigned long) 0xf0428000;
hp_sdc.data_io = (unsigned long) hp_sdc.base_io + 1;
hp_sdc.status_io = (unsigned long) hp_sdc.base_io + 3;
- fs = get_fs();
- set_fs(KERNEL_DS);
- if (!get_user(i, (unsigned char *)hp_sdc.data_io))
+ if (!probe_kernel_read(&i, (unsigned char *)hp_sdc.data_io, 1))
hp_sdc.dev = (void *)1;
- set_fs(fs);
hp_sdc.dev_err = hp_sdc_init();
#endif
if (hp_sdc.dev == NULL) {
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index 12a3ad83296d..bb0349fa64bc 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -224,20 +224,6 @@ int sparse_keymap_setup(struct input_dev *dev,
EXPORT_SYMBOL(sparse_keymap_setup);
/**
- * sparse_keymap_free - free memory allocated for sparse keymap
- * @dev: Input device using sparse keymap
- *
- * This function used to free memory allocated by sparse keymap
- * in an input device that was set up by sparse_keymap_setup().
- * Since sparse_keymap_setup() now uses a managed allocation for the
- * keymap copy, use of this function is deprecated.
- */
-void sparse_keymap_free(struct input_dev *dev)
-{
-}
-EXPORT_SYMBOL(sparse_keymap_free);
-
-/**
* sparse_keymap_report_entry - report event corresponding to given key entry
* @dev: Input device for which event should be reported
* @ke: key entry describing event
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index cf26ca49ae6d..64b30fe273fd 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -1114,6 +1114,17 @@ config TOUCHSCREEN_ST1232
To compile this driver as a module, choose M here: the
module will be called st1232_ts.
+config TOUCHSCREEN_STMFTS
+ tristate "STMicroelectronics STMFTS touchscreen"
+ depends on I2C
+ depends on LEDS_CLASS
+ help
+ Say Y here if you want support for STMicroelectronics
+ STMFTS touchscreen.
+
+ To compile this driver as a module, choose M here: the
+ module will be called stmfts.
+
config TOUCHSCREEN_STMPE
tristate "STMicroelectronics STMPE touchscreens"
depends on MFD_STMPE
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 18e476948e44..6badce87037b 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
obj-$(CONFIG_TOUCHSCREEN_SILEAD) += silead.o
obj-$(CONFIG_TOUCHSCREEN_SIS_I2C) += sis_i2c.o
obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
+obj-$(CONFIG_TOUCHSCREEN_STMFTS) += stmfts.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
obj-$(CONFIG_TOUCHSCREEN_SUN4I) += sun4i-ts.o
obj-$(CONFIG_TOUCHSCREEN_SUR40) += sur40.o
diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c
index 90fc07dc98a6..8868573133ab 100644
--- a/drivers/input/touchscreen/mcs5000_ts.c
+++ b/drivers/input/touchscreen/mcs5000_ts.c
@@ -15,10 +15,10 @@
#include <linux/module.h>
#include <linux/i2c.h>
-#include <linux/i2c/mcs.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/irq.h>
+#include <linux/platform_data/mcs.h>
#include <linux/slab.h>
/* Registers */
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 1fafc9f57af6..e5eeb6311f7d 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -11,9 +11,9 @@
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/i2c.h>
-#include <linux/i2c/mms114.h>
#include <linux/input/mt.h>
#include <linux/interrupt.h>
+#include <linux/platform_data/mms114.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index 41d58e88cc8a..3b3db8c868e0 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -264,7 +264,11 @@ static int s3c2410ts_probe(struct platform_device *pdev)
return -ENOENT;
}
- clk_prepare_enable(ts.clock);
+ ret = clk_prepare_enable(ts.clock);
+ if (ret) {
+ dev_err(dev, "Failed! to enabled clocks\n");
+ goto err_clk_get;
+ }
dev_dbg(dev, "got and enabled clocks\n");
ts.irq_tc = ret = platform_get_irq(pdev, 0);
@@ -353,7 +357,9 @@ static int s3c2410ts_probe(struct platform_device *pdev)
err_iomap:
iounmap(ts.io);
err_clk:
+ clk_disable_unprepare(ts.clock);
del_timer_sync(&touch_timer);
+ err_clk_get:
clk_put(ts.clock);
return ret;
}
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
new file mode 100644
index 000000000000..157fdb4bb2e8
--- /dev/null
+++ b/drivers/input/touchscreen/stmfts.c
@@ -0,0 +1,822 @@
+/*
+ * Copyright (c) 2017 Samsung Electronics Co., Ltd.
+ * Author: Andi Shyti <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * STMicroelectronics FTS Touchscreen device driver
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+/* I2C commands */
+#define STMFTS_READ_INFO 0x80
+#define STMFTS_READ_STATUS 0x84
+#define STMFTS_READ_ONE_EVENT 0x85
+#define STMFTS_READ_ALL_EVENT 0x86
+#define STMFTS_LATEST_EVENT 0x87
+#define STMFTS_SLEEP_IN 0x90
+#define STMFTS_SLEEP_OUT 0x91
+#define STMFTS_MS_MT_SENSE_OFF 0x92
+#define STMFTS_MS_MT_SENSE_ON 0x93
+#define STMFTS_SS_HOVER_SENSE_OFF 0x94
+#define STMFTS_SS_HOVER_SENSE_ON 0x95
+#define STMFTS_MS_KEY_SENSE_OFF 0x9a
+#define STMFTS_MS_KEY_SENSE_ON 0x9b
+#define STMFTS_SYSTEM_RESET 0xa0
+#define STMFTS_CLEAR_EVENT_STACK 0xa1
+#define STMFTS_FULL_FORCE_CALIBRATION 0xa2
+#define STMFTS_MS_CX_TUNING 0xa3
+#define STMFTS_SS_CX_TUNING 0xa4
+
+/* events */
+#define STMFTS_EV_NO_EVENT 0x00
+#define STMFTS_EV_MULTI_TOUCH_DETECTED 0x02
+#define STMFTS_EV_MULTI_TOUCH_ENTER 0x03
+#define STMFTS_EV_MULTI_TOUCH_LEAVE 0x04
+#define STMFTS_EV_MULTI_TOUCH_MOTION 0x05
+#define STMFTS_EV_HOVER_ENTER 0x07
+#define STMFTS_EV_HOVER_LEAVE 0x08
+#define STMFTS_EV_HOVER_MOTION 0x09
+#define STMFTS_EV_KEY_STATUS 0x0e
+#define STMFTS_EV_ERROR 0x0f
+#define STMFTS_EV_CONTROLLER_READY 0x10
+#define STMFTS_EV_SLEEP_OUT_CONTROLLER_READY 0x11
+#define STMFTS_EV_STATUS 0x16
+#define STMFTS_EV_DEBUG 0xdb
+
+/* multi touch related event masks */
+#define STMFTS_MASK_EVENT_ID 0x0f
+#define STMFTS_MASK_TOUCH_ID 0xf0
+#define STMFTS_MASK_LEFT_EVENT 0x0f
+#define STMFTS_MASK_X_MSB 0x0f
+#define STMFTS_MASK_Y_LSB 0xf0
+
+/* key related event masks */
+#define STMFTS_MASK_KEY_NO_TOUCH 0x00
+#define STMFTS_MASK_KEY_MENU 0x01
+#define STMFTS_MASK_KEY_BACK 0x02
+
+#define STMFTS_EVENT_SIZE 8
+#define STMFTS_STACK_DEPTH 32
+#define STMFTS_DATA_MAX_SIZE (STMFTS_EVENT_SIZE * STMFTS_STACK_DEPTH)
+#define STMFTS_MAX_FINGERS 10
+#define STMFTS_DEV_NAME "stmfts"
+
+enum stmfts_regulators {
+ STMFTS_REGULATOR_VDD,
+ STMFTS_REGULATOR_AVDD,
+};
+
+struct stmfts_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ struct led_classdev led_cdev;
+ struct mutex mutex;
+
+ struct touchscreen_properties prop;
+
+ struct regulator_bulk_data regulators[2];
+
+ /*
+ * The presence of ledvdd is also used to check
+ * whether the LED is supported.
+ */
+ struct regulator *ledvdd;
+
+ u16 chip_id;
+ u8 chip_ver;
+ u16 fw_ver;
+ u8 config_id;
+ u8 config_ver;
+
+ u8 data[STMFTS_DATA_MAX_SIZE];
+
+ struct completion cmd_done;
+
+ bool use_key;
+ bool led_status;
+ bool hover_enabled;
+ bool running;
+};
+
+static void stmfts_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct stmfts_data *sdata = container_of(led_cdev,
+ struct stmfts_data, led_cdev);
+ int err;
+
+ if (value == sdata->led_status || !sdata->ledvdd)
+ return;
+
+ if (!value) {
+ regulator_disable(sdata->ledvdd);
+ } else {
+ err = regulator_enable(sdata->ledvdd);
+ if (err)
+ dev_warn(&sdata->client->dev,
+ "failed to disable ledvdd regulator: %d\n",
+ err);
+ }
+
+ sdata->led_status = value;
+}
+
+static enum led_brightness stmfts_brightness_get(struct led_classdev *led_cdev)
+{
+ struct stmfts_data *sdata = container_of(led_cdev,
+ struct stmfts_data, led_cdev);
+
+ return !!regulator_is_enabled(sdata->ledvdd);
+}
+
+/*
+ * We can't simply use i2c_smbus_read_i2c_block_data because we
+ * need to read more than 255 bytes (STMFTS_DATA_MAX_SIZE is 256).
+ */
+static int stmfts_read_events(struct stmfts_data *sdata)
+{
+ u8 cmd = STMFTS_READ_ALL_EVENT;
+ struct i2c_msg msgs[2] = {
+ {
+ .addr = sdata->client->addr,
+ .len = 1,
+ .buf = &cmd,
+ },
+ {
+ .addr = sdata->client->addr,
+ .flags = I2C_M_RD,
+ .len = STMFTS_DATA_MAX_SIZE,
+ .buf = sdata->data,
+ },
+ };
+ int ret;
+
+ ret = i2c_transfer(sdata->client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+
+ return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
+}
+
+static void stmfts_report_contact_event(struct stmfts_data *sdata,
+ const u8 event[])
+{
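+ /* 12-bit X/Y coordinates are packed across event bytes 1-3 */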
+ u8 slot_id = (event[0] & STMFTS_MASK_TOUCH_ID) >> 4;
+ u16 x = event[1] | ((event[2] & STMFTS_MASK_X_MSB) << 8);
+ u16 y = (event[2] >> 4) | (event[3] << 4);
+ u8 maj = event[4];
+ u8 min = event[5];
+ u8 orientation = event[6];
+ u8 area = event[7];
+
+ input_mt_slot(sdata->input, slot_id);
+
+ input_mt_report_slot_state(sdata->input, MT_TOOL_FINGER, true);
+ input_report_abs(sdata->input, ABS_MT_POSITION_X, x);
+ input_report_abs(sdata->input, ABS_MT_POSITION_Y, y);
+ input_report_abs(sdata->input, ABS_MT_TOUCH_MAJOR, maj);
+ input_report_abs(sdata->input, ABS_MT_TOUCH_MINOR, min);
+ input_report_abs(sdata->input, ABS_MT_PRESSURE, area);
+ input_report_abs(sdata->input, ABS_MT_ORIENTATION, orientation);
+
+ input_sync(sdata->input);
+}
+
+static void stmfts_report_contact_release(struct stmfts_data *sdata,
+ const u8 event[])
+{
+ u8 slot_id = (event[0] & STMFTS_MASK_TOUCH_ID) >> 4;
+
+ input_mt_slot(sdata->input, slot_id);
+ input_mt_report_slot_state(sdata->input, MT_TOOL_FINGER, false);
+
+ input_sync(sdata->input);
+}
+
+static void stmfts_report_hover_event(struct stmfts_data *sdata,
+ const u8 event[])
+{
+ u16 x = (event[2] << 4) | (event[4] >> 4);
+ u16 y = (event[3] << 4) | (event[4] & STMFTS_MASK_Y_LSB);
+ u8 z = event[5];
+
+ input_report_abs(sdata->input, ABS_X, x);
+ input_report_abs(sdata->input, ABS_Y, y);
+ input_report_abs(sdata->input, ABS_DISTANCE, z);
+
+ input_sync(sdata->input);
+}
+
+static void stmfts_report_key_event(struct stmfts_data *sdata, const u8 event[])
+{
+ switch (event[2]) {
+ case 0:
+ input_report_key(sdata->input, KEY_BACK, 0);
+ input_report_key(sdata->input, KEY_MENU, 0);
+ break;
+
+ case STMFTS_MASK_KEY_BACK:
+ input_report_key(sdata->input, KEY_BACK, 1);
+ break;
+
+ case STMFTS_MASK_KEY_MENU:
+ input_report_key(sdata->input, KEY_MENU, 1);
+ break;
+
+ default:
+ dev_warn(&sdata->client->dev,
+ "unknown key event: %#02x\n", event[2]);
+ break;
+ }
+
+ input_sync(sdata->input);
+}
+
+static void stmfts_parse_events(struct stmfts_data *sdata)
+{
+ int i;
+
+ for (i = 0; i < STMFTS_STACK_DEPTH; i++) {
+ u8 *event = &sdata->data[i * STMFTS_EVENT_SIZE];
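+ /* byte 0 holds the event id; for touch events its high nibble carries the slot id */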
+
+ switch (event[0]) {
+
+ case STMFTS_EV_CONTROLLER_READY:
+ case STMFTS_EV_SLEEP_OUT_CONTROLLER_READY:
+ case STMFTS_EV_STATUS:
+ complete(&sdata->cmd_done);
+ /* fall through */
+
+ case STMFTS_EV_NO_EVENT:
+ case STMFTS_EV_DEBUG:
+ return;
+ }
+
+ switch (event[0] & STMFTS_MASK_EVENT_ID) {
+
+ case STMFTS_EV_MULTI_TOUCH_ENTER:
+ case STMFTS_EV_MULTI_TOUCH_MOTION:
+ stmfts_report_contact_event(sdata, event);
+ break;
+
+ case STMFTS_EV_MULTI_TOUCH_LEAVE:
+ stmfts_report_contact_release(sdata, event);
+ break;
+
+ case STMFTS_EV_HOVER_ENTER:
+ case STMFTS_EV_HOVER_LEAVE:
+ case STMFTS_EV_HOVER_MOTION:
+ stmfts_report_hover_event(sdata, event);
+ break;
+
+ case STMFTS_EV_KEY_STATUS:
+ stmfts_report_key_event(sdata, event);
+ break;
+
+ case STMFTS_EV_ERROR:
+ dev_warn(&sdata->client->dev,
+ "error code: 0x%x%x%x%x%x%x",
+ event[6], event[5], event[4],
+ event[3], event[2], event[1]);
+ break;
+
+ default:
+ dev_err(&sdata->client->dev,
+ "unknown event %#02x\n", event[0]);
+ }
+ }
+}
+
+static irqreturn_t stmfts_irq_handler(int irq, void *dev)
+{
+ struct stmfts_data *sdata = dev;
+ int err;
+
+ mutex_lock(&sdata->mutex);
+
+ err = stmfts_read_events(sdata);
+ if (unlikely(err))
+ dev_err(&sdata->client->dev,
+ "failed to read events: %d\n", err);
+ else
+ stmfts_parse_events(sdata);
+
+ mutex_unlock(&sdata->mutex);
+ return IRQ_HANDLED;
+}
+
+static int stmfts_command(struct stmfts_data *sdata, const u8 cmd)
+{
+ int err;
+
+ reinit_completion(&sdata->cmd_done);
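+ /* cmd_done is completed by the IRQ handler when a ready/status event arrives */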
+
+ err = i2c_smbus_write_byte(sdata->client, cmd);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&sdata->cmd_done,
+ msecs_to_jiffies(1000)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int stmfts_input_open(struct input_dev *dev)
+{
+ struct stmfts_data *sdata = input_get_drvdata(dev);
+ int err;
+
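+ /* runtime-resume the device; balanced by pm_runtime_put_sync() in close */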
+ err = pm_runtime_get_sync(&sdata->client->dev);
+ if (err < 0)
+ return err;
+
+ err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON);
+ if (err)
+ return err;
+
+ mutex_lock(&sdata->mutex);
+ sdata->running = true;
+
+ if (sdata->hover_enabled) {
+ err = i2c_smbus_write_byte(sdata->client,
+ STMFTS_SS_HOVER_SENSE_ON);
+ if (err)
+ dev_warn(&sdata->client->dev,
+ "failed to enable hover\n");
+ }
+ mutex_unlock(&sdata->mutex);
+
+ if (sdata->use_key) {
+ err = i2c_smbus_write_byte(sdata->client,
+ STMFTS_MS_KEY_SENSE_ON);
+ if (err)
+ /* not fatal: the touchscreen still works without the touchkey */
+ dev_warn(&sdata->client->dev,
+ "failed to enable touchkey\n");
+ }
+
+ return 0;
+}
+
+static void stmfts_input_close(struct input_dev *dev)
+{
+ struct stmfts_data *sdata = input_get_drvdata(dev);
+ int err;
+
+ err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_OFF);
+ if (err)
+ dev_warn(&sdata->client->dev,
+ "failed to disable touchscreen: %d\n", err);
+
+ mutex_lock(&sdata->mutex);
+
+ sdata->running = false;
+
+ if (sdata->hover_enabled) {
+ err = i2c_smbus_write_byte(sdata->client,
+ STMFTS_SS_HOVER_SENSE_OFF);
+ if (err)
+ dev_warn(&sdata->client->dev,
+ "failed to disable hover: %d\n", err);
+ }
+ mutex_unlock(&sdata->mutex);
+
+ if (sdata->use_key) {
+ err = i2c_smbus_write_byte(sdata->client,
+ STMFTS_MS_KEY_SENSE_OFF);
+ if (err)
+ dev_warn(&sdata->client->dev,
+ "failed to disable touchkey: %d\n", err);
+ }
+
+ pm_runtime_put_sync(&sdata->client->dev);
+}
+
+static ssize_t stmfts_sysfs_chip_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stmfts_data *sdata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%#x\n", sdata->chip_id);
+}
+
+static ssize_t stmfts_sysfs_chip_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stmfts_data *sdata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", sdata->chip_ver);
+}
+
+static ssize_t stmfts_sysfs_fw_ver(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stmfts_data *sdata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", sdata->fw_ver);
+}
+
+static ssize_t stmfts_sysfs_config_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stmfts_data *sdata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%#x\n", sdata->config_id);
+}
+
+static ssize_t stmfts_sysfs_config_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stmfts_data *sdata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", sdata->config_ver);
+}
+
+static ssize_t stmfts_sysfs_read_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stmfts_data *sdata = dev_get_drvdata(dev);
+ u8 status[4];
+ int err;
+
+ err = i2c_smbus_read_i2c_block_data(sdata->client, STMFTS_READ_STATUS,
+ sizeof(status), status);
+ if (err < 0)
+ return err;
+
+ return sprintf(buf, "%#02x\n", status[0]);
+}
+
+static ssize_t stmfts_sysfs_hover_enable_read(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stmfts_data *sdata = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", sdata->hover_enabled);
+}
+
+static ssize_t stmfts_sysfs_hover_enable_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct stmfts_data *sdata = dev_get_drvdata(dev);
+ unsigned long value;
+ int err = 0;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mutex_lock(&sdata->mutex);
+
+ if (value & sdata->hover_enabled)
+ goto out;
+
+ if (sdata->running)
+ err = i2c_smbus_write_byte(sdata->client,
+ value ? STMFTS_SS_HOVER_SENSE_ON :
+ STMFTS_SS_HOVER_SENSE_OFF);
+
+ if (!err)
+ sdata->hover_enabled = !!value;
+
+out:
+ mutex_unlock(&sdata->mutex);
+
+ return len;
+}
+
+static DEVICE_ATTR(chip_id, 0444, stmfts_sysfs_chip_id, NULL);
+static DEVICE_ATTR(chip_version, 0444, stmfts_sysfs_chip_version, NULL);
+static DEVICE_ATTR(fw_ver, 0444, stmfts_sysfs_fw_ver, NULL);
+static DEVICE_ATTR(config_id, 0444, stmfts_sysfs_config_id, NULL);
+static DEVICE_ATTR(config_version, 0444, stmfts_sysfs_config_version, NULL);
+static DEVICE_ATTR(status, 0444, stmfts_sysfs_read_status, NULL);
+static DEVICE_ATTR(hover_enable, 0644, stmfts_sysfs_hover_enable_read,
+ stmfts_sysfs_hover_enable_write);
+
+static struct attribute *stmfts_sysfs_attrs[] = {
+ &dev_attr_chip_id.attr,
+ &dev_attr_chip_version.attr,
+ &dev_attr_fw_ver.attr,
+ &dev_attr_config_id.attr,
+ &dev_attr_config_version.attr,
+ &dev_attr_status.attr,
+ &dev_attr_hover_enable.attr,
+ NULL
+};
+
+static struct attribute_group stmfts_attribute_group = {
+ .attrs = stmfts_sysfs_attrs
+};
+
+static int stmfts_power_on(struct stmfts_data *sdata)
+{
+ int err;
+ u8 reg[8];
+
+ err = regulator_bulk_enable(ARRAY_SIZE(sdata->regulators),
+ sdata->regulators);
+ if (err)
+ return err;
+
+ /*
+ * The datasheet does not specify the power-on time; given that the
+ * reset time is < 10ms, sleep 20ms to be safe.
+ */
+ msleep(20);
+
+ err = i2c_smbus_read_i2c_block_data(sdata->client, STMFTS_READ_INFO,
+ sizeof(reg), reg);
+ if (err < 0)
+ return err;
+ if (err != sizeof(reg))
+ return -EIO;
+
+ sdata->chip_id = be16_to_cpup((__be16 *)&reg[6]);
+ sdata->chip_ver = reg[0];
+ sdata->fw_ver = be16_to_cpup((__be16 *)&reg[2]);
+ sdata->config_id = reg[4];
+ sdata->config_ver = reg[5];
+
+ enable_irq(sdata->client->irq);
+
+ msleep(50);
+
+ err = stmfts_command(sdata, STMFTS_SYSTEM_RESET);
+ if (err)
+ return err;
+
+ err = stmfts_command(sdata, STMFTS_SLEEP_OUT);
+ if (err)
+ return err;
+
+ /* optional tuning */
+ err = stmfts_command(sdata, STMFTS_MS_CX_TUNING);
+ if (err)
+ dev_warn(&sdata->client->dev,
+ "failed to perform mutual auto tune: %d\n", err);
+
+ /* optional tuning */
+ err = stmfts_command(sdata, STMFTS_SS_CX_TUNING);
+ if (err)
+ dev_warn(&sdata->client->dev,
+ "failed to perform self auto tune: %d\n", err);
+
+ err = stmfts_command(sdata, STMFTS_FULL_FORCE_CALIBRATION);
+ if (err)
+ return err;
+
+ /*
+ * At this point nobody is using the touchscreen,
+ * so the return value can safely be ignored.
+ */
+ (void) i2c_smbus_write_byte(sdata->client, STMFTS_SLEEP_IN);
+
+ return 0;
+}
+
+static void stmfts_power_off(void *data)
+{
+ struct stmfts_data *sdata = data;
+
+ disable_irq(sdata->client->irq);
+ regulator_bulk_disable(ARRAY_SIZE(sdata->regulators),
+ sdata->regulators);
+}
+
+/* A failure here is not fatal: the caller only warns, so the touch key
+ * can still be used even if the LED does not get registered.
+ */
+static int stmfts_enable_led(struct stmfts_data *sdata)
+{
+ int err;
+
+ /* get the regulator for powering the leds on */
+ sdata->ledvdd = devm_regulator_get(&sdata->client->dev, "ledvdd");
+ if (IS_ERR(sdata->ledvdd))
+ return PTR_ERR(sdata->ledvdd);
+
+ sdata->led_cdev.name = STMFTS_DEV_NAME;
+ sdata->led_cdev.max_brightness = LED_ON;
+ sdata->led_cdev.brightness = LED_OFF;
+ sdata->led_cdev.brightness_set = stmfts_brightness_set;
+ sdata->led_cdev.brightness_get = stmfts_brightness_get;
+
+ err = devm_led_classdev_register(&sdata->client->dev, &sdata->led_cdev);
+ if (err) {
+ devm_regulator_put(sdata->ledvdd);
+ return err;
+ }
+
+ return 0;
+}
+
+static int stmfts_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err;
+ struct stmfts_data *sdata;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -ENODEV;
+
+ sdata = devm_kzalloc(&client->dev, sizeof(*sdata), GFP_KERNEL);
+ if (!sdata)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, sdata);
+
+ sdata->client = client;
+ mutex_init(&sdata->mutex);
+ init_completion(&sdata->cmd_done);
+
+ sdata->regulators[STMFTS_REGULATOR_VDD].supply = "vdd";
+ sdata->regulators[STMFTS_REGULATOR_AVDD].supply = "avdd";
+ err = devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(sdata->regulators),
+ sdata->regulators);
+ if (err)
+ return err;
+
+ sdata->input = devm_input_allocate_device(&client->dev);
+ if (!sdata->input)
+ return -ENOMEM;
+
+ sdata->input->name = STMFTS_DEV_NAME;
+ sdata->input->id.bustype = BUS_I2C;
+ sdata->input->open = stmfts_input_open;
+ sdata->input->close = stmfts_input_close;
+
+ touchscreen_parse_properties(sdata->input, true, &sdata->prop);
+
+ input_set_abs_params(sdata->input, ABS_MT_POSITION_X, 0,
+ sdata->prop.max_x, 0, 0);
+ input_set_abs_params(sdata->input, ABS_MT_POSITION_Y, 0,
+ sdata->prop.max_y, 0, 0);
+ input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+ input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0);
+ input_set_abs_params(sdata->input, ABS_MT_ORIENTATION, 0, 255, 0, 0);
+ input_set_abs_params(sdata->input, ABS_MT_PRESSURE, 0, 255, 0, 0);
+ input_set_abs_params(sdata->input, ABS_DISTANCE, 0, 255, 0, 0);
+
+ sdata->use_key = device_property_read_bool(&client->dev,
+ "touch-key-connected");
+ if (sdata->use_key) {
+ input_set_capability(sdata->input, EV_KEY, KEY_MENU);
+ input_set_capability(sdata->input, EV_KEY, KEY_BACK);
+ }
+
+ err = input_mt_init_slots(sdata->input,
+ STMFTS_MAX_FINGERS, INPUT_MT_DIRECT);
+ if (err)
+ return err;
+
+ input_set_drvdata(sdata->input, sdata);
+
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, stmfts_irq_handler,
+ IRQF_ONESHOT,
+ "stmfts_irq", sdata);
+ if (err)
+ return err;
+
+ /* stmfts_power_on expects interrupt to be disabled */
+ disable_irq(client->irq);
+
+ dev_dbg(&client->dev, "initializing ST-Microelectronics FTS...\n");
+
+ err = stmfts_power_on(sdata);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(&client->dev, stmfts_power_off, sdata);
+ if (err)
+ return err;
+
+ err = input_register_device(sdata->input);
+ if (err)
+ return err;
+
+ if (sdata->use_key) {
+ err = stmfts_enable_led(sdata);
+ if (err) {
+ /*
+ * Even if the LED failed to be initialized, the
+ * device can still be used without it. The ledvdd
+ * regulator pointer is then cleared and used as a
+ * flag that the LED is unavailable.
+ */
+ dev_warn(&client->dev, "unable to use touchkey leds\n");
+ sdata->ledvdd = NULL;
+ }
+ }
+
+ err = sysfs_create_group(&sdata->client->dev.kobj,
+ &stmfts_attribute_group);
+ if (err)
+ return err;
+
+ pm_runtime_enable(&client->dev);
+
+ return 0;
+}
+
+static int stmfts_remove(struct i2c_client *client)
+{
+ pm_runtime_disable(&client->dev);
+ sysfs_remove_group(&client->dev.kobj, &stmfts_attribute_group);
+
+ return 0;
+}
+
+static int __maybe_unused stmfts_runtime_suspend(struct device *dev)
+{
+ struct stmfts_data *sdata = dev_get_drvdata(dev);
+ int ret;
+
+ ret = i2c_smbus_write_byte(sdata->client, STMFTS_SLEEP_IN);
+ if (ret)
+ dev_warn(dev, "failed to suspend device: %d\n", ret);
+
+ return ret;
+}
+
+static int __maybe_unused stmfts_runtime_resume(struct device *dev)
+{
+ struct stmfts_data *sdata = dev_get_drvdata(dev);
+ int ret;
+
+ ret = i2c_smbus_write_byte(sdata->client, STMFTS_SLEEP_OUT);
+ if (ret)
+ dev_err(dev, "failed to resume device: %d\n", ret);
+
+ return ret;
+}
+
+static int __maybe_unused stmfts_suspend(struct device *dev)
+{
+ struct stmfts_data *sdata = dev_get_drvdata(dev);
+
+ stmfts_power_off(sdata);
+
+ return 0;
+}
+
+static int __maybe_unused stmfts_resume(struct device *dev)
+{
+ struct stmfts_data *sdata = dev_get_drvdata(dev);
+
+ return stmfts_power_on(sdata);
+}
+
+static const struct dev_pm_ops stmfts_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stmfts_suspend, stmfts_resume)
+ SET_RUNTIME_PM_OPS(stmfts_runtime_suspend, stmfts_runtime_resume, NULL)
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id stmfts_of_match[] = {
+ { .compatible = "st,stmfts", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, stmfts_of_match);
+#endif
+
+static const struct i2c_device_id stmfts_id[] = {
+ { "stmfts", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, stmfts_id);
+
+static struct i2c_driver stmfts_driver = {
+ .driver = {
+ .name = STMFTS_DEV_NAME,
+ .of_match_table = of_match_ptr(stmfts_of_match),
+ .pm = &stmfts_pm_ops,
+ },
+ .probe = stmfts_probe,
+ .remove = stmfts_remove,
+ .id_table = stmfts_id,
+};
+
+module_i2c_driver(stmfts_driver);
+
+MODULE_AUTHOR("Andi Shyti <[email protected]>");
+MODULE_DESCRIPTION("STMicroelectronics FTS Touch Screen");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/tsc2007_core.c b/drivers/input/touchscreen/tsc2007_core.c
index fc7384936011..8342e0c48a53 100644
--- a/drivers/input/touchscreen/tsc2007_core.c
+++ b/drivers/input/touchscreen/tsc2007_core.c
@@ -25,9 +25,9 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/i2c/tsc2007.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
+#include <linux/platform_data/tsc2007.h>
#include "tsc2007.h"
int tsc2007_xfer(struct tsc2007 *tsc, u8 cmd)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 5c9759ed22ca..f16d0f26ee24 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -54,6 +54,8 @@
#include "amd_iommu_types.h"
#include "irq_remapping.h"
+#define AMD_IOMMU_MAPPING_ERROR 0
+
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
#define LOOP_TIMEOUT 100000
@@ -2394,7 +2396,7 @@ static dma_addr_t __map_single(struct device *dev,
paddr &= PAGE_MASK;
address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
- if (address == DMA_ERROR_CODE)
+ if (address == AMD_IOMMU_MAPPING_ERROR)
goto out;
prot = dir2prot(direction);
@@ -2431,7 +2433,7 @@ out_unmap:
dma_ops_free_iova(dma_dom, address, pages);
- return DMA_ERROR_CODE;
+ return AMD_IOMMU_MAPPING_ERROR;
}
/*
@@ -2483,7 +2485,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
if (PTR_ERR(domain) == -EINVAL)
return (dma_addr_t)paddr;
else if (IS_ERR(domain))
- return DMA_ERROR_CODE;
+ return AMD_IOMMU_MAPPING_ERROR;
dma_mask = *dev->dma_mask;
dma_dom = to_dma_ops_domain(domain);
@@ -2560,7 +2562,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
npages = sg_num_pages(dev, sglist, nelems);
address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
- if (address == DMA_ERROR_CODE)
+ if (address == AMD_IOMMU_MAPPING_ERROR)
goto out_err;
prot = dir2prot(direction);
@@ -2683,7 +2685,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
*dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
size, DMA_BIDIRECTIONAL, dma_mask);
- if (*dma_addr == DMA_ERROR_CODE)
+ if (*dma_addr == AMD_IOMMU_MAPPING_ERROR)
goto out_free;
return page_address(page);
@@ -2729,9 +2731,16 @@ free_mem:
*/
static int amd_iommu_dma_supported(struct device *dev, u64 mask)
{
+ if (!x86_dma_supported(dev, mask))
+ return 0;
return check_device(dev);
}
+static int amd_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == AMD_IOMMU_MAPPING_ERROR;
+}
+
static const struct dma_map_ops amd_iommu_dma_ops = {
.alloc = alloc_coherent,
.free = free_coherent,
@@ -2740,6 +2749,7 @@ static const struct dma_map_ops amd_iommu_dma_ops = {
.map_sg = map_sg,
.unmap_sg = unmap_sg,
.dma_supported = amd_iommu_dma_supported,
+ .mapping_error = amd_iommu_mapping_error,
};
static int init_reserved_iova_ranges(void)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 62618e77bedc..9403336f1fa6 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -31,6 +31,8 @@
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
+#define IOMMU_MAPPING_ERROR 0
+
struct iommu_dma_msi_page {
struct list_head list;
dma_addr_t iova;
@@ -500,7 +502,7 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
{
__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
- *handle = DMA_ERROR_CODE;
+ *handle = IOMMU_MAPPING_ERROR;
}
/**
@@ -533,7 +535,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
dma_addr_t iova;
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
- *handle = DMA_ERROR_CODE;
+ *handle = IOMMU_MAPPING_ERROR;
min_size = alloc_sizes & -alloc_sizes;
if (min_size < PAGE_SIZE) {
@@ -627,11 +629,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
if (!iova)
- return DMA_ERROR_CODE;
+ return IOMMU_MAPPING_ERROR;
if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
iommu_dma_free_iova(cookie, iova, size);
- return DMA_ERROR_CODE;
+ return IOMMU_MAPPING_ERROR;
}
return iova + iova_off;
}
@@ -671,7 +673,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
s->offset += s_iova_off;
s->length = s_length;
- sg_dma_address(s) = DMA_ERROR_CODE;
+ sg_dma_address(s) = IOMMU_MAPPING_ERROR;
sg_dma_len(s) = 0;
/*
@@ -714,11 +716,11 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
int i;
for_each_sg(sg, s, nents, i) {
- if (sg_dma_address(s) != DMA_ERROR_CODE)
+ if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
s->offset += sg_dma_address(s);
if (sg_dma_len(s))
s->length = sg_dma_len(s);
- sg_dma_address(s) = DMA_ERROR_CODE;
+ sg_dma_address(s) = IOMMU_MAPPING_ERROR;
sg_dma_len(s) = 0;
}
}
@@ -836,7 +838,7 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- return dma_addr == DMA_ERROR_CODE;
+ return dma_addr == IOMMU_MAPPING_ERROR;
}
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 8500deda9175..1e95475883cd 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3981,6 +3981,9 @@ struct dma_map_ops intel_dma_ops = {
.map_page = intel_map_page,
.unmap_page = intel_unmap_page,
.mapping_error = intel_mapping_error,
+#ifdef CONFIG_X86
+ .dma_supported = x86_dma_supported,
+#endif
};
static inline int iommu_domain_cache_init(void)
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 6c2999872090..594b24d410c3 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -590,16 +590,6 @@ config LEDS_KTD2692
Say Y to enable this driver.
-config LEDS_SEAD3
- tristate "LED support for the MIPS SEAD 3 board"
- depends on LEDS_CLASS && MIPS_SEAD3
- help
- Say Y here to include support for the FLED and PLED LEDs on SEAD3 eval
- boards.
-
- This driver can also be built as a module. If so the module
- will be called leds-sead3.
-
config LEDS_IS31FL319X
tristate "LED Support for ISSI IS31FL319x I2C LED controller family"
depends on LEDS_CLASS && I2C && OF
@@ -651,14 +641,6 @@ config LEDS_SYSCON
devices. This will only work with device tree enabled
devices.
-config LEDS_VERSATILE
- tristate "LED support for the ARM Versatile and RealView"
- depends on ARCH_REALVIEW || ARCH_VERSATILE
- depends on LEDS_CLASS
- help
- This option enabled support for the LEDs on the ARM Versatile
- and RealView boards. Say Y to enabled these.
-
config LEDS_PM8058
tristate "LED Support for the Qualcomm PM8058 PMIC"
depends on MFD_PM8XXX
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 45f133962ed8..909dae62ba05 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -62,11 +62,9 @@ obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o
obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o
obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o
obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
-obj-$(CONFIG_LEDS_VERSATILE) += leds-versatile.o
obj-$(CONFIG_LEDS_MENF21BMC) += leds-menf21bmc.o
obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o
obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o
-obj-$(CONFIG_LEDS_SEAD3) += leds-sead3.o
obj-$(CONFIG_LEDS_IS31FL319X) += leds-is31fl319x.o
obj-$(CONFIG_LEDS_IS31FL32XX) += leds-is31fl32xx.o
obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
diff --git a/drivers/leds/leds-aat1290.c b/drivers/leds/leds-aat1290.c
index def3cf9f7e92..a21e19297745 100644
--- a/drivers/leds/leds-aat1290.c
+++ b/drivers/leds/leds-aat1290.c
@@ -503,8 +503,9 @@ static int aat1290_led_probe(struct platform_device *pdev)
aat1290_init_v4l2_flash_config(led, &led_cfg, &v4l2_sd_cfg);
/* Create V4L2 Flash subdev. */
- led->v4l2_flash = v4l2_flash_init(dev, sub_node, fled_cdev, NULL,
- &v4l2_flash_ops, &v4l2_sd_cfg);
+ led->v4l2_flash = v4l2_flash_init(dev, of_fwnode_handle(sub_node),
+ fled_cdev, NULL, &v4l2_flash_ops,
+ &v4l2_sd_cfg);
if (IS_ERR(led->v4l2_flash)) {
ret = PTR_ERR(led->v4l2_flash);
goto error_v4l2_flash_init;
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index e9ba8cd32d66..924e50aefb00 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -168,13 +168,13 @@ static int lp5523_post_init_device(struct lp55xx_chip *chip)
static void lp5523_load_engine(struct lp55xx_chip *chip)
{
enum lp55xx_engine_index idx = chip->engine_idx;
- u8 mask[] = {
+ static const u8 mask[] = {
[LP55XX_ENGINE_1] = LP5523_MODE_ENG1_M,
[LP55XX_ENGINE_2] = LP5523_MODE_ENG2_M,
[LP55XX_ENGINE_3] = LP5523_MODE_ENG3_M,
};
- u8 val[] = {
+ static const u8 val[] = {
[LP55XX_ENGINE_1] = LP5523_LOAD_ENG1,
[LP55XX_ENGINE_2] = LP5523_LOAD_ENG2,
[LP55XX_ENGINE_3] = LP5523_LOAD_ENG3,
@@ -188,7 +188,7 @@ static void lp5523_load_engine(struct lp55xx_chip *chip)
static void lp5523_load_engine_and_select_page(struct lp55xx_chip *chip)
{
enum lp55xx_engine_index idx = chip->engine_idx;
- u8 page_sel[] = {
+ static const u8 page_sel[] = {
[LP55XX_ENGINE_1] = LP5523_PAGE_ENG1,
[LP55XX_ENGINE_2] = LP5523_PAGE_ENG2,
[LP55XX_ENGINE_3] = LP5523_PAGE_ENG3,
@@ -208,7 +208,7 @@ static void lp5523_stop_all_engines(struct lp55xx_chip *chip)
static void lp5523_stop_engine(struct lp55xx_chip *chip)
{
enum lp55xx_engine_index idx = chip->engine_idx;
- u8 mask[] = {
+ static const u8 mask[] = {
[LP55XX_ENGINE_1] = LP5523_MODE_ENG1_M,
[LP55XX_ENGINE_2] = LP5523_MODE_ENG2_M,
[LP55XX_ENGINE_3] = LP5523_MODE_ENG3_M,
@@ -505,7 +505,7 @@ static int lp5523_load_mux(struct lp55xx_chip *chip, u16 mux, int nr)
{
struct lp55xx_engine *engine = &chip->engines[nr - 1];
int ret;
- u8 mux_page[] = {
+ static const u8 mux_page[] = {
[LP55XX_ENGINE_1] = LP5523_PAGE_MUX1,
[LP55XX_ENGINE_2] = LP5523_PAGE_MUX2,
[LP55XX_ENGINE_3] = LP5523_PAGE_MUX3,
diff --git a/drivers/leds/leds-max77693.c b/drivers/leds/leds-max77693.c
index 1eb58ef6aefe..2d3062d53325 100644
--- a/drivers/leds/leds-max77693.c
+++ b/drivers/leds/leds-max77693.c
@@ -930,8 +930,9 @@ static int max77693_register_led(struct max77693_sub_led *sub_led,
max77693_init_v4l2_flash_config(sub_led, led_cfg, &v4l2_sd_cfg);
/* Register in the V4L2 subsystem. */
- sub_led->v4l2_flash = v4l2_flash_init(dev, sub_node, fled_cdev, NULL,
- &v4l2_flash_ops, &v4l2_sd_cfg);
+ sub_led->v4l2_flash = v4l2_flash_init(dev, of_fwnode_handle(sub_node),
+ fled_cdev, NULL, &v4l2_flash_ops,
+ &v4l2_sd_cfg);
if (IS_ERR(sub_led->v4l2_flash)) {
ret = PTR_ERR(sub_led->v4l2_flash);
goto err_v4l2_flash_init;
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index ded1e4dac36a..3bf9a1271819 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -342,6 +342,12 @@ pca963x_dt_init(struct i2c_client *client, struct pca963x_chipdef *chip)
if (of_property_read_u32(np, "nxp,period-scale", &chip->scaling))
chip->scaling = 1000;
+ /* default to non-inverted output, unless inverted is specified */
+ if (of_property_read_bool(np, "nxp,inverted-out"))
+ pdata->dir = PCA963X_INVERTED;
+ else
+ pdata->dir = PCA963X_NORMAL;
+
return pdata;
}
@@ -452,11 +458,18 @@ static int pca963x_probe(struct i2c_client *client,
i2c_smbus_write_byte_data(client, PCA963X_MODE1, BIT(4));
if (pdata) {
+ u8 mode2 = i2c_smbus_read_byte_data(pca963x->chip->client,
+ PCA963X_MODE2);
/* Configure output: open-drain or totem pole (push-pull) */
if (pdata->outdrv == PCA963X_OPEN_DRAIN)
- i2c_smbus_write_byte_data(client, PCA963X_MODE2, 0x01);
+ mode2 |= 0x01;
else
- i2c_smbus_write_byte_data(client, PCA963X_MODE2, 0x05);
+ mode2 |= 0x05;
+ /* Configure direction: normal or inverted */
+ if (pdata->dir == PCA963X_INVERTED)
+ mode2 |= 0x10;
+ i2c_smbus_write_byte_data(pca963x->chip->client, PCA963X_MODE2,
+ mode2);
}
return 0;
diff --git a/drivers/leds/leds-sead3.c b/drivers/leds/leds-sead3.c
deleted file mode 100644
index eb97a3271bb3..000000000000
--- a/drivers/leds/leds-sead3.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- * Copyright (C) 2015 Imagination Technologies, Inc.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/err.h>
-#include <linux/io.h>
-
-#include <asm/mips-boards/sead3-addr.h>
-
-static void sead3_pled_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- writel(value, (void __iomem *)SEAD3_CPLD_P_LED);
-}
-
-static void sead3_fled_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- writel(value, (void __iomem *)SEAD3_CPLD_F_LED);
-}
-
-static struct led_classdev sead3_pled = {
- .name = "sead3::pled",
- .brightness_set = sead3_pled_set,
- .flags = LED_CORE_SUSPENDRESUME,
-};
-
-static struct led_classdev sead3_fled = {
- .name = "sead3::fled",
- .brightness_set = sead3_fled_set,
- .flags = LED_CORE_SUSPENDRESUME,
-};
-
-static int sead3_led_probe(struct platform_device *pdev)
-{
- int ret;
-
- ret = led_classdev_register(&pdev->dev, &sead3_pled);
- if (ret < 0)
- return ret;
-
- ret = led_classdev_register(&pdev->dev, &sead3_fled);
- if (ret < 0)
- led_classdev_unregister(&sead3_pled);
-
- return ret;
-}
-
-static int sead3_led_remove(struct platform_device *pdev)
-{
- led_classdev_unregister(&sead3_pled);
- led_classdev_unregister(&sead3_fled);
-
- return 0;
-}
-
-static struct platform_driver sead3_led_driver = {
- .probe = sead3_led_probe,
- .remove = sead3_led_remove,
- .driver = {
- .name = "sead3-led",
- },
-};
-
-module_platform_driver(sead3_led_driver);
-
-MODULE_AUTHOR("Kristian Kielhofner <[email protected]>");
-MODULE_DESCRIPTION("SEAD3 LED driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-versatile.c b/drivers/leds/leds-versatile.c
deleted file mode 100644
index 80553022d661..000000000000
--- a/drivers/leds/leds-versatile.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Driver for the 8 user LEDs found on the RealViews and Versatiles
- * Based on DaVinci's DM365 board code
- *
- * License terms: GNU General Public License (GPL) version 2
- * Author: Linus Walleij <[email protected]>
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/leds.h>
-#include <linux/platform_device.h>
-
-struct versatile_led {
- void __iomem *base;
- struct led_classdev cdev;
- u8 mask;
-};
-
-/*
- * The triggers lines up below will only be used if the
- * LED triggers are compiled in.
- */
-static const struct {
- const char *name;
- const char *trigger;
-} versatile_leds[] = {
- { "versatile:0", "heartbeat", },
- { "versatile:1", "mmc0", },
- { "versatile:2", "cpu0" },
- { "versatile:3", "cpu1" },
- { "versatile:4", "cpu2" },
- { "versatile:5", "cpu3" },
- { "versatile:6", },
- { "versatile:7", },
-};
-
-static void versatile_led_set(struct led_classdev *cdev,
- enum led_brightness b)
-{
- struct versatile_led *led = container_of(cdev,
- struct versatile_led, cdev);
- u32 reg = readl(led->base);
-
- if (b != LED_OFF)
- reg |= led->mask;
- else
- reg &= ~led->mask;
- writel(reg, led->base);
-}
-
-static enum led_brightness versatile_led_get(struct led_classdev *cdev)
-{
- struct versatile_led *led = container_of(cdev,
- struct versatile_led, cdev);
- u32 reg = readl(led->base);
-
- return (reg & led->mask) ? LED_FULL : LED_OFF;
-}
-
-static int versatile_leds_probe(struct platform_device *dev)
-{
- int i;
- struct resource *res;
- void __iomem *base;
-
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&dev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- /* All off */
- writel(0, base);
- for (i = 0; i < ARRAY_SIZE(versatile_leds); i++) {
- struct versatile_led *led;
-
- led = kzalloc(sizeof(*led), GFP_KERNEL);
- if (!led)
- break;
-
- led->base = base;
- led->cdev.name = versatile_leds[i].name;
- led->cdev.brightness_set = versatile_led_set;
- led->cdev.brightness_get = versatile_led_get;
- led->cdev.default_trigger = versatile_leds[i].trigger;
- led->mask = BIT(i);
-
- if (led_classdev_register(NULL, &led->cdev) < 0) {
- kfree(led);
- break;
- }
- }
-
- return 0;
-}
-
-static struct platform_driver versatile_leds_driver = {
- .driver = {
- .name = "versatile-leds",
- },
- .probe = versatile_leds_probe,
-};
-
-module_platform_driver(versatile_leds_driver);
-
-MODULE_AUTHOR("Linus Walleij <[email protected]>");
-MODULE_DESCRIPTION("ARM Versatile LED driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/trigger/ledtrig-gpio.c b/drivers/leds/trigger/ledtrig-gpio.c
index 51288a45fbcb..8891e88d54dd 100644
--- a/drivers/leds/trigger/ledtrig-gpio.c
+++ b/drivers/leds/trigger/ledtrig-gpio.c
@@ -14,14 +14,12 @@
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
-#include <linux/workqueue.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include "../leds.h"
struct gpio_trig_data {
struct led_classdev *led;
- struct work_struct work;
unsigned desired_brightness; /* desired brightness when led is on */
unsigned inverted; /* true when gpio is inverted */
@@ -32,22 +30,8 @@ static irqreturn_t gpio_trig_irq(int irq, void *_led)
{
struct led_classdev *led = _led;
struct gpio_trig_data *gpio_data = led->trigger_data;
-
- /* just schedule_work since gpio_get_value can sleep */
- schedule_work(&gpio_data->work);
-
- return IRQ_HANDLED;
-};
-
-static void gpio_trig_work(struct work_struct *work)
-{
- struct gpio_trig_data *gpio_data = container_of(work,
- struct gpio_trig_data, work);
int tmp;
- if (!gpio_data->gpio)
- return;
-
tmp = gpio_get_value_cansleep(gpio_data->gpio);
if (gpio_data->inverted)
tmp = !tmp;
@@ -61,6 +45,8 @@ static void gpio_trig_work(struct work_struct *work)
} else {
led_set_brightness_nosleep(gpio_data->led, LED_OFF);
}
+
+ return IRQ_HANDLED;
}
static ssize_t gpio_trig_brightness_show(struct device *dev,
@@ -120,7 +106,7 @@ static ssize_t gpio_trig_inverted_store(struct device *dev,
gpio_data->inverted = inverted;
/* After inverting, we need to update the LED. */
- schedule_work(&gpio_data->work);
+ gpio_trig_irq(0, led);
return n;
}
@@ -147,7 +133,6 @@ static ssize_t gpio_trig_gpio_store(struct device *dev,
ret = sscanf(buf, "%u", &gpio);
if (ret < 1) {
dev_err(dev, "couldn't read gpio number\n");
- flush_work(&gpio_data->work);
return -EINVAL;
}
@@ -161,8 +146,8 @@ static ssize_t gpio_trig_gpio_store(struct device *dev,
return n;
}
- ret = request_irq(gpio_to_irq(gpio), gpio_trig_irq,
- IRQF_SHARED | IRQF_TRIGGER_RISING
+ ret = request_threaded_irq(gpio_to_irq(gpio), NULL, gpio_trig_irq,
+ IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_RISING
| IRQF_TRIGGER_FALLING, "ledtrig-gpio", led);
if (ret) {
dev_err(dev, "request_irq failed with error %d\n", ret);
@@ -170,6 +155,8 @@ static ssize_t gpio_trig_gpio_store(struct device *dev,
if (gpio_data->gpio != 0)
free_irq(gpio_to_irq(gpio_data->gpio), led);
gpio_data->gpio = gpio;
+ /* After changing the GPIO, we need to update the LED. */
+ gpio_trig_irq(0, led);
}
return ret ? ret : n;
@@ -199,7 +186,6 @@ static void gpio_trig_activate(struct led_classdev *led)
gpio_data->led = led;
led->trigger_data = gpio_data;
- INIT_WORK(&gpio_data->work, gpio_trig_work);
led->activated = true;
return;
@@ -222,7 +208,6 @@ static void gpio_trig_deactivate(struct led_classdev *led)
device_remove_file(led->dev, &dev_attr_gpio);
device_remove_file(led->dev, &dev_attr_inverted);
device_remove_file(led->dev, &dev_attr_desired_brightness);
- flush_work(&gpio_data->work);
if (gpio_data->gpio != 0)
free_irq(gpio_to_irq(gpio_data->gpio), led);
kfree(gpio_data);
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index ee1a3d9147ef..c5731e5e3c6c 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -124,6 +124,14 @@ config MAILBOX_TEST
Test client to help with testing new Controller driver
implementations.
+config QCOM_APCS_IPC
+ tristate "Qualcomm APCS IPC driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ Say y here to enable support for the APCS IPC mailbox driver,
+ providing an interface for invoking the inter-process communication
+ signals from the application processor to other masters.
+
config TEGRA_HSP_MBOX
bool "Tegra HSP (Hardware Synchronization Primitives) Driver"
depends on ARCH_TEGRA_186_SOC
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index e2bcb03cd35b..d54e41206e17 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -32,4 +32,6 @@ obj-$(CONFIG_BCM_PDC_MBOX) += bcm-pdc-mailbox.o
obj-$(CONFIG_BCM_FLEXRM_MBOX) += bcm-flexrm-mailbox.o
+obj-$(CONFIG_QCOM_APCS_IPC) += qcom-apcs-ipc-mailbox.o
+
obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 9dfbf7ea10a2..537f4f6d009b 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -355,11 +355,14 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
spin_unlock_irqrestore(&chan->lock, flags);
- ret = chan->mbox->ops->startup(chan);
- if (ret) {
- dev_err(dev, "Unable to startup the chan (%d)\n", ret);
- mbox_free_channel(chan);
- chan = ERR_PTR(ret);
+ if (chan->mbox->ops->startup) {
+ ret = chan->mbox->ops->startup(chan);
+
+ if (ret) {
+ dev_err(dev, "Unable to startup the chan (%d)\n", ret);
+ mbox_free_channel(chan);
+ chan = ERR_PTR(ret);
+ }
}
mutex_unlock(&con_mutex);
@@ -408,7 +411,8 @@ void mbox_free_channel(struct mbox_chan *chan)
if (!chan || !chan->cl)
return;
- chan->mbox->ops->shutdown(chan);
+ if (chan->mbox->ops->shutdown)
+ chan->mbox->ops->shutdown(chan);
/* The queued TX requests are simply aborted, no callbacks are made */
spin_lock_irqsave(&chan->lock, flags);
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
new file mode 100644
index 000000000000..9924c6d7f05d
--- /dev/null
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2017, Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/mailbox_controller.h>
+
+#define QCOM_APCS_IPC_BITS 32
+
+struct qcom_apcs_ipc {
+ struct mbox_controller mbox;
+ struct mbox_chan mbox_chans[QCOM_APCS_IPC_BITS];
+
+ void __iomem *reg;
+ unsigned long offset;
+};
+
+static int qcom_apcs_ipc_send_data(struct mbox_chan *chan, void *data)
+{
+ struct qcom_apcs_ipc *apcs = container_of(chan->mbox,
+ struct qcom_apcs_ipc, mbox);
+ unsigned long idx = (unsigned long)chan->con_priv;
+
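+ /* each of the 32 channels maps to one bit of the IPC trigger register */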
+ writel(BIT(idx), apcs->reg);
+
+ return 0;
+}
+
+static const struct mbox_chan_ops qcom_apcs_ipc_ops = {
+ .send_data = qcom_apcs_ipc_send_data,
+};
+
+static int qcom_apcs_ipc_probe(struct platform_device *pdev)
+{
+ struct qcom_apcs_ipc *apcs;
+ struct resource *res;
+ unsigned long offset;
+ void __iomem *base;
+ unsigned long i;
+ int ret;
+
+ apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL);
+ if (!apcs)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ offset = (unsigned long)of_device_get_match_data(&pdev->dev);
+
+ apcs->reg = base + offset;
+
+ /* Initialize channel identifiers */
+ for (i = 0; i < ARRAY_SIZE(apcs->mbox_chans); i++)
+ apcs->mbox_chans[i].con_priv = (void *)i;
+
+ apcs->mbox.dev = &pdev->dev;
+ apcs->mbox.ops = &qcom_apcs_ipc_ops;
+ apcs->mbox.chans = apcs->mbox_chans;
+ apcs->mbox.num_chans = ARRAY_SIZE(apcs->mbox_chans);
+
+ ret = mbox_controller_register(&apcs->mbox);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register APCS IPC controller\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, apcs);
+
+ return 0;
+}
+
+static int qcom_apcs_ipc_remove(struct platform_device *pdev)
+{
+ struct qcom_apcs_ipc *apcs = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&apcs->mbox);
+
+ return 0;
+}
+
+/* .data is the offset of the ipc register within the global block */
+static const struct of_device_id qcom_apcs_ipc_of_match[] = {
+ { .compatible = "qcom,msm8916-apcs-kpss-global", .data = (void *)8 },
+ { .compatible = "qcom,msm8996-apcs-hmss-global", .data = (void *)16 },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
+
+static struct platform_driver qcom_apcs_ipc_driver = {
+ .probe = qcom_apcs_ipc_probe,
+ .remove = qcom_apcs_ipc_remove,
+ .driver = {
+ .name = "qcom_apcs_ipc",
+ .of_match_table = qcom_apcs_ipc_of_match,
+ },
+};
+
+static int __init qcom_apcs_ipc_init(void)
+{
+ return platform_driver_register(&qcom_apcs_ipc_driver);
+}
+postcore_initcall(qcom_apcs_ipc_init);
+
+static void __exit qcom_apcs_ipc_exit(void)
+{
+ platform_driver_unregister(&qcom_apcs_ipc_driver);
+}
+module_exit(qcom_apcs_ipc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm APCS IPC driver");
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 906103c168ea..4a249ee86364 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -521,6 +521,23 @@ config DM_INTEGRITY
To compile this code as a module, choose M here: the module will
be called dm-integrity.
+config DM_ZONED
+ tristate "Drive-managed zoned block device target support"
+ depends on BLK_DEV_DM
+ depends on BLK_DEV_ZONED
+ ---help---
+ This device-mapper target takes a host-managed or host-aware zoned
+ block device and exposes most of its capacity as a regular block
+ device (drive-managed zoned block device) without any write
+ constraints. This is mainly intended for use with file systems that
+ do not natively support zoned block devices but still want to
+ benefit from the increased capacity offered by SMR disks. Other uses
+ by applications using raw block devices (for example object stores)
+ are also possible.
+
+ To compile this code as a module, choose M here: the module will
+ be called dm-zoned.
+
If unsure, say N.
endif # MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 913720bd81c1..786ec9e86d65 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -20,6 +20,7 @@ dm-era-y += dm-era-target.o
dm-verity-y += dm-verity-target.o
md-mod-y += md.o bitmap.o
raid456-y += raid5.o raid5-cache.o raid5-ppl.o
+dm-zoned-y += dm-zoned-target.o dm-zoned-metadata.o dm-zoned-reclaim.o
# Note: link order is important. All raid personalities
# and must come before md.o, as they each initialise
@@ -60,6 +61,7 @@ obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o
obj-$(CONFIG_DM_ERA) += dm-era.o
obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o
obj-$(CONFIG_DM_INTEGRITY) += dm-integrity.o
+obj-$(CONFIG_DM_ZONED) += dm-zoned.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c
index 82d27384d31f..874841f0fc83 100644
--- a/drivers/md/dm-bio-prison-v1.c
+++ b/drivers/md/dm-bio-prison-v1.c
@@ -116,7 +116,7 @@ static int __bio_detain(struct dm_bio_prison *prison,
while (*new) {
struct dm_bio_prison_cell *cell =
- container_of(*new, struct dm_bio_prison_cell, node);
+ rb_entry(*new, struct dm_bio_prison_cell, node);
r = cmp_keys(key, &cell->key);
diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c
index c9b11f799cd8..8ce3a1a588cf 100644
--- a/drivers/md/dm-bio-prison-v2.c
+++ b/drivers/md/dm-bio-prison-v2.c
@@ -120,7 +120,7 @@ static bool __find_or_insert(struct dm_bio_prison_v2 *prison,
while (*new) {
struct dm_bio_prison_cell_v2 *cell =
- container_of(*new, struct dm_bio_prison_cell_v2, node);
+ rb_entry(*new, struct dm_bio_prison_cell_v2, node);
r = cmp_keys(key, &cell->key);
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 52ca8d059e82..24eddbdf2ab4 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -147,4 +147,7 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen
return !maxlen || strlen(result) + 1 >= maxlen;
}
+extern atomic_t dm_global_event_nr;
+extern wait_queue_head_t dm_global_eventq;
+
#endif
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 9e1b72e8f7ef..cdf6b1e12460 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -246,6 +246,9 @@ static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
* plain64: the initial vector is the 64-bit little-endian version of the sector
* number, padded with zeros if necessary.
*
+ * plain64be: the initial vector is the 64-bit big-endian version of the sector
+ * number, padded with zeros if necessary.
+ *
* essiv: "encrypted sector|salt initial vector", the sector number is
* encrypted with the bulk cipher using a salt as key. The salt
* should be derived from the bulk cipher's key via hashing.
@@ -302,6 +305,16 @@ static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
return 0;
}
+static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq)
+{
+ memset(iv, 0, cc->iv_size);
+ /* iv_size is at least of size u64; usually it is 16 bytes */
+ *(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);
+
+ return 0;
+}
+
/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
@@ -835,6 +848,10 @@ static const struct crypt_iv_operations crypt_iv_plain64_ops = {
.generator = crypt_iv_plain64_gen
};
+static const struct crypt_iv_operations crypt_iv_plain64be_ops = {
+ .generator = crypt_iv_plain64be_gen
+};
+
static const struct crypt_iv_operations crypt_iv_essiv_ops = {
.ctr = crypt_iv_essiv_ctr,
.dtr = crypt_iv_essiv_dtr,
@@ -2208,6 +2225,8 @@ static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
cc->iv_gen_ops = &crypt_iv_plain_ops;
else if (strcmp(ivmode, "plain64") == 0)
cc->iv_gen_ops = &crypt_iv_plain64_ops;
+ else if (strcmp(ivmode, "plain64be") == 0)
+ cc->iv_gen_ops = &crypt_iv_plain64be_ops;
else if (strcmp(ivmode, "essiv") == 0)
cc->iv_gen_ops = &crypt_iv_essiv_ops;
else if (strcmp(ivmode, "benbi") == 0)
@@ -2987,7 +3006,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 17, 0},
+ .version = {1, 18, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 3d04d5ce19d9..e2c7234931bc 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -275,7 +275,7 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
struct flakey_c *fc = ti->private;
bio->bi_bdev = fc->dev->bdev;
- if (bio_sectors(bio))
+ if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
bio->bi_iter.bi_sector =
flakey_map_sector(ti, bio->bi_iter.bi_sector);
}
@@ -306,6 +306,14 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
pb->bio_submitted = false;
+ /* Do not fail reset zone */
+ if (bio_op(bio) == REQ_OP_ZONE_RESET)
+ goto map_bio;
+
+ /* We need to remap reported zones, so remember the BIO iter */
+ if (bio_op(bio) == REQ_OP_ZONE_REPORT)
+ goto map_bio;
+
/* Are we alive ? */
elapsed = (jiffies - fc->start_time) / HZ;
if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
@@ -359,11 +367,19 @@ map_bio:
}
static int flakey_end_io(struct dm_target *ti, struct bio *bio,
- blk_status_t *error)
+ blk_status_t *error)
{
struct flakey_c *fc = ti->private;
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+ if (bio_op(bio) == REQ_OP_ZONE_RESET)
+ return DM_ENDIO_DONE;
+
+ if (bio_op(bio) == REQ_OP_ZONE_REPORT) {
+ dm_remap_zone_report(ti, bio, fc->start);
+ return DM_ENDIO_DONE;
+ }
+
if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
all_corrupt_bio_flags_match(bio, fc)) {
@@ -446,7 +462,8 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
- .version = {1, 4, 0},
+ .version = {1, 5, 0},
+ .features = DM_TARGET_ZONED_HM,
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 41852ae287a5..e06f0ef7d2ec 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -23,6 +23,14 @@
#define DM_MSG_PREFIX "ioctl"
#define DM_DRIVER_EMAIL "[email protected]"
+struct dm_file {
+ /*
+ * poll will wait until the global event number is greater than
+ * this value.
+ */
+ volatile unsigned global_event_nr;
+};
+
/*-----------------------------------------------------------------
* The ioctl interface needs to be able to look up devices by
* name or uuid.
@@ -456,9 +464,9 @@ void dm_deferred_remove(void)
* All the ioctl commands get dispatched to functions with this
* prototype.
*/
-typedef int (*ioctl_fn)(struct dm_ioctl *param, size_t param_size);
+typedef int (*ioctl_fn)(struct file *filp, struct dm_ioctl *param, size_t param_size);
-static int remove_all(struct dm_ioctl *param, size_t param_size)
+static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
dm_hash_remove_all(true, !!(param->flags & DM_DEFERRED_REMOVE), false);
param->data_size = 0;
@@ -491,13 +499,14 @@ static void *get_result_buffer(struct dm_ioctl *param, size_t param_size,
return ((void *) param) + param->data_start;
}
-static int list_devices(struct dm_ioctl *param, size_t param_size)
+static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
unsigned int i;
struct hash_cell *hc;
size_t len, needed = 0;
struct gendisk *disk;
struct dm_name_list *nl, *old_nl = NULL;
+ uint32_t *event_nr;
down_write(&_hash_lock);
@@ -510,6 +519,7 @@ static int list_devices(struct dm_ioctl *param, size_t param_size)
needed += sizeof(struct dm_name_list);
needed += strlen(hc->name) + 1;
needed += ALIGN_MASK;
+ needed += (sizeof(uint32_t) + ALIGN_MASK) & ~ALIGN_MASK;
}
}
@@ -539,7 +549,9 @@ static int list_devices(struct dm_ioctl *param, size_t param_size)
strcpy(nl->name, hc->name);
old_nl = nl;
- nl = align_ptr(((void *) ++nl) + strlen(hc->name) + 1);
+ event_nr = align_ptr(((void *) (nl + 1)) + strlen(hc->name) + 1);
+ *event_nr = dm_get_event_nr(hc->md);
+ nl = align_ptr(event_nr + 1);
}
}
@@ -582,7 +594,7 @@ static void list_version_get_info(struct target_type *tt, void *param)
info->vers = align_ptr(((void *) ++info->vers) + strlen(tt->name) + 1);
}
-static int list_versions(struct dm_ioctl *param, size_t param_size)
+static int list_versions(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
size_t len, needed = 0;
struct dm_target_versions *vers;
@@ -724,7 +736,7 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
}
}
-static int dev_create(struct dm_ioctl *param, size_t param_size)
+static int dev_create(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
int r, m = DM_ANY_MINOR;
struct mapped_device *md;
@@ -816,7 +828,7 @@ static struct mapped_device *find_device(struct dm_ioctl *param)
return md;
}
-static int dev_remove(struct dm_ioctl *param, size_t param_size)
+static int dev_remove(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
struct hash_cell *hc;
struct mapped_device *md;
@@ -881,7 +893,7 @@ static int invalid_str(char *str, void *end)
return -EINVAL;
}
-static int dev_rename(struct dm_ioctl *param, size_t param_size)
+static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
int r;
char *new_data = (char *) param + param->data_start;
@@ -911,7 +923,7 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
return 0;
}
-static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
+static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
int r = -EINVAL, x;
struct mapped_device *md;
@@ -1060,7 +1072,7 @@ static int do_resume(struct dm_ioctl *param)
* Set or unset the suspension state of a device.
* If the device already is in the requested state we just return its status.
*/
-static int dev_suspend(struct dm_ioctl *param, size_t param_size)
+static int dev_suspend(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
if (param->flags & DM_SUSPEND_FLAG)
return do_suspend(param);
@@ -1072,7 +1084,7 @@ static int dev_suspend(struct dm_ioctl *param, size_t param_size)
* Copies device info back to user space, used by
* the create and info ioctls.
*/
-static int dev_status(struct dm_ioctl *param, size_t param_size)
+static int dev_status(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
struct mapped_device *md;
@@ -1163,7 +1175,7 @@ static void retrieve_status(struct dm_table *table,
/*
* Wait for a device to report an event
*/
-static int dev_wait(struct dm_ioctl *param, size_t param_size)
+static int dev_wait(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
int r = 0;
struct mapped_device *md;
@@ -1200,6 +1212,19 @@ out:
return r;
}
+/*
+ * Remember the global event number and make it possible to poll
+ * for further events.
+ */
+static int dev_arm_poll(struct file *filp, struct dm_ioctl *param, size_t param_size)
+{
+ struct dm_file *priv = filp->private_data;
+
+ priv->global_event_nr = atomic_read(&dm_global_event_nr);
+
+ return 0;
+}
+
static inline fmode_t get_mode(struct dm_ioctl *param)
{
fmode_t mode = FMODE_READ | FMODE_WRITE;
@@ -1269,7 +1294,7 @@ static bool is_valid_type(enum dm_queue_mode cur, enum dm_queue_mode new)
return false;
}
-static int table_load(struct dm_ioctl *param, size_t param_size)
+static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
int r;
struct hash_cell *hc;
@@ -1356,7 +1381,7 @@ err:
return r;
}
-static int table_clear(struct dm_ioctl *param, size_t param_size)
+static int table_clear(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
struct hash_cell *hc;
struct mapped_device *md;
@@ -1430,7 +1455,7 @@ static void retrieve_deps(struct dm_table *table,
param->data_size = param->data_start + needed;
}
-static int table_deps(struct dm_ioctl *param, size_t param_size)
+static int table_deps(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
struct mapped_device *md;
struct dm_table *table;
@@ -1456,7 +1481,7 @@ static int table_deps(struct dm_ioctl *param, size_t param_size)
* Return the status of a device as a text string for each
* target.
*/
-static int table_status(struct dm_ioctl *param, size_t param_size)
+static int table_status(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
struct mapped_device *md;
struct dm_table *table;
@@ -1511,7 +1536,7 @@ static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
/*
* Pass a message to the target that's at the supplied device offset.
*/
-static int target_message(struct dm_ioctl *param, size_t param_size)
+static int target_message(struct file *filp, struct dm_ioctl *param, size_t param_size)
{
int r, argc;
char **argv;
@@ -1628,7 +1653,8 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
{DM_LIST_VERSIONS_CMD, 0, list_versions},
{DM_TARGET_MSG_CMD, 0, target_message},
- {DM_DEV_SET_GEOMETRY_CMD, 0, dev_set_geometry}
+ {DM_DEV_SET_GEOMETRY_CMD, 0, dev_set_geometry},
+ {DM_DEV_ARM_POLL, IOCTL_FLAGS_NO_PARAMS, dev_arm_poll},
};
if (unlikely(cmd >= ARRAY_SIZE(_ioctls)))
@@ -1783,7 +1809,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
return 0;
}
-static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
+static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *user)
{
int r = 0;
int ioctl_flags;
@@ -1837,7 +1863,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
goto out;
param->data_size = offsetof(struct dm_ioctl, data);
- r = fn(param, input_param_size);
+ r = fn(file, param, input_param_size);
if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) &&
unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
@@ -1856,7 +1882,7 @@ out:
static long dm_ctl_ioctl(struct file *file, uint command, ulong u)
{
- return (long)ctl_ioctl(command, (struct dm_ioctl __user *)u);
+ return (long)ctl_ioctl(file, command, (struct dm_ioctl __user *)u);
}
#ifdef CONFIG_COMPAT
@@ -1868,8 +1894,47 @@ static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u)
#define dm_compat_ctl_ioctl NULL
#endif
+static int dm_open(struct inode *inode, struct file *filp)
+{
+ int r;
+ struct dm_file *priv;
+
+ r = nonseekable_open(inode, filp);
+ if (unlikely(r))
+ return r;
+
+ priv = filp->private_data = kmalloc(sizeof(struct dm_file), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->global_event_nr = atomic_read(&dm_global_event_nr);
+
+ return 0;
+}
+
+static int dm_release(struct inode *inode, struct file *filp)
+{
+ kfree(filp->private_data);
+ return 0;
+}
+
+static unsigned dm_poll(struct file *filp, poll_table *wait)
+{
+ struct dm_file *priv = filp->private_data;
+ unsigned mask = 0;
+
+ poll_wait(filp, &dm_global_eventq, wait);
+
+ if ((int)(atomic_read(&dm_global_event_nr) - priv->global_event_nr) > 0)
+ mask |= POLLIN;
+
+ return mask;
+}
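The signed cast in the comparison above keeps dm_poll() correct across wrap-around of the 32-bit event counter: for example, with priv->global_event_nr at 0xffffffff and dm_global_event_nr at 2, the unsigned difference is 3, which stays positive after the cast, so POLLIN is still reported.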
+
static const struct file_operations _ctl_fops = {
- .open = nonseekable_open,
+ .open = dm_open,
+ .release = dm_release,
+ .poll = dm_poll,
.unlocked_ioctl = dm_ctl_ioctl,
.compat_ioctl = dm_compat_ctl_ioctl,
.owner = THIS_MODULE,
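Together, dm_open(), dev_arm_poll() and dm_poll() let a monitor sleep on the control node until any device-mapper event fires, instead of issuing DM_DEV_WAIT per device. A hedged userspace sketch of the intended flow (error handling trimmed; DM_DEV_ARM_POLL and the DM_VERSION_* macros come from <linux/dm-ioctl.h>):

#include <fcntl.h>
#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dm-ioctl.h>

int wait_for_dm_event(void)
{
	struct dm_ioctl dmi;
	struct pollfd pfd;
	int fd = open("/dev/mapper/control", O_RDWR);

	if (fd < 0)
		return -1;

	/* Latch the current global event number for this open file. */
	memset(&dmi, 0, sizeof(dmi));
	dmi.version[0] = DM_VERSION_MAJOR;
	dmi.version[1] = DM_VERSION_MINOR;
	dmi.version[2] = DM_VERSION_PATCHLEVEL;
	dmi.data_size = sizeof(dmi);
	if (ioctl(fd, DM_DEV_ARM_POLL, &dmi) < 0) {
		close(fd);
		return -1;
	}

	/* Sleep until dm_global_event_nr moves past the latched value. */
	pfd.fd = fd;
	pfd.events = POLLIN;
	poll(&pfd, 1, -1);

	close(fd);
	return 0;
}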
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index f85846741d50..cf2c67e35eaf 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -356,6 +356,7 @@ struct kcopyd_job {
struct mutex lock;
atomic_t sub_jobs;
sector_t progress;
+ sector_t write_offset;
struct kcopyd_job *master_job;
};
@@ -386,6 +387,31 @@ void dm_kcopyd_exit(void)
* Functions to push and pop a job onto the head of a given job
* list.
*/
+static struct kcopyd_job *pop_io_job(struct list_head *jobs,
+ struct dm_kcopyd_client *kc)
+{
+ struct kcopyd_job *job;
+
+ /*
+ * For I/O jobs, pop any read, any write without a sequential write
+ * constraint, and any sequential write that is at the right position.
+ */
+ list_for_each_entry(job, jobs, list) {
+ if (job->rw == READ || !test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
+ list_del(&job->list);
+ return job;
+ }
+
+ if (job->write_offset == job->master_job->write_offset) {
+ job->master_job->write_offset += job->source.count;
+ list_del(&job->list);
+ return job;
+ }
+ }
+
+ return NULL;
+}
+
static struct kcopyd_job *pop(struct list_head *jobs,
struct dm_kcopyd_client *kc)
{
@@ -395,8 +421,12 @@ static struct kcopyd_job *pop(struct list_head *jobs,
spin_lock_irqsave(&kc->job_lock, flags);
if (!list_empty(jobs)) {
- job = list_entry(jobs->next, struct kcopyd_job, list);
- list_del(&job->list);
+ if (jobs == &kc->io_jobs)
+ job = pop_io_job(jobs, kc);
+ else {
+ job = list_entry(jobs->next, struct kcopyd_job, list);
+ list_del(&job->list);
+ }
}
spin_unlock_irqrestore(&kc->job_lock, flags);
@@ -506,6 +536,14 @@ static int run_io_job(struct kcopyd_job *job)
.client = job->kc->io_client,
};
+ /*
+ * If we need to write sequentially and some reads or writes failed,
+ * no point in continuing.
+ */
+ if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
+ job->master_job->write_err)
+ return -EIO;
+
io_job_start(job->kc->throttle);
if (job->rw == READ)
@@ -655,6 +693,7 @@ static void segment_complete(int read_err, unsigned long write_err,
int i;
*sub_job = *job;
+ sub_job->write_offset = progress;
sub_job->source.sector += progress;
sub_job->source.count = count;
@@ -723,6 +762,27 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
job->num_dests = num_dests;
memcpy(&job->dests, dests, sizeof(*dests) * num_dests);
+ /*
+ * If one of the destinations is a host-managed zoned block device,
+ * we need to write sequentially. If one of the destinations is a
+ * host-aware device, then leave it to the caller to choose what to do.
+ */
+ if (!test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags)) {
+ for (i = 0; i < job->num_dests; i++) {
+ if (bdev_zoned_model(dests[i].bdev) == BLK_ZONED_HM) {
+ set_bit(DM_KCOPYD_WRITE_SEQ, &job->flags);
+ break;
+ }
+ }
+ }
+
+ /*
+ * If we need to write sequentially, errors cannot be ignored.
+ */
+ if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
+ test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags))
+ clear_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags);
+
if (from) {
job->source = *from;
job->pages = NULL;
@@ -746,6 +806,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
job->fn = fn;
job->context = context;
job->master_job = job;
+ job->write_offset = 0;
if (job->source.count <= SUB_JOB_SIZE)
dispatch_job(job);
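For reference, a hedged sketch of how a zoned-aware caller (for instance the dm-zoned reclaim path added later in this series) might request such a copy explicitly; the client, regions and completion callback are assumed to be set up elsewhere, and copy_to_seq_zone() itself is hypothetical:

#include <linux/dm-kcopyd.h>

/* Copy one extent into a sequential zone: the flag makes kcopyd issue the
 * sub-job writes in offset order (see pop_io_job() above). */
static int copy_to_seq_zone(struct dm_kcopyd_client *kc,
			    struct dm_io_region *src,
			    struct dm_io_region *dst,
			    dm_kcopyd_notify_fn done, void *context)
{
	unsigned long flags = 0;

	/* Host-managed destinations get this set automatically by
	 * dm_kcopyd_copy(); for host-aware drives the caller decides. */
	set_bit(DM_KCOPYD_WRITE_SEQ, &flags);

	return dm_kcopyd_copy(kc, src, 1, dst, flags, done, context);
}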
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 7d42a9d9f406..41971a090e34 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -89,7 +89,7 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
struct linear_c *lc = ti->private;
bio->bi_bdev = lc->dev->bdev;
- if (bio_sectors(bio))
+ if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
bio->bi_iter.bi_sector =
linear_map_sector(ti, bio->bi_iter.bi_sector);
}
@@ -101,6 +101,17 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
+static int linear_end_io(struct dm_target *ti, struct bio *bio,
+ blk_status_t *error)
+{
+ struct linear_c *lc = ti->private;
+
+ if (!*error && bio_op(bio) == REQ_OP_ZONE_REPORT)
+ dm_remap_zone_report(ti, bio, lc->start);
+
+ return DM_ENDIO_DONE;
+}
+
static void linear_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
{
@@ -159,18 +170,49 @@ static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
+static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+{
+ struct linear_c *lc = ti->private;
+ struct block_device *bdev = lc->dev->bdev;
+ struct dax_device *dax_dev = lc->dev->dax_dev;
+ sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+
+ dev_sector = linear_map_sector(ti, sector);
+ if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
+ return 0;
+ return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
+}
+
+static void linear_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr,
+ size_t size)
+{
+ struct linear_c *lc = ti->private;
+ struct block_device *bdev = lc->dev->bdev;
+ struct dax_device *dax_dev = lc->dev->dax_dev;
+ sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+
+ dev_sector = linear_map_sector(ti, sector);
+ if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff))
+ return;
+ dax_flush(dax_dev, pgoff, addr, size);
+}
+
static struct target_type linear_target = {
.name = "linear",
- .version = {1, 3, 0},
- .features = DM_TARGET_PASSES_INTEGRITY,
+ .version = {1, 4, 0},
+ .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
.module = THIS_MODULE,
.ctr = linear_ctr,
.dtr = linear_dtr,
.map = linear_map,
+ .end_io = linear_end_io,
.status = linear_status,
.prepare_ioctl = linear_prepare_ioctl,
.iterate_devices = linear_iterate_devices,
.direct_access = linear_dax_direct_access,
+ .dax_copy_from_iter = linear_dax_copy_from_iter,
+ .dax_flush = linear_dax_flush,
};
int __init dm_linear_init(void)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index b4b75dad816a..2e10c2f13a34 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1571,7 +1571,7 @@ static sector_t __rdev_sectors(struct raid_set *rs)
return rdev->sectors;
}
- BUG(); /* Constructor ensures we got some. */
+ return 0;
}
/* Calculate the sectors per device and per array used for @rs */
@@ -2941,7 +2941,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bool resize;
struct raid_type *rt;
unsigned int num_raid_params, num_raid_devs;
- sector_t calculated_dev_sectors;
+ sector_t calculated_dev_sectors, rdev_sectors;
struct raid_set *rs = NULL;
const char *arg;
struct rs_layout rs_layout;
@@ -3017,7 +3017,14 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto bad;
- resize = calculated_dev_sectors != __rdev_sectors(rs);
+ rdev_sectors = __rdev_sectors(rs);
+ if (!rdev_sectors) {
+ ti->error = "Invalid rdev size";
+ r = -EINVAL;
+ goto bad;
+ }
+
+ resize = calculated_dev_sectors != rdev_sectors;
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 11621a0af887..a0375530b07f 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -332,6 +332,44 @@ static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
+static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+{
+ sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+ struct stripe_c *sc = ti->private;
+ struct dax_device *dax_dev;
+ struct block_device *bdev;
+ uint32_t stripe;
+
+ stripe_map_sector(sc, sector, &stripe, &dev_sector);
+ dev_sector += sc->stripe[stripe].physical_start;
+ dax_dev = sc->stripe[stripe].dev->dax_dev;
+ bdev = sc->stripe[stripe].dev->bdev;
+
+ if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
+ return 0;
+ return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
+}
+
+static void stripe_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr,
+ size_t size)
+{
+ sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+ struct stripe_c *sc = ti->private;
+ struct dax_device *dax_dev;
+ struct block_device *bdev;
+ uint32_t stripe;
+
+ stripe_map_sector(sc, sector, &stripe, &dev_sector);
+ dev_sector += sc->stripe[stripe].physical_start;
+ dax_dev = sc->stripe[stripe].dev->dax_dev;
+ bdev = sc->stripe[stripe].dev->bdev;
+
+ if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff))
+ return;
+ dax_flush(dax_dev, pgoff, addr, size);
+}
+
/*
* Stripe status:
*
@@ -452,6 +490,8 @@ static struct target_type stripe_target = {
.iterate_devices = stripe_iterate_devices,
.io_hints = stripe_io_hints,
.direct_access = stripe_dax_direct_access,
+ .dax_copy_from_iter = stripe_dax_copy_from_iter,
+ .dax_flush = stripe_dax_flush,
};
int __init dm_stripe_init(void)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 5f5eae41f804..a39bcd9b982a 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -319,6 +319,39 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
return 1;
}
+ /*
+ * If the target is mapped to zoned block device(s), check
+ * that the zones are not partially mapped.
+ */
+ if (bdev_zoned_model(bdev) != BLK_ZONED_NONE) {
+ unsigned int zone_sectors = bdev_zone_sectors(bdev);
+
+ if (start & (zone_sectors - 1)) {
+ DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
+ dm_device_name(ti->table->md),
+ (unsigned long long)start,
+ zone_sectors, bdevname(bdev, b));
+ return 1;
+ }
+
+ /*
+ * Note: The last zone of a zoned block device may be smaller
+ * than other zones. So for a target mapping the end of a
+ * zoned block device with such a zone, len would not be zone
+ * aligned. We do not allow such a smaller last zone to be part
+ * of the mapping here to ensure that mappings with multiple
+ * devices do not end up with a smaller zone in the middle of
+ * the sector range.
+ */
+ if (len & (zone_sectors - 1)) {
+ DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
+ dm_device_name(ti->table->md),
+ (unsigned long long)len,
+ zone_sectors, bdevname(bdev, b));
+ return 1;
+ }
+ }
+
if (logical_block_size_sectors <= 1)
return 0;
@@ -456,6 +489,8 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
q->limits.alignment_offset,
(unsigned long long) start << SECTOR_SHIFT);
+ limits->zoned = blk_queue_zoned_model(q);
+
return 0;
}
@@ -1346,6 +1381,88 @@ bool dm_table_has_no_data_devices(struct dm_table *table)
return true;
}
+static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+ enum blk_zoned_model *zoned_model = data;
+
+ return q && blk_queue_zoned_model(q) == *zoned_model;
+}
+
+static bool dm_table_supports_zoned_model(struct dm_table *t,
+ enum blk_zoned_model zoned_model)
+{
+ struct dm_target *ti;
+ unsigned i;
+
+ for (i = 0; i < dm_table_get_num_targets(t); i++) {
+ ti = dm_table_get_target(t, i);
+
+ if (zoned_model == BLK_ZONED_HM &&
+ !dm_target_supports_zoned_hm(ti->type))
+ return false;
+
+ if (!ti->type->iterate_devices ||
+ !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
+ return false;
+ }
+
+ return true;
+}
+
+static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+ unsigned int *zone_sectors = data;
+
+ return q && blk_queue_zone_sectors(q) == *zone_sectors;
+}
+
+static bool dm_table_matches_zone_sectors(struct dm_table *t,
+ unsigned int zone_sectors)
+{
+ struct dm_target *ti;
+ unsigned i;
+
+ for (i = 0; i < dm_table_get_num_targets(t); i++) {
+ ti = dm_table_get_target(t, i);
+
+ if (!ti->type->iterate_devices ||
+ !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
+ return false;
+ }
+
+ return true;
+}
+
+static int validate_hardware_zoned_model(struct dm_table *table,
+ enum blk_zoned_model zoned_model,
+ unsigned int zone_sectors)
+{
+ if (zoned_model == BLK_ZONED_NONE)
+ return 0;
+
+ if (!dm_table_supports_zoned_model(table, zoned_model)) {
+ DMERR("%s: zoned model is not consistent across all devices",
+ dm_device_name(table->md));
+ return -EINVAL;
+ }
+
+ /* Check zone size validity and compatibility */
+ if (!zone_sectors || !is_power_of_2(zone_sectors))
+ return -EINVAL;
+
+ if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
+ DMERR("%s: zone sectors is not consistent across all devices",
+ dm_device_name(table->md));
+ return -EINVAL;
+ }
+
+ return 0;
+}
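Because validate_hardware_zoned_model() insists on a power-of-two zone size, the partial-mapping test in device_area_is_invalid() above can use a mask instead of a division. A small illustrative helper, hypothetical and not part of the patch:

#include <linux/types.h>
#include <linux/log2.h>

/* True if [start, start + len) covers only whole zones of zone_sectors each.
 * With zone_sectors a power of two, (x & (zone_sectors - 1)) equals
 * x % zone_sectors: e.g. for 256 MiB zones (zone_sectors = 524288), start
 * must be a multiple of 524288 sectors. */
static inline bool dm_range_is_zone_aligned(sector_t start, sector_t len,
					    unsigned int zone_sectors)
{
	if (!is_power_of_2(zone_sectors))
		return false;
	return !(start & (zone_sectors - 1)) && !(len & (zone_sectors - 1));
}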
+
/*
* Establish the new table's queue_limits and validate them.
*/
@@ -1355,6 +1472,8 @@ int dm_calculate_queue_limits(struct dm_table *table,
struct dm_target *ti;
struct queue_limits ti_limits;
unsigned i;
+ enum blk_zoned_model zoned_model = BLK_ZONED_NONE;
+ unsigned int zone_sectors = 0;
blk_set_stacking_limits(limits);
@@ -1372,6 +1491,15 @@ int dm_calculate_queue_limits(struct dm_table *table,
ti->type->iterate_devices(ti, dm_set_device_limits,
&ti_limits);
+ if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
+ /*
+ * After stacking all limits, validate that all devices
+ * in the table support this zoned model and zone sectors.
+ */
+ zoned_model = ti_limits.zoned;
+ zone_sectors = ti_limits.chunk_sectors;
+ }
+
/* Set I/O hints portion of queue limits */
if (ti->type->io_hints)
ti->type->io_hints(ti, &ti_limits);
@@ -1396,8 +1524,42 @@ combine_limits:
dm_device_name(table->md),
(unsigned long long) ti->begin,
(unsigned long long) ti->len);
+
+ /*
+ * FIXME: this should likely be moved to blk_stack_limits(), would
+ * also eliminate limits->zoned stacking hack in dm_set_device_limits()
+ */
+ if (limits->zoned == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) {
+ /*
+ * By default, the stacked limits zoned model is set to
+ * BLK_ZONED_NONE in blk_set_stacking_limits(). Update
+ * this model using the first target model reported
+ * that is not BLK_ZONED_NONE. This will be either the
+ * first target device zoned model or the model reported
+ * by the target .io_hints.
+ */
+ limits->zoned = ti_limits.zoned;
+ }
}
+ /*
+ * Verify that the zoned model and zone sectors, as determined before
+ * any .io_hints override, are the same across all devices in the table.
+ * - this is especially relevant if .io_hints is emulating a drive-managed
+ * zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices.
+ * BUT...
+ */
+ if (limits->zoned != BLK_ZONED_NONE) {
+ /*
+ * ...IF the above limits stacking determined a zoned model
+ * validate that all of the table's devices conform to it.
+ */
+ zoned_model = limits->zoned;
+ zone_sectors = limits->chunk_sectors;
+ }
+ if (validate_hardware_zoned_model(table, zoned_model, zone_sectors))
+ return -EINVAL;
+
return validate_hardware_logical_block_alignment(table, limits);
}
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
new file mode 100644
index 000000000000..884ff7c170a0
--- /dev/null
+++ b/drivers/md/dm-zoned-metadata.c
@@ -0,0 +1,2509 @@
+/*
+ * Copyright (C) 2017 Western Digital Corporation or its affiliates.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-zoned.h"
+
+#include <linux/module.h>
+#include <linux/crc32.h>
+
+#define DM_MSG_PREFIX "zoned metadata"
+
+/*
+ * Metadata version.
+ */
+#define DMZ_META_VER 1
+
+/*
+ * On-disk super block magic.
+ */
+#define DMZ_MAGIC ((((unsigned int)('D')) << 24) | \
+ (((unsigned int)('Z')) << 16) | \
+ (((unsigned int)('B')) << 8) | \
+ ((unsigned int)('D')))
+
+/*
+ * On disk super block.
+ * The super block uses only 512 B but occupies a full 4KB block on disk. This block is
+ * followed on disk by the mapping table of chunks to zones and the bitmap
+ * blocks indicating zone block validity.
+ * The overall resulting metadata format is:
+ * (1) Super block (1 block)
+ * (2) Chunk mapping table (nr_map_blocks)
+ * (3) Bitmap blocks (nr_bitmap_blocks)
+ * All metadata blocks are stored in conventional zones, starting from the
+ * first conventional zone found on disk.
+ */
+struct dmz_super {
+ /* Magic number */
+ __le32 magic; /* 4 */
+
+ /* Metadata version number */
+ __le32 version; /* 8 */
+
+ /* Generation number */
+ __le64 gen; /* 16 */
+
+ /* This block number */
+ __le64 sb_block; /* 24 */
+
+ /* The number of metadata blocks, including this super block */
+ __le32 nr_meta_blocks; /* 28 */
+
+ /* The number of sequential zones reserved for reclaim */
+ __le32 nr_reserved_seq; /* 32 */
+
+ /* The number of entries in the mapping table */
+ __le32 nr_chunks; /* 36 */
+
+ /* The number of blocks used for the chunk mapping table */
+ __le32 nr_map_blocks; /* 40 */
+
+ /* The number of blocks used for the block bitmaps */
+ __le32 nr_bitmap_blocks; /* 44 */
+
+ /* Checksum */
+ __le32 crc; /* 48 */
+
+ /* Padding to full 512B sector */
+ u8 reserved[464]; /* 512 */
+};
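The trailing offsets in the comments above pin the on-disk layout: all fields fit in the first 48 bytes of the 4 KB block, the rest being padding. A hedged userspace sketch that reads a superblock candidate and checks the magic; the byte offset of the first conventional zone is assumed to be known (for example from a zone report), and full verification would also need the kernel's crc32_le seeded with the generation number:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define DMZ_MAGIC 0x445A4244u	/* "DZBD" packed as in the macro above */

/* Leading fields of struct dmz_super, little-endian on disk. */
struct dmz_super_hdr {
	uint32_t magic;
	uint32_t version;
	uint64_t gen;
	uint64_t sb_block;
	uint32_t nr_meta_blocks;
	uint32_t nr_reserved_seq;
	uint32_t nr_chunks;
	uint32_t nr_map_blocks;
	uint32_t nr_bitmap_blocks;
	uint32_t crc;
};

static int dump_dmz_sb(int fd, off_t sb_offset)
{
	struct dmz_super_hdr sb;

	if (pread(fd, &sb, sizeof(sb), sb_offset) != (ssize_t)sizeof(sb))
		return -1;
	if (le32toh(sb.magic) != DMZ_MAGIC)
		return -1;
	printf("dm-zoned sb: version %u, gen %llu, %u chunks\n",
	       le32toh(sb.version),
	       (unsigned long long)le64toh(sb.gen),
	       le32toh(sb.nr_chunks));
	return 0;
}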
+
+/*
+ * Chunk mapping entry: entries are indexed by chunk number
+ * and give the zone ID (dzone_id) mapping the chunk on disk.
+ * This zone may be sequential or random. If it is a sequential
+ * zone, a second zone (bzone_id) used as a write buffer may
+ * also be specified. This second zone will always be a randomly
+ * writeable zone.
+ */
+struct dmz_map {
+ __le32 dzone_id;
+ __le32 bzone_id;
+};
+
+/*
+ * Chunk mapping table metadata: 512 8-byte entries per 4KB block.
+ */
+#define DMZ_MAP_ENTRIES (DMZ_BLOCK_SIZE / sizeof(struct dmz_map))
+#define DMZ_MAP_ENTRIES_SHIFT (ilog2(DMZ_MAP_ENTRIES))
+#define DMZ_MAP_ENTRIES_MASK (DMZ_MAP_ENTRIES - 1)
+#define DMZ_MAP_UNMAPPED UINT_MAX
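With 4 KB metadata blocks and 8-byte entries this gives 512 entries per block, a shift of 9 and a mask of 0x1ff. A small worked example of how a chunk number splits into a map block index and an entry index, mirroring dmz_set_chunk_mapping() further down (the helper itself is hypothetical):

/* Illustrative only: chunk 1000 lives in map block 1, entry 488. */
static inline void dmz_chunk_map_pos(unsigned int chunk,
				     unsigned int *map_blk,
				     unsigned int *map_idx)
{
	*map_blk = chunk >> DMZ_MAP_ENTRIES_SHIFT;	/* 1000 >> 9 = 1   */
	*map_idx = chunk & DMZ_MAP_ENTRIES_MASK;	/* 1000 & 511 = 488 */
}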
+
+/*
+ * Metadata block descriptor (for cached metadata blocks).
+ */
+struct dmz_mblock {
+ struct rb_node node;
+ struct list_head link;
+ sector_t no;
+ atomic_t ref;
+ unsigned long state;
+ struct page *page;
+ void *data;
+};
+
+/*
+ * Metadata block state flags.
+ */
+enum {
+ DMZ_META_DIRTY,
+ DMZ_META_READING,
+ DMZ_META_WRITING,
+ DMZ_META_ERROR,
+};
+
+/*
+ * Super block information (one per metadata set).
+ */
+struct dmz_sb {
+ sector_t block;
+ struct dmz_mblock *mblk;
+ struct dmz_super *sb;
+};
+
+/*
+ * In-memory metadata.
+ */
+struct dmz_metadata {
+ struct dmz_dev *dev;
+
+ sector_t zone_bitmap_size;
+ unsigned int zone_nr_bitmap_blocks;
+
+ unsigned int nr_bitmap_blocks;
+ unsigned int nr_map_blocks;
+
+ unsigned int nr_useable_zones;
+ unsigned int nr_meta_blocks;
+ unsigned int nr_meta_zones;
+ unsigned int nr_data_zones;
+ unsigned int nr_rnd_zones;
+ unsigned int nr_reserved_seq;
+ unsigned int nr_chunks;
+
+ /* Zone information array */
+ struct dm_zone *zones;
+
+ struct dm_zone *sb_zone;
+ struct dmz_sb sb[2];
+ unsigned int mblk_primary;
+ u64 sb_gen;
+ unsigned int min_nr_mblks;
+ unsigned int max_nr_mblks;
+ atomic_t nr_mblks;
+ struct rw_semaphore mblk_sem;
+ struct mutex mblk_flush_lock;
+ spinlock_t mblk_lock;
+ struct rb_root mblk_rbtree;
+ struct list_head mblk_lru_list;
+ struct list_head mblk_dirty_list;
+ struct shrinker mblk_shrinker;
+
+ /* Zone allocation management */
+ struct mutex map_lock;
+ struct dmz_mblock **map_mblk;
+ unsigned int nr_rnd;
+ atomic_t unmap_nr_rnd;
+ struct list_head unmap_rnd_list;
+ struct list_head map_rnd_list;
+
+ unsigned int nr_seq;
+ atomic_t unmap_nr_seq;
+ struct list_head unmap_seq_list;
+ struct list_head map_seq_list;
+
+ atomic_t nr_reserved_seq_zones;
+ struct list_head reserved_seq_zones_list;
+
+ wait_queue_head_t free_wq;
+};
+
+/*
+ * Various accessors
+ */
+unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone)
+{
+ return ((unsigned int)(zone - zmd->zones));
+}
+
+sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
+{
+ return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_sectors_shift;
+}
+
+sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
+{
+ return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_blocks_shift;
+}
+
+unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
+{
+ return zmd->nr_chunks;
+}
+
+unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd)
+{
+ return zmd->nr_rnd;
+}
+
+unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd)
+{
+ return atomic_read(&zmd->unmap_nr_rnd);
+}
+
+/*
+ * Lock/unlock mapping table.
+ * The map lock also protects all the zone lists.
+ */
+void dmz_lock_map(struct dmz_metadata *zmd)
+{
+ mutex_lock(&zmd->map_lock);
+}
+
+void dmz_unlock_map(struct dmz_metadata *zmd)
+{
+ mutex_unlock(&zmd->map_lock);
+}
+
+/*
+ * Lock/unlock metadata access. This is a "read" lock on a semaphore
+ * that prevents metadata flush from running while metadata are being
+ * modified. The actual metadata write mutual exclusion is achieved with
+ * the map lock and zone state management (active and reclaim state are
+ * mutually exclusive).
+ */
+void dmz_lock_metadata(struct dmz_metadata *zmd)
+{
+ down_read(&zmd->mblk_sem);
+}
+
+void dmz_unlock_metadata(struct dmz_metadata *zmd)
+{
+ up_read(&zmd->mblk_sem);
+}
+
+/*
+ * Lock/unlock flush: prevent concurrent executions
+ * of dmz_flush_metadata as well as metadata modification in reclaim
+ * while flush is being executed.
+ */
+void dmz_lock_flush(struct dmz_metadata *zmd)
+{
+ mutex_lock(&zmd->mblk_flush_lock);
+}
+
+void dmz_unlock_flush(struct dmz_metadata *zmd)
+{
+ mutex_unlock(&zmd->mblk_flush_lock);
+}
+
+/*
+ * Allocate a metadata block.
+ */
+static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
+ sector_t mblk_no)
+{
+ struct dmz_mblock *mblk = NULL;
+
+ /* See if we can reuse cached blocks */
+ if (zmd->max_nr_mblks && atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) {
+ spin_lock(&zmd->mblk_lock);
+ mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
+ struct dmz_mblock, link);
+ if (mblk) {
+ list_del_init(&mblk->link);
+ rb_erase(&mblk->node, &zmd->mblk_rbtree);
+ mblk->no = mblk_no;
+ }
+ spin_unlock(&zmd->mblk_lock);
+ if (mblk)
+ return mblk;
+ }
+
+ /* Allocate a new block */
+ mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
+ if (!mblk)
+ return NULL;
+
+ mblk->page = alloc_page(GFP_NOIO);
+ if (!mblk->page) {
+ kfree(mblk);
+ return NULL;
+ }
+
+ RB_CLEAR_NODE(&mblk->node);
+ INIT_LIST_HEAD(&mblk->link);
+ atomic_set(&mblk->ref, 0);
+ mblk->state = 0;
+ mblk->no = mblk_no;
+ mblk->data = page_address(mblk->page);
+
+ atomic_inc(&zmd->nr_mblks);
+
+ return mblk;
+}
+
+/*
+ * Free a metadata block.
+ */
+static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
+{
+ __free_pages(mblk->page, 0);
+ kfree(mblk);
+
+ atomic_dec(&zmd->nr_mblks);
+}
+
+/*
+ * Insert a metadata block in the rbtree.
+ */
+static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
+{
+ struct rb_root *root = &zmd->mblk_rbtree;
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct dmz_mblock *b;
+
+ /* Figure out where to put the new node */
+ while (*new) {
+ b = container_of(*new, struct dmz_mblock, node);
+ parent = *new;
+ new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
+ }
+
+ /* Add new node and rebalance tree */
+ rb_link_node(&mblk->node, parent, new);
+ rb_insert_color(&mblk->node, root);
+}
+
+/*
+ * Lookup a metadata block in the rbtree.
+ */
+static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd,
+ sector_t mblk_no)
+{
+ struct rb_root *root = &zmd->mblk_rbtree;
+ struct rb_node *node = root->rb_node;
+ struct dmz_mblock *mblk;
+
+ while (node) {
+ mblk = container_of(node, struct dmz_mblock, node);
+ if (mblk->no == mblk_no)
+ return mblk;
+ node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
+ }
+
+ return NULL;
+}
+
+/*
+ * Metadata block BIO end callback.
+ */
+static void dmz_mblock_bio_end_io(struct bio *bio)
+{
+ struct dmz_mblock *mblk = bio->bi_private;
+ int flag;
+
+ if (bio->bi_status)
+ set_bit(DMZ_META_ERROR, &mblk->state);
+
+ if (bio_op(bio) == REQ_OP_WRITE)
+ flag = DMZ_META_WRITING;
+ else
+ flag = DMZ_META_READING;
+
+ clear_bit_unlock(flag, &mblk->state);
+ smp_mb__after_atomic();
+ wake_up_bit(&mblk->state, flag);
+
+ bio_put(bio);
+}
+
+/*
+ * Read a metadata block from disk.
+ */
+static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd,
+ sector_t mblk_no)
+{
+ struct dmz_mblock *mblk;
+ sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
+ struct bio *bio;
+
+ /* Get block and insert it */
+ mblk = dmz_alloc_mblock(zmd, mblk_no);
+ if (!mblk)
+ return NULL;
+
+ spin_lock(&zmd->mblk_lock);
+ atomic_inc(&mblk->ref);
+ set_bit(DMZ_META_READING, &mblk->state);
+ dmz_insert_mblock(zmd, mblk);
+ spin_unlock(&zmd->mblk_lock);
+
+ bio = bio_alloc(GFP_NOIO, 1);
+ if (!bio) {
+ dmz_free_mblock(zmd, mblk);
+ return NULL;
+ }
+
+ bio->bi_iter.bi_sector = dmz_blk2sect(block);
+ bio->bi_bdev = zmd->dev->bdev;
+ bio->bi_private = mblk;
+ bio->bi_end_io = dmz_mblock_bio_end_io;
+ bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
+ bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
+ submit_bio(bio);
+
+ return mblk;
+}
+
+/*
+ * Free metadata blocks.
+ */
+static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd,
+ unsigned long limit)
+{
+ struct dmz_mblock *mblk;
+ unsigned long count = 0;
+
+ if (!zmd->max_nr_mblks)
+ return 0;
+
+ while (!list_empty(&zmd->mblk_lru_list) &&
+ atomic_read(&zmd->nr_mblks) > zmd->min_nr_mblks &&
+ count < limit) {
+ mblk = list_first_entry(&zmd->mblk_lru_list,
+ struct dmz_mblock, link);
+ list_del_init(&mblk->link);
+ rb_erase(&mblk->node, &zmd->mblk_rbtree);
+ dmz_free_mblock(zmd, mblk);
+ count++;
+ }
+
+ return count;
+}
+
+/*
+ * For mblock shrinker: get the number of unused metadata blocks in the cache.
+ */
+static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
+
+ return atomic_read(&zmd->nr_mblks);
+}
+
+/*
+ * For mblock shrinker: scan unused metadata blocks and shrink the cache.
+ */
+static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
+ unsigned long count;
+
+ spin_lock(&zmd->mblk_lock);
+ count = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan);
+ spin_unlock(&zmd->mblk_lock);
+
+ return count ? count : SHRINK_STOP;
+}
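These callbacks only take effect once the shrinker is registered, which happens later in this file during metadata construction (outside this excerpt), presumably along these lines:

/* Hypothetical helper sketching the hookup done at metadata init time. */
static int dmz_register_mblk_shrinker(struct dmz_metadata *zmd)
{
	zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
	zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
	zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;

	return register_shrinker(&zmd->mblk_shrinker);
}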
+
+/*
+ * Release a metadata block.
+ */
+static void dmz_release_mblock(struct dmz_metadata *zmd,
+ struct dmz_mblock *mblk)
+{
+
+ if (!mblk)
+ return;
+
+ spin_lock(&zmd->mblk_lock);
+
+ if (atomic_dec_and_test(&mblk->ref)) {
+ if (test_bit(DMZ_META_ERROR, &mblk->state)) {
+ rb_erase(&mblk->node, &zmd->mblk_rbtree);
+ dmz_free_mblock(zmd, mblk);
+ } else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
+ list_add_tail(&mblk->link, &zmd->mblk_lru_list);
+ dmz_shrink_mblock_cache(zmd, 1);
+ }
+ }
+
+ spin_unlock(&zmd->mblk_lock);
+}
+
+/*
+ * Get a metadata block from the rbtree. If the block
+ * is not present, read it from disk.
+ */
+static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
+ sector_t mblk_no)
+{
+ struct dmz_mblock *mblk;
+
+ /* Check rbtree */
+ spin_lock(&zmd->mblk_lock);
+ mblk = dmz_lookup_mblock(zmd, mblk_no);
+ if (mblk) {
+ /* Cache hit: remove block from LRU list */
+ if (atomic_inc_return(&mblk->ref) == 1 &&
+ !test_bit(DMZ_META_DIRTY, &mblk->state))
+ list_del_init(&mblk->link);
+ }
+ spin_unlock(&zmd->mblk_lock);
+
+ if (!mblk) {
+ /* Cache miss: read the block from disk */
+ mblk = dmz_fetch_mblock(zmd, mblk_no);
+ if (!mblk)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Wait for on-going read I/O and check for error */
+ wait_on_bit_io(&mblk->state, DMZ_META_READING,
+ TASK_UNINTERRUPTIBLE);
+ if (test_bit(DMZ_META_ERROR, &mblk->state)) {
+ dmz_release_mblock(zmd, mblk);
+ return ERR_PTR(-EIO);
+ }
+
+ return mblk;
+}
+
+/*
+ * Mark a metadata block dirty.
+ */
+static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
+{
+ spin_lock(&zmd->mblk_lock);
+ if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state))
+ list_add_tail(&mblk->link, &zmd->mblk_dirty_list);
+ spin_unlock(&zmd->mblk_lock);
+}
+
+/*
+ * Issue a metadata block write BIO.
+ */
+static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
+ unsigned int set)
+{
+ sector_t block = zmd->sb[set].block + mblk->no;
+ struct bio *bio;
+
+ bio = bio_alloc(GFP_NOIO, 1);
+ if (!bio) {
+ set_bit(DMZ_META_ERROR, &mblk->state);
+ return;
+ }
+
+ set_bit(DMZ_META_WRITING, &mblk->state);
+
+ bio->bi_iter.bi_sector = dmz_blk2sect(block);
+ bio->bi_bdev = zmd->dev->bdev;
+ bio->bi_private = mblk;
+ bio->bi_end_io = dmz_mblock_bio_end_io;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
+ bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
+ submit_bio(bio);
+}
+
+/*
+ * Read/write a metadata block.
+ */
+static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
+ struct page *page)
+{
+ struct bio *bio;
+ int ret;
+
+ bio = bio_alloc(GFP_NOIO, 1);
+ if (!bio)
+ return -ENOMEM;
+
+ bio->bi_iter.bi_sector = dmz_blk2sect(block);
+ bio->bi_bdev = zmd->dev->bdev;
+ bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
+ bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
+ ret = submit_bio_wait(bio);
+ bio_put(bio);
+
+ return ret;
+}
+
+/*
+ * Write super block of the specified metadata set.
+ */
+static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
+{
+ sector_t block = zmd->sb[set].block;
+ struct dmz_mblock *mblk = zmd->sb[set].mblk;
+ struct dmz_super *sb = zmd->sb[set].sb;
+ u64 sb_gen = zmd->sb_gen + 1;
+ int ret;
+
+ sb->magic = cpu_to_le32(DMZ_MAGIC);
+ sb->version = cpu_to_le32(DMZ_META_VER);
+
+ sb->gen = cpu_to_le64(sb_gen);
+
+ sb->sb_block = cpu_to_le64(block);
+ sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
+ sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
+ sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);
+
+ sb->nr_map_blocks = cpu_to_le32(zmd->nr_map_blocks);
+ sb->nr_bitmap_blocks = cpu_to_le32(zmd->nr_bitmap_blocks);
+
+ sb->crc = 0;
+ sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));
+
+ ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
+ if (ret == 0)
+ ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+
+ return ret;
+}
+
+/*
+ * Write dirty metadata blocks to the specified set.
+ */
+static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
+ struct list_head *write_list,
+ unsigned int set)
+{
+ struct dmz_mblock *mblk;
+ struct blk_plug plug;
+ int ret = 0;
+
+ /* Issue writes */
+ blk_start_plug(&plug);
+ list_for_each_entry(mblk, write_list, link)
+ dmz_write_mblock(zmd, mblk, set);
+ blk_finish_plug(&plug);
+
+ /* Wait for completion */
+ list_for_each_entry(mblk, write_list, link) {
+ wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
+ TASK_UNINTERRUPTIBLE);
+ if (test_bit(DMZ_META_ERROR, &mblk->state)) {
+ clear_bit(DMZ_META_ERROR, &mblk->state);
+ ret = -EIO;
+ }
+ }
+
+ /* Flush drive cache (this will also sync data) */
+ if (ret == 0)
+ ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+
+ return ret;
+}
+
+/*
+ * Log dirty metadata blocks.
+ */
+static int dmz_log_dirty_mblocks(struct dmz_metadata *zmd,
+ struct list_head *write_list)
+{
+ unsigned int log_set = zmd->mblk_primary ^ 0x1;
+ int ret;
+
+ /* Write dirty blocks to the log */
+ ret = dmz_write_dirty_mblocks(zmd, write_list, log_set);
+ if (ret)
+ return ret;
+
+ /*
+ * No error so far: now validate the log by updating the
+ * log index super block generation.
+ */
+ ret = dmz_write_sb(zmd, log_set);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Flush dirty metadata blocks.
+ */
+int dmz_flush_metadata(struct dmz_metadata *zmd)
+{
+ struct dmz_mblock *mblk;
+ struct list_head write_list;
+ int ret;
+
+ if (WARN_ON(!zmd))
+ return 0;
+
+ INIT_LIST_HEAD(&write_list);
+
+ /*
+ * Make sure that metadata blocks are stable before logging: take
+ * the write lock on the metadata semaphore to prevent target BIOs
+ * from modifying metadata.
+ */
+ down_write(&zmd->mblk_sem);
+
+ /*
+ * This is called from the target flush work and reclaim work.
+ * Concurrent execution is not allowed.
+ */
+ dmz_lock_flush(zmd);
+
+ /* Get dirty blocks */
+ spin_lock(&zmd->mblk_lock);
+ list_splice_init(&zmd->mblk_dirty_list, &write_list);
+ spin_unlock(&zmd->mblk_lock);
+
+ /* If there are no dirty metadata blocks, just flush the device cache */
+ if (list_empty(&write_list)) {
+ ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL);
+ goto out;
+ }
+
+ /*
+ * The primary metadata set is still clean. Keep it this way until
+ * all updates are successful in the secondary set. That is, use
+ * the secondary set as a log.
+ */
+ ret = dmz_log_dirty_mblocks(zmd, &write_list);
+ if (ret)
+ goto out;
+
+ /*
+ * The log is on disk. It is now safe to update in place
+ * in the primary metadata set.
+ */
+ ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary);
+ if (ret)
+ goto out;
+
+ ret = dmz_write_sb(zmd, zmd->mblk_primary);
+ if (ret)
+ goto out;
+
+ while (!list_empty(&write_list)) {
+ mblk = list_first_entry(&write_list, struct dmz_mblock, link);
+ list_del_init(&mblk->link);
+
+ spin_lock(&zmd->mblk_lock);
+ clear_bit(DMZ_META_DIRTY, &mblk->state);
+ if (atomic_read(&mblk->ref) == 0)
+ list_add_tail(&mblk->link, &zmd->mblk_lru_list);
+ spin_unlock(&zmd->mblk_lock);
+ }
+
+ zmd->sb_gen++;
+out:
+ if (ret && !list_empty(&write_list)) {
+ spin_lock(&zmd->mblk_lock);
+ list_splice(&write_list, &zmd->mblk_dirty_list);
+ spin_unlock(&zmd->mblk_lock);
+ }
+
+ dmz_unlock_flush(zmd);
+ up_write(&zmd->mblk_sem);
+
+ return ret;
+}
+
+/*
+ * Check super block.
+ */
+static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_super *sb)
+{
+ unsigned int nr_meta_zones, nr_data_zones;
+ struct dmz_dev *dev = zmd->dev;
+ u32 crc, stored_crc;
+ u64 gen;
+
+ gen = le64_to_cpu(sb->gen);
+ stored_crc = le32_to_cpu(sb->crc);
+ sb->crc = 0;
+ crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE);
+ if (crc != stored_crc) {
+ dmz_dev_err(dev, "Invalid checksum (needed 0x%08x, got 0x%08x)",
+ crc, stored_crc);
+ return -ENXIO;
+ }
+
+ if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
+ dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
+ DMZ_MAGIC, le32_to_cpu(sb->magic));
+ return -ENXIO;
+ }
+
+ if (le32_to_cpu(sb->version) != DMZ_META_VER) {
+ dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
+ DMZ_META_VER, le32_to_cpu(sb->version));
+ return -ENXIO;
+ }
+
+ nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + dev->zone_nr_blocks - 1)
+ >> dev->zone_nr_blocks_shift;
+ if (!nr_meta_zones ||
+ nr_meta_zones >= zmd->nr_rnd_zones) {
+ dmz_dev_err(dev, "Invalid number of metadata blocks");
+ return -ENXIO;
+ }
+
+ if (!le32_to_cpu(sb->nr_reserved_seq) ||
+ le32_to_cpu(sb->nr_reserved_seq) >= (zmd->nr_useable_zones - nr_meta_zones)) {
+ dmz_dev_err(dev, "Invalid number of reserved sequential zones");
+ return -ENXIO;
+ }
+
+ nr_data_zones = zmd->nr_useable_zones -
+ (nr_meta_zones * 2 + le32_to_cpu(sb->nr_reserved_seq));
+ if (le32_to_cpu(sb->nr_chunks) > nr_data_zones) {
+ dmz_dev_err(dev, "Invalid number of chunks %u / %u",
+ le32_to_cpu(sb->nr_chunks), nr_data_zones);
+ return -ENXIO;
+ }
+
+ /* OK */
+ zmd->nr_meta_blocks = le32_to_cpu(sb->nr_meta_blocks);
+ zmd->nr_reserved_seq = le32_to_cpu(sb->nr_reserved_seq);
+ zmd->nr_chunks = le32_to_cpu(sb->nr_chunks);
+ zmd->nr_map_blocks = le32_to_cpu(sb->nr_map_blocks);
+ zmd->nr_bitmap_blocks = le32_to_cpu(sb->nr_bitmap_blocks);
+ zmd->nr_meta_zones = nr_meta_zones;
+ zmd->nr_data_zones = nr_data_zones;
+
+ return 0;
+}
+
+/*
+ * Read the first or second super block from disk.
+ */
+static int dmz_read_sb(struct dmz_metadata *zmd, unsigned int set)
+{
+ return dmz_rdwr_block(zmd, REQ_OP_READ, zmd->sb[set].block,
+ zmd->sb[set].mblk->page);
+}
+
+/*
+ * Determine the position of the secondary super block on disk.
+ * This is used only if a corruption of the primary super block
+ * is detected.
+ */
+static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
+{
+ unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
+ struct dmz_mblock *mblk;
+ int i;
+
+ /* Allocate a block */
+ mblk = dmz_alloc_mblock(zmd, 0);
+ if (!mblk)
+ return -ENOMEM;
+
+ zmd->sb[1].mblk = mblk;
+ zmd->sb[1].sb = mblk->data;
+
+ /* Bad first super block: search for the second one */
+ zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
+ for (i = 0; i < zmd->nr_rnd_zones - 1; i++) {
+ if (dmz_read_sb(zmd, 1) != 0)
+ break;
+ if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC)
+ return 0;
+ zmd->sb[1].block += zone_nr_blocks;
+ }
+
+ dmz_free_mblock(zmd, mblk);
+ zmd->sb[1].mblk = NULL;
+
+ return -EIO;
+}
+
+/*
+ * Allocate a cache block and read the first or second super block into it.
+ */
+static int dmz_get_sb(struct dmz_metadata *zmd, unsigned int set)
+{
+ struct dmz_mblock *mblk;
+ int ret;
+
+ /* Allocate a block */
+ mblk = dmz_alloc_mblock(zmd, 0);
+ if (!mblk)
+ return -ENOMEM;
+
+ zmd->sb[set].mblk = mblk;
+ zmd->sb[set].sb = mblk->data;
+
+ /* Read super block */
+ ret = dmz_read_sb(zmd, set);
+ if (ret) {
+ dmz_free_mblock(zmd, mblk);
+ zmd->sb[set].mblk = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Recover a metadata set.
+ */
+static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
+{
+ unsigned int src_set = dst_set ^ 0x1;
+ struct page *page;
+ int i, ret;
+
+ dmz_dev_warn(zmd->dev, "Metadata set %u invalid: recovering", dst_set);
+
+ if (dst_set == 0)
+ zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
+ else {
+ zmd->sb[1].block = zmd->sb[0].block +
+ (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
+ }
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ /* Copy metadata blocks */
+ for (i = 1; i < zmd->nr_meta_blocks; i++) {
+ ret = dmz_rdwr_block(zmd, REQ_OP_READ,
+ zmd->sb[src_set].block + i, page);
+ if (ret)
+ goto out;
+ ret = dmz_rdwr_block(zmd, REQ_OP_WRITE,
+ zmd->sb[dst_set].block + i, page);
+ if (ret)
+ goto out;
+ }
+
+ /* Finalize with the super block */
+ if (!zmd->sb[dst_set].mblk) {
+ zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0);
+ if (!zmd->sb[dst_set].mblk) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
+ }
+
+ ret = dmz_write_sb(zmd, dst_set);
+out:
+ __free_pages(page, 0);
+
+ return ret;
+}
+
+/*
+ * Get super block from disk.
+ */
+static int dmz_load_sb(struct dmz_metadata *zmd)
+{
+ bool sb_good[2] = {false, false};
+ u64 sb_gen[2] = {0, 0};
+ int ret;
+
+ /* Read and check the primary super block */
+ zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
+ ret = dmz_get_sb(zmd, 0);
+ if (ret) {
+ dmz_dev_err(zmd->dev, "Read primary super block failed");
+ return ret;
+ }
+
+ ret = dmz_check_sb(zmd, zmd->sb[0].sb);
+
+ /* Read and check secondary super block */
+ if (ret == 0) {
+ sb_good[0] = true;
+ zmd->sb[1].block = zmd->sb[0].block +
+ (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
+ ret = dmz_get_sb(zmd, 1);
+ } else
+ ret = dmz_lookup_secondary_sb(zmd);
+
+ if (ret) {
+ dmz_dev_err(zmd->dev, "Read secondary super block failed");
+ return ret;
+ }
+
+ ret = dmz_check_sb(zmd, zmd->sb[1].sb);
+ if (ret == 0)
+ sb_good[1] = true;
+
+ /* Use highest generation sb first */
+ if (!sb_good[0] && !sb_good[1]) {
+ dmz_dev_err(zmd->dev, "No valid super block found");
+ return -EIO;
+ }
+
+ if (sb_good[0])
+ sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
+ else
+ ret = dmz_recover_mblocks(zmd, 0);
+
+ if (sb_good[1])
+ sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
+ else
+ ret = dmz_recover_mblocks(zmd, 1);
+
+ if (ret) {
+ dmz_dev_err(zmd->dev, "Recovery failed");
+ return -EIO;
+ }
+
+ if (sb_gen[0] >= sb_gen[1]) {
+ zmd->sb_gen = sb_gen[0];
+ zmd->mblk_primary = 0;
+ } else {
+ zmd->sb_gen = sb_gen[1];
+ zmd->mblk_primary = 1;
+ }
+
+ dmz_dev_debug(zmd->dev, "Using super block %u (gen %llu)",
+ zmd->mblk_primary, zmd->sb_gen);
+
+ return 0;
+}
+
+/*
+ * Initialize a zone descriptor.
+ */
+static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
+ struct blk_zone *blkz)
+{
+ struct dmz_dev *dev = zmd->dev;
+
+ /* Ignore a possible last runt (smaller) zone */
+ if (blkz->len != dev->zone_nr_sectors) {
+ if (blkz->start + blkz->len == dev->capacity)
+ return 0;
+ return -ENXIO;
+ }
+
+ INIT_LIST_HEAD(&zone->link);
+ atomic_set(&zone->refcount, 0);
+ zone->chunk = DMZ_MAP_UNMAPPED;
+
+ if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL) {
+ set_bit(DMZ_RND, &zone->flags);
+ zmd->nr_rnd_zones++;
+ } else if (blkz->type == BLK_ZONE_TYPE_SEQWRITE_REQ ||
+ blkz->type == BLK_ZONE_TYPE_SEQWRITE_PREF) {
+ set_bit(DMZ_SEQ, &zone->flags);
+ } else
+ return -ENXIO;
+
+ if (blkz->cond == BLK_ZONE_COND_OFFLINE)
+ set_bit(DMZ_OFFLINE, &zone->flags);
+ else if (blkz->cond == BLK_ZONE_COND_READONLY)
+ set_bit(DMZ_READ_ONLY, &zone->flags);
+
+ if (dmz_is_rnd(zone))
+ zone->wp_block = 0;
+ else
+ zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
+
+ if (!dmz_is_offline(zone) && !dmz_is_readonly(zone)) {
+ zmd->nr_useable_zones++;
+ if (dmz_is_rnd(zone)) {
+ zmd->nr_rnd_zones++;
+ if (!zmd->sb_zone) {
+ /* Super block zone */
+ zmd->sb_zone = zone;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Free zones descriptors.
+ */
+static void dmz_drop_zones(struct dmz_metadata *zmd)
+{
+ kfree(zmd->zones);
+ zmd->zones = NULL;
+}
+
+/*
+ * The size of a zone report in number of zones.
+ * This results in 4096*64B=256KB report zones commands.
+ */
+#define DMZ_REPORT_NR_ZONES 4096
+
+/*
+ * Allocate and initialize zone descriptors using the zone
+ * information from disk.
+ */
+static int dmz_init_zones(struct dmz_metadata *zmd)
+{
+ struct dmz_dev *dev = zmd->dev;
+ struct dm_zone *zone;
+ struct blk_zone *blkz;
+ unsigned int nr_blkz;
+ sector_t sector = 0;
+ int i, ret = 0;
+
+ /* Init */
+ zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
+ zmd->zone_nr_bitmap_blocks = zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT;
+
+ /* Allocate zone array */
+ zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL);
+ if (!zmd->zones)
+ return -ENOMEM;
+
+ dmz_dev_info(dev, "Using %zu B for zone information",
+ sizeof(struct dm_zone) * dev->nr_zones);
+
+ /* Get zone information */
+ nr_blkz = DMZ_REPORT_NR_ZONES;
+ blkz = kcalloc(nr_blkz, sizeof(struct blk_zone), GFP_KERNEL);
+ if (!blkz) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Get zone information and initialize zone descriptors.
+ * At the same time, determine where the super block
+ * should be: first block of the first randomly writable
+ * zone.
+ */
+ zone = zmd->zones;
+ while (sector < dev->capacity) {
+ /* Get zone information */
+ nr_blkz = DMZ_REPORT_NR_ZONES;
+ ret = blkdev_report_zones(dev->bdev, sector, blkz,
+ &nr_blkz, GFP_KERNEL);
+ if (ret) {
+ dmz_dev_err(dev, "Report zones failed %d", ret);
+ goto out;
+ }
+
+ /* Process report */
+ for (i = 0; i < nr_blkz; i++) {
+ ret = dmz_init_zone(zmd, zone, &blkz[i]);
+ if (ret)
+ goto out;
+ sector += dev->zone_nr_sectors;
+ zone++;
+ }
+ }
+
+ /* The entire zone configuration of the disk should now be known */
+ if (sector < dev->capacity) {
+ dmz_dev_err(dev, "Failed to get correct zone information");
+ ret = -ENXIO;
+ }
+out:
+ kfree(blkz);
+ if (ret)
+ dmz_drop_zones(zmd);
+
+ return ret;
+}
+
+/*
+ * Update a zone's information.
+ */
+static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
+{
+ unsigned int nr_blkz = 1;
+ struct blk_zone blkz;
+ int ret;
+
+ /* Get zone information from disk */
+ ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
+ &blkz, &nr_blkz, GFP_KERNEL);
+ if (ret) {
+ dmz_dev_err(zmd->dev, "Get zone %u report failed",
+ dmz_id(zmd, zone));
+ return ret;
+ }
+
+ clear_bit(DMZ_OFFLINE, &zone->flags);
+ clear_bit(DMZ_READ_ONLY, &zone->flags);
+ if (blkz.cond == BLK_ZONE_COND_OFFLINE)
+ set_bit(DMZ_OFFLINE, &zone->flags);
+ else if (blkz.cond == BLK_ZONE_COND_READONLY)
+ set_bit(DMZ_READ_ONLY, &zone->flags);
+
+ if (dmz_is_seq(zone))
+ zone->wp_block = dmz_sect2blk(blkz.wp - blkz.start);
+ else
+ zone->wp_block = 0;
+
+ return 0;
+}
+
+/*
+ * Check a zone write pointer position when the zone is marked
+ * with the sequential write error flag.
+ */
+static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
+ struct dm_zone *zone)
+{
+ unsigned int wp = 0;
+ int ret;
+
+ wp = zone->wp_block;
+ ret = dmz_update_zone(zmd, zone);
+ if (ret)
+ return ret;
+
+ dmz_dev_warn(zmd->dev, "Processing zone %u write error (zone wp %u/%u)",
+ dmz_id(zmd, zone), zone->wp_block, wp);
+
+ if (zone->wp_block < wp) {
+ dmz_invalidate_blocks(zmd, zone, zone->wp_block,
+ wp - zone->wp_block);
+ }
+
+ return 0;
+}
+
+static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
+{
+ return &zmd->zones[zone_id];
+}
+
+/*
+ * Reset a zone write pointer.
+ */
+static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
+{
+ int ret;
+
+ /*
+ * Ignore offline zones, read only zones,
+ * and conventional zones.
+ */
+ if (dmz_is_offline(zone) ||
+ dmz_is_readonly(zone) ||
+ dmz_is_rnd(zone))
+ return 0;
+
+ if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
+ struct dmz_dev *dev = zmd->dev;
+
+ ret = blkdev_reset_zones(dev->bdev,
+ dmz_start_sect(zmd, zone),
+ dev->zone_nr_sectors, GFP_KERNEL);
+ if (ret) {
+ dmz_dev_err(dev, "Reset zone %u failed %d",
+ dmz_id(zmd, zone), ret);
+ return ret;
+ }
+ }
+
+ /* Clear write error bit and rewind write pointer position */
+ clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
+ zone->wp_block = 0;
+
+ return 0;
+}
+
+static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
+
+/*
+ * Initialize chunk mapping.
+ */
+static int dmz_load_mapping(struct dmz_metadata *zmd)
+{
+ struct dmz_dev *dev = zmd->dev;
+ struct dm_zone *dzone, *bzone;
+ struct dmz_mblock *dmap_mblk = NULL;
+ struct dmz_map *dmap;
+ unsigned int i = 0, e = 0, chunk = 0;
+ unsigned int dzone_id;
+ unsigned int bzone_id;
+
+ /* Metadata block array for the chunk mapping table */
+ zmd->map_mblk = kcalloc(zmd->nr_map_blocks,
+ sizeof(struct dmz_mblock *), GFP_KERNEL);
+ if (!zmd->map_mblk)
+ return -ENOMEM;
+
+ /* Get chunk mapping table blocks and initialize zone mapping */
+ while (chunk < zmd->nr_chunks) {
+ if (!dmap_mblk) {
+ /* Get mapping block */
+ dmap_mblk = dmz_get_mblock(zmd, i + 1);
+ if (IS_ERR(dmap_mblk))
+ return PTR_ERR(dmap_mblk);
+ zmd->map_mblk[i] = dmap_mblk;
+ dmap = (struct dmz_map *) dmap_mblk->data;
+ i++;
+ e = 0;
+ }
+
+ /* Check data zone */
+ dzone_id = le32_to_cpu(dmap[e].dzone_id);
+ if (dzone_id == DMZ_MAP_UNMAPPED)
+ goto next;
+
+ if (dzone_id >= dev->nr_zones) {
+ dmz_dev_err(dev, "Chunk %u mapping: invalid data zone ID %u",
+ chunk, dzone_id);
+ return -EIO;
+ }
+
+ dzone = dmz_get(zmd, dzone_id);
+ set_bit(DMZ_DATA, &dzone->flags);
+ dzone->chunk = chunk;
+ dmz_get_zone_weight(zmd, dzone);
+
+ if (dmz_is_rnd(dzone))
+ list_add_tail(&dzone->link, &zmd->map_rnd_list);
+ else
+ list_add_tail(&dzone->link, &zmd->map_seq_list);
+
+ /* Check buffer zone */
+ bzone_id = le32_to_cpu(dmap[e].bzone_id);
+ if (bzone_id == DMZ_MAP_UNMAPPED)
+ goto next;
+
+ if (bzone_id >= dev->nr_zones) {
+ dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone ID %u",
+ chunk, bzone_id);
+ return -EIO;
+ }
+
+ bzone = dmz_get(zmd, bzone_id);
+ if (!dmz_is_rnd(bzone)) {
+ dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone %u",
+ chunk, bzone_id);
+ return -EIO;
+ }
+
+ set_bit(DMZ_DATA, &bzone->flags);
+ set_bit(DMZ_BUF, &bzone->flags);
+ bzone->chunk = chunk;
+ bzone->bzone = dzone;
+ dzone->bzone = bzone;
+ dmz_get_zone_weight(zmd, bzone);
+ list_add_tail(&bzone->link, &zmd->map_rnd_list);
+next:
+ chunk++;
+ e++;
+ if (e >= DMZ_MAP_ENTRIES)
+ dmap_mblk = NULL;
+ }
+
+ /*
+ * At this point, only meta zones and mapped data zones were
+ * fully initialized. All remaining zones are unmapped data
+ * zones. Finish initializing those here.
+ */
+ for (i = 0; i < dev->nr_zones; i++) {
+ dzone = dmz_get(zmd, i);
+ if (dmz_is_meta(dzone))
+ continue;
+
+ if (dmz_is_rnd(dzone))
+ zmd->nr_rnd++;
+ else
+ zmd->nr_seq++;
+
+ if (dmz_is_data(dzone)) {
+ /* Already initialized */
+ continue;
+ }
+
+ /* Unmapped data zone */
+ set_bit(DMZ_DATA, &dzone->flags);
+ dzone->chunk = DMZ_MAP_UNMAPPED;
+ if (dmz_is_rnd(dzone)) {
+ list_add_tail(&dzone->link, &zmd->unmap_rnd_list);
+ atomic_inc(&zmd->unmap_nr_rnd);
+ } else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
+ list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
+ atomic_inc(&zmd->nr_reserved_seq_zones);
+ zmd->nr_seq--;
+ } else {
+ list_add_tail(&dzone->link, &zmd->unmap_seq_list);
+ atomic_inc(&zmd->unmap_nr_seq);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Set a data chunk mapping.
+ */
+static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk,
+ unsigned int dzone_id, unsigned int bzone_id)
+{
+ struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
+ struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
+ int map_idx = chunk & DMZ_MAP_ENTRIES_MASK;
+
+ dmap[map_idx].dzone_id = cpu_to_le32(dzone_id);
+ dmap[map_idx].bzone_id = cpu_to_le32(bzone_id);
+ dmz_dirty_mblock(zmd, dmap_mblk);
+}
+
+/*
+ * The list of mapped zones is maintained in LRU order.
+ * This rotates a zone to the end of its map list.
+ */
+static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
+{
+ if (list_empty(&zone->link))
+ return;
+
+ list_del_init(&zone->link);
+ if (dmz_is_seq(zone)) {
+ /* LRU rotate sequential zone */
+ list_add_tail(&zone->link, &zmd->map_seq_list);
+ } else {
+ /* LRU rotate random zone */
+ list_add_tail(&zone->link, &zmd->map_rnd_list);
+ }
+}
+
+/*
+ * The list of mapped random zones is maintained
+ * in LRU order. This rotates a zone to the end of the list.
+ */
+static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
+{
+ __dmz_lru_zone(zmd, zone);
+ if (zone->bzone)
+ __dmz_lru_zone(zmd, zone->bzone);
+}
+
+/*
+ * Wait for any zone to be freed.
+ */
+static void dmz_wait_for_free_zones(struct dmz_metadata *zmd)
+{
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&zmd->free_wq, &wait, TASK_UNINTERRUPTIBLE);
+ dmz_unlock_map(zmd);
+ dmz_unlock_metadata(zmd);
+
+ io_schedule_timeout(HZ);
+
+ dmz_lock_metadata(zmd);
+ dmz_lock_map(zmd);
+ finish_wait(&zmd->free_wq, &wait);
+}
+
+/*
+ * Lock a zone for reclaim (set the zone RECLAIM bit).
+ * Returns 0 if the zone cannot be locked or if it is already locked,
+ * and 1 otherwise.
+ */
+int dmz_lock_zone_reclaim(struct dm_zone *zone)
+{
+ /* Active zones cannot be reclaimed */
+ if (dmz_is_active(zone))
+ return 0;
+
+ return !test_and_set_bit(DMZ_RECLAIM, &zone->flags);
+}
+
+/*
+ * Clear a zone reclaim flag.
+ */
+void dmz_unlock_zone_reclaim(struct dm_zone *zone)
+{
+ WARN_ON(dmz_is_active(zone));
+ WARN_ON(!dmz_in_reclaim(zone));
+
+ clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
+ smp_mb__after_atomic();
+ wake_up_bit(&zone->flags, DMZ_RECLAIM);
+}
+
+/*
+ * Wait for a zone reclaim to complete.
+ */
+static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
+{
+ dmz_unlock_map(zmd);
+ dmz_unlock_metadata(zmd);
+ wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
+ dmz_lock_metadata(zmd);
+ dmz_lock_map(zmd);
+}
+
+/*
+ * Select a random write zone for reclaim.
+ */
+static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
+{
+ struct dm_zone *dzone = NULL;
+ struct dm_zone *zone;
+
+ if (list_empty(&zmd->map_rnd_list))
+ return NULL;
+
+ list_for_each_entry(zone, &zmd->map_rnd_list, link) {
+ if (dmz_is_buf(zone))
+ dzone = zone->bzone;
+ else
+ dzone = zone;
+ if (dmz_lock_zone_reclaim(dzone))
+ return dzone;
+ }
+
+ return NULL;
+}
+
+/*
+ * Select a buffered sequential zone for reclaim.
+ */
+static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
+{
+ struct dm_zone *zone;
+
+ if (list_empty(&zmd->map_seq_list))
+ return NULL;
+
+ list_for_each_entry(zone, &zmd->map_seq_list, link) {
+ if (!zone->bzone)
+ continue;
+ if (dmz_lock_zone_reclaim(zone))
+ return zone;
+ }
+
+ return NULL;
+}
+
+/*
+ * Select a zone for reclaim.
+ */
+struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
+{
+ struct dm_zone *zone;
+
+ /*
+ * Search for a zone candidate to reclaim: 2 cases are possible.
+ * (1) There are no free sequential zones. Then a random data zone
+ * cannot be reclaimed. So choose a sequential zone to reclaim so
+ * that afterward a random zone can be reclaimed.
+ * (2) At least one free sequential zone is available, then choose
+ * the oldest random zone (data or buffer) that can be locked.
+ */
+ dmz_lock_map(zmd);
+ if (list_empty(&zmd->reserved_seq_zones_list))
+ zone = dmz_get_seq_zone_for_reclaim(zmd);
+ else
+ zone = dmz_get_rnd_zone_for_reclaim(zmd);
+ dmz_unlock_map(zmd);
+
+ return zone;
+}
+
+/*
+ * Activate a zone (increment its reference count).
+ */
+void dmz_activate_zone(struct dm_zone *zone)
+{
+ set_bit(DMZ_ACTIVE, &zone->flags);
+ atomic_inc(&zone->refcount);
+}
+
+/*
+ * Deactivate a zone. This decrements the zone reference counter
+ * and clears the active state of the zone once the count reaches 0,
+ * indicating that all BIOs to the zone have completed.
+ */
+void dmz_deactivate_zone(struct dm_zone *zone)
+{
+ if (atomic_dec_and_test(&zone->refcount)) {
+ WARN_ON(!test_bit(DMZ_ACTIVE, &zone->flags));
+ clear_bit_unlock(DMZ_ACTIVE, &zone->flags);
+ smp_mb__after_atomic();
+ }
+}
+
+/*
+ * Get the zone mapping a chunk, if the chunk is mapped already.
+ * If no mapping exists and the operation is WRITE, a zone is
+ * allocated and used to map the chunk.
+ * The zone returned will be set to the active state.
+ */
+struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, int op)
+{
+ struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
+ struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
+ int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK;
+ unsigned int dzone_id;
+ struct dm_zone *dzone = NULL;
+ int ret = 0;
+
+ dmz_lock_map(zmd);
+again:
+ /* Get the chunk mapping */
+ dzone_id = le32_to_cpu(dmap[dmap_idx].dzone_id);
+ if (dzone_id == DMZ_MAP_UNMAPPED) {
+ /*
+ * Reads and discards in unmapped chunks are fine. But for
+ * writes, we need a mapping, so get one.
+ */
+ if (op != REQ_OP_WRITE)
+ goto out;
+
+ /* Allocate a random zone */
+ dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
+ if (!dzone) {
+ dmz_wait_for_free_zones(zmd);
+ goto again;
+ }
+
+ dmz_map_zone(zmd, dzone, chunk);
+
+ } else {
+ /* The chunk is already mapped: get the mapping zone */
+ dzone = dmz_get(zmd, dzone_id);
+ if (dzone->chunk != chunk) {
+ dzone = ERR_PTR(-EIO);
+ goto out;
+ }
+
+ /* Repair the write pointer if the sequential dzone has an error */
+ if (dmz_seq_write_err(dzone)) {
+ ret = dmz_handle_seq_write_err(zmd, dzone);
+ if (ret) {
+ dzone = ERR_PTR(-EIO);
+ goto out;
+ }
+ clear_bit(DMZ_SEQ_WRITE_ERR, &dzone->flags);
+ }
+ }
+
+ /*
+ * If the zone is being reclaimed, the chunk mapping may change
+ * to a different zone. So wait for reclaim and retry. Otherwise,
+ * activate the zone (this will prevent reclaim from touching it).
+ */
+ if (dmz_in_reclaim(dzone)) {
+ dmz_wait_for_reclaim(zmd, dzone);
+ goto again;
+ }
+ dmz_activate_zone(dzone);
+ dmz_lru_zone(zmd, dzone);
+out:
+ dmz_unlock_map(zmd);
+
+ return dzone;
+}
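A minimal standalone sketch of the chunk lookup arithmetic used above, assuming 4096-byte metadata blocks and 8-byte mapping entries (512 entries per block; these values are not shown in this hunk and are only illustrative):

#include <stdio.h>

/* Assumed geometry: 4096-byte metadata block, 8-byte mapping entry. */
#define MAP_ENTRIES        512U              /* entries per mapping block */
#define MAP_ENTRIES_SHIFT  9U                /* log2(MAP_ENTRIES) */
#define MAP_ENTRIES_MASK   (MAP_ENTRIES - 1)

int main(void)
{
	unsigned int chunk = 1234;

	/* Which cached mapping block holds this chunk's entry... */
	unsigned int mblk_idx = chunk >> MAP_ENTRIES_SHIFT;
	/* ...and which entry inside that block. */
	unsigned int map_idx = chunk & MAP_ENTRIES_MASK;

	printf("chunk %u -> map block %u, entry %u\n",
	       chunk, mblk_idx, map_idx);
	return 0;
}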
+
+/*
+ * Writes and discards change the block validity of data zones and their
+ * buffer zones. Check here whether valid blocks are still present. If all
+ * blocks are invalid, the zones can be unmapped on the fly without waiting
+ * for reclaim to do it.
+ */
+void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *dzone)
+{
+ struct dm_zone *bzone;
+
+ dmz_lock_map(zmd);
+
+ bzone = dzone->bzone;
+ if (bzone) {
+ if (dmz_weight(bzone))
+ dmz_lru_zone(zmd, bzone);
+ else {
+ /* Empty buffer zone: reclaim it */
+ dmz_unmap_zone(zmd, bzone);
+ dmz_free_zone(zmd, bzone);
+ bzone = NULL;
+ }
+ }
+
+ /* Deactivate the data zone */
+ dmz_deactivate_zone(dzone);
+ if (dmz_is_active(dzone) || bzone || dmz_weight(dzone))
+ dmz_lru_zone(zmd, dzone);
+ else {
+ /* Unbuffered inactive empty data zone: reclaim it */
+ dmz_unmap_zone(zmd, dzone);
+ dmz_free_zone(zmd, dzone);
+ }
+
+ dmz_unlock_map(zmd);
+}
+
+/*
+ * Allocate and map a random zone to buffer a chunk
+ * already mapped to a sequential zone.
+ */
+struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
+ struct dm_zone *dzone)
+{
+ struct dm_zone *bzone;
+
+ dmz_lock_map(zmd);
+again:
+ bzone = dzone->bzone;
+ if (bzone)
+ goto out;
+
+ /* Allocate a random zone */
+ bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
+ if (!bzone) {
+ dmz_wait_for_free_zones(zmd);
+ goto again;
+ }
+
+ /* Update the chunk mapping */
+ dmz_set_chunk_mapping(zmd, dzone->chunk, dmz_id(zmd, dzone),
+ dmz_id(zmd, bzone));
+
+ set_bit(DMZ_BUF, &bzone->flags);
+ bzone->chunk = dzone->chunk;
+ bzone->bzone = dzone;
+ dzone->bzone = bzone;
+ list_add_tail(&bzone->link, &zmd->map_rnd_list);
+out:
+ dmz_unlock_map(zmd);
+
+ return bzone;
+}
+
+/*
+ * Get an unmapped (free) zone.
+ * This must be called with the mapping lock held.
+ */
+struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags)
+{
+ struct list_head *list;
+ struct dm_zone *zone;
+
+ if (flags & DMZ_ALLOC_RND)
+ list = &zmd->unmap_rnd_list;
+ else
+ list = &zmd->unmap_seq_list;
+again:
+ if (list_empty(list)) {
+ /*
+ * No free zone: if this is for reclaim, allow using the
+ * reserved sequential zones.
+ */
+ if (!(flags & DMZ_ALLOC_RECLAIM) ||
+ list_empty(&zmd->reserved_seq_zones_list))
+ return NULL;
+
+ zone = list_first_entry(&zmd->reserved_seq_zones_list,
+ struct dm_zone, link);
+ list_del_init(&zone->link);
+ atomic_dec(&zmd->nr_reserved_seq_zones);
+ return zone;
+ }
+
+ zone = list_first_entry(list, struct dm_zone, link);
+ list_del_init(&zone->link);
+
+ if (dmz_is_rnd(zone))
+ atomic_dec(&zmd->unmap_nr_rnd);
+ else
+ atomic_dec(&zmd->unmap_nr_seq);
+
+ if (dmz_is_offline(zone)) {
+ dmz_dev_warn(zmd->dev, "Zone %u is offline", dmz_id(zmd, zone));
+ zone = NULL;
+ goto again;
+ }
+
+ return zone;
+}
+
+/*
+ * Free a zone.
+ * This must be called with the mapping lock held.
+ */
+void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
+{
+ /* If this is a sequential zone, reset it */
+ if (dmz_is_seq(zone))
+ dmz_reset_zone(zmd, zone);
+
+ /* Return the zone to its type unmap list */
+ if (dmz_is_rnd(zone)) {
+ list_add_tail(&zone->link, &zmd->unmap_rnd_list);
+ atomic_inc(&zmd->unmap_nr_rnd);
+ } else if (atomic_read(&zmd->nr_reserved_seq_zones) <
+ zmd->nr_reserved_seq) {
+ list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
+ atomic_inc(&zmd->nr_reserved_seq_zones);
+ } else {
+ list_add_tail(&zone->link, &zmd->unmap_seq_list);
+ atomic_inc(&zmd->unmap_nr_seq);
+ }
+
+ wake_up_all(&zmd->free_wq);
+}
+
+/*
+ * Map a chunk to a zone.
+ * This must be called with the mapping lock held.
+ */
+void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
+ unsigned int chunk)
+{
+ /* Set the chunk mapping */
+ dmz_set_chunk_mapping(zmd, chunk, dmz_id(zmd, dzone),
+ DMZ_MAP_UNMAPPED);
+ dzone->chunk = chunk;
+ if (dmz_is_rnd(dzone))
+ list_add_tail(&dzone->link, &zmd->map_rnd_list);
+ else
+ list_add_tail(&dzone->link, &zmd->map_seq_list);
+}
+
+/*
+ * Unmap a zone.
+ * This must be called with the mapping lock held.
+ */
+void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
+{
+ unsigned int chunk = zone->chunk;
+ unsigned int dzone_id;
+
+ if (chunk == DMZ_MAP_UNMAPPED) {
+ /* Already unmapped */
+ return;
+ }
+
+ if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
+ /*
+ * Unmapping the chunk buffer zone: clear only
+ * the chunk buffer mapping
+ */
+ dzone_id = dmz_id(zmd, zone->bzone);
+ zone->bzone->bzone = NULL;
+ zone->bzone = NULL;
+
+ } else {
+ /*
+ * Unmapping the chunk data zone: the zone must
+ * not be buffered.
+ */
+ if (WARN_ON(zone->bzone)) {
+ zone->bzone->bzone = NULL;
+ zone->bzone = NULL;
+ }
+ dzone_id = DMZ_MAP_UNMAPPED;
+ }
+
+ dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED);
+
+ zone->chunk = DMZ_MAP_UNMAPPED;
+ list_del_init(&zone->link);
+}
+
+/*
+ * Set @nr_bits bits in @bitmap starting from @bit.
+ * Return the number of bits changed from 0 to 1.
+ */
+static unsigned int dmz_set_bits(unsigned long *bitmap,
+ unsigned int bit, unsigned int nr_bits)
+{
+ unsigned long *addr;
+ unsigned int end = bit + nr_bits;
+ unsigned int n = 0;
+
+ while (bit < end) {
+ if (((bit & (BITS_PER_LONG - 1)) == 0) &&
+ ((end - bit) >= BITS_PER_LONG)) {
+ /* Try to set the whole word at once */
+ addr = bitmap + BIT_WORD(bit);
+ if (*addr == 0) {
+ *addr = ULONG_MAX;
+ n += BITS_PER_LONG;
+ bit += BITS_PER_LONG;
+ continue;
+ }
+ }
+
+ if (!test_and_set_bit(bit, bitmap))
+ n++;
+ bit++;
+ }
+
+ return n;
+}
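The word-at-a-time path above is only a fast path; a minimal userspace sketch of the counting contract (only bits that change from 0 to 1 are counted), with a simple single-threaded stand-in for the kernel's test_and_set_bit():

#include <stdio.h>

#define BITS_PER_WORD (8 * (int)sizeof(unsigned long))

/* Userspace stand-in for the kernel's test_and_set_bit(), single threaded. */
static int test_and_set(unsigned long *bitmap, unsigned int bit)
{
	unsigned long mask = 1UL << (bit % BITS_PER_WORD);
	unsigned long old = bitmap[bit / BITS_PER_WORD];

	bitmap[bit / BITS_PER_WORD] = old | mask;
	return (old & mask) != 0;
}

/* Same contract as dmz_set_bits(): count bits that go from 0 to 1. */
static unsigned int set_bits(unsigned long *bitmap, unsigned int bit,
			     unsigned int nr_bits)
{
	unsigned int n = 0, end = bit + nr_bits;

	for (; bit < end; bit++)
		if (!test_and_set(bitmap, bit))
			n++;
	return n;
}

int main(void)
{
	unsigned long bitmap[2] = { 0 };

	test_and_set(bitmap, 5);                 /* bit 5 is already valid */
	printf("%u\n", set_bits(bitmap, 3, 8));  /* sets [3,11): prints 7 */
	return 0;
}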
+
+/*
+ * Get the bitmap block storing the bit for chunk_block in zone.
+ */
+static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
+ struct dm_zone *zone,
+ sector_t chunk_block)
+{
+ sector_t bitmap_block = 1 + zmd->nr_map_blocks +
+ (sector_t)(dmz_id(zmd, zone) * zmd->zone_nr_bitmap_blocks) +
+ (chunk_block >> DMZ_BLOCK_SHIFT_BITS);
+
+ return dmz_get_mblock(zmd, bitmap_block);
+}
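The bitmap block index computed above is a plain offset into the metadata set; a small sketch with assumed layout values (one superblock, then the chunk mapping blocks, then the per-zone bitmaps; the constants below are illustrative only):

#include <stdio.h>
#include <stdint.h>

/* Assumed layout values, for illustration only. */
#define NR_MAP_BLOCKS          128ULL  /* chunk mapping blocks after the sb */
#define ZONE_NR_BITMAP_BLOCKS    8ULL  /* bitmap blocks per zone */
#define BLOCK_SHIFT_BITS        15ULL  /* log2(bits in a 4096-byte block) */

int main(void)
{
	uint64_t zone_id = 42, chunk_block = 100000;

	/* 1 superblock + mapping blocks + bitmaps of lower zones + offset */
	uint64_t bitmap_block = 1 + NR_MAP_BLOCKS +
		zone_id * ZONE_NR_BITMAP_BLOCKS +
		(chunk_block >> BLOCK_SHIFT_BITS);

	printf("zone %llu block %llu -> bitmap metadata block %llu\n",
	       (unsigned long long)zone_id,
	       (unsigned long long)chunk_block,
	       (unsigned long long)bitmap_block);
	return 0;
}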
+
+/*
+ * Copy the valid blocks bitmap of from_zone to the bitmap of to_zone.
+ */
+int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
+ struct dm_zone *to_zone)
+{
+ struct dmz_mblock *from_mblk, *to_mblk;
+ sector_t chunk_block = 0;
+
+ /* Get the zones bitmap blocks */
+ while (chunk_block < zmd->dev->zone_nr_blocks) {
+ from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
+ if (IS_ERR(from_mblk))
+ return PTR_ERR(from_mblk);
+ to_mblk = dmz_get_bitmap(zmd, to_zone, chunk_block);
+ if (IS_ERR(to_mblk)) {
+ dmz_release_mblock(zmd, from_mblk);
+ return PTR_ERR(to_mblk);
+ }
+
+ memcpy(to_mblk->data, from_mblk->data, DMZ_BLOCK_SIZE);
+ dmz_dirty_mblock(zmd, to_mblk);
+
+ dmz_release_mblock(zmd, to_mblk);
+ dmz_release_mblock(zmd, from_mblk);
+
+ chunk_block += DMZ_BLOCK_SIZE_BITS;
+ }
+
+ to_zone->weight = from_zone->weight;
+
+ return 0;
+}
+
+/*
+ * Merge the valid blocks bitmap of from_zone into the bitmap of to_zone,
+ * starting from chunk_block.
+ */
+int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
+ struct dm_zone *to_zone, sector_t chunk_block)
+{
+ unsigned int nr_blocks;
+ int ret;
+
+ /* Get the zones bitmap blocks */
+ while (chunk_block < zmd->dev->zone_nr_blocks) {
+ /* Get a valid region from the source zone */
+ ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
+ if (ret <= 0)
+ return ret;
+
+ nr_blocks = ret;
+ ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks);
+ if (ret)
+ return ret;
+
+ chunk_block += nr_blocks;
+ }
+
+ return 0;
+}
+
+/*
+ * Validate all the blocks in the range [block..block+nr_blocks-1].
+ */
+int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
+ sector_t chunk_block, unsigned int nr_blocks)
+{
+ unsigned int count, bit, nr_bits;
+ unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
+ struct dmz_mblock *mblk;
+ unsigned int n = 0;
+
+ dmz_dev_debug(zmd->dev, "=> VALIDATE zone %u, block %llu, %u blocks",
+ dmz_id(zmd, zone), (unsigned long long)chunk_block,
+ nr_blocks);
+
+ WARN_ON(chunk_block + nr_blocks > zone_nr_blocks);
+
+ while (nr_blocks) {
+ /* Get bitmap block */
+ mblk = dmz_get_bitmap(zmd, zone, chunk_block);
+ if (IS_ERR(mblk))
+ return PTR_ERR(mblk);
+
+ /* Set bits */
+ bit = chunk_block & DMZ_BLOCK_MASK_BITS;
+ nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
+
+ count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
+ if (count) {
+ dmz_dirty_mblock(zmd, mblk);
+ n += count;
+ }
+ dmz_release_mblock(zmd, mblk);
+
+ nr_blocks -= nr_bits;
+ chunk_block += nr_bits;
+ }
+
+ if (likely(zone->weight + n <= zone_nr_blocks))
+ zone->weight += n;
+ else {
+ dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be <= %u",
+ dmz_id(zmd, zone), zone->weight,
+ zone_nr_blocks - n);
+ zone->weight = zone_nr_blocks;
+ }
+
+ return 0;
+}
+
+/*
+ * Clear nr_bits bits in bitmap starting from bit.
+ * Return the number of bits cleared.
+ */
+static int dmz_clear_bits(unsigned long *bitmap, int bit, int nr_bits)
+{
+ unsigned long *addr;
+ int end = bit + nr_bits;
+ int n = 0;
+
+ while (bit < end) {
+ if (((bit & (BITS_PER_LONG - 1)) == 0) &&
+ ((end - bit) >= BITS_PER_LONG)) {
+ /* Try to clear whole word at once */
+ addr = bitmap + BIT_WORD(bit);
+ if (*addr == ULONG_MAX) {
+ *addr = 0;
+ n += BITS_PER_LONG;
+ bit += BITS_PER_LONG;
+ continue;
+ }
+ }
+
+ if (test_and_clear_bit(bit, bitmap))
+ n++;
+ bit++;
+ }
+
+ return n;
+}
+
+/*
+ * Invalidate all the blocks in the range [block..block+nr_blocks-1].
+ */
+int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
+ sector_t chunk_block, unsigned int nr_blocks)
+{
+ unsigned int count, bit, nr_bits;
+ struct dmz_mblock *mblk;
+ unsigned int n = 0;
+
+ dmz_dev_debug(zmd->dev, "=> INVALIDATE zone %u, block %llu, %u blocks",
+ dmz_id(zmd, zone), (u64)chunk_block, nr_blocks);
+
+ WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);
+
+ while (nr_blocks) {
+ /* Get bitmap block */
+ mblk = dmz_get_bitmap(zmd, zone, chunk_block);
+ if (IS_ERR(mblk))
+ return PTR_ERR(mblk);
+
+ /* Clear bits */
+ bit = chunk_block & DMZ_BLOCK_MASK_BITS;
+ nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
+
+ count = dmz_clear_bits((unsigned long *)mblk->data,
+ bit, nr_bits);
+ if (count) {
+ dmz_dirty_mblock(zmd, mblk);
+ n += count;
+ }
+ dmz_release_mblock(zmd, mblk);
+
+ nr_blocks -= nr_bits;
+ chunk_block += nr_bits;
+ }
+
+ if (zone->weight >= n)
+ zone->weight -= n;
+ else {
+ dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be >= %u",
+ dmz_id(zmd, zone), zone->weight, n);
+ zone->weight = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Get a block bit value.
+ */
+static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
+ sector_t chunk_block)
+{
+ struct dmz_mblock *mblk;
+ int ret;
+
+ WARN_ON(chunk_block >= zmd->dev->zone_nr_blocks);
+
+ /* Get bitmap block */
+ mblk = dmz_get_bitmap(zmd, zone, chunk_block);
+ if (IS_ERR(mblk))
+ return PTR_ERR(mblk);
+
+ /* Get offset */
+ ret = test_bit(chunk_block & DMZ_BLOCK_MASK_BITS,
+ (unsigned long *) mblk->data) != 0;
+
+ dmz_release_mblock(zmd, mblk);
+
+ return ret;
+}
+
+/*
+ * Return the number of blocks from chunk_block to the first block with a bit
+ * value specified by set. Search at most nr_blocks blocks from chunk_block.
+ */
+static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
+ sector_t chunk_block, unsigned int nr_blocks,
+ int set)
+{
+ struct dmz_mblock *mblk;
+ unsigned int bit, set_bit, nr_bits;
+ unsigned long *bitmap;
+ int n = 0;
+
+ WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);
+
+ while (nr_blocks) {
+ /* Get bitmap block */
+ mblk = dmz_get_bitmap(zmd, zone, chunk_block);
+ if (IS_ERR(mblk))
+ return PTR_ERR(mblk);
+
+ /* Get offset */
+ bitmap = (unsigned long *) mblk->data;
+ bit = chunk_block & DMZ_BLOCK_MASK_BITS;
+ nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
+ if (set)
+ set_bit = find_next_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit);
+ else
+ set_bit = find_next_zero_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit);
+ dmz_release_mblock(zmd, mblk);
+
+ n += set_bit - bit;
+ if (set_bit < DMZ_BLOCK_SIZE_BITS)
+ break;
+
+ nr_blocks -= nr_bits;
+ chunk_block += nr_bits;
+ }
+
+ return n;
+}
+
+/*
+ * Test if chunk_block is valid. If it is, the number of consecutive
+ * valid blocks from chunk_block will be returned.
+ */
+int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
+ sector_t chunk_block)
+{
+ int valid;
+
+ valid = dmz_test_block(zmd, zone, chunk_block);
+ if (valid <= 0)
+ return valid;
+
+ /* The block is valid: get the number of consecutive valid blocks from it */
+ return dmz_to_next_set_block(zmd, zone, chunk_block,
+ zmd->dev->zone_nr_blocks - chunk_block, 0);
+}
+
+/*
+ * Find the first valid block from @chunk_block in @zone.
+ * If such a block is found, its position is returned through
+ * @chunk_block and the number of consecutive valid blocks starting
+ * at that position is returned.
+ */
+int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
+ sector_t *chunk_block)
+{
+ sector_t start_block = *chunk_block;
+ int ret;
+
+ ret = dmz_to_next_set_block(zmd, zone, start_block,
+ zmd->dev->zone_nr_blocks - start_block, 1);
+ if (ret < 0)
+ return ret;
+
+ start_block += ret;
+ *chunk_block = start_block;
+
+ return dmz_to_next_set_block(zmd, zone, start_block,
+ zmd->dev->zone_nr_blocks - start_block, 0);
+}
+
+/*
+ * Count the number of bits set starting from bit up to bit + nr_bits - 1.
+ */
+static int dmz_count_bits(void *bitmap, int bit, int nr_bits)
+{
+ unsigned long *addr;
+ int end = bit + nr_bits;
+ int n = 0;
+
+ while (bit < end) {
+ if (((bit & (BITS_PER_LONG - 1)) == 0) &&
+ ((end - bit) >= BITS_PER_LONG)) {
+ addr = (unsigned long *)bitmap + BIT_WORD(bit);
+ if (*addr == ULONG_MAX) {
+ n += BITS_PER_LONG;
+ bit += BITS_PER_LONG;
+ continue;
+ }
+ }
+
+ if (test_bit(bit, bitmap))
+ n++;
+ bit++;
+ }
+
+ return n;
+}
+
+/*
+ * Get a zone weight.
+ */
+static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
+{
+ struct dmz_mblock *mblk;
+ sector_t chunk_block = 0;
+ unsigned int bit, nr_bits;
+ unsigned int nr_blocks = zmd->dev->zone_nr_blocks;
+ void *bitmap;
+ int n = 0;
+
+ while (nr_blocks) {
+ /* Get bitmap block */
+ mblk = dmz_get_bitmap(zmd, zone, chunk_block);
+ if (IS_ERR(mblk)) {
+ n = 0;
+ break;
+ }
+
+ /* Count bits in this block */
+ bitmap = mblk->data;
+ bit = chunk_block & DMZ_BLOCK_MASK_BITS;
+ nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
+ n += dmz_count_bits(bitmap, bit, nr_bits);
+
+ dmz_release_mblock(zmd, mblk);
+
+ nr_blocks -= nr_bits;
+ chunk_block += nr_bits;
+ }
+
+ zone->weight = n;
+}
+
+/*
+ * Cleanup the zoned metadata resources.
+ */
+static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
+{
+ struct rb_root *root;
+ struct dmz_mblock *mblk, *next;
+ int i;
+
+ /* Release zone mapping resources */
+ if (zmd->map_mblk) {
+ for (i = 0; i < zmd->nr_map_blocks; i++)
+ dmz_release_mblock(zmd, zmd->map_mblk[i]);
+ kfree(zmd->map_mblk);
+ zmd->map_mblk = NULL;
+ }
+
+ /* Release super blocks */
+ for (i = 0; i < 2; i++) {
+ if (zmd->sb[i].mblk) {
+ dmz_free_mblock(zmd, zmd->sb[i].mblk);
+ zmd->sb[i].mblk = NULL;
+ }
+ }
+
+ /* Free cached blocks */
+ while (!list_empty(&zmd->mblk_dirty_list)) {
+ mblk = list_first_entry(&zmd->mblk_dirty_list,
+ struct dmz_mblock, link);
+ dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
+ (u64)mblk->no, atomic_read(&mblk->ref));
+ list_del_init(&mblk->link);
+ rb_erase(&mblk->node, &zmd->mblk_rbtree);
+ dmz_free_mblock(zmd, mblk);
+ }
+
+ while (!list_empty(&zmd->mblk_lru_list)) {
+ mblk = list_first_entry(&zmd->mblk_lru_list,
+ struct dmz_mblock, link);
+ list_del_init(&mblk->link);
+ rb_erase(&mblk->node, &zmd->mblk_rbtree);
+ dmz_free_mblock(zmd, mblk);
+ }
+
+ /* Sanity checks: the mblock rbtree should now be empty */
+ root = &zmd->mblk_rbtree;
+ rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
+ dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
+ (u64)mblk->no, atomic_read(&mblk->ref));
+ atomic_set(&mblk->ref, 0);
+ dmz_free_mblock(zmd, mblk);
+ }
+
+ /* Free the zone descriptors */
+ dmz_drop_zones(zmd);
+}
+
+/*
+ * Initialize the zoned metadata.
+ */
+int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
+{
+ struct dmz_metadata *zmd;
+ unsigned int i, zid;
+ struct dm_zone *zone;
+ int ret;
+
+ zmd = kzalloc(sizeof(struct dmz_metadata), GFP_KERNEL);
+ if (!zmd)
+ return -ENOMEM;
+
+ zmd->dev = dev;
+ zmd->mblk_rbtree = RB_ROOT;
+ init_rwsem(&zmd->mblk_sem);
+ mutex_init(&zmd->mblk_flush_lock);
+ spin_lock_init(&zmd->mblk_lock);
+ INIT_LIST_HEAD(&zmd->mblk_lru_list);
+ INIT_LIST_HEAD(&zmd->mblk_dirty_list);
+
+ mutex_init(&zmd->map_lock);
+ atomic_set(&zmd->unmap_nr_rnd, 0);
+ INIT_LIST_HEAD(&zmd->unmap_rnd_list);
+ INIT_LIST_HEAD(&zmd->map_rnd_list);
+
+ atomic_set(&zmd->unmap_nr_seq, 0);
+ INIT_LIST_HEAD(&zmd->unmap_seq_list);
+ INIT_LIST_HEAD(&zmd->map_seq_list);
+
+ atomic_set(&zmd->nr_reserved_seq_zones, 0);
+ INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);
+
+ init_waitqueue_head(&zmd->free_wq);
+
+ /* Initialize zone descriptors */
+ ret = dmz_init_zones(zmd);
+ if (ret)
+ goto err;
+
+ /* Get super block */
+ ret = dmz_load_sb(zmd);
+ if (ret)
+ goto err;
+
+ /* Set metadata zones starting from sb_zone */
+ zid = dmz_id(zmd, zmd->sb_zone);
+ for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
+ zone = dmz_get(zmd, zid + i);
+ if (!dmz_is_rnd(zone))
+ goto err;
+ set_bit(DMZ_META, &zone->flags);
+ }
+
+ /* Load mapping table */
+ ret = dmz_load_mapping(zmd);
+ if (ret)
+ goto err;
+
+ /*
+ * Cache size boundaries: allow at least 2 super blocks, the chunk map
+ * blocks and enough blocks to be able to cache the bitmap blocks of
+ * up to 16 zones when idle (min_nr_mblks). Otherwise, if busy, allow
+ * the cache to add 512 more metadata blocks.
+ */
+ zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16;
+ zmd->max_nr_mblks = zmd->min_nr_mblks + 512;
+ zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
+ zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
+ zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
+
+ /* Metadata cache shrinker */
+ ret = register_shrinker(&zmd->mblk_shrinker);
+ if (ret) {
+ dmz_dev_err(dev, "Register metadata cache shrinker failed");
+ goto err;
+ }
+
+ dmz_dev_info(dev, "Host-%s zoned block device",
+ bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
+ "aware" : "managed");
+ dmz_dev_info(dev, " %llu 512-byte logical sectors",
+ (u64)dev->capacity);
+ dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors",
+ dev->nr_zones, (u64)dev->zone_nr_sectors);
+ dmz_dev_info(dev, " %u metadata zones",
+ zmd->nr_meta_zones * 2);
+ dmz_dev_info(dev, " %u data zones for %u chunks",
+ zmd->nr_data_zones, zmd->nr_chunks);
+ dmz_dev_info(dev, " %u random zones (%u unmapped)",
+ zmd->nr_rnd, atomic_read(&zmd->unmap_nr_rnd));
+ dmz_dev_info(dev, " %u sequential zones (%u unmapped)",
+ zmd->nr_seq, atomic_read(&zmd->unmap_nr_seq));
+ dmz_dev_info(dev, " %u reserved sequential data zones",
+ zmd->nr_reserved_seq);
+
+ dmz_dev_debug(dev, "Format:");
+ dmz_dev_debug(dev, "%u metadata blocks per set (%u max cache)",
+ zmd->nr_meta_blocks, zmd->max_nr_mblks);
+ dmz_dev_debug(dev, " %u data zone mapping blocks",
+ zmd->nr_map_blocks);
+ dmz_dev_debug(dev, " %u bitmap blocks",
+ zmd->nr_bitmap_blocks);
+
+ *metadata = zmd;
+
+ return 0;
+err:
+ dmz_cleanup_metadata(zmd);
+ kfree(zmd);
+ *metadata = NULL;
+
+ return ret;
+}
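The cache bounds set in dmz_ctr_metadata() above are simple arithmetic; a hedged sketch with assumed metadata sizes (the two inputs below are illustrative, not taken from this patch):

#include <stdio.h>

int main(void)
{
	/* Assumed values, for illustration only. */
	unsigned int nr_map_blocks = 128;
	unsigned int zone_nr_bitmap_blocks = 8;

	/* 2 super blocks + chunk map + bitmaps of 16 zones when idle */
	unsigned int min_nr_mblks = 2 + nr_map_blocks +
				    zone_nr_bitmap_blocks * 16;
	/* When busy, allow 512 more cached metadata blocks */
	unsigned int max_nr_mblks = min_nr_mblks + 512;

	printf("min cache %u blocks, max cache %u blocks\n",
	       min_nr_mblks, max_nr_mblks);
	return 0;
}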
+
+/*
+ * Cleanup the zoned metadata resources.
+ */
+void dmz_dtr_metadata(struct dmz_metadata *zmd)
+{
+ unregister_shrinker(&zmd->mblk_shrinker);
+ dmz_cleanup_metadata(zmd);
+ kfree(zmd);
+}
+
+/*
+ * Check zone information on resume.
+ */
+int dmz_resume_metadata(struct dmz_metadata *zmd)
+{
+ struct dmz_dev *dev = zmd->dev;
+ struct dm_zone *zone;
+ sector_t wp_block;
+ unsigned int i;
+ int ret;
+
+ /* Check zones */
+ for (i = 0; i < dev->nr_zones; i++) {
+ zone = dmz_get(zmd, i);
+ if (!zone) {
+ dmz_dev_err(dev, "Unable to get zone %u", i);
+ return -EIO;
+ }
+
+ wp_block = zone->wp_block;
+
+ ret = dmz_update_zone(zmd, zone);
+ if (ret) {
+ dmz_dev_err(dev, "Broken zone %u", i);
+ return ret;
+ }
+
+ if (dmz_is_offline(zone)) {
+ dmz_dev_warn(dev, "Zone %u is offline", i);
+ continue;
+ }
+
+ /* Check write pointer */
+ if (!dmz_is_seq(zone))
+ zone->wp_block = 0;
+ else if (zone->wp_block != wp_block) {
+ dmz_dev_err(dev, "Zone %u: Invalid wp (%llu / %llu)",
+ i, (u64)zone->wp_block, (u64)wp_block);
+ zone->wp_block = wp_block;
+ dmz_invalidate_blocks(zmd, zone, zone->wp_block,
+ dev->zone_nr_blocks - zone->wp_block);
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
new file mode 100644
index 000000000000..05c0a126f5c8
--- /dev/null
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -0,0 +1,570 @@
+/*
+ * Copyright (C) 2017 Western Digital Corporation or its affiliates.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-zoned.h"
+
+#include <linux/module.h>
+
+#define DM_MSG_PREFIX "zoned reclaim"
+
+struct dmz_reclaim {
+ struct dmz_metadata *metadata;
+ struct dmz_dev *dev;
+
+ struct delayed_work work;
+ struct workqueue_struct *wq;
+
+ struct dm_kcopyd_client *kc;
+ struct dm_kcopyd_throttle kc_throttle;
+ int kc_err;
+
+ unsigned long flags;
+
+ /* Last target access time */
+ unsigned long atime;
+};
+
+/*
+ * Reclaim state flags.
+ */
+enum {
+ DMZ_RECLAIM_KCOPY,
+};
+
+/*
+ * Period of target BIO inactivity (in jiffies) after which the target is considered idle.
+ */
+#define DMZ_IDLE_PERIOD (10UL * HZ)
+
+/*
+ * Percentage of unmapped (free) random zones below which reclaim starts
+ * even if the target is busy.
+ */
+#define DMZ_RECLAIM_LOW_UNMAP_RND 30
+
+/*
+ * Percentage of unmapped (free) random zones above which reclaim will
+ * stop if the target is busy.
+ */
+#define DMZ_RECLAIM_HIGH_UNMAP_RND 50
+
+/*
+ * Align a sequential zone write pointer to chunk_block.
+ */
+static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
+ sector_t block)
+{
+ struct dmz_metadata *zmd = zrc->metadata;
+ sector_t wp_block = zone->wp_block;
+ unsigned int nr_blocks;
+ int ret;
+
+ if (wp_block == block)
+ return 0;
+
+ if (wp_block > block)
+ return -EIO;
+
+ /*
+ * Zero out the space between the write
+ * pointer and the requested position.
+ */
+ nr_blocks = block - wp_block;
+ ret = blkdev_issue_zeroout(zrc->dev->bdev,
+ dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
+ dmz_blk2sect(nr_blocks), GFP_NOFS, false);
+ if (ret) {
+ dmz_dev_err(zrc->dev,
+ "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
+ dmz_id(zmd, zone), (unsigned long long)wp_block,
+ (unsigned long long)block, nr_blocks, ret);
+ return ret;
+ }
+
+ zone->wp_block = block;
+
+ return 0;
+}
+
+/*
+ * dm_kcopyd_copy end notification.
+ */
+static void dmz_reclaim_kcopy_end(int read_err, unsigned long write_err,
+ void *context)
+{
+ struct dmz_reclaim *zrc = context;
+
+ if (read_err || write_err)
+ zrc->kc_err = -EIO;
+ else
+ zrc->kc_err = 0;
+
+ clear_bit_unlock(DMZ_RECLAIM_KCOPY, &zrc->flags);
+ smp_mb__after_atomic();
+ wake_up_bit(&zrc->flags, DMZ_RECLAIM_KCOPY);
+}
+
+/*
+ * Copy valid blocks of src_zone into dst_zone.
+ */
+static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
+ struct dm_zone *src_zone, struct dm_zone *dst_zone)
+{
+ struct dmz_metadata *zmd = zrc->metadata;
+ struct dmz_dev *dev = zrc->dev;
+ struct dm_io_region src, dst;
+ sector_t block = 0, end_block;
+ sector_t nr_blocks;
+ sector_t src_zone_block;
+ sector_t dst_zone_block;
+ unsigned long flags = 0;
+ int ret;
+
+ if (dmz_is_seq(src_zone))
+ end_block = src_zone->wp_block;
+ else
+ end_block = dev->zone_nr_blocks;
+ src_zone_block = dmz_start_block(zmd, src_zone);
+ dst_zone_block = dmz_start_block(zmd, dst_zone);
+
+ if (dmz_is_seq(dst_zone))
+ set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
+
+ while (block < end_block) {
+ /* Get a valid region from the source zone */
+ ret = dmz_first_valid_block(zmd, src_zone, &block);
+ if (ret <= 0)
+ return ret;
+ nr_blocks = ret;
+
+ /*
+ * If we are writing to a sequential zone, we must make sure
+ * that writes are sequential. So zero out any hole between the
+ * current write pointer and the start of this valid region.
+ */
+ if (dmz_is_seq(dst_zone)) {
+ ret = dmz_reclaim_align_wp(zrc, dst_zone, block);
+ if (ret)
+ return ret;
+ }
+
+ src.bdev = dev->bdev;
+ src.sector = dmz_blk2sect(src_zone_block + block);
+ src.count = dmz_blk2sect(nr_blocks);
+
+ dst.bdev = dev->bdev;
+ dst.sector = dmz_blk2sect(dst_zone_block + block);
+ dst.count = src.count;
+
+ /* Copy the valid region */
+ set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags);
+ ret = dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
+ dmz_reclaim_kcopy_end, zrc);
+ if (ret)
+ return ret;
+
+ /* Wait for copy to complete */
+ wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY,
+ TASK_UNINTERRUPTIBLE);
+ if (zrc->kc_err)
+ return zrc->kc_err;
+
+ block += nr_blocks;
+ if (dmz_is_seq(dst_zone))
+ dst_zone->wp_block = block;
+ }
+
+ return 0;
+}
+
+/*
+ * Move the valid blocks of dzone's buffer zone into dzone (after its write
+ * pointer) and free the buffer zone.
+ */
+static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
+{
+ struct dm_zone *bzone = dzone->bzone;
+ sector_t chunk_block = dzone->wp_block;
+ struct dmz_metadata *zmd = zrc->metadata;
+ int ret;
+
+ dmz_dev_debug(zrc->dev,
+ "Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
+ dzone->chunk, dmz_id(zmd, bzone), dmz_weight(bzone),
+ dmz_id(zmd, dzone), dmz_weight(dzone));
+
+ /* Flush the buffer zone into the data zone */
+ ret = dmz_reclaim_copy(zrc, bzone, dzone);
+ if (ret < 0)
+ return ret;
+
+ dmz_lock_flush(zmd);
+
+ /* Validate copied blocks */
+ ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
+ if (ret == 0) {
+ /* Free the buffer zone */
+ dmz_invalidate_blocks(zmd, bzone, 0, zrc->dev->zone_nr_blocks);
+ dmz_lock_map(zmd);
+ dmz_unmap_zone(zmd, bzone);
+ dmz_unlock_zone_reclaim(dzone);
+ dmz_free_zone(zmd, bzone);
+ dmz_unlock_map(zmd);
+ }
+
+ dmz_unlock_flush(zmd);
+
+ return 0;
+}
+
+/*
+ * Merge valid blocks of dzone into its buffer zone and free dzone.
+ */
+static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
+{
+ unsigned int chunk = dzone->chunk;
+ struct dm_zone *bzone = dzone->bzone;
+ struct dmz_metadata *zmd = zrc->metadata;
+ int ret = 0;
+
+ dmz_dev_debug(zrc->dev,
+ "Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
+ chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
+ dmz_id(zmd, bzone), dmz_weight(bzone));
+
+ /* Flush data zone into the buffer zone */
+ ret = dmz_reclaim_copy(zrc, dzone, bzone);
+ if (ret < 0)
+ return ret;
+
+ dmz_lock_flush(zmd);
+
+ /* Validate copied blocks */
+ ret = dmz_merge_valid_blocks(zmd, dzone, bzone, 0);
+ if (ret == 0) {
+ /*
+ * Free the data zone and remap the chunk to
+ * the buffer zone.
+ */
+ dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
+ dmz_lock_map(zmd);
+ dmz_unmap_zone(zmd, bzone);
+ dmz_unmap_zone(zmd, dzone);
+ dmz_unlock_zone_reclaim(dzone);
+ dmz_free_zone(zmd, dzone);
+ dmz_map_zone(zmd, bzone, chunk);
+ dmz_unlock_map(zmd);
+ }
+
+ dmz_unlock_flush(zmd);
+
+ return 0;
+}
+
+/*
+ * Move valid blocks of the random data zone dzone into a free sequential zone.
+ * Once blocks are moved, remap the zone chunk to the sequential zone.
+ */
+static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
+{
+ unsigned int chunk = dzone->chunk;
+ struct dm_zone *szone = NULL;
+ struct dmz_metadata *zmd = zrc->metadata;
+ int ret;
+
+ /* Get a free sequential zone */
+ dmz_lock_map(zmd);
+ szone = dmz_alloc_zone(zmd, DMZ_ALLOC_RECLAIM);
+ dmz_unlock_map(zmd);
+ if (!szone)
+ return -ENOSPC;
+
+ dmz_dev_debug(zrc->dev,
+ "Chunk %u, move rnd zone %u (weight %u) to seq zone %u",
+ chunk, dmz_id(zmd, dzone), dmz_weight(dzone),
+ dmz_id(zmd, szone));
+
+ /* Flush the random data zone into the sequential zone */
+ ret = dmz_reclaim_copy(zrc, dzone, szone);
+
+ dmz_lock_flush(zmd);
+
+ if (ret == 0) {
+ /* Validate copied blocks */
+ ret = dmz_copy_valid_blocks(zmd, dzone, szone);
+ }
+ if (ret) {
+ /* Free the sequential zone */
+ dmz_lock_map(zmd);
+ dmz_free_zone(zmd, szone);
+ dmz_unlock_map(zmd);
+ } else {
+ /* Free the data zone and remap the chunk */
+ dmz_invalidate_blocks(zmd, dzone, 0, zrc->dev->zone_nr_blocks);
+ dmz_lock_map(zmd);
+ dmz_unmap_zone(zmd, dzone);
+ dmz_unlock_zone_reclaim(dzone);
+ dmz_free_zone(zmd, dzone);
+ dmz_map_zone(zmd, szone, chunk);
+ dmz_unlock_map(zmd);
+ }
+
+ dmz_unlock_flush(zmd);
+
+ return 0;
+}
+
+/*
+ * Reclaim an empty zone.
+ */
+static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
+{
+ struct dmz_metadata *zmd = zrc->metadata;
+
+ dmz_lock_flush(zmd);
+ dmz_lock_map(zmd);
+ dmz_unmap_zone(zmd, dzone);
+ dmz_unlock_zone_reclaim(dzone);
+ dmz_free_zone(zmd, dzone);
+ dmz_unlock_map(zmd);
+ dmz_unlock_flush(zmd);
+}
+
+/*
+ * Find a candidate zone for reclaim and process it.
+ */
+static void dmz_reclaim(struct dmz_reclaim *zrc)
+{
+ struct dmz_metadata *zmd = zrc->metadata;
+ struct dm_zone *dzone;
+ struct dm_zone *rzone;
+ unsigned long start;
+ int ret;
+
+ /* Get a data zone */
+ dzone = dmz_get_zone_for_reclaim(zmd);
+ if (!dzone)
+ return;
+
+ start = jiffies;
+
+ if (dmz_is_rnd(dzone)) {
+ if (!dmz_weight(dzone)) {
+ /* Empty zone */
+ dmz_reclaim_empty(zrc, dzone);
+ ret = 0;
+ } else {
+ /*
+ * Reclaim the random data zone by moving its
+ * valid data blocks to a free sequential zone.
+ */
+ ret = dmz_reclaim_rnd_data(zrc, dzone);
+ }
+ rzone = dzone;
+
+ } else {
+ struct dm_zone *bzone = dzone->bzone;
+ sector_t chunk_block = 0;
+
+ ret = dmz_first_valid_block(zmd, bzone, &chunk_block);
+ if (ret < 0)
+ goto out;
+
+ if (ret == 0 || chunk_block >= dzone->wp_block) {
+ /*
+ * The buffer zone is empty or its valid blocks are
+ * after the data zone write pointer.
+ */
+ ret = dmz_reclaim_buf(zrc, dzone);
+ rzone = bzone;
+ } else {
+ /*
+ * Reclaim the data zone by merging it into the
+ * buffer zone so that the buffer zone itself can
+ * be later reclaimed.
+ */
+ ret = dmz_reclaim_seq_data(zrc, dzone);
+ rzone = dzone;
+ }
+ }
+out:
+ if (ret) {
+ dmz_unlock_zone_reclaim(dzone);
+ return;
+ }
+
+ (void) dmz_flush_metadata(zrc->metadata);
+
+ dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
+ dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
+}
+
+/*
+ * Test if the target device is idle.
+ */
+static inline int dmz_target_idle(struct dmz_reclaim *zrc)
+{
+ return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
+}
+
+/*
+ * Test if reclaim is necessary.
+ */
+static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
+{
+ struct dmz_metadata *zmd = zrc->metadata;
+ unsigned int nr_rnd = dmz_nr_rnd_zones(zmd);
+ unsigned int nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
+ unsigned int p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
+
+ /* Reclaim when idle */
+ if (dmz_target_idle(zrc) && nr_unmap_rnd < nr_rnd)
+ return true;
+
+ /* If there are still plenty of random zones, do not reclaim */
+ if (p_unmap_rnd >= DMZ_RECLAIM_HIGH_UNMAP_RND)
+ return false;
+
+ /*
+ * If the percentage of unmapped random zones is low,
+ * reclaim even if the target is busy.
+ */
+ return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
+}
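A minimal sketch of the reclaim decision above, mirroring dmz_should_reclaim() with the same thresholds (the zone counts used in main() are assumed for illustration):

#include <stdbool.h>
#include <stdio.h>

#define RECLAIM_LOW_UNMAP_RND  30
#define RECLAIM_HIGH_UNMAP_RND 50

/* Mirror of the decision in dmz_should_reclaim(), for illustration. */
static bool should_reclaim(bool idle, unsigned int nr_rnd,
			   unsigned int nr_unmap_rnd)
{
	unsigned int p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;

	if (idle && nr_unmap_rnd < nr_rnd)
		return true;                         /* reclaim when idle */
	if (p_unmap_rnd >= RECLAIM_HIGH_UNMAP_RND)
		return false;                        /* plenty of free zones */
	return p_unmap_rnd <= RECLAIM_LOW_UNMAP_RND; /* low: reclaim anyway */
}

int main(void)
{
	/* Busy target, 100 random zones, 25 still unmapped: 25% -> reclaim */
	printf("busy, 25%% free: %d\n", should_reclaim(false, 100, 25));
	/* Busy target, 60 still unmapped: 60% -> no reclaim */
	printf("busy, 60%% free: %d\n", should_reclaim(false, 100, 60));
	return 0;
}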
+
+/*
+ * Reclaim work function.
+ */
+static void dmz_reclaim_work(struct work_struct *work)
+{
+ struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
+ struct dmz_metadata *zmd = zrc->metadata;
+ unsigned int nr_rnd, nr_unmap_rnd;
+ unsigned int p_unmap_rnd;
+
+ if (!dmz_should_reclaim(zrc)) {
+ mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
+ return;
+ }
+
+ /*
+ * We need to start reclaiming random zones: set up zone copy
+ * throttling to go fast if we are very low on free random zones,
+ * and to slow down if some free random zones remain, so that the
+ * impact on the user workload stays as small as possible.
+ */
+ nr_rnd = dmz_nr_rnd_zones(zmd);
+ nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
+ p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
+ if (dmz_target_idle(zrc) || p_unmap_rnd < DMZ_RECLAIM_LOW_UNMAP_RND / 2) {
+ /* Idle or very low percentage: go fast */
+ zrc->kc_throttle.throttle = 100;
+ } else {
+ /* Busy but we still have some random zone: throttle */
+ zrc->kc_throttle.throttle = min(75U, 100U - p_unmap_rnd / 2);
+ }
+
+ dmz_dev_debug(zrc->dev,
+ "Reclaim (%u): %s, %u%% free rnd zones (%u/%u)",
+ zrc->kc_throttle.throttle,
+ (dmz_target_idle(zrc) ? "Idle" : "Busy"),
+ p_unmap_rnd, nr_unmap_rnd, nr_rnd);
+
+ dmz_reclaim(zrc);
+
+ dmz_schedule_reclaim(zrc);
+}
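A small sketch of the kcopyd throttle choice above, mirroring the computation in dmz_reclaim_work() (the percentages passed in main() are assumed):

#include <stdio.h>

#define RECLAIM_LOW_UNMAP_RND 30

/* Mirror of the throttle choice in dmz_reclaim_work(), for illustration. */
static unsigned int throttle(int idle, unsigned int p_unmap_rnd)
{
	if (idle || p_unmap_rnd < RECLAIM_LOW_UNMAP_RND / 2)
		return 100;    /* idle or very low: copy at full speed */
	/* Busy: never above 75%, and slower the more free zones remain */
	return (100 - p_unmap_rnd / 2) < 75 ? (100 - p_unmap_rnd / 2) : 75;
}

int main(void)
{
	printf("busy, 20%% unmapped -> throttle %u%%\n", throttle(0, 20));
	printf("busy, 10%% unmapped -> throttle %u%%\n", throttle(0, 10));
	printf("idle -> throttle %u%%\n", throttle(1, 45));
	return 0;
}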
+
+/*
+ * Initialize reclaim.
+ */
+int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
+ struct dmz_reclaim **reclaim)
+{
+ struct dmz_reclaim *zrc;
+ int ret;
+
+ zrc = kzalloc(sizeof(struct dmz_reclaim), GFP_KERNEL);
+ if (!zrc)
+ return -ENOMEM;
+
+ zrc->dev = dev;
+ zrc->metadata = zmd;
+ zrc->atime = jiffies;
+
+ /* Reclaim kcopyd client */
+ zrc->kc = dm_kcopyd_client_create(&zrc->kc_throttle);
+ if (IS_ERR(zrc->kc)) {
+ ret = PTR_ERR(zrc->kc);
+ zrc->kc = NULL;
+ goto err;
+ }
+
+ /* Reclaim work */
+ INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
+ zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s", WQ_MEM_RECLAIM,
+ dev->name);
+ if (!zrc->wq) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ *reclaim = zrc;
+ queue_delayed_work(zrc->wq, &zrc->work, 0);
+
+ return 0;
+err:
+ if (zrc->kc)
+ dm_kcopyd_client_destroy(zrc->kc);
+ kfree(zrc);
+
+ return ret;
+}
+
+/*
+ * Terminate reclaim.
+ */
+void dmz_dtr_reclaim(struct dmz_reclaim *zrc)
+{
+ cancel_delayed_work_sync(&zrc->work);
+ destroy_workqueue(zrc->wq);
+ dm_kcopyd_client_destroy(zrc->kc);
+ kfree(zrc);
+}
+
+/*
+ * Suspend reclaim.
+ */
+void dmz_suspend_reclaim(struct dmz_reclaim *zrc)
+{
+ cancel_delayed_work_sync(&zrc->work);
+}
+
+/*
+ * Resume reclaim.
+ */
+void dmz_resume_reclaim(struct dmz_reclaim *zrc)
+{
+ queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
+}
+
+/*
+ * BIO accounting.
+ */
+void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
+{
+ zrc->atime = jiffies;
+}
+
+/*
+ * Start reclaim if necessary.
+ */
+void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
+{
+ if (dmz_should_reclaim(zrc))
+ mod_delayed_work(zrc->wq, &zrc->work, 0);
+}
+
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
new file mode 100644
index 000000000000..2b538fa817f4
--- /dev/null
+++ b/drivers/md/dm-zoned-target.c
@@ -0,0 +1,967 @@
+/*
+ * Copyright (C) 2017 Western Digital Corporation or its affiliates.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-zoned.h"
+
+#include <linux/module.h>
+
+#define DM_MSG_PREFIX "zoned"
+
+#define DMZ_MIN_BIOS 8192
+
+/*
+ * Zone BIO context.
+ */
+struct dmz_bioctx {
+ struct dmz_target *target;
+ struct dm_zone *zone;
+ struct bio *bio;
+ atomic_t ref;
+ blk_status_t status;
+};
+
+/*
+ * Chunk work descriptor.
+ */
+struct dm_chunk_work {
+ struct work_struct work;
+ atomic_t refcount;
+ struct dmz_target *target;
+ unsigned int chunk;
+ struct bio_list bio_list;
+};
+
+/*
+ * Target descriptor.
+ */
+struct dmz_target {
+ struct dm_dev *ddev;
+
+ unsigned long flags;
+
+ /* Zoned block device information */
+ struct dmz_dev *dev;
+
+ /* For metadata handling */
+ struct dmz_metadata *metadata;
+
+ /* For reclaim */
+ struct dmz_reclaim *reclaim;
+
+ /* For chunk work */
+ struct mutex chunk_lock;
+ struct radix_tree_root chunk_rxtree;
+ struct workqueue_struct *chunk_wq;
+
+ /* For cloned BIOs to zones */
+ struct bio_set *bio_set;
+
+ /* For flush */
+ spinlock_t flush_lock;
+ struct bio_list flush_list;
+ struct delayed_work flush_work;
+ struct workqueue_struct *flush_wq;
+};
+
+/*
+ * Periodic metadata flush interval (in jiffies).
+ */
+#define DMZ_FLUSH_PERIOD (10 * HZ)
+
+/*
+ * Target BIO completion.
+ */
+static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
+{
+ struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+
+ if (bioctx->status == BLK_STS_OK && status != BLK_STS_OK)
+ bioctx->status = status;
+ bio_endio(bio);
+}
+
+/*
+ * Partial clone read BIO completion callback. This terminates the
+ * target BIO when there are no more references to its context.
+ */
+static void dmz_read_bio_end_io(struct bio *bio)
+{
+ struct dmz_bioctx *bioctx = bio->bi_private;
+ blk_status_t status = bio->bi_status;
+
+ bio_put(bio);
+ dmz_bio_endio(bioctx->bio, status);
+}
+
+/*
+ * Issue a read BIO to a zone. The issued BIO may process only part of
+ * the original target BIO.
+ */
+static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
+ struct bio *bio, sector_t chunk_block,
+ unsigned int nr_blocks)
+{
+ struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+ sector_t sector;
+ struct bio *clone;
+
+ /* BIO remap sector */
+ sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
+
+ /* If the read is not partial, there is no need to clone the BIO */
+ if (nr_blocks == dmz_bio_blocks(bio)) {
+ /* Setup and submit the BIO */
+ bio->bi_iter.bi_sector = sector;
+ atomic_inc(&bioctx->ref);
+ generic_make_request(bio);
+ return 0;
+ }
+
+ /* Partial BIO: we need to clone the BIO */
+ clone = bio_clone_fast(bio, GFP_NOIO, dmz->bio_set);
+ if (!clone)
+ return -ENOMEM;
+
+ /* Setup the clone */
+ clone->bi_iter.bi_sector = sector;
+ clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
+ clone->bi_end_io = dmz_read_bio_end_io;
+ clone->bi_private = bioctx;
+
+ bio_advance(bio, clone->bi_iter.bi_size);
+
+ /* Submit the clone */
+ atomic_inc(&bioctx->ref);
+ generic_make_request(clone);
+
+ return 0;
+}
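For the partial-clone path above, the clone size is the handled block count converted to sectors and then bytes; a tiny sketch assuming 4096-byte blocks and 512-byte sectors:

#include <stdio.h>

int main(void)
{
	unsigned int nr_blocks = 3;            /* blocks handled by the clone */
	unsigned int sectors = nr_blocks << 3; /* dmz_blk2sect(): 8 sectors per block */
	unsigned int bytes = sectors << 9;     /* SECTOR_SHIFT: 512-byte sectors */

	/* The parent BIO is then advanced by the same number of bytes. */
	printf("clone covers %u sectors (%u bytes)\n", sectors, bytes);
	return 0;
}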
+
+/*
+ * Zero out pages of discarded blocks accessed by a read BIO.
+ */
+static void dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio,
+ sector_t chunk_block, unsigned int nr_blocks)
+{
+ unsigned int size = nr_blocks << DMZ_BLOCK_SHIFT;
+
+ /* Zero out nr_blocks blocks at the current BIO position */
+ swap(bio->bi_iter.bi_size, size);
+ zero_fill_bio(bio);
+ swap(bio->bi_iter.bi_size, size);
+
+ bio_advance(bio, size);
+}
+
+/*
+ * Process a read BIO.
+ */
+static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
+ struct bio *bio)
+{
+ sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
+ unsigned int nr_blocks = dmz_bio_blocks(bio);
+ sector_t end_block = chunk_block + nr_blocks;
+ struct dm_zone *rzone, *bzone;
+ int ret;
+
+ /* Reads of unmapped chunks only need to zero the BIO buffer */
+ if (!zone) {
+ zero_fill_bio(bio);
+ return 0;
+ }
+
+ dmz_dev_debug(dmz->dev, "READ chunk %llu -> %s zone %u, block %llu, %u blocks",
+ (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
+ (dmz_is_rnd(zone) ? "RND" : "SEQ"),
+ dmz_id(dmz->metadata, zone),
+ (unsigned long long)chunk_block, nr_blocks);
+
+ /* Check block validity to determine the read location */
+ bzone = zone->bzone;
+ while (chunk_block < end_block) {
+ nr_blocks = 0;
+ if (dmz_is_rnd(zone) || chunk_block < zone->wp_block) {
+ /* Test block validity in the data zone */
+ ret = dmz_block_valid(dmz->metadata, zone, chunk_block);
+ if (ret < 0)
+ return ret;
+ if (ret > 0) {
+ /* Read data zone blocks */
+ nr_blocks = ret;
+ rzone = zone;
+ }
+ }
+
+ /*
+ * No valid blocks found in the data zone.
+ * Check the buffer zone, if there is one.
+ */
+ if (!nr_blocks && bzone) {
+ ret = dmz_block_valid(dmz->metadata, bzone, chunk_block);
+ if (ret < 0)
+ return ret;
+ if (ret > 0) {
+ /* Read buffer zone blocks */
+ nr_blocks = ret;
+ rzone = bzone;
+ }
+ }
+
+ if (nr_blocks) {
+ /* Valid blocks found: read them */
+ nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
+ ret = dmz_submit_read_bio(dmz, rzone, bio, chunk_block, nr_blocks);
+ if (ret)
+ return ret;
+ chunk_block += nr_blocks;
+ } else {
+ /* No valid block: zeroout the current BIO block */
+ dmz_handle_read_zero(dmz, bio, chunk_block, 1);
+ chunk_block++;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Issue a write BIO to a zone.
+ */
+static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
+ struct bio *bio, sector_t chunk_block,
+ unsigned int nr_blocks)
+{
+ struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+
+ /* Setup and submit the BIO */
+ bio->bi_bdev = dmz->dev->bdev;
+ bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
+ atomic_inc(&bioctx->ref);
+ generic_make_request(bio);
+
+ if (dmz_is_seq(zone))
+ zone->wp_block += nr_blocks;
+}
+
+/*
+ * Write blocks directly into a data zone, at the write pointer.
+ * If a buffer zone is assigned, invalidate the corresponding blocks
+ * in the buffer zone.
+ */
+static int dmz_handle_direct_write(struct dmz_target *dmz,
+ struct dm_zone *zone, struct bio *bio,
+ sector_t chunk_block,
+ unsigned int nr_blocks)
+{
+ struct dmz_metadata *zmd = dmz->metadata;
+ struct dm_zone *bzone = zone->bzone;
+ int ret;
+
+ if (dmz_is_readonly(zone))
+ return -EROFS;
+
+ /* Submit write */
+ dmz_submit_write_bio(dmz, zone, bio, chunk_block, nr_blocks);
+
+ /*
+ * Validate the blocks in the data zone and invalidate
+ * in the buffer zone, if there is one.
+ */
+ ret = dmz_validate_blocks(zmd, zone, chunk_block, nr_blocks);
+ if (ret == 0 && bzone)
+ ret = dmz_invalidate_blocks(zmd, bzone, chunk_block, nr_blocks);
+
+ return ret;
+}
+
+/*
+ * Write blocks in the buffer zone of @zone.
+ * If no buffer zone is assigned yet, get one.
+ * Called with @zone write locked.
+ */
+static int dmz_handle_buffered_write(struct dmz_target *dmz,
+ struct dm_zone *zone, struct bio *bio,
+ sector_t chunk_block,
+ unsigned int nr_blocks)
+{
+ struct dmz_metadata *zmd = dmz->metadata;
+ struct dm_zone *bzone;
+ int ret;
+
+ /* Get the buffer zone. One will be allocated if needed */
+ bzone = dmz_get_chunk_buffer(zmd, zone);
+ if (!bzone)
+ return -ENOSPC;
+
+ if (dmz_is_readonly(bzone))
+ return -EROFS;
+
+ /* Submit write */
+ dmz_submit_write_bio(dmz, bzone, bio, chunk_block, nr_blocks);
+
+ /*
+ * Validate the blocks in the buffer zone
+ * and invalidate in the data zone.
+ */
+ ret = dmz_validate_blocks(zmd, bzone, chunk_block, nr_blocks);
+ if (ret == 0 && chunk_block < zone->wp_block)
+ ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
+
+ return ret;
+}
+
+/*
+ * Process a write BIO.
+ */
+static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
+ struct bio *bio)
+{
+ sector_t chunk_block = dmz_chunk_block(dmz->dev, dmz_bio_block(bio));
+ unsigned int nr_blocks = dmz_bio_blocks(bio);
+
+ if (!zone)
+ return -ENOSPC;
+
+ dmz_dev_debug(dmz->dev, "WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
+ (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
+ (dmz_is_rnd(zone) ? "RND" : "SEQ"),
+ dmz_id(dmz->metadata, zone),
+ (unsigned long long)chunk_block, nr_blocks);
+
+ if (dmz_is_rnd(zone) || chunk_block == zone->wp_block) {
+ /*
+ * The zone is a random zone, or it is a sequential zone and
+ * the BIO is aligned with the zone write pointer: write
+ * directly to the zone.
+ */
+ return dmz_handle_direct_write(dmz, zone, bio, chunk_block, nr_blocks);
+ }
+
+ /*
+ * This is an unaligned write in a sequential zone:
+ * use buffered write.
+ */
+ return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
+}
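A minimal sketch of the direct-versus-buffered decision above (the zone kind and write pointer values are assumed for illustration):

#include <stdio.h>
#include <stdbool.h>

/* Mirror of the write-path choice in dmz_handle_write(), for illustration. */
static const char *write_path(bool zone_is_rnd, unsigned long long wp_block,
			      unsigned long long chunk_block)
{
	if (zone_is_rnd || chunk_block == wp_block)
		return "direct";    /* random zone, or aligned with the wp */
	return "buffered";          /* unaligned write in a sequential zone */
}

int main(void)
{
	printf("seq zone, wp=100, write at 100: %s\n",
	       write_path(false, 100, 100));
	printf("seq zone, wp=100, write at 150: %s\n",
	       write_path(false, 100, 150));
	return 0;
}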
+
+/*
+ * Process a discard BIO.
+ */
+static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
+ struct bio *bio)
+{
+ struct dmz_metadata *zmd = dmz->metadata;
+ sector_t block = dmz_bio_block(bio);
+ unsigned int nr_blocks = dmz_bio_blocks(bio);
+ sector_t chunk_block = dmz_chunk_block(dmz->dev, block);
+ int ret = 0;
+
+ /* For unmapped chunks, there is nothing to do */
+ if (!zone)
+ return 0;
+
+ if (dmz_is_readonly(zone))
+ return -EROFS;
+
+ dmz_dev_debug(dmz->dev, "DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
+ (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
+ dmz_id(zmd, zone),
+ (unsigned long long)chunk_block, nr_blocks);
+
+ /*
+ * Invalidate blocks in the data zone and its
+ * buffer zone if one is mapped.
+ */
+ if (dmz_is_rnd(zone) || chunk_block < zone->wp_block)
+ ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
+ if (ret == 0 && zone->bzone)
+ ret = dmz_invalidate_blocks(zmd, zone->bzone,
+ chunk_block, nr_blocks);
+ return ret;
+}
+
+/*
+ * Process a BIO.
+ */
+static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
+ struct bio *bio)
+{
+ struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+ struct dmz_metadata *zmd = dmz->metadata;
+ struct dm_zone *zone;
+ int ret;
+
+ /*
+ * A write may trigger a zone allocation, so schedule reclaim
+ * to make sure the allocation can succeed.
+ */
+ if (bio_op(bio) == REQ_OP_WRITE)
+ dmz_schedule_reclaim(dmz->reclaim);
+
+ dmz_lock_metadata(zmd);
+
+ /*
+ * Get the data zone mapping the chunk. There may be no
+ * mapping for read and discard. If a mapping is obtained,
+ * the zone returned will be set to the active state.
+ */
+ zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(dmz->dev, bio),
+ bio_op(bio));
+ if (IS_ERR(zone)) {
+ ret = PTR_ERR(zone);
+ goto out;
+ }
+
+ /* Process the BIO */
+ if (zone) {
+ dmz_activate_zone(zone);
+ bioctx->zone = zone;
+ }
+
+ switch (bio_op(bio)) {
+ case REQ_OP_READ:
+ ret = dmz_handle_read(dmz, zone, bio);
+ break;
+ case REQ_OP_WRITE:
+ ret = dmz_handle_write(dmz, zone, bio);
+ break;
+ case REQ_OP_DISCARD:
+ case REQ_OP_WRITE_ZEROES:
+ ret = dmz_handle_discard(dmz, zone, bio);
+ break;
+ default:
+ dmz_dev_err(dmz->dev, "Unsupported BIO operation 0x%x",
+ bio_op(bio));
+ ret = -EIO;
+ }
+
+ /*
+ * Release the chunk mapping. This will check that the mapping
+ * is still valid, that is, that the zone used still has valid blocks.
+ */
+ if (zone)
+ dmz_put_chunk_mapping(zmd, zone);
+out:
+ dmz_bio_endio(bio, errno_to_blk_status(ret));
+
+ dmz_unlock_metadata(zmd);
+}
+
+/*
+ * Increment a chunk work reference counter.
+ */
+static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
+{
+ atomic_inc(&cw->refcount);
+}
+
+/*
+ * Decrement a chunk work reference count and
+ * free it if it becomes 0.
+ */
+static void dmz_put_chunk_work(struct dm_chunk_work *cw)
+{
+ if (atomic_dec_and_test(&cw->refcount)) {
+ WARN_ON(!bio_list_empty(&cw->bio_list));
+ radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
+ kfree(cw);
+ }
+}
+
+/*
+ * Chunk BIO work function.
+ */
+static void dmz_chunk_work(struct work_struct *work)
+{
+ struct dm_chunk_work *cw = container_of(work, struct dm_chunk_work, work);
+ struct dmz_target *dmz = cw->target;
+ struct bio *bio;
+
+ mutex_lock(&dmz->chunk_lock);
+
+ /* Process the chunk BIOs */
+ while ((bio = bio_list_pop(&cw->bio_list))) {
+ mutex_unlock(&dmz->chunk_lock);
+ dmz_handle_bio(dmz, cw, bio);
+ mutex_lock(&dmz->chunk_lock);
+ dmz_put_chunk_work(cw);
+ }
+
+ /* Queueing the work incremented the work refcount */
+ dmz_put_chunk_work(cw);
+
+ mutex_unlock(&dmz->chunk_lock);
+}
+
+/*
+ * Flush work.
+ */
+static void dmz_flush_work(struct work_struct *work)
+{
+ struct dmz_target *dmz = container_of(work, struct dmz_target, flush_work.work);
+ struct bio *bio;
+ int ret;
+
+ /* Flush dirty metadata blocks */
+ ret = dmz_flush_metadata(dmz->metadata);
+
+ /* Process queued flush requests */
+ while (1) {
+ spin_lock(&dmz->flush_lock);
+ bio = bio_list_pop(&dmz->flush_list);
+ spin_unlock(&dmz->flush_lock);
+
+ if (!bio)
+ break;
+
+ dmz_bio_endio(bio, errno_to_blk_status(ret));
+ }
+
+ queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
+}
+
+/*
+ * Get a chunk work and start it to process a new BIO.
+ * If the BIO chunk has no work yet, create one.
+ */
+static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+{
+ unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
+ struct dm_chunk_work *cw;
+
+ mutex_lock(&dmz->chunk_lock);
+
+ /* Get the BIO chunk work. If one is not active yet, create one */
+ cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
+ if (!cw) {
+ int ret;
+
+ /* Create a new chunk work */
+ cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOFS);
+ if (!cw)
+ goto out;
+
+ INIT_WORK(&cw->work, dmz_chunk_work);
+ atomic_set(&cw->refcount, 0);
+ cw->target = dmz;
+ cw->chunk = chunk;
+ bio_list_init(&cw->bio_list);
+
+ ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
+ if (unlikely(ret)) {
+ kfree(cw);
+ cw = NULL;
+ goto out;
+ }
+ }
+
+ bio_list_add(&cw->bio_list, bio);
+ dmz_get_chunk_work(cw);
+
+ if (queue_work(dmz->chunk_wq, &cw->work))
+ dmz_get_chunk_work(cw);
+out:
+ mutex_unlock(&dmz->chunk_lock);
+}
+
+/*
+ * Process a new BIO.
+ */
+static int dmz_map(struct dm_target *ti, struct bio *bio)
+{
+ struct dmz_target *dmz = ti->private;
+ struct dmz_dev *dev = dmz->dev;
+ struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+ sector_t sector = bio->bi_iter.bi_sector;
+ unsigned int nr_sectors = bio_sectors(bio);
+ sector_t chunk_sector;
+
+ dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
+ bio_op(bio), (unsigned long long)sector, nr_sectors,
+ (unsigned long long)dmz_bio_chunk(dmz->dev, bio),
+ (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
+ (unsigned int)dmz_bio_blocks(bio));
+
+ bio->bi_bdev = dev->bdev;
+
+ if (!nr_sectors && (bio_op(bio) != REQ_OP_FLUSH) && (bio_op(bio) != REQ_OP_WRITE))
+ return DM_MAPIO_REMAPPED;
+
+ /* The BIO should be block aligned */
+ if ((nr_sectors & DMZ_BLOCK_SECTORS_MASK) || (sector & DMZ_BLOCK_SECTORS_MASK))
+ return DM_MAPIO_KILL;
+
+ /* Initialize the BIO context */
+ bioctx->target = dmz;
+ bioctx->zone = NULL;
+ bioctx->bio = bio;
+ atomic_set(&bioctx->ref, 1);
+ bioctx->status = BLK_STS_OK;
+
+ /* Set the BIO pending in the flush list */
+ if (bio_op(bio) == REQ_OP_FLUSH || (!nr_sectors && bio_op(bio) == REQ_OP_WRITE)) {
+ spin_lock(&dmz->flush_lock);
+ bio_list_add(&dmz->flush_list, bio);
+ spin_unlock(&dmz->flush_lock);
+ mod_delayed_work(dmz->flush_wq, &dmz->flush_work, 0);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ /* Split the BIO so that it fits entirely within a single zone */
+ chunk_sector = sector & (dev->zone_nr_sectors - 1);
+ if (chunk_sector + nr_sectors > dev->zone_nr_sectors)
+ dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
+
+ /* Now ready to handle this BIO */
+ dmz_reclaim_bio_acc(dmz->reclaim);
+ dmz_queue_chunk_work(dmz, bio);
+
+ return DM_MAPIO_SUBMITTED;
+}
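The zone-boundary split above is a mask-and-compare; a small sketch assuming a zone size of 524288 sectors (256 MiB of 512-byte sectors; the BIO values are illustrative):

#include <stdio.h>

int main(void)
{
	/* Assumed zone size: must be a power of two for the mask to work. */
	unsigned long long zone_nr_sectors = 524288ULL;

	unsigned long long sector = 524000ULL;   /* BIO start sector */
	unsigned int nr_sectors = 1024;          /* BIO size in sectors */

	/* Offset of the BIO inside its zone-sized chunk */
	unsigned long long chunk_sector = sector & (zone_nr_sectors - 1);

	if (chunk_sector + nr_sectors > zone_nr_sectors)
		/* Only the part up to the chunk boundary is accepted now */
		printf("accept %llu sectors, remainder is resubmitted\n",
		       zone_nr_sectors - chunk_sector);
	else
		printf("accept the whole BIO (%u sectors)\n", nr_sectors);
	return 0;
}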
+
+/*
+ * Completed target BIO processing.
+ */
+static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
+{
+ struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
+
+ if (bioctx->status == BLK_STS_OK && *error)
+ bioctx->status = *error;
+
+ if (!atomic_dec_and_test(&bioctx->ref))
+ return DM_ENDIO_INCOMPLETE;
+
+ /* Done */
+ bio->bi_status = bioctx->status;
+
+ if (bioctx->zone) {
+ struct dm_zone *zone = bioctx->zone;
+
+ if (*error && bio_op(bio) == REQ_OP_WRITE) {
+ if (dmz_is_seq(zone))
+ set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
+ }
+ dmz_deactivate_zone(zone);
+ }
+
+ return DM_ENDIO_DONE;
+}
+
+/*
+ * Get zoned device information.
+ */
+static int dmz_get_zoned_device(struct dm_target *ti, char *path)
+{
+ struct dmz_target *dmz = ti->private;
+ struct request_queue *q;
+ struct dmz_dev *dev;
+ int ret;
+
+ /* Get the target device */
+ ret = dm_get_device(ti, path, dm_table_get_mode(ti->table), &dmz->ddev);
+ if (ret) {
+ ti->error = "Get target device failed";
+ dmz->ddev = NULL;
+ return ret;
+ }
+
+ dev = kzalloc(sizeof(struct dmz_dev), GFP_KERNEL);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev->bdev = dmz->ddev->bdev;
+ (void)bdevname(dev->bdev, dev->name);
+
+ if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE) {
+ ti->error = "Not a zoned block device";
+ ret = -EINVAL;
+ goto err;
+ }
+
+ dev->capacity = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ if (ti->begin || (ti->len != dev->capacity)) {
+ ti->error = "Partial mapping not supported";
+ ret = -EINVAL;
+ goto err;
+ }
+
+ q = bdev_get_queue(dev->bdev);
+ dev->zone_nr_sectors = q->limits.chunk_sectors;
+ dev->zone_nr_sectors_shift = ilog2(dev->zone_nr_sectors);
+
+ dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
+ dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);
+
+ dev->nr_zones = (dev->capacity + dev->zone_nr_sectors - 1)
+ >> dev->zone_nr_sectors_shift;
+
+ dmz->dev = dev;
+
+ return 0;
+err:
+ dm_put_device(ti, dmz->ddev);
+ kfree(dev);
+
+ return ret;
+}
+
+/*
+ * Cleanup zoned device information.
+ */
+static void dmz_put_zoned_device(struct dm_target *ti)
+{
+ struct dmz_target *dmz = ti->private;
+
+ dm_put_device(ti, dmz->ddev);
+ kfree(dmz->dev);
+ dmz->dev = NULL;
+}
+
+/*
+ * Setup target.
+ */
+static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct dmz_target *dmz;
+ struct dmz_dev *dev;
+ int ret;
+
+ /* Check arguments */
+ if (argc != 1) {
+ ti->error = "Invalid argument count";
+ return -EINVAL;
+ }
+
+ /* Allocate and initialize the target descriptor */
+ dmz = kzalloc(sizeof(struct dmz_target), GFP_KERNEL);
+ if (!dmz) {
+ ti->error = "Unable to allocate the zoned target descriptor";
+ return -ENOMEM;
+ }
+ ti->private = dmz;
+
+ /* Get the target zoned block device */
+ ret = dmz_get_zoned_device(ti, argv[0]);
+ if (ret) {
+ dmz->ddev = NULL;
+ goto err;
+ }
+
+ /* Initialize metadata */
+ dev = dmz->dev;
+ ret = dmz_ctr_metadata(dev, &dmz->metadata);
+ if (ret) {
+ ti->error = "Metadata initialization failed";
+ goto err_dev;
+ }
+
+ /* Set target (no write same support) */
+ ti->max_io_len = dev->zone_nr_sectors << 9;
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
+ ti->num_write_zeroes_bios = 1;
+ ti->per_io_data_size = sizeof(struct dmz_bioctx);
+ ti->flush_supported = true;
+ ti->discards_supported = true;
+ ti->split_discard_bios = true;
+
+ /* The exposed capacity is the number of chunks that can be mapped */
+ ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift;
+
+ /* Zone BIO */
+ dmz->bio_set = bioset_create(DMZ_MIN_BIOS, 0, 0);
+ if (!dmz->bio_set) {
+ ti->error = "Create BIO set failed";
+ ret = -ENOMEM;
+ goto err_meta;
+ }
+
+ /* Chunk BIO work */
+ mutex_init(&dmz->chunk_lock);
+ INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOFS);
+ dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
+ 0, dev->name);
+ if (!dmz->chunk_wq) {
+ ti->error = "Create chunk workqueue failed";
+ ret = -ENOMEM;
+ goto err_bio;
+ }
+
+ /* Flush work */
+ spin_lock_init(&dmz->flush_lock);
+ bio_list_init(&dmz->flush_list);
+ INIT_DELAYED_WORK(&dmz->flush_work, dmz_flush_work);
+ dmz->flush_wq = alloc_ordered_workqueue("dmz_fwq_%s", WQ_MEM_RECLAIM,
+ dev->name);
+ if (!dmz->flush_wq) {
+ ti->error = "Create flush workqueue failed";
+ ret = -ENOMEM;
+ goto err_cwq;
+ }
+ mod_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
+
+ /* Initialize reclaim */
+ ret = dmz_ctr_reclaim(dev, dmz->metadata, &dmz->reclaim);
+ if (ret) {
+ ti->error = "Zone reclaim initialization failed";
+ goto err_fwq;
+ }
+
+ dmz_dev_info(dev, "Target device: %llu 512-byte logical sectors (%llu blocks)",
+ (unsigned long long)ti->len,
+ (unsigned long long)dmz_sect2blk(ti->len));
+
+ return 0;
+err_fwq:
+ destroy_workqueue(dmz->flush_wq);
+err_cwq:
+ destroy_workqueue(dmz->chunk_wq);
+err_bio:
+ bioset_free(dmz->bio_set);
+err_meta:
+ dmz_dtr_metadata(dmz->metadata);
+err_dev:
+ dmz_put_zoned_device(ti);
+err:
+ kfree(dmz);
+
+ return ret;
+}
+
+/*
+ * Cleanup target.
+ */
+static void dmz_dtr(struct dm_target *ti)
+{
+ struct dmz_target *dmz = ti->private;
+
+ flush_workqueue(dmz->chunk_wq);
+ destroy_workqueue(dmz->chunk_wq);
+
+ dmz_dtr_reclaim(dmz->reclaim);
+
+ cancel_delayed_work_sync(&dmz->flush_work);
+ destroy_workqueue(dmz->flush_wq);
+
+ (void) dmz_flush_metadata(dmz->metadata);
+
+ dmz_dtr_metadata(dmz->metadata);
+
+ bioset_free(dmz->bio_set);
+
+ dmz_put_zoned_device(ti);
+
+ kfree(dmz);
+}
+
+/*
+ * Setup target request queue limits.
+ */
+static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct dmz_target *dmz = ti->private;
+ unsigned int chunk_sectors = dmz->dev->zone_nr_sectors;
+
+ limits->logical_block_size = DMZ_BLOCK_SIZE;
+ limits->physical_block_size = DMZ_BLOCK_SIZE;
+
+ blk_limits_io_min(limits, DMZ_BLOCK_SIZE);
+ blk_limits_io_opt(limits, DMZ_BLOCK_SIZE);
+
+ limits->discard_alignment = DMZ_BLOCK_SIZE;
+ limits->discard_granularity = DMZ_BLOCK_SIZE;
+ limits->max_discard_sectors = chunk_sectors;
+ limits->max_hw_discard_sectors = chunk_sectors;
+ limits->max_write_zeroes_sectors = chunk_sectors;
+
+ /* FS hint to try to align to the device zone size */
+ limits->chunk_sectors = chunk_sectors;
+ limits->max_sectors = chunk_sectors;
+
+ /* We are exposing a drive-managed zoned block device */
+ limits->zoned = BLK_ZONED_NONE;
+}
+
+/*
+ * Pass on ioctl to the backend device.
+ */
+static int dmz_prepare_ioctl(struct dm_target *ti,
+ struct block_device **bdev, fmode_t *mode)
+{
+ struct dmz_target *dmz = ti->private;
+
+ *bdev = dmz->dev->bdev;
+
+ return 0;
+}
+
+/*
+ * Stop work items on suspend.
+ */
+static void dmz_suspend(struct dm_target *ti)
+{
+ struct dmz_target *dmz = ti->private;
+
+ flush_workqueue(dmz->chunk_wq);
+ dmz_suspend_reclaim(dmz->reclaim);
+ cancel_delayed_work_sync(&dmz->flush_work);
+}
+
+/*
+ * Restart work items on resume or if suspend failed.
+ */
+static void dmz_resume(struct dm_target *ti)
+{
+ struct dmz_target *dmz = ti->private;
+
+ queue_delayed_work(dmz->flush_wq, &dmz->flush_work, DMZ_FLUSH_PERIOD);
+ dmz_resume_reclaim(dmz->reclaim);
+}
+
+static int dmz_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct dmz_target *dmz = ti->private;
+
+ return fn(ti, dmz->ddev, 0, dmz->dev->capacity, data);
+}
+
+static struct target_type dmz_type = {
+ .name = "zoned",
+ .version = {1, 0, 0},
+ .features = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
+ .module = THIS_MODULE,
+ .ctr = dmz_ctr,
+ .dtr = dmz_dtr,
+ .map = dmz_map,
+ .end_io = dmz_end_io,
+ .io_hints = dmz_io_hints,
+ .prepare_ioctl = dmz_prepare_ioctl,
+ .postsuspend = dmz_suspend,
+ .resume = dmz_resume,
+ .iterate_devices = dmz_iterate_devices,
+};
+
+static int __init dmz_init(void)
+{
+ return dm_register_target(&dmz_type);
+}
+
+static void __exit dmz_exit(void)
+{
+ dm_unregister_target(&dmz_type);
+}
+
+module_init(dmz_init);
+module_exit(dmz_exit);
+
+MODULE_DESCRIPTION(DM_NAME " target for zoned block devices");
+MODULE_AUTHOR("Damien Le Moal <[email protected]>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
new file mode 100644
index 000000000000..12419f0bfe78
--- /dev/null
+++ b/drivers/md/dm-zoned.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2017 Western Digital Corporation or its affiliates.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_ZONED_H
+#define DM_ZONED_H
+
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/device-mapper.h>
+#include <linux/dm-kcopyd.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/rwsem.h>
+#include <linux/rbtree.h>
+#include <linux/radix-tree.h>
+#include <linux/shrinker.h>
+
+/*
+ * dm-zoned creates block devices with 4KB blocks, always.
+ */
+#define DMZ_BLOCK_SHIFT 12
+#define DMZ_BLOCK_SIZE (1 << DMZ_BLOCK_SHIFT)
+#define DMZ_BLOCK_MASK (DMZ_BLOCK_SIZE - 1)
+
+#define DMZ_BLOCK_SHIFT_BITS (DMZ_BLOCK_SHIFT + 3)
+#define DMZ_BLOCK_SIZE_BITS (1 << DMZ_BLOCK_SHIFT_BITS)
+#define DMZ_BLOCK_MASK_BITS (DMZ_BLOCK_SIZE_BITS - 1)
+
+#define DMZ_BLOCK_SECTORS_SHIFT (DMZ_BLOCK_SHIFT - SECTOR_SHIFT)
+#define DMZ_BLOCK_SECTORS (DMZ_BLOCK_SIZE >> SECTOR_SHIFT)
+#define DMZ_BLOCK_SECTORS_MASK (DMZ_BLOCK_SECTORS - 1)
+
+/*
+ * 4KB block <-> 512B sector conversion.
+ */
+#define dmz_blk2sect(b) ((sector_t)(b) << DMZ_BLOCK_SECTORS_SHIFT)
+#define dmz_sect2blk(s) ((sector_t)(s) >> DMZ_BLOCK_SECTORS_SHIFT)
+
+#define dmz_bio_block(bio) dmz_sect2blk((bio)->bi_iter.bi_sector)
+#define dmz_bio_blocks(bio) dmz_sect2blk(bio_sectors(bio))
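These conversions are plain shifts by DMZ_BLOCK_SECTORS_SHIFT (3 when sectors are 512 bytes). A compilable userspace sketch of the same macros, useful as a sanity check and assuming SECTOR_SHIFT is 9:

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT            9                                /* 512-byte sectors */
#define DMZ_BLOCK_SHIFT         12                               /* 4096-byte blocks */
#define DMZ_BLOCK_SECTORS_SHIFT (DMZ_BLOCK_SHIFT - SECTOR_SHIFT) /* 3 */

#define dmz_blk2sect(b) ((uint64_t)(b) << DMZ_BLOCK_SECTORS_SHIFT)
#define dmz_sect2blk(s) ((uint64_t)(s) >> DMZ_BLOCK_SECTORS_SHIFT)

int main(void)
{
	/* block 10 starts at sector 80; sector 4096 lies in block 512 */
	printf("%llu %llu\n",
	       (unsigned long long)dmz_blk2sect(10),
	       (unsigned long long)dmz_sect2blk(4096));
	return 0;
}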
+
+/*
+ * Zoned block device information.
+ */
+struct dmz_dev {
+ struct block_device *bdev;
+
+ char name[BDEVNAME_SIZE];
+
+ sector_t capacity;
+
+ unsigned int nr_zones;
+
+ sector_t zone_nr_sectors;
+ unsigned int zone_nr_sectors_shift;
+
+ sector_t zone_nr_blocks;
+ sector_t zone_nr_blocks_shift;
+};
+
+#define dmz_bio_chunk(dev, bio) ((bio)->bi_iter.bi_sector >> \
+ (dev)->zone_nr_sectors_shift)
+#define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1))
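Chunk and intra-chunk addressing likewise reduce to a shift and a mask. A standalone sketch (not from this patch), reusing the 256 MiB zone geometry assumed earlier (2^19 sectors and 2^16 blocks per zone):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int zone_nr_sectors_shift = 19;  /* 256 MiB zones, in sectors */
	uint64_t zone_nr_blocks = 1ULL << 16;     /* 65536 4 KB blocks per zone */

	uint64_t bio_sector = 1572864;            /* example bio start sector */
	uint64_t block = bio_sector >> 3;         /* dmz_bio_block(): 196608 */

	uint64_t chunk = bio_sector >> zone_nr_sectors_shift;   /* chunk 3 */
	uint64_t chunk_block = block & (zone_nr_blocks - 1);    /* block 0 of that chunk */

	printf("chunk %llu, chunk_block %llu\n",
	       (unsigned long long)chunk, (unsigned long long)chunk_block);
	return 0;
}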
+
+/*
+ * Zone descriptor.
+ */
+struct dm_zone {
+ /* For listing the zone on a state-specific list */
+ struct list_head link;
+
+ /* Zone type and state */
+ unsigned long flags;
+
+ /* Zone activation reference count */
+ atomic_t refcount;
+
+ /* Zone write pointer block (relative to the zone start block) */
+ unsigned int wp_block;
+
+ /* Zone weight (number of valid blocks in the zone) */
+ unsigned int weight;
+
+ /* The chunk that the zone maps */
+ unsigned int chunk;
+
+ /*
+ * For a sequential data zone, pointer to the random zone
+ * used as a buffer for processing unaligned writes.
+ * For a buffer zone, this points back to the data zone.
+ */
+ struct dm_zone *bzone;
+};
+
+/*
+ * Zone flags.
+ */
+enum {
+ /* Zone write type */
+ DMZ_RND,
+ DMZ_SEQ,
+
+ /* Zone critical condition */
+ DMZ_OFFLINE,
+ DMZ_READ_ONLY,
+
+ /* How the zone is being used */
+ DMZ_META,
+ DMZ_DATA,
+ DMZ_BUF,
+
+ /* Zone internal state */
+ DMZ_ACTIVE,
+ DMZ_RECLAIM,
+ DMZ_SEQ_WRITE_ERR,
+};
+
+/*
+ * Zone data accessors.
+ */
+#define dmz_is_rnd(z) test_bit(DMZ_RND, &(z)->flags)
+#define dmz_is_seq(z) test_bit(DMZ_SEQ, &(z)->flags)
+#define dmz_is_empty(z) ((z)->wp_block == 0)
+#define dmz_is_offline(z) test_bit(DMZ_OFFLINE, &(z)->flags)
+#define dmz_is_readonly(z) test_bit(DMZ_READ_ONLY, &(z)->flags)
+#define dmz_is_active(z) test_bit(DMZ_ACTIVE, &(z)->flags)
+#define dmz_in_reclaim(z) test_bit(DMZ_RECLAIM, &(z)->flags)
+#define dmz_seq_write_err(z) test_bit(DMZ_SEQ_WRITE_ERR, &(z)->flags)
+
+#define dmz_is_meta(z) test_bit(DMZ_META, &(z)->flags)
+#define dmz_is_buf(z) test_bit(DMZ_BUF, &(z)->flags)
+#define dmz_is_data(z) test_bit(DMZ_DATA, &(z)->flags)
+
+#define dmz_weight(z) ((z)->weight)
+
+/*
+ * Message functions.
+ */
+#define dmz_dev_info(dev, format, args...) \
+ DMINFO("(%s): " format, (dev)->name, ## args)
+
+#define dmz_dev_err(dev, format, args...) \
+ DMERR("(%s): " format, (dev)->name, ## args)
+
+#define dmz_dev_warn(dev, format, args...) \
+ DMWARN("(%s): " format, (dev)->name, ## args)
+
+#define dmz_dev_debug(dev, format, args...) \
+ DMDEBUG("(%s): " format, (dev)->name, ## args)
+
+struct dmz_metadata;
+struct dmz_reclaim;
+
+/*
+ * Functions defined in dm-zoned-metadata.c
+ */
+int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **zmd);
+void dmz_dtr_metadata(struct dmz_metadata *zmd);
+int dmz_resume_metadata(struct dmz_metadata *zmd);
+
+void dmz_lock_map(struct dmz_metadata *zmd);
+void dmz_unlock_map(struct dmz_metadata *zmd);
+void dmz_lock_metadata(struct dmz_metadata *zmd);
+void dmz_unlock_metadata(struct dmz_metadata *zmd);
+void dmz_lock_flush(struct dmz_metadata *zmd);
+void dmz_unlock_flush(struct dmz_metadata *zmd);
+int dmz_flush_metadata(struct dmz_metadata *zmd);
+
+unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone);
+sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone);
+sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone);
+unsigned int dmz_nr_chunks(struct dmz_metadata *zmd);
+
+#define DMZ_ALLOC_RND 0x01
+#define DMZ_ALLOC_RECLAIM 0x02
+
+struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags);
+void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
+
+void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
+ unsigned int chunk);
+void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
+unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd);
+unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd);
+
+void dmz_activate_zone(struct dm_zone *zone);
+void dmz_deactivate_zone(struct dm_zone *zone);
+
+int dmz_lock_zone_reclaim(struct dm_zone *zone);
+void dmz_unlock_zone_reclaim(struct dm_zone *zone);
+struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd);
+
+struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
+ unsigned int chunk, int op);
+void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *zone);
+struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
+ struct dm_zone *dzone);
+
+int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
+ sector_t chunk_block, unsigned int nr_blocks);
+int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
+ sector_t chunk_block, unsigned int nr_blocks);
+int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
+ sector_t chunk_block);
+int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
+ sector_t *chunk_block);
+int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
+ struct dm_zone *to_zone);
+int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
+ struct dm_zone *to_zone, sector_t chunk_block);
+
+/*
+ * Functions defined in dm-zoned-reclaim.c
+ */
+int dmz_ctr_reclaim(struct dmz_dev *dev, struct dmz_metadata *zmd,
+ struct dmz_reclaim **zrc);
+void dmz_dtr_reclaim(struct dmz_reclaim *zrc);
+void dmz_suspend_reclaim(struct dmz_reclaim *zrc);
+void dmz_resume_reclaim(struct dmz_reclaim *zrc);
+void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
+void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
+
+#endif /* DM_ZONED_H */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 402946035308..10cabe961bdb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -19,6 +19,7 @@
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/idr.h>
+#include <linux/uio.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
@@ -58,6 +59,9 @@ static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
static struct workqueue_struct *deferred_remove_workqueue;
+atomic_t dm_global_event_nr = ATOMIC_INIT(0);
+DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
+
/*
* One of these is allocated per bio.
*/
@@ -969,6 +973,48 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
return ret;
}
+static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+{
+ struct mapped_device *md = dax_get_private(dax_dev);
+ sector_t sector = pgoff * PAGE_SECTORS;
+ struct dm_target *ti;
+ long ret = 0;
+ int srcu_idx;
+
+ ti = dm_dax_get_live_target(md, sector, &srcu_idx);
+
+ if (!ti)
+ goto out;
+ if (!ti->type->dax_copy_from_iter) {
+ ret = copy_from_iter(addr, bytes, i);
+ goto out;
+ }
+ ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
+ out:
+ dm_put_live_table(md, srcu_idx);
+
+ return ret;
+}
+
+static void dm_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+ size_t size)
+{
+ struct mapped_device *md = dax_get_private(dax_dev);
+ sector_t sector = pgoff * PAGE_SECTORS;
+ struct dm_target *ti;
+ int srcu_idx;
+
+ ti = dm_dax_get_live_target(md, sector, &srcu_idx);
+
+ if (!ti)
+ goto out;
+ if (ti->type->dax_flush)
+ ti->type->dax_flush(ti, pgoff, addr, size);
+ out:
+ dm_put_live_table(md, srcu_idx);
+}
+
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
* allowed for all bio types except REQ_PREFLUSH.
@@ -1010,6 +1056,85 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
/*
+ * The zone descriptors obtained with a zone report indicate
+ * zone positions within the target device. The zone descriptors
+ * must be remapped to match their position within the dm device.
+ * A target may call dm_remap_zone_report after completion of a
+ * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained
+ * from the target device mapping into the dm device's sector range.
+ */
+void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
+{
+#ifdef CONFIG_BLK_DEV_ZONED
+ struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+ struct bio *report_bio = tio->io->bio;
+ struct blk_zone_report_hdr *hdr = NULL;
+ struct blk_zone *zone;
+ unsigned int nr_rep = 0;
+ unsigned int ofst;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ void *addr;
+
+ if (bio->bi_status)
+ return;
+
+ /*
+ * Remap the start sector of the reported zones. For sequential zones,
+ * also remap the write pointer position.
+ */
+ bio_for_each_segment(bvec, report_bio, iter) {
+ addr = kmap_atomic(bvec.bv_page);
+
+ /* Remember the report header in the first page */
+ if (!hdr) {
+ hdr = addr;
+ ofst = sizeof(struct blk_zone_report_hdr);
+ } else
+ ofst = 0;
+
+ /* Set zones start sector */
+ while (hdr->nr_zones && ofst < bvec.bv_len) {
+ zone = addr + ofst;
+ if (zone->start >= start + ti->len) {
+ hdr->nr_zones = 0;
+ break;
+ }
+ zone->start = zone->start + ti->begin - start;
+ if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
+ if (zone->cond == BLK_ZONE_COND_FULL)
+ zone->wp = zone->start + zone->len;
+ else if (zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->wp = zone->start;
+ else
+ zone->wp = zone->wp + ti->begin - start;
+ }
+ ofst += sizeof(struct blk_zone);
+ hdr->nr_zones--;
+ nr_rep++;
+ }
+
+ if (addr != hdr)
+ kunmap_atomic(addr);
+
+ if (!hdr->nr_zones)
+ break;
+ }
+
+ if (hdr) {
+ hdr->nr_zones = nr_rep;
+ kunmap_atomic(hdr);
+ }
+
+ bio_advance(report_bio, report_bio->bi_iter.bi_size);
+
+#else /* !CONFIG_BLK_DEV_ZONED */
+ bio->bi_status = BLK_STS_NOTSUPP;
+#endif
+}
+EXPORT_SYMBOL_GPL(dm_remap_zone_report);
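As a usage illustration only (this caller is not part of the patch), a remapping target's end_io hook could feed completed zone report bios through the helper exported above; example_ctx and its start field are hypothetical stand-ins for the target's private mapping state:

/* Hypothetical per-target context; "start" would be the first sector of
 * the mapping on the backing device. */
struct example_ctx {
	sector_t start;
};

static int example_end_io(struct dm_target *ti, struct bio *bio,
			  blk_status_t *error)
{
	struct example_ctx *ctx = ti->private;

	/* dm_remap_zone_report() itself returns early for bios that failed */
	if (bio_op(bio) == REQ_OP_ZONE_REPORT)
		dm_remap_zone_report(ti, bio, ctx->start);

	return DM_ENDIO_DONE;
}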
+
+/*
* Flush current->bio_list when the target map method blocks.
* This fixes deadlocks in snapshot and possibly in other targets.
*/
@@ -1149,7 +1274,8 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
return r;
}
- bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
+ if (bio_op(bio) != REQ_OP_ZONE_REPORT)
+ bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
clone->bi_iter.bi_size = to_bytes(len);
if (unlikely(bio_integrity(bio) != NULL))
@@ -1338,7 +1464,11 @@ static int __split_and_process_non_flush(struct clone_info *ci)
if (!dm_target_is_valid(ti))
return -EIO;
- len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
+ if (bio_op(bio) == REQ_OP_ZONE_REPORT)
+ len = ci->sector_count;
+ else
+ len = min_t(sector_t, max_io_len(ci->sector, ti),
+ ci->sector_count);
r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
if (r < 0)
@@ -1381,6 +1511,10 @@ static void __split_and_process_bio(struct mapped_device *md,
ci.sector_count = 0;
error = __send_empty_flush(&ci);
/* dec_pending submits any data associated with flush */
+ } else if (bio_op(bio) == REQ_OP_ZONE_RESET) {
+ ci.bio = bio;
+ ci.sector_count = 0;
+ error = __split_and_process_non_flush(&ci);
} else {
ci.bio = bio;
ci.sector_count = bio_sectors(bio);
@@ -1759,7 +1893,9 @@ static void event_callback(void *context)
dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
atomic_inc(&md->event_nr);
+ atomic_inc(&dm_global_event_nr);
wake_up(&md->eventq);
+ wake_up(&dm_global_eventq);
}
/*
@@ -2865,6 +3001,8 @@ static const struct block_device_operations dm_blk_dops = {
static const struct dax_operations dm_dax_ops = {
.direct_access = dm_dax_direct_access,
+ .copy_from_iter = dm_dax_copy_from_iter,
+ .flush = dm_dax_flush,
};
/*
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index b0536cfd8e17..06a64d5d8c6c 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -170,7 +170,7 @@ static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
conf->nfaults = n+1;
}
-static void faulty_make_request(struct mddev *mddev, struct bio *bio)
+static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
{
struct faulty_conf *conf = mddev->private;
int failit = 0;
@@ -182,7 +182,7 @@ static void faulty_make_request(struct mddev *mddev, struct bio *bio)
* just fail immediately
*/
bio_io_error(bio);
- return;
+ return true;
}
if (check_sector(conf, bio->bi_iter.bi_sector,
@@ -224,6 +224,7 @@ static void faulty_make_request(struct mddev *mddev, struct bio *bio)
bio->bi_bdev = conf->rdev->bdev;
generic_make_request(bio);
+ return true;
}
static void faulty_status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index df6f2c98eca7..5f1eb9189542 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -245,7 +245,7 @@ static void linear_free(struct mddev *mddev, void *priv)
kfree(conf);
}
-static void linear_make_request(struct mddev *mddev, struct bio *bio)
+static bool linear_make_request(struct mddev *mddev, struct bio *bio)
{
char b[BDEVNAME_SIZE];
struct dev_info *tmp_dev;
@@ -254,7 +254,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
- return;
+ return true;
}
tmp_dev = which_dev(mddev, bio_sector);
@@ -292,7 +292,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
mddev_check_write_zeroes(mddev, bio);
generic_make_request(bio);
}
- return;
+ return true;
out_of_bounds:
pr_err("md/linear:%s: make_request: Sector %llu out of bounds on dev %s: %llu sectors, offset %llu\n",
@@ -302,6 +302,7 @@ out_of_bounds:
(unsigned long long)tmp_dev->rdev->sectors,
(unsigned long long)start_sector);
bio_io_error(bio);
+ return true;
}
static void linear_status (struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 31bcbfb09fef..8cdca0296749 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -203,6 +203,14 @@ struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);
+static struct bio *md_bio_alloc_sync(struct mddev *mddev)
+{
+ if (!mddev || !mddev->sync_set)
+ return bio_alloc(GFP_NOIO, 1);
+
+ return bio_alloc_bioset(GFP_NOIO, 1, mddev->sync_set);
+}
+
/*
* We have a system wide 'event count' that is incremented
* on any 'interesting' event, and readers of /proc/mdstat
@@ -277,7 +285,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
bio_endio(bio);
return BLK_QC_T_NONE;
}
- smp_rmb(); /* Ensure implications of 'active' are visible */
+check_suspended:
rcu_read_lock();
if (mddev->suspended) {
DEFINE_WAIT(__wait);
@@ -302,7 +310,11 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
sectors = bio_sectors(bio);
/* bio could be mergeable after passing to underlayer */
bio->bi_opf &= ~REQ_NOMERGE;
- mddev->pers->make_request(mddev, bio);
+ if (!mddev->pers->make_request(mddev, bio)) {
+ atomic_dec(&mddev->active_io);
+ wake_up(&mddev->sb_wait);
+ goto check_suspended;
+ }
cpu = part_stat_lock();
part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
@@ -327,6 +339,7 @@ void mddev_suspend(struct mddev *mddev)
if (mddev->suspended++)
return;
synchronize_rcu();
+ wake_up(&mddev->sb_wait);
wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
mddev->pers->quiesce(mddev, 1);
@@ -462,7 +475,7 @@ static void mddev_delayed_delete(struct work_struct *ws);
static void mddev_put(struct mddev *mddev)
{
- struct bio_set *bs = NULL;
+ struct bio_set *bs = NULL, *sync_bs = NULL;
if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
return;
@@ -472,7 +485,9 @@ static void mddev_put(struct mddev *mddev)
* so destroy it */
list_del_init(&mddev->all_mddevs);
bs = mddev->bio_set;
+ sync_bs = mddev->sync_set;
mddev->bio_set = NULL;
+ mddev->sync_set = NULL;
if (mddev->gendisk) {
/* We did a probe so need to clean up. Call
* queue_work inside the spinlock so that
@@ -487,6 +502,8 @@ static void mddev_put(struct mddev *mddev)
spin_unlock(&all_mddevs_lock);
if (bs)
bioset_free(bs);
+ if (sync_bs)
+ bioset_free(sync_bs);
}
static void md_safemode_timeout(unsigned long data);
@@ -751,7 +768,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
if (test_bit(Faulty, &rdev->flags))
return;
- bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
+ bio = md_bio_alloc_sync(mddev);
atomic_inc(&rdev->nr_pending);
@@ -783,7 +800,7 @@ int md_super_wait(struct mddev *mddev)
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int op, int op_flags, bool metadata_op)
{
- struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
+ struct bio *bio = md_bio_alloc_sync(rdev->mddev);
int ret;
bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
@@ -1852,7 +1869,7 @@ retry:
max_dev = le32_to_cpu(sb->max_dev);
for (i=0; i<max_dev;i++)
- sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
+ sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
@@ -5432,6 +5449,11 @@ int md_run(struct mddev *mddev)
if (!mddev->bio_set)
return -ENOMEM;
}
+ if (mddev->sync_set == NULL) {
+ mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+ if (!mddev->sync_set)
+ return -ENOMEM;
+ }
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
@@ -7950,12 +7972,14 @@ EXPORT_SYMBOL(md_done_sync);
* If we need to update some array metadata (e.g. 'active' flag
* in superblock) before writing, schedule a superblock update
* and wait for it to complete.
+ * A return value of 'false' means that the write wasn't recorded
+ * and cannot proceed as the array is being suspended.
*/
-void md_write_start(struct mddev *mddev, struct bio *bi)
+bool md_write_start(struct mddev *mddev, struct bio *bi)
{
int did_change = 0;
if (bio_data_dir(bi) != WRITE)
- return;
+ return true;
BUG_ON(mddev->ro == 1);
if (mddev->ro == 2) {
@@ -7987,7 +8011,12 @@ void md_write_start(struct mddev *mddev, struct bio *bi)
if (did_change)
sysfs_notify_dirent_safe(mddev->sysfs_state);
wait_event(mddev->sb_wait,
- !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
+ !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) && !mddev->suspended);
+ if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
+ percpu_ref_put(&mddev->writes_pending);
+ return false;
+ }
+ return true;
}
EXPORT_SYMBOL(md_write_start);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 0fa1de42c42b..991f0fe2dcc6 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -444,6 +444,9 @@ struct mddev {
struct attribute_group *to_remove;
struct bio_set *bio_set;
+ struct bio_set *sync_set; /* for sync operations like
+ * metadata and bitmap writes
+ */
/* Generic flush handling.
* The last to finish preflush schedules a worker to submit
@@ -510,7 +513,7 @@ struct md_personality
int level;
struct list_head list;
struct module *owner;
- void (*make_request)(struct mddev *mddev, struct bio *bio);
+ bool (*make_request)(struct mddev *mddev, struct bio *bio);
int (*run)(struct mddev *mddev);
void (*free)(struct mddev *mddev, void *priv);
void (*status)(struct seq_file *seq, struct mddev *mddev);
@@ -649,7 +652,7 @@ extern void md_wakeup_thread(struct md_thread *thread);
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
extern int mddev_init_writes_pending(struct mddev *mddev);
-extern void md_write_start(struct mddev *mddev, struct bio *bi);
+extern bool md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_inc(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 68d036e64041..23a162ba6c56 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -106,7 +106,7 @@ static void multipath_end_request(struct bio *bio)
rdev_dec_pending(rdev, conf->mddev);
}
-static void multipath_make_request(struct mddev *mddev, struct bio * bio)
+static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
{
struct mpconf *conf = mddev->private;
struct multipath_bh * mp_bh;
@@ -114,7 +114,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
- return;
+ return true;
}
mp_bh = mempool_alloc(conf->pool, GFP_NOIO);
@@ -126,7 +126,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
if (mp_bh->path < 0) {
bio_io_error(bio);
mempool_free(mp_bh, conf->pool);
- return;
+ return true;
}
multipath = conf->multipaths + mp_bh->path;
@@ -141,7 +141,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
mddev_check_writesame(mddev, &mp_bh->bio);
mddev_check_write_zeroes(mddev, &mp_bh->bio);
generic_make_request(&mp_bh->bio);
- return;
+ return true;
}
static void multipath_status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index d6c0bc76e837..94d9ae9b0fd0 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -548,7 +548,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
bio_endio(bio);
}
-static void raid0_make_request(struct mddev *mddev, struct bio *bio)
+static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
{
struct strip_zone *zone;
struct md_rdev *tmp_dev;
@@ -559,12 +559,12 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
- return;
+ return true;
}
if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) {
raid0_handle_discard(mddev, bio);
- return;
+ return true;
}
bio_sector = bio->bi_iter.bi_sector;
@@ -599,6 +599,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
mddev_check_writesame(mddev, bio);
mddev_check_write_zeroes(mddev, bio);
generic_make_request(bio);
+ return true;
}
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 98ca2c1d3226..3febfc8391fb 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1321,7 +1321,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
* Continue immediately if no resync is active currently.
*/
- md_write_start(mddev, bio); /* wait on superblock update early */
if ((bio_end_sector(bio) > mddev->suspend_lo &&
bio->bi_iter.bi_sector < mddev->suspend_hi) ||
@@ -1335,7 +1334,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
*/
DEFINE_WAIT(w);
for (;;) {
- flush_signals(current);
+ sigset_t full, old;
prepare_to_wait(&conf->wait_barrier,
&w, TASK_INTERRUPTIBLE);
if (bio_end_sector(bio) <= mddev->suspend_lo ||
@@ -1345,7 +1344,10 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
bio->bi_iter.bi_sector,
bio_end_sector(bio))))
break;
+ sigfillset(&full);
+ sigprocmask(SIG_BLOCK, &full, &old);
schedule();
+ sigprocmask(SIG_SETMASK, &old, NULL);
}
finish_wait(&conf->wait_barrier, &w);
}
@@ -1550,13 +1552,13 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
wake_up(&conf->wait_barrier);
}
-static void raid1_make_request(struct mddev *mddev, struct bio *bio)
+static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
{
sector_t sectors;
if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
- return;
+ return true;
}
/*
@@ -1571,8 +1573,12 @@ static void raid1_make_request(struct mddev *mddev, struct bio *bio)
if (bio_data_dir(bio) == READ)
raid1_read_request(mddev, bio, sectors, NULL);
- else
+ else {
+ if (!md_write_start(mddev, bio))
+ return false;
raid1_write_request(mddev, bio, sectors);
+ }
+ return true;
}
static void raid1_status(struct seq_file *seq, struct mddev *mddev)
@@ -2165,9 +2171,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
struct r1conf *conf = mddev->private;
int i;
int disks = conf->raid_disks * 2;
- struct bio *bio, *wbio;
-
- bio = r1_bio->bios[r1_bio->read_disk];
+ struct bio *wbio;
if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
/* ouch - failed to read all of that. */
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 57a250fdbbcc..5026e7ad51d3 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1303,8 +1303,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
sector_t sectors;
int max_sectors;
- md_write_start(mddev, bio);
-
/*
* Register the new request and wait if the reconstruction
* thread has put up a bar for new requests.
@@ -1525,7 +1523,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
raid10_write_request(mddev, bio, r10_bio);
}
-static void raid10_make_request(struct mddev *mddev, struct bio *bio)
+static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
{
struct r10conf *conf = mddev->private;
sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
@@ -1534,9 +1532,12 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
md_flush_request(mddev, bio);
- return;
+ return true;
}
+ if (!md_write_start(mddev, bio))
+ return false;
+
/*
* If this request crosses a chunk boundary, we need to split
* it.
@@ -1553,6 +1554,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
/* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
+ return true;
}
static void raid10_status(struct seq_file *seq, struct mddev *mddev)
@@ -3293,7 +3295,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
biolist = bio;
bio->bi_end_io = end_sync_read;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
- if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+ if (test_bit(FailFast, &rdev->flags))
bio->bi_opf |= MD_FAILFAST;
bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
@@ -3305,7 +3307,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
continue;
}
atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
/* Need to set up for writing to the replacement */
bio = r10_bio->devs[i].repl_bio;
@@ -3316,11 +3317,12 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
biolist = bio;
bio->bi_end_io = end_sync_write;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
- if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+ if (test_bit(FailFast, &rdev->flags))
bio->bi_opf |= MD_FAILFAST;
bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev;
count++;
+ rcu_read_unlock();
}
if (count < 2) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 62c965be97e1..2ceb338b094b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5479,7 +5479,6 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
bi->bi_next = NULL;
- md_write_start(mddev, bi);
stripe_sectors = conf->chunk_sectors *
(conf->raid_disks - conf->max_degraded);
@@ -5549,11 +5548,10 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
release_stripe_plug(mddev, sh);
}
- md_write_end(mddev);
bio_endio(bi);
}
-static void raid5_make_request(struct mddev *mddev, struct bio * bi)
+static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
{
struct r5conf *conf = mddev->private;
int dd_idx;
@@ -5569,10 +5567,10 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
int ret = r5l_handle_flush_request(conf->log, bi);
if (ret == 0)
- return;
+ return true;
if (ret == -ENODEV) {
md_flush_request(mddev, bi);
- return;
+ return true;
}
/* ret == -EAGAIN, fallback */
/*
@@ -5582,6 +5580,8 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
do_flush = bi->bi_opf & REQ_PREFLUSH;
}
+ if (!md_write_start(mddev, bi))
+ return false;
/*
* If array is degraded, better not do chunk aligned read because
* later we might have to read it again in order to reconstruct
@@ -5591,18 +5591,18 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
mddev->reshape_position == MaxSector) {
bi = chunk_aligned_read(mddev, bi);
if (!bi)
- return;
+ return true;
}
if (unlikely(bio_op(bi) == REQ_OP_DISCARD)) {
make_discard_request(mddev, bi);
- return;
+ md_write_end(mddev);
+ return true;
}
logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
last_sector = bio_end_sector(bi);
bi->bi_next = NULL;
- md_write_start(mddev, bi);
prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
@@ -5693,12 +5693,15 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
* userspace, we want an interruptible
* wait.
*/
- flush_signals(current);
prepare_to_wait(&conf->wait_for_overlap,
&w, TASK_INTERRUPTIBLE);
if (logical_sector >= mddev->suspend_lo &&
logical_sector < mddev->suspend_hi) {
+ sigset_t full, old;
+ sigfillset(&full);
+ sigprocmask(SIG_BLOCK, &full, &old);
schedule();
+ sigprocmask(SIG_SETMASK, &old, NULL);
do_prepare = true;
}
goto retry;
@@ -5740,6 +5743,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
if (rw == WRITE)
md_write_end(mddev);
bio_endio(bi);
+ return true;
}
static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 9dfc79800c71..bf45977b2823 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -28,6 +28,8 @@
#include <linux/string.h>
#include <linux/types.h>
+#include <drm/drm_edid.h>
+
#include "cec-priv.h"
static void cec_fill_msg_report_features(struct cec_adapter *adap,
@@ -366,6 +368,8 @@ int cec_thread_func(void *_adap)
* transmit should be canceled.
*/
err = wait_event_interruptible_timeout(adap->kthread_waitq,
+ (adap->needs_hpd &&
+ (!adap->is_configured && !adap->is_configuring)) ||
kthread_should_stop() ||
(!adap->transmitting &&
!list_empty(&adap->transmit_queue)),
@@ -381,7 +385,9 @@ int cec_thread_func(void *_adap)
mutex_lock(&adap->lock);
- if (kthread_should_stop()) {
+ if ((adap->needs_hpd &&
+ (!adap->is_configured && !adap->is_configuring)) ||
+ kthread_should_stop()) {
cec_flush(adap);
goto unlock;
}
@@ -392,7 +398,7 @@ int cec_thread_func(void *_adap)
* happen and is an indication of a faulty CEC adapter
* driver, or the CEC bus is in some weird state.
*/
- dprintk(0, "message %*ph timed out!\n",
+ dprintk(0, "%s: message %*ph timed out!\n", __func__,
adap->transmitting->msg.len,
adap->transmitting->msg.msg);
/* Just give up on this. */
@@ -468,7 +474,7 @@ void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt,
struct cec_msg *msg;
u64 ts = ktime_get_ns();
- dprintk(2, "cec_transmit_done %02x\n", status);
+ dprintk(2, "%s: status %02x\n", __func__, status);
mutex_lock(&adap->lock);
data = adap->transmitting;
if (!data) {
@@ -477,7 +483,8 @@ void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt,
* unplugged while the transmit is ongoing. Ignore this
* transmit in that case.
*/
- dprintk(1, "cec_transmit_done without an ongoing transmit!\n");
+ dprintk(1, "%s was called without an ongoing transmit!\n",
+ __func__);
goto unlock;
}
@@ -504,6 +511,12 @@ void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt,
!(status & (CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_OK))) {
/* Retry this message */
data->attempts--;
+ if (msg->timeout)
+ dprintk(2, "retransmit: %*ph (attempts: %d, wait for 0x%02x)\n",
+ msg->len, msg->msg, data->attempts, msg->reply);
+ else
+ dprintk(2, "retransmit: %*ph (attempts: %d)\n",
+ msg->len, msg->msg, data->attempts);
/* Add the message in front of the transmit queue */
list_add(&data->list, &adap->transmit_queue);
adap->transmit_queue_sz++;
@@ -544,6 +557,32 @@ unlock:
}
EXPORT_SYMBOL_GPL(cec_transmit_done);
+void cec_transmit_attempt_done(struct cec_adapter *adap, u8 status)
+{
+ switch (status) {
+ case CEC_TX_STATUS_OK:
+ cec_transmit_done(adap, status, 0, 0, 0, 0);
+ return;
+ case CEC_TX_STATUS_ARB_LOST:
+ cec_transmit_done(adap, status, 1, 0, 0, 0);
+ return;
+ case CEC_TX_STATUS_NACK:
+ cec_transmit_done(adap, status, 0, 1, 0, 0);
+ return;
+ case CEC_TX_STATUS_LOW_DRIVE:
+ cec_transmit_done(adap, status, 0, 0, 1, 0);
+ return;
+ case CEC_TX_STATUS_ERROR:
+ cec_transmit_done(adap, status, 0, 0, 0, 1);
+ return;
+ default:
+ /* Should never happen */
+ WARN(1, "cec-%s: invalid status 0x%02x\n", adap->name, status);
+ return;
+ }
+}
+EXPORT_SYMBOL_GPL(cec_transmit_attempt_done);
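For drivers whose hardware only reports a per-attempt result, the helper above fills in the detailed counters. A minimal sketch (not from this patch) of how a hypothetical driver might report a transmit attempt, where hw_tx_ok stands in for whatever status bit the hardware exposes:

static void example_handle_tx_done(struct cec_adapter *adap, bool hw_tx_ok)
{
	if (hw_tx_ok)
		cec_transmit_attempt_done(adap, CEC_TX_STATUS_OK);
	else
		/* treat any failure as a NACK for this hypothetical hardware */
		cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK);
}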
+
/*
* Called when waiting for a reply times out.
*/
@@ -647,7 +686,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
return -EINVAL;
}
if (!adap->is_configured && !adap->is_configuring) {
- if (msg->msg[0] != 0xf0) {
+ if (adap->needs_hpd || msg->msg[0] != 0xf0) {
dprintk(1, "%s: adapter is unconfigured\n", __func__);
return -ENONET;
}
@@ -911,7 +950,7 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
mutex_lock(&adap->lock);
- dprintk(2, "cec_received_msg: %*ph\n", msg->len, msg->msg);
+ dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg);
/* Check if this message was for us (directed or broadcast). */
if (!cec_msg_is_broadcast(msg))
@@ -1112,9 +1151,6 @@ static int cec_config_log_addr(struct cec_adapter *adap,
las->log_addr[idx] = log_addr;
las->log_addr_mask |= 1 << log_addr;
adap->phys_addrs[log_addr] = adap->phys_addr;
-
- dprintk(2, "claimed addr %d (%d)\n", log_addr,
- las->primary_device_type[idx]);
return 1;
}
@@ -1126,7 +1162,9 @@ static int cec_config_log_addr(struct cec_adapter *adap,
*/
static void cec_adap_unconfigure(struct cec_adapter *adap)
{
- WARN_ON(adap->ops->adap_log_addr(adap, CEC_LOG_ADDR_INVALID));
+ if (!adap->needs_hpd ||
+ adap->phys_addr != CEC_PHYS_ADDR_INVALID)
+ WARN_ON(adap->ops->adap_log_addr(adap, CEC_LOG_ADDR_INVALID));
adap->log_addrs.log_addr_mask = 0;
adap->is_configuring = false;
adap->is_configured = false;
@@ -1300,7 +1338,7 @@ configured:
/* Report Physical Address */
cec_msg_report_physical_addr(&msg, adap->phys_addr,
las->primary_device_type[i]);
- dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
+ dprintk(1, "config: la %d pa %x.%x.%x.%x\n",
las->log_addr[i],
cec_phys_addr_exp(adap->phys_addr));
cec_transmit_msg_fh(adap, &msg, NULL, false);
@@ -1355,6 +1393,8 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
if (phys_addr == adap->phys_addr || adap->devnode.unregistered)
return;
+ dprintk(1, "new physical address %x.%x.%x.%x\n",
+ cec_phys_addr_exp(phys_addr));
if (phys_addr == CEC_PHYS_ADDR_INVALID ||
adap->phys_addr != CEC_PHYS_ADDR_INVALID) {
adap->phys_addr = CEC_PHYS_ADDR_INVALID;
@@ -1364,7 +1404,7 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
if (adap->monitor_all_cnt)
WARN_ON(call_op(adap, adap_monitor_all_enable, false));
mutex_lock(&adap->devnode.lock);
- if (list_empty(&adap->devnode.fhs))
+ if (adap->needs_hpd || list_empty(&adap->devnode.fhs))
WARN_ON(adap->ops->adap_enable(adap, false));
mutex_unlock(&adap->devnode.lock);
if (phys_addr == CEC_PHYS_ADDR_INVALID)
@@ -1372,7 +1412,7 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
}
mutex_lock(&adap->devnode.lock);
- if (list_empty(&adap->devnode.fhs) &&
+ if ((adap->needs_hpd || list_empty(&adap->devnode.fhs)) &&
adap->ops->adap_enable(adap, true)) {
mutex_unlock(&adap->devnode.lock);
return;
@@ -1380,7 +1420,7 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
if (adap->monitor_all_cnt &&
call_op(adap, adap_monitor_all_enable, true)) {
- if (list_empty(&adap->devnode.fhs))
+ if (adap->needs_hpd || list_empty(&adap->devnode.fhs))
WARN_ON(adap->ops->adap_enable(adap, false));
mutex_unlock(&adap->devnode.lock);
return;
@@ -1404,6 +1444,18 @@ void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
}
EXPORT_SYMBOL_GPL(cec_s_phys_addr);
+void cec_s_phys_addr_from_edid(struct cec_adapter *adap,
+ const struct edid *edid)
+{
+ u16 pa = CEC_PHYS_ADDR_INVALID;
+
+ if (edid && edid->extensions)
+ pa = cec_get_edid_phys_addr((const u8 *)edid,
+ EDID_LENGTH * (edid->extensions + 1), NULL);
+ cec_s_phys_addr(adap, pa, false);
+}
+EXPORT_SYMBOL_GPL(cec_s_phys_addr_from_edid);
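A minimal usage sketch (not part of this patch) for an HDMI driver reacting to hot-plug or EDID changes; edid is whatever struct edid the hypothetical driver parsed from the connector, and passing NULL simply invalidates the physical address, as the helper shows:

static void example_hpd_changed(struct cec_adapter *adap,
				const struct edid *edid)
{
	/* edid == NULL (or no extension blocks) maps to CEC_PHYS_ADDR_INVALID */
	cec_s_phys_addr_from_edid(adap, edid);
}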
+
/*
* Called from either the ioctl or a driver to set the logical addresses.
*
@@ -1534,12 +1586,12 @@ int __cec_s_log_addrs(struct cec_adapter *adap,
if (log_addrs->num_log_addrs == 2) {
if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_AUDIOSYSTEM) |
(1 << CEC_LOG_ADDR_TYPE_TV)))) {
- dprintk(1, "Two LAs is only allowed for audiosystem and TV\n");
+ dprintk(1, "two LAs is only allowed for audiosystem and TV\n");
return -EINVAL;
}
if (!(type_mask & ((1 << CEC_LOG_ADDR_TYPE_PLAYBACK) |
(1 << CEC_LOG_ADDR_TYPE_RECORD)))) {
- dprintk(1, "An audiosystem/TV can only be combined with record or playback\n");
+ dprintk(1, "an audiosystem/TV can only be combined with record or playback\n");
return -EINVAL;
}
}
@@ -1653,7 +1705,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
bool from_unregistered = init_laddr == 0xf;
struct cec_msg tx_cec_msg = { };
- dprintk(1, "cec_receive_notify: %*ph\n", msg->len, msg->msg);
+ dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg);
/* If this is a CDC-Only device, then ignore any non-CDC messages */
if (cec_is_cdc_only(&adap->log_addrs) &&
@@ -1722,7 +1774,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
if (!from_unregistered)
adap->phys_addrs[init_laddr] = pa;
- dprintk(1, "Reported physical address %x.%x.%x.%x for logical address %d\n",
+ dprintk(1, "reported physical address %x.%x.%x.%x for logical address %d\n",
cec_phys_addr_exp(pa), init_laddr);
break;
}
diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
index 999926f731c8..f7eb4c54a354 100644
--- a/drivers/media/cec/cec-api.c
+++ b/drivers/media/cec/cec-api.c
@@ -202,7 +202,8 @@ static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
err = -EPERM;
else if (adap->is_configuring)
err = -ENONET;
- else if (!adap->is_configured && msg.msg[0] != 0xf0)
+ else if (!adap->is_configured &&
+ (adap->needs_hpd || msg.msg[0] != 0xf0))
err = -ENONET;
else if (cec_is_busy(adap, fh))
err = -EBUSY;
@@ -515,6 +516,7 @@ static int cec_open(struct inode *inode, struct file *filp)
mutex_lock(&devnode->lock);
if (list_empty(&devnode->fhs) &&
+ !adap->needs_hpd &&
adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
err = adap->ops->adap_enable(adap, true);
if (err) {
@@ -559,6 +561,7 @@ static int cec_release(struct inode *inode, struct file *filp)
mutex_lock(&devnode->lock);
list_del(&fh->list);
if (list_empty(&devnode->fhs) &&
+ !adap->needs_hpd &&
adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
WARN_ON(adap->ops->adap_enable(adap, false));
}
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index 2f87748ba4fc..b516d599d6c4 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -230,6 +230,7 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE;
adap->capabilities = caps;
+ adap->needs_hpd = caps & CEC_CAP_NEEDS_HPD;
adap->available_log_addrs = available_las;
adap->sequence = 0;
adap->ops = ops;
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index d38bf9bce480..af694f2066a2 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -193,8 +193,10 @@ static void dvb_ca_private_put(struct dvb_ca_private *ca)
}
static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca);
-static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * ebuf, int ecount);
-static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 * ebuf, int ecount);
+static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
+ u8 *ebuf, int ecount);
+static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
+ u8 *ebuf, int ecount);
/**
@@ -206,7 +208,7 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 * e
* @nlen: Number of bytes in needle.
* @return Pointer into haystack needle was found at, or NULL if not found.
*/
-static char *findstr(char * haystack, int hlen, char * needle, int nlen)
+static char *findstr(char *haystack, int hlen, char *needle, int nlen)
{
int i;
@@ -390,7 +392,8 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
* @return 0 on success, nonzero on error.
*/
static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca, int slot,
- int *address, int *tupleType, int *tupleLength, u8 * tuple)
+ int *address, int *tupleType,
+ int *tupleLength, u8 *tuple)
{
int i;
int _tupleType;
@@ -621,7 +624,8 @@ static int dvb_ca_en50221_set_configoption(struct dvb_ca_private *ca, int slot)
*
* @return Number of bytes read, or < 0 on error
*/
-static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * ebuf, int ecount)
+static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
+ u8 *ebuf, int ecount)
{
int bytes_read;
int status;
@@ -745,7 +749,8 @@ exit:
*
* @return Number of bytes written, or < 0 on error.
*/
-static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 * buf, int bytes_write)
+static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
+ u8 *buf, int bytes_write)
{
int status;
int i;
@@ -840,7 +845,6 @@ exit:
exitnowrite:
return status;
}
-EXPORT_SYMBOL(dvb_ca_en50221_camchange_irq);
@@ -849,7 +853,7 @@ EXPORT_SYMBOL(dvb_ca_en50221_camchange_irq);
/**
- * dvb_ca_en50221_camready_irq - A CAM has been removed => shut it down.
+ * dvb_ca_en50221_slot_shutdown - A CAM has been removed => shut it down.
*
* @ca: CA instance.
* @slot: Slot to shut down.
@@ -870,11 +874,10 @@ static int dvb_ca_en50221_slot_shutdown(struct dvb_ca_private *ca, int slot)
/* success */
return 0;
}
-EXPORT_SYMBOL(dvb_ca_en50221_camready_irq);
/**
- * dvb_ca_en50221_camready_irq - A CAMCHANGE IRQ has occurred.
+ * dvb_ca_en50221_camchange_irq - A CAMCHANGE IRQ has occurred.
*
* @ca: CA instance.
* @slot: Slot concerned.
@@ -899,7 +902,7 @@ void dvb_ca_en50221_camchange_irq(struct dvb_ca_en50221 *pubca, int slot, int ch
atomic_inc(&ca->slot_info[slot].camchange_count);
dvb_ca_en50221_thread_wakeup(ca);
}
-EXPORT_SYMBOL(dvb_ca_en50221_frda_irq);
+EXPORT_SYMBOL(dvb_ca_en50221_camchange_irq);
/**
@@ -919,10 +922,11 @@ void dvb_ca_en50221_camready_irq(struct dvb_ca_en50221 *pubca, int slot)
dvb_ca_en50221_thread_wakeup(ca);
}
}
+EXPORT_SYMBOL(dvb_ca_en50221_camready_irq);
/**
- * An FR or DA IRQ has occurred.
+ * dvb_ca_en50221_frda_irq - An FR or DA IRQ has occurred.
*
* @ca: CA instance.
* @slot: Slot concerned.
@@ -949,7 +953,7 @@ void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *pubca, int slot)
break;
}
}
-
+EXPORT_SYMBOL(dvb_ca_en50221_frda_irq);
/* ******************************************************************************** */
@@ -1345,7 +1349,8 @@ static long dvb_ca_en50221_io_ioctl(struct file *file,
* @return Number of bytes read, or <0 on error.
*/
static ssize_t dvb_ca_en50221_io_write(struct file *file,
- const char __user * buf, size_t count, loff_t * ppos)
+ const char __user *buf, size_t count,
+ loff_t *ppos)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_ca_private *ca = dvbdev->priv;
@@ -1485,8 +1490,8 @@ nextslot:
*
* @return Number of bytes read, or <0 on error.
*/
-static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user * buf,
- size_t count, loff_t * ppos)
+static ssize_t dvb_ca_en50221_io_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_ca_private *ca = dvbdev->priv;
@@ -1664,7 +1669,7 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
*
* @return Standard poll mask.
*/
-static unsigned int dvb_ca_en50221_io_poll(struct file *file, poll_table * wait)
+static unsigned int dvb_ca_en50221_io_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct dvb_ca_private *ca = dvbdev->priv;
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index e8c6554a47aa..3a260b82b3e8 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -436,6 +436,7 @@ config DVB_TDA10048
config DVB_AF9013
tristate "Afatech AF9013 demodulator"
depends on DVB_CORE && I2C
+ select REGMAP
default m if !MEDIA_SUBDRV_AUTOSELECT
help
Say Y when you want to support this frontend.
diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
index b978002af4d8..b8f3ebfc3e27 100644
--- a/drivers/media/dvb-frontends/af9013.c
+++ b/drivers/media/dvb-frontends/af9013.c
@@ -20,13 +20,18 @@
#include "af9013_priv.h"
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE 64
-
struct af9013_state {
- struct i2c_adapter *i2c;
+ struct i2c_client *client;
+ struct regmap *regmap;
struct dvb_frontend fe;
- struct af9013_config config;
+ u32 clk;
+ u8 tuner;
+ u32 if_frequency;
+ u8 ts_mode;
+ u8 ts_output_pin;
+ bool spec_inv;
+ u8 api_version[4];
+ u8 gpio[4];
/* tuner/demod RF and IF AGC limits used for signal strength calc */
u8 signal_strength_en, rf_50, rf_80, if_50, if_80;
@@ -44,188 +49,14 @@ struct af9013_state {
struct delayed_work statistics_work;
};
-/* write multiple registers */
-static int af9013_wr_regs_i2c(struct af9013_state *priv, u8 mbox, u16 reg,
- const u8 *val, int len)
-{
- int ret;
- u8 buf[MAX_XFER_SIZE];
- struct i2c_msg msg[1] = {
- {
- .addr = priv->config.i2c_addr,
- .flags = 0,
- .len = 3 + len,
- .buf = buf,
- }
- };
-
- if (3 + len > sizeof(buf)) {
- dev_warn(&priv->i2c->dev,
- "%s: i2c wr reg=%04x: len=%d is too big!\n",
- KBUILD_MODNAME, reg, len);
- return -EINVAL;
- }
-
- buf[0] = (reg >> 8) & 0xff;
- buf[1] = (reg >> 0) & 0xff;
- buf[2] = mbox;
- memcpy(&buf[3], val, len);
-
- ret = i2c_transfer(priv->i2c, msg, 1);
- if (ret == 1) {
- ret = 0;
- } else {
- dev_warn(&priv->i2c->dev, "%s: i2c wr failed=%d reg=%04x " \
- "len=%d\n", KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
- return ret;
-}
-
-/* read multiple registers */
-static int af9013_rd_regs_i2c(struct af9013_state *priv, u8 mbox, u16 reg,
- u8 *val, int len)
-{
- int ret;
- u8 buf[3];
- struct i2c_msg msg[2] = {
- {
- .addr = priv->config.i2c_addr,
- .flags = 0,
- .len = 3,
- .buf = buf,
- }, {
- .addr = priv->config.i2c_addr,
- .flags = I2C_M_RD,
- .len = len,
- .buf = val,
- }
- };
-
- buf[0] = (reg >> 8) & 0xff;
- buf[1] = (reg >> 0) & 0xff;
- buf[2] = mbox;
-
- ret = i2c_transfer(priv->i2c, msg, 2);
- if (ret == 2) {
- ret = 0;
- } else {
- dev_warn(&priv->i2c->dev, "%s: i2c rd failed=%d reg=%04x " \
- "len=%d\n", KBUILD_MODNAME, ret, reg, len);
- ret = -EREMOTEIO;
- }
- return ret;
-}
-
-/* write multiple registers */
-static int af9013_wr_regs(struct af9013_state *priv, u16 reg, const u8 *val,
- int len)
-{
- int ret, i;
- u8 mbox = (0 << 7)|(0 << 6)|(1 << 1)|(1 << 0);
-
- if ((priv->config.ts_mode == AF9013_TS_USB) &&
- ((reg & 0xff00) != 0xff00) && ((reg & 0xff00) != 0xae00)) {
- mbox |= ((len - 1) << 2);
- ret = af9013_wr_regs_i2c(priv, mbox, reg, val, len);
- } else {
- for (i = 0; i < len; i++) {
- ret = af9013_wr_regs_i2c(priv, mbox, reg+i, val+i, 1);
- if (ret)
- goto err;
- }
- }
-
-err:
- return 0;
-}
-
-/* read multiple registers */
-static int af9013_rd_regs(struct af9013_state *priv, u16 reg, u8 *val, int len)
-{
- int ret, i;
- u8 mbox = (0 << 7)|(0 << 6)|(1 << 1)|(0 << 0);
-
- if ((priv->config.ts_mode == AF9013_TS_USB) &&
- ((reg & 0xff00) != 0xff00) && ((reg & 0xff00) != 0xae00)) {
- mbox |= ((len - 1) << 2);
- ret = af9013_rd_regs_i2c(priv, mbox, reg, val, len);
- } else {
- for (i = 0; i < len; i++) {
- ret = af9013_rd_regs_i2c(priv, mbox, reg+i, val+i, 1);
- if (ret)
- goto err;
- }
- }
-
-err:
- return 0;
-}
-
-/* write single register */
-static int af9013_wr_reg(struct af9013_state *priv, u16 reg, u8 val)
-{
- return af9013_wr_regs(priv, reg, &val, 1);
-}
-
-/* read single register */
-static int af9013_rd_reg(struct af9013_state *priv, u16 reg, u8 *val)
-{
- return af9013_rd_regs(priv, reg, val, 1);
-}
-
-static int af9013_write_ofsm_regs(struct af9013_state *state, u16 reg, u8 *val,
- u8 len)
-{
- u8 mbox = (1 << 7)|(1 << 6)|((len - 1) << 2)|(1 << 1)|(1 << 0);
- return af9013_wr_regs_i2c(state, mbox, reg, val, len);
-}
-
-static int af9013_wr_reg_bits(struct af9013_state *state, u16 reg, int pos,
- int len, u8 val)
-{
- int ret;
- u8 tmp, mask;
-
- /* no need for read if whole reg is written */
- if (len != 8) {
- ret = af9013_rd_reg(state, reg, &tmp);
- if (ret)
- return ret;
-
- mask = (0xff >> (8 - len)) << pos;
- val <<= pos;
- tmp &= ~mask;
- val |= tmp;
- }
-
- return af9013_wr_reg(state, reg, val);
-}
-
-static int af9013_rd_reg_bits(struct af9013_state *state, u16 reg, int pos,
- int len, u8 *val)
-{
- int ret;
- u8 tmp;
-
- ret = af9013_rd_reg(state, reg, &tmp);
- if (ret)
- return ret;
-
- *val = (tmp >> pos);
- *val &= (0xff >> (8 - len));
-
- return 0;
-}
-
static int af9013_set_gpio(struct af9013_state *state, u8 gpio, u8 gpioval)
{
+ struct i2c_client *client = state->client;
int ret;
u8 pos;
u16 addr;
- dev_dbg(&state->i2c->dev, "%s: gpio=%d gpioval=%02x\n",
- __func__, gpio, gpioval);
+ dev_dbg(&client->dev, "gpio %u, gpioval %02x\n", gpio, gpioval);
/*
* GPIO0 & GPIO1 0xd735
@@ -243,8 +74,6 @@ static int af9013_set_gpio(struct af9013_state *state, u8 gpio, u8 gpioval)
break;
default:
- dev_err(&state->i2c->dev, "%s: invalid gpio=%d\n",
- KBUILD_MODNAME, gpio);
ret = -EINVAL;
goto err;
}
@@ -261,197 +90,124 @@ static int af9013_set_gpio(struct af9013_state *state, u8 gpio, u8 gpioval)
break;
}
- ret = af9013_wr_reg_bits(state, addr, pos, 4, gpioval);
+ ret = regmap_update_bits(state->regmap, addr, 0x0f << pos,
+ gpioval << pos);
if (ret)
goto err;
- return ret;
-err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
- return ret;
-}
-
-static u32 af9013_div(struct af9013_state *state, u32 a, u32 b, u32 x)
-{
- u32 r = 0, c = 0, i;
-
- dev_dbg(&state->i2c->dev, "%s: a=%d b=%d x=%d\n", __func__, a, b, x);
-
- if (a > b) {
- c = a / b;
- a = a - c * b;
- }
-
- for (i = 0; i < x; i++) {
- if (a >= b) {
- r += 1;
- a -= b;
- }
- a <<= 1;
- r <<= 1;
- }
- r = (c << (u32)x) + r;
-
- dev_dbg(&state->i2c->dev, "%s: a=%d b=%d x=%d r=%d r=%x\n",
- __func__, a, b, x, r, r);
-
- return r;
-}
-
-static int af9013_power_ctrl(struct af9013_state *state, u8 onoff)
-{
- int ret, i;
- u8 tmp;
-
- dev_dbg(&state->i2c->dev, "%s: onoff=%d\n", __func__, onoff);
-
- /* enable reset */
- ret = af9013_wr_reg_bits(state, 0xd417, 4, 1, 1);
- if (ret)
- goto err;
-
- /* start reset mechanism */
- ret = af9013_wr_reg(state, 0xaeff, 1);
- if (ret)
- goto err;
-
- /* wait reset performs */
- for (i = 0; i < 150; i++) {
- ret = af9013_rd_reg_bits(state, 0xd417, 1, 1, &tmp);
- if (ret)
- goto err;
-
- if (tmp)
- break; /* reset done */
-
- usleep_range(5000, 25000);
- }
-
- if (!tmp)
- return -ETIMEDOUT;
-
- if (onoff) {
- /* clear reset */
- ret = af9013_wr_reg_bits(state, 0xd417, 1, 1, 0);
- if (ret)
- goto err;
-
- /* disable reset */
- ret = af9013_wr_reg_bits(state, 0xd417, 4, 1, 0);
-
- /* power on */
- ret = af9013_wr_reg_bits(state, 0xd73a, 3, 1, 0);
- } else {
- /* power off */
- ret = af9013_wr_reg_bits(state, 0xd73a, 3, 1, 1);
- }
-
- return ret;
+ return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed %d\n", ret);
return ret;
}
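
Throughout this conversion the old af9013_wr_reg_bits(reg, pos, len, val) calls become regmap_update_bits(regmap, reg, mask, val << pos), with the mask derived from the pos/len pair as above (0x0f << pos for the 4-bit GPIO fields). A minimal sketch of that mapping, assuming a kernel build environment; the helper name is illustrative and not part of the patch:

#include <linux/bitops.h>
#include <linux/regmap.h>

/* Sketch only: the pos/len -> regmap_update_bits() mapping used by this patch */
static int wr_reg_bits(struct regmap *regmap, unsigned int reg,
		       unsigned int pos, unsigned int len, unsigned int val)
{
	return regmap_update_bits(regmap, reg,
				  GENMASK(pos + len - 1, pos), val << pos);
}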
static int af9013_statistics_ber_unc_start(struct dvb_frontend *fe)
{
struct af9013_state *state = fe->demodulator_priv;
+ struct i2c_client *client = state->client;
int ret;
- dev_dbg(&state->i2c->dev, "%s:\n", __func__);
+ dev_dbg(&client->dev, "\n");
/* reset and start BER counter */
- ret = af9013_wr_reg_bits(state, 0xd391, 4, 1, 1);
+ ret = regmap_update_bits(state->regmap, 0xd391, 0x10, 0x10);
if (ret)
goto err;
- return ret;
+ return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed %d\n", ret);
return ret;
}
static int af9013_statistics_ber_unc_result(struct dvb_frontend *fe)
{
struct af9013_state *state = fe->demodulator_priv;
+ struct i2c_client *client = state->client;
int ret;
+ unsigned int utmp;
u8 buf[5];
- dev_dbg(&state->i2c->dev, "%s:\n", __func__);
+ dev_dbg(&client->dev, "\n");
/* check if error bit count is ready */
- ret = af9013_rd_reg_bits(state, 0xd391, 4, 1, &buf[0]);
+ ret = regmap_read(state->regmap, 0xd391, &utmp);
if (ret)
goto err;
- if (!buf[0]) {
- dev_dbg(&state->i2c->dev, "%s: not ready\n", __func__);
+ if (!((utmp >> 4) & 0x01)) {
+ dev_dbg(&client->dev, "not ready\n");
return 0;
}
- ret = af9013_rd_regs(state, 0xd387, buf, 5);
+ ret = regmap_bulk_read(state->regmap, 0xd387, buf, 5);
if (ret)
goto err;
state->ber = (buf[2] << 16) | (buf[1] << 8) | buf[0];
state->ucblocks += (buf[4] << 8) | buf[3];
- return ret;
+ return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed %d\n", ret);
return ret;
}
static int af9013_statistics_snr_start(struct dvb_frontend *fe)
{
struct af9013_state *state = fe->demodulator_priv;
+ struct i2c_client *client = state->client;
int ret;
- dev_dbg(&state->i2c->dev, "%s:\n", __func__);
+ dev_dbg(&client->dev, "\n");
/* start SNR meas */
- ret = af9013_wr_reg_bits(state, 0xd2e1, 3, 1, 1);
+ ret = regmap_update_bits(state->regmap, 0xd2e1, 0x08, 0x08);
if (ret)
goto err;
- return ret;
+ return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed %d\n", ret);
return ret;
}
static int af9013_statistics_snr_result(struct dvb_frontend *fe)
{
struct af9013_state *state = fe->demodulator_priv;
+ struct i2c_client *client = state->client;
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i, len;
- u8 buf[3], tmp;
+ unsigned int utmp;
+ u8 buf[3];
u32 snr_val;
const struct af9013_snr *uninitialized_var(snr_lut);
- dev_dbg(&state->i2c->dev, "%s:\n", __func__);
+ dev_dbg(&client->dev, "\n");
/* check if SNR ready */
- ret = af9013_rd_reg_bits(state, 0xd2e1, 3, 1, &tmp);
+ ret = regmap_read(state->regmap, 0xd2e1, &utmp);
if (ret)
goto err;
- if (!tmp) {
- dev_dbg(&state->i2c->dev, "%s: not ready\n", __func__);
+ if (!((utmp >> 3) & 0x01)) {
+ dev_dbg(&client->dev, "not ready\n");
return 0;
}
/* read value */
- ret = af9013_rd_regs(state, 0xd2e3, buf, 3);
+ ret = regmap_bulk_read(state->regmap, 0xd2e3, buf, 3);
if (ret)
goto err;
snr_val = (buf[2] << 16) | (buf[1] << 8) | buf[0];
/* read current modulation */
- ret = af9013_rd_reg(state, 0xd3c1, &tmp);
+ ret = regmap_read(state->regmap, 0xd3c1, &utmp);
if (ret)
goto err;
- switch ((tmp >> 6) & 3) {
+ switch ((utmp >> 6) & 3) {
case 0:
len = ARRAY_SIZE(qpsk_snr_lut);
snr_lut = qpsk_snr_lut;
@@ -469,32 +225,36 @@ static int af9013_statistics_snr_result(struct dvb_frontend *fe)
}
for (i = 0; i < len; i++) {
- tmp = snr_lut[i].snr;
+ utmp = snr_lut[i].snr;
if (snr_val < snr_lut[i].val)
break;
}
- state->snr = tmp * 10; /* dB/10 */
+ state->snr = utmp * 10; /* dB/10 */
- return ret;
+ c->cnr.stat[0].svalue = 1000 * utmp;
+ c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
+
+ return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed %d\n", ret);
return ret;
}
static int af9013_statistics_signal_strength(struct dvb_frontend *fe)
{
struct af9013_state *state = fe->demodulator_priv;
+ struct i2c_client *client = state->client;
int ret = 0;
u8 buf[2], rf_gain, if_gain;
int signal_strength;
- dev_dbg(&state->i2c->dev, "%s:\n", __func__);
+ dev_dbg(&client->dev, "\n");
if (!state->signal_strength_en)
return 0;
- ret = af9013_rd_regs(state, 0xd07c, buf, 2);
+ ret = regmap_bulk_read(state->regmap, 0xd07c, buf, 2);
if (ret)
goto err;
@@ -513,9 +273,9 @@ static int af9013_statistics_signal_strength(struct dvb_frontend *fe)
state->signal_strength = signal_strength;
- return ret;
+ return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed %d\n", ret);
return ret;
}
@@ -535,6 +295,7 @@ static void af9013_statistics_work(struct work_struct *work)
switch (state->statistics_step) {
default:
state->statistics_step = 0;
+ /* fall-through */
case 0:
af9013_statistics_signal_strength(&state->fe);
state->statistics_step++;
@@ -579,61 +340,72 @@ static int af9013_get_tune_settings(struct dvb_frontend *fe,
static int af9013_set_frontend(struct dvb_frontend *fe)
{
struct af9013_state *state = fe->demodulator_priv;
+ struct i2c_client *client = state->client;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i, sampling_freq;
bool auto_mode, spec_inv;
u8 buf[6];
u32 if_frequency, freq_cw;
- dev_dbg(&state->i2c->dev, "%s: frequency=%d bandwidth_hz=%d\n",
- __func__, c->frequency, c->bandwidth_hz);
+ dev_dbg(&client->dev, "frequency %u, bandwidth_hz %u\n",
+ c->frequency, c->bandwidth_hz);
/* program tuner */
- if (fe->ops.tuner_ops.set_params)
- fe->ops.tuner_ops.set_params(fe);
+ if (fe->ops.tuner_ops.set_params) {
+ ret = fe->ops.tuner_ops.set_params(fe);
+ if (ret)
+ goto err;
+ }
/* program CFOE coefficients */
if (c->bandwidth_hz != state->bandwidth_hz) {
for (i = 0; i < ARRAY_SIZE(coeff_lut); i++) {
- if (coeff_lut[i].clock == state->config.clock &&
+ if (coeff_lut[i].clock == state->clk &&
coeff_lut[i].bandwidth_hz == c->bandwidth_hz) {
break;
}
}
/* Return an error if can't find bandwidth or the right clock */
- if (i == ARRAY_SIZE(coeff_lut))
- return -EINVAL;
+ if (i == ARRAY_SIZE(coeff_lut)) {
+ ret = -EINVAL;
+ goto err;
+ }
- ret = af9013_wr_regs(state, 0xae00, coeff_lut[i].val,
- sizeof(coeff_lut[i].val));
+ ret = regmap_bulk_write(state->regmap, 0xae00, coeff_lut[i].val,
+ sizeof(coeff_lut[i].val));
+ if (ret)
+ goto err;
}
/* program frequency control */
if (c->bandwidth_hz != state->bandwidth_hz || state->first_tune) {
/* get used IF frequency */
- if (fe->ops.tuner_ops.get_if_frequency)
- fe->ops.tuner_ops.get_if_frequency(fe, &if_frequency);
- else
- if_frequency = state->config.if_frequency;
+ if (fe->ops.tuner_ops.get_if_frequency) {
+ ret = fe->ops.tuner_ops.get_if_frequency(fe,
+ &if_frequency);
+ if (ret)
+ goto err;
+ } else {
+ if_frequency = state->if_frequency;
+ }
- dev_dbg(&state->i2c->dev, "%s: if_frequency=%d\n",
- __func__, if_frequency);
+ dev_dbg(&client->dev, "if_frequency %u\n", if_frequency);
sampling_freq = if_frequency;
- while (sampling_freq > (state->config.clock / 2))
- sampling_freq -= state->config.clock;
+ while (sampling_freq > (state->clk / 2))
+ sampling_freq -= state->clk;
if (sampling_freq < 0) {
sampling_freq *= -1;
- spec_inv = state->config.spec_inv;
+ spec_inv = state->spec_inv;
} else {
- spec_inv = !state->config.spec_inv;
+ spec_inv = !state->spec_inv;
}
- freq_cw = af9013_div(state, sampling_freq, state->config.clock,
- 23);
+ freq_cw = DIV_ROUND_CLOSEST_ULL((u64)sampling_freq * 0x800000,
+ state->clk);
if (spec_inv)
freq_cw = 0x800000 - freq_cw;
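
The bit-serial af9013_div() helper is gone; the IF control word is now computed directly as round(sampling_freq * 2^23 / clk) via DIV_ROUND_CLOSEST_ULL(), then mirrored around 2^23 when the spectrum is inverted. A standalone sketch of the arithmetic, using assumed example values (4.57 MHz IF, 28.8 MHz clock) rather than anything taken from the patch:

/* Sketch only, not driver code: the control-word arithmetic shown above */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sampling_freq = 4570000;	/* Hz, assumed example IF */
	uint64_t clk = 28800000;		/* Hz, assumed demod clock */
	/* freq_cw = round(sampling_freq * 2^23 / clk) */
	uint64_t freq_cw = (sampling_freq * 0x800000 + clk / 2) / clk;

	printf("freq_cw          = 0x%06llx\n", (unsigned long long)freq_cw);
	/* spectrum inversion mirrors the value around 2^23 */
	printf("freq_cw inverted = 0x%06llx\n",
	       (unsigned long long)(0x800000 - freq_cw));
	return 0;
}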
@@ -648,32 +420,32 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
buf[4] = (freq_cw >> 8) & 0xff;
buf[5] = (freq_cw >> 16) & 0x7f;
- ret = af9013_wr_regs(state, 0xd140, buf, 3);
+ ret = regmap_bulk_write(state->regmap, 0xd140, buf, 3);
if (ret)
goto err;
- ret = af9013_wr_regs(state, 0x9be7, buf, 6);
+ ret = regmap_bulk_write(state->regmap, 0x9be7, buf, 6);
if (ret)
goto err;
}
/* clear TPS lock flag */
- ret = af9013_wr_reg_bits(state, 0xd330, 3, 1, 1);
+ ret = regmap_update_bits(state->regmap, 0xd330, 0x08, 0x08);
if (ret)
goto err;
/* clear MPEG2 lock flag */
- ret = af9013_wr_reg_bits(state, 0xd507, 6, 1, 0);
+ ret = regmap_update_bits(state->regmap, 0xd507, 0x40, 0x00);
if (ret)
goto err;
/* empty channel function */
- ret = af9013_wr_reg_bits(state, 0x9bfe, 0, 1, 0);
+ ret = regmap_update_bits(state->regmap, 0x9bfe, 0x01, 0x00);
if (ret)
goto err;
/* empty DVB-T channel function */
- ret = af9013_wr_reg_bits(state, 0x9bc2, 0, 1, 0);
+ ret = regmap_update_bits(state->regmap, 0x9bc2, 0x01, 0x00);
if (ret)
goto err;
@@ -691,8 +463,7 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
buf[0] |= (1 << 0);
break;
default:
- dev_dbg(&state->i2c->dev, "%s: invalid transmission_mode\n",
- __func__);
+ dev_dbg(&client->dev, "invalid transmission_mode\n");
auto_mode = true;
}
@@ -712,8 +483,7 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
buf[0] |= (3 << 2);
break;
default:
- dev_dbg(&state->i2c->dev, "%s: invalid guard_interval\n",
- __func__);
+ dev_dbg(&client->dev, "invalid guard_interval\n");
auto_mode = true;
}
@@ -733,7 +503,7 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
buf[0] |= (3 << 4);
break;
default:
- dev_dbg(&state->i2c->dev, "%s: invalid hierarchy\n", __func__);
+ dev_dbg(&client->dev, "invalid hierarchy\n");
auto_mode = true;
}
@@ -750,7 +520,7 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
buf[1] |= (2 << 6);
break;
default:
- dev_dbg(&state->i2c->dev, "%s: invalid modulation\n", __func__);
+ dev_dbg(&client->dev, "invalid modulation\n");
auto_mode = true;
}
@@ -776,8 +546,7 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
buf[2] |= (4 << 0);
break;
default:
- dev_dbg(&state->i2c->dev, "%s: invalid code_rate_HP\n",
- __func__);
+ dev_dbg(&client->dev, "invalid code_rate_HP\n");
auto_mode = true;
}
@@ -802,8 +571,7 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
case FEC_NONE:
break;
default:
- dev_dbg(&state->i2c->dev, "%s: invalid code_rate_LP\n",
- __func__);
+ dev_dbg(&client->dev, "invalid code_rate_LP\n");
auto_mode = true;
}
@@ -817,38 +585,37 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
buf[1] |= (2 << 2);
break;
default:
- dev_dbg(&state->i2c->dev, "%s: invalid bandwidth_hz\n",
- __func__);
+ dev_dbg(&client->dev, "invalid bandwidth_hz\n");
ret = -EINVAL;
goto err;
}
- ret = af9013_wr_regs(state, 0xd3c0, buf, 3);
+ ret = regmap_bulk_write(state->regmap, 0xd3c0, buf, 3);
if (ret)
goto err;
if (auto_mode) {
/* clear easy mode flag */
- ret = af9013_wr_reg(state, 0xaefd, 0);
+ ret = regmap_write(state->regmap, 0xaefd, 0x00);
if (ret)
goto err;
- dev_dbg(&state->i2c->dev, "%s: auto params\n", __func__);
+ dev_dbg(&client->dev, "auto params\n");
} else {
/* set easy mode flag */
- ret = af9013_wr_reg(state, 0xaefd, 1);
+ ret = regmap_write(state->regmap, 0xaefd, 0x01);
if (ret)
goto err;
- ret = af9013_wr_reg(state, 0xaefe, 0);
+ ret = regmap_write(state->regmap, 0xaefe, 0x00);
if (ret)
goto err;
- dev_dbg(&state->i2c->dev, "%s: manual params\n", __func__);
+ dev_dbg(&client->dev, "manual params\n");
}
- /* tune */
- ret = af9013_wr_reg(state, 0xffff, 0);
+ /* Reset FSM */
+ ret = regmap_write(state->regmap, 0xffff, 0x00);
if (ret)
goto err;
@@ -856,9 +623,9 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
state->set_frontend_jiffies = jiffies;
state->first_tune = false;
- return ret;
+ return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed %d\n", ret);
return ret;
}
@@ -866,12 +633,13 @@ static int af9013_get_frontend(struct dvb_frontend *fe,
struct dtv_frontend_properties *c)
{
struct af9013_state *state = fe->demodulator_priv;
+ struct i2c_client *client = state->client;
int ret;
u8 buf[3];
- dev_dbg(&state->i2c->dev, "%s:\n", __func__);
+ dev_dbg(&client->dev, "\n");
- ret = af9013_rd_regs(state, 0xd3c0, buf, 3);
+ ret = regmap_bulk_read(state->regmap, 0xd3c0, buf, 3);
if (ret)
goto err;
@@ -973,17 +741,18 @@ static int af9013_get_frontend(struct dvb_frontend *fe,
break;
}
- return ret;
+ return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed %d\n", ret);
return ret;
}
static int af9013_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
struct af9013_state *state = fe->demodulator_priv;
+ struct i2c_client *client = state->client;
int ret;
- u8 tmp;
+ unsigned int utmp;
/*
* Return status from the cache if it is younger than 2000ms with the
@@ -1001,21 +770,21 @@ static int af9013_read_status(struct dvb_frontend *fe, enum fe_status *status)
}
/* MPEG2 lock */
- ret = af9013_rd_reg_bits(state, 0xd507, 6, 1, &tmp);
+ ret = regmap_read(state->regmap, 0xd507, &utmp);
if (ret)
goto err;
- if (tmp)
+ if ((utmp >> 6) & 0x01)
*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI |
FE_HAS_SYNC | FE_HAS_LOCK;
if (!*status) {
/* TPS lock */
- ret = af9013_rd_reg_bits(state, 0xd330, 3, 1, &tmp);
+ ret = regmap_read(state->regmap, 0xd330, &utmp);
if (ret)
goto err;
- if (tmp)
+ if ((utmp >> 3) & 0x01)
*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER |
FE_HAS_VITERBI;
}
@@ -1023,9 +792,9 @@ static int af9013_read_status(struct dvb_frontend *fe, enum fe_status *status)
state->fe_status = *status;
state->read_status_jiffies = jiffies;
- return ret;
+ return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed %d\n", ret);
return ret;
}
@@ -1060,118 +829,82 @@ static int af9013_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
static int af9013_init(struct dvb_frontend *fe)
{
struct af9013_state *state = fe->demodulator_priv;
+ struct i2c_client *client = state->client;
int ret, i, len;
- u8 buf[3], tmp;
- u32 adc_cw;
+ unsigned int utmp;
+ u8 buf[3];
const struct af9013_reg_bit *init;
- dev_dbg(&state->i2c->dev, "%s:\n", __func__);
+ dev_dbg(&client->dev, "\n");
+
+ /* ADC on */
+ ret = regmap_update_bits(state->regmap, 0xd73a, 0x08, 0x00);
+ if (ret)
+ goto err;
- /* power on */
- ret = af9013_power_ctrl(state, 1);
+ /* Clear reset */
+ ret = regmap_update_bits(state->regmap, 0xd417, 0x02, 0x00);
if (ret)
goto err;
- /* enable ADC */
- ret = af9013_wr_reg(state, 0xd73a, 0xa4);
+ /* Disable reset */
+ ret = regmap_update_bits(state->regmap, 0xd417, 0x10, 0x00);
if (ret)
goto err;
/* write API version to firmware */
- ret = af9013_wr_regs(state, 0x9bf2, state->config.api_version, 4);
+ ret = regmap_bulk_write(state->regmap, 0x9bf2, state->api_version, 4);
if (ret)
goto err;
/* program ADC control */
- switch (state->config.clock) {
+ switch (state->clk) {
case 28800000: /* 28.800 MHz */
- tmp = 0;
+ utmp = 0;
break;
case 20480000: /* 20.480 MHz */
- tmp = 1;
+ utmp = 1;
break;
case 28000000: /* 28.000 MHz */
- tmp = 2;
+ utmp = 2;
break;
case 25000000: /* 25.000 MHz */
- tmp = 3;
+ utmp = 3;
break;
default:
- dev_err(&state->i2c->dev, "%s: invalid clock\n",
- KBUILD_MODNAME);
- return -EINVAL;
- }
-
- adc_cw = af9013_div(state, state->config.clock, 1000000ul, 19);
- buf[0] = (adc_cw >> 0) & 0xff;
- buf[1] = (adc_cw >> 8) & 0xff;
- buf[2] = (adc_cw >> 16) & 0xff;
-
- ret = af9013_wr_regs(state, 0xd180, buf, 3);
- if (ret)
- goto err;
-
- ret = af9013_wr_reg_bits(state, 0x9bd2, 0, 4, tmp);
- if (ret)
- goto err;
-
- /* set I2C master clock */
- ret = af9013_wr_reg(state, 0xd416, 0x14);
- if (ret)
- goto err;
-
- /* set 16 embx */
- ret = af9013_wr_reg_bits(state, 0xd700, 1, 1, 1);
- if (ret)
- goto err;
-
- /* set no trigger */
- ret = af9013_wr_reg_bits(state, 0xd700, 2, 1, 0);
- if (ret)
+ ret = -EINVAL;
goto err;
+ }
- /* set read-update bit for constellation */
- ret = af9013_wr_reg_bits(state, 0xd371, 1, 1, 1);
+ ret = regmap_update_bits(state->regmap, 0x9bd2, 0x0f, utmp);
if (ret)
goto err;
- /* settings for mp2if */
- if (state->config.ts_mode == AF9013_TS_USB) {
- /* AF9015 split PSB to 1.5k + 0.5k */
- ret = af9013_wr_reg_bits(state, 0xd50b, 2, 1, 1);
- if (ret)
- goto err;
- } else {
- /* AF9013 change the output bit to data7 */
- ret = af9013_wr_reg_bits(state, 0xd500, 3, 1, 1);
- if (ret)
- goto err;
-
- /* AF9013 set mpeg to full speed */
- ret = af9013_wr_reg_bits(state, 0xd502, 4, 1, 1);
- if (ret)
- goto err;
- }
-
- ret = af9013_wr_reg_bits(state, 0xd520, 4, 1, 1);
+ utmp = div_u64((u64)state->clk * 0x80000, 1000000);
+ buf[0] = (utmp >> 0) & 0xff;
+ buf[1] = (utmp >> 8) & 0xff;
+ buf[2] = (utmp >> 16) & 0xff;
+ ret = regmap_bulk_write(state->regmap, 0xd180, buf, 3);
if (ret)
goto err;
/* load OFSM settings */
- dev_dbg(&state->i2c->dev, "%s: load ofsm settings\n", __func__);
+ dev_dbg(&client->dev, "load ofsm settings\n");
len = ARRAY_SIZE(ofsm_init);
init = ofsm_init;
for (i = 0; i < len; i++) {
- ret = af9013_wr_reg_bits(state, init[i].addr, init[i].pos,
- init[i].len, init[i].val);
+ u16 reg = init[i].addr;
+ u8 mask = GENMASK(init[i].pos + init[i].len - 1, init[i].pos);
+ u8 val = init[i].val << init[i].pos;
+
+ ret = regmap_update_bits(state->regmap, reg, mask, val);
if (ret)
goto err;
}
/* load tuner specific settings */
- dev_dbg(&state->i2c->dev, "%s: load tuner specific settings\n",
- __func__);
- switch (state->config.tuner) {
+ dev_dbg(&client->dev, "load tuner specific settings\n");
+ switch (state->tuner) {
case AF9013_TUNER_MXL5003D:
len = ARRAY_SIZE(tuner_init_mxl5003d);
init = tuner_init_mxl5003d;
@@ -1216,98 +949,126 @@ static int af9013_init(struct dvb_frontend *fe)
}
for (i = 0; i < len; i++) {
- ret = af9013_wr_reg_bits(state, init[i].addr, init[i].pos,
- init[i].len, init[i].val);
+ u16 reg = init[i].addr;
+ u8 mask = GENMASK(init[i].pos + init[i].len - 1, init[i].pos);
+ u8 val = init[i].val << init[i].pos;
+
+ ret = regmap_update_bits(state->regmap, reg, mask, val);
if (ret)
goto err;
}
- /* TS mode */
- ret = af9013_wr_reg_bits(state, 0xd500, 1, 2, state->config.ts_mode);
+ /* TS interface */
+ if (state->ts_output_pin == 7)
+ utmp = 1 << 3 | state->ts_mode << 1;
+ else
+ utmp = 0 << 3 | state->ts_mode << 1;
+ ret = regmap_update_bits(state->regmap, 0xd500, 0x0e, utmp);
if (ret)
goto err;
/* enable lock led */
- ret = af9013_wr_reg_bits(state, 0xd730, 0, 1, 1);
+ ret = regmap_update_bits(state->regmap, 0xd730, 0x01, 0x01);
if (ret)
goto err;
/* check if we support signal strength */
if (!state->signal_strength_en) {
- ret = af9013_rd_reg_bits(state, 0x9bee, 0, 1,
- &state->signal_strength_en);
+ ret = regmap_read(state->regmap, 0x9bee, &utmp);
if (ret)
goto err;
+
+ state->signal_strength_en = (utmp >> 0) & 0x01;
}
/* read values needed for signal strength calculation */
if (state->signal_strength_en && !state->rf_50) {
- ret = af9013_rd_reg(state, 0x9bbd, &state->rf_50);
+ ret = regmap_bulk_read(state->regmap, 0x9bbd, &state->rf_50, 1);
if (ret)
goto err;
-
- ret = af9013_rd_reg(state, 0x9bd0, &state->rf_80);
+ ret = regmap_bulk_read(state->regmap, 0x9bd0, &state->rf_80, 1);
if (ret)
goto err;
-
- ret = af9013_rd_reg(state, 0x9be2, &state->if_50);
+ ret = regmap_bulk_read(state->regmap, 0x9be2, &state->if_50, 1);
if (ret)
goto err;
-
- ret = af9013_rd_reg(state, 0x9be4, &state->if_80);
+ ret = regmap_bulk_read(state->regmap, 0x9be4, &state->if_80, 1);
if (ret)
goto err;
}
/* SNR */
- ret = af9013_wr_reg(state, 0xd2e2, 1);
+ ret = regmap_write(state->regmap, 0xd2e2, 0x01);
if (ret)
goto err;
/* BER / UCB */
buf[0] = (10000 >> 0) & 0xff;
buf[1] = (10000 >> 8) & 0xff;
- ret = af9013_wr_regs(state, 0xd385, buf, 2);
+ ret = regmap_bulk_write(state->regmap, 0xd385, buf, 2);
if (ret)
goto err;
/* enable FEC monitor */
- ret = af9013_wr_reg_bits(state, 0xd392, 1, 1, 1);
+ ret = regmap_update_bits(state->regmap, 0xd392, 0x02, 0x02);
if (ret)
goto err;
state->first_tune = true;
schedule_delayed_work(&state->statistics_work, msecs_to_jiffies(400));
- return ret;
+ return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed %d\n", ret);
return ret;
}
static int af9013_sleep(struct dvb_frontend *fe)
{
struct af9013_state *state = fe->demodulator_priv;
+ struct i2c_client *client = state->client;
int ret;
+ unsigned int utmp;
- dev_dbg(&state->i2c->dev, "%s:\n", __func__);
+ dev_dbg(&client->dev, "\n");
/* stop statistics polling */
cancel_delayed_work_sync(&state->statistics_work);
/* disable lock led */
- ret = af9013_wr_reg_bits(state, 0xd730, 0, 1, 0);
+ ret = regmap_update_bits(state->regmap, 0xd730, 0x01, 0x00);
if (ret)
goto err;
- /* power off */
- ret = af9013_power_ctrl(state, 0);
+ /* Enable reset */
+ ret = regmap_update_bits(state->regmap, 0xd417, 0x10, 0x10);
if (ret)
goto err;
- return ret;
+ /* Start reset execution */
+ ret = regmap_write(state->regmap, 0xaeff, 0x01);
+ if (ret)
+ goto err;
+
+	/* Wait for the reset to complete */
+ ret = regmap_read_poll_timeout(state->regmap, 0xd417, utmp,
+ (utmp >> 1) & 0x01, 5000, 1000000);
+ if (ret)
+ goto err;
+
+ if (!((utmp >> 1) & 0x01)) {
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ /* ADC off */
+ ret = regmap_update_bits(state->regmap, 0xd73a, 0x08, 0x08);
+ if (ret)
+ goto err;
+
+ return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed %d\n", ret);
return ret;
}
@@ -1315,200 +1076,174 @@ static int af9013_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
int ret;
struct af9013_state *state = fe->demodulator_priv;
+ struct i2c_client *client = state->client;
- dev_dbg(&state->i2c->dev, "%s: enable=%d\n", __func__, enable);
+ dev_dbg(&client->dev, "enable %d\n", enable);
/* gate already open or close */
if (state->i2c_gate_state == enable)
return 0;
- if (state->config.ts_mode == AF9013_TS_USB)
- ret = af9013_wr_reg_bits(state, 0xd417, 3, 1, enable);
+ if (state->ts_mode == AF9013_TS_MODE_USB)
+ ret = regmap_update_bits(state->regmap, 0xd417, 0x08,
+ enable << 3);
else
- ret = af9013_wr_reg_bits(state, 0xd607, 2, 1, enable);
+ ret = regmap_update_bits(state->regmap, 0xd607, 0x04,
+ enable << 2);
if (ret)
goto err;
state->i2c_gate_state = enable;
- return ret;
+ return 0;
err:
- dev_dbg(&state->i2c->dev, "%s: failed=%d\n", __func__, ret);
+ dev_dbg(&client->dev, "failed %d\n", ret);
return ret;
}
static void af9013_release(struct dvb_frontend *fe)
{
struct af9013_state *state = fe->demodulator_priv;
+ struct i2c_client *client = state->client;
- /* stop statistics polling */
- cancel_delayed_work_sync(&state->statistics_work);
+ dev_dbg(&client->dev, "\n");
- kfree(state);
+ i2c_unregister_device(client);
}
static const struct dvb_frontend_ops af9013_ops;
static int af9013_download_firmware(struct af9013_state *state)
{
- int i, len, remaining, ret;
- const struct firmware *fw;
+ struct i2c_client *client = state->client;
+ int ret, i, len, rem;
+ unsigned int utmp;
+ u8 buf[4];
u16 checksum = 0;
- u8 val;
- u8 fw_params[4];
- u8 *fw_file = AF9013_FIRMWARE;
+ const struct firmware *firmware;
+ const char *name = AF9013_FIRMWARE;
- msleep(100);
- /* check whether firmware is already running */
- ret = af9013_rd_reg(state, 0x98be, &val);
+ dev_dbg(&client->dev, "\n");
+
+ /* Check whether firmware is already running */
+ ret = regmap_read(state->regmap, 0x98be, &utmp);
if (ret)
goto err;
- else
- dev_dbg(&state->i2c->dev, "%s: firmware status=%02x\n",
- __func__, val);
- if (val == 0x0c) /* fw is running, no need for download */
- goto exit;
+ dev_dbg(&client->dev, "firmware status %02x\n", utmp);
- dev_info(&state->i2c->dev, "%s: found a '%s' in cold state, will try " \
- "to load a firmware\n",
- KBUILD_MODNAME, af9013_ops.info.name);
+ if (utmp == 0x0c)
+ return 0;
- /* request the firmware, this will block and timeout */
- ret = request_firmware(&fw, fw_file, state->i2c->dev.parent);
+ dev_info(&client->dev, "found a '%s' in cold state, will try to load a firmware\n",
+ af9013_ops.info.name);
+
+ /* Request the firmware, will block and timeout */
+ ret = request_firmware(&firmware, name, &client->dev);
if (ret) {
- dev_info(&state->i2c->dev, "%s: did not find the firmware " \
- "file. (%s) Please see linux/Documentation/dvb/ for " \
- "more details on firmware-problems. (%d)\n",
- KBUILD_MODNAME, fw_file, ret);
+ dev_info(&client->dev, "firmware file '%s' not found %d\n",
+ name, ret);
goto err;
}
- dev_info(&state->i2c->dev, "%s: downloading firmware from file '%s'\n",
- KBUILD_MODNAME, fw_file);
-
- /* calc checksum */
- for (i = 0; i < fw->size; i++)
- checksum += fw->data[i];
+ dev_info(&client->dev, "downloading firmware from file '%s'\n",
+ name);
- fw_params[0] = checksum >> 8;
- fw_params[1] = checksum & 0xff;
- fw_params[2] = fw->size >> 8;
- fw_params[3] = fw->size & 0xff;
+ /* Write firmware checksum & size */
+ for (i = 0; i < firmware->size; i++)
+ checksum += firmware->data[i];
- /* write fw checksum & size */
- ret = af9013_write_ofsm_regs(state, 0x50fc,
- fw_params, sizeof(fw_params));
+ buf[0] = (checksum >> 8) & 0xff;
+ buf[1] = (checksum >> 0) & 0xff;
+ buf[2] = (firmware->size >> 8) & 0xff;
+ buf[3] = (firmware->size >> 0) & 0xff;
+ ret = regmap_bulk_write(state->regmap, 0x50fc, buf, 4);
if (ret)
- goto err_release;
-
- #define FW_ADDR 0x5100 /* firmware start address */
- #define LEN_MAX 16 /* max packet size */
- for (remaining = fw->size; remaining > 0; remaining -= LEN_MAX) {
- len = remaining;
- if (len > LEN_MAX)
- len = LEN_MAX;
-
- ret = af9013_write_ofsm_regs(state,
- FW_ADDR + fw->size - remaining,
- (u8 *) &fw->data[fw->size - remaining], len);
+ goto err_release_firmware;
+
+ /* Download firmware */
+ #define LEN_MAX 16
+ for (rem = firmware->size; rem > 0; rem -= LEN_MAX) {
+ len = min(LEN_MAX, rem);
+ ret = regmap_bulk_write(state->regmap,
+ 0x5100 + firmware->size - rem,
+ &firmware->data[firmware->size - rem],
+ len);
if (ret) {
- dev_err(&state->i2c->dev,
- "%s: firmware download failed=%d\n",
- KBUILD_MODNAME, ret);
- goto err_release;
+ dev_err(&client->dev, "firmware download failed %d\n",
+ ret);
+ goto err_release_firmware;
}
}
- /* request boot firmware */
- ret = af9013_wr_reg(state, 0xe205, 1);
- if (ret)
- goto err_release;
-
- for (i = 0; i < 15; i++) {
- msleep(100);
+ release_firmware(firmware);
- /* check firmware status */
- ret = af9013_rd_reg(state, 0x98be, &val);
- if (ret)
- goto err_release;
+ /* Boot firmware */
+ ret = regmap_write(state->regmap, 0xe205, 0x01);
+ if (ret)
+ goto err;
- dev_dbg(&state->i2c->dev, "%s: firmware status=%02x\n",
- __func__, val);
+ /* Check firmware status. 0c=OK, 04=fail */
+ ret = regmap_read_poll_timeout(state->regmap, 0x98be, utmp,
+ (utmp == 0x0c || utmp == 0x04),
+ 5000, 1000000);
+ if (ret)
+ goto err;
- if (val == 0x0c || val == 0x04) /* success or fail */
- break;
- }
+ dev_dbg(&client->dev, "firmware status %02x\n", utmp);
- if (val == 0x04) {
- dev_err(&state->i2c->dev, "%s: firmware did not run\n",
- KBUILD_MODNAME);
+ if (utmp == 0x04) {
ret = -ENODEV;
- } else if (val != 0x0c) {
- dev_err(&state->i2c->dev, "%s: firmware boot timeout\n",
- KBUILD_MODNAME);
+ dev_err(&client->dev, "firmware did not run\n");
+ goto err;
+ } else if (utmp != 0x0c) {
ret = -ENODEV;
+ dev_err(&client->dev, "firmware boot timeout\n");
+ goto err;
}
-err_release:
- release_firmware(fw);
+ dev_info(&client->dev, "found a '%s' in warm state\n",
+ af9013_ops.info.name);
+
+ return 0;
+err_release_firmware:
+ release_firmware(firmware);
err:
-exit:
- if (!ret)
- dev_info(&state->i2c->dev, "%s: found a '%s' in warm state\n",
- KBUILD_MODNAME, af9013_ops.info.name);
+ dev_dbg(&client->dev, "failed %d\n", ret);
return ret;
}
+/*
+ * XXX: This is a wrapper around af9013_probe(), routed via the driver core,
+ * so that the legacy media attach binding gets a proper I2C client.
+ * New users must use the I2C client binding directly!
+ */
struct dvb_frontend *af9013_attach(const struct af9013_config *config,
- struct i2c_adapter *i2c)
+ struct i2c_adapter *i2c)
{
- int ret;
- struct af9013_state *state = NULL;
- u8 buf[4], i;
-
- /* allocate memory for the internal state */
- state = kzalloc(sizeof(struct af9013_state), GFP_KERNEL);
- if (state == NULL)
- goto err;
-
- /* setup the state */
- state->i2c = i2c;
- memcpy(&state->config, config, sizeof(struct af9013_config));
-
- /* download firmware */
- if (state->config.ts_mode != AF9013_TS_USB) {
- ret = af9013_download_firmware(state);
- if (ret)
- goto err;
- }
-
- /* firmware version */
- ret = af9013_rd_regs(state, 0x5103, buf, 4);
- if (ret)
- goto err;
-
- dev_info(&state->i2c->dev, "%s: firmware version %d.%d.%d.%d\n",
- KBUILD_MODNAME, buf[0], buf[1], buf[2], buf[3]);
-
- /* set GPIOs */
- for (i = 0; i < sizeof(state->config.gpio); i++) {
- ret = af9013_set_gpio(state, i, state->config.gpio[i]);
- if (ret)
- goto err;
- }
-
- /* create dvb_frontend */
- memcpy(&state->fe.ops, &af9013_ops,
- sizeof(struct dvb_frontend_ops));
- state->fe.demodulator_priv = state;
-
- INIT_DELAYED_WORK(&state->statistics_work, af9013_statistics_work);
-
- return &state->fe;
-err:
- kfree(state);
- return NULL;
+ struct i2c_client *client;
+ struct i2c_board_info board_info;
+ struct af9013_platform_data pdata;
+
+ pdata.clk = config->clock;
+ pdata.tuner = config->tuner;
+ pdata.if_frequency = config->if_frequency;
+ pdata.ts_mode = config->ts_mode;
+ pdata.ts_output_pin = 7;
+ pdata.spec_inv = config->spec_inv;
+ memcpy(&pdata.api_version, config->api_version, sizeof(pdata.api_version));
+ memcpy(&pdata.gpio, config->gpio, sizeof(pdata.gpio));
+ pdata.attach_in_use = true;
+
+ memset(&board_info, 0, sizeof(board_info));
+ strlcpy(board_info.type, "af9013", sizeof(board_info.type));
+ board_info.addr = config->i2c_addr;
+ board_info.platform_data = &pdata;
+ client = i2c_new_device(i2c, &board_info);
+ if (!client || !client->dev.driver)
+ return NULL;
+
+ return pdata.get_dvb_frontend(client);
}
EXPORT_SYMBOL(af9013_attach);
@@ -1555,6 +1290,279 @@ static const struct dvb_frontend_ops af9013_ops = {
.i2c_gate_ctrl = af9013_i2c_gate_ctrl,
};
+static struct dvb_frontend *af9013_get_dvb_frontend(struct i2c_client *client)
+{
+ struct af9013_state *state = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "\n");
+
+ return &state->fe;
+}
+
+/* Own I2C access routines needed for regmap as chip uses extra command byte */
+static int af9013_wregs(struct i2c_client *client, u8 cmd, u16 reg,
+ const u8 *val, int len)
+{
+ int ret;
+ u8 buf[21];
+ struct i2c_msg msg[1] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 3 + len,
+ .buf = buf,
+ }
+ };
+
+ if (3 + len > sizeof(buf)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ buf[0] = (reg >> 8) & 0xff;
+ buf[1] = (reg >> 0) & 0xff;
+ buf[2] = cmd;
+ memcpy(&buf[3], val, len);
+ ret = i2c_transfer(client->adapter, msg, 1);
+ if (ret < 0) {
+ goto err;
+ } else if (ret != 1) {
+ ret = -EREMOTEIO;
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed %d\n", ret);
+ return ret;
+}
+
+static int af9013_rregs(struct i2c_client *client, u8 cmd, u16 reg,
+ u8 *val, int len)
+{
+ int ret;
+ u8 buf[3];
+ struct i2c_msg msg[2] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 3,
+ .buf = buf,
+ }, {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = val,
+ }
+ };
+
+ buf[0] = (reg >> 8) & 0xff;
+ buf[1] = (reg >> 0) & 0xff;
+ buf[2] = cmd;
+ ret = i2c_transfer(client->adapter, msg, 2);
+ if (ret < 0) {
+ goto err;
+ } else if (ret != 2) {
+ ret = -EREMOTEIO;
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed %d\n", ret);
+ return ret;
+}
+
+static int af9013_regmap_write(void *context, const void *data, size_t count)
+{
+ struct i2c_client *client = context;
+ struct af9013_state *state = i2c_get_clientdata(client);
+ int ret, i;
+ u8 cmd;
+ u16 reg = ((u8 *)data)[0] << 8|((u8 *)data)[1] << 0;
+ u8 *val = &((u8 *)data)[2];
+ const unsigned int len = count - 2;
+
+ if (state->ts_mode == AF9013_TS_MODE_USB && (reg & 0xff00) != 0xae00) {
+ cmd = 0 << 7|0 << 6|(len - 1) << 2|1 << 1|1 << 0;
+ ret = af9013_wregs(client, cmd, reg, val, len);
+ if (ret)
+ goto err;
+ } else if (reg >= 0x5100 && reg < 0x8fff) {
+ /* Firmware download */
+ cmd = 1 << 7|1 << 6|(len - 1) << 2|1 << 1|1 << 0;
+ ret = af9013_wregs(client, cmd, reg, val, len);
+ if (ret)
+ goto err;
+ } else {
+ cmd = 0 << 7|0 << 6|(1 - 1) << 2|1 << 1|1 << 0;
+ for (i = 0; i < len; i++) {
+ ret = af9013_wregs(client, cmd, reg + i, val + i, 1);
+ if (ret)
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed %d\n", ret);
+ return ret;
+}
+
+static int af9013_regmap_read(void *context, const void *reg_buf,
+ size_t reg_size, void *val_buf, size_t val_size)
+{
+ struct i2c_client *client = context;
+ struct af9013_state *state = i2c_get_clientdata(client);
+ int ret, i;
+ u8 cmd;
+ u16 reg = ((u8 *)reg_buf)[0] << 8|((u8 *)reg_buf)[1] << 0;
+ u8 *val = &((u8 *)val_buf)[0];
+ const unsigned int len = val_size;
+
+ if (state->ts_mode == AF9013_TS_MODE_USB && (reg & 0xff00) != 0xae00) {
+ cmd = 0 << 7|0 << 6|(len - 1) << 2|1 << 1|0 << 0;
+ ret = af9013_rregs(client, cmd, reg, val_buf, len);
+ if (ret)
+ goto err;
+ } else {
+ cmd = 0 << 7|0 << 6|(1 - 1) << 2|1 << 1|0 << 0;
+ for (i = 0; i < len; i++) {
+ ret = af9013_rregs(client, cmd, reg + i, val + i, 1);
+ if (ret)
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ dev_dbg(&client->dev, "failed %d\n", ret);
+ return ret;
+}
+
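
The comment above these helpers notes that the chip wants an extra command byte in front of every transfer, which is why a custom regmap_bus is needed. As a reading aid, here is a sketch of how that byte is composed, mirroring the expressions in af9013_regmap_write()/af9013_regmap_read(); the interpretation of bits 7:6 (firmware/OFSM window) and of bit 1 is inferred from driver usage, not stated by the patch, and the helper itself is not part of the driver:

#include <linux/types.h>

/*
 * Sketch only: bit 0 selects write, bits 5:2 carry burst length - 1,
 * bits 7:6 are set for firmware/OFSM downloads, bit 1 is always set
 * by this driver.
 */
static u8 af9013_cmd_byte(bool write, bool firmware, unsigned int len)
{
	u8 cmd = (len - 1) << 2 | 1 << 1 | (write ? 1 << 0 : 0);

	if (firmware)
		cmd |= 1 << 7 | 1 << 6;

	return cmd;
}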
+static int af9013_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct af9013_state *state;
+ struct af9013_platform_data *pdata = client->dev.platform_data;
+ struct dtv_frontend_properties *c;
+ int ret, i;
+ u8 firmware_version[4];
+ static const struct regmap_bus regmap_bus = {
+ .read = af9013_regmap_read,
+ .write = af9013_regmap_write,
+ };
+ static const struct regmap_config regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ };
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* Setup the state */
+ state->client = client;
+ i2c_set_clientdata(client, state);
+ state->clk = pdata->clk;
+ state->tuner = pdata->tuner;
+ state->if_frequency = pdata->if_frequency;
+ state->ts_mode = pdata->ts_mode;
+ state->ts_output_pin = pdata->ts_output_pin;
+ state->spec_inv = pdata->spec_inv;
+ memcpy(&state->api_version, pdata->api_version, sizeof(state->api_version));
+ memcpy(&state->gpio, pdata->gpio, sizeof(state->gpio));
+ INIT_DELAYED_WORK(&state->statistics_work, af9013_statistics_work);
+ state->regmap = regmap_init(&client->dev, &regmap_bus, client,
+ &regmap_config);
+ if (IS_ERR(state->regmap)) {
+ ret = PTR_ERR(state->regmap);
+ goto err_kfree;
+ }
+
+ /* Download firmware */
+ if (state->ts_mode != AF9013_TS_MODE_USB) {
+ ret = af9013_download_firmware(state);
+ if (ret)
+ goto err_regmap_exit;
+ }
+
+ /* Firmware version */
+ ret = regmap_bulk_read(state->regmap, 0x5103, firmware_version,
+ sizeof(firmware_version));
+ if (ret)
+ goto err_regmap_exit;
+
+ /* Set GPIOs */
+ for (i = 0; i < sizeof(state->gpio); i++) {
+ ret = af9013_set_gpio(state, i, state->gpio[i]);
+ if (ret)
+ goto err_regmap_exit;
+ }
+
+ /* Create dvb frontend */
+ memcpy(&state->fe.ops, &af9013_ops, sizeof(state->fe.ops));
+ if (!pdata->attach_in_use)
+ state->fe.ops.release = NULL;
+ state->fe.demodulator_priv = state;
+
+ /* Setup callbacks */
+ pdata->get_dvb_frontend = af9013_get_dvb_frontend;
+
+ /* Init stats to indicate which stats are supported */
+ c = &state->fe.dtv_property_cache;
+ c->cnr.len = 1;
+
+ dev_info(&client->dev, "Afatech AF9013 successfully attached\n");
+ dev_info(&client->dev, "firmware version: %d.%d.%d.%d\n",
+ firmware_version[0], firmware_version[1],
+ firmware_version[2], firmware_version[3]);
+ return 0;
+err_regmap_exit:
+ regmap_exit(state->regmap);
+err_kfree:
+ kfree(state);
+err:
+ dev_dbg(&client->dev, "failed %d\n", ret);
+ return ret;
+}
+
+static int af9013_remove(struct i2c_client *client)
+{
+ struct af9013_state *state = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "\n");
+
+ /* Stop statistics polling */
+ cancel_delayed_work_sync(&state->statistics_work);
+
+ regmap_exit(state->regmap);
+
+ kfree(state);
+
+ return 0;
+}
+
+static const struct i2c_device_id af9013_id_table[] = {
+ {"af9013", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, af9013_id_table);
+
+static struct i2c_driver af9013_driver = {
+ .driver = {
+ .name = "af9013",
+ .suppress_bind_attrs = true,
+ },
+ .probe = af9013_probe,
+ .remove = af9013_remove,
+ .id_table = af9013_id_table,
+};
+
+module_i2c_driver(af9013_driver);
+
MODULE_AUTHOR("Antti Palosaari <[email protected]>");
MODULE_DESCRIPTION("Afatech AF9013 DVB-T demodulator driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-frontends/af9013.h b/drivers/media/dvb-frontends/af9013.h
index 277112863719..353274524f1b 100644
--- a/drivers/media/dvb-frontends/af9013.h
+++ b/drivers/media/dvb-frontends/af9013.h
@@ -23,29 +23,27 @@
#include <linux/dvb/frontend.h>
-/* AF9013/5 GPIOs (mostly guessed)
- demod#1-gpio#0 - set demod#2 i2c-addr for dual devices
- demod#1-gpio#1 - xtal setting (?)
- demod#1-gpio#3 - tuner#1
- demod#2-gpio#0 - tuner#2
- demod#2-gpio#1 - xtal setting (?)
-*/
-
-struct af9013_config {
- /*
- * I2C address
- */
- u8 i2c_addr;
+/*
+ * I2C address: 0x1c, 0x1d
+ */
+/**
+ * struct af9013_platform_data - Platform data for the af9013 driver
+ * @clk: Clock frequency.
+ * @tuner: Used tuner model.
+ * @if_frequency: IF frequency.
+ * @ts_mode: TS mode.
+ * @ts_output_pin: TS output pin.
+ * @spec_inv: Input spectrum inverted.
+ * @api_version: Firmware API version.
+ * @gpio: GPIOs.
+ * @get_dvb_frontend: Get DVB frontend callback.
+ */
+struct af9013_platform_data {
/*
- * clock
* 20480000, 25000000, 28000000, 28800000
*/
- u32 clock;
-
- /*
- * tuner
- */
+ u32 clk;
#define AF9013_TUNER_MXL5003D 3 /* MaxLinear */
#define AF9013_TUNER_MXL5005D 13 /* MaxLinear */
#define AF9013_TUNER_MXL5005R 30 /* MaxLinear */
@@ -60,33 +58,14 @@ struct af9013_config {
#define AF9013_TUNER_MXL5007T 177 /* MaxLinear */
#define AF9013_TUNER_TDA18218 179 /* NXP */
u8 tuner;
-
- /*
- * IF frequency
- */
u32 if_frequency;
-
- /*
- * TS settings
- */
-#define AF9013_TS_USB 0
-#define AF9013_TS_PARALLEL 1
-#define AF9013_TS_SERIAL 2
- u8 ts_mode:2;
-
- /*
- * input spectrum inversion
- */
+#define AF9013_TS_MODE_USB 0
+#define AF9013_TS_MODE_PARALLEL 1
+#define AF9013_TS_MODE_SERIAL 2
+ u8 ts_mode;
+ u8 ts_output_pin;
bool spec_inv;
-
- /*
- * firmware API version
- */
u8 api_version[4];
-
- /*
- * GPIOs
- */
#define AF9013_GPIO_ON (1 << 0)
#define AF9013_GPIO_EN (1 << 1)
#define AF9013_GPIO_O (1 << 2)
@@ -96,8 +75,29 @@ struct af9013_config {
#define AF9013_GPIO_TUNER_ON (AF9013_GPIO_ON|AF9013_GPIO_EN)
#define AF9013_GPIO_TUNER_OFF (AF9013_GPIO_ON|AF9013_GPIO_EN|AF9013_GPIO_O)
u8 gpio[4];
+
+ struct dvb_frontend* (*get_dvb_frontend)(struct i2c_client *);
+
+/* private: For legacy media attach wrapper. Do not set value. */
+ bool attach_in_use;
+ u8 i2c_addr;
+ u32 clock;
};
+#define af9013_config af9013_platform_data
+#define AF9013_TS_USB AF9013_TS_MODE_USB
+#define AF9013_TS_PARALLEL AF9013_TS_MODE_PARALLEL
+#define AF9013_TS_SERIAL AF9013_TS_MODE_SERIAL
+
+/*
+ * AF9013/5 GPIOs (mostly guessed)
+ * demod#1-gpio#0 - set demod#2 i2c-addr for dual devices
+ * demod#1-gpio#1 - xtal setting (?)
+ * demod#1-gpio#3 - tuner#1
+ * demod#2-gpio#0 - tuner#2
+ * demod#2-gpio#1 - xtal setting (?)
+ */
+
#if IS_REACHABLE(CONFIG_DVB_AF9013)
extern struct dvb_frontend *af9013_attach(const struct af9013_config *config,
struct i2c_adapter *i2c);
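
For reference, a minimal sketch of how a bridge driver might bind the demod through the new I2C client interface instead of af9013_attach(); the clock, tuner, IF and API-version values below are illustrative assumptions, not taken from this patch:

#include <linux/i2c.h>
#include "af9013.h"

static struct dvb_frontend *bridge_bind_af9013(struct i2c_adapter *adap)
{
	struct af9013_platform_data pdata = {
		.clk = 28800000,
		.tuner = AF9013_TUNER_TDA18218,
		.if_frequency = 4570000,
		.ts_mode = AF9013_TS_MODE_USB,
		.ts_output_pin = 7,
		.spec_inv = true,
		.api_version = { 0, 1, 9, 0 },
	};
	struct i2c_board_info info = {
		I2C_BOARD_INFO("af9013", 0x1c),
		.platform_data = &pdata,
	};
	struct i2c_client *client;

	client = i2c_new_device(adap, &info);
	if (!client || !client->dev.driver)
		return NULL;

	/* callback is filled in by af9013_probe() */
	return pdata.get_dvb_frontend(client);
}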
diff --git a/drivers/media/dvb-frontends/af9013_priv.h b/drivers/media/dvb-frontends/af9013_priv.h
index 31d6538abfae..35ca5c9bcacd 100644
--- a/drivers/media/dvb-frontends/af9013_priv.h
+++ b/drivers/media/dvb-frontends/af9013_priv.h
@@ -24,6 +24,8 @@
#include "dvb_frontend.h"
#include "af9013.h"
#include <linux/firmware.h>
+#include <linux/math64.h>
+#include <linux/regmap.h>
#define AF9013_FIRMWARE "dvb-fe-af9013.fw"
diff --git a/drivers/media/dvb-frontends/au8522_common.c b/drivers/media/dvb-frontends/au8522_common.c
index cf4ac240a01f..6722838c3707 100644
--- a/drivers/media/dvb-frontends/au8522_common.c
+++ b/drivers/media/dvb-frontends/au8522_common.c
@@ -234,6 +234,7 @@ int au8522_init(struct dvb_frontend *fe)
chip, so that when it gets powered back up it won't think
that it is already tuned */
state->current_frequency = 0;
+ state->current_modulation = VSB_8;
au8522_writereg(state, 0xa4, 1 << 5);
diff --git a/drivers/media/dvb-frontends/au8522_decoder.c b/drivers/media/dvb-frontends/au8522_decoder.c
index a2e771305008..343dc92ef54e 100644
--- a/drivers/media/dvb-frontends/au8522_decoder.c
+++ b/drivers/media/dvb-frontends/au8522_decoder.c
@@ -17,7 +17,6 @@
/* Developer notes:
*
- * VBI support is not yet working
* Enough is implemented here for CVBS and S-Video inputs, but the actual
* analog demodulator code isn't implemented (not needed for xc5000 since it
* has its own demodulator and outputs CVBS)
@@ -179,42 +178,6 @@ static inline struct au8522_state *to_state(struct v4l2_subdev *sd)
return container_of(sd, struct au8522_state, sd);
}
-static void setup_vbi(struct au8522_state *state, int aud_input)
-{
- int i;
-
- /* These are set to zero regardless of what mode we're in */
- au8522_writereg(state, AU8522_TVDEC_VBI_CTRL_H_REG017H, 0x00);
- au8522_writereg(state, AU8522_TVDEC_VBI_CTRL_L_REG018H, 0x00);
- au8522_writereg(state, AU8522_TVDEC_VBI_USER_TOTAL_BITS_REG019H, 0x00);
- au8522_writereg(state, AU8522_TVDEC_VBI_USER_TUNIT_H_REG01AH, 0x00);
- au8522_writereg(state, AU8522_TVDEC_VBI_USER_TUNIT_L_REG01BH, 0x00);
- au8522_writereg(state, AU8522_TVDEC_VBI_USER_THRESH1_REG01CH, 0x00);
- au8522_writereg(state, AU8522_TVDEC_VBI_USER_FRAME_PAT2_REG01EH, 0x00);
- au8522_writereg(state, AU8522_TVDEC_VBI_USER_FRAME_PAT1_REG01FH, 0x00);
- au8522_writereg(state, AU8522_TVDEC_VBI_USER_FRAME_PAT0_REG020H, 0x00);
- au8522_writereg(state, AU8522_TVDEC_VBI_USER_FRAME_MASK2_REG021H,
- 0x00);
- au8522_writereg(state, AU8522_TVDEC_VBI_USER_FRAME_MASK1_REG022H,
- 0x00);
- au8522_writereg(state, AU8522_TVDEC_VBI_USER_FRAME_MASK0_REG023H,
- 0x00);
-
- /* Setup the VBI registers */
- for (i = 0x30; i < 0x60; i++)
- au8522_writereg(state, i, 0x40);
-
- /* For some reason, every register is 0x40 except register 0x44
- (confirmed via the HVR-950q USB capture) */
- au8522_writereg(state, 0x44, 0x60);
-
- /* Enable VBI (we always do this regardless of whether the user is
- viewing closed caption info) */
- au8522_writereg(state, AU8522_TVDEC_VBI_CTRL_H_REG017H,
- AU8522_TVDEC_VBI_CTRL_H_REG017H_CCON);
-
-}
-
static void setup_decoder_defaults(struct au8522_state *state, bool is_svideo)
{
int i;
@@ -317,8 +280,6 @@ static void setup_decoder_defaults(struct au8522_state *state, bool is_svideo)
AU8522_TOREGAAGC_REG0E5H_CVBS);
au8522_writereg(state, AU8522_REG016H, AU8522_REG016H_CVBS);
- setup_vbi(state, 0);
-
if (is_svideo) {
/* Despite what the table says, for the HVR-950q we still need
to be in CVBS mode for the S-Video input (reason unknown). */
@@ -456,30 +417,29 @@ static void set_audio_input(struct au8522_state *state)
lpfilter_coef[i].reg_val[0]);
}
- /* Setup audio */
- au8522_writereg(state, AU8522_AUDIO_VOLUME_L_REG0F2H, 0x00);
- au8522_writereg(state, AU8522_AUDIO_VOLUME_R_REG0F3H, 0x00);
- au8522_writereg(state, AU8522_AUDIO_VOLUME_REG0F4H, 0x00);
- au8522_writereg(state, AU8522_I2C_CONTROL_REG1_REG091H, 0x80);
- au8522_writereg(state, AU8522_I2C_CONTROL_REG0_REG090H, 0x84);
- msleep(150);
- au8522_writereg(state, AU8522_SYSTEM_MODULE_CONTROL_0_REG0A4H, 0x00);
- msleep(10);
- au8522_writereg(state, AU8522_SYSTEM_MODULE_CONTROL_0_REG0A4H,
- AU8522_SYSTEM_MODULE_CONTROL_0_REG0A4H_CVBS);
- msleep(50);
+ /* Set the volume */
au8522_writereg(state, AU8522_AUDIO_VOLUME_L_REG0F2H, 0x7F);
au8522_writereg(state, AU8522_AUDIO_VOLUME_R_REG0F3H, 0x7F);
au8522_writereg(state, AU8522_AUDIO_VOLUME_REG0F4H, 0xff);
- msleep(80);
- au8522_writereg(state, AU8522_AUDIO_VOLUME_L_REG0F2H, 0x7F);
- au8522_writereg(state, AU8522_AUDIO_VOLUME_R_REG0F3H, 0x7F);
+
+ /* Not sure what this does */
au8522_writereg(state, AU8522_REG0F9H, AU8522_REG0F9H_AUDIO);
+
+ /* Setup the audio mode to stereo DBX */
au8522_writereg(state, AU8522_AUDIO_MODE_REG0F1H, 0x82);
msleep(70);
- au8522_writereg(state, AU8522_SYSTEM_MODULE_CONTROL_1_REG0A5H, 0x09);
+
+ /* Start the audio processing module */
+ au8522_writereg(state, AU8522_SYSTEM_MODULE_CONTROL_0_REG0A4H, 0x9d);
+
+	/* Set the audio frequency to 48 kHz */
au8522_writereg(state, AU8522_AUDIOFREQ_REG606H, 0x03);
+
+	/* Set the I2S parameters (WS, LSB, mode, sample rate) */
au8522_writereg(state, AU8522_I2S_CTRL_2_REG112H, 0xc2);
+
+ /* Enable the I2S output */
+ au8522_writereg(state, AU8522_SYSTEM_MODULE_CONTROL_1_REG0A5H, 0x09);
}
/* ----------------------------------------------------------------------- */
@@ -663,10 +623,12 @@ static int au8522_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
int val = 0;
struct au8522_state *state = to_state(sd);
u8 lock_status;
+ u8 pll_status;
/* Interrogate the decoder to see if we are getting a real signal */
lock_status = au8522_readreg(state, 0x00);
- if (lock_status == 0xa2)
+ pll_status = au8522_readreg(state, 0x7e);
+ if ((lock_status == 0xa2) && (pll_status & 0x10))
vt->signal = 0xffff;
else
vt->signal = 0x00;
diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c
index 7ed326e43fc4..3f3635f5a06a 100644
--- a/drivers/media/dvb-frontends/au8522_dig.c
+++ b/drivers/media/dvb-frontends/au8522_dig.c
@@ -271,9 +271,9 @@ static int au8522_set_if(struct dvb_frontend *fe, enum au8522_if_freq if_freq)
return -EINVAL;
}
dprintk("%s() %s MHz\n", __func__, ifmhz);
- au8522_writereg(state, 0x80b5, r0b5);
- au8522_writereg(state, 0x80b6, r0b6);
- au8522_writereg(state, 0x80b7, r0b7);
+ au8522_writereg(state, 0x00b5, r0b5);
+ au8522_writereg(state, 0x00b6, r0b6);
+ au8522_writereg(state, 0x00b7, r0b7);
return 0;
}
@@ -283,33 +283,32 @@ static struct {
u16 reg;
u16 data;
} VSB_mod_tab[] = {
- { 0x8090, 0x84 },
- { 0x4092, 0x11 },
+ { 0x0090, 0x84 },
{ 0x2005, 0x00 },
- { 0x8091, 0x80 },
- { 0x80a3, 0x0c },
- { 0x80a4, 0xe8 },
- { 0x8081, 0xc4 },
- { 0x80a5, 0x40 },
- { 0x80a7, 0x40 },
- { 0x80a6, 0x67 },
- { 0x8262, 0x20 },
- { 0x821c, 0x30 },
- { 0x80d8, 0x1a },
- { 0x8227, 0xa0 },
- { 0x8121, 0xff },
- { 0x80a8, 0xf0 },
- { 0x80a9, 0x05 },
- { 0x80aa, 0x77 },
- { 0x80ab, 0xf0 },
- { 0x80ac, 0x05 },
- { 0x80ad, 0x77 },
- { 0x80ae, 0x41 },
- { 0x80af, 0x66 },
- { 0x821b, 0xcc },
- { 0x821d, 0x80 },
- { 0x80a4, 0xe8 },
- { 0x8231, 0x13 },
+ { 0x0091, 0x80 },
+ { 0x00a3, 0x0c },
+ { 0x00a4, 0xe8 },
+ { 0x0081, 0xc4 },
+ { 0x00a5, 0x40 },
+ { 0x00a7, 0x40 },
+ { 0x00a6, 0x67 },
+ { 0x0262, 0x20 },
+ { 0x021c, 0x30 },
+ { 0x00d8, 0x1a },
+ { 0x0227, 0xa0 },
+ { 0x0121, 0xff },
+ { 0x00a8, 0xf0 },
+ { 0x00a9, 0x05 },
+ { 0x00aa, 0x77 },
+ { 0x00ab, 0xf0 },
+ { 0x00ac, 0x05 },
+ { 0x00ad, 0x77 },
+ { 0x00ae, 0x41 },
+ { 0x00af, 0x66 },
+ { 0x021b, 0xcc },
+ { 0x021d, 0x80 },
+ { 0x00a4, 0xe8 },
+ { 0x0231, 0x13 },
};
/* QAM64 Modulation table */
@@ -396,78 +395,78 @@ static struct {
u16 reg;
u16 data;
} QAM256_mod_tab[] = {
- { 0x80a3, 0x09 },
- { 0x80a4, 0x00 },
- { 0x8081, 0xc4 },
- { 0x80a5, 0x40 },
- { 0x80aa, 0x77 },
- { 0x80ad, 0x77 },
- { 0x80a6, 0x67 },
- { 0x8262, 0x20 },
- { 0x821c, 0x30 },
- { 0x80b8, 0x3e },
- { 0x80b9, 0xf0 },
- { 0x80ba, 0x01 },
- { 0x80bb, 0x18 },
- { 0x80bc, 0x50 },
- { 0x80bd, 0x00 },
- { 0x80be, 0xea },
- { 0x80bf, 0xef },
- { 0x80c0, 0xfc },
- { 0x80c1, 0xbd },
- { 0x80c2, 0x1f },
- { 0x80c3, 0xfc },
- { 0x80c4, 0xdd },
- { 0x80c5, 0xaf },
- { 0x80c6, 0x00 },
- { 0x80c7, 0x38 },
- { 0x80c8, 0x30 },
- { 0x80c9, 0x05 },
- { 0x80ca, 0x4a },
- { 0x80cb, 0xd0 },
- { 0x80cc, 0x01 },
- { 0x80cd, 0xd9 },
- { 0x80ce, 0x6f },
- { 0x80cf, 0xf9 },
- { 0x80d0, 0x70 },
- { 0x80d1, 0xdf },
- { 0x80d2, 0xf7 },
- { 0x80d3, 0xc2 },
- { 0x80d4, 0xdf },
- { 0x80d5, 0x02 },
- { 0x80d6, 0x9a },
- { 0x80d7, 0xd0 },
- { 0x8250, 0x0d },
- { 0x8251, 0xcd },
- { 0x8252, 0xe0 },
- { 0x8253, 0x05 },
- { 0x8254, 0xa7 },
- { 0x8255, 0xff },
- { 0x8256, 0xed },
- { 0x8257, 0x5b },
- { 0x8258, 0xae },
- { 0x8259, 0xe6 },
- { 0x825a, 0x3d },
- { 0x825b, 0x0f },
- { 0x825c, 0x0d },
- { 0x825d, 0xea },
- { 0x825e, 0xf2 },
- { 0x825f, 0x51 },
- { 0x8260, 0xf5 },
- { 0x8261, 0x06 },
- { 0x821a, 0x00 },
- { 0x8546, 0x40 },
- { 0x8210, 0x26 },
- { 0x8211, 0xf6 },
- { 0x8212, 0x84 },
- { 0x8213, 0x02 },
- { 0x8502, 0x01 },
- { 0x8121, 0x04 },
- { 0x8122, 0x04 },
- { 0x852e, 0x10 },
- { 0x80a4, 0xca },
- { 0x80a7, 0x40 },
- { 0x8526, 0x01 },
+ { 0x00a3, 0x09 },
+ { 0x00a4, 0x00 },
+ { 0x0081, 0xc4 },
+ { 0x00a5, 0x40 },
+ { 0x00aa, 0x77 },
+ { 0x00ad, 0x77 },
+ { 0x00a6, 0x67 },
+ { 0x0262, 0x20 },
+ { 0x021c, 0x30 },
+ { 0x00b8, 0x3e },
+ { 0x00b9, 0xf0 },
+ { 0x00ba, 0x01 },
+ { 0x00bb, 0x18 },
+ { 0x00bc, 0x50 },
+ { 0x00bd, 0x00 },
+ { 0x00be, 0xea },
+ { 0x00bf, 0xef },
+ { 0x00c0, 0xfc },
+ { 0x00c1, 0xbd },
+ { 0x00c2, 0x1f },
+ { 0x00c3, 0xfc },
+ { 0x00c4, 0xdd },
+ { 0x00c5, 0xaf },
+ { 0x00c6, 0x00 },
+ { 0x00c7, 0x38 },
+ { 0x00c8, 0x30 },
+ { 0x00c9, 0x05 },
+ { 0x00ca, 0x4a },
+ { 0x00cb, 0xd0 },
+ { 0x00cc, 0x01 },
+ { 0x00cd, 0xd9 },
+ { 0x00ce, 0x6f },
+ { 0x00cf, 0xf9 },
+ { 0x00d0, 0x70 },
+ { 0x00d1, 0xdf },
+ { 0x00d2, 0xf7 },
+ { 0x00d3, 0xc2 },
+ { 0x00d4, 0xdf },
+ { 0x00d5, 0x02 },
+ { 0x00d6, 0x9a },
+ { 0x00d7, 0xd0 },
+ { 0x0250, 0x0d },
+ { 0x0251, 0xcd },
+ { 0x0252, 0xe0 },
+ { 0x0253, 0x05 },
+ { 0x0254, 0xa7 },
+ { 0x0255, 0xff },
+ { 0x0256, 0xed },
+ { 0x0257, 0x5b },
+ { 0x0258, 0xae },
+ { 0x0259, 0xe6 },
+ { 0x025a, 0x3d },
+ { 0x025b, 0x0f },
+ { 0x025c, 0x0d },
+ { 0x025d, 0xea },
+ { 0x025e, 0xf2 },
+ { 0x025f, 0x51 },
+ { 0x0260, 0xf5 },
+ { 0x0261, 0x06 },
+ { 0x021a, 0x00 },
+ { 0x0546, 0x40 },
+ { 0x0210, 0x26 },
+ { 0x0211, 0xf6 },
+ { 0x0212, 0x84 },
+ { 0x0213, 0x02 },
+ { 0x0502, 0x01 },
+ { 0x0121, 0x04 },
+ { 0x0122, 0x04 },
+ { 0x052e, 0x10 },
+ { 0x00a4, 0xca },
+ { 0x00a7, 0x40 },
+ { 0x0526, 0x01 },
};
static struct {
@@ -654,12 +653,12 @@ static int au8522_read_status(struct dvb_frontend *fe, enum fe_status *status)
if (state->current_modulation == VSB_8) {
dprintk("%s() Checking VSB_8\n", __func__);
- reg = au8522_readreg(state, 0x4088);
+ reg = au8522_readreg(state, 0x0088);
if ((reg & 0x03) == 0x03)
*status |= FE_HAS_LOCK | FE_HAS_SYNC | FE_HAS_VITERBI;
} else {
dprintk("%s() Checking QAM\n", __func__);
- reg = au8522_readreg(state, 0x4541);
+ reg = au8522_readreg(state, 0x0541);
if (reg & 0x80)
*status |= FE_HAS_VITERBI;
if (reg & 0x20)
@@ -745,17 +744,17 @@ static int au8522_read_snr(struct dvb_frontend *fe, u16 *snr)
if (state->current_modulation == QAM_256)
ret = au8522_mse2snr_lookup(qam256_mse2snr_tab,
ARRAY_SIZE(qam256_mse2snr_tab),
- au8522_readreg(state, 0x4522),
+ au8522_readreg(state, 0x0522),
snr);
else if (state->current_modulation == QAM_64)
ret = au8522_mse2snr_lookup(qam64_mse2snr_tab,
ARRAY_SIZE(qam64_mse2snr_tab),
- au8522_readreg(state, 0x4522),
+ au8522_readreg(state, 0x0522),
snr);
else /* VSB_8 */
ret = au8522_mse2snr_lookup(vsb_mse2snr_tab,
ARRAY_SIZE(vsb_mse2snr_tab),
- au8522_readreg(state, 0x4311),
+ au8522_readreg(state, 0x0311),
snr);
if (state->config.led_cfg)
@@ -804,9 +803,9 @@ static int au8522_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
struct au8522_state *state = fe->demodulator_priv;
if (state->current_modulation == VSB_8)
- *ucblocks = au8522_readreg(state, 0x4087);
+ *ucblocks = au8522_readreg(state, 0x0087);
else
- *ucblocks = au8522_readreg(state, 0x4543);
+ *ucblocks = au8522_readreg(state, 0x0543);
return 0;
}
diff --git a/drivers/media/dvb-frontends/bcm3510.c b/drivers/media/dvb-frontends/bcm3510.c
index 617c5e29f919..ba63ad170d3c 100644
--- a/drivers/media/dvb-frontends/bcm3510.c
+++ b/drivers/media/dvb-frontends/bcm3510.c
@@ -538,6 +538,7 @@ static int bcm3510_set_frontend(struct dvb_frontend *fe)
cmd.ACQUIRE0.MODE = 0x9;
cmd.ACQUIRE1.SYM_RATE = 0x0;
cmd.ACQUIRE1.IF_FREQ = 0x0;
+ break;
default:
return -EINVAL;
}
@@ -772,7 +773,8 @@ static int bcm3510_init(struct dvb_frontend* fe)
deb_info("attempting to download firmware\n");
if ((ret = bcm3510_init_cold(st)) < 0)
return ret;
- case JDEC_EEPROM_LOAD_WAIT: /* fall-through is wanted */
+ /* fall-through */
+ case JDEC_EEPROM_LOAD_WAIT:
deb_info("firmware is loaded\n");
bcm3510_check_firmware_version(st);
break;
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index ce37dc2e89c7..08f67d60a7d9 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -38,6 +38,8 @@
#define MAX_WRITE_REGSIZE 16
#define LOG2_E_100X 144
+#define INTLOG10X100(x) ((u32) (((u64) intlog10(x) * 100) >> 24))
+
/* DVB-C constellation */
enum sony_dvbc_constellation_t {
SONY_DVBC_CONSTELLATION_16QAM,
@@ -65,6 +67,7 @@ struct cxd2841er_priv {
u8 system;
enum cxd2841er_xtal xtal;
enum fe_caps caps;
+ u32 flags;
};
static const struct cxd2841er_cnr_data s_cn_data[] = {
@@ -201,11 +204,6 @@ static const struct cxd2841er_cnr_data s2_cn_data[] = {
{ 0x0016, 19700 }, { 0x0015, 19900 }, { 0x0014, 20000 },
};
-#define MAKE_IFFREQ_CONFIG(iffreq) ((u32)(((iffreq)/41.0)*16777216.0 + 0.5))
-#define MAKE_IFFREQ_CONFIG_XTAL(xtal, iffreq) ((xtal == SONY_XTAL_24000) ? \
- (u32)(((iffreq)/48.0)*16777216.0 + 0.5) : \
- (u32)(((iffreq)/41.0)*16777216.0 + 0.5))
-
static int cxd2841er_freeze_regs(struct cxd2841er_priv *priv);
static int cxd2841er_unfreeze_regs(struct cxd2841er_priv *priv);
@@ -214,10 +212,8 @@ static void cxd2841er_i2c_debug(struct cxd2841er_priv *priv,
const u8 *data, u32 len)
{
dev_dbg(&priv->i2c->dev,
- "cxd2841er: I2C %s addr %02x reg 0x%02x size %d\n",
- (write == 0 ? "read" : "write"), addr, reg, len);
- print_hex_dump_bytes("cxd2841er: I2C data: ",
- DUMP_PREFIX_OFFSET, data, len);
+ "cxd2841er: I2C %s addr %02x reg 0x%02x size %d data %*ph\n",
+ (write == 0 ? "read" : "write"), addr, reg, len, len, data);
}
static int cxd2841er_write_regs(struct cxd2841er_priv *priv,
@@ -284,17 +280,8 @@ static int cxd2841er_read_regs(struct cxd2841er_priv *priv,
}
};
- ret = i2c_transfer(priv->i2c, &msg[0], 1);
- if (ret >= 0 && ret != 1)
- ret = -EIO;
- if (ret < 0) {
- dev_warn(&priv->i2c->dev,
- "%s: i2c rw failed=%d addr=%02x reg=%02x\n",
- KBUILD_MODNAME, ret, i2c_addr, reg);
- return ret;
- }
- ret = i2c_transfer(priv->i2c, &msg[1], 1);
- if (ret >= 0 && ret != 1)
+ ret = i2c_transfer(priv->i2c, msg, 2);
+ if (ret >= 0 && ret != 2)
ret = -EIO;
if (ret < 0) {
dev_warn(&priv->i2c->dev,
@@ -327,6 +314,49 @@ static int cxd2841er_set_reg_bits(struct cxd2841er_priv *priv,
return cxd2841er_write_reg(priv, addr, reg, data);
}
+static u32 cxd2841er_calc_iffreq_xtal(enum cxd2841er_xtal xtal, u32 ifhz)
+{
+ u64 tmp;
+
+ tmp = (u64) ifhz * 16777216;
+ do_div(tmp, ((xtal == SONY_XTAL_24000) ? 48000000 : 41000000));
+
+ return (u32) tmp;
+}
+
+static u32 cxd2841er_calc_iffreq(u32 ifhz)
+{
+ return cxd2841er_calc_iffreq_xtal(SONY_XTAL_20500, ifhz);
+}
+
+static int cxd2841er_get_if_hz(struct cxd2841er_priv *priv, u32 def_hz)
+{
+ u32 hz;
+
+ if (priv->frontend.ops.tuner_ops.get_if_frequency
+ && (priv->flags & CXD2841ER_AUTO_IFHZ))
+ priv->frontend.ops.tuner_ops.get_if_frequency(
+ &priv->frontend, &hz);
+ else
+ hz = def_hz;
+
+ return hz;
+}
+
+static int cxd2841er_tuner_set(struct dvb_frontend *fe)
+{
+ struct cxd2841er_priv *priv = fe->demodulator_priv;
+
+ if ((priv->flags & CXD2841ER_USE_GATECTRL) && fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+ if (fe->ops.tuner_ops.set_params)
+ fe->ops.tuner_ops.set_params(fe);
+ if ((priv->flags & CXD2841ER_USE_GATECTRL) && fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 0);
+
+ return 0;
+}
+
static int cxd2841er_dvbs2_set_symbol_rate(struct cxd2841er_priv *priv,
u32 symbol_rate)
{
@@ -884,6 +914,18 @@ static void cxd2841er_set_ts_clock_mode(struct cxd2841er_priv *priv,
/*
* slave Bank Addr Bit default Name
+ * <SLV-T> 00h C4h [1:0] 2'b?? OSERCKMODE
+ */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xc4,
+ ((priv->flags & CXD2841ER_TS_SERIAL) ? 0x01 : 0x00), 0x03);
+ /*
+ * slave Bank Addr Bit default Name
+ * <SLV-T> 00h D1h [1:0] 2'b?? OSERDUTYMODE
+ */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xd1,
+ ((priv->flags & CXD2841ER_TS_SERIAL) ? 0x01 : 0x00), 0x03);
+ /*
+ * slave Bank Addr Bit default Name
* <SLV-T> 00h D9h [7:0] 8'h08 OTSCKPERIOD
*/
cxd2841er_write_reg(priv, I2C_SLVT, 0xd9, 0x08);
@@ -897,7 +939,8 @@ static void cxd2841er_set_ts_clock_mode(struct cxd2841er_priv *priv,
* slave Bank Addr Bit default Name
* <SLV-T> 00h 33h [1:0] 2'b01 OREG_CKSEL_TSIF
*/
- cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x33, 0x00, 0x03);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x33,
+ ((priv->flags & CXD2841ER_TS_SERIAL) ? 0x01 : 0x00), 0x03);
/*
* Enable TS IF Clock
* slave Bank Addr Bit default Name
@@ -1421,11 +1464,11 @@ static int cxd2841er_read_ber_i(struct cxd2841er_priv *priv,
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x60);
cxd2841er_read_regs(priv, I2C_SLVT, 0x5B, pktnum, sizeof(pktnum));
cxd2841er_read_regs(priv, I2C_SLVT, 0x16, data, sizeof(data));
+ cxd2841er_unfreeze_regs(priv);
if (!pktnum[0] && !pktnum[1]) {
dev_dbg(&priv->i2c->dev,
"%s(): no valid BER data\n", __func__);
- cxd2841er_unfreeze_regs(priv);
return -EINVAL;
}
@@ -1435,7 +1478,6 @@ static int cxd2841er_read_ber_i(struct cxd2841er_priv *priv,
dev_dbg(&priv->i2c->dev, "%s(): bit_error=%u bit_count=%u\n",
__func__, *bit_error, *bit_count);
- cxd2841er_unfreeze_regs(priv);
return 0;
}
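
This hunk and the SNR hunks that follow apply the same refactoring: the registers are sampled while frozen, cxd2841er_unfreeze_regs() is called once, immediately after the reads, and the individual error paths drop their own unfreeze calls, so no return can leave the demod frozen. An illustrative sketch of the resulting shape, using the helpers declared earlier in the file but not copying any real function:

/* Sketch only: unfreeze right after the snapshot so early returns need
 * no cleanup of their own.
 */
static int demo_read_counters(struct cxd2841er_priv *priv, u8 *buf, u32 len)
{
	cxd2841er_freeze_regs(priv);	/* latch a consistent snapshot */
	cxd2841er_read_regs(priv, I2C_SLVT, 0x16, buf, len);
	cxd2841er_unfreeze_regs(priv);	/* release before any return */

	if (!buf[0] && !buf[1])
		return -EINVAL;		/* nothing valid was latched */
	return 0;
}
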
@@ -1645,6 +1687,8 @@ static u32 cxd2841er_dvbs_read_snr(struct cxd2841er_priv *priv,
* <SLV-T> A1h 12h [7:0] ICPM_QUICKCNDT[7:0]
*/
cxd2841er_read_regs(priv, I2C_SLVT, 0x10, data, 3);
+ cxd2841er_unfreeze_regs(priv);
+
if (data[0] & 0x01) {
value = ((u32)(data[1] & 0x1F) << 8) | (u32)(data[2] & 0xFF);
min_index = 0;
@@ -1687,11 +1731,9 @@ static u32 cxd2841er_dvbs_read_snr(struct cxd2841er_priv *priv,
} else {
dev_dbg(&priv->i2c->dev,
"%s(): no data available\n", __func__);
- cxd2841er_unfreeze_regs(priv);
return -EINVAL;
}
done:
- cxd2841er_unfreeze_regs(priv);
*snr = res;
return 0;
}
@@ -1720,12 +1762,12 @@ static int cxd2841er_read_snr_c(struct cxd2841er_priv *priv, u32 *snr)
cxd2841er_read_regs(priv, I2C_SLVT, 0x19, data, 1);
qam = (enum sony_dvbc_constellation_t) (data[0] & 0x07);
cxd2841er_read_regs(priv, I2C_SLVT, 0x4C, data, 2);
+ cxd2841er_unfreeze_regs(priv);
reg = ((u32)(data[0]&0x1f) << 8) | (u32)data[1];
if (reg == 0) {
dev_dbg(&priv->i2c->dev,
"%s(): reg value out of range\n", __func__);
- cxd2841er_unfreeze_regs(priv);
return 0;
}
@@ -1746,11 +1788,9 @@ static int cxd2841er_read_snr_c(struct cxd2841er_priv *priv, u32 *snr)
*snr = -88 * (int32_t)sony_log(reg) + 86999;
break;
default:
- cxd2841er_unfreeze_regs(priv);
return -EINVAL;
}
- cxd2841er_unfreeze_regs(priv);
return 0;
}
@@ -1769,17 +1809,17 @@ static int cxd2841er_read_snr_t(struct cxd2841er_priv *priv, u32 *snr)
cxd2841er_freeze_regs(priv);
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
cxd2841er_read_regs(priv, I2C_SLVT, 0x28, data, sizeof(data));
+ cxd2841er_unfreeze_regs(priv);
+
reg = ((u32)data[0] << 8) | (u32)data[1];
if (reg == 0) {
dev_dbg(&priv->i2c->dev,
"%s(): reg value out of range\n", __func__);
- cxd2841er_unfreeze_regs(priv);
return 0;
}
if (reg > 4996)
reg = 4996;
- *snr = 10000 * ((intlog10(reg) - intlog10(5350 - reg)) >> 24) + 28500;
- cxd2841er_unfreeze_regs(priv);
+ *snr = 100 * ((INTLOG10X100(reg) - INTLOG10X100(5350 - reg)) + 285);
return 0;
}
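
The old conversion shifted the intlog10() result down by 24 bits before the final scaling, which throws away the fractional decibels; the new code keeps two decimal places through an INTLOG10X100() helper whose definition is not part of this hunk. Assuming it wraps dvb_math's intlog10() as sketched below, the DVB-T formula is numerically unchanged, snr = 10*log10(reg / (5350 - reg)) + 28.5 dB reported in units of 0.001 dB, only with 0.01 dB instead of 1 dB resolution feeding the final multiply:

#include "dvb_math.h"	/* intlog10(x): log10(x) scaled by 2^24 */

/* Assumed shape of the helper used above (not shown in this diff):
 * 100 * log10(x) as a plain integer, computed in 64 bits so the
 * multiply cannot overflow before the scale factor is shifted out.
 */
#define INTLOG10X100(x) ((u32)(((u64)intlog10(x) * 100) >> 24))
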
@@ -1798,18 +1838,17 @@ static int cxd2841er_read_snr_t2(struct cxd2841er_priv *priv, u32 *snr)
cxd2841er_freeze_regs(priv);
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x20);
cxd2841er_read_regs(priv, I2C_SLVT, 0x28, data, sizeof(data));
+ cxd2841er_unfreeze_regs(priv);
+
reg = ((u32)data[0] << 8) | (u32)data[1];
if (reg == 0) {
dev_dbg(&priv->i2c->dev,
"%s(): reg value out of range\n", __func__);
- cxd2841er_unfreeze_regs(priv);
return 0;
}
if (reg > 10876)
reg = 10876;
- *snr = 10000 * ((intlog10(reg) -
- intlog10(12600 - reg)) >> 24) + 32000;
- cxd2841er_unfreeze_regs(priv);
+ *snr = 100 * ((INTLOG10X100(reg) - INTLOG10X100(12600 - reg)) + 320);
return 0;
}
@@ -1829,15 +1868,15 @@ static int cxd2841er_read_snr_i(struct cxd2841er_priv *priv, u32 *snr)
cxd2841er_freeze_regs(priv);
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x60);
cxd2841er_read_regs(priv, I2C_SLVT, 0x28, data, sizeof(data));
+ cxd2841er_unfreeze_regs(priv);
+
reg = ((u32)data[0] << 8) | (u32)data[1];
if (reg == 0) {
dev_dbg(&priv->i2c->dev,
"%s(): reg value out of range\n", __func__);
- cxd2841er_unfreeze_regs(priv);
return 0;
}
*snr = 10000 * (intlog10(reg) >> 24) - 9031;
- cxd2841er_unfreeze_regs(priv);
return 0;
}
@@ -2136,7 +2175,7 @@ static int cxd2841er_dvbt2_set_plp_config(struct cxd2841er_priv *priv,
static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv,
u32 bandwidth)
{
- u32 iffreq;
+ u32 iffreq, ifhz;
u8 data[MAX_WRITE_REGSIZE];
const uint8_t nominalRate8bw[3][5] = {
@@ -2239,10 +2278,12 @@ static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv,
/* Group delay equaliser settings for
* ASCOT2D, ASCOT2E and ASCOT3 tuners
*/
- cxd2841er_write_regs(priv, I2C_SLVT,
+ if (priv->flags & CXD2841ER_ASCOT)
+ cxd2841er_write_regs(priv, I2C_SLVT,
0xA6, itbCoef8bw[priv->xtal], 14);
/* <IF freq setting> */
- iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 4.80);
+ ifhz = cxd2841er_get_if_hz(priv, 4800000);
+ iffreq = cxd2841er_calc_iffreq_xtal(priv->xtal, ifhz);
data[0] = (u8) ((iffreq >> 16) & 0xff);
data[1] = (u8)((iffreq >> 8) & 0xff);
data[2] = (u8)(iffreq & 0xff);
@@ -2267,10 +2308,12 @@ static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv,
/* Group delay equaliser settings for
* ASCOT2D, ASCOT2E and ASCOT3 tuners
*/
- cxd2841er_write_regs(priv, I2C_SLVT,
+ if (priv->flags & CXD2841ER_ASCOT)
+ cxd2841er_write_regs(priv, I2C_SLVT,
0xA6, itbCoef7bw[priv->xtal], 14);
/* <IF freq setting> */
- iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 4.20);
+ ifhz = cxd2841er_get_if_hz(priv, 4200000);
+ iffreq = cxd2841er_calc_iffreq_xtal(priv->xtal, ifhz);
data[0] = (u8) ((iffreq >> 16) & 0xff);
data[1] = (u8)((iffreq >> 8) & 0xff);
data[2] = (u8)(iffreq & 0xff);
@@ -2295,10 +2338,12 @@ static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv,
/* Group delay equaliser settings for
* ASCOT2D, ASCOT2E and ASCOT3 tuners
*/
- cxd2841er_write_regs(priv, I2C_SLVT,
+ if (priv->flags & CXD2841ER_ASCOT)
+ cxd2841er_write_regs(priv, I2C_SLVT,
0xA6, itbCoef6bw[priv->xtal], 14);
/* <IF freq setting> */
- iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 3.60);
+ ifhz = cxd2841er_get_if_hz(priv, 3600000);
+ iffreq = cxd2841er_calc_iffreq_xtal(priv->xtal, ifhz);
data[0] = (u8) ((iffreq >> 16) & 0xff);
data[1] = (u8)((iffreq >> 8) & 0xff);
data[2] = (u8)(iffreq & 0xff);
@@ -2323,10 +2368,12 @@ static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv,
/* Group delay equaliser settings for
* ASCOT2D, ASCOT2E and ASCOT3 tuners
*/
- cxd2841er_write_regs(priv, I2C_SLVT,
+ if (priv->flags & CXD2841ER_ASCOT)
+ cxd2841er_write_regs(priv, I2C_SLVT,
0xA6, itbCoef5bw[priv->xtal], 14);
/* <IF freq setting> */
- iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 3.60);
+ ifhz = cxd2841er_get_if_hz(priv, 3600000);
+ iffreq = cxd2841er_calc_iffreq_xtal(priv->xtal, ifhz);
data[0] = (u8) ((iffreq >> 16) & 0xff);
data[1] = (u8)((iffreq >> 8) & 0xff);
data[2] = (u8)(iffreq & 0xff);
@@ -2351,10 +2398,12 @@ static int cxd2841er_sleep_tc_to_active_t2_band(struct cxd2841er_priv *priv,
/* Group delay equaliser settings for
* ASCOT2D, ASCOT2E and ASCOT3 tuners
*/
- cxd2841er_write_regs(priv, I2C_SLVT,
+ if (priv->flags & CXD2841ER_ASCOT)
+ cxd2841er_write_regs(priv, I2C_SLVT,
0xA6, itbCoef17bw[priv->xtal], 14);
/* <IF freq setting> */
- iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 3.50);
+ ifhz = cxd2841er_get_if_hz(priv, 3500000);
+ iffreq = cxd2841er_calc_iffreq_xtal(priv->xtal, ifhz);
data[0] = (u8) ((iffreq >> 16) & 0xff);
data[1] = (u8)((iffreq >> 8) & 0xff);
data[2] = (u8)(iffreq & 0xff);
@@ -2373,7 +2422,7 @@ static int cxd2841er_sleep_tc_to_active_t_band(
struct cxd2841er_priv *priv, u32 bandwidth)
{
u8 data[MAX_WRITE_REGSIZE];
- u32 iffreq;
+ u32 iffreq, ifhz;
u8 nominalRate8bw[3][5] = {
/* TRCG Nominal Rate [37:0] */
{0x11, 0xF0, 0x00, 0x00, 0x00}, /* 20.5MHz XTal */
@@ -2450,10 +2499,12 @@ static int cxd2841er_sleep_tc_to_active_t_band(
/* Group delay equaliser settings for
* ASCOT2D, ASCOT2E and ASCOT3 tuners
*/
- cxd2841er_write_regs(priv, I2C_SLVT,
+ if (priv->flags & CXD2841ER_ASCOT)
+ cxd2841er_write_regs(priv, I2C_SLVT,
0xA6, itbCoef8bw[priv->xtal], 14);
/* <IF freq setting> */
- iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 4.80);
+ ifhz = cxd2841er_get_if_hz(priv, 4800000);
+ iffreq = cxd2841er_calc_iffreq_xtal(priv->xtal, ifhz);
data[0] = (u8) ((iffreq >> 16) & 0xff);
data[1] = (u8)((iffreq >> 8) & 0xff);
data[2] = (u8)(iffreq & 0xff);
@@ -2485,10 +2536,12 @@ static int cxd2841er_sleep_tc_to_active_t_band(
/* Group delay equaliser settings for
* ASCOT2D, ASCOT2E and ASCOT3 tuners
*/
- cxd2841er_write_regs(priv, I2C_SLVT,
+ if (priv->flags & CXD2841ER_ASCOT)
+ cxd2841er_write_regs(priv, I2C_SLVT,
0xA6, itbCoef7bw[priv->xtal], 14);
/* <IF freq setting> */
- iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 4.20);
+ ifhz = cxd2841er_get_if_hz(priv, 4200000);
+ iffreq = cxd2841er_calc_iffreq_xtal(priv->xtal, ifhz);
data[0] = (u8) ((iffreq >> 16) & 0xff);
data[1] = (u8)((iffreq >> 8) & 0xff);
data[2] = (u8)(iffreq & 0xff);
@@ -2520,10 +2573,12 @@ static int cxd2841er_sleep_tc_to_active_t_band(
/* Group delay equaliser settings for
* ASCOT2D, ASCOT2E and ASCOT3 tuners
*/
- cxd2841er_write_regs(priv, I2C_SLVT,
+ if (priv->flags & CXD2841ER_ASCOT)
+ cxd2841er_write_regs(priv, I2C_SLVT,
0xA6, itbCoef6bw[priv->xtal], 14);
/* <IF freq setting> */
- iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 3.60);
+ ifhz = cxd2841er_get_if_hz(priv, 3600000);
+ iffreq = cxd2841er_calc_iffreq_xtal(priv->xtal, ifhz);
data[0] = (u8) ((iffreq >> 16) & 0xff);
data[1] = (u8)((iffreq >> 8) & 0xff);
data[2] = (u8)(iffreq & 0xff);
@@ -2555,10 +2610,12 @@ static int cxd2841er_sleep_tc_to_active_t_band(
/* Group delay equaliser settings for
* ASCOT2D, ASCOT2E and ASCOT3 tuners
*/
- cxd2841er_write_regs(priv, I2C_SLVT,
+ if (priv->flags & CXD2841ER_ASCOT)
+ cxd2841er_write_regs(priv, I2C_SLVT,
0xA6, itbCoef5bw[priv->xtal], 14);
/* <IF freq setting> */
- iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 3.60);
+ ifhz = cxd2841er_get_if_hz(priv, 3600000);
+ iffreq = cxd2841er_calc_iffreq_xtal(priv->xtal, ifhz);
data[0] = (u8) ((iffreq >> 16) & 0xff);
data[1] = (u8)((iffreq >> 8) & 0xff);
data[2] = (u8)(iffreq & 0xff);
@@ -2591,7 +2648,7 @@ static int cxd2841er_sleep_tc_to_active_t_band(
static int cxd2841er_sleep_tc_to_active_i_band(
struct cxd2841er_priv *priv, u32 bandwidth)
{
- u32 iffreq;
+ u32 iffreq, ifhz;
u8 data[3];
/* TRCG Nominal Rate */
@@ -2656,11 +2713,13 @@ static int cxd2841er_sleep_tc_to_active_i_band(
cxd2841er_write_regs(priv, I2C_SLVT,
0x9F, nominalRate8bw[priv->xtal], 5);
/* Group delay equaliser settings for ASCOT tuners optimized */
- cxd2841er_write_regs(priv, I2C_SLVT,
+ if (priv->flags & CXD2841ER_ASCOT)
+ cxd2841er_write_regs(priv, I2C_SLVT,
0xA6, itbCoef8bw[priv->xtal], 14);
/* IF freq setting */
- iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 4.75);
+ ifhz = cxd2841er_get_if_hz(priv, 4750000);
+ iffreq = cxd2841er_calc_iffreq_xtal(priv->xtal, ifhz);
data[0] = (u8) ((iffreq >> 16) & 0xff);
data[1] = (u8)((iffreq >> 8) & 0xff);
data[2] = (u8)(iffreq & 0xff);
@@ -2685,11 +2744,13 @@ static int cxd2841er_sleep_tc_to_active_i_band(
cxd2841er_write_regs(priv, I2C_SLVT,
0x9F, nominalRate7bw[priv->xtal], 5);
/* Group delay equaliser settings for ASCOT tuners optimized */
- cxd2841er_write_regs(priv, I2C_SLVT,
+ if (priv->flags & CXD2841ER_ASCOT)
+ cxd2841er_write_regs(priv, I2C_SLVT,
0xA6, itbCoef7bw[priv->xtal], 14);
/* IF freq setting */
- iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 4.15);
+ ifhz = cxd2841er_get_if_hz(priv, 4150000);
+ iffreq = cxd2841er_calc_iffreq_xtal(priv->xtal, ifhz);
data[0] = (u8) ((iffreq >> 16) & 0xff);
data[1] = (u8)((iffreq >> 8) & 0xff);
data[2] = (u8)(iffreq & 0xff);
@@ -2714,11 +2775,13 @@ static int cxd2841er_sleep_tc_to_active_i_band(
cxd2841er_write_regs(priv, I2C_SLVT,
0x9F, nominalRate6bw[priv->xtal], 5);
/* Group delay equaliser settings for ASCOT tuners optimized */
- cxd2841er_write_regs(priv, I2C_SLVT,
+ if (priv->flags & CXD2841ER_ASCOT)
+ cxd2841er_write_regs(priv, I2C_SLVT,
0xA6, itbCoef6bw[priv->xtal], 14);
/* IF freq setting */
- iffreq = MAKE_IFFREQ_CONFIG_XTAL(priv->xtal, 3.55);
+ ifhz = cxd2841er_get_if_hz(priv, 3550000);
+ iffreq = cxd2841er_calc_iffreq_xtal(priv->xtal, ifhz);
data[0] = (u8) ((iffreq >> 16) & 0xff);
data[1] = (u8)((iffreq >> 8) & 0xff);
data[2] = (u8)(iffreq & 0xff);
@@ -2761,7 +2824,7 @@ static int cxd2841er_sleep_tc_to_active_c_band(struct cxd2841er_priv *priv,
0x27, 0xA7, 0x28, 0xB3, 0x02, 0xF0, 0x01, 0xE8,
0x00, 0xCF, 0x00, 0xE6, 0x23, 0xA4 };
u8 b10_b6[3];
- u32 iffreq;
+ u32 iffreq, ifhz;
if (bandwidth != 6000000 &&
bandwidth != 7000000 &&
@@ -2776,16 +2839,20 @@ static int cxd2841er_sleep_tc_to_active_c_band(struct cxd2841er_priv *priv,
switch (bandwidth) {
case 8000000:
case 7000000:
- cxd2841er_write_regs(
- priv, I2C_SLVT, 0xa6,
- bw7_8mhz_b10_a6, sizeof(bw7_8mhz_b10_a6));
- iffreq = MAKE_IFFREQ_CONFIG(4.9);
+ if (priv->flags & CXD2841ER_ASCOT)
+ cxd2841er_write_regs(
+ priv, I2C_SLVT, 0xa6,
+ bw7_8mhz_b10_a6, sizeof(bw7_8mhz_b10_a6));
+ ifhz = cxd2841er_get_if_hz(priv, 4900000);
+ iffreq = cxd2841er_calc_iffreq(ifhz);
break;
case 6000000:
- cxd2841er_write_regs(
- priv, I2C_SLVT, 0xa6,
- bw6mhz_b10_a6, sizeof(bw6mhz_b10_a6));
- iffreq = MAKE_IFFREQ_CONFIG(3.7);
+ if (priv->flags & CXD2841ER_ASCOT)
+ cxd2841er_write_regs(
+ priv, I2C_SLVT, 0xa6,
+ bw6mhz_b10_a6, sizeof(bw6mhz_b10_a6));
+ ifhz = cxd2841er_get_if_hz(priv, 3700000);
+ iffreq = cxd2841er_calc_iffreq(ifhz);
break;
default:
dev_err(&priv->i2c->dev, "%s(): unsupported bandwidth %d\n",
@@ -2872,8 +2939,9 @@ static int cxd2841er_sleep_tc_to_active_t(struct cxd2841er_priv *priv,
cxd2841er_write_reg(priv, I2C_SLVT, 0x6a, 0x50);
/* Set SLV-T Bank : 0x10 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
- /* ASCOT setting ON */
- cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xa5, 0x01, 0x01);
+ /* ASCOT setting */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xa5,
+ ((priv->flags & CXD2841ER_ASCOT) ? 0x01 : 0x00), 0x01);
/* Set SLV-T Bank : 0x18 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x18);
/* Pre-RS BER moniter setting */
@@ -2950,8 +3018,9 @@ static int cxd2841er_sleep_tc_to_active_t2(struct cxd2841er_priv *priv,
cxd2841er_write_reg(priv, I2C_SLVT, 0x6a, 0x50);
/* Set SLV-T Bank : 0x10 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
- /* ASCOT setting ON */
- cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xa5, 0x01, 0x01);
+ /* ASCOT setting */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xa5,
+ ((priv->flags & CXD2841ER_ASCOT) ? 0x01 : 0x00), 0x01);
/* Set SLV-T Bank : 0x20 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x20);
/* Acquisition optimization setting */
@@ -3088,8 +3157,9 @@ static int cxd2841er_sleep_tc_to_active_i(struct cxd2841er_priv *priv,
cxd2841er_write_regs(priv, I2C_SLVT, 0x43, data, 2);
/* Enable ADC 4 */
cxd2841er_write_reg(priv, I2C_SLVX, 0x18, 0x00);
- /* ASCOT setting ON */
- cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xa5, 0x01, 0x01);
+ /* ASCOT setting */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xa5,
+ ((priv->flags & CXD2841ER_ASCOT) ? 0x01 : 0x00), 0x01);
/* FEC Auto Recovery setting */
cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x30, 0x01, 0x01);
cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x31, 0x00, 0x01);
@@ -3173,8 +3243,9 @@ static int cxd2841er_sleep_tc_to_active_c(struct cxd2841er_priv *priv,
cxd2841er_write_reg(priv, I2C_SLVT, 0x6a, 0x48);
/* Set SLV-T Bank : 0x10 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
- /* ASCOT setting ON */
- cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xa5, 0x01, 0x01);
+ /* ASCOT setting */
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xa5,
+ ((priv->flags & CXD2841ER_ASCOT) ? 0x01 : 0x00), 0x01);
/* Set SLV-T Bank : 0x40 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x40);
/* Demod setting */
@@ -3236,6 +3307,10 @@ static int cxd2841er_set_frontend_s(struct dvb_frontend *fe)
__func__,
(p->delivery_system == SYS_DVBS ? "DVB-S" : "DVB-S2"),
p->frequency, symbol_rate, priv->xtal);
+
+ if (priv->flags & CXD2841ER_EARLY_TUNE)
+ cxd2841er_tuner_set(fe);
+
switch (priv->state) {
case STATE_SLEEP_S:
ret = cxd2841er_sleep_s_to_active_s(
@@ -3254,12 +3329,10 @@ static int cxd2841er_set_frontend_s(struct dvb_frontend *fe)
dev_dbg(&priv->i2c->dev, "%s(): tune failed\n", __func__);
goto done;
}
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
- if (fe->ops.tuner_ops.set_params)
- fe->ops.tuner_ops.set_params(fe);
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
+
+ if (!(priv->flags & CXD2841ER_EARLY_TUNE))
+ cxd2841er_tuner_set(fe);
+
cxd2841er_tune_done(priv);
timeout = ((3000000 + (symbol_rate - 1)) / symbol_rate) + 150;
for (i = 0; i < timeout / CXD2841ER_DVBS_POLLING_INVL; i++) {
@@ -3298,6 +3371,10 @@ static int cxd2841er_set_frontend_tc(struct dvb_frontend *fe)
dev_dbg(&priv->i2c->dev, "%s() delivery_system=%d bandwidth_hz=%d\n",
__func__, p->delivery_system, p->bandwidth_hz);
+
+ if (priv->flags & CXD2841ER_EARLY_TUNE)
+ cxd2841er_tuner_set(fe);
+
if (p->delivery_system == SYS_DVBT) {
priv->system = SYS_DVBT;
switch (priv->state) {
@@ -3379,13 +3456,15 @@ static int cxd2841er_set_frontend_tc(struct dvb_frontend *fe)
}
if (ret)
goto done;
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 1);
- if (fe->ops.tuner_ops.set_params)
- fe->ops.tuner_ops.set_params(fe);
- if (fe->ops.i2c_gate_ctrl)
- fe->ops.i2c_gate_ctrl(fe, 0);
+
+ if (!(priv->flags & CXD2841ER_EARLY_TUNE))
+ cxd2841er_tuner_set(fe);
+
cxd2841er_tune_done(priv);
+
+ if (priv->flags & CXD2841ER_NO_WAIT_LOCK)
+ goto done;
+
timeout = 2500;
while (timeout > 0) {
ret = cxd2841er_read_status_tc(fe, &status);
@@ -3705,14 +3784,20 @@ static int cxd2841er_init_tc(struct dvb_frontend *fe)
dev_dbg(&priv->i2c->dev, "%s() bandwidth_hz=%d\n",
__func__, p->bandwidth_hz);
cxd2841er_shutdown_to_sleep_tc(priv);
- /* SONY_DEMOD_CONFIG_IFAGCNEG = 1 */
+	/* SONY_DEMOD_CONFIG_IFAGCNEG = 1 (0 for NO_AGCNEG) */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x10);
- cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xcb, 0x40, 0x40);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xcb,
+ ((priv->flags & CXD2841ER_NO_AGCNEG) ? 0x00 : 0x40), 0x40);
/* SONY_DEMOD_CONFIG_IFAGC_ADC_FS = 0 */
cxd2841er_write_reg(priv, I2C_SLVT, 0xcd, 0x50);
/* SONY_DEMOD_CONFIG_PARALLEL_SEL = 1 */
cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x00);
- cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xc4, 0x00, 0x80);
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xc4,
+ ((priv->flags & CXD2841ER_TS_SERIAL) ? 0x80 : 0x00), 0x80);
+
+ /* clear TSCFG bits 3+4 */
+ if (priv->flags & CXD2841ER_TSBITS)
+ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0xc4, 0x00, 0x18);
cxd2841er_init_stats(fe);
@@ -3740,6 +3825,7 @@ static struct dvb_frontend *cxd2841er_attach(struct cxd2841er_config *cfg,
priv->i2c_addr_slvx = (cfg->i2c_addr + 4) >> 1;
priv->i2c_addr_slvt = (cfg->i2c_addr) >> 1;
priv->xtal = cfg->xtal;
+ priv->flags = cfg->flags;
priv->frontend.demodulator_priv = priv;
dev_info(&priv->i2c->dev,
"%s(): I2C adapter %p SLVX addr %x SLVT addr %x\n",
@@ -3747,16 +3833,39 @@ static struct dvb_frontend *cxd2841er_attach(struct cxd2841er_config *cfg,
priv->i2c_addr_slvx, priv->i2c_addr_slvt);
chip_id = cxd2841er_chip_id(priv);
switch (chip_id) {
+ case CXD2837ER_CHIP_ID:
+ snprintf(cxd2841er_t_c_ops.info.name, 128,
+ "Sony CXD2837ER DVB-T/T2/C demodulator");
+ name = "CXD2837ER";
+ type = "C/T/T2";
+ break;
+ case CXD2838ER_CHIP_ID:
+ snprintf(cxd2841er_t_c_ops.info.name, 128,
+ "Sony CXD2838ER ISDB-T demodulator");
+ cxd2841er_t_c_ops.delsys[0] = SYS_ISDBT;
+ cxd2841er_t_c_ops.delsys[1] = SYS_UNDEFINED;
+ cxd2841er_t_c_ops.delsys[2] = SYS_UNDEFINED;
+ name = "CXD2838ER";
+ type = "ISDB-T";
+ break;
case CXD2841ER_CHIP_ID:
snprintf(cxd2841er_t_c_ops.info.name, 128,
"Sony CXD2841ER DVB-T/T2/C demodulator");
name = "CXD2841ER";
+ type = "T/T2/C/ISDB-T";
+ break;
+ case CXD2843ER_CHIP_ID:
+ snprintf(cxd2841er_t_c_ops.info.name, 128,
+ "Sony CXD2843ER DVB-T/T2/C/C2 demodulator");
+ name = "CXD2843ER";
+ type = "C/C2/T/T2";
break;
case CXD2854ER_CHIP_ID:
snprintf(cxd2841er_t_c_ops.info.name, 128,
"Sony CXD2854ER DVB-T/T2/C and ISDB-T demodulator");
cxd2841er_t_c_ops.delsys[3] = SYS_ISDBT;
name = "CXD2854ER";
+ type = "C/C2/T/T2/ISDB-T";
break;
default:
dev_err(&priv->i2c->dev, "%s(): invalid chip ID 0x%02x\n",
@@ -3776,7 +3885,6 @@ static struct dvb_frontend *cxd2841er_attach(struct cxd2841er_config *cfg,
memcpy(&priv->frontend.ops,
&cxd2841er_t_c_ops,
sizeof(struct dvb_frontend_ops));
- type = "T/T2/C/ISDB-T";
}
dev_info(&priv->i2c->dev,
diff --git a/drivers/media/dvb-frontends/cxd2841er.h b/drivers/media/dvb-frontends/cxd2841er.h
index 7f1acfb8f4f5..dc32f5fb6662 100644
--- a/drivers/media/dvb-frontends/cxd2841er.h
+++ b/drivers/media/dvb-frontends/cxd2841er.h
@@ -24,6 +24,15 @@
#include <linux/dvb/frontend.h>
+#define CXD2841ER_USE_GATECTRL 1 /* bit 0 */
+#define CXD2841ER_AUTO_IFHZ 2 /* bit 1 */
+#define CXD2841ER_TS_SERIAL 4 /* bit 2 */
+#define CXD2841ER_ASCOT 8 /* bit 3 */
+#define CXD2841ER_EARLY_TUNE 16 /* bit 4 */
+#define CXD2841ER_NO_WAIT_LOCK 32 /* bit 5 */
+#define CXD2841ER_NO_AGCNEG 64 /* bit 6 */
+#define CXD2841ER_TSBITS 128 /* bit 7 */
+
enum cxd2841er_xtal {
SONY_XTAL_20500, /* 20.5 MHz */
SONY_XTAL_24000, /* 24 MHz */
@@ -33,6 +42,7 @@ enum cxd2841er_xtal {
struct cxd2841er_config {
u8 i2c_addr;
enum cxd2841er_xtal xtal;
+ u32 flags;
};
#if IS_REACHABLE(CONFIG_DVB_CXD2841ER)
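
With the new flags word in struct cxd2841er_config, each board driver opts in to the behaviours above instead of getting them unconditionally. A hypothetical attach-time configuration; the I2C address, crystal and flag selection are illustrative, not defaults:

#include "cxd2841er.h"

/* Hypothetical bridge-driver snippet: gate control around tuner access,
 * IF frequency reported by the tuner, serial TS output and the
 * ASCOT-specific equaliser writes enabled.
 */
static struct cxd2841er_config demod_cfg = {
	.i2c_addr = 0xc8,
	.xtal     = SONY_XTAL_24000,
	.flags    = CXD2841ER_USE_GATECTRL | CXD2841ER_AUTO_IFHZ |
		    CXD2841ER_TS_SERIAL | CXD2841ER_ASCOT,
};

/* fe = cxd2841er_attach_t_c(&demod_cfg, i2c_adapter); */
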
diff --git a/drivers/media/dvb-frontends/cxd2841er_priv.h b/drivers/media/dvb-frontends/cxd2841er_priv.h
index 0bbce451149f..6a7126480889 100644
--- a/drivers/media/dvb-frontends/cxd2841er_priv.h
+++ b/drivers/media/dvb-frontends/cxd2841er_priv.h
@@ -25,7 +25,10 @@
#define I2C_SLVX 0
#define I2C_SLVT 1
+#define CXD2837ER_CHIP_ID 0xb1
+#define CXD2838ER_CHIP_ID 0xb0
#define CXD2841ER_CHIP_ID 0xa7
+#define CXD2843ER_CHIP_ID 0xa4
#define CXD2854ER_CHIP_ID 0xc1
#define CXD2841ER_DVBS_POLLING_INVL 10
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index 3815ea515364..1caa04d8f60f 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -279,10 +279,10 @@ static int dib7000p_set_power_mode(struct dib7000p_state *state, enum dib7000p_p
if (state->version != SOC7090)
reg_1280 &= ~((1 << 11));
reg_1280 &= ~(1 << 6);
- /* fall through wanted to enable the interfaces */
-
+ /* fall-through */
+ case DIB7000P_POWER_INTERFACE_ONLY:
/* just leave power on the control-interfaces: GPIO and (I2C or SDIO) */
- case DIB7000P_POWER_INTERFACE_ONLY: /* TODO power up either SDIO or I2C */
+ /* TODO power up either SDIO or I2C */
if (state->version == SOC7090)
reg_1280 &= ~((1 << 7) | (1 << 5));
else
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index daeaf965dd56..14040c915dbb 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -2837,7 +2837,8 @@ ctrl_set_cfg_mpeg_output(struct drx_demod_instance *demod, struct drx_cfg_mpeg_o
/* coef = 188/204 */
max_bit_rate =
(ext_attr->curr_symbol_rate / 8) * nr_bits * 188;
- /* pass through b/c Annex A/c need following settings */
+	/* pass through because Annex A/C need the following settings */
+ /* fall-through */
case DRX_STANDARD_ITU_B:
rc = drxj_dap_write_reg16(dev_addr, FEC_OC_FCT_USAGE__A, FEC_OC_FCT_USAGE__PRE, 0);
if (rc != 0) {
@@ -4776,9 +4777,9 @@ set_frequency(struct drx_demod_instance *demod,
No need to account for mirroring on RF
*/
switch (ext_attr->standard) {
- case DRX_STANDARD_ITU_A: /* fallthrough */
- case DRX_STANDARD_ITU_C: /* fallthrough */
- case DRX_STANDARD_PAL_SECAM_LP: /* fallthrough */
+ case DRX_STANDARD_ITU_A:
+ case DRX_STANDARD_ITU_C:
+ case DRX_STANDARD_PAL_SECAM_LP:
case DRX_STANDARD_8VSB:
select_pos_image = true;
break;
@@ -4787,11 +4788,12 @@ set_frequency(struct drx_demod_instance *demod,
Sound carrier is already 3Mhz above centre frequency due
to tuner setting so now add an extra shift of 1MHz... */
fm_frequency_shift = 1000;
- case DRX_STANDARD_ITU_B: /* fallthrough */
- case DRX_STANDARD_NTSC: /* fallthrough */
- case DRX_STANDARD_PAL_SECAM_BG: /* fallthrough */
- case DRX_STANDARD_PAL_SECAM_DK: /* fallthrough */
- case DRX_STANDARD_PAL_SECAM_I: /* fallthrough */
+		/* fall through */
+ case DRX_STANDARD_ITU_B:
+ case DRX_STANDARD_NTSC:
+ case DRX_STANDARD_PAL_SECAM_BG:
+ case DRX_STANDARD_PAL_SECAM_DK:
+ case DRX_STANDARD_PAL_SECAM_I:
case DRX_STANDARD_PAL_SECAM_L:
select_pos_image = false;
break;
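
The frontend hunks through the rest of this series follow one pattern: an explicit fall-through comment is placed directly before the case label that is intentionally fallen into, and redundant markers between immediately adjacent labels are dropped. Comments of this form ("fall through", "fall-through" and similar) are what GCC's -Wimplicit-fallthrough heuristics recognize. A self-contained sketch of the annotated shape, with hypothetical names:

/* Sketch only: annotate the deliberate fall-through, leave adjacent
 * labels unmarked.
 */
enum demo_mode { DEMO_MODE_AUTO, DEMO_MODE_2K, DEMO_MODE_8K };

static unsigned int demo_params(enum demo_mode mode)
{
	unsigned int params = 0;

	switch (mode) {
	case DEMO_MODE_AUTO:
		params |= 0x80;		/* request autodetection */
		/* fall through */
	case DEMO_MODE_8K:		/* 8K is also the first guess */
		params |= 0x01;
		break;
	case DEMO_MODE_2K:
		params |= 0x02;
		break;
	}
	return params;
}
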
diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c
index 71910561005f..17638e08835a 100644
--- a/drivers/media/dvb-frontends/drxd_hard.c
+++ b/drivers/media/dvb-frontends/drxd_hard.c
@@ -1517,12 +1517,14 @@ static int SetDeviceTypeId(struct drxd_state *state)
switch (deviceId) {
case 4:
state->diversity = 1;
+ /* fall through */
case 3:
case 7:
state->PGA = 1;
break;
case 6:
state->diversity = 1;
+ /* fall through */
case 5:
case 8:
break;
@@ -1969,7 +1971,8 @@ static int DRX_Start(struct drxd_state *state, s32 off)
switch (p->transmission_mode) {
default: /* Not set, detect it automatically */
operationMode |= SC_RA_RAM_OP_AUTO_MODE__M;
- /* fall through , try first guess DRX_FFTMODE_8K */
+ /* try first guess DRX_FFTMODE_8K */
+ /* fall through */
case TRANSMISSION_MODE_8K:
transmissionParams |= SC_RA_RAM_OP_PARAM_MODE_8K;
if (state->type_A) {
@@ -2143,8 +2146,8 @@ static int DRX_Start(struct drxd_state *state, s32 off)
switch (p->modulation) {
default:
operationMode |= SC_RA_RAM_OP_AUTO_CONST__M;
- /* fall through , try first guess
- DRX_CONSTELLATION_QAM64 */
+ /* try first guess DRX_CONSTELLATION_QAM64 */
+ /* fall through */
case QAM_64:
transmissionParams |= SC_RA_RAM_OP_PARAM_CONST_QAM64;
if (state->type_A) {
@@ -2280,6 +2283,7 @@ static int DRX_Start(struct drxd_state *state, s32 off)
break;
default:
operationMode |= SC_RA_RAM_OP_AUTO_RATE__M;
+ /* fall through */
case FEC_2_3:
transmissionParams |= SC_RA_RAM_OP_PARAM_RATE_2_3;
if (state->type_A) {
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 050fe34342d3..48a8aad47a74 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -3271,10 +3271,12 @@ static int dvbt_sc_command(struct drxk_state *state,
case OFDM_SC_RA_RAM_CMD_PROGRAM_PARAM:
status |= write16(state, OFDM_SC_RA_RAM_PARAM1__A, param1);
/* All commands using 1 parameters */
+ /* fall through */
case OFDM_SC_RA_RAM_CMD_SET_ECHO_TIMING:
case OFDM_SC_RA_RAM_CMD_USER_IO:
status |= write16(state, OFDM_SC_RA_RAM_PARAM0__A, param0);
/* All commands using 0 parameters */
+ /* fall through */
case OFDM_SC_RA_RAM_CMD_GET_OP_PARAM:
case OFDM_SC_RA_RAM_CMD_NULL:
/* Write command */
@@ -3782,7 +3784,8 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
case TRANSMISSION_MODE_AUTO:
default:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_MODE__M;
- /* fall through , try first guess DRX_FFTMODE_8K */
+ /* try first guess DRX_FFTMODE_8K */
+ /* fall through */
case TRANSMISSION_MODE_8K:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_MODE_8K;
break;
@@ -3796,7 +3799,8 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
default:
case GUARD_INTERVAL_AUTO:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_GUARD__M;
- /* fall through , try first guess DRX_GUARD_1DIV4 */
+ /* try first guess DRX_GUARD_1DIV4 */
+ /* fall through */
case GUARD_INTERVAL_1_4:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_GUARD_4;
break;
@@ -3817,9 +3821,9 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
case HIERARCHY_NONE:
default:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_HIER__M;
- /* fall through , try first guess SC_RA_RAM_OP_PARAM_HIER_NO */
+ /* try first guess SC_RA_RAM_OP_PARAM_HIER_NO */
/* transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_HIER_NO; */
- /* break; */
+ /* fall through */
case HIERARCHY_1:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_HIER_A1;
break;
@@ -3837,7 +3841,8 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
case QAM_AUTO:
default:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_CONST__M;
- /* fall through , try first guess DRX_CONSTELLATION_QAM64 */
+ /* try first guess DRX_CONSTELLATION_QAM64 */
+ /* fall through */
case QAM_64:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_CONST_QAM64;
break;
@@ -3880,7 +3885,8 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
case FEC_AUTO:
default:
operation_mode |= OFDM_SC_RA_RAM_OP_AUTO_RATE__M;
- /* fall through , try first guess DRX_CODERATE_2DIV3 */
+ /* try first guess DRX_CODERATE_2DIV3 */
+ /* fall through */
case FEC_2_3:
transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_RATE_2_3;
break;
@@ -3914,7 +3920,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
switch (state->props.bandwidth_hz) {
case 0:
state->props.bandwidth_hz = 8000000;
- /* fall though */
+ /* fall through */
case 8000000:
bandwidth = DRXK_BANDWIDTH_8MHZ_IN_HZ;
status = write16(state, OFDM_SC_RA_RAM_SRMM_FIX_FACT_8K__A,
diff --git a/drivers/media/dvb-frontends/mt352.c b/drivers/media/dvb-frontends/mt352.c
index e127090f2d22..d5fa96f0a6cd 100644
--- a/drivers/media/dvb-frontends/mt352.c
+++ b/drivers/media/dvb-frontends/mt352.c
@@ -211,6 +211,7 @@ static int mt352_set_parameters(struct dvb_frontend *fe)
if (op->hierarchy == HIERARCHY_AUTO ||
op->hierarchy == HIERARCHY_NONE)
break;
+ /* fall through */
default:
return -EINVAL;
}
diff --git a/drivers/media/dvb-frontends/or51132.c b/drivers/media/dvb-frontends/or51132.c
index 62aa00767015..5f2549c48eb0 100644
--- a/drivers/media/dvb-frontends/or51132.c
+++ b/drivers/media/dvb-frontends/or51132.c
@@ -493,8 +493,8 @@ start:
switch (reg&0xff) {
case 0x06:
if (reg & 0x1000) usK = 3 << 24;
- /* Fall through to QAM64 case */
- case 0x43:
+ /* fall through */
+ case 0x43: /* QAM64 */
c = 150204167;
break;
case 0x45:
diff --git a/drivers/media/dvb-frontends/s5h1411.c b/drivers/media/dvb-frontends/s5h1411.c
index f29750a96196..dd09336a135b 100644
--- a/drivers/media/dvb-frontends/s5h1411.c
+++ b/drivers/media/dvb-frontends/s5h1411.c
@@ -51,7 +51,7 @@ static int debug;
#define dprintk(arg...) do { \
if (debug) \
printk(arg); \
- } while (0)
+} while (0)
/* Register values to initialise the demod, defaults to VSB */
static struct init_tab {
@@ -410,7 +410,7 @@ static int s5h1411_set_if_freq(struct dvb_frontend *fe, int KHz)
default:
dprintk("%s(%d KHz) Invalid, defaulting to 5380\n",
__func__, KHz);
- /* no break, need to continue */
+ /* fall through */
case 5380:
case 44000:
s5h1411_writereg(state, S5H1411_I2C_TOP_ADDR, 0x38, 0x1be4);
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index fd49c436a36d..e726c2e00460 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -26,6 +26,7 @@
#include <linux/i2c.h>
#include "stv0367.h"
+#include "stv0367_defs.h"
#include "stv0367_regs.h"
#include "stv0367_priv.h"
@@ -45,6 +46,8 @@ module_param_named(i2c_debug, i2cdebug, int, 0644);
} while (0)
/* DVB-C */
+enum active_demod_state { demod_none, demod_ter, demod_cab };
+
struct stv0367cab_state {
enum stv0367_cab_signal_type state;
u32 mclk;
@@ -56,6 +59,7 @@ struct stv0367cab_state {
u32 freq_khz; /* found frequency (in kHz) */
u32 symbol_rate; /* found symbol rate (in Bds) */
enum fe_spectral_inversion spect_inv; /* Spectrum Inversion */
+ u32 qamfec_status_reg; /* status reg to poll for FEC Lock */
};
struct stv0367ter_state {
@@ -89,461 +93,12 @@ struct stv0367_state {
struct stv0367cab_state *cab_state;
/* DVB-T */
struct stv0367ter_state *ter_state;
-};
-
-struct st_register {
- u16 addr;
- u8 value;
-};
-
-/* values for STV4100 XTAL=30M int clk=53.125M*/
-static struct st_register def0367ter[STV0367TER_NBREGS] = {
- {R367TER_ID, 0x60},
- {R367TER_I2CRPT, 0xa0},
- /* {R367TER_I2CRPT, 0x22},*/
- {R367TER_TOPCTRL, 0x00},/* for xc5000; was 0x02 */
- {R367TER_IOCFG0, 0x40},
- {R367TER_DAC0R, 0x00},
- {R367TER_IOCFG1, 0x00},
- {R367TER_DAC1R, 0x00},
- {R367TER_IOCFG2, 0x62},
- {R367TER_SDFR, 0x00},
- {R367TER_STATUS, 0xf8},
- {R367TER_AUX_CLK, 0x0a},
- {R367TER_FREESYS1, 0x00},
- {R367TER_FREESYS2, 0x00},
- {R367TER_FREESYS3, 0x00},
- {R367TER_GPIO_CFG, 0x55},
- {R367TER_GPIO_CMD, 0x00},
- {R367TER_AGC2MAX, 0xff},
- {R367TER_AGC2MIN, 0x00},
- {R367TER_AGC1MAX, 0xff},
- {R367TER_AGC1MIN, 0x00},
- {R367TER_AGCR, 0xbc},
- {R367TER_AGC2TH, 0x00},
- {R367TER_AGC12C, 0x00},
- {R367TER_AGCCTRL1, 0x85},
- {R367TER_AGCCTRL2, 0x1f},
- {R367TER_AGC1VAL1, 0x00},
- {R367TER_AGC1VAL2, 0x00},
- {R367TER_AGC2VAL1, 0x6f},
- {R367TER_AGC2VAL2, 0x05},
- {R367TER_AGC2PGA, 0x00},
- {R367TER_OVF_RATE1, 0x00},
- {R367TER_OVF_RATE2, 0x00},
- {R367TER_GAIN_SRC1, 0xaa},/* for xc5000; was 0x2b */
- {R367TER_GAIN_SRC2, 0xd6},/* for xc5000; was 0x04 */
- {R367TER_INC_DEROT1, 0x55},
- {R367TER_INC_DEROT2, 0x55},
- {R367TER_PPM_CPAMP_DIR, 0x2c},
- {R367TER_PPM_CPAMP_INV, 0x00},
- {R367TER_FREESTFE_1, 0x00},
- {R367TER_FREESTFE_2, 0x1c},
- {R367TER_DCOFFSET, 0x00},
- {R367TER_EN_PROCESS, 0x05},
- {R367TER_SDI_SMOOTHER, 0x80},
- {R367TER_FE_LOOP_OPEN, 0x1c},
- {R367TER_FREQOFF1, 0x00},
- {R367TER_FREQOFF2, 0x00},
- {R367TER_FREQOFF3, 0x00},
- {R367TER_TIMOFF1, 0x00},
- {R367TER_TIMOFF2, 0x00},
- {R367TER_EPQ, 0x02},
- {R367TER_EPQAUTO, 0x01},
- {R367TER_SYR_UPDATE, 0xf5},
- {R367TER_CHPFREE, 0x00},
- {R367TER_PPM_STATE_MAC, 0x23},
- {R367TER_INR_THRESHOLD, 0xff},
- {R367TER_EPQ_TPS_ID_CELL, 0xf9},
- {R367TER_EPQ_CFG, 0x00},
- {R367TER_EPQ_STATUS, 0x01},
- {R367TER_AUTORELOCK, 0x81},
- {R367TER_BER_THR_VMSB, 0x00},
- {R367TER_BER_THR_MSB, 0x00},
- {R367TER_BER_THR_LSB, 0x00},
- {R367TER_CCD, 0x83},
- {R367TER_SPECTR_CFG, 0x00},
- {R367TER_CHC_DUMMY, 0x18},
- {R367TER_INC_CTL, 0x88},
- {R367TER_INCTHRES_COR1, 0xb4},
- {R367TER_INCTHRES_COR2, 0x96},
- {R367TER_INCTHRES_DET1, 0x0e},
- {R367TER_INCTHRES_DET2, 0x11},
- {R367TER_IIR_CELLNB, 0x8d},
- {R367TER_IIRCX_COEFF1_MSB, 0x00},
- {R367TER_IIRCX_COEFF1_LSB, 0x00},
- {R367TER_IIRCX_COEFF2_MSB, 0x09},
- {R367TER_IIRCX_COEFF2_LSB, 0x18},
- {R367TER_IIRCX_COEFF3_MSB, 0x14},
- {R367TER_IIRCX_COEFF3_LSB, 0x9c},
- {R367TER_IIRCX_COEFF4_MSB, 0x00},
- {R367TER_IIRCX_COEFF4_LSB, 0x00},
- {R367TER_IIRCX_COEFF5_MSB, 0x36},
- {R367TER_IIRCX_COEFF5_LSB, 0x42},
- {R367TER_FEPATH_CFG, 0x00},
- {R367TER_PMC1_FUNC, 0x65},
- {R367TER_PMC1_FOR, 0x00},
- {R367TER_PMC2_FUNC, 0x00},
- {R367TER_STATUS_ERR_DA, 0xe0},
- {R367TER_DIG_AGC_R, 0xfe},
- {R367TER_COMAGC_TARMSB, 0x0b},
- {R367TER_COM_AGC_TAR_ENMODE, 0x41},
- {R367TER_COM_AGC_CFG, 0x3e},
- {R367TER_COM_AGC_GAIN1, 0x39},
- {R367TER_AUT_AGC_TARGETMSB, 0x0b},
- {R367TER_LOCK_DET_MSB, 0x01},
- {R367TER_AGCTAR_LOCK_LSBS, 0x40},
- {R367TER_AUT_GAIN_EN, 0xf4},
- {R367TER_AUT_CFG, 0xf0},
- {R367TER_LOCKN, 0x23},
- {R367TER_INT_X_3, 0x00},
- {R367TER_INT_X_2, 0x03},
- {R367TER_INT_X_1, 0x8d},
- {R367TER_INT_X_0, 0xa0},
- {R367TER_MIN_ERRX_MSB, 0x00},
- {R367TER_COR_CTL, 0x23},
- {R367TER_COR_STAT, 0xf6},
- {R367TER_COR_INTEN, 0x00},
- {R367TER_COR_INTSTAT, 0x3f},
- {R367TER_COR_MODEGUARD, 0x03},
- {R367TER_AGC_CTL, 0x08},
- {R367TER_AGC_MANUAL1, 0x00},
- {R367TER_AGC_MANUAL2, 0x00},
- {R367TER_AGC_TARG, 0x16},
- {R367TER_AGC_GAIN1, 0x53},
- {R367TER_AGC_GAIN2, 0x1d},
- {R367TER_RESERVED_1, 0x00},
- {R367TER_RESERVED_2, 0x00},
- {R367TER_RESERVED_3, 0x00},
- {R367TER_CAS_CTL, 0x44},
- {R367TER_CAS_FREQ, 0xb3},
- {R367TER_CAS_DAGCGAIN, 0x12},
- {R367TER_SYR_CTL, 0x04},
- {R367TER_SYR_STAT, 0x10},
- {R367TER_SYR_NCO1, 0x00},
- {R367TER_SYR_NCO2, 0x00},
- {R367TER_SYR_OFFSET1, 0x00},
- {R367TER_SYR_OFFSET2, 0x00},
- {R367TER_FFT_CTL, 0x00},
- {R367TER_SCR_CTL, 0x70},
- {R367TER_PPM_CTL1, 0xf8},
- {R367TER_TRL_CTL, 0x14},/* for xc5000; was 0xac */
- {R367TER_TRL_NOMRATE1, 0xae},/* for xc5000; was 0x1e */
- {R367TER_TRL_NOMRATE2, 0x56},/* for xc5000; was 0x58 */
- {R367TER_TRL_TIME1, 0x1d},
- {R367TER_TRL_TIME2, 0xfc},
- {R367TER_CRL_CTL, 0x24},
- {R367TER_CRL_FREQ1, 0xad},
- {R367TER_CRL_FREQ2, 0x9d},
- {R367TER_CRL_FREQ3, 0xff},
- {R367TER_CHC_CTL, 0x01},
- {R367TER_CHC_SNR, 0xf0},
- {R367TER_BDI_CTL, 0x00},
- {R367TER_DMP_CTL, 0x00},
- {R367TER_TPS_RCVD1, 0x30},
- {R367TER_TPS_RCVD2, 0x02},
- {R367TER_TPS_RCVD3, 0x01},
- {R367TER_TPS_RCVD4, 0x00},
- {R367TER_TPS_ID_CELL1, 0x00},
- {R367TER_TPS_ID_CELL2, 0x00},
- {R367TER_TPS_RCVD5_SET1, 0x02},
- {R367TER_TPS_SET2, 0x02},
- {R367TER_TPS_SET3, 0x01},
- {R367TER_TPS_CTL, 0x00},
- {R367TER_CTL_FFTOSNUM, 0x34},
- {R367TER_TESTSELECT, 0x09},
- {R367TER_MSC_REV, 0x0a},
- {R367TER_PIR_CTL, 0x00},
- {R367TER_SNR_CARRIER1, 0xa1},
- {R367TER_SNR_CARRIER2, 0x9a},
- {R367TER_PPM_CPAMP, 0x2c},
- {R367TER_TSM_AP0, 0x00},
- {R367TER_TSM_AP1, 0x00},
- {R367TER_TSM_AP2 , 0x00},
- {R367TER_TSM_AP3, 0x00},
- {R367TER_TSM_AP4, 0x00},
- {R367TER_TSM_AP5, 0x00},
- {R367TER_TSM_AP6, 0x00},
- {R367TER_TSM_AP7, 0x00},
- {R367TER_TSTRES, 0x00},
- {R367TER_ANACTRL, 0x0D},/* PLL stoped, restart at init!!! */
- {R367TER_TSTBUS, 0x00},
- {R367TER_TSTRATE, 0x00},
- {R367TER_CONSTMODE, 0x01},
- {R367TER_CONSTCARR1, 0x00},
- {R367TER_CONSTCARR2, 0x00},
- {R367TER_ICONSTEL, 0x0a},
- {R367TER_QCONSTEL, 0x15},
- {R367TER_TSTBISTRES0, 0x00},
- {R367TER_TSTBISTRES1, 0x00},
- {R367TER_TSTBISTRES2, 0x28},
- {R367TER_TSTBISTRES3, 0x00},
- {R367TER_RF_AGC1, 0xff},
- {R367TER_RF_AGC2, 0x83},
- {R367TER_ANADIGCTRL, 0x19},
- {R367TER_PLLMDIV, 0x01},/* for xc5000; was 0x0c */
- {R367TER_PLLNDIV, 0x06},/* for xc5000; was 0x55 */
- {R367TER_PLLSETUP, 0x18},
- {R367TER_DUAL_AD12, 0x0C},/* for xc5000 AGC voltage 1.6V */
- {R367TER_TSTBIST, 0x00},
- {R367TER_PAD_COMP_CTRL, 0x00},
- {R367TER_PAD_COMP_WR, 0x00},
- {R367TER_PAD_COMP_RD, 0xe0},
- {R367TER_SYR_TARGET_FFTADJT_MSB, 0x00},
- {R367TER_SYR_TARGET_FFTADJT_LSB, 0x00},
- {R367TER_SYR_TARGET_CHCADJT_MSB, 0x00},
- {R367TER_SYR_TARGET_CHCADJT_LSB, 0x00},
- {R367TER_SYR_FLAG, 0x00},
- {R367TER_CRL_TARGET1, 0x00},
- {R367TER_CRL_TARGET2, 0x00},
- {R367TER_CRL_TARGET3, 0x00},
- {R367TER_CRL_TARGET4, 0x00},
- {R367TER_CRL_FLAG, 0x00},
- {R367TER_TRL_TARGET1, 0x00},
- {R367TER_TRL_TARGET2, 0x00},
- {R367TER_TRL_CHC, 0x00},
- {R367TER_CHC_SNR_TARG, 0x00},
- {R367TER_TOP_TRACK, 0x00},
- {R367TER_TRACKER_FREE1, 0x00},
- {R367TER_ERROR_CRL1, 0x00},
- {R367TER_ERROR_CRL2, 0x00},
- {R367TER_ERROR_CRL3, 0x00},
- {R367TER_ERROR_CRL4, 0x00},
- {R367TER_DEC_NCO1, 0x2c},
- {R367TER_DEC_NCO2, 0x0f},
- {R367TER_DEC_NCO3, 0x20},
- {R367TER_SNR, 0xf1},
- {R367TER_SYR_FFTADJ1, 0x00},
- {R367TER_SYR_FFTADJ2, 0x00},
- {R367TER_SYR_CHCADJ1, 0x00},
- {R367TER_SYR_CHCADJ2, 0x00},
- {R367TER_SYR_OFF, 0x00},
- {R367TER_PPM_OFFSET1, 0x00},
- {R367TER_PPM_OFFSET2, 0x03},
- {R367TER_TRACKER_FREE2, 0x00},
- {R367TER_DEBG_LT10, 0x00},
- {R367TER_DEBG_LT11, 0x00},
- {R367TER_DEBG_LT12, 0x00},
- {R367TER_DEBG_LT13, 0x00},
- {R367TER_DEBG_LT14, 0x00},
- {R367TER_DEBG_LT15, 0x00},
- {R367TER_DEBG_LT16, 0x00},
- {R367TER_DEBG_LT17, 0x00},
- {R367TER_DEBG_LT18, 0x00},
- {R367TER_DEBG_LT19, 0x00},
- {R367TER_DEBG_LT1A, 0x00},
- {R367TER_DEBG_LT1B, 0x00},
- {R367TER_DEBG_LT1C, 0x00},
- {R367TER_DEBG_LT1D, 0x00},
- {R367TER_DEBG_LT1E, 0x00},
- {R367TER_DEBG_LT1F, 0x00},
- {R367TER_RCCFGH, 0x00},
- {R367TER_RCCFGM, 0x00},
- {R367TER_RCCFGL, 0x00},
- {R367TER_RCINSDELH, 0x00},
- {R367TER_RCINSDELM, 0x00},
- {R367TER_RCINSDELL, 0x00},
- {R367TER_RCSTATUS, 0x00},
- {R367TER_RCSPEED, 0x6f},
- {R367TER_RCDEBUGM, 0xe7},
- {R367TER_RCDEBUGL, 0x9b},
- {R367TER_RCOBSCFG, 0x00},
- {R367TER_RCOBSM, 0x00},
- {R367TER_RCOBSL, 0x00},
- {R367TER_RCFECSPY, 0x00},
- {R367TER_RCFSPYCFG, 0x00},
- {R367TER_RCFSPYDATA, 0x00},
- {R367TER_RCFSPYOUT, 0x00},
- {R367TER_RCFSTATUS, 0x00},
- {R367TER_RCFGOODPACK, 0x00},
- {R367TER_RCFPACKCNT, 0x00},
- {R367TER_RCFSPYMISC, 0x00},
- {R367TER_RCFBERCPT4, 0x00},
- {R367TER_RCFBERCPT3, 0x00},
- {R367TER_RCFBERCPT2, 0x00},
- {R367TER_RCFBERCPT1, 0x00},
- {R367TER_RCFBERCPT0, 0x00},
- {R367TER_RCFBERERR2, 0x00},
- {R367TER_RCFBERERR1, 0x00},
- {R367TER_RCFBERERR0, 0x00},
- {R367TER_RCFSTATESM, 0x00},
- {R367TER_RCFSTATESL, 0x00},
- {R367TER_RCFSPYBER, 0x00},
- {R367TER_RCFSPYDISTM, 0x00},
- {R367TER_RCFSPYDISTL, 0x00},
- {R367TER_RCFSPYOBS7, 0x00},
- {R367TER_RCFSPYOBS6, 0x00},
- {R367TER_RCFSPYOBS5, 0x00},
- {R367TER_RCFSPYOBS4, 0x00},
- {R367TER_RCFSPYOBS3, 0x00},
- {R367TER_RCFSPYOBS2, 0x00},
- {R367TER_RCFSPYOBS1, 0x00},
- {R367TER_RCFSPYOBS0, 0x00},
- {R367TER_TSGENERAL, 0x00},
- {R367TER_RC1SPEED, 0x6f},
- {R367TER_TSGSTATUS, 0x18},
- {R367TER_FECM, 0x01},
- {R367TER_VTH12, 0xff},
- {R367TER_VTH23, 0xa1},
- {R367TER_VTH34, 0x64},
- {R367TER_VTH56, 0x40},
- {R367TER_VTH67, 0x00},
- {R367TER_VTH78, 0x2c},
- {R367TER_VITCURPUN, 0x12},
- {R367TER_VERROR, 0x01},
- {R367TER_PRVIT, 0x3f},
- {R367TER_VAVSRVIT, 0x00},
- {R367TER_VSTATUSVIT, 0xbd},
- {R367TER_VTHINUSE, 0xa1},
- {R367TER_KDIV12, 0x20},
- {R367TER_KDIV23, 0x40},
- {R367TER_KDIV34, 0x20},
- {R367TER_KDIV56, 0x30},
- {R367TER_KDIV67, 0x00},
- {R367TER_KDIV78, 0x30},
- {R367TER_SIGPOWER, 0x54},
- {R367TER_DEMAPVIT, 0x40},
- {R367TER_VITSCALE, 0x00},
- {R367TER_FFEC1PRG, 0x00},
- {R367TER_FVITCURPUN, 0x12},
- {R367TER_FVERROR, 0x01},
- {R367TER_FVSTATUSVIT, 0xbd},
- {R367TER_DEBUG_LT1, 0x00},
- {R367TER_DEBUG_LT2, 0x00},
- {R367TER_DEBUG_LT3, 0x00},
- {R367TER_TSTSFMET, 0x00},
- {R367TER_SELOUT, 0x00},
- {R367TER_TSYNC, 0x00},
- {R367TER_TSTERR, 0x00},
- {R367TER_TSFSYNC, 0x00},
- {R367TER_TSTSFERR, 0x00},
- {R367TER_TSTTSSF1, 0x01},
- {R367TER_TSTTSSF2, 0x1f},
- {R367TER_TSTTSSF3, 0x00},
- {R367TER_TSTTS1, 0x00},
- {R367TER_TSTTS2, 0x1f},
- {R367TER_TSTTS3, 0x01},
- {R367TER_TSTTS4, 0x00},
- {R367TER_TSTTSRC, 0x00},
- {R367TER_TSTTSRS, 0x00},
- {R367TER_TSSTATEM, 0xb0},
- {R367TER_TSSTATEL, 0x40},
- {R367TER_TSCFGH, 0xC0},
- {R367TER_TSCFGM, 0xc0},/* for xc5000; was 0x00 */
- {R367TER_TSCFGL, 0x20},
- {R367TER_TSSYNC, 0x00},
- {R367TER_TSINSDELH, 0x00},
- {R367TER_TSINSDELM, 0x00},
- {R367TER_TSINSDELL, 0x00},
- {R367TER_TSDIVN, 0x03},
- {R367TER_TSDIVPM, 0x00},
- {R367TER_TSDIVPL, 0x00},
- {R367TER_TSDIVQM, 0x00},
- {R367TER_TSDIVQL, 0x00},
- {R367TER_TSDILSTKM, 0x00},
- {R367TER_TSDILSTKL, 0x00},
- {R367TER_TSSPEED, 0x40},/* for xc5000; was 0x6f */
- {R367TER_TSSTATUS, 0x81},
- {R367TER_TSSTATUS2, 0x6a},
- {R367TER_TSBITRATEM, 0x0f},
- {R367TER_TSBITRATEL, 0xc6},
- {R367TER_TSPACKLENM, 0x00},
- {R367TER_TSPACKLENL, 0xfc},
- {R367TER_TSBLOCLENM, 0x0a},
- {R367TER_TSBLOCLENL, 0x80},
- {R367TER_TSDLYH, 0x90},
- {R367TER_TSDLYM, 0x68},
- {R367TER_TSDLYL, 0x01},
- {R367TER_TSNPDAV, 0x00},
- {R367TER_TSBUFSTATH, 0x00},
- {R367TER_TSBUFSTATM, 0x00},
- {R367TER_TSBUFSTATL, 0x00},
- {R367TER_TSDEBUGM, 0xcf},
- {R367TER_TSDEBUGL, 0x1e},
- {R367TER_TSDLYSETH, 0x00},
- {R367TER_TSDLYSETM, 0x68},
- {R367TER_TSDLYSETL, 0x00},
- {R367TER_TSOBSCFG, 0x00},
- {R367TER_TSOBSM, 0x47},
- {R367TER_TSOBSL, 0x1f},
- {R367TER_ERRCTRL1, 0x95},
- {R367TER_ERRCNT1H, 0x80},
- {R367TER_ERRCNT1M, 0x00},
- {R367TER_ERRCNT1L, 0x00},
- {R367TER_ERRCTRL2, 0x95},
- {R367TER_ERRCNT2H, 0x00},
- {R367TER_ERRCNT2M, 0x00},
- {R367TER_ERRCNT2L, 0x00},
- {R367TER_FECSPY, 0x88},
- {R367TER_FSPYCFG, 0x2c},
- {R367TER_FSPYDATA, 0x3a},
- {R367TER_FSPYOUT, 0x06},
- {R367TER_FSTATUS, 0x61},
- {R367TER_FGOODPACK, 0xff},
- {R367TER_FPACKCNT, 0xff},
- {R367TER_FSPYMISC, 0x66},
- {R367TER_FBERCPT4, 0x00},
- {R367TER_FBERCPT3, 0x00},
- {R367TER_FBERCPT2, 0x36},
- {R367TER_FBERCPT1, 0x36},
- {R367TER_FBERCPT0, 0x14},
- {R367TER_FBERERR2, 0x00},
- {R367TER_FBERERR1, 0x03},
- {R367TER_FBERERR0, 0x28},
- {R367TER_FSTATESM, 0x00},
- {R367TER_FSTATESL, 0x02},
- {R367TER_FSPYBER, 0x00},
- {R367TER_FSPYDISTM, 0x01},
- {R367TER_FSPYDISTL, 0x9f},
- {R367TER_FSPYOBS7, 0xc9},
- {R367TER_FSPYOBS6, 0x99},
- {R367TER_FSPYOBS5, 0x08},
- {R367TER_FSPYOBS4, 0xec},
- {R367TER_FSPYOBS3, 0x01},
- {R367TER_FSPYOBS2, 0x0f},
- {R367TER_FSPYOBS1, 0xf5},
- {R367TER_FSPYOBS0, 0x08},
- {R367TER_SFDEMAP, 0x40},
- {R367TER_SFERROR, 0x00},
- {R367TER_SFAVSR, 0x30},
- {R367TER_SFECSTATUS, 0xcc},
- {R367TER_SFKDIV12, 0x20},
- {R367TER_SFKDIV23, 0x40},
- {R367TER_SFKDIV34, 0x20},
- {R367TER_SFKDIV56, 0x20},
- {R367TER_SFKDIV67, 0x00},
- {R367TER_SFKDIV78, 0x20},
- {R367TER_SFDILSTKM, 0x00},
- {R367TER_SFDILSTKL, 0x00},
- {R367TER_SFSTATUS, 0xb5},
- {R367TER_SFDLYH, 0x90},
- {R367TER_SFDLYM, 0x60},
- {R367TER_SFDLYL, 0x01},
- {R367TER_SFDLYSETH, 0xc0},
- {R367TER_SFDLYSETM, 0x60},
- {R367TER_SFDLYSETL, 0x00},
- {R367TER_SFOBSCFG, 0x00},
- {R367TER_SFOBSM, 0x47},
- {R367TER_SFOBSL, 0x05},
- {R367TER_SFECINFO, 0x40},
- {R367TER_SFERRCTRL, 0x74},
- {R367TER_SFERRCNTH, 0x80},
- {R367TER_SFERRCNTM , 0x00},
- {R367TER_SFERRCNTL, 0x00},
- {R367TER_SYMBRATEM, 0x2f},
- {R367TER_SYMBRATEL, 0x50},
- {R367TER_SYMBSTATUS, 0x7f},
- {R367TER_SYMBCFG, 0x00},
- {R367TER_SYMBFIFOM, 0xf4},
- {R367TER_SYMBFIFOL, 0x0d},
- {R367TER_SYMBOFFSM, 0xf0},
- {R367TER_SYMBOFFSL, 0x2d},
- {R367TER_DEBUG_LT4, 0x00},
- {R367TER_DEBUG_LT5, 0x00},
- {R367TER_DEBUG_LT6, 0x00},
- {R367TER_DEBUG_LT7, 0x00},
- {R367TER_DEBUG_LT8, 0x00},
- {R367TER_DEBUG_LT9, 0x00},
+ /* flags for operation control */
+ u8 use_i2c_gatectrl;
+ u8 deftabs;
+ u8 reinit_on_setfrontend;
+ u8 auto_if_khz;
+ enum active_demod_state activedemod;
};
#define RF_LOOKUP_TABLE_SIZE 31
@@ -571,197 +126,6 @@ static const s32 stv0367cab_RF_LookUp2[RF_LOOKUP_TABLE2_SIZE][RF_LOOKUP_TABLE2_S
}
};
-static struct st_register def0367cab[STV0367CAB_NBREGS] = {
- {R367CAB_ID, 0x60},
- {R367CAB_I2CRPT, 0xa0},
- /*{R367CAB_I2CRPT, 0x22},*/
- {R367CAB_TOPCTRL, 0x10},
- {R367CAB_IOCFG0, 0x80},
- {R367CAB_DAC0R, 0x00},
- {R367CAB_IOCFG1, 0x00},
- {R367CAB_DAC1R, 0x00},
- {R367CAB_IOCFG2, 0x00},
- {R367CAB_SDFR, 0x00},
- {R367CAB_AUX_CLK, 0x00},
- {R367CAB_FREESYS1, 0x00},
- {R367CAB_FREESYS2, 0x00},
- {R367CAB_FREESYS3, 0x00},
- {R367CAB_GPIO_CFG, 0x55},
- {R367CAB_GPIO_CMD, 0x01},
- {R367CAB_TSTRES, 0x00},
- {R367CAB_ANACTRL, 0x0d},/* was 0x00 need to check - I.M.L.*/
- {R367CAB_TSTBUS, 0x00},
- {R367CAB_RF_AGC1, 0xea},
- {R367CAB_RF_AGC2, 0x82},
- {R367CAB_ANADIGCTRL, 0x0b},
- {R367CAB_PLLMDIV, 0x01},
- {R367CAB_PLLNDIV, 0x08},
- {R367CAB_PLLSETUP, 0x18},
- {R367CAB_DUAL_AD12, 0x0C}, /* for xc5000 AGC voltage 1.6V */
- {R367CAB_TSTBIST, 0x00},
- {R367CAB_CTRL_1, 0x00},
- {R367CAB_CTRL_2, 0x03},
- {R367CAB_IT_STATUS1, 0x2b},
- {R367CAB_IT_STATUS2, 0x08},
- {R367CAB_IT_EN1, 0x00},
- {R367CAB_IT_EN2, 0x00},
- {R367CAB_CTRL_STATUS, 0x04},
- {R367CAB_TEST_CTL, 0x00},
- {R367CAB_AGC_CTL, 0x73},
- {R367CAB_AGC_IF_CFG, 0x50},
- {R367CAB_AGC_RF_CFG, 0x00},
- {R367CAB_AGC_PWM_CFG, 0x03},
- {R367CAB_AGC_PWR_REF_L, 0x5a},
- {R367CAB_AGC_PWR_REF_H, 0x00},
- {R367CAB_AGC_RF_TH_L, 0xff},
- {R367CAB_AGC_RF_TH_H, 0x07},
- {R367CAB_AGC_IF_LTH_L, 0x00},
- {R367CAB_AGC_IF_LTH_H, 0x08},
- {R367CAB_AGC_IF_HTH_L, 0xff},
- {R367CAB_AGC_IF_HTH_H, 0x07},
- {R367CAB_AGC_PWR_RD_L, 0xa0},
- {R367CAB_AGC_PWR_RD_M, 0xe9},
- {R367CAB_AGC_PWR_RD_H, 0x03},
- {R367CAB_AGC_PWM_IFCMD_L, 0xe4},
- {R367CAB_AGC_PWM_IFCMD_H, 0x00},
- {R367CAB_AGC_PWM_RFCMD_L, 0xff},
- {R367CAB_AGC_PWM_RFCMD_H, 0x07},
- {R367CAB_IQDEM_CFG, 0x01},
- {R367CAB_MIX_NCO_LL, 0x22},
- {R367CAB_MIX_NCO_HL, 0x96},
- {R367CAB_MIX_NCO_HH, 0x55},
- {R367CAB_SRC_NCO_LL, 0xff},
- {R367CAB_SRC_NCO_LH, 0x0c},
- {R367CAB_SRC_NCO_HL, 0xf5},
- {R367CAB_SRC_NCO_HH, 0x20},
- {R367CAB_IQDEM_GAIN_SRC_L, 0x06},
- {R367CAB_IQDEM_GAIN_SRC_H, 0x01},
- {R367CAB_IQDEM_DCRM_CFG_LL, 0xfe},
- {R367CAB_IQDEM_DCRM_CFG_LH, 0xff},
- {R367CAB_IQDEM_DCRM_CFG_HL, 0x0f},
- {R367CAB_IQDEM_DCRM_CFG_HH, 0x00},
- {R367CAB_IQDEM_ADJ_COEFF0, 0x34},
- {R367CAB_IQDEM_ADJ_COEFF1, 0xae},
- {R367CAB_IQDEM_ADJ_COEFF2, 0x46},
- {R367CAB_IQDEM_ADJ_COEFF3, 0x77},
- {R367CAB_IQDEM_ADJ_COEFF4, 0x96},
- {R367CAB_IQDEM_ADJ_COEFF5, 0x69},
- {R367CAB_IQDEM_ADJ_COEFF6, 0xc7},
- {R367CAB_IQDEM_ADJ_COEFF7, 0x01},
- {R367CAB_IQDEM_ADJ_EN, 0x04},
- {R367CAB_IQDEM_ADJ_AGC_REF, 0x94},
- {R367CAB_ALLPASSFILT1, 0xc9},
- {R367CAB_ALLPASSFILT2, 0x2d},
- {R367CAB_ALLPASSFILT3, 0xa3},
- {R367CAB_ALLPASSFILT4, 0xfb},
- {R367CAB_ALLPASSFILT5, 0xf6},
- {R367CAB_ALLPASSFILT6, 0x45},
- {R367CAB_ALLPASSFILT7, 0x6f},
- {R367CAB_ALLPASSFILT8, 0x7e},
- {R367CAB_ALLPASSFILT9, 0x05},
- {R367CAB_ALLPASSFILT10, 0x0a},
- {R367CAB_ALLPASSFILT11, 0x51},
- {R367CAB_TRL_AGC_CFG, 0x20},
- {R367CAB_TRL_LPF_CFG, 0x28},
- {R367CAB_TRL_LPF_ACQ_GAIN, 0x44},
- {R367CAB_TRL_LPF_TRK_GAIN, 0x22},
- {R367CAB_TRL_LPF_OUT_GAIN, 0x03},
- {R367CAB_TRL_LOCKDET_LTH, 0x04},
- {R367CAB_TRL_LOCKDET_HTH, 0x11},
- {R367CAB_TRL_LOCKDET_TRGVAL, 0x20},
- {R367CAB_IQ_QAM, 0x01},
- {R367CAB_FSM_STATE, 0xa0},
- {R367CAB_FSM_CTL, 0x08},
- {R367CAB_FSM_STS, 0x0c},
- {R367CAB_FSM_SNR0_HTH, 0x00},
- {R367CAB_FSM_SNR1_HTH, 0x00},
- {R367CAB_FSM_SNR2_HTH, 0x23},/* 0x00 */
- {R367CAB_FSM_SNR0_LTH, 0x00},
- {R367CAB_FSM_SNR1_LTH, 0x00},
- {R367CAB_FSM_EQA1_HTH, 0x00},
- {R367CAB_FSM_TEMPO, 0x32},
- {R367CAB_FSM_CONFIG, 0x03},
- {R367CAB_EQU_I_TESTTAP_L, 0x11},
- {R367CAB_EQU_I_TESTTAP_M, 0x00},
- {R367CAB_EQU_I_TESTTAP_H, 0x00},
- {R367CAB_EQU_TESTAP_CFG, 0x00},
- {R367CAB_EQU_Q_TESTTAP_L, 0xff},
- {R367CAB_EQU_Q_TESTTAP_M, 0x00},
- {R367CAB_EQU_Q_TESTTAP_H, 0x00},
- {R367CAB_EQU_TAP_CTRL, 0x00},
- {R367CAB_EQU_CTR_CRL_CONTROL_L, 0x11},
- {R367CAB_EQU_CTR_CRL_CONTROL_H, 0x05},
- {R367CAB_EQU_CTR_HIPOW_L, 0x00},
- {R367CAB_EQU_CTR_HIPOW_H, 0x00},
- {R367CAB_EQU_I_EQU_LO, 0xef},
- {R367CAB_EQU_I_EQU_HI, 0x00},
- {R367CAB_EQU_Q_EQU_LO, 0xee},
- {R367CAB_EQU_Q_EQU_HI, 0x00},
- {R367CAB_EQU_MAPPER, 0xc5},
- {R367CAB_EQU_SWEEP_RATE, 0x80},
- {R367CAB_EQU_SNR_LO, 0x64},
- {R367CAB_EQU_SNR_HI, 0x03},
- {R367CAB_EQU_GAMMA_LO, 0x00},
- {R367CAB_EQU_GAMMA_HI, 0x00},
- {R367CAB_EQU_ERR_GAIN, 0x36},
- {R367CAB_EQU_RADIUS, 0xaa},
- {R367CAB_EQU_FFE_MAINTAP, 0x00},
- {R367CAB_EQU_FFE_LEAKAGE, 0x63},
- {R367CAB_EQU_FFE_MAINTAP_POS, 0xdf},
- {R367CAB_EQU_GAIN_WIDE, 0x88},
- {R367CAB_EQU_GAIN_NARROW, 0x41},
- {R367CAB_EQU_CTR_LPF_GAIN, 0xd1},
- {R367CAB_EQU_CRL_LPF_GAIN, 0xa7},
- {R367CAB_EQU_GLOBAL_GAIN, 0x06},
- {R367CAB_EQU_CRL_LD_SEN, 0x85},
- {R367CAB_EQU_CRL_LD_VAL, 0xe2},
- {R367CAB_EQU_CRL_TFR, 0x20},
- {R367CAB_EQU_CRL_BISTH_LO, 0x00},
- {R367CAB_EQU_CRL_BISTH_HI, 0x00},
- {R367CAB_EQU_SWEEP_RANGE_LO, 0x00},
- {R367CAB_EQU_SWEEP_RANGE_HI, 0x00},
- {R367CAB_EQU_CRL_LIMITER, 0x40},
- {R367CAB_EQU_MODULUS_MAP, 0x90},
- {R367CAB_EQU_PNT_GAIN, 0xa7},
- {R367CAB_FEC_AC_CTR_0, 0x16},
- {R367CAB_FEC_AC_CTR_1, 0x0b},
- {R367CAB_FEC_AC_CTR_2, 0x88},
- {R367CAB_FEC_AC_CTR_3, 0x02},
- {R367CAB_FEC_STATUS, 0x12},
- {R367CAB_RS_COUNTER_0, 0x7d},
- {R367CAB_RS_COUNTER_1, 0xd0},
- {R367CAB_RS_COUNTER_2, 0x19},
- {R367CAB_RS_COUNTER_3, 0x0b},
- {R367CAB_RS_COUNTER_4, 0xa3},
- {R367CAB_RS_COUNTER_5, 0x00},
- {R367CAB_BERT_0, 0x01},
- {R367CAB_BERT_1, 0x25},
- {R367CAB_BERT_2, 0x41},
- {R367CAB_BERT_3, 0x39},
- {R367CAB_OUTFORMAT_0, 0xc2},
- {R367CAB_OUTFORMAT_1, 0x22},
- {R367CAB_SMOOTHER_2, 0x28},
- {R367CAB_TSMF_CTRL_0, 0x01},
- {R367CAB_TSMF_CTRL_1, 0xc6},
- {R367CAB_TSMF_CTRL_3, 0x43},
- {R367CAB_TS_ON_ID_0, 0x00},
- {R367CAB_TS_ON_ID_1, 0x00},
- {R367CAB_TS_ON_ID_2, 0x00},
- {R367CAB_TS_ON_ID_3, 0x00},
- {R367CAB_RE_STATUS_0, 0x00},
- {R367CAB_RE_STATUS_1, 0x00},
- {R367CAB_RE_STATUS_2, 0x00},
- {R367CAB_RE_STATUS_3, 0x00},
- {R367CAB_TS_STATUS_0, 0x00},
- {R367CAB_TS_STATUS_1, 0x00},
- {R367CAB_TS_STATUS_2, 0xa0},
- {R367CAB_TS_STATUS_3, 0x00},
- {R367CAB_T_O_ID_0, 0x00},
- {R367CAB_T_O_ID_1, 0x00},
- {R367CAB_T_O_ID_2, 0x00},
- {R367CAB_T_O_ID_3, 0x00},
-};
-
static
int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len)
{
@@ -899,6 +263,78 @@ static u8 stv0367_getbits(u8 reg, u32 label)
return (reg & mask) >> pos;
}
#endif
+
+static void stv0367_write_table(struct stv0367_state *state,
+ const struct st_register *deftab)
+{
+ int i = 0;
+
+ while (1) {
+ if (!deftab[i].addr)
+ break;
+ stv0367_writereg(state, deftab[i].addr, deftab[i].value);
+ i++;
+ }
+}
+
+static void stv0367_pll_setup(struct stv0367_state *state,
+ u32 icspeed, u32 xtal)
+{
+ /* note on regs: R367TER_* and R367CAB_* defines each point to
+ * 0xf0d8, so just use R367TER_ for both cases
+ */
+
+ switch (icspeed) {
+ case STV0367_ICSPEED_58000:
+ switch (xtal) {
+ default:
+ case 27000000:
+ dprintk("STV0367 SetCLKgen for 58MHz IC and 27Mhz crystal\n");
+ /* PLLMDIV: 27, PLLNDIV: 232 */
+ stv0367_writereg(state, R367TER_PLLMDIV, 0x1b);
+ stv0367_writereg(state, R367TER_PLLNDIV, 0xe8);
+ break;
+ }
+ break;
+ default:
+ case STV0367_ICSPEED_53125:
+ switch (xtal) {
+ /* set internal freq to 53.125MHz */
+ case 16000000:
+ stv0367_writereg(state, R367TER_PLLMDIV, 0x2);
+ stv0367_writereg(state, R367TER_PLLNDIV, 0x1b);
+ break;
+ case 25000000:
+ stv0367_writereg(state, R367TER_PLLMDIV, 0xa);
+ stv0367_writereg(state, R367TER_PLLNDIV, 0x55);
+ break;
+ default:
+ case 27000000:
+ dprintk("FE_STV0367TER_SetCLKgen for 27Mhz\n");
+ stv0367_writereg(state, R367TER_PLLMDIV, 0x1);
+ stv0367_writereg(state, R367TER_PLLNDIV, 0x8);
+ break;
+ case 30000000:
+ stv0367_writereg(state, R367TER_PLLMDIV, 0xc);
+ stv0367_writereg(state, R367TER_PLLNDIV, 0x55);
+ break;
+ }
+ }
+
+ stv0367_writereg(state, R367TER_PLLSETUP, 0x18);
+}
+
+static int stv0367_get_if_khz(struct stv0367_state *state, u32 *ifkhz)
+{
+ if (state->auto_if_khz && state->fe.ops.tuner_ops.get_if_frequency) {
+ state->fe.ops.tuner_ops.get_if_frequency(&state->fe, ifkhz);
+ *ifkhz = *ifkhz / 1000; /* hz -> khz */
+ } else
+ *ifkhz = state->config->if_khz;
+
+ return 0;
+}
+
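
stv0367_write_table() walks its table until it reaches an entry whose address is zero, so the default-register tables this patch moves out of the file (into stv0367_defs.h, included above, where struct st_register presumably now lives) are expected to end with a terminator entry. A hedged sketch of such a table and its use; the two register values are copied from the removed def0367ter[] table, while the table name and the {0, 0} terminator are assumptions implied by the loop:

/* Sketch only: a sentinel-terminated table in the form consumed by
 * stv0367_write_table().
 */
static const struct st_register demo_tab[] = {
	{ R367TER_ID,     0x60 },
	{ R367TER_I2CRPT, 0xa0 },
	{ 0x0000,         0x00 },	/* terminator: addr == 0 stops the loop */
};

/* stv0367_write_table(state, demo_tab); */
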
static int stv0367ter_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct stv0367_state *state = fe->demodulator_priv;
@@ -1260,9 +696,9 @@ stv0367_ter_signal_type stv0367ter_check_cpamp(struct stv0367_state *state,
dprintk("******last CPAMPvalue= %d at wd=%d\n", CPAMPvalue, wd);
if (CPAMPvalue < CPAMPMin) {
CPAMPStatus = FE_TER_NOCPAMP;
- printk(KERN_ERR "CPAMP failed\n");
+ dprintk("%s: CPAMP failed\n", __func__);
} else {
- printk(KERN_ERR "CPAMP OK !\n");
+ dprintk("%s: CPAMP OK !\n", __func__);
CPAMPStatus = FE_TER_CPAMPOK;
}
@@ -1538,41 +974,15 @@ static int stv0367ter_init(struct dvb_frontend *fe)
{
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367ter_state *ter_state = state->ter_state;
- int i;
dprintk("%s:\n", __func__);
ter_state->pBER = 0;
- for (i = 0; i < STV0367TER_NBREGS; i++)
- stv0367_writereg(state, def0367ter[i].addr,
- def0367ter[i].value);
+ stv0367_write_table(state,
+ stv0367_deftabs[state->deftabs][STV0367_TAB_TER]);
- switch (state->config->xtal) {
- /*set internal freq to 53.125MHz */
- case 16000000:
- stv0367_writereg(state, R367TER_PLLMDIV, 0x2);
- stv0367_writereg(state, R367TER_PLLNDIV, 0x1b);
- stv0367_writereg(state, R367TER_PLLSETUP, 0x18);
- break;
- case 25000000:
- stv0367_writereg(state, R367TER_PLLMDIV, 0xa);
- stv0367_writereg(state, R367TER_PLLNDIV, 0x55);
- stv0367_writereg(state, R367TER_PLLSETUP, 0x18);
- break;
- default:
- case 27000000:
- dprintk("FE_STV0367TER_SetCLKgen for 27Mhz\n");
- stv0367_writereg(state, R367TER_PLLMDIV, 0x1);
- stv0367_writereg(state, R367TER_PLLNDIV, 0x8);
- stv0367_writereg(state, R367TER_PLLSETUP, 0x18);
- break;
- case 30000000:
- stv0367_writereg(state, R367TER_PLLMDIV, 0xc);
- stv0367_writereg(state, R367TER_PLLNDIV, 0x55);
- stv0367_writereg(state, R367TER_PLLSETUP, 0x18);
- break;
- }
+ stv0367_pll_setup(state, STV0367_ICSPEED_53125, state->config->xtal);
stv0367_writereg(state, R367TER_I2CRPT, 0xa0);
stv0367_writereg(state, R367TER_ANACTRL, 0x00);
@@ -1598,10 +1008,12 @@ static int stv0367ter_algo(struct dvb_frontend *fe)
u8 /*constell,*/ counter;
s8 step;
s32 timing_offset = 0;
- u32 trl_nomrate = 0, InternalFreq = 0, temp = 0;
+ u32 trl_nomrate = 0, InternalFreq = 0, temp = 0, ifkhz = 0;
dprintk("%s:\n", __func__);
+ stv0367_get_if_khz(state, &ifkhz);
+
ter_state->frequency = p->frequency;
ter_state->force = FE_TER_FORCENONE
+ stv0367_readbits(state, F367TER_FORCE) * 2;
@@ -1704,8 +1116,7 @@ static int stv0367ter_algo(struct dvb_frontend *fe)
stv0367_readbits(state, F367TER_GAIN_SRC_LO);
temp = (int)
- ((InternalFreq - state->config->if_khz) * (1 << 16)
- / (InternalFreq));
+ ((InternalFreq - ifkhz) * (1 << 16) / (InternalFreq));
dprintk("DEROT temp=0x%x\n", temp);
stv0367_writebits(state, F367TER_INC_DEROT_HI, temp / 256);
@@ -1824,13 +1235,14 @@ static int stv0367ter_set_frontend(struct dvb_frontend *fe)
s8 num_trials, index;
u8 SenseTrials[] = { INVERSION_ON, INVERSION_OFF };
- stv0367ter_init(fe);
+ if (state->reinit_on_setfrontend)
+ stv0367ter_init(fe);
if (fe->ops.tuner_ops.set_params) {
- if (fe->ops.i2c_gate_ctrl)
+ if (state->use_i2c_gatectrl && fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
fe->ops.tuner_ops.set_params(fe);
- if (fe->ops.i2c_gate_ctrl)
+ if (state->use_i2c_gatectrl && fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
@@ -2321,6 +1733,12 @@ struct dvb_frontend *stv0367ter_attach(const struct stv0367_config *config,
state->fe.demodulator_priv = state;
state->chip_id = stv0367_readreg(state, 0xf000);
+ /* demod operation options */
+ state->use_i2c_gatectrl = 1;
+ state->deftabs = STV0367_DEFTAB_GENERIC;
+ state->reinit_on_setfrontend = 1;
+ state->auto_if_khz = 0;
+
dprintk("%s: chip_id = 0x%x\n", __func__, state->chip_id);
/* check if the demod is there */
@@ -2423,11 +1841,11 @@ static enum stv0367cab_mod stv0367cab_SetQamSize(struct stv0367_state *state,
case FE_CAB_MOD_QAM64:
stv0367_writereg(state, R367CAB_IQDEM_ADJ_AGC_REF, 0x82);
stv0367_writereg(state, R367CAB_AGC_PWR_REF_L, 0x5a);
- if (SymbolRate > 45000000) {
+ if (SymbolRate > 4500000) {
stv0367_writereg(state, R367CAB_FSM_STATE, 0xb0);
stv0367_writereg(state, R367CAB_EQU_CTR_LPF_GAIN, 0xc1);
stv0367_writereg(state, R367CAB_EQU_CRL_LPF_GAIN, 0xa5);
- } else if (SymbolRate > 25000000) {
+ } else if (SymbolRate > 2500000) {
stv0367_writereg(state, R367CAB_FSM_STATE, 0xa0);
stv0367_writereg(state, R367CAB_EQU_CTR_LPF_GAIN, 0xc1);
stv0367_writereg(state, R367CAB_EQU_CRL_LPF_GAIN, 0xa6);
@@ -2445,9 +1863,9 @@ static enum stv0367cab_mod stv0367cab_SetQamSize(struct stv0367_state *state,
stv0367_writereg(state, R367CAB_AGC_PWR_REF_L, 0x76);
stv0367_writereg(state, R367CAB_FSM_STATE, 0x90);
stv0367_writereg(state, R367CAB_EQU_CTR_LPF_GAIN, 0xb1);
- if (SymbolRate > 45000000)
+ if (SymbolRate > 4500000)
stv0367_writereg(state, R367CAB_EQU_CRL_LPF_GAIN, 0xa7);
- else if (SymbolRate > 25000000)
+ else if (SymbolRate > 2500000)
stv0367_writereg(state, R367CAB_EQU_CRL_LPF_GAIN, 0xa6);
else
stv0367_writereg(state, R367CAB_EQU_CRL_LPF_GAIN, 0x97);
@@ -2460,9 +1878,9 @@ static enum stv0367cab_mod stv0367cab_SetQamSize(struct stv0367_state *state,
stv0367_writereg(state, R367CAB_IQDEM_ADJ_AGC_REF, 0x94);
stv0367_writereg(state, R367CAB_AGC_PWR_REF_L, 0x5a);
stv0367_writereg(state, R367CAB_FSM_STATE, 0xa0);
- if (SymbolRate > 45000000)
+ if (SymbolRate > 4500000)
stv0367_writereg(state, R367CAB_EQU_CTR_LPF_GAIN, 0xc1);
- else if (SymbolRate > 25000000)
+ else if (SymbolRate > 2500000)
stv0367_writereg(state, R367CAB_EQU_CTR_LPF_GAIN, 0xc1);
else
stv0367_writereg(state, R367CAB_EQU_CTR_LPF_GAIN, 0xd1);
@@ -2731,7 +2149,8 @@ static int stv0367cab_read_status(struct dvb_frontend *fe,
*status = 0;
- if (stv0367_readbits(state, F367CAB_QAMFEC_LOCK)) {
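+ /* the FEC lock status register can be overridden per board at attach time */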
+ if (stv0367_readbits(state, (state->cab_state->qamfec_status_reg ?
+ state->cab_state->qamfec_status_reg : F367CAB_QAMFEC_LOCK))) {
*status |= FE_HAS_LOCK;
dprintk("%s: stv0367 has locked\n", __func__);
}
@@ -2777,13 +2196,11 @@ static int stv0367cab_init(struct dvb_frontend *fe)
{
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367cab_state *cab_state = state->cab_state;
- int i;
dprintk("%s:\n", __func__);
- for (i = 0; i < STV0367CAB_NBREGS; i++)
- stv0367_writereg(state, def0367cab[i].addr,
- def0367cab[i].value);
+ stv0367_write_table(state,
+ stv0367_deftabs[state->deftabs][STV0367_TAB_CAB]);
switch (state->config->ts_mode) {
case STV0367_DVBCI_CLOCK:
@@ -2831,7 +2248,7 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state,
{
struct stv0367cab_state *cab_state = state->cab_state;
enum stv0367_cab_signal_type signalType = FE_CAB_NOAGC;
- u32 QAMFEC_Lock, QAM_Lock, u32_tmp,
+ u32 QAMFEC_Lock, QAM_Lock, u32_tmp, ifkhz,
LockTime, TRLTimeOut, AGCTimeOut, CRLSymbols,
CRLTimeOut, EQLTimeOut, DemodTimeOut, FECTimeOut;
u8 TrackAGCAccum;
@@ -2839,6 +2256,8 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state,
dprintk("%s:\n", __func__);
+ stv0367_get_if_khz(state, &ifkhz);
+
/* Timeouts calculation */
/* A max lock time of 25 ms is allowed for delayed AGC */
AGCTimeOut = 25;
@@ -2917,7 +2336,7 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state,
/* The sweep function is never used, Sweep rate must be set to 0 */
/* Set the derotator frequency in Hz */
stv0367cab_set_derot_freq(state, cab_state->adc_clk,
- (1000 * (s32)state->config->if_khz + cab_state->derot_offset));
+ (1000 * (s32)ifkhz + cab_state->derot_offset));
/* Disable the Allpass Filter when the symbol rate is out of range */
if ((p->symbol_rate > 10800000) | (p->symbol_rate < 1800000)) {
stv0367_writebits(state, F367CAB_ADJ_EN, 0);
@@ -2996,7 +2415,9 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state,
usleep_range(5000, 7000);
LockTime += 5;
QAMFEC_Lock = stv0367_readbits(state,
- F367CAB_QAMFEC_LOCK);
+ (state->cab_state->qamfec_status_reg ?
+ state->cab_state->qamfec_status_reg :
+ F367CAB_QAMFEC_LOCK));
} while (!QAMFEC_Lock && (LockTime < FECTimeOut));
} else
QAMFEC_Lock = 0;
@@ -3007,17 +2428,17 @@ enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state,
F367CAB_QUAD_INV);
#if 0
/* not clear to me */
- if (state->config->if_khz != 0) {
- if (state->config->if_khz > cab_state->adc_clk / 1000) {
+ if (ifkhz != 0) {
+ if (ifkhz > cab_state->adc_clk / 1000) {
cab_state->freq_khz =
FE_Cab_TunerGetFrequency(pIntParams->hTuner)
- stv0367cab_get_derot_freq(state, cab_state->adc_clk)
- - cab_state->adc_clk / 1000 + state->config->if_khz;
+ - cab_state->adc_clk / 1000 + ifkhz;
} else {
cab_state->freq_khz =
FE_Cab_TunerGetFrequency(pIntParams->hTuner)
- stv0367cab_get_derot_freq(state, cab_state->adc_clk)
- + state->config->if_khz;
+ + ifkhz;
}
} else {
cab_state->freq_khz =
@@ -3116,14 +2537,15 @@ static int stv0367cab_set_frontend(struct dvb_frontend *fe)
break;
}
- stv0367cab_init(fe);
+ if (state->reinit_on_setfrontend)
+ stv0367cab_init(fe);
/* Tuner Frequency Setting */
if (fe->ops.tuner_ops.set_params) {
- if (fe->ops.i2c_gate_ctrl)
+ if (state->use_i2c_gatectrl && fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
fe->ops.tuner_ops.set_params(fe);
- if (fe->ops.i2c_gate_ctrl)
+ if (state->use_i2c_gatectrl && fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
@@ -3147,11 +2569,13 @@ static int stv0367cab_get_frontend(struct dvb_frontend *fe,
{
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367cab_state *cab_state = state->cab_state;
+ u32 ifkhz = 0;
enum stv0367cab_mod QAMSize;
dprintk("%s:\n", __func__);
+ stv0367_get_if_khz(state, &ifkhz);
p->symbol_rate = stv0367cab_GetSymbolRate(state, cab_state->mclk);
QAMSize = stv0367_readbits(state, F367CAB_QAM_MODE);
@@ -3179,19 +2603,19 @@ static int stv0367cab_get_frontend(struct dvb_frontend *fe,
dprintk("%s: tuner frequency = %d\n", __func__, p->frequency);
- if (state->config->if_khz == 0) {
+ if (ifkhz == 0) {
p->frequency +=
(stv0367cab_get_derot_freq(state, cab_state->adc_clk) -
cab_state->adc_clk / 4000);
return 0;
}
- if (state->config->if_khz > cab_state->adc_clk / 1000)
- p->frequency += (state->config->if_khz
+ if (ifkhz > cab_state->adc_clk / 1000)
+ p->frequency += (ifkhz
- stv0367cab_get_derot_freq(state, cab_state->adc_clk)
- cab_state->adc_clk / 1000);
else
- p->frequency += (state->config->if_khz
+ p->frequency += (ifkhz
- stv0367cab_get_derot_freq(state, cab_state->adc_clk));
return 0;
@@ -3432,11 +2856,18 @@ struct dvb_frontend *stv0367cab_attach(const struct stv0367_config *config,
state->i2c = i2c;
state->config = config;
cab_state->search_range = 280000;
+ cab_state->qamfec_status_reg = F367CAB_QAMFEC_LOCK;
state->cab_state = cab_state;
state->fe.ops = stv0367cab_ops;
state->fe.demodulator_priv = state;
state->chip_id = stv0367_readreg(state, 0xf000);
+ /* demod operation options */
+ state->use_i2c_gatectrl = 1;
+ state->deftabs = STV0367_DEFTAB_GENERIC;
+ state->reinit_on_setfrontend = 1;
+ state->auto_if_khz = 0;
+
dprintk("%s: chip_id = 0x%x\n", __func__, state->chip_id);
/* check if the demod is there */
@@ -3452,6 +2883,327 @@ error:
}
EXPORT_SYMBOL(stv0367cab_attach);
+/*
+ * Functions for operation on Digital Devices hardware
+ */
+
+static void stv0367ddb_setup_ter(struct stv0367_state *state)
+{
+ stv0367_writereg(state, R367TER_DEBUG_LT4, 0x00);
+ stv0367_writereg(state, R367TER_DEBUG_LT5, 0x00);
+ stv0367_writereg(state, R367TER_DEBUG_LT6, 0x00); /* R367CAB_CTRL_1 */
+ stv0367_writereg(state, R367TER_DEBUG_LT7, 0x00); /* R367CAB_CTRL_2 */
+ stv0367_writereg(state, R367TER_DEBUG_LT8, 0x00);
+ stv0367_writereg(state, R367TER_DEBUG_LT9, 0x00);
+
+ /* Tuner Setup */
+ /* Buffer Q disabled, I Enabled, unsigned ADC */
+ stv0367_writereg(state, R367TER_ANADIGCTRL, 0x89);
+ stv0367_writereg(state, R367TER_DUAL_AD12, 0x04); /* ADCQ disabled */
+
+ /* Clock setup */
+ /* PLL bypassed and disabled */
+ stv0367_writereg(state, R367TER_ANACTRL, 0x0D);
+ stv0367_writereg(state, R367TER_TOPCTRL, 0x00); /* Set OFDM */
+
+ /* IC runs at 53.125 MHz with a 27 MHz crystal */
+ stv0367_pll_setup(state, STV0367_ICSPEED_53125, state->config->xtal);
+
+ msleep(50);
+ /* PLL enabled and used */
+ stv0367_writereg(state, R367TER_ANACTRL, 0x00);
+
+ state->activedemod = demod_ter;
+}
+
+static void stv0367ddb_setup_cab(struct stv0367_state *state)
+{
+ stv0367_writereg(state, R367TER_DEBUG_LT4, 0x00);
+ stv0367_writereg(state, R367TER_DEBUG_LT5, 0x01);
+ stv0367_writereg(state, R367TER_DEBUG_LT6, 0x06); /* R367CAB_CTRL_1 */
+ stv0367_writereg(state, R367TER_DEBUG_LT7, 0x03); /* R367CAB_CTRL_2 */
+ stv0367_writereg(state, R367TER_DEBUG_LT8, 0x00);
+ stv0367_writereg(state, R367TER_DEBUG_LT9, 0x00);
+
+ /* Tuner Setup */
+ /* Buffer Q disabled, I Enabled, signed ADC */
+ stv0367_writereg(state, R367TER_ANADIGCTRL, 0x8B);
+ /* ADCQ disabled */
+ stv0367_writereg(state, R367TER_DUAL_AD12, 0x04);
+
+ /* Clock setup */
+ /* PLL bypassed and disabled */
+ stv0367_writereg(state, R367TER_ANACTRL, 0x0D);
+ /* Set QAM */
+ stv0367_writereg(state, R367TER_TOPCTRL, 0x10);
+
+ /* IC runs at 58 MHz with a 27 MHz crystal */
+ stv0367_pll_setup(state, STV0367_ICSPEED_58000, state->config->xtal);
+
+ msleep(50);
+ /* PLL enabled and used */
+ stv0367_writereg(state, R367TER_ANACTRL, 0x00);
+
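+ /* cache master clock and ADC clock rates for the QAM path */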
+ state->cab_state->mclk = stv0367cab_get_mclk(&state->fe,
+ state->config->xtal);
+ state->cab_state->adc_clk = stv0367cab_get_adc_freq(&state->fe,
+ state->config->xtal);
+
+ state->activedemod = demod_cab;
+}
+
+static int stv0367ddb_set_frontend(struct dvb_frontend *fe)
+{
+ struct stv0367_state *state = fe->demodulator_priv;
+
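+ /* route the tune request to the demod path matching the delivery system */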
+ switch (fe->dtv_property_cache.delivery_system) {
+ case SYS_DVBT:
+ if (state->activedemod != demod_ter)
+ stv0367ddb_setup_ter(state);
+
+ return stv0367ter_set_frontend(fe);
+ case SYS_DVBC_ANNEX_A:
+ if (state->activedemod != demod_cab)
+ stv0367ddb_setup_cab(state);
+
+ /* protect against division error oopses */
+ if (fe->dtv_property_cache.symbol_rate == 0) {
+ printk(KERN_ERR "Invalid symbol rate\n");
+ return -EINVAL;
+ }
+
+ return stv0367cab_set_frontend(fe);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int stv0367ddb_read_status(struct dvb_frontend *fe,
+ enum fe_status *status)
+{
+ struct stv0367_state *state = fe->demodulator_priv;
+
+ switch (state->activedemod) {
+ case demod_ter:
+ return stv0367ter_read_status(fe, status);
+ case demod_cab:
+ return stv0367cab_read_status(fe, status);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int stv0367ddb_get_frontend(struct dvb_frontend *fe,
+ struct dtv_frontend_properties *p)
+{
+ struct stv0367_state *state = fe->demodulator_priv;
+
+ switch (state->activedemod) {
+ case demod_ter:
+ return stv0367ter_get_frontend(fe, p);
+ case demod_cab:
+ return stv0367cab_get_frontend(fe, p);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int stv0367ddb_sleep(struct dvb_frontend *fe)
+{
+ struct stv0367_state *state = fe->demodulator_priv;
+
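+ /* demod_none forces a full path re-setup on the next set_frontend call */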
+ switch (state->activedemod) {
+ case demod_ter:
+ state->activedemod = demod_none;
+ return stv0367ter_sleep(fe);
+ case demod_cab:
+ state->activedemod = demod_none;
+ return stv0367cab_sleep(fe);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int stv0367ddb_init(struct stv0367_state *state)
+{
+ struct stv0367ter_state *ter_state = state->ter_state;
+
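+ /* load the base, QAM and OFDM default register tables */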
+ stv0367_writereg(state, R367TER_TOPCTRL, 0x10);
+
+ if (stv0367_deftabs[state->deftabs][STV0367_TAB_BASE])
+ stv0367_write_table(state,
+ stv0367_deftabs[state->deftabs][STV0367_TAB_BASE]);
+
+ stv0367_write_table(state,
+ stv0367_deftabs[state->deftabs][STV0367_TAB_CAB]);
+
+ stv0367_writereg(state, R367TER_TOPCTRL, 0x00);
+ stv0367_write_table(state,
+ stv0367_deftabs[state->deftabs][STV0367_TAB_TER]);
+
+ stv0367_writereg(state, R367TER_GAIN_SRC1, 0x2A);
+ stv0367_writereg(state, R367TER_GAIN_SRC2, 0xD6);
+ stv0367_writereg(state, R367TER_INC_DEROT1, 0x55);
+ stv0367_writereg(state, R367TER_INC_DEROT2, 0x55);
+ stv0367_writereg(state, R367TER_TRL_CTL, 0x14);
+ stv0367_writereg(state, R367TER_TRL_NOMRATE1, 0xAE);
+ stv0367_writereg(state, R367TER_TRL_NOMRATE2, 0x56);
+ stv0367_writereg(state, R367TER_FEPATH_CFG, 0x0);
+
+ /* OFDM TS Setup */
+
+ stv0367_writereg(state, R367TER_TSCFGH, 0x70);
+ stv0367_writereg(state, R367TER_TSCFGM, 0xC0);
+ stv0367_writereg(state, R367TER_TSCFGL, 0x20);
+ stv0367_writereg(state, R367TER_TSSPEED, 0x40); /* Fixed at 54 MHz */
+
+ stv0367_writereg(state, R367TER_TSCFGH, 0x71);
+ stv0367_writereg(state, R367TER_TSCFGH, 0x70);
+
+ stv0367_writereg(state, R367TER_TOPCTRL, 0x10);
+
+ /* Also needed for QAM */
+ stv0367_writereg(state, R367TER_AGC12C, 0x01); /* AGC Pin setup */
+
+ stv0367_writereg(state, R367TER_AGCCTRL1, 0x8A);
+
+ /* QAM TS setup; exact output format also depends on descrambler settings */
+ /* Inverted Clock, Swap, serial */
+ stv0367_writereg(state, R367CAB_OUTFORMAT_0, 0x85);
+
+ /* Clock setup (PLL bypassed and disabled) */
+ stv0367_writereg(state, R367TER_ANACTRL, 0x0D);
+
+ /* IC runs at 58 MHz with a 27 MHz crystal */
+ stv0367_pll_setup(state, STV0367_ICSPEED_58000, state->config->xtal);
+
+ /* Tuner setup */
+ /* Buffer Q disabled, I Enabled, signed ADC */
+ stv0367_writereg(state, R367TER_ANADIGCTRL, 0x8b);
+ stv0367_writereg(state, R367TER_DUAL_AD12, 0x04); /* ADCQ disabled */
+
+ /* Improves the C/N lock limit */
+ stv0367_writereg(state, R367CAB_FSM_SNR2_HTH, 0x23);
+ /* ZIF/IF Automatic mode */
+ stv0367_writereg(state, R367CAB_IQ_QAM, 0x01);
+ /* Improving burst noise performances */
+ stv0367_writereg(state, R367CAB_EQU_FFE_LEAKAGE, 0x83);
+ /* Improving ACI performances */
+ stv0367_writereg(state, R367CAB_IQDEM_ADJ_EN, 0x05);
+
+ /* PLL enabled and used */
+ stv0367_writereg(state, R367TER_ANACTRL, 0x00);
+
+ stv0367_writereg(state, R367TER_I2CRPT, (0x08 | ((5 & 0x07) << 4)));
+
+ ter_state->pBER = 0;
+ ter_state->first_lock = 0;
+ ter_state->unlock_counter = 2;
+
+ return 0;
+}
+
+static const struct dvb_frontend_ops stv0367ddb_ops = {
+ .delsys = { SYS_DVBC_ANNEX_A, SYS_DVBT },
+ .info = {
+ .name = "ST STV0367 DDB DVB-C/T",
+ .frequency_min = 47000000,
+ .frequency_max = 865000000,
+ .frequency_stepsize = 166667,
+ .frequency_tolerance = 0,
+ .symbol_rate_min = 870000,
+ .symbol_rate_max = 11700000,
+ .caps = /* DVB-C */
+ 0x400 |/* FE_CAN_QAM_4 */
+ FE_CAN_QAM_16 | FE_CAN_QAM_32 |
+ FE_CAN_QAM_64 | FE_CAN_QAM_128 |
+ FE_CAN_QAM_256 | FE_CAN_FEC_AUTO |
+ /* DVB-T */
+ FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
+ FE_CAN_FEC_AUTO |
+ FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
+ FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_QAM_AUTO |
+ FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_RECOVER |
+ FE_CAN_INVERSION_AUTO |
+ FE_CAN_MUTE_TS
+ },
+ .release = stv0367_release,
+ .sleep = stv0367ddb_sleep,
+ .i2c_gate_ctrl = stv0367cab_gate_ctrl, /* valid for TER and CAB */
+ .set_frontend = stv0367ddb_set_frontend,
+ .get_frontend = stv0367ddb_get_frontend,
+ .get_tune_settings = stv0367_get_tune_settings,
+ .read_status = stv0367ddb_read_status,
+};
+
+struct dvb_frontend *stv0367ddb_attach(const struct stv0367_config *config,
+ struct i2c_adapter *i2c)
+{
+ struct stv0367_state *state = NULL;
+ struct stv0367ter_state *ter_state = NULL;
+ struct stv0367cab_state *cab_state = NULL;
+
+ /* allocate memory for the internal state */
+ state = kzalloc(sizeof(struct stv0367_state), GFP_KERNEL);
+ if (state == NULL)
+ goto error;
+ ter_state = kzalloc(sizeof(struct stv0367ter_state), GFP_KERNEL);
+ if (ter_state == NULL)
+ goto error;
+ cab_state = kzalloc(sizeof(struct stv0367cab_state), GFP_KERNEL);
+ if (cab_state == NULL)
+ goto error;
+
+ /* setup the state */
+ state->i2c = i2c;
+ state->config = config;
+ state->ter_state = ter_state;
+ cab_state->search_range = 280000;
+ cab_state->qamfec_status_reg = F367CAB_DESCR_SYNCSTATE;
+ state->cab_state = cab_state;
+ state->fe.ops = stv0367ddb_ops;
+ state->fe.demodulator_priv = state;
+ state->chip_id = stv0367_readreg(state, R367TER_ID);
+
+ /* demod operation options */
+ state->use_i2c_gatectrl = 0;
+ state->deftabs = STV0367_DEFTAB_DDB;
+ state->reinit_on_setfrontend = 0;
+ state->auto_if_khz = 1;
+ state->activedemod = demod_none;
+
+ dprintk("%s: chip_id = 0x%x\n", __func__, state->chip_id);
+
+ /* check if the demod is there */
+ if ((state->chip_id != 0x50) && (state->chip_id != 0x60))
+ goto error;
+
+ dev_info(&i2c->dev, "Found %s with ChipID %02X at adr %02X\n",
+ state->fe.ops.info.name, state->chip_id,
+ config->demod_address);
+
+ stv0367ddb_init(state);
+
+ return &state->fe;
+
+error:
+ kfree(cab_state);
+ kfree(ter_state);
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL(stv0367ddb_attach);
+
MODULE_PARM_DESC(debug, "Set debug");
MODULE_PARM_DESC(i2c_debug, "Set i2c debug");
diff --git a/drivers/media/dvb-frontends/stv0367.h b/drivers/media/dvb-frontends/stv0367.h
index 26c38a0503c8..8f7a31481744 100644
--- a/drivers/media/dvb-frontends/stv0367.h
+++ b/drivers/media/dvb-frontends/stv0367.h
@@ -25,6 +25,9 @@
#include <linux/dvb/frontend.h>
#include "dvb_frontend.h"
+#define STV0367_ICSPEED_53125 53125000
+#define STV0367_ICSPEED_58000 58000000
+
struct stv0367_config {
u8 demod_address;
u32 xtal;
@@ -41,6 +44,9 @@ dvb_frontend *stv0367ter_attach(const struct stv0367_config *config,
extern struct
dvb_frontend *stv0367cab_attach(const struct stv0367_config *config,
struct i2c_adapter *i2c);
+extern struct
+dvb_frontend *stv0367ddb_attach(const struct stv0367_config *config,
+ struct i2c_adapter *i2c);
#else
static inline struct
dvb_frontend *stv0367ter_attach(const struct stv0367_config *config,
@@ -56,6 +62,13 @@ dvb_frontend *stv0367cab_attach(const struct stv0367_config *config,
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
+static inline struct
+dvb_frontend *stv0367ddb_attach(const struct stv0367_config *config,
+ struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
#endif
#endif
diff --git a/drivers/media/dvb-frontends/stv0367_defs.h b/drivers/media/dvb-frontends/stv0367_defs.h
new file mode 100644
index 000000000000..277d2971ed3f
--- /dev/null
+++ b/drivers/media/dvb-frontends/stv0367_defs.h
@@ -0,0 +1,1301 @@
+/*
+ * stv0367_defs.h
+ *
+ * Driver for ST STV0367 DVB-T & DVB-C demodulator IC.
+ *
+ * Copyright (C) ST Microelectronics.
+ * Copyright (C) 2010,2011 NetUP Inc.
+ * Copyright (C) 2010,2011 Igor M. Liplianin <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef STV0367_DEFS_H
+#define STV0367_DEFS_H
+
+#include "stv0367_regs.h"
+
+#define STV0367_DEFTAB_GENERIC 0
+#define STV0367_DEFTAB_DDB 1
+#define STV0367_DEFTAB_MAX 2
+
+#define STV0367_TAB_TER 0
+#define STV0367_TAB_CAB 1
+#define STV0367_TAB_BASE 2
+#define STV0367_TAB_MAX 3
+
+struct st_register {
+ u16 addr;
+ u8 value;
+};
+
+/* values for STV4100 XTAL=30M int clk=53.125M */
+static const struct st_register def0367ter[] = {
+ {R367TER_ID, 0x60},
+ {R367TER_I2CRPT, 0xa0},
+ /* {R367TER_I2CRPT, 0x22},*/
+ {R367TER_TOPCTRL, 0x00},/* for xc5000; was 0x02 */
+ {R367TER_IOCFG0, 0x40},
+ {R367TER_DAC0R, 0x00},
+ {R367TER_IOCFG1, 0x00},
+ {R367TER_DAC1R, 0x00},
+ {R367TER_IOCFG2, 0x62},
+ {R367TER_SDFR, 0x00},
+ {R367TER_STATUS, 0xf8},
+ {R367TER_AUX_CLK, 0x0a},
+ {R367TER_FREESYS1, 0x00},
+ {R367TER_FREESYS2, 0x00},
+ {R367TER_FREESYS3, 0x00},
+ {R367TER_GPIO_CFG, 0x55},
+ {R367TER_GPIO_CMD, 0x00},
+ {R367TER_AGC2MAX, 0xff},
+ {R367TER_AGC2MIN, 0x00},
+ {R367TER_AGC1MAX, 0xff},
+ {R367TER_AGC1MIN, 0x00},
+ {R367TER_AGCR, 0xbc},
+ {R367TER_AGC2TH, 0x00},
+ {R367TER_AGC12C, 0x00},
+ {R367TER_AGCCTRL1, 0x85},
+ {R367TER_AGCCTRL2, 0x1f},
+ {R367TER_AGC1VAL1, 0x00},
+ {R367TER_AGC1VAL2, 0x00},
+ {R367TER_AGC2VAL1, 0x6f},
+ {R367TER_AGC2VAL2, 0x05},
+ {R367TER_AGC2PGA, 0x00},
+ {R367TER_OVF_RATE1, 0x00},
+ {R367TER_OVF_RATE2, 0x00},
+ {R367TER_GAIN_SRC1, 0xaa},/* for xc5000; was 0x2b */
+ {R367TER_GAIN_SRC2, 0xd6},/* for xc5000; was 0x04 */
+ {R367TER_INC_DEROT1, 0x55},
+ {R367TER_INC_DEROT2, 0x55},
+ {R367TER_PPM_CPAMP_DIR, 0x2c},
+ {R367TER_PPM_CPAMP_INV, 0x00},
+ {R367TER_FREESTFE_1, 0x00},
+ {R367TER_FREESTFE_2, 0x1c},
+ {R367TER_DCOFFSET, 0x00},
+ {R367TER_EN_PROCESS, 0x05},
+ {R367TER_SDI_SMOOTHER, 0x80},
+ {R367TER_FE_LOOP_OPEN, 0x1c},
+ {R367TER_FREQOFF1, 0x00},
+ {R367TER_FREQOFF2, 0x00},
+ {R367TER_FREQOFF3, 0x00},
+ {R367TER_TIMOFF1, 0x00},
+ {R367TER_TIMOFF2, 0x00},
+ {R367TER_EPQ, 0x02},
+ {R367TER_EPQAUTO, 0x01},
+ {R367TER_SYR_UPDATE, 0xf5},
+ {R367TER_CHPFREE, 0x00},
+ {R367TER_PPM_STATE_MAC, 0x23},
+ {R367TER_INR_THRESHOLD, 0xff},
+ {R367TER_EPQ_TPS_ID_CELL, 0xf9},
+ {R367TER_EPQ_CFG, 0x00},
+ {R367TER_EPQ_STATUS, 0x01},
+ {R367TER_AUTORELOCK, 0x81},
+ {R367TER_BER_THR_VMSB, 0x00},
+ {R367TER_BER_THR_MSB, 0x00},
+ {R367TER_BER_THR_LSB, 0x00},
+ {R367TER_CCD, 0x83},
+ {R367TER_SPECTR_CFG, 0x00},
+ {R367TER_CHC_DUMMY, 0x18},
+ {R367TER_INC_CTL, 0x88},
+ {R367TER_INCTHRES_COR1, 0xb4},
+ {R367TER_INCTHRES_COR2, 0x96},
+ {R367TER_INCTHRES_DET1, 0x0e},
+ {R367TER_INCTHRES_DET2, 0x11},
+ {R367TER_IIR_CELLNB, 0x8d},
+ {R367TER_IIRCX_COEFF1_MSB, 0x00},
+ {R367TER_IIRCX_COEFF1_LSB, 0x00},
+ {R367TER_IIRCX_COEFF2_MSB, 0x09},
+ {R367TER_IIRCX_COEFF2_LSB, 0x18},
+ {R367TER_IIRCX_COEFF3_MSB, 0x14},
+ {R367TER_IIRCX_COEFF3_LSB, 0x9c},
+ {R367TER_IIRCX_COEFF4_MSB, 0x00},
+ {R367TER_IIRCX_COEFF4_LSB, 0x00},
+ {R367TER_IIRCX_COEFF5_MSB, 0x36},
+ {R367TER_IIRCX_COEFF5_LSB, 0x42},
+ {R367TER_FEPATH_CFG, 0x00},
+ {R367TER_PMC1_FUNC, 0x65},
+ {R367TER_PMC1_FOR, 0x00},
+ {R367TER_PMC2_FUNC, 0x00},
+ {R367TER_STATUS_ERR_DA, 0xe0},
+ {R367TER_DIG_AGC_R, 0xfe},
+ {R367TER_COMAGC_TARMSB, 0x0b},
+ {R367TER_COM_AGC_TAR_ENMODE, 0x41},
+ {R367TER_COM_AGC_CFG, 0x3e},
+ {R367TER_COM_AGC_GAIN1, 0x39},
+ {R367TER_AUT_AGC_TARGETMSB, 0x0b},
+ {R367TER_LOCK_DET_MSB, 0x01},
+ {R367TER_AGCTAR_LOCK_LSBS, 0x40},
+ {R367TER_AUT_GAIN_EN, 0xf4},
+ {R367TER_AUT_CFG, 0xf0},
+ {R367TER_LOCKN, 0x23},
+ {R367TER_INT_X_3, 0x00},
+ {R367TER_INT_X_2, 0x03},
+ {R367TER_INT_X_1, 0x8d},
+ {R367TER_INT_X_0, 0xa0},
+ {R367TER_MIN_ERRX_MSB, 0x00},
+ {R367TER_COR_CTL, 0x23},
+ {R367TER_COR_STAT, 0xf6},
+ {R367TER_COR_INTEN, 0x00},
+ {R367TER_COR_INTSTAT, 0x3f},
+ {R367TER_COR_MODEGUARD, 0x03},
+ {R367TER_AGC_CTL, 0x08},
+ {R367TER_AGC_MANUAL1, 0x00},
+ {R367TER_AGC_MANUAL2, 0x00},
+ {R367TER_AGC_TARG, 0x16},
+ {R367TER_AGC_GAIN1, 0x53},
+ {R367TER_AGC_GAIN2, 0x1d},
+ {R367TER_RESERVED_1, 0x00},
+ {R367TER_RESERVED_2, 0x00},
+ {R367TER_RESERVED_3, 0x00},
+ {R367TER_CAS_CTL, 0x44},
+ {R367TER_CAS_FREQ, 0xb3},
+ {R367TER_CAS_DAGCGAIN, 0x12},
+ {R367TER_SYR_CTL, 0x04},
+ {R367TER_SYR_STAT, 0x10},
+ {R367TER_SYR_NCO1, 0x00},
+ {R367TER_SYR_NCO2, 0x00},
+ {R367TER_SYR_OFFSET1, 0x00},
+ {R367TER_SYR_OFFSET2, 0x00},
+ {R367TER_FFT_CTL, 0x00},
+ {R367TER_SCR_CTL, 0x70},
+ {R367TER_PPM_CTL1, 0xf8},
+ {R367TER_TRL_CTL, 0x14},/* for xc5000; was 0xac */
+ {R367TER_TRL_NOMRATE1, 0xae},/* for xc5000; was 0x1e */
+ {R367TER_TRL_NOMRATE2, 0x56},/* for xc5000; was 0x58 */
+ {R367TER_TRL_TIME1, 0x1d},
+ {R367TER_TRL_TIME2, 0xfc},
+ {R367TER_CRL_CTL, 0x24},
+ {R367TER_CRL_FREQ1, 0xad},
+ {R367TER_CRL_FREQ2, 0x9d},
+ {R367TER_CRL_FREQ3, 0xff},
+ {R367TER_CHC_CTL, 0x01},
+ {R367TER_CHC_SNR, 0xf0},
+ {R367TER_BDI_CTL, 0x00},
+ {R367TER_DMP_CTL, 0x00},
+ {R367TER_TPS_RCVD1, 0x30},
+ {R367TER_TPS_RCVD2, 0x02},
+ {R367TER_TPS_RCVD3, 0x01},
+ {R367TER_TPS_RCVD4, 0x00},
+ {R367TER_TPS_ID_CELL1, 0x00},
+ {R367TER_TPS_ID_CELL2, 0x00},
+ {R367TER_TPS_RCVD5_SET1, 0x02},
+ {R367TER_TPS_SET2, 0x02},
+ {R367TER_TPS_SET3, 0x01},
+ {R367TER_TPS_CTL, 0x00},
+ {R367TER_CTL_FFTOSNUM, 0x34},
+ {R367TER_TESTSELECT, 0x09},
+ {R367TER_MSC_REV, 0x0a},
+ {R367TER_PIR_CTL, 0x00},
+ {R367TER_SNR_CARRIER1, 0xa1},
+ {R367TER_SNR_CARRIER2, 0x9a},
+ {R367TER_PPM_CPAMP, 0x2c},
+ {R367TER_TSM_AP0, 0x00},
+ {R367TER_TSM_AP1, 0x00},
+ {R367TER_TSM_AP2, 0x00},
+ {R367TER_TSM_AP3, 0x00},
+ {R367TER_TSM_AP4, 0x00},
+ {R367TER_TSM_AP5, 0x00},
+ {R367TER_TSM_AP6, 0x00},
+ {R367TER_TSM_AP7, 0x00},
+ {R367TER_TSTRES, 0x00},
+ {R367TER_ANACTRL, 0x0D},/* PLL stopped, restart at init!!! */
+ {R367TER_TSTBUS, 0x00},
+ {R367TER_TSTRATE, 0x00},
+ {R367TER_CONSTMODE, 0x01},
+ {R367TER_CONSTCARR1, 0x00},
+ {R367TER_CONSTCARR2, 0x00},
+ {R367TER_ICONSTEL, 0x0a},
+ {R367TER_QCONSTEL, 0x15},
+ {R367TER_TSTBISTRES0, 0x00},
+ {R367TER_TSTBISTRES1, 0x00},
+ {R367TER_TSTBISTRES2, 0x28},
+ {R367TER_TSTBISTRES3, 0x00},
+ {R367TER_RF_AGC1, 0xff},
+ {R367TER_RF_AGC2, 0x83},
+ {R367TER_ANADIGCTRL, 0x19},
+ {R367TER_PLLMDIV, 0x01},/* for xc5000; was 0x0c */
+ {R367TER_PLLNDIV, 0x06},/* for xc5000; was 0x55 */
+ {R367TER_PLLSETUP, 0x18},
+ {R367TER_DUAL_AD12, 0x0C},/* for xc5000 AGC voltage 1.6V */
+ {R367TER_TSTBIST, 0x00},
+ {R367TER_PAD_COMP_CTRL, 0x00},
+ {R367TER_PAD_COMP_WR, 0x00},
+ {R367TER_PAD_COMP_RD, 0xe0},
+ {R367TER_SYR_TARGET_FFTADJT_MSB, 0x00},
+ {R367TER_SYR_TARGET_FFTADJT_LSB, 0x00},
+ {R367TER_SYR_TARGET_CHCADJT_MSB, 0x00},
+ {R367TER_SYR_TARGET_CHCADJT_LSB, 0x00},
+ {R367TER_SYR_FLAG, 0x00},
+ {R367TER_CRL_TARGET1, 0x00},
+ {R367TER_CRL_TARGET2, 0x00},
+ {R367TER_CRL_TARGET3, 0x00},
+ {R367TER_CRL_TARGET4, 0x00},
+ {R367TER_CRL_FLAG, 0x00},
+ {R367TER_TRL_TARGET1, 0x00},
+ {R367TER_TRL_TARGET2, 0x00},
+ {R367TER_TRL_CHC, 0x00},
+ {R367TER_CHC_SNR_TARG, 0x00},
+ {R367TER_TOP_TRACK, 0x00},
+ {R367TER_TRACKER_FREE1, 0x00},
+ {R367TER_ERROR_CRL1, 0x00},
+ {R367TER_ERROR_CRL2, 0x00},
+ {R367TER_ERROR_CRL3, 0x00},
+ {R367TER_ERROR_CRL4, 0x00},
+ {R367TER_DEC_NCO1, 0x2c},
+ {R367TER_DEC_NCO2, 0x0f},
+ {R367TER_DEC_NCO3, 0x20},
+ {R367TER_SNR, 0xf1},
+ {R367TER_SYR_FFTADJ1, 0x00},
+ {R367TER_SYR_FFTADJ2, 0x00},
+ {R367TER_SYR_CHCADJ1, 0x00},
+ {R367TER_SYR_CHCADJ2, 0x00},
+ {R367TER_SYR_OFF, 0x00},
+ {R367TER_PPM_OFFSET1, 0x00},
+ {R367TER_PPM_OFFSET2, 0x03},
+ {R367TER_TRACKER_FREE2, 0x00},
+ {R367TER_DEBG_LT10, 0x00},
+ {R367TER_DEBG_LT11, 0x00},
+ {R367TER_DEBG_LT12, 0x00},
+ {R367TER_DEBG_LT13, 0x00},
+ {R367TER_DEBG_LT14, 0x00},
+ {R367TER_DEBG_LT15, 0x00},
+ {R367TER_DEBG_LT16, 0x00},
+ {R367TER_DEBG_LT17, 0x00},
+ {R367TER_DEBG_LT18, 0x00},
+ {R367TER_DEBG_LT19, 0x00},
+ {R367TER_DEBG_LT1A, 0x00},
+ {R367TER_DEBG_LT1B, 0x00},
+ {R367TER_DEBG_LT1C, 0x00},
+ {R367TER_DEBG_LT1D, 0x00},
+ {R367TER_DEBG_LT1E, 0x00},
+ {R367TER_DEBG_LT1F, 0x00},
+ {R367TER_RCCFGH, 0x00},
+ {R367TER_RCCFGM, 0x00},
+ {R367TER_RCCFGL, 0x00},
+ {R367TER_RCINSDELH, 0x00},
+ {R367TER_RCINSDELM, 0x00},
+ {R367TER_RCINSDELL, 0x00},
+ {R367TER_RCSTATUS, 0x00},
+ {R367TER_RCSPEED, 0x6f},
+ {R367TER_RCDEBUGM, 0xe7},
+ {R367TER_RCDEBUGL, 0x9b},
+ {R367TER_RCOBSCFG, 0x00},
+ {R367TER_RCOBSM, 0x00},
+ {R367TER_RCOBSL, 0x00},
+ {R367TER_RCFECSPY, 0x00},
+ {R367TER_RCFSPYCFG, 0x00},
+ {R367TER_RCFSPYDATA, 0x00},
+ {R367TER_RCFSPYOUT, 0x00},
+ {R367TER_RCFSTATUS, 0x00},
+ {R367TER_RCFGOODPACK, 0x00},
+ {R367TER_RCFPACKCNT, 0x00},
+ {R367TER_RCFSPYMISC, 0x00},
+ {R367TER_RCFBERCPT4, 0x00},
+ {R367TER_RCFBERCPT3, 0x00},
+ {R367TER_RCFBERCPT2, 0x00},
+ {R367TER_RCFBERCPT1, 0x00},
+ {R367TER_RCFBERCPT0, 0x00},
+ {R367TER_RCFBERERR2, 0x00},
+ {R367TER_RCFBERERR1, 0x00},
+ {R367TER_RCFBERERR0, 0x00},
+ {R367TER_RCFSTATESM, 0x00},
+ {R367TER_RCFSTATESL, 0x00},
+ {R367TER_RCFSPYBER, 0x00},
+ {R367TER_RCFSPYDISTM, 0x00},
+ {R367TER_RCFSPYDISTL, 0x00},
+ {R367TER_RCFSPYOBS7, 0x00},
+ {R367TER_RCFSPYOBS6, 0x00},
+ {R367TER_RCFSPYOBS5, 0x00},
+ {R367TER_RCFSPYOBS4, 0x00},
+ {R367TER_RCFSPYOBS3, 0x00},
+ {R367TER_RCFSPYOBS2, 0x00},
+ {R367TER_RCFSPYOBS1, 0x00},
+ {R367TER_RCFSPYOBS0, 0x00},
+ {R367TER_TSGENERAL, 0x00},
+ {R367TER_RC1SPEED, 0x6f},
+ {R367TER_TSGSTATUS, 0x18},
+ {R367TER_FECM, 0x01},
+ {R367TER_VTH12, 0xff},
+ {R367TER_VTH23, 0xa1},
+ {R367TER_VTH34, 0x64},
+ {R367TER_VTH56, 0x40},
+ {R367TER_VTH67, 0x00},
+ {R367TER_VTH78, 0x2c},
+ {R367TER_VITCURPUN, 0x12},
+ {R367TER_VERROR, 0x01},
+ {R367TER_PRVIT, 0x3f},
+ {R367TER_VAVSRVIT, 0x00},
+ {R367TER_VSTATUSVIT, 0xbd},
+ {R367TER_VTHINUSE, 0xa1},
+ {R367TER_KDIV12, 0x20},
+ {R367TER_KDIV23, 0x40},
+ {R367TER_KDIV34, 0x20},
+ {R367TER_KDIV56, 0x30},
+ {R367TER_KDIV67, 0x00},
+ {R367TER_KDIV78, 0x30},
+ {R367TER_SIGPOWER, 0x54},
+ {R367TER_DEMAPVIT, 0x40},
+ {R367TER_VITSCALE, 0x00},
+ {R367TER_FFEC1PRG, 0x00},
+ {R367TER_FVITCURPUN, 0x12},
+ {R367TER_FVERROR, 0x01},
+ {R367TER_FVSTATUSVIT, 0xbd},
+ {R367TER_DEBUG_LT1, 0x00},
+ {R367TER_DEBUG_LT2, 0x00},
+ {R367TER_DEBUG_LT3, 0x00},
+ {R367TER_TSTSFMET, 0x00},
+ {R367TER_SELOUT, 0x00},
+ {R367TER_TSYNC, 0x00},
+ {R367TER_TSTERR, 0x00},
+ {R367TER_TSFSYNC, 0x00},
+ {R367TER_TSTSFERR, 0x00},
+ {R367TER_TSTTSSF1, 0x01},
+ {R367TER_TSTTSSF2, 0x1f},
+ {R367TER_TSTTSSF3, 0x00},
+ {R367TER_TSTTS1, 0x00},
+ {R367TER_TSTTS2, 0x1f},
+ {R367TER_TSTTS3, 0x01},
+ {R367TER_TSTTS4, 0x00},
+ {R367TER_TSTTSRC, 0x00},
+ {R367TER_TSTTSRS, 0x00},
+ {R367TER_TSSTATEM, 0xb0},
+ {R367TER_TSSTATEL, 0x40},
+ {R367TER_TSCFGH, 0xC0},
+ {R367TER_TSCFGM, 0xc0},/* for xc5000; was 0x00 */
+ {R367TER_TSCFGL, 0x20},
+ {R367TER_TSSYNC, 0x00},
+ {R367TER_TSINSDELH, 0x00},
+ {R367TER_TSINSDELM, 0x00},
+ {R367TER_TSINSDELL, 0x00},
+ {R367TER_TSDIVN, 0x03},
+ {R367TER_TSDIVPM, 0x00},
+ {R367TER_TSDIVPL, 0x00},
+ {R367TER_TSDIVQM, 0x00},
+ {R367TER_TSDIVQL, 0x00},
+ {R367TER_TSDILSTKM, 0x00},
+ {R367TER_TSDILSTKL, 0x00},
+ {R367TER_TSSPEED, 0x40},/* for xc5000; was 0x6f */
+ {R367TER_TSSTATUS, 0x81},
+ {R367TER_TSSTATUS2, 0x6a},
+ {R367TER_TSBITRATEM, 0x0f},
+ {R367TER_TSBITRATEL, 0xc6},
+ {R367TER_TSPACKLENM, 0x00},
+ {R367TER_TSPACKLENL, 0xfc},
+ {R367TER_TSBLOCLENM, 0x0a},
+ {R367TER_TSBLOCLENL, 0x80},
+ {R367TER_TSDLYH, 0x90},
+ {R367TER_TSDLYM, 0x68},
+ {R367TER_TSDLYL, 0x01},
+ {R367TER_TSNPDAV, 0x00},
+ {R367TER_TSBUFSTATH, 0x00},
+ {R367TER_TSBUFSTATM, 0x00},
+ {R367TER_TSBUFSTATL, 0x00},
+ {R367TER_TSDEBUGM, 0xcf},
+ {R367TER_TSDEBUGL, 0x1e},
+ {R367TER_TSDLYSETH, 0x00},
+ {R367TER_TSDLYSETM, 0x68},
+ {R367TER_TSDLYSETL, 0x00},
+ {R367TER_TSOBSCFG, 0x00},
+ {R367TER_TSOBSM, 0x47},
+ {R367TER_TSOBSL, 0x1f},
+ {R367TER_ERRCTRL1, 0x95},
+ {R367TER_ERRCNT1H, 0x80},
+ {R367TER_ERRCNT1M, 0x00},
+ {R367TER_ERRCNT1L, 0x00},
+ {R367TER_ERRCTRL2, 0x95},
+ {R367TER_ERRCNT2H, 0x00},
+ {R367TER_ERRCNT2M, 0x00},
+ {R367TER_ERRCNT2L, 0x00},
+ {R367TER_FECSPY, 0x88},
+ {R367TER_FSPYCFG, 0x2c},
+ {R367TER_FSPYDATA, 0x3a},
+ {R367TER_FSPYOUT, 0x06},
+ {R367TER_FSTATUS, 0x61},
+ {R367TER_FGOODPACK, 0xff},
+ {R367TER_FPACKCNT, 0xff},
+ {R367TER_FSPYMISC, 0x66},
+ {R367TER_FBERCPT4, 0x00},
+ {R367TER_FBERCPT3, 0x00},
+ {R367TER_FBERCPT2, 0x36},
+ {R367TER_FBERCPT1, 0x36},
+ {R367TER_FBERCPT0, 0x14},
+ {R367TER_FBERERR2, 0x00},
+ {R367TER_FBERERR1, 0x03},
+ {R367TER_FBERERR0, 0x28},
+ {R367TER_FSTATESM, 0x00},
+ {R367TER_FSTATESL, 0x02},
+ {R367TER_FSPYBER, 0x00},
+ {R367TER_FSPYDISTM, 0x01},
+ {R367TER_FSPYDISTL, 0x9f},
+ {R367TER_FSPYOBS7, 0xc9},
+ {R367TER_FSPYOBS6, 0x99},
+ {R367TER_FSPYOBS5, 0x08},
+ {R367TER_FSPYOBS4, 0xec},
+ {R367TER_FSPYOBS3, 0x01},
+ {R367TER_FSPYOBS2, 0x0f},
+ {R367TER_FSPYOBS1, 0xf5},
+ {R367TER_FSPYOBS0, 0x08},
+ {R367TER_SFDEMAP, 0x40},
+ {R367TER_SFERROR, 0x00},
+ {R367TER_SFAVSR, 0x30},
+ {R367TER_SFECSTATUS, 0xcc},
+ {R367TER_SFKDIV12, 0x20},
+ {R367TER_SFKDIV23, 0x40},
+ {R367TER_SFKDIV34, 0x20},
+ {R367TER_SFKDIV56, 0x20},
+ {R367TER_SFKDIV67, 0x00},
+ {R367TER_SFKDIV78, 0x20},
+ {R367TER_SFDILSTKM, 0x00},
+ {R367TER_SFDILSTKL, 0x00},
+ {R367TER_SFSTATUS, 0xb5},
+ {R367TER_SFDLYH, 0x90},
+ {R367TER_SFDLYM, 0x60},
+ {R367TER_SFDLYL, 0x01},
+ {R367TER_SFDLYSETH, 0xc0},
+ {R367TER_SFDLYSETM, 0x60},
+ {R367TER_SFDLYSETL, 0x00},
+ {R367TER_SFOBSCFG, 0x00},
+ {R367TER_SFOBSM, 0x47},
+ {R367TER_SFOBSL, 0x05},
+ {R367TER_SFECINFO, 0x40},
+ {R367TER_SFERRCTRL, 0x74},
+ {R367TER_SFERRCNTH, 0x80},
+ {R367TER_SFERRCNTM, 0x00},
+ {R367TER_SFERRCNTL, 0x00},
+ {R367TER_SYMBRATEM, 0x2f},
+ {R367TER_SYMBRATEL, 0x50},
+ {R367TER_SYMBSTATUS, 0x7f},
+ {R367TER_SYMBCFG, 0x00},
+ {R367TER_SYMBFIFOM, 0xf4},
+ {R367TER_SYMBFIFOL, 0x0d},
+ {R367TER_SYMBOFFSM, 0xf0},
+ {R367TER_SYMBOFFSL, 0x2d},
+ {R367TER_DEBUG_LT4, 0x00},
+ {R367TER_DEBUG_LT5, 0x00},
+ {R367TER_DEBUG_LT6, 0x00},
+ {R367TER_DEBUG_LT7, 0x00},
+ {R367TER_DEBUG_LT8, 0x00},
+ {R367TER_DEBUG_LT9, 0x00},
+ {0x0000, 0x00},
+};
+
+static const struct st_register def0367cab[] = {
+ {R367CAB_ID, 0x60},
+ {R367CAB_I2CRPT, 0xa0},
+ /*{R367CAB_I2CRPT, 0x22},*/
+ {R367CAB_TOPCTRL, 0x10},
+ {R367CAB_IOCFG0, 0x80},
+ {R367CAB_DAC0R, 0x00},
+ {R367CAB_IOCFG1, 0x00},
+ {R367CAB_DAC1R, 0x00},
+ {R367CAB_IOCFG2, 0x00},
+ {R367CAB_SDFR, 0x00},
+ {R367CAB_AUX_CLK, 0x00},
+ {R367CAB_FREESYS1, 0x00},
+ {R367CAB_FREESYS2, 0x00},
+ {R367CAB_FREESYS3, 0x00},
+ {R367CAB_GPIO_CFG, 0x55},
+ {R367CAB_GPIO_CMD, 0x01},
+ {R367CAB_TSTRES, 0x00},
+ {R367CAB_ANACTRL, 0x0d},/* was 0x00 need to check - I.M.L.*/
+ {R367CAB_TSTBUS, 0x00},
+ {R367CAB_RF_AGC1, 0xea},
+ {R367CAB_RF_AGC2, 0x82},
+ {R367CAB_ANADIGCTRL, 0x0b},
+ {R367CAB_PLLMDIV, 0x01},
+ {R367CAB_PLLNDIV, 0x08},
+ {R367CAB_PLLSETUP, 0x18},
+ {R367CAB_DUAL_AD12, 0x0C}, /* for xc5000 AGC voltage 1.6V */
+ {R367CAB_TSTBIST, 0x00},
+ {R367CAB_CTRL_1, 0x00},
+ {R367CAB_CTRL_2, 0x03},
+ {R367CAB_IT_STATUS1, 0x2b},
+ {R367CAB_IT_STATUS2, 0x08},
+ {R367CAB_IT_EN1, 0x00},
+ {R367CAB_IT_EN2, 0x00},
+ {R367CAB_CTRL_STATUS, 0x04},
+ {R367CAB_TEST_CTL, 0x00},
+ {R367CAB_AGC_CTL, 0x73},
+ {R367CAB_AGC_IF_CFG, 0x50},
+ {R367CAB_AGC_RF_CFG, 0x00},
+ {R367CAB_AGC_PWM_CFG, 0x03},
+ {R367CAB_AGC_PWR_REF_L, 0x5a},
+ {R367CAB_AGC_PWR_REF_H, 0x00},
+ {R367CAB_AGC_RF_TH_L, 0xff},
+ {R367CAB_AGC_RF_TH_H, 0x07},
+ {R367CAB_AGC_IF_LTH_L, 0x00},
+ {R367CAB_AGC_IF_LTH_H, 0x08},
+ {R367CAB_AGC_IF_HTH_L, 0xff},
+ {R367CAB_AGC_IF_HTH_H, 0x07},
+ {R367CAB_AGC_PWR_RD_L, 0xa0},
+ {R367CAB_AGC_PWR_RD_M, 0xe9},
+ {R367CAB_AGC_PWR_RD_H, 0x03},
+ {R367CAB_AGC_PWM_IFCMD_L, 0xe4},
+ {R367CAB_AGC_PWM_IFCMD_H, 0x00},
+ {R367CAB_AGC_PWM_RFCMD_L, 0xff},
+ {R367CAB_AGC_PWM_RFCMD_H, 0x07},
+ {R367CAB_IQDEM_CFG, 0x01},
+ {R367CAB_MIX_NCO_LL, 0x22},
+ {R367CAB_MIX_NCO_HL, 0x96},
+ {R367CAB_MIX_NCO_HH, 0x55},
+ {R367CAB_SRC_NCO_LL, 0xff},
+ {R367CAB_SRC_NCO_LH, 0x0c},
+ {R367CAB_SRC_NCO_HL, 0xf5},
+ {R367CAB_SRC_NCO_HH, 0x20},
+ {R367CAB_IQDEM_GAIN_SRC_L, 0x06},
+ {R367CAB_IQDEM_GAIN_SRC_H, 0x01},
+ {R367CAB_IQDEM_DCRM_CFG_LL, 0xfe},
+ {R367CAB_IQDEM_DCRM_CFG_LH, 0xff},
+ {R367CAB_IQDEM_DCRM_CFG_HL, 0x0f},
+ {R367CAB_IQDEM_DCRM_CFG_HH, 0x00},
+ {R367CAB_IQDEM_ADJ_COEFF0, 0x34},
+ {R367CAB_IQDEM_ADJ_COEFF1, 0xae},
+ {R367CAB_IQDEM_ADJ_COEFF2, 0x46},
+ {R367CAB_IQDEM_ADJ_COEFF3, 0x77},
+ {R367CAB_IQDEM_ADJ_COEFF4, 0x96},
+ {R367CAB_IQDEM_ADJ_COEFF5, 0x69},
+ {R367CAB_IQDEM_ADJ_COEFF6, 0xc7},
+ {R367CAB_IQDEM_ADJ_COEFF7, 0x01},
+ {R367CAB_IQDEM_ADJ_EN, 0x04},
+ {R367CAB_IQDEM_ADJ_AGC_REF, 0x94},
+ {R367CAB_ALLPASSFILT1, 0xc9},
+ {R367CAB_ALLPASSFILT2, 0x2d},
+ {R367CAB_ALLPASSFILT3, 0xa3},
+ {R367CAB_ALLPASSFILT4, 0xfb},
+ {R367CAB_ALLPASSFILT5, 0xf6},
+ {R367CAB_ALLPASSFILT6, 0x45},
+ {R367CAB_ALLPASSFILT7, 0x6f},
+ {R367CAB_ALLPASSFILT8, 0x7e},
+ {R367CAB_ALLPASSFILT9, 0x05},
+ {R367CAB_ALLPASSFILT10, 0x0a},
+ {R367CAB_ALLPASSFILT11, 0x51},
+ {R367CAB_TRL_AGC_CFG, 0x20},
+ {R367CAB_TRL_LPF_CFG, 0x28},
+ {R367CAB_TRL_LPF_ACQ_GAIN, 0x44},
+ {R367CAB_TRL_LPF_TRK_GAIN, 0x22},
+ {R367CAB_TRL_LPF_OUT_GAIN, 0x03},
+ {R367CAB_TRL_LOCKDET_LTH, 0x04},
+ {R367CAB_TRL_LOCKDET_HTH, 0x11},
+ {R367CAB_TRL_LOCKDET_TRGVAL, 0x20},
+ {R367CAB_IQ_QAM, 0x01},
+ {R367CAB_FSM_STATE, 0xa0},
+ {R367CAB_FSM_CTL, 0x08},
+ {R367CAB_FSM_STS, 0x0c},
+ {R367CAB_FSM_SNR0_HTH, 0x00},
+ {R367CAB_FSM_SNR1_HTH, 0x00},
+ {R367CAB_FSM_SNR2_HTH, 0x23},/* 0x00 */
+ {R367CAB_FSM_SNR0_LTH, 0x00},
+ {R367CAB_FSM_SNR1_LTH, 0x00},
+ {R367CAB_FSM_EQA1_HTH, 0x00},
+ {R367CAB_FSM_TEMPO, 0x32},
+ {R367CAB_FSM_CONFIG, 0x03},
+ {R367CAB_EQU_I_TESTTAP_L, 0x11},
+ {R367CAB_EQU_I_TESTTAP_M, 0x00},
+ {R367CAB_EQU_I_TESTTAP_H, 0x00},
+ {R367CAB_EQU_TESTAP_CFG, 0x00},
+ {R367CAB_EQU_Q_TESTTAP_L, 0xff},
+ {R367CAB_EQU_Q_TESTTAP_M, 0x00},
+ {R367CAB_EQU_Q_TESTTAP_H, 0x00},
+ {R367CAB_EQU_TAP_CTRL, 0x00},
+ {R367CAB_EQU_CTR_CRL_CONTROL_L, 0x11},
+ {R367CAB_EQU_CTR_CRL_CONTROL_H, 0x05},
+ {R367CAB_EQU_CTR_HIPOW_L, 0x00},
+ {R367CAB_EQU_CTR_HIPOW_H, 0x00},
+ {R367CAB_EQU_I_EQU_LO, 0xef},
+ {R367CAB_EQU_I_EQU_HI, 0x00},
+ {R367CAB_EQU_Q_EQU_LO, 0xee},
+ {R367CAB_EQU_Q_EQU_HI, 0x00},
+ {R367CAB_EQU_MAPPER, 0xc5},
+ {R367CAB_EQU_SWEEP_RATE, 0x80},
+ {R367CAB_EQU_SNR_LO, 0x64},
+ {R367CAB_EQU_SNR_HI, 0x03},
+ {R367CAB_EQU_GAMMA_LO, 0x00},
+ {R367CAB_EQU_GAMMA_HI, 0x00},
+ {R367CAB_EQU_ERR_GAIN, 0x36},
+ {R367CAB_EQU_RADIUS, 0xaa},
+ {R367CAB_EQU_FFE_MAINTAP, 0x00},
+ {R367CAB_EQU_FFE_LEAKAGE, 0x63},
+ {R367CAB_EQU_FFE_MAINTAP_POS, 0xdf},
+ {R367CAB_EQU_GAIN_WIDE, 0x88},
+ {R367CAB_EQU_GAIN_NARROW, 0x41},
+ {R367CAB_EQU_CTR_LPF_GAIN, 0xd1},
+ {R367CAB_EQU_CRL_LPF_GAIN, 0xa7},
+ {R367CAB_EQU_GLOBAL_GAIN, 0x06},
+ {R367CAB_EQU_CRL_LD_SEN, 0x85},
+ {R367CAB_EQU_CRL_LD_VAL, 0xe2},
+ {R367CAB_EQU_CRL_TFR, 0x20},
+ {R367CAB_EQU_CRL_BISTH_LO, 0x00},
+ {R367CAB_EQU_CRL_BISTH_HI, 0x00},
+ {R367CAB_EQU_SWEEP_RANGE_LO, 0x00},
+ {R367CAB_EQU_SWEEP_RANGE_HI, 0x00},
+ {R367CAB_EQU_CRL_LIMITER, 0x40},
+ {R367CAB_EQU_MODULUS_MAP, 0x90},
+ {R367CAB_EQU_PNT_GAIN, 0xa7},
+ {R367CAB_FEC_AC_CTR_0, 0x16},
+ {R367CAB_FEC_AC_CTR_1, 0x0b},
+ {R367CAB_FEC_AC_CTR_2, 0x88},
+ {R367CAB_FEC_AC_CTR_3, 0x02},
+ {R367CAB_FEC_STATUS, 0x12},
+ {R367CAB_RS_COUNTER_0, 0x7d},
+ {R367CAB_RS_COUNTER_1, 0xd0},
+ {R367CAB_RS_COUNTER_2, 0x19},
+ {R367CAB_RS_COUNTER_3, 0x0b},
+ {R367CAB_RS_COUNTER_4, 0xa3},
+ {R367CAB_RS_COUNTER_5, 0x00},
+ {R367CAB_BERT_0, 0x01},
+ {R367CAB_BERT_1, 0x25},
+ {R367CAB_BERT_2, 0x41},
+ {R367CAB_BERT_3, 0x39},
+ {R367CAB_OUTFORMAT_0, 0xc2},
+ {R367CAB_OUTFORMAT_1, 0x22},
+ {R367CAB_SMOOTHER_2, 0x28},
+ {R367CAB_TSMF_CTRL_0, 0x01},
+ {R367CAB_TSMF_CTRL_1, 0xc6},
+ {R367CAB_TSMF_CTRL_3, 0x43},
+ {R367CAB_TS_ON_ID_0, 0x00},
+ {R367CAB_TS_ON_ID_1, 0x00},
+ {R367CAB_TS_ON_ID_2, 0x00},
+ {R367CAB_TS_ON_ID_3, 0x00},
+ {R367CAB_RE_STATUS_0, 0x00},
+ {R367CAB_RE_STATUS_1, 0x00},
+ {R367CAB_RE_STATUS_2, 0x00},
+ {R367CAB_RE_STATUS_3, 0x00},
+ {R367CAB_TS_STATUS_0, 0x00},
+ {R367CAB_TS_STATUS_1, 0x00},
+ {R367CAB_TS_STATUS_2, 0xa0},
+ {R367CAB_TS_STATUS_3, 0x00},
+ {R367CAB_T_O_ID_0, 0x00},
+ {R367CAB_T_O_ID_1, 0x00},
+ {R367CAB_T_O_ID_2, 0x00},
+ {R367CAB_T_O_ID_3, 0x00},
+ {0x0000, 0x00},
+};
+
+/**************
+ *
+ * Defaults / Tables for Digital Devices C/T Cine/Flex devices
+ *
+ **************/
+
+static const struct st_register def0367dd_ofdm[] = {
+ {R367TER_AGC2MAX, 0xff},
+ {R367TER_AGC2MIN, 0x00},
+ {R367TER_AGC1MAX, 0xff},
+ {R367TER_AGC1MIN, 0x00},
+ {R367TER_AGCR, 0xbc},
+ {R367TER_AGC2TH, 0x00},
+ {R367TER_AGCCTRL1, 0x85},
+ {R367TER_AGCCTRL2, 0x1f},
+ {R367TER_AGC1VAL1, 0x00},
+ {R367TER_AGC1VAL2, 0x00},
+ {R367TER_AGC2VAL1, 0x6f},
+ {R367TER_AGC2VAL2, 0x05},
+ {R367TER_AGC2PGA, 0x00},
+ {R367TER_OVF_RATE1, 0x00},
+ {R367TER_OVF_RATE2, 0x00},
+ {R367TER_GAIN_SRC1, 0x2b},
+ {R367TER_GAIN_SRC2, 0x04},
+ {R367TER_INC_DEROT1, 0x55},
+ {R367TER_INC_DEROT2, 0x55},
+ {R367TER_PPM_CPAMP_DIR, 0x2c},
+ {R367TER_PPM_CPAMP_INV, 0x00},
+ {R367TER_FREESTFE_1, 0x00},
+ {R367TER_FREESTFE_2, 0x1c},
+ {R367TER_DCOFFSET, 0x00},
+ {R367TER_EN_PROCESS, 0x05},
+ {R367TER_SDI_SMOOTHER, 0x80},
+ {R367TER_FE_LOOP_OPEN, 0x1c},
+ {R367TER_FREQOFF1, 0x00},
+ {R367TER_FREQOFF2, 0x00},
+ {R367TER_FREQOFF3, 0x00},
+ {R367TER_TIMOFF1, 0x00},
+ {R367TER_TIMOFF2, 0x00},
+ {R367TER_EPQ, 0x02},
+ {R367TER_EPQAUTO, 0x01},
+ {R367TER_SYR_UPDATE, 0xf5},
+ {R367TER_CHPFREE, 0x00},
+ {R367TER_PPM_STATE_MAC, 0x23},
+ {R367TER_INR_THRESHOLD, 0xff},
+ {R367TER_EPQ_TPS_ID_CELL, 0xf9},
+ {R367TER_EPQ_CFG, 0x00},
+ {R367TER_EPQ_STATUS, 0x01},
+ {R367TER_AUTORELOCK, 0x81},
+ {R367TER_BER_THR_VMSB, 0x00},
+ {R367TER_BER_THR_MSB, 0x00},
+ {R367TER_BER_THR_LSB, 0x00},
+ {R367TER_CCD, 0x83},
+ {R367TER_SPECTR_CFG, 0x00},
+ {R367TER_CHC_DUMMY, 0x18},
+ {R367TER_INC_CTL, 0x88},
+ {R367TER_INCTHRES_COR1, 0xb4},
+ {R367TER_INCTHRES_COR2, 0x96},
+ {R367TER_INCTHRES_DET1, 0x0e},
+ {R367TER_INCTHRES_DET2, 0x11},
+ {R367TER_IIR_CELLNB, 0x8d},
+ {R367TER_IIRCX_COEFF1_MSB, 0x00},
+ {R367TER_IIRCX_COEFF1_LSB, 0x00},
+ {R367TER_IIRCX_COEFF2_MSB, 0x09},
+ {R367TER_IIRCX_COEFF2_LSB, 0x18},
+ {R367TER_IIRCX_COEFF3_MSB, 0x14},
+ {R367TER_IIRCX_COEFF3_LSB, 0x9c},
+ {R367TER_IIRCX_COEFF4_MSB, 0x00},
+ {R367TER_IIRCX_COEFF4_LSB, 0x00},
+ {R367TER_IIRCX_COEFF5_MSB, 0x36},
+ {R367TER_IIRCX_COEFF5_LSB, 0x42},
+ {R367TER_FEPATH_CFG, 0x00},
+ {R367TER_PMC1_FUNC, 0x65},
+ {R367TER_PMC1_FOR, 0x00},
+ {R367TER_PMC2_FUNC, 0x00},
+ {R367TER_STATUS_ERR_DA, 0xe0},
+ {R367TER_DIG_AGC_R, 0xfe},
+ {R367TER_COMAGC_TARMSB, 0x0b},
+ {R367TER_COM_AGC_TAR_ENMODE, 0x41},
+ {R367TER_COM_AGC_CFG, 0x3e},
+ {R367TER_COM_AGC_GAIN1, 0x39},
+ {R367TER_AUT_AGC_TARGETMSB, 0x0b},
+ {R367TER_LOCK_DET_MSB, 0x01},
+ {R367TER_AGCTAR_LOCK_LSBS, 0x40},
+ {R367TER_AUT_GAIN_EN, 0xf4},
+ {R367TER_AUT_CFG, 0xf0},
+ {R367TER_LOCKN, 0x23},
+ {R367TER_INT_X_3, 0x00},
+ {R367TER_INT_X_2, 0x03},
+ {R367TER_INT_X_1, 0x8d},
+ {R367TER_INT_X_0, 0xa0},
+ {R367TER_MIN_ERRX_MSB, 0x00},
+ {R367TER_COR_CTL, 0x00},
+ {R367TER_COR_STAT, 0xf6},
+ {R367TER_COR_INTEN, 0x00},
+ {R367TER_COR_INTSTAT, 0x3f},
+ {R367TER_COR_MODEGUARD, 0x03},
+ {R367TER_AGC_CTL, 0x08},
+ {R367TER_AGC_MANUAL1, 0x00},
+ {R367TER_AGC_MANUAL2, 0x00},
+ {R367TER_AGC_TARG, 0x16},
+ {R367TER_AGC_GAIN1, 0x53},
+ {R367TER_AGC_GAIN2, 0x1d},
+ {R367TER_RESERVED_1, 0x00},
+ {R367TER_RESERVED_2, 0x00},
+ {R367TER_RESERVED_3, 0x00},
+ {R367TER_CAS_CTL, 0x44},
+ {R367TER_CAS_FREQ, 0xb3},
+ {R367TER_CAS_DAGCGAIN, 0x12},
+ {R367TER_SYR_CTL, 0x04},
+ {R367TER_SYR_STAT, 0x10},
+ {R367TER_SYR_NCO1, 0x00},
+ {R367TER_SYR_NCO2, 0x00},
+ {R367TER_SYR_OFFSET1, 0x00},
+ {R367TER_SYR_OFFSET2, 0x00},
+ {R367TER_FFT_CTL, 0x00},
+ {R367TER_SCR_CTL, 0x70},
+ {R367TER_PPM_CTL1, 0xf8},
+ {R367TER_TRL_CTL, 0xac},
+ {R367TER_TRL_NOMRATE1, 0x1e},
+ {R367TER_TRL_NOMRATE2, 0x58},
+ {R367TER_TRL_TIME1, 0x1d},
+ {R367TER_TRL_TIME2, 0xfc},
+ {R367TER_CRL_CTL, 0x24},
+ {R367TER_CRL_FREQ1, 0xad},
+ {R367TER_CRL_FREQ2, 0x9d},
+ {R367TER_CRL_FREQ3, 0xff},
+ {R367TER_CHC_CTL, 0x01},
+ {R367TER_CHC_SNR, 0xf0},
+ {R367TER_BDI_CTL, 0x00},
+ {R367TER_DMP_CTL, 0x00},
+ {R367TER_TPS_RCVD1, 0x30},
+ {R367TER_TPS_RCVD2, 0x02},
+ {R367TER_TPS_RCVD3, 0x01},
+ {R367TER_TPS_RCVD4, 0x00},
+ {R367TER_TPS_ID_CELL1, 0x00},
+ {R367TER_TPS_ID_CELL2, 0x00},
+ {R367TER_TPS_RCVD5_SET1, 0x02},
+ {R367TER_TPS_SET2, 0x02},
+ {R367TER_TPS_SET3, 0x01},
+ {R367TER_TPS_CTL, 0x00},
+ {R367TER_CTL_FFTOSNUM, 0x34},
+ {R367TER_TESTSELECT, 0x09},
+ {R367TER_MSC_REV, 0x0a},
+ {R367TER_PIR_CTL, 0x00},
+ {R367TER_SNR_CARRIER1, 0xa1},
+ {R367TER_SNR_CARRIER2, 0x9a},
+ {R367TER_PPM_CPAMP, 0x2c},
+ {R367TER_TSM_AP0, 0x00},
+ {R367TER_TSM_AP1, 0x00},
+ {R367TER_TSM_AP2, 0x00},
+ {R367TER_TSM_AP3, 0x00},
+ {R367TER_TSM_AP4, 0x00},
+ {R367TER_TSM_AP5, 0x00},
+ {R367TER_TSM_AP6, 0x00},
+ {R367TER_TSM_AP7, 0x00},
+ {R367TER_CONSTMODE, 0x01},
+ {R367TER_CONSTCARR1, 0x00},
+ {R367TER_CONSTCARR2, 0x00},
+ {R367TER_ICONSTEL, 0x0a},
+ {R367TER_QCONSTEL, 0x15},
+ {R367TER_TSTBISTRES0, 0x00},
+ {R367TER_TSTBISTRES1, 0x00},
+ {R367TER_TSTBISTRES2, 0x28},
+ {R367TER_TSTBISTRES3, 0x00},
+ {R367TER_SYR_TARGET_FFTADJT_MSB, 0x00},
+ {R367TER_SYR_TARGET_FFTADJT_LSB, 0x00},
+ {R367TER_SYR_TARGET_CHCADJT_MSB, 0x00},
+ {R367TER_SYR_TARGET_CHCADJT_LSB, 0x00},
+ {R367TER_SYR_FLAG, 0x00},
+ {R367TER_CRL_TARGET1, 0x00},
+ {R367TER_CRL_TARGET2, 0x00},
+ {R367TER_CRL_TARGET3, 0x00},
+ {R367TER_CRL_TARGET4, 0x00},
+ {R367TER_CRL_FLAG, 0x00},
+ {R367TER_TRL_TARGET1, 0x00},
+ {R367TER_TRL_TARGET2, 0x00},
+ {R367TER_TRL_CHC, 0x00},
+ {R367TER_CHC_SNR_TARG, 0x00},
+ {R367TER_TOP_TRACK, 0x00},
+ {R367TER_TRACKER_FREE1, 0x00},
+ {R367TER_ERROR_CRL1, 0x00},
+ {R367TER_ERROR_CRL2, 0x00},
+ {R367TER_ERROR_CRL3, 0x00},
+ {R367TER_ERROR_CRL4, 0x00},
+ {R367TER_DEC_NCO1, 0x2c},
+ {R367TER_DEC_NCO2, 0x0f},
+ {R367TER_DEC_NCO3, 0x20},
+ {R367TER_SNR, 0xf1},
+ {R367TER_SYR_FFTADJ1, 0x00},
+ {R367TER_SYR_FFTADJ2, 0x00},
+ {R367TER_SYR_CHCADJ1, 0x00},
+ {R367TER_SYR_CHCADJ2, 0x00},
+ {R367TER_SYR_OFF, 0x00},
+ {R367TER_PPM_OFFSET1, 0x00},
+ {R367TER_PPM_OFFSET2, 0x03},
+ {R367TER_TRACKER_FREE2, 0x00},
+ {R367TER_DEBG_LT10, 0x00},
+ {R367TER_DEBG_LT11, 0x00},
+ {R367TER_DEBG_LT12, 0x00},
+ {R367TER_DEBG_LT13, 0x00},
+ {R367TER_DEBG_LT14, 0x00},
+ {R367TER_DEBG_LT15, 0x00},
+ {R367TER_DEBG_LT16, 0x00},
+ {R367TER_DEBG_LT17, 0x00},
+ {R367TER_DEBG_LT18, 0x00},
+ {R367TER_DEBG_LT19, 0x00},
+ {R367TER_DEBG_LT1A, 0x00},
+ {R367TER_DEBG_LT1B, 0x00},
+ {R367TER_DEBG_LT1C, 0x00},
+ {R367TER_DEBG_LT1D, 0x00},
+ {R367TER_DEBG_LT1E, 0x00},
+ {R367TER_DEBG_LT1F, 0x00},
+ {R367TER_RCCFGH, 0x00},
+ {R367TER_RCCFGM, 0x00},
+ {R367TER_RCCFGL, 0x00},
+ {R367TER_RCINSDELH, 0x00},
+ {R367TER_RCINSDELM, 0x00},
+ {R367TER_RCINSDELL, 0x00},
+ {R367TER_RCSTATUS, 0x00},
+ {R367TER_RCSPEED, 0x6f},
+ {R367TER_RCDEBUGM, 0xe7},
+ {R367TER_RCDEBUGL, 0x9b},
+ {R367TER_RCOBSCFG, 0x00},
+ {R367TER_RCOBSM, 0x00},
+ {R367TER_RCOBSL, 0x00},
+ {R367TER_RCFECSPY, 0x00},
+ {R367TER_RCFSPYCFG, 0x00},
+ {R367TER_RCFSPYDATA, 0x00},
+ {R367TER_RCFSPYOUT, 0x00},
+ {R367TER_RCFSTATUS, 0x00},
+ {R367TER_RCFGOODPACK, 0x00},
+ {R367TER_RCFPACKCNT, 0x00},
+ {R367TER_RCFSPYMISC, 0x00},
+ {R367TER_RCFBERCPT4, 0x00},
+ {R367TER_RCFBERCPT3, 0x00},
+ {R367TER_RCFBERCPT2, 0x00},
+ {R367TER_RCFBERCPT1, 0x00},
+ {R367TER_RCFBERCPT0, 0x00},
+ {R367TER_RCFBERERR2, 0x00},
+ {R367TER_RCFBERERR1, 0x00},
+ {R367TER_RCFBERERR0, 0x00},
+ {R367TER_RCFSTATESM, 0x00},
+ {R367TER_RCFSTATESL, 0x00},
+ {R367TER_RCFSPYBER, 0x00},
+ {R367TER_RCFSPYDISTM, 0x00},
+ {R367TER_RCFSPYDISTL, 0x00},
+ {R367TER_RCFSPYOBS7, 0x00},
+ {R367TER_RCFSPYOBS6, 0x00},
+ {R367TER_RCFSPYOBS5, 0x00},
+ {R367TER_RCFSPYOBS4, 0x00},
+ {R367TER_RCFSPYOBS3, 0x00},
+ {R367TER_RCFSPYOBS2, 0x00},
+ {R367TER_RCFSPYOBS1, 0x00},
+ {R367TER_RCFSPYOBS0, 0x00},
+ {R367TER_FECM, 0x01},
+ {R367TER_VTH12, 0xff},
+ {R367TER_VTH23, 0xa1},
+ {R367TER_VTH34, 0x64},
+ {R367TER_VTH56, 0x40},
+ {R367TER_VTH67, 0x00},
+ {R367TER_VTH78, 0x2c},
+ {R367TER_VITCURPUN, 0x12},
+ {R367TER_VERROR, 0x01},
+ {R367TER_PRVIT, 0x3f},
+ {R367TER_VAVSRVIT, 0x00},
+ {R367TER_VSTATUSVIT, 0xbd},
+ {R367TER_VTHINUSE, 0xa1},
+ {R367TER_KDIV12, 0x20},
+ {R367TER_KDIV23, 0x40},
+ {R367TER_KDIV34, 0x20},
+ {R367TER_KDIV56, 0x30},
+ {R367TER_KDIV67, 0x00},
+ {R367TER_KDIV78, 0x30},
+ {R367TER_SIGPOWER, 0x54},
+ {R367TER_DEMAPVIT, 0x40},
+ {R367TER_VITSCALE, 0x00},
+ {R367TER_FFEC1PRG, 0x00},
+ {R367TER_FVITCURPUN, 0x12},
+ {R367TER_FVERROR, 0x01},
+ {R367TER_FVSTATUSVIT, 0xbd},
+ {R367TER_DEBUG_LT1, 0x00},
+ {R367TER_DEBUG_LT2, 0x00},
+ {R367TER_DEBUG_LT3, 0x00},
+ {R367TER_TSTSFMET, 0x00},
+ {R367TER_SELOUT, 0x00},
+ {R367TER_TSYNC, 0x00},
+ {R367TER_TSTERR, 0x00},
+ {R367TER_TSFSYNC, 0x00},
+ {R367TER_TSTSFERR, 0x00},
+ {R367TER_TSTTSSF1, 0x01},
+ {R367TER_TSTTSSF2, 0x1f},
+ {R367TER_TSTTSSF3, 0x00},
+ {R367TER_TSTTS1, 0x00},
+ {R367TER_TSTTS2, 0x1f},
+ {R367TER_TSTTS3, 0x01},
+ {R367TER_TSTTS4, 0x00},
+ {R367TER_TSTTSRC, 0x00},
+ {R367TER_TSTTSRS, 0x00},
+ {R367TER_TSSTATEM, 0xb0},
+ {R367TER_TSSTATEL, 0x40},
+ {R367TER_TSCFGH, 0x80},
+ {R367TER_TSCFGM, 0x00},
+ {R367TER_TSCFGL, 0x20},
+ {R367TER_TSSYNC, 0x00},
+ {R367TER_TSINSDELH, 0x00},
+ {R367TER_TSINSDELM, 0x00},
+ {R367TER_TSINSDELL, 0x00},
+ {R367TER_TSDIVN, 0x03},
+ {R367TER_TSDIVPM, 0x00},
+ {R367TER_TSDIVPL, 0x00},
+ {R367TER_TSDIVQM, 0x00},
+ {R367TER_TSDIVQL, 0x00},
+ {R367TER_TSDILSTKM, 0x00},
+ {R367TER_TSDILSTKL, 0x00},
+ {R367TER_TSSPEED, 0x6f},
+ {R367TER_TSSTATUS, 0x81},
+ {R367TER_TSSTATUS2, 0x6a},
+ {R367TER_TSBITRATEM, 0x0f},
+ {R367TER_TSBITRATEL, 0xc6},
+ {R367TER_TSPACKLENM, 0x00},
+ {R367TER_TSPACKLENL, 0xfc},
+ {R367TER_TSBLOCLENM, 0x0a},
+ {R367TER_TSBLOCLENL, 0x80},
+ {R367TER_TSDLYH, 0x90},
+ {R367TER_TSDLYM, 0x68},
+ {R367TER_TSDLYL, 0x01},
+ {R367TER_TSNPDAV, 0x00},
+ {R367TER_TSBUFSTATH, 0x00},
+ {R367TER_TSBUFSTATM, 0x00},
+ {R367TER_TSBUFSTATL, 0x00},
+ {R367TER_TSDEBUGM, 0xcf},
+ {R367TER_TSDEBUGL, 0x1e},
+ {R367TER_TSDLYSETH, 0x00},
+ {R367TER_TSDLYSETM, 0x68},
+ {R367TER_TSDLYSETL, 0x00},
+ {R367TER_TSOBSCFG, 0x00},
+ {R367TER_TSOBSM, 0x47},
+ {R367TER_TSOBSL, 0x1f},
+ {R367TER_ERRCTRL1, 0x95},
+ {R367TER_ERRCNT1H, 0x80},
+ {R367TER_ERRCNT1M, 0x00},
+ {R367TER_ERRCNT1L, 0x00},
+ {R367TER_ERRCTRL2, 0x95},
+ {R367TER_ERRCNT2H, 0x00},
+ {R367TER_ERRCNT2M, 0x00},
+ {R367TER_ERRCNT2L, 0x00},
+ {R367TER_FECSPY, 0x88},
+ {R367TER_FSPYCFG, 0x2c},
+ {R367TER_FSPYDATA, 0x3a},
+ {R367TER_FSPYOUT, 0x06},
+ {R367TER_FSTATUS, 0x61},
+ {R367TER_FGOODPACK, 0xff},
+ {R367TER_FPACKCNT, 0xff},
+ {R367TER_FSPYMISC, 0x66},
+ {R367TER_FBERCPT4, 0x00},
+ {R367TER_FBERCPT3, 0x00},
+ {R367TER_FBERCPT2, 0x36},
+ {R367TER_FBERCPT1, 0x36},
+ {R367TER_FBERCPT0, 0x14},
+ {R367TER_FBERERR2, 0x00},
+ {R367TER_FBERERR1, 0x03},
+ {R367TER_FBERERR0, 0x28},
+ {R367TER_FSTATESM, 0x00},
+ {R367TER_FSTATESL, 0x02},
+ {R367TER_FSPYBER, 0x00},
+ {R367TER_FSPYDISTM, 0x01},
+ {R367TER_FSPYDISTL, 0x9f},
+ {R367TER_FSPYOBS7, 0xc9},
+ {R367TER_FSPYOBS6, 0x99},
+ {R367TER_FSPYOBS5, 0x08},
+ {R367TER_FSPYOBS4, 0xec},
+ {R367TER_FSPYOBS3, 0x01},
+ {R367TER_FSPYOBS2, 0x0f},
+ {R367TER_FSPYOBS1, 0xf5},
+ {R367TER_FSPYOBS0, 0x08},
+ {R367TER_SFDEMAP, 0x40},
+ {R367TER_SFERROR, 0x00},
+ {R367TER_SFAVSR, 0x30},
+ {R367TER_SFECSTATUS, 0xcc},
+ {R367TER_SFKDIV12, 0x20},
+ {R367TER_SFKDIV23, 0x40},
+ {R367TER_SFKDIV34, 0x20},
+ {R367TER_SFKDIV56, 0x20},
+ {R367TER_SFKDIV67, 0x00},
+ {R367TER_SFKDIV78, 0x20},
+ {R367TER_SFDILSTKM, 0x00},
+ {R367TER_SFDILSTKL, 0x00},
+ {R367TER_SFSTATUS, 0xb5},
+ {R367TER_SFDLYH, 0x90},
+ {R367TER_SFDLYM, 0x60},
+ {R367TER_SFDLYL, 0x01},
+ {R367TER_SFDLYSETH, 0xc0},
+ {R367TER_SFDLYSETM, 0x60},
+ {R367TER_SFDLYSETL, 0x00},
+ {R367TER_SFOBSCFG, 0x00},
+ {R367TER_SFOBSM, 0x47},
+ {R367TER_SFOBSL, 0x05},
+ {R367TER_SFECINFO, 0x40},
+ {R367TER_SFERRCTRL, 0x74},
+ {R367TER_SFERRCNTH, 0x80},
+ {R367TER_SFERRCNTM, 0x00},
+ {R367TER_SFERRCNTL, 0x00},
+ {R367TER_SYMBRATEM, 0x2f},
+ {R367TER_SYMBRATEL, 0x50},
+ {R367TER_SYMBSTATUS, 0x7f},
+ {R367TER_SYMBCFG, 0x00},
+ {R367TER_SYMBFIFOM, 0xf4},
+ {R367TER_SYMBFIFOL, 0x0d},
+ {R367TER_SYMBOFFSM, 0xf0},
+ {R367TER_SYMBOFFSL, 0x2d},
+ {0x0000, 0x00} /* EOT */
+};
+
+static const struct st_register def0367dd_qam[] = {
+ {R367CAB_CTRL_1, 0x06}, /* Original 0x04 */
+ {R367CAB_CTRL_2, 0x03},
+ {R367CAB_IT_STATUS1, 0x2b},
+ {R367CAB_IT_STATUS2, 0x08},
+ {R367CAB_IT_EN1, 0x00},
+ {R367CAB_IT_EN2, 0x00},
+ {R367CAB_CTRL_STATUS, 0x04},
+ {R367CAB_TEST_CTL, 0x00},
+ {R367CAB_AGC_CTL, 0x73},
+ {R367CAB_AGC_IF_CFG, 0x50},
+ {R367CAB_AGC_RF_CFG, 0x02}, /* RF Freeze */
+ {R367CAB_AGC_PWM_CFG, 0x03},
+ {R367CAB_AGC_PWR_REF_L, 0x5a},
+ {R367CAB_AGC_PWR_REF_H, 0x00},
+ {R367CAB_AGC_RF_TH_L, 0xff},
+ {R367CAB_AGC_RF_TH_H, 0x07},
+ {R367CAB_AGC_IF_LTH_L, 0x00},
+ {R367CAB_AGC_IF_LTH_H, 0x08},
+ {R367CAB_AGC_IF_HTH_L, 0xff},
+ {R367CAB_AGC_IF_HTH_H, 0x07},
+ {R367CAB_AGC_PWR_RD_L, 0xa0},
+ {R367CAB_AGC_PWR_RD_M, 0xe9},
+ {R367CAB_AGC_PWR_RD_H, 0x03},
+ {R367CAB_AGC_PWM_IFCMD_L, 0xe4},
+ {R367CAB_AGC_PWM_IFCMD_H, 0x00},
+ {R367CAB_AGC_PWM_RFCMD_L, 0xff},
+ {R367CAB_AGC_PWM_RFCMD_H, 0x07},
+ {R367CAB_IQDEM_CFG, 0x01},
+ {R367CAB_MIX_NCO_LL, 0x22},
+ {R367CAB_MIX_NCO_HL, 0x96},
+ {R367CAB_MIX_NCO_HH, 0x55},
+ {R367CAB_SRC_NCO_LL, 0xff},
+ {R367CAB_SRC_NCO_LH, 0x0c},
+ {R367CAB_SRC_NCO_HL, 0xf5},
+ {R367CAB_SRC_NCO_HH, 0x20},
+ {R367CAB_IQDEM_GAIN_SRC_L, 0x06},
+ {R367CAB_IQDEM_GAIN_SRC_H, 0x01},
+ {R367CAB_IQDEM_DCRM_CFG_LL, 0xfe},
+ {R367CAB_IQDEM_DCRM_CFG_LH, 0xff},
+ {R367CAB_IQDEM_DCRM_CFG_HL, 0x0f},
+ {R367CAB_IQDEM_DCRM_CFG_HH, 0x00},
+ {R367CAB_IQDEM_ADJ_COEFF0, 0x34},
+ {R367CAB_IQDEM_ADJ_COEFF1, 0xae},
+ {R367CAB_IQDEM_ADJ_COEFF2, 0x46},
+ {R367CAB_IQDEM_ADJ_COEFF3, 0x77},
+ {R367CAB_IQDEM_ADJ_COEFF4, 0x96},
+ {R367CAB_IQDEM_ADJ_COEFF5, 0x69},
+ {R367CAB_IQDEM_ADJ_COEFF6, 0xc7},
+ {R367CAB_IQDEM_ADJ_COEFF7, 0x01},
+ {R367CAB_IQDEM_ADJ_EN, 0x04},
+ {R367CAB_IQDEM_ADJ_AGC_REF, 0x94},
+ {R367CAB_ALLPASSFILT1, 0xc9},
+ {R367CAB_ALLPASSFILT2, 0x2d},
+ {R367CAB_ALLPASSFILT3, 0xa3},
+ {R367CAB_ALLPASSFILT4, 0xfb},
+ {R367CAB_ALLPASSFILT5, 0xf6},
+ {R367CAB_ALLPASSFILT6, 0x45},
+ {R367CAB_ALLPASSFILT7, 0x6f},
+ {R367CAB_ALLPASSFILT8, 0x7e},
+ {R367CAB_ALLPASSFILT9, 0x05},
+ {R367CAB_ALLPASSFILT10, 0x0a},
+ {R367CAB_ALLPASSFILT11, 0x51},
+ {R367CAB_TRL_AGC_CFG, 0x20},
+ {R367CAB_TRL_LPF_CFG, 0x28},
+ {R367CAB_TRL_LPF_ACQ_GAIN, 0x44},
+ {R367CAB_TRL_LPF_TRK_GAIN, 0x22},
+ {R367CAB_TRL_LPF_OUT_GAIN, 0x03},
+ {R367CAB_TRL_LOCKDET_LTH, 0x04},
+ {R367CAB_TRL_LOCKDET_HTH, 0x11},
+ {R367CAB_TRL_LOCKDET_TRGVAL, 0x20},
+ {R367CAB_IQ_QAM, 0x01},
+ {R367CAB_FSM_STATE, 0xa0},
+ {R367CAB_FSM_CTL, 0x08},
+ {R367CAB_FSM_STS, 0x0c},
+ {R367CAB_FSM_SNR0_HTH, 0x00},
+ {R367CAB_FSM_SNR1_HTH, 0x00},
+ {R367CAB_FSM_SNR2_HTH, 0x00},
+ {R367CAB_FSM_SNR0_LTH, 0x00},
+ {R367CAB_FSM_SNR1_LTH, 0x00},
+ {R367CAB_FSM_EQA1_HTH, 0x00},
+ {R367CAB_FSM_TEMPO, 0x32},
+ {R367CAB_FSM_CONFIG, 0x03},
+ {R367CAB_EQU_I_TESTTAP_L, 0x11},
+ {R367CAB_EQU_I_TESTTAP_M, 0x00},
+ {R367CAB_EQU_I_TESTTAP_H, 0x00},
+ {R367CAB_EQU_TESTAP_CFG, 0x00},
+ {R367CAB_EQU_Q_TESTTAP_L, 0xff},
+ {R367CAB_EQU_Q_TESTTAP_M, 0x00},
+ {R367CAB_EQU_Q_TESTTAP_H, 0x00},
+ {R367CAB_EQU_TAP_CTRL, 0x00},
+ {R367CAB_EQU_CTR_CRL_CONTROL_L, 0x11},
+ {R367CAB_EQU_CTR_CRL_CONTROL_H, 0x05},
+ {R367CAB_EQU_CTR_HIPOW_L, 0x00},
+ {R367CAB_EQU_CTR_HIPOW_H, 0x00},
+ {R367CAB_EQU_I_EQU_LO, 0xef},
+ {R367CAB_EQU_I_EQU_HI, 0x00},
+ {R367CAB_EQU_Q_EQU_LO, 0xee},
+ {R367CAB_EQU_Q_EQU_HI, 0x00},
+ {R367CAB_EQU_MAPPER, 0xc5},
+ {R367CAB_EQU_SWEEP_RATE, 0x80},
+ {R367CAB_EQU_SNR_LO, 0x64},
+ {R367CAB_EQU_SNR_HI, 0x03},
+ {R367CAB_EQU_GAMMA_LO, 0x00},
+ {R367CAB_EQU_GAMMA_HI, 0x00},
+ {R367CAB_EQU_ERR_GAIN, 0x36},
+ {R367CAB_EQU_RADIUS, 0xaa},
+ {R367CAB_EQU_FFE_MAINTAP, 0x00},
+ {R367CAB_EQU_FFE_LEAKAGE, 0x63},
+ {R367CAB_EQU_FFE_MAINTAP_POS, 0xdf},
+ {R367CAB_EQU_GAIN_WIDE, 0x88},
+ {R367CAB_EQU_GAIN_NARROW, 0x41},
+ {R367CAB_EQU_CTR_LPF_GAIN, 0xd1},
+ {R367CAB_EQU_CRL_LPF_GAIN, 0xa7},
+ {R367CAB_EQU_GLOBAL_GAIN, 0x06},
+ {R367CAB_EQU_CRL_LD_SEN, 0x85},
+ {R367CAB_EQU_CRL_LD_VAL, 0xe2},
+ {R367CAB_EQU_CRL_TFR, 0x20},
+ {R367CAB_EQU_CRL_BISTH_LO, 0x00},
+ {R367CAB_EQU_CRL_BISTH_HI, 0x00},
+ {R367CAB_EQU_SWEEP_RANGE_LO, 0x00},
+ {R367CAB_EQU_SWEEP_RANGE_HI, 0x00},
+ {R367CAB_EQU_CRL_LIMITER, 0x40},
+ {R367CAB_EQU_MODULUS_MAP, 0x90},
+ {R367CAB_EQU_PNT_GAIN, 0xa7},
+ {R367CAB_FEC_AC_CTR_0, 0x16},
+ {R367CAB_FEC_AC_CTR_1, 0x0b},
+ {R367CAB_FEC_AC_CTR_2, 0x88},
+ {R367CAB_FEC_AC_CTR_3, 0x02},
+ {R367CAB_FEC_STATUS, 0x12},
+ {R367CAB_RS_COUNTER_0, 0x7d},
+ {R367CAB_RS_COUNTER_1, 0xd0},
+ {R367CAB_RS_COUNTER_2, 0x19},
+ {R367CAB_RS_COUNTER_3, 0x0b},
+ {R367CAB_RS_COUNTER_4, 0xa3},
+ {R367CAB_RS_COUNTER_5, 0x00},
+ {R367CAB_BERT_0, 0x01},
+ {R367CAB_BERT_1, 0x25},
+ {R367CAB_BERT_2, 0x41},
+ {R367CAB_BERT_3, 0x39},
+ {R367CAB_OUTFORMAT_0, 0xc2},
+ {R367CAB_OUTFORMAT_1, 0x22},
+ {R367CAB_SMOOTHER_2, 0x28},
+ {R367CAB_TSMF_CTRL_0, 0x01},
+ {R367CAB_TSMF_CTRL_1, 0xc6},
+ {R367CAB_TSMF_CTRL_3, 0x43},
+ {R367CAB_TS_ON_ID_0, 0x00},
+ {R367CAB_TS_ON_ID_1, 0x00},
+ {R367CAB_TS_ON_ID_2, 0x00},
+ {R367CAB_TS_ON_ID_3, 0x00},
+ {R367CAB_RE_STATUS_0, 0x00},
+ {R367CAB_RE_STATUS_1, 0x00},
+ {R367CAB_RE_STATUS_2, 0x00},
+ {R367CAB_RE_STATUS_3, 0x00},
+ {R367CAB_TS_STATUS_0, 0x00},
+ {R367CAB_TS_STATUS_1, 0x00},
+ {R367CAB_TS_STATUS_2, 0xa0},
+ {R367CAB_TS_STATUS_3, 0x00},
+ {R367CAB_T_O_ID_0, 0x00},
+ {R367CAB_T_O_ID_1, 0x00},
+ {R367CAB_T_O_ID_2, 0x00},
+ {R367CAB_T_O_ID_3, 0x00},
+ {0x0000, 0x00} /* EOT */
+};
+
+static const struct st_register def0367dd_base[] = {
+ {R367TER_IOCFG0, 0x80},
+ {R367TER_DAC0R, 0x00},
+ {R367TER_IOCFG1, 0x00},
+ {R367TER_DAC1R, 0x00},
+ {R367TER_IOCFG2, 0x00},
+ {R367TER_SDFR, 0x00},
+ {R367TER_AUX_CLK, 0x00},
+ {R367TER_FREESYS1, 0x00},
+ {R367TER_FREESYS2, 0x00},
+ {R367TER_FREESYS3, 0x00},
+ {R367TER_GPIO_CFG, 0x55},
+ {R367TER_GPIO_CMD, 0x01},
+ {R367TER_TSTRES, 0x00},
+ {R367TER_ANACTRL, 0x00},
+ {R367TER_TSTBUS, 0x00},
+ {R367TER_RF_AGC2, 0x20},
+ {R367TER_ANADIGCTRL, 0x0b},
+ {R367TER_PLLMDIV, 0x01},
+ {R367TER_PLLNDIV, 0x08},
+ {R367TER_PLLSETUP, 0x18},
+ {R367TER_DUAL_AD12, 0x04},
+ {R367TER_TSTBIST, 0x00},
+ {0x0000, 0x00} /* EOT */
+};
+
+/*
+ * Tables combined
+ */
+
+static const struct
+st_register *stv0367_deftabs[STV0367_DEFTAB_MAX][STV0367_TAB_MAX] = {
+ /* generic default/init tabs */
+ { def0367ter, def0367cab, NULL },
+ /* default tabs for digital devices cards/flex modules */
+ { def0367dd_ofdm, def0367dd_qam, def0367dd_base },
+};
+
+#endif
diff --git a/drivers/media/dvb-frontends/stv0367_regs.h b/drivers/media/dvb-frontends/stv0367_regs.h
index 1d1586221239..cc66d93c5a1f 100644
--- a/drivers/media/dvb-frontends/stv0367_regs.h
+++ b/drivers/media/dvb-frontends/stv0367_regs.h
@@ -2639,8 +2639,6 @@
#define R367TER_DEBUG_LT9 0xf405
#define F367TER_F_DEBUG_LT9 0xf40500ff
-#define STV0367TER_NBREGS 445
-
/* ID */
#define R367CAB_ID 0xf000
#define F367CAB_IDENTIFICATIONREGISTER 0xf00000ff
@@ -3605,6 +3603,4 @@
#define R367CAB_T_O_ID_3 0xf4d3
#define F367CAB_TS_ID_I_H 0xf4d300ff
-#define STV0367CAB_NBREGS 187
-
#endif
diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c
index 47c0549eb7b2..1c689f7f4ab8 100644
--- a/drivers/media/dvb-frontends/zl10353.c
+++ b/drivers/media/dvb-frontends/zl10353.c
@@ -211,7 +211,7 @@ static int zl10353_set_parameters(struct dvb_frontend *fe)
break;
default:
c->bandwidth_hz = 8000000;
- /* fall though */
+ /* fall through */
case 8000000:
zl10353_single_write(fe, MCLK_RATIO, 0x75);
zl10353_single_write(fe, 0x64, 0x36);
@@ -268,6 +268,7 @@ static int zl10353_set_parameters(struct dvb_frontend *fe)
if (c->hierarchy == HIERARCHY_AUTO ||
c->hierarchy == HIERARCHY_NONE)
break;
+ /* fall through */
default:
return -EINVAL;
}
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index aaa9471c7d11..121b3b5394cb 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -209,6 +209,7 @@ config VIDEO_ADV7604
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
depends on GPIOLIB || COMPILE_TEST
select HDMI
+ select V4L2_FWNODE
---help---
Support for the Analog Devices ADV7604 video decoder.
@@ -302,6 +303,16 @@ config VIDEO_AD5820
This is a driver for the AD5820 camera lens voice coil.
It is used for example in Nokia N900 (RX-51).
+config VIDEO_DW9714
+ tristate "DW9714 lens voice coil support"
+ depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on VIDEO_V4L2_SUBDEV_API
+ ---help---
+ This is a driver for the DW9714 camera lens voice coil.
+ The DW9714 is a 10-bit DAC with a 120 mA output current sink
+ capability, designed for linear control of voice coil motors
+ and controlled via an I2C serial interface.
+
config VIDEO_SAA7110
tristate "Philips SAA7110 video decoder"
depends on VIDEO_V4L2 && I2C
@@ -324,6 +335,7 @@ config VIDEO_TC358743
tristate "Toshiba TC358743 decoder"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
select HDMI
+ select V4L2_FWNODE
---help---
Support for the Toshiba TC358743 HDMI to MIPI CSI-2 bridge.
@@ -333,6 +345,7 @@ config VIDEO_TC358743
config VIDEO_TVP514X
tristate "Texas Instruments TVP514x video decoder"
depends on VIDEO_V4L2 && I2C
+ select V4L2_FWNODE
---help---
This is a Video4Linux2 sensor-level driver for the TI TVP5146/47
decoder. It is currently working with the TI OMAP3 camera
@@ -344,6 +357,7 @@ config VIDEO_TVP514X
config VIDEO_TVP5150
tristate "Texas Instruments TVP5150 video decoder"
depends on VIDEO_V4L2 && I2C
+ select V4L2_FWNODE
---help---
Support for the Texas Instruments TVP5150 video decoder.
@@ -353,6 +367,7 @@ config VIDEO_TVP5150
config VIDEO_TVP7002
tristate "Texas Instruments TVP7002 video decoder"
depends on VIDEO_V4L2 && I2C
+ select V4L2_FWNODE
---help---
Support for the Texas Instruments TVP7002 video decoder.
@@ -535,6 +550,7 @@ config VIDEO_OV2659
tristate "OmniVision OV2659 sensor support"
depends on VIDEO_V4L2 && I2C
depends on MEDIA_CAMERA_SUPPORT
+ select V4L2_FWNODE
---help---
This is a Video4Linux2 sensor-level driver for the OmniVision
OV2659 camera.
@@ -542,11 +558,22 @@ config VIDEO_OV2659
To compile this driver as a module, choose M here: the
module will be called ov2659.
+config VIDEO_OV5640
+ tristate "OmniVision OV5640 sensor support"
+ depends on OF
+ depends on GPIOLIB && VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
+ select V4L2_FWNODE
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the OmniVision
+ OV5640 camera sensor with a MIPI CSI-2 interface.
+
config VIDEO_OV5645
tristate "OmniVision OV5645 sensor support"
depends on OF
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
+ select V4L2_FWNODE
---help---
This is a Video4Linux2 sensor-level driver for the OmniVision
OV5645 camera.
@@ -558,6 +585,7 @@ config VIDEO_OV5647
tristate "OmniVision OV5647 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
+ select V4L2_FWNODE
---help---
This is a Video4Linux2 sensor-level driver for the OmniVision
OV5647 camera.
@@ -592,6 +620,14 @@ config VIDEO_OV9650
This is a V4L2 sensor-level driver for the Omnivision
OV9650 and OV9652 camera sensors.
+config VIDEO_OV13858
+ tristate "OmniVision OV13858 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the OmniVision
+ OV13858 camera.
+
config VIDEO_VS6624
tristate "ST VS6624 sensor support"
depends on VIDEO_V4L2 && I2C
@@ -650,6 +686,7 @@ config VIDEO_MT9V032
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
select REGMAP_I2C
+ select V4L2_FWNODE
---help---
This is a Video4Linux2 sensor-level driver for the Micron
MT9V032 752x480 CMOS sensor.
@@ -697,6 +734,7 @@ config VIDEO_S5K4ECGX
config VIDEO_S5K5BAF
tristate "Samsung S5K5BAF sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
---help---
This is a V4L2 sensor-level driver for Samsung S5K5BAF 2M
camera sensor with an embedded SoC image signal processor.
@@ -707,6 +745,7 @@ source "drivers/media/i2c/et8ek8/Kconfig"
config VIDEO_S5C73M3
tristate "Samsung S5C73M3 sensor support"
depends on I2C && SPI && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
---help---
This is a V4L2 sensor-level driver for Samsung S5C73M3
8 Mpixel camera.
@@ -785,6 +824,18 @@ config VIDEO_SAA6752HS
To compile this driver as a module, choose M here: the
module will be called saa6752hs.
+comment "SDR tuner chips"
+
+config SDR_MAX2175
+ tristate "Maxim 2175 RF to Bits tuner"
+ depends on VIDEO_V4L2 && MEDIA_SDR_SUPPORT && I2C
+ ---help---
+ Support for the Maxim 2175 tuner. It is an advanced analog/digital
+ radio receiver with an RF-to-Bits front-end designed for SDR solutions.
+
+ To compile this driver as a module, choose M here; the
+ module will be called max2175.
+
comment "Miscellaneous helper chips"
config VIDEO_THS7303
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 62323ec66be8..2c0868fa6034 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_VIDEO_SAA7127) += saa7127.o
obj-$(CONFIG_VIDEO_SAA7185) += saa7185.o
obj-$(CONFIG_VIDEO_SAA6752HS) += saa6752hs.o
obj-$(CONFIG_VIDEO_AD5820) += ad5820.o
+obj-$(CONFIG_VIDEO_DW9714) += dw9714.o
obj-$(CONFIG_VIDEO_ADV7170) += adv7170.o
obj-$(CONFIG_VIDEO_ADV7175) += adv7175.o
obj-$(CONFIG_VIDEO_ADV7180) += adv7180.o
@@ -58,11 +59,13 @@ obj-$(CONFIG_VIDEO_SONY_BTF_MPX) += sony-btf-mpx.o
obj-$(CONFIG_VIDEO_UPD64031A) += upd64031a.o
obj-$(CONFIG_VIDEO_UPD64083) += upd64083.o
obj-$(CONFIG_VIDEO_OV2640) += ov2640.o
+obj-$(CONFIG_VIDEO_OV5640) += ov5640.o
obj-$(CONFIG_VIDEO_OV5645) += ov5645.o
obj-$(CONFIG_VIDEO_OV5647) += ov5647.o
obj-$(CONFIG_VIDEO_OV7640) += ov7640.o
obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
obj-$(CONFIG_VIDEO_OV9650) += ov9650.o
+obj-$(CONFIG_VIDEO_OV13858) += ov13858.o
obj-$(CONFIG_VIDEO_MT9M032) += mt9m032.o
obj-$(CONFIG_VIDEO_MT9M111) += mt9m111.o
obj-$(CONFIG_VIDEO_MT9P031) += mt9p031.o
@@ -86,3 +89,5 @@ obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_TC358743) += tc358743.o
+
+obj-$(CONFIG_SDR_MAX2175) += max2175.o
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
index 3d2a3c6b67d8..034ebf754007 100644
--- a/drivers/media/i2c/ad5820.c
+++ b/drivers/media/i2c/ad5820.c
@@ -341,7 +341,7 @@ static int ad5820_remove(struct i2c_client *client)
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct ad5820_device *coil = to_ad5820_device(subdev);
- v4l2_device_unregister_subdev(&coil->subdev);
+ v4l2_async_unregister_subdev(&coil->subdev);
v4l2_ctrl_handler_free(&coil->ctrls);
media_entity_cleanup(&coil->subdev.entity);
mutex_destroy(&coil->power_lock);
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index bdbbf8cf27e4..78de7ddf5081 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -1452,6 +1452,8 @@ static SIMPLE_DEV_PM_OPS(adv7180_pm_ops, adv7180_suspend, adv7180_resume);
#ifdef CONFIG_OF
static const struct of_device_id adv7180_of_id[] = {
{ .compatible = "adi,adv7180", },
+ { .compatible = "adi,adv7180cp", },
+ { .compatible = "adi,adv7180st", },
{ .compatible = "adi,adv7182", },
{ .compatible = "adi,adv7280", },
{ .compatible = "adi,adv7280-m", },
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index f1fa9cec489f..660bacb8f7d9 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -33,6 +33,7 @@
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_graph.h>
#include <linux/slab.h>
#include <linux/v4l2-dv-timings.h>
#include <linux/videodev2.h>
@@ -45,7 +46,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-dv-timings.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
static int debug;
module_param(debug, int, 0644);
@@ -3069,7 +3070,7 @@ MODULE_DEVICE_TABLE(of, adv76xx_of_id);
static int adv76xx_parse_dt(struct adv76xx_state *state)
{
- struct v4l2_of_endpoint bus_cfg;
+ struct v4l2_fwnode_endpoint bus_cfg;
struct device_node *endpoint;
struct device_node *np;
unsigned int flags;
@@ -3083,7 +3084,7 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
if (!endpoint)
return -EINVAL;
- ret = v4l2_of_parse_endpoint(endpoint, &bus_cfg);
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint), &bus_cfg);
if (ret) {
of_node_put(endpoint);
return ret;
diff --git a/drivers/media/i2c/as3645a.c b/drivers/media/i2c/as3645a.c
index b6aeceea9850..af5db71a0888 100644
--- a/drivers/media/i2c/as3645a.c
+++ b/drivers/media/i2c/as3645a.c
@@ -294,8 +294,8 @@ static int as3645a_read_fault(struct as3645a *flash)
dev_dbg(&client->dev, "Inductor Peak limit fault\n");
if (rval & AS_FAULT_INFO_INDICATOR_LED)
- dev_dbg(&client->dev, "Indicator LED fault: "
- "Short circuit or open loop\n");
+ dev_dbg(&client->dev,
+ "Indicator LED fault: Short circuit or open loop\n");
dev_dbg(&client->dev, "%u connected LEDs\n",
rval & AS_FAULT_INFO_LED_AMOUNT ? 2 : 1);
@@ -310,8 +310,8 @@ static int as3645a_read_fault(struct as3645a *flash)
dev_dbg(&client->dev, "Short circuit fault\n");
if (rval & AS_FAULT_INFO_OVER_VOLTAGE)
- dev_dbg(&client->dev, "Over voltage fault: "
- "Indicates missing capacitor or open connection\n");
+ dev_dbg(&client->dev,
+ "Over voltage fault: Indicates missing capacitor or open connection\n");
return rval;
}
@@ -583,8 +583,8 @@ static int as3645a_registered(struct v4l2_subdev *sd)
/* Verify the chip model and version. */
if (model != 0x01 || rfu != 0x00) {
- dev_err(&client->dev, "AS3645A not detected "
- "(model %d rfu %d)\n", model, rfu);
+ dev_err(&client->dev,
+ "AS3645A not detected (model %d rfu %d)\n", model, rfu);
rval = -ENODEV;
goto power_off;
}
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index b8d3c070bfc1..39f51daa7558 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -416,11 +416,13 @@ static void cx25840_initialize(struct i2c_client *client)
INIT_WORK(&state->fw_work, cx25840_work_handler);
init_waitqueue_head(&state->fw_wait);
q = create_singlethread_workqueue("cx25840_fw");
- prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
- queue_work(q, &state->fw_work);
- schedule();
- finish_wait(&state->fw_wait, &wait);
- destroy_workqueue(q);
+ if (q) {
+ prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+ queue_work(q, &state->fw_work);
+ schedule();
+ finish_wait(&state->fw_wait, &wait);
+ destroy_workqueue(q);
+ }
/* 6. */
cx25840_write(client, 0x115, 0x8c);
@@ -630,11 +632,13 @@ static void cx23885_initialize(struct i2c_client *client)
INIT_WORK(&state->fw_work, cx25840_work_handler);
init_waitqueue_head(&state->fw_wait);
q = create_singlethread_workqueue("cx25840_fw");
- prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
- queue_work(q, &state->fw_work);
- schedule();
- finish_wait(&state->fw_wait, &wait);
- destroy_workqueue(q);
+ if (q) {
+ prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+ queue_work(q, &state->fw_work);
+ schedule();
+ finish_wait(&state->fw_wait, &wait);
+ destroy_workqueue(q);
+ }
/* Call the cx23888 specific std setup func, we no longer rely on
* the generic cx24840 func.
@@ -748,11 +752,13 @@ static void cx231xx_initialize(struct i2c_client *client)
INIT_WORK(&state->fw_work, cx25840_work_handler);
init_waitqueue_head(&state->fw_wait);
q = create_singlethread_workqueue("cx25840_fw");
- prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
- queue_work(q, &state->fw_work);
- schedule();
- finish_wait(&state->fw_wait, &wait);
- destroy_workqueue(q);
+ if (q) {
+ prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+ queue_work(q, &state->fw_work);
+ schedule();
+ finish_wait(&state->fw_wait, &wait);
+ destroy_workqueue(q);
+ }
cx25840_std_setup(client);
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
new file mode 100644
index 000000000000..6a607d7f82de
--- /dev/null
+++ b/drivers/media/i2c/dw9714.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2015--2017 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#define DW9714_NAME "dw9714"
+#define DW9714_MAX_FOCUS_POS 1023
+/*
+ * This acts as the minimum granularity of lens movement.
+ * Keep this value a power of 2, so the control steps can be
+ * uniformly adjusted for gradual lens movement, with the
+ * desired number of control steps.
+ */
+#define DW9714_CTRL_STEPS 16
+#define DW9714_CTRL_DELAY_US 1000
+/*
+ * S[3:2] = 0x00, codes per step for "Linear Slope Control"
+ * S[1:0] = 0x00, step period
+ */
+#define DW9714_DEFAULT_S 0x0
+#define DW9714_VAL(data, s) ((data) << 4 | (s))
+
+/* dw9714 device structure */
+struct dw9714_device {
+ struct i2c_client *client;
+ struct v4l2_ctrl_handler ctrls_vcm;
+ struct v4l2_subdev sd;
+ u16 current_val;
+};
+
+static inline struct dw9714_device *to_dw9714_vcm(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct dw9714_device, ctrls_vcm);
+}
+
+static inline struct dw9714_device *sd_to_dw9714_vcm(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct dw9714_device, sd);
+}
+
+static int dw9714_i2c_write(struct i2c_client *client, u16 data)
+{
+ int ret;
+ u16 val = cpu_to_be16(data);
+
+ ret = i2c_master_send(client, (const char *)&val, sizeof(val));
+ if (ret != sizeof(val)) {
+ dev_err(&client->dev, "I2C write fail\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int dw9714_t_focus_vcm(struct dw9714_device *dw9714_dev, u16 val)
+{
+ struct i2c_client *client = dw9714_dev->client;
+
+ dw9714_dev->current_val = val;
+
+ return dw9714_i2c_write(client, DW9714_VAL(val, DW9714_DEFAULT_S));
+}
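
DW9714_VAL() packs the 10-bit focus code into bits [13:4] and the 4-bit slope/step field S into bits [3:0]; dw9714_i2c_write() then sends the 16-bit result big-endian, MSB first. A minimal userspace sketch of that packing, for illustration only (not part of the driver; the focus code 512 is an arbitrary example):

#include <stdio.h>
#include <stdint.h>

/* Same packing as the driver's DW9714_VAL(): focus code in [13:4], S in [3:0] */
#define DW9714_VAL(data, s)	((data) << 4 | (s))

int main(void)
{
	uint16_t val = DW9714_VAL(512, 0x0);	/* arbitrary focus code, default S */

	/* 512 << 4 = 0x2000; sent big-endian, so 0x20 0x00 go out on the bus */
	printf("value 0x%04x -> wire bytes 0x%02x 0x%02x\n",
	       val, val >> 8, val & 0xff);
	return 0;
}
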
+
+static int dw9714_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct dw9714_device *dev_vcm = to_dw9714_vcm(ctrl);
+
+ if (ctrl->id == V4L2_CID_FOCUS_ABSOLUTE)
+ return dw9714_t_focus_vcm(dev_vcm, ctrl->val);
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops dw9714_vcm_ctrl_ops = {
+ .s_ctrl = dw9714_set_ctrl,
+};
+
+static int dw9714_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd);
+ struct device *dev = &dw9714_dev->client->dev;
+ int rval;
+
+ rval = pm_runtime_get_sync(dev);
+ if (rval < 0) {
+ pm_runtime_put_noidle(dev);
+ return rval;
+ }
+
+ return 0;
+}
+
+static int dw9714_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd);
+ struct device *dev = &dw9714_dev->client->dev;
+
+ pm_runtime_put(dev);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops dw9714_int_ops = {
+ .open = dw9714_open,
+ .close = dw9714_close,
+};
+
+static const struct v4l2_subdev_ops dw9714_ops = { };
+
+static void dw9714_subdev_cleanup(struct dw9714_device *dw9714_dev)
+{
+ v4l2_async_unregister_subdev(&dw9714_dev->sd);
+ v4l2_ctrl_handler_free(&dw9714_dev->ctrls_vcm);
+ media_entity_cleanup(&dw9714_dev->sd.entity);
+}
+
+static int dw9714_init_controls(struct dw9714_device *dev_vcm)
+{
+ struct v4l2_ctrl_handler *hdl = &dev_vcm->ctrls_vcm;
+ const struct v4l2_ctrl_ops *ops = &dw9714_vcm_ctrl_ops;
+ struct i2c_client *client = dev_vcm->client;
+
+ v4l2_ctrl_handler_init(hdl, 1);
+
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FOCUS_ABSOLUTE,
+ 0, DW9714_MAX_FOCUS_POS, DW9714_CTRL_STEPS, 0);
+
+ if (hdl->error)
+ dev_err(&client->dev, "%s fail error: 0x%x\n",
+ __func__, hdl->error);
+ dev_vcm->sd.ctrl_handler = hdl;
+ return hdl->error;
+}
+
+static int dw9714_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct dw9714_device *dw9714_dev;
+ int rval;
+
+ dw9714_dev = devm_kzalloc(&client->dev, sizeof(*dw9714_dev),
+ GFP_KERNEL);
+ if (dw9714_dev == NULL)
+ return -ENOMEM;
+
+ dw9714_dev->client = client;
+
+ v4l2_i2c_subdev_init(&dw9714_dev->sd, client, &dw9714_ops);
+ dw9714_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dw9714_dev->sd.internal_ops = &dw9714_int_ops;
+
+ rval = dw9714_init_controls(dw9714_dev);
+ if (rval)
+ goto err_cleanup;
+
+ rval = media_entity_pads_init(&dw9714_dev->sd.entity, 0, NULL);
+ if (rval < 0)
+ goto err_cleanup;
+
+ dw9714_dev->sd.entity.function = MEDIA_ENT_F_LENS;
+
+ rval = v4l2_async_register_subdev(&dw9714_dev->sd);
+ if (rval < 0)
+ goto err_cleanup;
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+
+ return 0;
+
+err_cleanup:
+ dw9714_subdev_cleanup(dw9714_dev);
+ dev_err(&client->dev, "Probe failed: %d\n", rval);
+ return rval;
+}
+
+static int dw9714_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd);
+
+ pm_runtime_disable(&client->dev);
+ dw9714_subdev_cleanup(dw9714_dev);
+
+ return 0;
+}
+
+/*
+ * This function sets the VCM position to a value where it consumes the
+ * least current. The lens position is gradually moved in units of
+ * DW9714_CTRL_STEPS to keep the movement smooth.
+ */
+static int __maybe_unused dw9714_vcm_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd);
+ int ret, val;
+
+ for (val = dw9714_dev->current_val & ~(DW9714_CTRL_STEPS - 1);
+ val >= 0; val -= DW9714_CTRL_STEPS) {
+ ret = dw9714_i2c_write(client,
+ DW9714_VAL(val, DW9714_DEFAULT_S));
+ if (ret)
+ dev_err_once(dev, "%s I2C failure: %d", __func__, ret);
+ usleep_range(DW9714_CTRL_DELAY_US, DW9714_CTRL_DELAY_US + 10);
+ }
+ return 0;
+}
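
The ramp-down above first rounds current_val down to a multiple of DW9714_CTRL_STEPS, then walks to zero 16 codes at a time with roughly 1 ms between writes. A standalone sketch of the code sequence produced for an assumed starting position of 500 (496, 480, ..., 16, 0):

#include <stdio.h>

#define DW9714_CTRL_STEPS	16

int main(void)
{
	int current_val = 500;	/* assumed focus position before suspend */
	int val;

	/* Same loop bounds as dw9714_vcm_suspend() */
	for (val = current_val & ~(DW9714_CTRL_STEPS - 1);
	     val >= 0; val -= DW9714_CTRL_STEPS)
		printf("%d ", val);
	printf("\n");

	return 0;
}
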
+
+/*
+ * This function restores the VCM position to the value set by the user
+ * through the v4l2_ctrl_ops s_ctrl handler.
+ * The lens position is gradually moved in units of DW9714_CTRL_STEPS
+ * to keep the movement smooth.
+ */
+static int __maybe_unused dw9714_vcm_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd);
+ int ret, val;
+
+ for (val = dw9714_dev->current_val % DW9714_CTRL_STEPS;
+ val < dw9714_dev->current_val + DW9714_CTRL_STEPS - 1;
+ val += DW9714_CTRL_STEPS) {
+ ret = dw9714_i2c_write(client,
+ DW9714_VAL(val, DW9714_DEFAULT_S));
+ if (ret)
+ dev_err_ratelimited(dev, "%s I2C failure: %d",
+ __func__, ret);
+ usleep_range(DW9714_CTRL_DELAY_US, DW9714_CTRL_DELAY_US + 10);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id dw9714_acpi_match[] = {
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, dw9714_acpi_match);
+#endif
+
+static const struct i2c_device_id dw9714_id_table[] = {
+ {DW9714_NAME, 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, dw9714_id_table);
+
+static const struct dev_pm_ops dw9714_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dw9714_vcm_suspend, dw9714_vcm_resume)
+ SET_RUNTIME_PM_OPS(dw9714_vcm_suspend, dw9714_vcm_resume, NULL)
+};
+
+static struct i2c_driver dw9714_i2c_driver = {
+ .driver = {
+ .name = DW9714_NAME,
+ .pm = &dw9714_pm_ops,
+ .acpi_match_table = ACPI_PTR(dw9714_acpi_match),
+ },
+ .probe = dw9714_probe,
+ .remove = dw9714_remove,
+ .id_table = dw9714_id_table,
+};
+
+module_i2c_driver(dw9714_i2c_driver);
+
+MODULE_AUTHOR("Tianshu Qiu <[email protected]>");
+MODULE_AUTHOR("Jian Xu Zheng <[email protected]>");
+MODULE_AUTHOR("Yuning Pu <[email protected]>");
+MODULE_AUTHOR("Jouni Ukkonen <[email protected]>");
+MODULE_AUTHOR("Tommi Franttila <[email protected]>");
+MODULE_DESCRIPTION("DW9714 VCM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
new file mode 100644
index 000000000000..a4736a8a7792
--- /dev/null
+++ b/drivers/media/i2c/max2175.c
@@ -0,0 +1,1453 @@
+/*
+ * Maxim Integrated MAX2175 RF to Bits tuner driver
+ *
+ * This driver and most of the hard-coded values are based on the reference
+ * application delivered by Maxim for this device.
+ *
+ * Copyright (C) 2016 Maxim Integrated Products
+ * Copyright (C) 2017 Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/max2175.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#include "max2175.h"
+
+#define DRIVER_NAME "max2175"
+
+#define mxm_dbg(ctx, fmt, arg...) dev_dbg(&ctx->client->dev, fmt, ## arg)
+#define mxm_err(ctx, fmt, arg...) dev_err(&ctx->client->dev, fmt, ## arg)
+
+/* Rx mode */
+struct max2175_rxmode {
+ enum max2175_band band; /* Associated band */
+ u32 freq; /* Default freq in Hz */
+ u8 i2s_word_size; /* Bit value */
+};
+
+/* Register map to define preset values */
+struct max2175_reg_map {
+ u8 idx; /* Register index */
+ u8 val; /* Register value */
+};
+
+static const struct max2175_rxmode eu_rx_modes[] = {
+ /* EU modes */
+ [MAX2175_EU_FM_1_2] = { MAX2175_BAND_FM, 98256000, 1 },
+ [MAX2175_DAB_1_2] = { MAX2175_BAND_VHF, 182640000, 0 },
+};
+
+static const struct max2175_rxmode na_rx_modes[] = {
+ /* NA modes */
+ [MAX2175_NA_FM_1_0] = { MAX2175_BAND_FM, 98255520, 1 },
+ [MAX2175_NA_FM_2_0] = { MAX2175_BAND_FM, 98255520, 6 },
+};
+
+/*
+ * Preset values:
+ * Based on Maxim MAX2175 Register Table revision: 130p10
+ */
+static const u8 full_fm_eu_1p0[] = {
+ 0x15, 0x04, 0xb8, 0xe3, 0x35, 0x18, 0x7c, 0x00,
+ 0x00, 0x7d, 0x40, 0x08, 0x70, 0x7a, 0x88, 0x91,
+ 0x61, 0x61, 0x61, 0x61, 0x5a, 0x0f, 0x34, 0x1c,
+ 0x14, 0x88, 0x33, 0x02, 0x00, 0x09, 0x00, 0x65,
+ 0x9f, 0x2b, 0x80, 0x00, 0x95, 0x05, 0x2c, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40,
+ 0x4a, 0x08, 0xa8, 0x0e, 0x0e, 0x2f, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0x5e, 0xa9,
+ 0xae, 0xbb, 0x57, 0x18, 0x3b, 0x03, 0x3b, 0x64,
+ 0x40, 0x60, 0x00, 0x2a, 0xbf, 0x3f, 0xff, 0x9f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
+ 0xff, 0xfc, 0xef, 0x1c, 0x40, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0x40, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x00, 0x00,
+ 0x00, 0x47, 0x00, 0x00, 0x11, 0x3f, 0x22, 0x00,
+ 0xf1, 0x00, 0x41, 0x03, 0xb0, 0x00, 0x00, 0x00,
+ 0x1b,
+};
+
+static const u8 full_fm_na_1p0[] = {
+ 0x13, 0x08, 0x8d, 0xc0, 0x35, 0x18, 0x7d, 0x3f,
+ 0x7d, 0x75, 0x40, 0x08, 0x70, 0x7a, 0x88, 0x91,
+ 0x61, 0x61, 0x61, 0x61, 0x5c, 0x0f, 0x34, 0x1c,
+ 0x14, 0x88, 0x33, 0x02, 0x00, 0x01, 0x00, 0x65,
+ 0x9f, 0x2b, 0x80, 0x00, 0x95, 0x05, 0x2c, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40,
+ 0x4a, 0x08, 0xa8, 0x0e, 0x0e, 0xaf, 0x7e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0x5e, 0xa9,
+ 0xae, 0xbb, 0x57, 0x18, 0x3b, 0x03, 0x3b, 0x64,
+ 0x40, 0x60, 0x00, 0x2a, 0xbf, 0x3f, 0xff, 0x9f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
+ 0xff, 0xfc, 0xef, 0x1c, 0x40, 0x00, 0x00, 0x02,
+ 0x00, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x40, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x00, 0x00,
+ 0x00, 0x35, 0x00, 0x00, 0x11, 0x3f, 0x22, 0x00,
+ 0xf1, 0x00, 0x41, 0x03, 0xb0, 0x00, 0x00, 0x00,
+ 0x1b,
+};
+
+/* DAB1.2 settings */
+static const struct max2175_reg_map dab12_map[] = {
+ { 0x01, 0x13 }, { 0x02, 0x0d }, { 0x03, 0x15 }, { 0x04, 0x55 },
+ { 0x05, 0x0a }, { 0x06, 0xa0 }, { 0x07, 0x40 }, { 0x08, 0x00 },
+ { 0x09, 0x00 }, { 0x0a, 0x7d }, { 0x0b, 0x4a }, { 0x0c, 0x28 },
+ { 0x0e, 0x43 }, { 0x0f, 0xb5 }, { 0x10, 0x31 }, { 0x11, 0x9e },
+ { 0x12, 0x68 }, { 0x13, 0x9e }, { 0x14, 0x68 }, { 0x15, 0x58 },
+ { 0x16, 0x2f }, { 0x17, 0x3f }, { 0x18, 0x40 }, { 0x1a, 0x88 },
+ { 0x1b, 0xaa }, { 0x1c, 0x9a }, { 0x1d, 0x00 }, { 0x1e, 0x00 },
+ { 0x23, 0x80 }, { 0x24, 0x00 }, { 0x25, 0x00 }, { 0x26, 0x00 },
+ { 0x27, 0x00 }, { 0x32, 0x08 }, { 0x33, 0xf8 }, { 0x36, 0x2d },
+ { 0x37, 0x7e }, { 0x55, 0xaf }, { 0x56, 0x3f }, { 0x57, 0xf8 },
+ { 0x58, 0x99 }, { 0x76, 0x00 }, { 0x77, 0x00 }, { 0x78, 0x02 },
+ { 0x79, 0x40 }, { 0x82, 0x00 }, { 0x83, 0x00 }, { 0x85, 0x00 },
+ { 0x86, 0x20 },
+};
+
+/* EU FM 1.2 settings */
+static const struct max2175_reg_map fmeu1p2_map[] = {
+ { 0x01, 0x15 }, { 0x02, 0x04 }, { 0x03, 0xb8 }, { 0x04, 0xe3 },
+ { 0x05, 0x35 }, { 0x06, 0x18 }, { 0x07, 0x7c }, { 0x08, 0x00 },
+ { 0x09, 0x00 }, { 0x0a, 0x73 }, { 0x0b, 0x40 }, { 0x0c, 0x08 },
+ { 0x0e, 0x7a }, { 0x0f, 0x88 }, { 0x10, 0x91 }, { 0x11, 0x61 },
+ { 0x12, 0x61 }, { 0x13, 0x61 }, { 0x14, 0x61 }, { 0x15, 0x5a },
+ { 0x16, 0x0f }, { 0x17, 0x34 }, { 0x18, 0x1c }, { 0x1a, 0x88 },
+ { 0x1b, 0x33 }, { 0x1c, 0x02 }, { 0x1d, 0x00 }, { 0x1e, 0x01 },
+ { 0x23, 0x80 }, { 0x24, 0x00 }, { 0x25, 0x95 }, { 0x26, 0x05 },
+ { 0x27, 0x2c }, { 0x32, 0x08 }, { 0x33, 0xa8 }, { 0x36, 0x2f },
+ { 0x37, 0x7e }, { 0x55, 0xbf }, { 0x56, 0x3f }, { 0x57, 0xff },
+ { 0x58, 0x9f }, { 0x76, 0xac }, { 0x77, 0x40 }, { 0x78, 0x00 },
+ { 0x79, 0x00 }, { 0x82, 0x47 }, { 0x83, 0x00 }, { 0x85, 0x11 },
+ { 0x86, 0x3f },
+};
+
+/* FM NA 1.0 settings */
+static const struct max2175_reg_map fmna1p0_map[] = {
+ { 0x01, 0x13 }, { 0x02, 0x08 }, { 0x03, 0x8d }, { 0x04, 0xc0 },
+ { 0x05, 0x35 }, { 0x06, 0x18 }, { 0x07, 0x7d }, { 0x08, 0x3f },
+ { 0x09, 0x7d }, { 0x0a, 0x75 }, { 0x0b, 0x40 }, { 0x0c, 0x08 },
+ { 0x0e, 0x7a }, { 0x0f, 0x88 }, { 0x10, 0x91 }, { 0x11, 0x61 },
+ { 0x12, 0x61 }, { 0x13, 0x61 }, { 0x14, 0x61 }, { 0x15, 0x5c },
+ { 0x16, 0x0f }, { 0x17, 0x34 }, { 0x18, 0x1c }, { 0x1a, 0x88 },
+ { 0x1b, 0x33 }, { 0x1c, 0x02 }, { 0x1d, 0x00 }, { 0x1e, 0x01 },
+ { 0x23, 0x80 }, { 0x24, 0x00 }, { 0x25, 0x95 }, { 0x26, 0x05 },
+ { 0x27, 0x2c }, { 0x32, 0x08 }, { 0x33, 0xa8 }, { 0x36, 0xaf },
+ { 0x37, 0x7e }, { 0x55, 0xbf }, { 0x56, 0x3f }, { 0x57, 0xff },
+ { 0x58, 0x9f }, { 0x76, 0xa6 }, { 0x77, 0x40 }, { 0x78, 0x00 },
+ { 0x79, 0x00 }, { 0x82, 0x35 }, { 0x83, 0x00 }, { 0x85, 0x11 },
+ { 0x86, 0x3f },
+};
+
+/* FM NA 2.0 settings */
+static const struct max2175_reg_map fmna2p0_map[] = {
+ { 0x01, 0x13 }, { 0x02, 0x08 }, { 0x03, 0x8d }, { 0x04, 0xc0 },
+ { 0x05, 0x35 }, { 0x06, 0x18 }, { 0x07, 0x7c }, { 0x08, 0x54 },
+ { 0x09, 0xa7 }, { 0x0a, 0x55 }, { 0x0b, 0x42 }, { 0x0c, 0x48 },
+ { 0x0e, 0x7a }, { 0x0f, 0x88 }, { 0x10, 0x91 }, { 0x11, 0x61 },
+ { 0x12, 0x61 }, { 0x13, 0x61 }, { 0x14, 0x61 }, { 0x15, 0x5c },
+ { 0x16, 0x0f }, { 0x17, 0x34 }, { 0x18, 0x1c }, { 0x1a, 0x88 },
+ { 0x1b, 0x33 }, { 0x1c, 0x02 }, { 0x1d, 0x00 }, { 0x1e, 0x01 },
+ { 0x23, 0x80 }, { 0x24, 0x00 }, { 0x25, 0x95 }, { 0x26, 0x05 },
+ { 0x27, 0x2c }, { 0x32, 0x08 }, { 0x33, 0xa8 }, { 0x36, 0xaf },
+ { 0x37, 0x7e }, { 0x55, 0xbf }, { 0x56, 0x3f }, { 0x57, 0xff },
+ { 0x58, 0x9f }, { 0x76, 0xac }, { 0x77, 0xc0 }, { 0x78, 0x00 },
+ { 0x79, 0x00 }, { 0x82, 0x6b }, { 0x83, 0x00 }, { 0x85, 0x11 },
+ { 0x86, 0x3f },
+};
+
+static const u16 ch_coeff_dab1[] = {
+ 0x001c, 0x0007, 0xffcd, 0x0056, 0xffa4, 0x0033, 0x0027, 0xff61,
+ 0x010e, 0xfec0, 0x0106, 0xffb8, 0xff1c, 0x023c, 0xfcb2, 0x039b,
+ 0xfd4e, 0x0055, 0x036a, 0xf7de, 0x0d21, 0xee72, 0x1499, 0x6a51,
+};
+
+static const u16 ch_coeff_fmeu[] = {
+ 0x0000, 0xffff, 0x0001, 0x0002, 0xfffa, 0xffff, 0x0015, 0xffec,
+ 0xffde, 0x0054, 0xfff9, 0xff52, 0x00b8, 0x00a2, 0xfe0a, 0x00af,
+ 0x02e3, 0xfc14, 0xfe89, 0x089d, 0xfa2e, 0xf30f, 0x25be, 0x4eb6,
+};
+
+static const u16 eq_coeff_fmeu1_ra02_m6db[] = {
+ 0x0040, 0xffc6, 0xfffa, 0x002c, 0x000d, 0xff90, 0x0037, 0x006e,
+ 0xffc0, 0xff5b, 0x006a, 0x00f0, 0xff57, 0xfe94, 0x0112, 0x0252,
+ 0xfe0c, 0xfc6a, 0x0385, 0x0553, 0xfa49, 0xf789, 0x0b91, 0x1a10,
+};
+
+static const u16 ch_coeff_fmna[] = {
+ 0x0001, 0x0003, 0xfffe, 0xfff4, 0x0000, 0x001f, 0x000c, 0xffbc,
+ 0xffd3, 0x007d, 0x0075, 0xff33, 0xff01, 0x0131, 0x01ef, 0xfe60,
+ 0xfc7a, 0x020e, 0x0656, 0xfd94, 0xf395, 0x02ab, 0x2857, 0x3d3f,
+};
+
+static const u16 eq_coeff_fmna1_ra02_m6db[] = {
+ 0xfff1, 0xffe1, 0xffef, 0x000e, 0x0030, 0x002f, 0xfff6, 0xffa7,
+ 0xff9d, 0x000a, 0x00a2, 0x00b5, 0xffea, 0xfed9, 0xfec5, 0x003d,
+ 0x0217, 0x021b, 0xff5a, 0xfc2b, 0xfcbd, 0x02c4, 0x0ac3, 0x0e85,
+};
+
+static const u8 adc_presets[2][23] = {
+ {
+ 0x83, 0x00, 0xcf, 0xb4, 0x0f, 0x2c, 0x0c, 0x49,
+ 0x00, 0x00, 0x00, 0x8c, 0x02, 0x02, 0x00, 0x04,
+ 0xec, 0x82, 0x4b, 0xcc, 0x01, 0x88, 0x0c,
+ },
+ {
+ 0x83, 0x00, 0xcf, 0xb4, 0x0f, 0x2c, 0x0c, 0x49,
+ 0x00, 0x00, 0x00, 0x8c, 0x02, 0x20, 0x33, 0x8c,
+ 0x57, 0xd7, 0x59, 0xb7, 0x65, 0x0e, 0x0c,
+ },
+};
+
+/* Tuner bands */
+static const struct v4l2_frequency_band eu_bands_rf = {
+ .tuner = 0,
+ .type = V4L2_TUNER_RF,
+ .index = 0,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 65000000,
+ .rangehigh = 240000000,
+};
+
+static const struct v4l2_frequency_band na_bands_rf = {
+ .tuner = 0,
+ .type = V4L2_TUNER_RF,
+ .index = 0,
+ .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
+ .rangelow = 65000000,
+ .rangehigh = 108000000,
+};
+
+/* Regmap settings */
+static const struct regmap_range max2175_regmap_volatile_range[] = {
+ regmap_reg_range(0x30, 0x35),
+ regmap_reg_range(0x3a, 0x45),
+ regmap_reg_range(0x59, 0x5e),
+ regmap_reg_range(0x73, 0x75),
+};
+
+static const struct regmap_access_table max2175_volatile_regs = {
+ .yes_ranges = max2175_regmap_volatile_range,
+ .n_yes_ranges = ARRAY_SIZE(max2175_regmap_volatile_range),
+};
+
+static const struct reg_default max2175_reg_defaults[] = {
+ { 0x00, 0x07},
+};
+
+static const struct regmap_config max2175_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .reg_defaults = max2175_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(max2175_reg_defaults),
+ .volatile_table = &max2175_volatile_regs,
+ .cache_type = REGCACHE_FLAT,
+};
+
+struct max2175 {
+ struct v4l2_subdev sd; /* Sub-device */
+ struct i2c_client *client; /* I2C client */
+
+ /* Controls */
+ struct v4l2_ctrl_handler ctrl_hdl;
+ struct v4l2_ctrl *lna_gain; /* LNA gain value */
+ struct v4l2_ctrl *if_gain; /* I/F gain value */
+ struct v4l2_ctrl *pll_lock; /* PLL lock */
+ struct v4l2_ctrl *i2s_en; /* I2S output enable */
+ struct v4l2_ctrl *hsls; /* High-side/Low-side polarity */
+ struct v4l2_ctrl *rx_mode; /* Receive mode */
+
+ /* Regmap */
+ struct regmap *regmap;
+
+ /* Cached configuration */
+ u32 freq; /* Tuned freq in Hz */
+ const struct max2175_rxmode *rx_modes; /* EU or NA modes */
+ const struct v4l2_frequency_band *bands_rf; /* EU or NA bands */
+
+ /* Device settings */
+ unsigned long xtal_freq; /* Ref Oscillator freq in Hz */
+ u32 decim_ratio;
+ bool master; /* Master/Slave */
+ bool am_hiz; /* AM Hi-Z filter */
+
+ /* ROM values */
+ u8 rom_bbf_bw_am;
+ u8 rom_bbf_bw_fm;
+ u8 rom_bbf_bw_dab;
+
+ /* Driver private variables */
+ bool mode_resolved; /* Flag to sanity check settings */
+};
+
+static inline struct max2175 *max2175_from_sd(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct max2175, sd);
+}
+
+static inline struct max2175 *max2175_from_ctrl_hdl(struct v4l2_ctrl_handler *h)
+{
+ return container_of(h, struct max2175, ctrl_hdl);
+}
+
+/* Get bitval of a given val */
+static inline u8 max2175_get_bitval(u8 val, u8 msb, u8 lsb)
+{
+ return (val & GENMASK(msb, lsb)) >> lsb;
+}
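
GENMASK(msb, lsb) builds a mask covering bits msb..lsb inclusive, so max2175_get_bitval() is just a mask-and-shift field extraction. A small userspace sketch with a local stand-in for GENMASK (linux/bits.h is not available there); the register value 0x1a is an arbitrary example, while field [1:0] of register 5 is what the MAX2175_IS_BAND_* macros below read as the band selection:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's GENMASK(), 32-bit values only */
#define GENMASK(h, l)	((~0U >> (31 - (h))) & (~0U << (l)))

static uint8_t get_bitval(uint8_t val, uint8_t msb, uint8_t lsb)
{
	return (val & GENMASK(msb, lsb)) >> lsb;
}

int main(void)
{
	uint8_t reg = 0x1a;	/* arbitrary register value */

	/* 0x1a & 0x03 = 0x02, i.e. the two-bit field [1:0] holds 2 */
	printf("field [1:0] = %u\n", (unsigned int)get_bitval(reg, 1, 0));
	return 0;
}
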
+
+/* Read/Write bit(s) on top of regmap */
+static int max2175_read(struct max2175 *ctx, u8 idx, u8 *val)
+{
+ u32 regval;
+ int ret;
+
+ ret = regmap_read(ctx->regmap, idx, &regval);
+ if (ret)
+ mxm_err(ctx, "read ret(%d): idx 0x%02x\n", ret, idx);
+ else
+ *val = regval;
+
+ return ret;
+}
+
+static int max2175_write(struct max2175 *ctx, u8 idx, u8 val)
+{
+ int ret;
+
+ ret = regmap_write(ctx->regmap, idx, val);
+ if (ret)
+ mxm_err(ctx, "write ret(%d): idx 0x%02x val 0x%02x\n",
+ ret, idx, val);
+
+ return ret;
+}
+
+static u8 max2175_read_bits(struct max2175 *ctx, u8 idx, u8 msb, u8 lsb)
+{
+ u8 val;
+
+ if (max2175_read(ctx, idx, &val))
+ return 0;
+
+ return max2175_get_bitval(val, msb, lsb);
+}
+
+static int max2175_write_bits(struct max2175 *ctx, u8 idx,
+ u8 msb, u8 lsb, u8 newval)
+{
+ int ret = regmap_update_bits(ctx->regmap, idx, GENMASK(msb, lsb),
+ newval << lsb);
+
+ if (ret)
+ mxm_err(ctx, "wbits ret(%d): idx 0x%02x\n", ret, idx);
+
+ return ret;
+}
+
+static int max2175_write_bit(struct max2175 *ctx, u8 idx, u8 bit, u8 newval)
+{
+ return max2175_write_bits(ctx, idx, bit, bit, newval);
+}
+
+/* Checks expected pattern every msec until timeout */
+static int max2175_poll_timeout(struct max2175 *ctx, u8 idx, u8 msb, u8 lsb,
+ u8 exp_bitval, u32 timeout_us)
+{
+ unsigned int val;
+
+ return regmap_read_poll_timeout(ctx->regmap, idx, val,
+ (max2175_get_bitval(val, msb, lsb) == exp_bitval),
+ 1000, timeout_us);
+}
+
+static int max2175_poll_csm_ready(struct max2175 *ctx)
+{
+ int ret;
+
+ ret = max2175_poll_timeout(ctx, 69, 1, 1, 0, 50000);
+ if (ret)
+ mxm_err(ctx, "csm not ready\n");
+
+ return ret;
+}
+
+#define MAX2175_IS_BAND_AM(ctx) \
+ (max2175_read_bits(ctx, 5, 1, 0) == MAX2175_BAND_AM)
+
+#define MAX2175_IS_BAND_VHF(ctx) \
+ (max2175_read_bits(ctx, 5, 1, 0) == MAX2175_BAND_VHF)
+
+#define MAX2175_IS_FM_MODE(ctx) \
+ (max2175_read_bits(ctx, 12, 5, 4) == 0)
+
+#define MAX2175_IS_FMHD_MODE(ctx) \
+ (max2175_read_bits(ctx, 12, 5, 4) == 1)
+
+#define MAX2175_IS_DAB_MODE(ctx) \
+ (max2175_read_bits(ctx, 12, 5, 4) == 2)
+
+static int max2175_band_from_freq(u32 freq)
+{
+ if (freq >= 144000 && freq <= 26100000)
+ return MAX2175_BAND_AM;
+ else if (freq >= 65000000 && freq <= 108000000)
+ return MAX2175_BAND_FM;
+
+ return MAX2175_BAND_VHF;
+}
+
+static void max2175_i2s_enable(struct max2175 *ctx, bool enable)
+{
+ if (enable)
+ /* Stuff bits are zeroed */
+ max2175_write_bits(ctx, 104, 3, 0, 2);
+ else
+ /* Keep SCK alive */
+ max2175_write_bits(ctx, 104, 3, 0, 9);
+ mxm_dbg(ctx, "i2s %sabled\n", enable ? "en" : "dis");
+}
+
+static void max2175_set_filter_coeffs(struct max2175 *ctx, u8 m_sel,
+ u8 bank, const u16 *coeffs)
+{
+ unsigned int i;
+ u8 coeff_addr, upper_address = 24;
+
+ mxm_dbg(ctx, "set_filter_coeffs: m_sel %d bank %d\n", m_sel, bank);
+ max2175_write_bits(ctx, 114, 5, 4, m_sel);
+
+ if (m_sel == 2)
+ upper_address = 12;
+
+ for (i = 0; i < upper_address; i++) {
+ coeff_addr = i + bank * 24;
+ max2175_write(ctx, 115, coeffs[i] >> 8);
+ max2175_write(ctx, 116, coeffs[i]);
+ max2175_write(ctx, 117, coeff_addr | 1 << 7);
+ }
+ max2175_write_bit(ctx, 117, 7, 0);
+}
+
+static void max2175_load_fmeu_1p2(struct max2175 *ctx)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fmeu1p2_map); i++)
+ max2175_write(ctx, fmeu1p2_map[i].idx, fmeu1p2_map[i].val);
+
+ ctx->decim_ratio = 36;
+
+ /* Load the Channel Filter Coefficients into channel filter bank #2 */
+ max2175_set_filter_coeffs(ctx, MAX2175_CH_MSEL, 0, ch_coeff_fmeu);
+ max2175_set_filter_coeffs(ctx, MAX2175_EQ_MSEL, 0,
+ eq_coeff_fmeu1_ra02_m6db);
+}
+
+static void max2175_load_dab_1p2(struct max2175 *ctx)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dab12_map); i++)
+ max2175_write(ctx, dab12_map[i].idx, dab12_map[i].val);
+
+ ctx->decim_ratio = 1;
+
+ /* Load the Channel Filter Coefficients into channel filter bank #2 */
+ max2175_set_filter_coeffs(ctx, MAX2175_CH_MSEL, 2, ch_coeff_dab1);
+}
+
+static void max2175_load_fmna_1p0(struct max2175 *ctx)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fmna1p0_map); i++)
+ max2175_write(ctx, fmna1p0_map[i].idx, fmna1p0_map[i].val);
+}
+
+static void max2175_load_fmna_2p0(struct max2175 *ctx)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fmna2p0_map); i++)
+ max2175_write(ctx, fmna2p0_map[i].idx, fmna2p0_map[i].val);
+}
+
+static void max2175_set_bbfilter(struct max2175 *ctx)
+{
+ if (MAX2175_IS_BAND_AM(ctx)) {
+ max2175_write_bits(ctx, 12, 3, 0, ctx->rom_bbf_bw_am);
+ mxm_dbg(ctx, "set_bbfilter AM: rom %d\n", ctx->rom_bbf_bw_am);
+ } else if (MAX2175_IS_DAB_MODE(ctx)) {
+ max2175_write_bits(ctx, 12, 3, 0, ctx->rom_bbf_bw_dab);
+ mxm_dbg(ctx, "set_bbfilter DAB: rom %d\n", ctx->rom_bbf_bw_dab);
+ } else {
+ max2175_write_bits(ctx, 12, 3, 0, ctx->rom_bbf_bw_fm);
+ mxm_dbg(ctx, "set_bbfilter FM: rom %d\n", ctx->rom_bbf_bw_fm);
+ }
+}
+
+static bool max2175_set_csm_mode(struct max2175 *ctx,
+ enum max2175_csm_mode new_mode)
+{
+ int ret = max2175_poll_csm_ready(ctx);
+
+ if (ret)
+ return ret;
+
+ max2175_write_bits(ctx, 0, 2, 0, new_mode);
+ mxm_dbg(ctx, "set csm new mode %d\n", new_mode);
+
+ /* Wait for a fixed settle down time depending on new mode */
+ switch (new_mode) {
+ case MAX2175_PRESET_TUNE:
+ usleep_range(51100, 51500); /* 51.1ms */
+ break;
+ /*
+ * Other mode switches need different sleep values depending on band &
+ * mode
+ */
+ default:
+ break;
+ }
+
+ return max2175_poll_csm_ready(ctx);
+}
+
+static int max2175_csm_action(struct max2175 *ctx,
+ enum max2175_csm_mode action)
+{
+ int ret;
+
+ mxm_dbg(ctx, "csm_action: %d\n", action);
+
+ /* Other actions can be added in future when needed */
+ ret = max2175_set_csm_mode(ctx, MAX2175_LOAD_TO_BUFFER);
+ if (ret)
+ return ret;
+
+ return max2175_set_csm_mode(ctx, MAX2175_PRESET_TUNE);
+}
+
+static int max2175_set_lo_freq(struct max2175 *ctx, u32 lo_freq)
+{
+ u8 lo_mult, loband_bits = 0, vcodiv_bits = 0;
+ u32 int_desired, frac_desired;
+ enum max2175_band band;
+ int ret;
+
+ band = max2175_read_bits(ctx, 5, 1, 0);
+ switch (band) {
+ case MAX2175_BAND_AM:
+ lo_mult = 16;
+ break;
+ case MAX2175_BAND_FM:
+ if (lo_freq <= 74700000) {
+ lo_mult = 16;
+ } else if (lo_freq > 74700000 && lo_freq <= 110000000) {
+ loband_bits = 1;
+ lo_mult = 8;
+ } else {
+ loband_bits = 1;
+ vcodiv_bits = 3;
+ lo_mult = 8;
+ }
+ break;
+ case MAX2175_BAND_VHF:
+ if (lo_freq <= 210000000)
+ vcodiv_bits = 2;
+ else
+ vcodiv_bits = 1;
+
+ loband_bits = 2;
+ lo_mult = 4;
+ break;
+ default:
+ loband_bits = 3;
+ vcodiv_bits = 2;
+ lo_mult = 2;
+ break;
+ }
+
+ if (band == MAX2175_BAND_L)
+ lo_freq /= lo_mult;
+ else
+ lo_freq *= lo_mult;
+
+ int_desired = lo_freq / ctx->xtal_freq;
+ frac_desired = div_u64((u64)(lo_freq % ctx->xtal_freq) << 20,
+ ctx->xtal_freq);
+
+ /* Check CSM is not busy */
+ ret = max2175_poll_csm_ready(ctx);
+ if (ret)
+ return ret;
+
+ mxm_dbg(ctx, "lo_mult %u int %u frac %u\n",
+ lo_mult, int_desired, frac_desired);
+
+ /* Write the calculated values to the appropriate registers */
+ max2175_write(ctx, 1, int_desired);
+ max2175_write_bits(ctx, 2, 3, 0, (frac_desired >> 16) & 0xf);
+ max2175_write(ctx, 3, frac_desired >> 8);
+ max2175_write(ctx, 4, frac_desired);
+ max2175_write_bits(ctx, 5, 3, 2, loband_bits);
+ max2175_write_bits(ctx, 6, 7, 6, vcodiv_bits);
+
+ return ret;
+}
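
After the band-dependent LO multiplication, the PLL words above are simply int = lo_freq / xtal_freq and frac = ((lo_freq mod xtal_freq) << 20) / xtal_freq. A quick standalone check with assumed example numbers (the EU FM default of 98.256 MHz, lo_mult 8, and the 36.864 MHz EU reference mentioned in max2175_core_init()):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t xtal = 36864000;		/* EU reference clock in Hz */
	uint64_t lo = 98256000ULL * 8;		/* FM band above 74.7 MHz: lo_mult = 8 */
	uint32_t int_part = lo / xtal;
	uint32_t frac_part = ((lo % xtal) << 20) / xtal;

	/* Expect int 21 and frac 338602 for these example values */
	printf("int %u frac %u\n", int_part, frac_part);
	return 0;
}
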
+
+/*
+ * Helper similar to DIV_ROUND_CLOSEST, but as an inline function that accepts
+ * an s64 dividend and an s32 divisor
+ */
+static inline s64 max2175_round_closest(s64 dividend, s32 divisor)
+{
+ if ((dividend > 0 && divisor > 0) || (dividend < 0 && divisor < 0))
+ return div_s64(dividend + divisor / 2, divisor);
+
+ return div_s64(dividend - divisor / 2, divisor);
+}
+
+static int max2175_set_nco_freq(struct max2175 *ctx, s32 nco_freq)
+{
+ s32 clock_rate = ctx->xtal_freq / ctx->decim_ratio;
+ u32 nco_reg, abs_nco_freq = abs(nco_freq);
+ s64 nco_val_desired;
+ int ret;
+
+ if (abs_nco_freq < clock_rate / 2) {
+ nco_val_desired = 2 * nco_freq;
+ } else {
+ nco_val_desired = 2 * (clock_rate - abs_nco_freq);
+ if (nco_freq < 0)
+ nco_val_desired = -nco_val_desired;
+ }
+
+ nco_reg = max2175_round_closest(nco_val_desired << 20, clock_rate);
+
+ if (nco_freq < 0)
+ nco_reg += 0x200000;
+
+ /* Check CSM is not busy */
+ ret = max2175_poll_csm_ready(ctx);
+ if (ret)
+ return ret;
+
+ mxm_dbg(ctx, "freq %d desired %lld reg %u\n",
+ nco_freq, nco_val_desired, nco_reg);
+
+ /* Write the calculated values to the appropriate registers */
+ max2175_write_bits(ctx, 7, 4, 0, (nco_reg >> 16) & 0x1f);
+ max2175_write(ctx, 8, nco_reg >> 8);
+ max2175_write(ctx, 9, nco_reg);
+
+ return ret;
+}
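
For the usual FM low-IF case the arithmetic above works out as follows (assumed values: EU clock 36.864 MHz and decim_ratio 36, so clock_rate is 1.024 MHz, with nco_freq of -128000 as passed from max2175_set_rf_freq_non_am_bands()): nco_val_desired is -256000, the scaled register value is exactly -262144, and the 0x200000 offset applied for negative frequencies yields 0x1c0000. A standalone sketch, illustrative only (a multiplication replaces the kernel's left shift so the userspace arithmetic stays well defined):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t clock_rate = 36864000 / 36;	/* 1024000 Hz */
	int32_t nco_freq = -128000;		/* -low_if_freq for FM */
	int64_t desired = 2 * (int64_t)nco_freq;	/* |nco_freq| < clock_rate / 2 */
	int64_t reg = desired * 1048576 / clock_rate;	/* exact here: -262144 */

	if (nco_freq < 0)
		reg += 0x200000;	/* 2^21 offset for negative frequencies */

	printf("nco_reg = 0x%06llx\n", (unsigned long long)reg);	/* 0x1c0000 */
	return 0;
}
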
+
+static int max2175_set_rf_freq_non_am_bands(struct max2175 *ctx, u64 freq,
+ u32 lo_pos)
+{
+ s64 adj_freq, low_if_freq;
+ int ret;
+
+ mxm_dbg(ctx, "rf_freq: non AM bands\n");
+
+ if (MAX2175_IS_FM_MODE(ctx))
+ low_if_freq = 128000;
+ else if (MAX2175_IS_FMHD_MODE(ctx))
+ low_if_freq = 228000;
+ else
+ return max2175_set_lo_freq(ctx, freq);
+
+ if (MAX2175_IS_BAND_VHF(ctx) == (lo_pos == MAX2175_LO_ABOVE_DESIRED))
+ adj_freq = freq + low_if_freq;
+ else
+ adj_freq = freq - low_if_freq;
+
+ ret = max2175_set_lo_freq(ctx, adj_freq);
+ if (ret)
+ return ret;
+
+ return max2175_set_nco_freq(ctx, -low_if_freq);
+}
+
+static int max2175_set_rf_freq(struct max2175 *ctx, u64 freq, u32 lo_pos)
+{
+ int ret;
+
+ if (MAX2175_IS_BAND_AM(ctx))
+ ret = max2175_set_nco_freq(ctx, freq);
+ else
+ ret = max2175_set_rf_freq_non_am_bands(ctx, freq, lo_pos);
+
+ mxm_dbg(ctx, "set_rf_freq: ret %d freq %llu\n", ret, freq);
+
+ return ret;
+}
+
+static int max2175_tune_rf_freq(struct max2175 *ctx, u64 freq, u32 hsls)
+{
+ int ret;
+
+ ret = max2175_set_rf_freq(ctx, freq, hsls);
+ if (ret)
+ return ret;
+
+ ret = max2175_csm_action(ctx, MAX2175_BUFFER_PLUS_PRESET_TUNE);
+ if (ret)
+ return ret;
+
+ mxm_dbg(ctx, "tune_rf_freq: old %u new %llu\n", ctx->freq, freq);
+ ctx->freq = freq;
+
+ return ret;
+}
+
+static void max2175_set_hsls(struct max2175 *ctx, u32 lo_pos)
+{
+ mxm_dbg(ctx, "set_hsls: lo_pos %u\n", lo_pos);
+
+ if ((lo_pos == MAX2175_LO_BELOW_DESIRED) == MAX2175_IS_BAND_VHF(ctx))
+ max2175_write_bit(ctx, 5, 4, 1);
+ else
+ max2175_write_bit(ctx, 5, 4, 0);
+}
+
+static void max2175_set_eu_rx_mode(struct max2175 *ctx, u32 rx_mode)
+{
+ switch (rx_mode) {
+ case MAX2175_EU_FM_1_2:
+ max2175_load_fmeu_1p2(ctx);
+ break;
+
+ case MAX2175_DAB_1_2:
+ max2175_load_dab_1p2(ctx);
+ break;
+ }
+ /* Master is the default setting */
+ if (!ctx->master)
+ max2175_write_bit(ctx, 30, 7, 1);
+}
+
+static void max2175_set_na_rx_mode(struct max2175 *ctx, u32 rx_mode)
+{
+ switch (rx_mode) {
+ case MAX2175_NA_FM_1_0:
+ max2175_load_fmna_1p0(ctx);
+ break;
+ case MAX2175_NA_FM_2_0:
+ max2175_load_fmna_2p0(ctx);
+ break;
+ }
+ /* Master is the default setting */
+ if (!ctx->master)
+ max2175_write_bit(ctx, 30, 7, 1);
+
+ ctx->decim_ratio = 27;
+
+ /* Load the Channel Filter Coefficients into channel filter bank #2 */
+ max2175_set_filter_coeffs(ctx, MAX2175_CH_MSEL, 0, ch_coeff_fmna);
+ max2175_set_filter_coeffs(ctx, MAX2175_EQ_MSEL, 0,
+ eq_coeff_fmna1_ra02_m6db);
+}
+
+static int max2175_set_rx_mode(struct max2175 *ctx, u32 rx_mode)
+{
+ mxm_dbg(ctx, "set_rx_mode: %u am_hiz %u\n", rx_mode, ctx->am_hiz);
+ if (ctx->xtal_freq == MAX2175_EU_XTAL_FREQ)
+ max2175_set_eu_rx_mode(ctx, rx_mode);
+ else
+ max2175_set_na_rx_mode(ctx, rx_mode);
+
+ if (ctx->am_hiz) {
+ mxm_dbg(ctx, "setting AM HiZ related config\n");
+ max2175_write_bit(ctx, 50, 5, 1);
+ max2175_write_bit(ctx, 90, 7, 1);
+ max2175_write_bits(ctx, 73, 1, 0, 2);
+ max2175_write_bits(ctx, 80, 5, 0, 33);
+ }
+
+ /* Load BB filter trim values saved in ROM */
+ max2175_set_bbfilter(ctx);
+
+ /* Set HSLS */
+ max2175_set_hsls(ctx, ctx->hsls->cur.val);
+
+ /* Use i2s enable settings */
+ max2175_i2s_enable(ctx, ctx->i2s_en->cur.val);
+
+ ctx->mode_resolved = true;
+
+ return 0;
+}
+
+static int max2175_rx_mode_from_freq(struct max2175 *ctx, u32 freq, u32 *mode)
+{
+ unsigned int i;
+ int band = max2175_band_from_freq(freq);
+
+ /* Pick the first match always */
+ for (i = 0; i <= ctx->rx_mode->maximum; i++) {
+ if (ctx->rx_modes[i].band == band) {
+ *mode = i;
+ mxm_dbg(ctx, "rx_mode_from_freq: freq %u mode %d\n",
+ freq, *mode);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static bool max2175_freq_rx_mode_valid(struct max2175 *ctx,
+ u32 mode, u32 freq)
+{
+ int band = max2175_band_from_freq(freq);
+
+ return (ctx->rx_modes[mode].band == band);
+}
+
+static void max2175_load_adc_presets(struct max2175 *ctx)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(adc_presets); i++)
+ for (j = 0; j < ARRAY_SIZE(adc_presets[0]); j++)
+ max2175_write(ctx, 146 + j + i * 55, adc_presets[i][j]);
+}
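
The index expression 146 + j + i * 55 places the first 23-byte preset at registers 146..168 and the second at 201..223 (the two banks start 55 registers apart). A trivial standalone check of those bounds, for illustration only:

#include <stdio.h>

int main(void)
{
	unsigned int i, len = 23;	/* bytes per ADC preset */

	for (i = 0; i < 2; i++)
		printf("preset %u -> registers %u..%u\n",
		       i, 146 + i * 55, 146 + (len - 1) + i * 55);
	return 0;
}
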
+
+static int max2175_init_power_manager(struct max2175 *ctx)
+{
+ int ret;
+
+ /* Execute on-chip power-up/calibration */
+ max2175_write_bit(ctx, 99, 2, 0);
+ usleep_range(1000, 1500);
+ max2175_write_bit(ctx, 99, 2, 1);
+
+ /* Wait for the power manager to finish. */
+ ret = max2175_poll_timeout(ctx, 69, 7, 7, 1, 50000);
+ if (ret)
+ mxm_err(ctx, "init pm failed\n");
+
+ return ret;
+}
+
+static int max2175_recalibrate_adc(struct max2175 *ctx)
+{
+ int ret;
+
+ /* ADC Re-calibration */
+ max2175_write(ctx, 150, 0xff);
+ max2175_write(ctx, 205, 0xff);
+ max2175_write(ctx, 147, 0x20);
+ max2175_write(ctx, 147, 0x00);
+ max2175_write(ctx, 202, 0x20);
+ max2175_write(ctx, 202, 0x00);
+
+ ret = max2175_poll_timeout(ctx, 69, 4, 3, 3, 50000);
+ if (ret)
+ mxm_err(ctx, "adc recalibration failed\n");
+
+ return ret;
+}
+
+static u8 max2175_read_rom(struct max2175 *ctx, u8 row)
+{
+ u8 data = 0;
+
+ max2175_write_bit(ctx, 56, 4, 0);
+ max2175_write_bits(ctx, 56, 3, 0, row);
+
+ usleep_range(2000, 2500);
+ max2175_read(ctx, 58, &data);
+
+ max2175_write_bits(ctx, 56, 3, 0, 0);
+
+ mxm_dbg(ctx, "read_rom: row %d data 0x%02x\n", row, data);
+
+ return data;
+}
+
+static void max2175_load_from_rom(struct max2175 *ctx)
+{
+ u8 data = 0;
+
+ data = max2175_read_rom(ctx, 0);
+ ctx->rom_bbf_bw_am = data & 0x0f;
+ max2175_write_bits(ctx, 81, 3, 0, data >> 4);
+
+ data = max2175_read_rom(ctx, 1);
+ ctx->rom_bbf_bw_fm = data & 0x0f;
+ ctx->rom_bbf_bw_dab = data >> 4;
+
+ data = max2175_read_rom(ctx, 2);
+ max2175_write_bits(ctx, 82, 4, 0, data & 0x1f);
+ max2175_write_bits(ctx, 82, 7, 5, data >> 5);
+
+ data = max2175_read_rom(ctx, 3);
+ if (ctx->am_hiz) {
+ data &= 0x0f;
+ data |= (max2175_read_rom(ctx, 7) & 0x40) >> 2;
+ if (!data)
+ data |= 2;
+ } else {
+ data = (data & 0xf0) >> 4;
+ data |= (max2175_read_rom(ctx, 7) & 0x80) >> 3;
+ if (!data)
+ data |= 30;
+ }
+ max2175_write_bits(ctx, 80, 5, 0, data + 31);
+
+ data = max2175_read_rom(ctx, 6);
+ max2175_write_bits(ctx, 81, 7, 6, data >> 6);
+}
+
+static void max2175_load_full_fm_eu_1p0(struct max2175 *ctx)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(full_fm_eu_1p0); i++)
+ max2175_write(ctx, i + 1, full_fm_eu_1p0[i]);
+
+ usleep_range(5000, 5500);
+ ctx->decim_ratio = 36;
+}
+
+static void max2175_load_full_fm_na_1p0(struct max2175 *ctx)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(full_fm_na_1p0); i++)
+ max2175_write(ctx, i + 1, full_fm_na_1p0[i]);
+
+ usleep_range(5000, 5500);
+ ctx->decim_ratio = 27;
+}
+
+static int max2175_core_init(struct max2175 *ctx, u32 refout_bits)
+{
+ int ret;
+
+ /* MAX2175 uses 36.864MHz clock for EU & 40.154MHz for NA region */
+ if (ctx->xtal_freq == MAX2175_EU_XTAL_FREQ)
+ max2175_load_full_fm_eu_1p0(ctx);
+ else
+ max2175_load_full_fm_na_1p0(ctx);
+
+ /* The default settings assume master */
+ if (!ctx->master)
+ max2175_write_bit(ctx, 30, 7, 1);
+
+ mxm_dbg(ctx, "refout_bits %u\n", refout_bits);
+
+ /* Set REFOUT */
+ max2175_write_bits(ctx, 56, 7, 5, refout_bits);
+
+ /* ADC Reset */
+ max2175_write_bit(ctx, 99, 1, 0);
+ usleep_range(1000, 1500);
+ max2175_write_bit(ctx, 99, 1, 1);
+
+ /* Load ADC preset values */
+ max2175_load_adc_presets(ctx);
+
+ /* Initialize the power management state machine */
+ ret = max2175_init_power_manager(ctx);
+ if (ret)
+ return ret;
+
+ /* Recalibrate ADC */
+ ret = max2175_recalibrate_adc(ctx);
+ if (ret)
+ return ret;
+
+ /* Load ROM values to appropriate registers */
+ max2175_load_from_rom(ctx);
+
+ if (ctx->xtal_freq == MAX2175_EU_XTAL_FREQ) {
+ /* Load FIR coefficients into bank 0 */
+ max2175_set_filter_coeffs(ctx, MAX2175_CH_MSEL, 0,
+ ch_coeff_fmeu);
+ max2175_set_filter_coeffs(ctx, MAX2175_EQ_MSEL, 0,
+ eq_coeff_fmeu1_ra02_m6db);
+ } else {
+ /* Load FIR coefficients into bank 0 */
+ max2175_set_filter_coeffs(ctx, MAX2175_CH_MSEL, 0,
+ ch_coeff_fmna);
+ max2175_set_filter_coeffs(ctx, MAX2175_EQ_MSEL, 0,
+ eq_coeff_fmna1_ra02_m6db);
+ }
+ mxm_dbg(ctx, "core initialized\n");
+
+ return 0;
+}
+
+static void max2175_s_ctrl_rx_mode(struct max2175 *ctx, u32 rx_mode)
+{
+ /* Load mode. Range check already done */
+ max2175_set_rx_mode(ctx, rx_mode);
+
+ mxm_dbg(ctx, "s_ctrl_rx_mode: %u curr freq %u\n", rx_mode, ctx->freq);
+
+ /* Check if current freq valid for mode & update */
+ if (max2175_freq_rx_mode_valid(ctx, rx_mode, ctx->freq))
+ max2175_tune_rf_freq(ctx, ctx->freq, ctx->hsls->cur.val);
+ else
+ /* Use default freq of mode if current freq is not valid */
+ max2175_tune_rf_freq(ctx, ctx->rx_modes[rx_mode].freq,
+ ctx->hsls->cur.val);
+}
+
+static int max2175_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct max2175 *ctx = max2175_from_ctrl_hdl(ctrl->handler);
+
+ mxm_dbg(ctx, "s_ctrl: id 0x%x, val %u\n", ctrl->id, ctrl->val);
+ switch (ctrl->id) {
+ case V4L2_CID_MAX2175_I2S_ENABLE:
+ max2175_i2s_enable(ctx, ctrl->val);
+ break;
+ case V4L2_CID_MAX2175_HSLS:
+ max2175_set_hsls(ctx, ctrl->val);
+ break;
+ case V4L2_CID_MAX2175_RX_MODE:
+ max2175_s_ctrl_rx_mode(ctx, ctrl->val);
+ break;
+ }
+
+ return 0;
+}
+
+static u32 max2175_get_lna_gain(struct max2175 *ctx)
+{
+ enum max2175_band band = max2175_read_bits(ctx, 5, 1, 0);
+
+ switch (band) {
+ case MAX2175_BAND_AM:
+ return max2175_read_bits(ctx, 51, 3, 0);
+ case MAX2175_BAND_FM:
+ return max2175_read_bits(ctx, 50, 3, 0);
+ case MAX2175_BAND_VHF:
+ return max2175_read_bits(ctx, 52, 5, 0);
+ default:
+ return 0;
+ }
+}
+
+static int max2175_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct max2175 *ctx = max2175_from_ctrl_hdl(ctrl->handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_RF_TUNER_LNA_GAIN:
+ ctrl->val = max2175_get_lna_gain(ctx);
+ break;
+ case V4L2_CID_RF_TUNER_IF_GAIN:
+ ctrl->val = max2175_read_bits(ctx, 49, 4, 0);
+ break;
+ case V4L2_CID_RF_TUNER_PLL_LOCK:
+ ctrl->val = (max2175_read_bits(ctx, 60, 7, 6) == 3);
+ break;
+ }
+
+ return 0;
+};
+
+static int max2175_set_freq_and_mode(struct max2175 *ctx, u32 freq)
+{
+ u32 rx_mode;
+ int ret;
+
+ /* Get band from frequency */
+ ret = max2175_rx_mode_from_freq(ctx, freq, &rx_mode);
+ if (ret)
+ return ret;
+
+ mxm_dbg(ctx, "set_freq_and_mode: freq %u rx_mode %d\n", freq, rx_mode);
+
+ /* Load mode */
+ max2175_set_rx_mode(ctx, rx_mode);
+ ctx->rx_mode->cur.val = rx_mode;
+
+ /* Tune to the new freq given */
+ return max2175_tune_rf_freq(ctx, freq, ctx->hsls->cur.val);
+}
+
+static int max2175_s_frequency(struct v4l2_subdev *sd,
+ const struct v4l2_frequency *vf)
+{
+ struct max2175 *ctx = max2175_from_sd(sd);
+ u32 freq;
+ int ret = 0;
+
+ mxm_dbg(ctx, "s_freq: new %u curr %u, mode_resolved %d\n",
+ vf->frequency, ctx->freq, ctx->mode_resolved);
+
+ if (vf->tuner != 0)
+ return -EINVAL;
+
+ freq = clamp(vf->frequency, ctx->bands_rf->rangelow,
+ ctx->bands_rf->rangehigh);
+
+ /* Check new freq valid for rx_mode if already resolved */
+ if (ctx->mode_resolved &&
+ max2175_freq_rx_mode_valid(ctx, ctx->rx_mode->cur.val, freq))
+ ret = max2175_tune_rf_freq(ctx, freq, ctx->hsls->cur.val);
+ else
+ /* Find default rx_mode for freq and tune to it */
+ ret = max2175_set_freq_and_mode(ctx, freq);
+
+ mxm_dbg(ctx, "s_freq: ret %d curr %u mode_resolved %d mode %u\n",
+ ret, ctx->freq, ctx->mode_resolved, ctx->rx_mode->cur.val);
+
+ return ret;
+}
+
+static int max2175_g_frequency(struct v4l2_subdev *sd,
+ struct v4l2_frequency *vf)
+{
+ struct max2175 *ctx = max2175_from_sd(sd);
+ int ret = 0;
+
+ if (vf->tuner != 0)
+ return -EINVAL;
+
+ /* RF freq */
+ vf->type = V4L2_TUNER_RF;
+ vf->frequency = ctx->freq;
+
+ return ret;
+}
+
+static int max2175_enum_freq_bands(struct v4l2_subdev *sd,
+ struct v4l2_frequency_band *band)
+{
+ struct max2175 *ctx = max2175_from_sd(sd);
+
+ if (band->tuner != 0 || band->index != 0)
+ return -EINVAL;
+
+ *band = *ctx->bands_rf;
+
+ return 0;
+}
+
+static int max2175_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
+{
+ struct max2175 *ctx = max2175_from_sd(sd);
+
+ if (vt->index > 0)
+ return -EINVAL;
+
+ strlcpy(vt->name, "RF", sizeof(vt->name));
+ vt->type = V4L2_TUNER_RF;
+ vt->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
+ vt->rangelow = ctx->bands_rf->rangelow;
+ vt->rangehigh = ctx->bands_rf->rangehigh;
+
+ return 0;
+}
+
+static int max2175_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt)
+{
+ /* Check tuner index is valid */
+ if (vt->index > 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_tuner_ops max2175_tuner_ops = {
+ .s_frequency = max2175_s_frequency,
+ .g_frequency = max2175_g_frequency,
+ .enum_freq_bands = max2175_enum_freq_bands,
+ .g_tuner = max2175_g_tuner,
+ .s_tuner = max2175_s_tuner,
+};
+
+static const struct v4l2_subdev_ops max2175_ops = {
+ .tuner = &max2175_tuner_ops,
+};
+
+static const struct v4l2_ctrl_ops max2175_ctrl_ops = {
+ .s_ctrl = max2175_s_ctrl,
+ .g_volatile_ctrl = max2175_g_volatile_ctrl,
+};
+
+/*
+ * I2S output enable/disable configuration. This is a private control.
+ * Refer to Documentation/media/v4l-drivers/max2175 for more details.
+ */
+static const struct v4l2_ctrl_config max2175_i2s_en = {
+ .ops = &max2175_ctrl_ops,
+ .id = V4L2_CID_MAX2175_I2S_ENABLE,
+ .name = "I2S Enable",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 1,
+ .is_private = 1,
+};
+
+/*
+ * HSLS value control LO freq adjacent location configuration.
+ * Refer to Documentation/media/v4l-drivers/max2175 for more details.
+ */
+static const struct v4l2_ctrl_config max2175_hsls = {
+ .ops = &max2175_ctrl_ops,
+ .id = V4L2_CID_MAX2175_HSLS,
+ .name = "HSLS Above/Below Desired",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 1,
+};
+
+/*
+ * Rx modes below are a set of preset configurations that decide the tuner's
+ * SCK and sample rate of transmission. They are separate for the EU and NA
+ * regions.
+ * Refer to Documentation/media/v4l-drivers/max2175 for more details.
+ */
+static const char * const max2175_ctrl_eu_rx_modes[] = {
+ [MAX2175_EU_FM_1_2] = "EU FM 1.2",
+ [MAX2175_DAB_1_2] = "DAB 1.2",
+};
+
+static const char * const max2175_ctrl_na_rx_modes[] = {
+ [MAX2175_NA_FM_1_0] = "NA FM 1.0",
+ [MAX2175_NA_FM_2_0] = "NA FM 2.0",
+};
+
+static const struct v4l2_ctrl_config max2175_eu_rx_mode = {
+ .ops = &max2175_ctrl_ops,
+ .id = V4L2_CID_MAX2175_RX_MODE,
+ .name = "RX Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = ARRAY_SIZE(max2175_ctrl_eu_rx_modes) - 1,
+ .def = 0,
+ .qmenu = max2175_ctrl_eu_rx_modes,
+};
+
+static const struct v4l2_ctrl_config max2175_na_rx_mode = {
+ .ops = &max2175_ctrl_ops,
+ .id = V4L2_CID_MAX2175_RX_MODE,
+ .name = "RX Mode",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .max = ARRAY_SIZE(max2175_ctrl_na_rx_modes) - 1,
+ .def = 0,
+ .qmenu = max2175_ctrl_na_rx_modes,
+};
+
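+/*
+ * Convert the "maxim,refout-load" capacitance (in pF) to register bits:
+ * 0-40pF map to 0-4, 60pF and 70pF map to 5 and 6; other values are
+ * rejected.
+ */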
+static int max2175_refout_load_to_bits(struct i2c_client *client, u32 load,
+ u32 *bits)
+{
+ if (load <= 40)
+ *bits = load / 10;
+ else if (load >= 60 && load <= 70)
+ *bits = load / 10 - 1;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int max2175_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ bool master = true, am_hiz = false;
+ u32 refout_load, refout_bits = 0; /* REFOUT disabled */
+ struct v4l2_ctrl_handler *hdl;
+ struct fwnode_handle *fwnode;
+ struct device_node *np;
+ struct v4l2_subdev *sd;
+ struct regmap *regmap;
+ struct max2175 *ctx;
+ struct clk *clk;
+ int ret;
+
+ /* Parse DT properties */
+ np = of_parse_phandle(client->dev.of_node, "maxim,master", 0);
+ if (np) {
+ master = false; /* Slave tuner */
+ of_node_put(np);
+ }
+
+ fwnode = of_fwnode_handle(client->dev.of_node);
+ if (fwnode_property_present(fwnode, "maxim,am-hiz-filter"))
+ am_hiz = true;
+
+ if (!fwnode_property_read_u32(fwnode, "maxim,refout-load",
+ &refout_load)) {
+ ret = max2175_refout_load_to_bits(client, refout_load,
+ &refout_bits);
+ if (ret) {
+ dev_err(&client->dev, "invalid refout_load %u\n",
+ refout_load);
+ return -EINVAL;
+ }
+ }
+
+ clk = devm_clk_get(&client->dev, NULL);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&client->dev, "cannot get clock %d\n", ret);
+ return -ENODEV;
+ }
+
+ regmap = devm_regmap_init_i2c(client, &max2175_regmap_config);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(&client->dev, "regmap init failed %d\n", ret);
+ return -ENODEV;
+ }
+
+ /* Alloc tuner context */
+ ctx = devm_kzalloc(&client->dev, sizeof(*ctx), GFP_KERNEL);
+ if (ctx == NULL)
+ return -ENOMEM;
+
+ sd = &ctx->sd;
+ ctx->master = master;
+ ctx->am_hiz = am_hiz;
+ ctx->mode_resolved = false;
+ ctx->regmap = regmap;
+ ctx->xtal_freq = clk_get_rate(clk);
+ dev_info(&client->dev, "xtal freq %luHz\n", ctx->xtal_freq);
+
+ v4l2_i2c_subdev_init(sd, client, &max2175_ops);
+ ctx->client = client;
+
+ sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Controls */
+ hdl = &ctx->ctrl_hdl;
+ ret = v4l2_ctrl_handler_init(hdl, 7);
+ if (ret)
+ return ret;
+
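+	/* LNA gain, IF gain and PLL lock are read-only, volatile status controls */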
+ ctx->lna_gain = v4l2_ctrl_new_std(hdl, &max2175_ctrl_ops,
+ V4L2_CID_RF_TUNER_LNA_GAIN,
+ 0, 63, 1, 0);
+ ctx->lna_gain->flags |= (V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_READ_ONLY);
+ ctx->if_gain = v4l2_ctrl_new_std(hdl, &max2175_ctrl_ops,
+ V4L2_CID_RF_TUNER_IF_GAIN,
+ 0, 31, 1, 0);
+ ctx->if_gain->flags |= (V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_READ_ONLY);
+ ctx->pll_lock = v4l2_ctrl_new_std(hdl, &max2175_ctrl_ops,
+ V4L2_CID_RF_TUNER_PLL_LOCK,
+ 0, 1, 1, 0);
+ ctx->pll_lock->flags |= (V4L2_CTRL_FLAG_VOLATILE |
+ V4L2_CTRL_FLAG_READ_ONLY);
+ ctx->i2s_en = v4l2_ctrl_new_custom(hdl, &max2175_i2s_en, NULL);
+ ctx->hsls = v4l2_ctrl_new_custom(hdl, &max2175_hsls, NULL);
+
+ if (ctx->xtal_freq == MAX2175_EU_XTAL_FREQ) {
+ ctx->rx_mode = v4l2_ctrl_new_custom(hdl,
+ &max2175_eu_rx_mode, NULL);
+ ctx->rx_modes = eu_rx_modes;
+ ctx->bands_rf = &eu_bands_rf;
+ } else {
+ ctx->rx_mode = v4l2_ctrl_new_custom(hdl,
+ &max2175_na_rx_mode, NULL);
+ ctx->rx_modes = na_rx_modes;
+ ctx->bands_rf = &na_bands_rf;
+ }
+ ctx->sd.ctrl_handler = &ctx->ctrl_hdl;
+
+ /* Set the defaults */
+ ctx->freq = ctx->bands_rf->rangelow;
+
+ /* Register subdev */
+ ret = v4l2_async_register_subdev(sd);
+ if (ret) {
+ dev_err(&client->dev, "register subdev failed\n");
+ goto err_reg;
+ }
+
+ /* Initialize device */
+ ret = max2175_core_init(ctx, refout_bits);
+ if (ret)
+ goto err_init;
+
+ ret = v4l2_ctrl_handler_setup(hdl);
+ if (ret)
+ goto err_init;
+
+ return 0;
+
+err_init:
+ v4l2_async_unregister_subdev(sd);
+err_reg:
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+
+ return ret;
+}
+
+static int max2175_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct max2175 *ctx = max2175_from_sd(sd);
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
+ v4l2_async_unregister_subdev(sd);
+
+ return 0;
+}
+
+static const struct i2c_device_id max2175_id[] = {
+ { DRIVER_NAME, 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, max2175_id);
+
+static const struct of_device_id max2175_of_ids[] = {
+ { .compatible = "maxim,max2175", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max2175_of_ids);
+
+static struct i2c_driver max2175_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = max2175_of_ids,
+ },
+ .probe = max2175_probe,
+ .remove = max2175_remove,
+ .id_table = max2175_id,
+};
+
+module_i2c_driver(max2175_driver);
+
+MODULE_DESCRIPTION("Maxim MAX2175 RF to Bits tuner driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ramesh Shanmugasundaram <[email protected]>");
diff --git a/drivers/media/i2c/max2175.h b/drivers/media/i2c/max2175.h
new file mode 100644
index 000000000000..eb43373ce7e2
--- /dev/null
+++ b/drivers/media/i2c/max2175.h
@@ -0,0 +1,109 @@
+/*
+ * Maxim Integrated MAX2175 RF to Bits tuner driver
+ *
+ * This driver & most of the hard-coded values are based on the reference
+ * application delivered by Maxim for this device.
+ *
+ * Copyright (C) 2016 Maxim Integrated Products
+ * Copyright (C) 2017 Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MAX2175_H__
+#define __MAX2175_H__
+
+#define MAX2175_EU_XTAL_FREQ 36864000 /* In Hz */
+#define MAX2175_NA_XTAL_FREQ 40186125 /* In Hz */
+
+enum max2175_region {
+ MAX2175_REGION_EU = 0, /* Europe */
+ MAX2175_REGION_NA, /* North America */
+};
+
+enum max2175_band {
+ MAX2175_BAND_AM = 0,
+ MAX2175_BAND_FM,
+ MAX2175_BAND_VHF,
+ MAX2175_BAND_L,
+};
+
+enum max2175_eu_mode {
+ /* EU modes */
+ MAX2175_EU_FM_1_2 = 0,
+ MAX2175_DAB_1_2,
+
+ /*
+ * Other possible modes to add in future
+ * MAX2175_DAB_1_0,
+ * MAX2175_DAB_1_3,
+ * MAX2175_EU_FM_2_2,
+ * MAX2175_EU_FMHD_4_0,
+ * MAX2175_EU_AM_1_0,
+ * MAX2175_EU_AM_2_2,
+ */
+};
+
+enum max2175_na_mode {
+ /* NA modes */
+ MAX2175_NA_FM_1_0 = 0,
+ MAX2175_NA_FM_2_0,
+
+ /*
+ * Other possible modes to add in future
+ * MAX2175_NA_FMHD_1_0,
+ * MAX2175_NA_FMHD_1_2,
+ * MAX2175_NA_AM_1_0,
+ * MAX2175_NA_AM_1_2,
+ */
+};
+
+/* Supported I2S modes */
+enum {
+ MAX2175_I2S_MODE0 = 0,
+ MAX2175_I2S_MODE1,
+ MAX2175_I2S_MODE2,
+ MAX2175_I2S_MODE3,
+ MAX2175_I2S_MODE4,
+};
+
+/* Coefficient table groups */
+enum {
+ MAX2175_CH_MSEL = 0,
+ MAX2175_EQ_MSEL,
+ MAX2175_AA_MSEL,
+};
+
+/* HSLS LO injection polarity */
+enum {
+ MAX2175_LO_BELOW_DESIRED = 0,
+ MAX2175_LO_ABOVE_DESIRED,
+};
+
+/* Channel FSM modes */
+enum max2175_csm_mode {
+ MAX2175_LOAD_TO_BUFFER = 0,
+ MAX2175_PRESET_TUNE,
+ MAX2175_SEARCH,
+ MAX2175_AF_UPDATE,
+ MAX2175_JUMP_FAST_TUNE,
+ MAX2175_CHECK,
+ MAX2175_LOAD_AND_SWAP,
+ MAX2175_END,
+ MAX2175_BUFFER_PLUS_PRESET_TUNE,
+ MAX2175_BUFFER_PLUS_SEARCH,
+ MAX2175_BUFFER_PLUS_AF_UPDATE,
+ MAX2175_BUFFER_PLUS_JUMP_FAST_TUNE,
+ MAX2175_BUFFER_PLUS_CHECK,
+ MAX2175_BUFFER_PLUS_LOAD_AND_SWAP,
+ MAX2175_NO_ACTION
+};
+
+#endif /* __MAX2175_H__ */
diff --git a/drivers/media/i2c/msp3400-kthreads.c b/drivers/media/i2c/msp3400-kthreads.c
index 11fc593ed908..4dd01e9f553b 100644
--- a/drivers/media/i2c/msp3400-kthreads.c
+++ b/drivers/media/i2c/msp3400-kthreads.c
@@ -655,6 +655,7 @@ restart:
break;
case 0: /* 4.5 */
state->detected_std = V4L2_STD_MN;
+ /* fall-through */
default:
no_second:
state->second = msp3400c_carrier_detect_main[max1].cdo;
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 2e7a6e62a358..8a430640c85d 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -19,6 +19,7 @@
#include <linux/log2.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
@@ -28,7 +29,7 @@
#include <media/i2c/mt9v032.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
/* The first four rows are black rows. The active area spans 753x481 pixels. */
@@ -979,7 +980,7 @@ static struct mt9v032_platform_data *
mt9v032_get_pdata(struct i2c_client *client)
{
struct mt9v032_platform_data *pdata = NULL;
- struct v4l2_of_endpoint endpoint;
+ struct v4l2_fwnode_endpoint endpoint;
struct device_node *np;
struct property *prop;
@@ -990,7 +991,7 @@ mt9v032_get_pdata(struct i2c_client *client)
if (!np)
return NULL;
- if (v4l2_of_parse_endpoint(np, &endpoint) < 0)
+ if (v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &endpoint) < 0)
goto done;
pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
new file mode 100644
index 000000000000..86550d8ddfee
--- /dev/null
+++ b/drivers/media/i2c/ov13858.c
@@ -0,0 +1,1816 @@
+/*
+ * Copyright (c) 2017 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#define OV13858_REG_VALUE_08BIT 1
+#define OV13858_REG_VALUE_16BIT 2
+#define OV13858_REG_VALUE_24BIT 3
+
+#define OV13858_REG_MODE_SELECT 0x0100
+#define OV13858_MODE_STANDBY 0x00
+#define OV13858_MODE_STREAMING 0x01
+
+#define OV13858_REG_SOFTWARE_RST 0x0103
+#define OV13858_SOFTWARE_RST 0x01
+
+/* PLL1 generates PCLK and MIPI_PHY_CLK */
+#define OV13858_REG_PLL1_CTRL_0 0x0300
+#define OV13858_REG_PLL1_CTRL_1 0x0301
+#define OV13858_REG_PLL1_CTRL_2 0x0302
+#define OV13858_REG_PLL1_CTRL_3 0x0303
+#define OV13858_REG_PLL1_CTRL_4 0x0304
+#define OV13858_REG_PLL1_CTRL_5 0x0305
+
+/* PLL2 generates DAC_CLK, SCLK and SRAM_CLK */
+#define OV13858_REG_PLL2_CTRL_B 0x030b
+#define OV13858_REG_PLL2_CTRL_C 0x030c
+#define OV13858_REG_PLL2_CTRL_D 0x030d
+#define OV13858_REG_PLL2_CTRL_E 0x030e
+#define OV13858_REG_PLL2_CTRL_F 0x030f
+#define OV13858_REG_PLL2_CTRL_12 0x0312
+#define OV13858_REG_MIPI_SC_CTRL0 0x3016
+#define OV13858_REG_MIPI_SC_CTRL1 0x3022
+
+/* Chip ID */
+#define OV13858_REG_CHIP_ID 0x300a
+#define OV13858_CHIP_ID 0x00d855
+
+/* V_TIMING internal */
+#define OV13858_REG_VTS 0x380e
+#define OV13858_VTS_30FPS 0x0c8e /* 30 fps */
+#define OV13858_VTS_60FPS 0x0648 /* 60 fps */
+#define OV13858_VTS_MAX 0x7fff
+#define OV13858_VBLANK_MIN 56
+
+/* HBLANK control - read only */
+#define OV13858_PPL_540MHZ 2244
+#define OV13858_PPL_1080MHZ 4488
+
+/* Exposure control */
+#define OV13858_REG_EXPOSURE 0x3500
+#define OV13858_EXPOSURE_MIN 4
+#define OV13858_EXPOSURE_MAX (OV13858_VTS_MAX - 8)
+#define OV13858_EXPOSURE_STEP 1
+#define OV13858_EXPOSURE_DEFAULT 0x640
+
+/* Analog gain control */
+#define OV13858_REG_ANALOG_GAIN 0x3508
+#define OV13858_ANA_GAIN_MIN 0
+#define OV13858_ANA_GAIN_MAX 0x1fff
+#define OV13858_ANA_GAIN_STEP 1
+#define OV13858_ANA_GAIN_DEFAULT 0x80
+
+/* Digital gain control */
+#define OV13858_REG_DIGITAL_GAIN 0x350a
+#define OV13858_DGTL_GAIN_MASK 0xf3
+#define OV13858_DGTL_GAIN_SHIFT 2
+#define OV13858_DGTL_GAIN_MIN 1
+#define OV13858_DGTL_GAIN_MAX 4
+#define OV13858_DGTL_GAIN_STEP 1
+#define OV13858_DGTL_GAIN_DEFAULT 1
+
+/* Test Pattern Control */
+#define OV13858_REG_TEST_PATTERN 0x4503
+#define OV13858_TEST_PATTERN_ENABLE BIT(7)
+#define OV13858_TEST_PATTERN_MASK 0xfc
+
+/* Number of frames to skip */
+#define OV13858_NUM_OF_SKIP_FRAMES 2
+
+struct ov13858_reg {
+ u16 address;
+ u8 val;
+};
+
+struct ov13858_reg_list {
+ u32 num_of_regs;
+ const struct ov13858_reg *regs;
+};
+
+/* Link frequency config */
+struct ov13858_link_freq_config {
+ u32 pixel_rate;
+ u32 pixels_per_line;
+
+ /* PLL registers for this link frequency */
+ struct ov13858_reg_list reg_list;
+};
+
+/* Mode: resolution and related config & values */
+struct ov13858_mode {
+ /* Frame width */
+ u32 width;
+ /* Frame height */
+ u32 height;
+
+ /* V-timing */
+ u32 vts;
+
+ /* Index of Link frequency config to be used */
+ u32 link_freq_index;
+ /* Default register values */
+ struct ov13858_reg_list reg_list;
+};
+
+/* 4224x3136 needs 1080Mbps/lane, 4 lanes */
+static const struct ov13858_reg mipi_data_rate_1080mbps[] = {
+ /* PLL1 registers */
+ {OV13858_REG_PLL1_CTRL_0, 0x07},
+ {OV13858_REG_PLL1_CTRL_1, 0x01},
+ {OV13858_REG_PLL1_CTRL_2, 0xc2},
+ {OV13858_REG_PLL1_CTRL_3, 0x00},
+ {OV13858_REG_PLL1_CTRL_4, 0x00},
+ {OV13858_REG_PLL1_CTRL_5, 0x01},
+
+ /* PLL2 registers */
+ {OV13858_REG_PLL2_CTRL_B, 0x05},
+ {OV13858_REG_PLL2_CTRL_C, 0x01},
+ {OV13858_REG_PLL2_CTRL_D, 0x0e},
+ {OV13858_REG_PLL2_CTRL_E, 0x05},
+ {OV13858_REG_PLL2_CTRL_F, 0x01},
+ {OV13858_REG_PLL2_CTRL_12, 0x01},
+ {OV13858_REG_MIPI_SC_CTRL0, 0x72},
+ {OV13858_REG_MIPI_SC_CTRL1, 0x01},
+};
+
+/*
+ * 2112x1568, 2112x1188, 1056x784 need 540Mbps/lane,
+ * 4 lanes
+ */
+static const struct ov13858_reg mipi_data_rate_540mbps[] = {
+ /* PLL1 registers */
+ {OV13858_REG_PLL1_CTRL_0, 0x07},
+ {OV13858_REG_PLL1_CTRL_1, 0x01},
+ {OV13858_REG_PLL1_CTRL_2, 0xc2},
+ {OV13858_REG_PLL1_CTRL_3, 0x01},
+ {OV13858_REG_PLL1_CTRL_4, 0x00},
+ {OV13858_REG_PLL1_CTRL_5, 0x01},
+
+ /* PLL2 registers */
+ {OV13858_REG_PLL2_CTRL_B, 0x05},
+ {OV13858_REG_PLL2_CTRL_C, 0x01},
+ {OV13858_REG_PLL2_CTRL_D, 0x0e},
+ {OV13858_REG_PLL2_CTRL_E, 0x05},
+ {OV13858_REG_PLL2_CTRL_F, 0x01},
+ {OV13858_REG_PLL2_CTRL_12, 0x01},
+ {OV13858_REG_MIPI_SC_CTRL0, 0x72},
+ {OV13858_REG_MIPI_SC_CTRL1, 0x01},
+};
+
+static const struct ov13858_reg mode_4224x3136_regs[] = {
+ {0x3013, 0x32},
+ {0x301b, 0xf0},
+ {0x301f, 0xd0},
+ {0x3106, 0x15},
+ {0x3107, 0x23},
+ {0x350a, 0x00},
+ {0x350e, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3600, 0x2b},
+ {0x3601, 0x52},
+ {0x3602, 0x60},
+ {0x3612, 0x05},
+ {0x3613, 0xa4},
+ {0x3620, 0x80},
+ {0x3621, 0x10},
+ {0x3622, 0x30},
+ {0x3624, 0x1c},
+ {0x3640, 0x10},
+ {0x3641, 0x70},
+ {0x3661, 0x80},
+ {0x3662, 0x12},
+ {0x3664, 0x73},
+ {0x3665, 0xa7},
+ {0x366e, 0xff},
+ {0x366f, 0xf4},
+ {0x3674, 0x00},
+ {0x3679, 0x0c},
+ {0x367f, 0x01},
+ {0x3680, 0x0c},
+ {0x3681, 0x50},
+ {0x3682, 0x50},
+ {0x3683, 0xa9},
+ {0x3684, 0xa9},
+ {0x3709, 0x5f},
+ {0x3714, 0x24},
+ {0x371a, 0x3e},
+ {0x3737, 0x04},
+ {0x3738, 0xcc},
+ {0x3739, 0x12},
+ {0x373d, 0x26},
+ {0x3764, 0x20},
+ {0x3765, 0x20},
+ {0x37a1, 0x36},
+ {0x37a8, 0x3b},
+ {0x37ab, 0x31},
+ {0x37c2, 0x04},
+ {0x37c3, 0xf1},
+ {0x37c5, 0x00},
+ {0x37d8, 0x03},
+ {0x37d9, 0x0c},
+ {0x37da, 0xc2},
+ {0x37dc, 0x02},
+ {0x37e0, 0x00},
+ {0x37e1, 0x0a},
+ {0x37e2, 0x14},
+ {0x37e3, 0x04},
+ {0x37e4, 0x2a},
+ {0x37e5, 0x03},
+ {0x37e6, 0x04},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x10},
+ {0x3805, 0x9f},
+ {0x3806, 0x0c},
+ {0x3807, 0x5f},
+ {0x3808, 0x10},
+ {0x3809, 0x80},
+ {0x380a, 0x0c},
+ {0x380b, 0x40},
+ {0x380c, 0x04},
+ {0x380d, 0x62},
+ {0x380e, 0x0c},
+ {0x380f, 0x8e},
+ {0x3811, 0x04},
+ {0x3813, 0x05},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3816, 0x01},
+ {0x3817, 0x01},
+ {0x3820, 0xa8},
+ {0x3821, 0x00},
+ {0x3822, 0xc2},
+ {0x3823, 0x18},
+ {0x3826, 0x11},
+ {0x3827, 0x1c},
+ {0x3829, 0x03},
+ {0x3832, 0x00},
+ {0x3c80, 0x00},
+ {0x3c87, 0x01},
+ {0x3c8c, 0x19},
+ {0x3c8d, 0x1c},
+ {0x3c90, 0x00},
+ {0x3c91, 0x00},
+ {0x3c92, 0x00},
+ {0x3c93, 0x00},
+ {0x3c94, 0x40},
+ {0x3c95, 0x54},
+ {0x3c96, 0x34},
+ {0x3c97, 0x04},
+ {0x3c98, 0x00},
+ {0x3d8c, 0x73},
+ {0x3d8d, 0xc0},
+ {0x3f00, 0x0b},
+ {0x3f03, 0x00},
+ {0x4001, 0xe0},
+ {0x4008, 0x00},
+ {0x4009, 0x0f},
+ {0x4011, 0xf0},
+ {0x4017, 0x08},
+ {0x4050, 0x04},
+ {0x4051, 0x0b},
+ {0x4052, 0x00},
+ {0x4053, 0x80},
+ {0x4054, 0x00},
+ {0x4055, 0x80},
+ {0x4056, 0x00},
+ {0x4057, 0x80},
+ {0x4058, 0x00},
+ {0x4059, 0x80},
+ {0x405e, 0x20},
+ {0x4500, 0x07},
+ {0x4503, 0x00},
+ {0x450a, 0x04},
+ {0x4809, 0x04},
+ {0x480c, 0x12},
+ {0x481f, 0x30},
+ {0x4833, 0x10},
+ {0x4837, 0x0e},
+ {0x4902, 0x01},
+ {0x4d00, 0x03},
+ {0x4d01, 0xc9},
+ {0x4d02, 0xbc},
+ {0x4d03, 0xd7},
+ {0x4d04, 0xf0},
+ {0x4d05, 0xa2},
+ {0x5000, 0xfd},
+ {0x5001, 0x01},
+ {0x5040, 0x39},
+ {0x5041, 0x10},
+ {0x5042, 0x10},
+ {0x5043, 0x84},
+ {0x5044, 0x62},
+ {0x5180, 0x00},
+ {0x5181, 0x10},
+ {0x5182, 0x02},
+ {0x5183, 0x0f},
+ {0x5200, 0x1b},
+ {0x520b, 0x07},
+ {0x520c, 0x0f},
+ {0x5300, 0x04},
+ {0x5301, 0x0c},
+ {0x5302, 0x0c},
+ {0x5303, 0x0f},
+ {0x5304, 0x00},
+ {0x5305, 0x70},
+ {0x5306, 0x00},
+ {0x5307, 0x80},
+ {0x5308, 0x00},
+ {0x5309, 0xa5},
+ {0x530a, 0x00},
+ {0x530b, 0xd3},
+ {0x530c, 0x00},
+ {0x530d, 0xf0},
+ {0x530e, 0x01},
+ {0x530f, 0x10},
+ {0x5310, 0x01},
+ {0x5311, 0x20},
+ {0x5312, 0x01},
+ {0x5313, 0x20},
+ {0x5314, 0x01},
+ {0x5315, 0x20},
+ {0x5316, 0x08},
+ {0x5317, 0x08},
+ {0x5318, 0x10},
+ {0x5319, 0x88},
+ {0x531a, 0x88},
+ {0x531b, 0xa9},
+ {0x531c, 0xaa},
+ {0x531d, 0x0a},
+ {0x5405, 0x02},
+ {0x5406, 0x67},
+ {0x5407, 0x01},
+ {0x5408, 0x4a},
+};
+
+static const struct ov13858_reg mode_2112x1568_regs[] = {
+ {0x3013, 0x32},
+ {0x301b, 0xf0},
+ {0x301f, 0xd0},
+ {0x3106, 0x15},
+ {0x3107, 0x23},
+ {0x350a, 0x00},
+ {0x350e, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3600, 0x2b},
+ {0x3601, 0x52},
+ {0x3602, 0x60},
+ {0x3612, 0x05},
+ {0x3613, 0xa4},
+ {0x3620, 0x80},
+ {0x3621, 0x10},
+ {0x3622, 0x30},
+ {0x3624, 0x1c},
+ {0x3640, 0x10},
+ {0x3641, 0x70},
+ {0x3661, 0x80},
+ {0x3662, 0x10},
+ {0x3664, 0x73},
+ {0x3665, 0xa7},
+ {0x366e, 0xff},
+ {0x366f, 0xf4},
+ {0x3674, 0x00},
+ {0x3679, 0x0c},
+ {0x367f, 0x01},
+ {0x3680, 0x0c},
+ {0x3681, 0x50},
+ {0x3682, 0x50},
+ {0x3683, 0xa9},
+ {0x3684, 0xa9},
+ {0x3709, 0x5f},
+ {0x3714, 0x28},
+ {0x371a, 0x3e},
+ {0x3737, 0x08},
+ {0x3738, 0xcc},
+ {0x3739, 0x20},
+ {0x373d, 0x26},
+ {0x3764, 0x20},
+ {0x3765, 0x20},
+ {0x37a1, 0x36},
+ {0x37a8, 0x3b},
+ {0x37ab, 0x31},
+ {0x37c2, 0x14},
+ {0x37c3, 0xf1},
+ {0x37c5, 0x00},
+ {0x37d8, 0x03},
+ {0x37d9, 0x0c},
+ {0x37da, 0xc2},
+ {0x37dc, 0x02},
+ {0x37e0, 0x00},
+ {0x37e1, 0x0a},
+ {0x37e2, 0x14},
+ {0x37e3, 0x08},
+ {0x37e4, 0x38},
+ {0x37e5, 0x03},
+ {0x37e6, 0x08},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x10},
+ {0x3805, 0x9f},
+ {0x3806, 0x0c},
+ {0x3807, 0x5f},
+ {0x3808, 0x08},
+ {0x3809, 0x40},
+ {0x380a, 0x06},
+ {0x380b, 0x20},
+ {0x380c, 0x04},
+ {0x380d, 0x62},
+ {0x380e, 0x0c},
+ {0x380f, 0x8e},
+ {0x3811, 0x04},
+ {0x3813, 0x05},
+ {0x3814, 0x03},
+ {0x3815, 0x01},
+ {0x3816, 0x03},
+ {0x3817, 0x01},
+ {0x3820, 0xab},
+ {0x3821, 0x00},
+ {0x3822, 0xc2},
+ {0x3823, 0x18},
+ {0x3826, 0x04},
+ {0x3827, 0x90},
+ {0x3829, 0x07},
+ {0x3832, 0x00},
+ {0x3c80, 0x00},
+ {0x3c87, 0x01},
+ {0x3c8c, 0x19},
+ {0x3c8d, 0x1c},
+ {0x3c90, 0x00},
+ {0x3c91, 0x00},
+ {0x3c92, 0x00},
+ {0x3c93, 0x00},
+ {0x3c94, 0x40},
+ {0x3c95, 0x54},
+ {0x3c96, 0x34},
+ {0x3c97, 0x04},
+ {0x3c98, 0x00},
+ {0x3d8c, 0x73},
+ {0x3d8d, 0xc0},
+ {0x3f00, 0x0b},
+ {0x3f03, 0x00},
+ {0x4001, 0xe0},
+ {0x4008, 0x00},
+ {0x4009, 0x0d},
+ {0x4011, 0xf0},
+ {0x4017, 0x08},
+ {0x4050, 0x04},
+ {0x4051, 0x0b},
+ {0x4052, 0x00},
+ {0x4053, 0x80},
+ {0x4054, 0x00},
+ {0x4055, 0x80},
+ {0x4056, 0x00},
+ {0x4057, 0x80},
+ {0x4058, 0x00},
+ {0x4059, 0x80},
+ {0x405e, 0x20},
+ {0x4500, 0x07},
+ {0x4503, 0x00},
+ {0x450a, 0x04},
+ {0x4809, 0x04},
+ {0x480c, 0x12},
+ {0x481f, 0x30},
+ {0x4833, 0x10},
+ {0x4837, 0x1c},
+ {0x4902, 0x01},
+ {0x4d00, 0x03},
+ {0x4d01, 0xc9},
+ {0x4d02, 0xbc},
+ {0x4d03, 0xd7},
+ {0x4d04, 0xf0},
+ {0x4d05, 0xa2},
+ {0x5000, 0xfd},
+ {0x5001, 0x01},
+ {0x5040, 0x39},
+ {0x5041, 0x10},
+ {0x5042, 0x10},
+ {0x5043, 0x84},
+ {0x5044, 0x62},
+ {0x5180, 0x00},
+ {0x5181, 0x10},
+ {0x5182, 0x02},
+ {0x5183, 0x0f},
+ {0x5200, 0x1b},
+ {0x520b, 0x07},
+ {0x520c, 0x0f},
+ {0x5300, 0x04},
+ {0x5301, 0x0c},
+ {0x5302, 0x0c},
+ {0x5303, 0x0f},
+ {0x5304, 0x00},
+ {0x5305, 0x70},
+ {0x5306, 0x00},
+ {0x5307, 0x80},
+ {0x5308, 0x00},
+ {0x5309, 0xa5},
+ {0x530a, 0x00},
+ {0x530b, 0xd3},
+ {0x530c, 0x00},
+ {0x530d, 0xf0},
+ {0x530e, 0x01},
+ {0x530f, 0x10},
+ {0x5310, 0x01},
+ {0x5311, 0x20},
+ {0x5312, 0x01},
+ {0x5313, 0x20},
+ {0x5314, 0x01},
+ {0x5315, 0x20},
+ {0x5316, 0x08},
+ {0x5317, 0x08},
+ {0x5318, 0x10},
+ {0x5319, 0x88},
+ {0x531a, 0x88},
+ {0x531b, 0xa9},
+ {0x531c, 0xaa},
+ {0x531d, 0x0a},
+ {0x5405, 0x02},
+ {0x5406, 0x67},
+ {0x5407, 0x01},
+ {0x5408, 0x4a},
+};
+
+static const struct ov13858_reg mode_2112x1188_regs[] = {
+ {0x3013, 0x32},
+ {0x301b, 0xf0},
+ {0x301f, 0xd0},
+ {0x3106, 0x15},
+ {0x3107, 0x23},
+ {0x350a, 0x00},
+ {0x350e, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3600, 0x2b},
+ {0x3601, 0x52},
+ {0x3602, 0x60},
+ {0x3612, 0x05},
+ {0x3613, 0xa4},
+ {0x3620, 0x80},
+ {0x3621, 0x10},
+ {0x3622, 0x30},
+ {0x3624, 0x1c},
+ {0x3640, 0x10},
+ {0x3641, 0x70},
+ {0x3661, 0x80},
+ {0x3662, 0x10},
+ {0x3664, 0x73},
+ {0x3665, 0xa7},
+ {0x366e, 0xff},
+ {0x366f, 0xf4},
+ {0x3674, 0x00},
+ {0x3679, 0x0c},
+ {0x367f, 0x01},
+ {0x3680, 0x0c},
+ {0x3681, 0x50},
+ {0x3682, 0x50},
+ {0x3683, 0xa9},
+ {0x3684, 0xa9},
+ {0x3709, 0x5f},
+ {0x3714, 0x28},
+ {0x371a, 0x3e},
+ {0x3737, 0x08},
+ {0x3738, 0xcc},
+ {0x3739, 0x20},
+ {0x373d, 0x26},
+ {0x3764, 0x20},
+ {0x3765, 0x20},
+ {0x37a1, 0x36},
+ {0x37a8, 0x3b},
+ {0x37ab, 0x31},
+ {0x37c2, 0x14},
+ {0x37c3, 0xf1},
+ {0x37c5, 0x00},
+ {0x37d8, 0x03},
+ {0x37d9, 0x0c},
+ {0x37da, 0xc2},
+ {0x37dc, 0x02},
+ {0x37e0, 0x00},
+ {0x37e1, 0x0a},
+ {0x37e2, 0x14},
+ {0x37e3, 0x08},
+ {0x37e4, 0x38},
+ {0x37e5, 0x03},
+ {0x37e6, 0x08},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x01},
+ {0x3803, 0x84},
+ {0x3804, 0x10},
+ {0x3805, 0x9f},
+ {0x3806, 0x0a},
+ {0x3807, 0xd3},
+ {0x3808, 0x08},
+ {0x3809, 0x40},
+ {0x380a, 0x04},
+ {0x380b, 0xa4},
+ {0x380c, 0x04},
+ {0x380d, 0x62},
+ {0x380e, 0x0c},
+ {0x380f, 0x8e},
+ {0x3811, 0x08},
+ {0x3813, 0x03},
+ {0x3814, 0x03},
+ {0x3815, 0x01},
+ {0x3816, 0x03},
+ {0x3817, 0x01},
+ {0x3820, 0xab},
+ {0x3821, 0x00},
+ {0x3822, 0xc2},
+ {0x3823, 0x18},
+ {0x3826, 0x04},
+ {0x3827, 0x90},
+ {0x3829, 0x07},
+ {0x3832, 0x00},
+ {0x3c80, 0x00},
+ {0x3c87, 0x01},
+ {0x3c8c, 0x19},
+ {0x3c8d, 0x1c},
+ {0x3c90, 0x00},
+ {0x3c91, 0x00},
+ {0x3c92, 0x00},
+ {0x3c93, 0x00},
+ {0x3c94, 0x40},
+ {0x3c95, 0x54},
+ {0x3c96, 0x34},
+ {0x3c97, 0x04},
+ {0x3c98, 0x00},
+ {0x3d8c, 0x73},
+ {0x3d8d, 0xc0},
+ {0x3f00, 0x0b},
+ {0x3f03, 0x00},
+ {0x4001, 0xe0},
+ {0x4008, 0x00},
+ {0x4009, 0x0d},
+ {0x4011, 0xf0},
+ {0x4017, 0x08},
+ {0x4050, 0x04},
+ {0x4051, 0x0b},
+ {0x4052, 0x00},
+ {0x4053, 0x80},
+ {0x4054, 0x00},
+ {0x4055, 0x80},
+ {0x4056, 0x00},
+ {0x4057, 0x80},
+ {0x4058, 0x00},
+ {0x4059, 0x80},
+ {0x405e, 0x20},
+ {0x4500, 0x07},
+ {0x4503, 0x00},
+ {0x450a, 0x04},
+ {0x4809, 0x04},
+ {0x480c, 0x12},
+ {0x481f, 0x30},
+ {0x4833, 0x10},
+ {0x4837, 0x1c},
+ {0x4902, 0x01},
+ {0x4d00, 0x03},
+ {0x4d01, 0xc9},
+ {0x4d02, 0xbc},
+ {0x4d03, 0xd7},
+ {0x4d04, 0xf0},
+ {0x4d05, 0xa2},
+ {0x5000, 0xfd},
+ {0x5001, 0x01},
+ {0x5040, 0x39},
+ {0x5041, 0x10},
+ {0x5042, 0x10},
+ {0x5043, 0x84},
+ {0x5044, 0x62},
+ {0x5180, 0x00},
+ {0x5181, 0x10},
+ {0x5182, 0x02},
+ {0x5183, 0x0f},
+ {0x5200, 0x1b},
+ {0x520b, 0x07},
+ {0x520c, 0x0f},
+ {0x5300, 0x04},
+ {0x5301, 0x0c},
+ {0x5302, 0x0c},
+ {0x5303, 0x0f},
+ {0x5304, 0x00},
+ {0x5305, 0x70},
+ {0x5306, 0x00},
+ {0x5307, 0x80},
+ {0x5308, 0x00},
+ {0x5309, 0xa5},
+ {0x530a, 0x00},
+ {0x530b, 0xd3},
+ {0x530c, 0x00},
+ {0x530d, 0xf0},
+ {0x530e, 0x01},
+ {0x530f, 0x10},
+ {0x5310, 0x01},
+ {0x5311, 0x20},
+ {0x5312, 0x01},
+ {0x5313, 0x20},
+ {0x5314, 0x01},
+ {0x5315, 0x20},
+ {0x5316, 0x08},
+ {0x5317, 0x08},
+ {0x5318, 0x10},
+ {0x5319, 0x88},
+ {0x531a, 0x88},
+ {0x531b, 0xa9},
+ {0x531c, 0xaa},
+ {0x531d, 0x0a},
+ {0x5405, 0x02},
+ {0x5406, 0x67},
+ {0x5407, 0x01},
+ {0x5408, 0x4a},
+};
+
+static const struct ov13858_reg mode_1056x784_regs[] = {
+ {0x3013, 0x32},
+ {0x301b, 0xf0},
+ {0x301f, 0xd0},
+ {0x3106, 0x15},
+ {0x3107, 0x23},
+ {0x350a, 0x00},
+ {0x350e, 0x00},
+ {0x3510, 0x00},
+ {0x3511, 0x02},
+ {0x3512, 0x00},
+ {0x3600, 0x2b},
+ {0x3601, 0x52},
+ {0x3602, 0x60},
+ {0x3612, 0x05},
+ {0x3613, 0xa4},
+ {0x3620, 0x80},
+ {0x3621, 0x10},
+ {0x3622, 0x30},
+ {0x3624, 0x1c},
+ {0x3640, 0x10},
+ {0x3641, 0x70},
+ {0x3661, 0x80},
+ {0x3662, 0x08},
+ {0x3664, 0x73},
+ {0x3665, 0xa7},
+ {0x366e, 0xff},
+ {0x366f, 0xf4},
+ {0x3674, 0x00},
+ {0x3679, 0x0c},
+ {0x367f, 0x01},
+ {0x3680, 0x0c},
+ {0x3681, 0x50},
+ {0x3682, 0x50},
+ {0x3683, 0xa9},
+ {0x3684, 0xa9},
+ {0x3709, 0x5f},
+ {0x3714, 0x30},
+ {0x371a, 0x3e},
+ {0x3737, 0x08},
+ {0x3738, 0xcc},
+ {0x3739, 0x20},
+ {0x373d, 0x26},
+ {0x3764, 0x20},
+ {0x3765, 0x20},
+ {0x37a1, 0x36},
+ {0x37a8, 0x3b},
+ {0x37ab, 0x31},
+ {0x37c2, 0x2c},
+ {0x37c3, 0xf1},
+ {0x37c5, 0x00},
+ {0x37d8, 0x03},
+ {0x37d9, 0x06},
+ {0x37da, 0xc2},
+ {0x37dc, 0x02},
+ {0x37e0, 0x00},
+ {0x37e1, 0x0a},
+ {0x37e2, 0x14},
+ {0x37e3, 0x08},
+ {0x37e4, 0x36},
+ {0x37e5, 0x03},
+ {0x37e6, 0x08},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x10},
+ {0x3805, 0x9f},
+ {0x3806, 0x0c},
+ {0x3807, 0x5f},
+ {0x3808, 0x04},
+ {0x3809, 0x20},
+ {0x380a, 0x03},
+ {0x380b, 0x10},
+ {0x380c, 0x04},
+ {0x380d, 0x62},
+ {0x380e, 0x0c},
+ {0x380f, 0x8e},
+ {0x3811, 0x04},
+ {0x3813, 0x05},
+ {0x3814, 0x07},
+ {0x3815, 0x01},
+ {0x3816, 0x07},
+ {0x3817, 0x01},
+ {0x3820, 0xac},
+ {0x3821, 0x00},
+ {0x3822, 0xc2},
+ {0x3823, 0x18},
+ {0x3826, 0x04},
+ {0x3827, 0x48},
+ {0x3829, 0x03},
+ {0x3832, 0x00},
+ {0x3c80, 0x00},
+ {0x3c87, 0x01},
+ {0x3c8c, 0x19},
+ {0x3c8d, 0x1c},
+ {0x3c90, 0x00},
+ {0x3c91, 0x00},
+ {0x3c92, 0x00},
+ {0x3c93, 0x00},
+ {0x3c94, 0x40},
+ {0x3c95, 0x54},
+ {0x3c96, 0x34},
+ {0x3c97, 0x04},
+ {0x3c98, 0x00},
+ {0x3d8c, 0x73},
+ {0x3d8d, 0xc0},
+ {0x3f00, 0x0b},
+ {0x3f03, 0x00},
+ {0x4001, 0xe0},
+ {0x4008, 0x00},
+ {0x4009, 0x05},
+ {0x4011, 0xf0},
+ {0x4017, 0x08},
+ {0x4050, 0x02},
+ {0x4051, 0x05},
+ {0x4052, 0x00},
+ {0x4053, 0x80},
+ {0x4054, 0x00},
+ {0x4055, 0x80},
+ {0x4056, 0x00},
+ {0x4057, 0x80},
+ {0x4058, 0x00},
+ {0x4059, 0x80},
+ {0x405e, 0x20},
+ {0x4500, 0x07},
+ {0x4503, 0x00},
+ {0x450a, 0x04},
+ {0x4809, 0x04},
+ {0x480c, 0x12},
+ {0x481f, 0x30},
+ {0x4833, 0x10},
+ {0x4837, 0x1e},
+ {0x4902, 0x02},
+ {0x4d00, 0x03},
+ {0x4d01, 0xc9},
+ {0x4d02, 0xbc},
+ {0x4d03, 0xd7},
+ {0x4d04, 0xf0},
+ {0x4d05, 0xa2},
+ {0x5000, 0xfd},
+ {0x5001, 0x01},
+ {0x5040, 0x39},
+ {0x5041, 0x10},
+ {0x5042, 0x10},
+ {0x5043, 0x84},
+ {0x5044, 0x62},
+ {0x5180, 0x00},
+ {0x5181, 0x10},
+ {0x5182, 0x02},
+ {0x5183, 0x0f},
+ {0x5200, 0x1b},
+ {0x520b, 0x07},
+ {0x520c, 0x0f},
+ {0x5300, 0x04},
+ {0x5301, 0x0c},
+ {0x5302, 0x0c},
+ {0x5303, 0x0f},
+ {0x5304, 0x00},
+ {0x5305, 0x70},
+ {0x5306, 0x00},
+ {0x5307, 0x80},
+ {0x5308, 0x00},
+ {0x5309, 0xa5},
+ {0x530a, 0x00},
+ {0x530b, 0xd3},
+ {0x530c, 0x00},
+ {0x530d, 0xf0},
+ {0x530e, 0x01},
+ {0x530f, 0x10},
+ {0x5310, 0x01},
+ {0x5311, 0x20},
+ {0x5312, 0x01},
+ {0x5313, 0x20},
+ {0x5314, 0x01},
+ {0x5315, 0x20},
+ {0x5316, 0x08},
+ {0x5317, 0x08},
+ {0x5318, 0x10},
+ {0x5319, 0x88},
+ {0x531a, 0x88},
+ {0x531b, 0xa9},
+ {0x531c, 0xaa},
+ {0x531d, 0x0a},
+ {0x5405, 0x02},
+ {0x5406, 0x67},
+ {0x5407, 0x01},
+ {0x5408, 0x4a},
+};
+
+static const char * const ov13858_test_pattern_menu[] = {
+ "Disabled",
+ "Vertical Color Bar Type 1",
+ "Vertical Color Bar Type 2",
+ "Vertical Color Bar Type 3",
+ "Vertical Color Bar Type 4"
+};
+
+/* Configurations for supported link frequencies */
+#define OV13858_NUM_OF_LINK_FREQS 2
+#define OV13858_LINK_FREQ_1080MBPS 1080000000
+#define OV13858_LINK_FREQ_540MBPS 540000000
+#define OV13858_LINK_FREQ_INDEX_0 0
+#define OV13858_LINK_FREQ_INDEX_1 1
+
+/* Menu items for LINK_FREQ V4L2 control */
+static const s64 link_freq_menu_items[OV13858_NUM_OF_LINK_FREQS] = {
+ OV13858_LINK_FREQ_1080MBPS,
+ OV13858_LINK_FREQ_540MBPS
+};
+
+/* Link frequency configs */
+static const struct ov13858_link_freq_config
+ link_freq_configs[OV13858_NUM_OF_LINK_FREQS] = {
+ {
+ .pixel_rate = 864000000,
+ .pixels_per_line = OV13858_PPL_1080MHZ,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_1080mbps),
+ .regs = mipi_data_rate_1080mbps,
+ }
+ },
+ {
+ .pixel_rate = 432000000,
+ .pixels_per_line = OV13858_PPL_540MHZ,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_540mbps),
+ .regs = mipi_data_rate_540mbps,
+ }
+ }
+};
+
+/* Mode configs */
+static const struct ov13858_mode supported_modes[] = {
+ {
+ .width = 4224,
+ .height = 3136,
+ .vts = OV13858_VTS_30FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_4224x3136_regs),
+ .regs = mode_4224x3136_regs,
+ },
+ .link_freq_index = OV13858_LINK_FREQ_INDEX_0,
+ },
+ {
+ .width = 2112,
+ .height = 1568,
+ .vts = OV13858_VTS_30FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_2112x1568_regs),
+ .regs = mode_2112x1568_regs,
+ },
+ .link_freq_index = OV13858_LINK_FREQ_INDEX_1,
+ },
+ {
+ .width = 2112,
+ .height = 1188,
+ .vts = OV13858_VTS_30FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_2112x1188_regs),
+ .regs = mode_2112x1188_regs,
+ },
+ .link_freq_index = OV13858_LINK_FREQ_INDEX_1,
+ },
+ {
+ .width = 1056,
+ .height = 784,
+ .vts = OV13858_VTS_30FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1056x784_regs),
+ .regs = mode_1056x784_regs,
+ },
+ .link_freq_index = OV13858_LINK_FREQ_INDEX_1,
+ }
+};
+
+struct ov13858 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ /* Current mode */
+ const struct ov13858_mode *cur_mode;
+
+ /* Mutex for serialized access */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
+#define to_ov13858(_sd) container_of(_sd, struct ov13858, sd)
+
+/* Read registers up to 4 at a time */
+static int ov13858_read_reg(struct ov13858 *ov13858, u16 reg, u32 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
+ struct i2c_msg msgs[2];
+ u8 *data_be_p;
+ int ret;
+ u32 data_be = 0;
+ u16 reg_addr_be = cpu_to_be16(reg);
+
+ if (len > 4)
+ return -EINVAL;
+
+ data_be_p = (u8 *)&data_be;
+ /* Write register address */
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 2;
+ msgs[0].buf = (u8 *)&reg_addr_be;
+
+ /* Read data from register */
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_be_p[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
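+	/* Data lands in the trailing bytes of the big-endian word; convert to CPU order */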
+ *val = be32_to_cpu(data_be);
+
+ return 0;
+}
+
+/* Write registers up to 4 at a time */
+static int ov13858_write_reg(struct ov13858 *ov13858, u16 reg, u32 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
+ int buf_i, val_i;
+ u8 buf[6], *val_p;
+
+ if (len > 4)
+ return -EINVAL;
+
+ buf[0] = reg >> 8;
+ buf[1] = reg & 0xff;
+
+ val = cpu_to_be32(val);
+ val_p = (u8 *)&val;
+ buf_i = 2;
+ val_i = 4 - len;
+
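+	/* Append the len least significant bytes of the big-endian value after the address */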
+ while (val_i < 4)
+ buf[buf_i++] = val_p[val_i++];
+
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
+
+/* Write a list of registers */
+static int ov13858_write_regs(struct ov13858 *ov13858,
+ const struct ov13858_reg *regs, u32 len)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
+ int ret;
+ u32 i;
+
+ for (i = 0; i < len; i++) {
+ ret = ov13858_write_reg(ov13858, regs[i].address, 1,
+ regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(
+ &client->dev,
+ "Failed to write reg 0x%4.4x. error = %d\n",
+ regs[i].address, ret);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ov13858_write_reg_list(struct ov13858 *ov13858,
+ const struct ov13858_reg_list *r_list)
+{
+ return ov13858_write_regs(ov13858, r_list->regs, r_list->num_of_regs);
+}
+
+/* Open sub-device */
+static int ov13858_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct ov13858 *ov13858 = to_ov13858(sd);
+ struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_get_try_format(sd,
+ fh->pad,
+ 0);
+
+ mutex_lock(&ov13858->mutex);
+
+ /* Initialize try_fmt */
+ try_fmt->width = ov13858->cur_mode->width;
+ try_fmt->height = ov13858->cur_mode->height;
+ try_fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ try_fmt->field = V4L2_FIELD_NONE;
+
+ /* No crop or compose */
+ mutex_unlock(&ov13858->mutex);
+
+ return 0;
+}
+
+static int ov13858_update_digital_gain(struct ov13858 *ov13858, u32 d_gain)
+{
+ int ret;
+ u32 val;
+
+ if (d_gain == 3)
+ return -EINVAL;
+
+ ret = ov13858_read_reg(ov13858, OV13858_REG_DIGITAL_GAIN,
+ OV13858_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
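+	/* Preserve the other register bits and encode the gain as (d_gain - 1) */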
+ val &= OV13858_DGTL_GAIN_MASK;
+ val |= (d_gain - 1) << OV13858_DGTL_GAIN_SHIFT;
+
+ return ov13858_write_reg(ov13858, OV13858_REG_DIGITAL_GAIN,
+ OV13858_REG_VALUE_08BIT, val);
+}
+
+static int ov13858_enable_test_pattern(struct ov13858 *ov13858, u32 pattern)
+{
+ int ret;
+ u32 val;
+
+ ret = ov13858_read_reg(ov13858, OV13858_REG_TEST_PATTERN,
+ OV13858_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
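+	/* Non-zero: select pattern (pattern - 1) and set the enable bit */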
+ if (pattern) {
+ val &= OV13858_TEST_PATTERN_MASK;
+ val |= (pattern - 1) | OV13858_TEST_PATTERN_ENABLE;
+ } else {
+ val &= ~OV13858_TEST_PATTERN_ENABLE;
+ }
+
+ return ov13858_write_reg(ov13858, OV13858_REG_TEST_PATTERN,
+ OV13858_REG_VALUE_08BIT, val);
+}
+
+static int ov13858_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov13858 *ov13858 = container_of(ctrl->handler,
+ struct ov13858, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
+ s64 max;
+ int ret;
+
+ /* Propagate change of current control to all related controls */
+ switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ /* Update max exposure while meeting expected vblanking */
+ max = ov13858->cur_mode->height + ctrl->val - 8;
+ __v4l2_ctrl_modify_range(ov13858->exposure,
+ ov13858->exposure->minimum,
+ max, ov13858->exposure->step, max);
+ break;
+	}
+
+ /*
+	 * V4L2 control values are applied only when the device is
+	 * powered up for streaming.
+ */
+ if (pm_runtime_get_if_in_use(&client->dev) <= 0)
+ return 0;
+
+ ret = 0;
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = ov13858_write_reg(ov13858, OV13858_REG_ANALOG_GAIN,
+ OV13858_REG_VALUE_16BIT, ctrl->val);
+ break;
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = ov13858_update_digital_gain(ov13858, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
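+		/* The exposure value is programmed into the upper bits of the register */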
+ ret = ov13858_write_reg(ov13858, OV13858_REG_EXPOSURE,
+ OV13858_REG_VALUE_24BIT,
+ ctrl->val << 4);
+ break;
+ case V4L2_CID_VBLANK:
+ /* Update VTS that meets expected vertical blanking */
+ ret = ov13858_write_reg(ov13858, OV13858_REG_VTS,
+ OV13858_REG_VALUE_16BIT,
+ ov13858->cur_mode->height
+ + ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov13858_enable_test_pattern(ov13858, ctrl->val);
+ break;
+ default:
+ dev_info(&client->dev,
+ "ctrl(id:0x%x,val:0x%x) is not handled\n",
+ ctrl->id, ctrl->val);
+ break;
+	}
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov13858_ctrl_ops = {
+ .s_ctrl = ov13858_set_ctrl,
+};
+
+static int ov13858_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+	/* Only one bayer order (GRBG) is supported */
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int ov13858_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static void ov13858_update_pad_format(const struct ov13858_mode *mode,
+ struct v4l2_subdev_format *fmt)
+{
+ fmt->format.width = mode->width;
+ fmt->format.height = mode->height;
+ fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->format.field = V4L2_FIELD_NONE;
+}
+
+static int ov13858_do_get_pad_format(struct ov13858 *ov13858,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *framefmt;
+ struct v4l2_subdev *sd = &ov13858->sd;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ fmt->format = *framefmt;
+ } else {
+ ov13858_update_pad_format(ov13858->cur_mode, fmt);
+ }
+
+ return 0;
+}
+
+static int ov13858_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov13858 *ov13858 = to_ov13858(sd);
+ int ret;
+
+ mutex_lock(&ov13858->mutex);
+ ret = ov13858_do_get_pad_format(ov13858, cfg, fmt);
+ mutex_unlock(&ov13858->mutex);
+
+ return ret;
+}
+
+/*
+ * Calculate the resolution distance: the sum of the absolute width and
+ * height differences
+ */
+static int
+ov13858_get_resolution_dist(const struct ov13858_mode *mode,
+ struct v4l2_mbus_framefmt *framefmt)
+{
+ return abs(mode->width - framefmt->width) +
+ abs(mode->height - framefmt->height);
+}
+
+/*
+ * Find the closest supported resolution to the requested resolution
+ */
+static const struct ov13858_mode *
+ov13858_find_best_fit(struct ov13858 *ov13858,
+ struct v4l2_subdev_format *fmt)
+{
+ int i, dist, cur_best_fit = 0, cur_best_fit_dist = -1;
+ struct v4l2_mbus_framefmt *framefmt = &fmt->format;
+
+ for (i = 0; i < ARRAY_SIZE(supported_modes); i++) {
+ dist = ov13858_get_resolution_dist(&supported_modes[i],
+ framefmt);
+ if (cur_best_fit_dist == -1 || dist < cur_best_fit_dist) {
+ cur_best_fit_dist = dist;
+ cur_best_fit = i;
+ }
+ }
+
+ return &supported_modes[cur_best_fit];
+}
+
+static int
+ov13858_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov13858 *ov13858 = to_ov13858(sd);
+ const struct ov13858_mode *mode;
+ struct v4l2_mbus_framefmt *framefmt;
+ s64 h_blank;
+
+ mutex_lock(&ov13858->mutex);
+
+	/* Only one raw bayer (GRBG) order is supported */
+ if (fmt->format.code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ mode = ov13858_find_best_fit(ov13858, fmt);
+ ov13858_update_pad_format(mode, fmt);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ *framefmt = fmt->format;
+ } else {
+ ov13858->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(ov13858->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(
+ ov13858->pixel_rate,
+ link_freq_configs[mode->link_freq_index].pixel_rate);
+ /* Update limits and set FPS to default */
+ __v4l2_ctrl_modify_range(
+ ov13858->vblank, OV13858_VBLANK_MIN,
+ OV13858_VTS_MAX - ov13858->cur_mode->height, 1,
+ ov13858->cur_mode->vts - ov13858->cur_mode->height);
+ h_blank =
+ link_freq_configs[mode->link_freq_index].pixels_per_line
+ - ov13858->cur_mode->width;
+ __v4l2_ctrl_modify_range(ov13858->hblank, h_blank,
+ h_blank, 1, h_blank);
+ }
+
+ mutex_unlock(&ov13858->mutex);
+
+ return 0;
+}
+
+static int ov13858_get_skip_frames(struct v4l2_subdev *sd, u32 *frames)
+{
+ *frames = OV13858_NUM_OF_SKIP_FRAMES;
+
+ return 0;
+}
+
+/* Start streaming */
+static int ov13858_start_streaming(struct ov13858 *ov13858)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
+ const struct ov13858_reg_list *reg_list;
+ int ret, link_freq_index;
+
+	/* Get out of software reset */
+ ret = ov13858_write_reg(ov13858, OV13858_REG_SOFTWARE_RST,
+ OV13858_REG_VALUE_08BIT, OV13858_SOFTWARE_RST);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set powerup registers\n",
+ __func__);
+ return ret;
+ }
+
+ /* Setup PLL */
+ link_freq_index = ov13858->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = ov13858_write_reg_list(ov13858, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ return ret;
+ }
+
+ /* Apply default values of current mode */
+ reg_list = &ov13858->cur_mode->reg_list;
+ ret = ov13858_write_reg_list(ov13858, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ return ret;
+ }
+
+ /* Apply customized values from user */
+ ret = __v4l2_ctrl_handler_setup(ov13858->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ return ov13858_write_reg(ov13858, OV13858_REG_MODE_SELECT,
+ OV13858_REG_VALUE_08BIT,
+ OV13858_MODE_STREAMING);
+}
+
+/* Stop streaming */
+static int ov13858_stop_streaming(struct ov13858 *ov13858)
+{
+ return ov13858_write_reg(ov13858, OV13858_REG_MODE_SELECT,
+ OV13858_REG_VALUE_08BIT, OV13858_MODE_STANDBY);
+}
+
+static int ov13858_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov13858 *ov13858 = to_ov13858(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ mutex_lock(&ov13858->mutex);
+ if (ov13858->streaming == enable) {
+ mutex_unlock(&ov13858->mutex);
+ return 0;
+ }
+
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ goto err_unlock;
+ }
+
+ /*
+ * Apply default & customized values
+ * and then start streaming.
+ */
+ ret = ov13858_start_streaming(ov13858);
+ if (ret)
+ goto err_rpm_put;
+ } else {
+ ov13858_stop_streaming(ov13858);
+ pm_runtime_put(&client->dev);
+ }
+
+ ov13858->streaming = enable;
+ mutex_unlock(&ov13858->mutex);
+
+ return ret;
+
+err_rpm_put:
+ pm_runtime_put(&client->dev);
+err_unlock:
+ mutex_unlock(&ov13858->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused ov13858_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov13858 *ov13858 = to_ov13858(sd);
+
+ if (ov13858->streaming)
+ ov13858_stop_streaming(ov13858);
+
+ return 0;
+}
+
+static int __maybe_unused ov13858_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov13858 *ov13858 = to_ov13858(sd);
+ int ret;
+
+ if (ov13858->streaming) {
+ ret = ov13858_start_streaming(ov13858);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ ov13858_stop_streaming(ov13858);
+ ov13858->streaming = 0;
+ return ret;
+}
+
+/* Verify chip ID */
+static int ov13858_identify_module(struct ov13858 *ov13858)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
+ int ret;
+ u32 val;
+
+ ret = ov13858_read_reg(ov13858, OV13858_REG_CHIP_ID,
+ OV13858_REG_VALUE_24BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != OV13858_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ OV13858_CHIP_ID, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov13858_video_ops = {
+ .s_stream = ov13858_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov13858_pad_ops = {
+ .enum_mbus_code = ov13858_enum_mbus_code,
+ .get_fmt = ov13858_get_pad_format,
+ .set_fmt = ov13858_set_pad_format,
+ .enum_frame_size = ov13858_enum_frame_size,
+};
+
+static const struct v4l2_subdev_sensor_ops ov13858_sensor_ops = {
+ .g_skip_frames = ov13858_get_skip_frames,
+};
+
+static const struct v4l2_subdev_ops ov13858_subdev_ops = {
+ .video = &ov13858_video_ops,
+ .pad = &ov13858_pad_ops,
+ .sensor = &ov13858_sensor_ops,
+};
+
+static const struct media_entity_operations ov13858_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops ov13858_internal_ops = {
+ .open = ov13858_open,
+};
+
+/* Initialize control handlers */
+static int ov13858_init_controls(struct ov13858 *ov13858)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ int ret;
+
+ ctrl_hdlr = &ov13858->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ mutex_init(&ov13858->mutex);
+ ctrl_hdlr->lock = &ov13858->mutex;
+ ov13858->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
+ &ov13858_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ OV13858_NUM_OF_LINK_FREQS - 1,
+ 0,
+ link_freq_menu_items);
+ ov13858->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ /* By default, PIXEL_RATE is read only */
+ ov13858->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov13858_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ link_freq_configs[0].pixel_rate, 1,
+ link_freq_configs[0].pixel_rate);
+
+ ov13858->vblank = v4l2_ctrl_new_std(
+ ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_VBLANK,
+ OV13858_VBLANK_MIN,
+ OV13858_VTS_MAX - ov13858->cur_mode->height, 1,
+ ov13858->cur_mode->vts
+ - ov13858->cur_mode->height);
+
+ ov13858->hblank = v4l2_ctrl_new_std(
+ ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_HBLANK,
+ OV13858_PPL_1080MHZ - ov13858->cur_mode->width,
+ OV13858_PPL_1080MHZ - ov13858->cur_mode->width,
+ 1,
+ OV13858_PPL_1080MHZ - ov13858->cur_mode->width);
+ ov13858->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ov13858->exposure = v4l2_ctrl_new_std(
+ ctrl_hdlr, &ov13858_ctrl_ops,
+ V4L2_CID_EXPOSURE, OV13858_EXPOSURE_MIN,
+ OV13858_EXPOSURE_MAX, OV13858_EXPOSURE_STEP,
+ OV13858_EXPOSURE_DEFAULT);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV13858_ANA_GAIN_MIN, OV13858_ANA_GAIN_MAX,
+ OV13858_ANA_GAIN_STEP, OV13858_ANA_GAIN_DEFAULT);
+
+ /* Digital gain */
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ OV13858_DGTL_GAIN_MIN, OV13858_DGTL_GAIN_MAX,
+ OV13858_DGTL_GAIN_STEP, OV13858_DGTL_GAIN_DEFAULT);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov13858_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov13858_test_pattern_menu) - 1,
+ 0, 0, ov13858_test_pattern_menu);
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ dev_err(&client->dev, "%s control init failed (%d)\n",
+ __func__, ret);
+ goto error;
+ }
+
+ ov13858->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+ mutex_destroy(&ov13858->mutex);
+
+ return ret;
+}
+
+static void ov13858_free_controls(struct ov13858 *ov13858)
+{
+ v4l2_ctrl_handler_free(ov13858->sd.ctrl_handler);
+ mutex_destroy(&ov13858->mutex);
+}
+
+static int ov13858_probe(struct i2c_client *client,
+ const struct i2c_device_id *devid)
+{
+ struct ov13858 *ov13858;
+ int ret;
+ u32 val = 0;
+
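+	/* Only a 19.2MHz input clock is supported */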
+ device_property_read_u32(&client->dev, "clock-frequency", &val);
+ if (val != 19200000)
+ return -EINVAL;
+
+ ov13858 = devm_kzalloc(&client->dev, sizeof(*ov13858), GFP_KERNEL);
+ if (!ov13858)
+ return -ENOMEM;
+
+ /* Initialize subdev */
+ v4l2_i2c_subdev_init(&ov13858->sd, client, &ov13858_subdev_ops);
+
+ /* Check module identity */
+ ret = ov13858_identify_module(ov13858);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ return ret;
+ }
+
+ /* Set default mode to max resolution */
+ ov13858->cur_mode = &supported_modes[0];
+
+ ret = ov13858_init_controls(ov13858);
+ if (ret)
+ return ret;
+
+ /* Initialize subdev */
+ ov13858->sd.internal_ops = &ov13858_internal_ops;
+ ov13858->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov13858->sd.entity.ops = &ov13858_subdev_entity_ops;
+ ov13858->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ /* Initialize source pad */
+ ov13858->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&ov13858->sd.entity, 1, &ov13858->pad);
+ if (ret) {
+ dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ goto error_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev(&ov13858->sd);
+ if (ret < 0)
+ goto error_media_entity;
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_put(&client->dev);
+
+ return 0;
+
+error_media_entity:
+ media_entity_cleanup(&ov13858->sd.entity);
+
+error_handler_free:
+ ov13858_free_controls(ov13858);
+ dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int ov13858_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov13858 *ov13858 = to_ov13858(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ ov13858_free_controls(ov13858);
+
+ /*
+ * Disable runtime PM but keep the device turned on.
+ * i2c-core with ACPI domain PM will turn off the device.
+ */
+ pm_runtime_get_sync(&client->dev);
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id ov13858_id_table[] = {
+ {"ov13858", 0},
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, ov13858_id_table);
+
+static const struct dev_pm_ops ov13858_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ov13858_suspend, ov13858_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ov13858_acpi_ids[] = {
+ {"OVTID858"},
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(acpi, ov13858_acpi_ids);
+#endif
+
+static struct i2c_driver ov13858_i2c_driver = {
+ .driver = {
+ .name = "ov13858",
+ .owner = THIS_MODULE,
+ .pm = &ov13858_pm_ops,
+ .acpi_match_table = ACPI_PTR(ov13858_acpi_ids),
+ },
+ .probe = ov13858_probe,
+ .remove = ov13858_remove,
+ .id_table = ov13858_id_table,
+};
+
+module_i2c_driver(ov13858_i2c_driver);
+
+MODULE_AUTHOR("Kan, Chris <[email protected]>");
+MODULE_AUTHOR("Rapolu, Chiranjeevi <[email protected]>");
+MODULE_AUTHOR("Yang, Hyungwoo <[email protected]>");
+MODULE_DESCRIPTION("Omnivision ov13858 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 6e6367214d40..122dd6c5eb38 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -42,9 +42,9 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-image-sizes.h>
#include <media/v4l2-mediabus.h>
-#include <media/v4l2-of.h>
#include <media/v4l2-subdev.h>
#define DRIVER_NAME "ov2659"
@@ -1308,7 +1308,8 @@ static const struct v4l2_subdev_internal_ops ov2659_subdev_internal_ops = {
static int ov2659_detect(struct v4l2_subdev *sd)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 pid, ver;
+ u8 pid = 0;
+ u8 ver = 0;
int ret;
dev_dbg(&client->dev, "%s:\n", __func__);
@@ -1346,7 +1347,7 @@ static struct ov2659_platform_data *
ov2659_get_pdata(struct i2c_client *client)
{
struct ov2659_platform_data *pdata;
- struct v4l2_of_endpoint *bus_cfg;
+ struct v4l2_fwnode_endpoint *bus_cfg;
struct device_node *endpoint;
if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
@@ -1356,7 +1357,7 @@ ov2659_get_pdata(struct i2c_client *client)
if (!endpoint)
return NULL;
- bus_cfg = v4l2_of_alloc_parse_endpoint(endpoint);
+ bus_cfg = v4l2_fwnode_endpoint_alloc_parse(of_fwnode_handle(endpoint));
if (IS_ERR(bus_cfg)) {
pdata = NULL;
goto done;
@@ -1376,7 +1377,7 @@ ov2659_get_pdata(struct i2c_client *client)
pdata->link_frequency = bus_cfg->link_frequencies[0];
done:
- v4l2_of_free_endpoint(bus_cfg);
+ v4l2_fwnode_endpoint_free(bus_cfg);
of_node_put(endpoint);
return pdata;
}
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
new file mode 100644
index 000000000000..1f5b483cf334
--- /dev/null
+++ b/drivers/media/i2c/ov5640.c
@@ -0,0 +1,2344 @@
+/*
+ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2014-2017 Mentor Graphics Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+/* min/typical/max system clock (xclk) frequencies */
+#define OV5640_XCLK_MIN 6000000
+#define OV5640_XCLK_MAX 24000000
+
+#define OV5640_DEFAULT_SLAVE_ID 0x3c
+
+#define OV5640_REG_CHIP_ID 0x300a
+#define OV5640_REG_PAD_OUTPUT00 0x3019
+#define OV5640_REG_SC_PLL_CTRL0 0x3034
+#define OV5640_REG_SC_PLL_CTRL1 0x3035
+#define OV5640_REG_SC_PLL_CTRL2 0x3036
+#define OV5640_REG_SC_PLL_CTRL3 0x3037
+#define OV5640_REG_SLAVE_ID 0x3100
+#define OV5640_REG_SYS_ROOT_DIVIDER 0x3108
+#define OV5640_REG_AWB_R_GAIN 0x3400
+#define OV5640_REG_AWB_G_GAIN 0x3402
+#define OV5640_REG_AWB_B_GAIN 0x3404
+#define OV5640_REG_AWB_MANUAL_CTRL 0x3406
+#define OV5640_REG_AEC_PK_EXPOSURE_HI 0x3500
+#define OV5640_REG_AEC_PK_EXPOSURE_MED 0x3501
+#define OV5640_REG_AEC_PK_EXPOSURE_LO 0x3502
+#define OV5640_REG_AEC_PK_MANUAL 0x3503
+#define OV5640_REG_AEC_PK_REAL_GAIN 0x350a
+#define OV5640_REG_AEC_PK_VTS 0x350c
+#define OV5640_REG_TIMING_HTS 0x380c
+#define OV5640_REG_TIMING_VTS 0x380e
+#define OV5640_REG_TIMING_TC_REG21 0x3821
+#define OV5640_REG_AEC_CTRL00 0x3a00
+#define OV5640_REG_AEC_B50_STEP 0x3a08
+#define OV5640_REG_AEC_B60_STEP 0x3a0a
+#define OV5640_REG_AEC_CTRL0D 0x3a0d
+#define OV5640_REG_AEC_CTRL0E 0x3a0e
+#define OV5640_REG_AEC_CTRL0F 0x3a0f
+#define OV5640_REG_AEC_CTRL10 0x3a10
+#define OV5640_REG_AEC_CTRL11 0x3a11
+#define OV5640_REG_AEC_CTRL1B 0x3a1b
+#define OV5640_REG_AEC_CTRL1E 0x3a1e
+#define OV5640_REG_AEC_CTRL1F 0x3a1f
+#define OV5640_REG_HZ5060_CTRL00 0x3c00
+#define OV5640_REG_HZ5060_CTRL01 0x3c01
+#define OV5640_REG_SIGMADELTA_CTRL0C 0x3c0c
+#define OV5640_REG_FRAME_CTRL01 0x4202
+#define OV5640_REG_MIPI_CTRL00 0x4800
+#define OV5640_REG_DEBUG_MODE 0x4814
+#define OV5640_REG_PRE_ISP_TEST_SET1 0x503d
+#define OV5640_REG_SDE_CTRL0 0x5580
+#define OV5640_REG_SDE_CTRL1 0x5581
+#define OV5640_REG_SDE_CTRL3 0x5583
+#define OV5640_REG_SDE_CTRL4 0x5584
+#define OV5640_REG_SDE_CTRL5 0x5585
+#define OV5640_REG_AVG_READOUT 0x56a1
+
+enum ov5640_mode_id {
+ OV5640_MODE_QCIF_176_144 = 0,
+ OV5640_MODE_QVGA_320_240,
+ OV5640_MODE_VGA_640_480,
+ OV5640_MODE_NTSC_720_480,
+ OV5640_MODE_PAL_720_576,
+ OV5640_MODE_XGA_1024_768,
+ OV5640_MODE_720P_1280_720,
+ OV5640_MODE_1080P_1920_1080,
+ OV5640_MODE_QSXGA_2592_1944,
+ OV5640_NUM_MODES,
+};
+
+enum ov5640_frame_rate {
+ OV5640_15_FPS = 0,
+ OV5640_30_FPS,
+ OV5640_NUM_FRAMERATES,
+};
+
+/*
+ * FIXME: remove this when a subdev API becomes available
+ * to set the MIPI CSI-2 virtual channel.
+ */
+static unsigned int virtual_channel;
+module_param(virtual_channel, int, 0);
+MODULE_PARM_DESC(virtual_channel,
+ "MIPI CSI-2 virtual channel (0..3), default 0");
+
+static const int ov5640_framerates[] = {
+ [OV5640_15_FPS] = 15,
+ [OV5640_30_FPS] = 30,
+};
+
+/* regulator supplies */
+static const char * const ov5640_supply_name[] = {
+	"DOVDD", /* Digital I/O (1.8V) supply */
+ "DVDD", /* Digital Core (1.5V) supply */
+ "AVDD", /* Analog (2.8V) supply */
+};
+
+#define OV5640_NUM_SUPPLIES ARRAY_SIZE(ov5640_supply_name)
+
+/*
+ * Image sizes below 1280 * 960 use SUBSAMPLING.
+ * Larger image sizes use SCALING.
+ */
+enum ov5640_downsize_mode {
+ SUBSAMPLING,
+ SCALING,
+};
+
+struct reg_value {
+ u16 reg_addr;
+ u8 val;
+ u8 mask;
+ u32 delay_ms;
+};
+
+struct ov5640_mode_info {
+ enum ov5640_mode_id id;
+ enum ov5640_downsize_mode dn_mode;
+ u32 width;
+ u32 height;
+ const struct reg_value *reg_data;
+ u32 reg_data_size;
+};
+
+struct ov5640_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct {
+ struct v4l2_ctrl *auto_exp;
+ struct v4l2_ctrl *exposure;
+ };
+ struct {
+ struct v4l2_ctrl *auto_wb;
+ struct v4l2_ctrl *blue_balance;
+ struct v4l2_ctrl *red_balance;
+ };
+ struct {
+ struct v4l2_ctrl *auto_gain;
+ struct v4l2_ctrl *gain;
+ };
+ struct v4l2_ctrl *brightness;
+ struct v4l2_ctrl *saturation;
+ struct v4l2_ctrl *contrast;
+ struct v4l2_ctrl *hue;
+ struct v4l2_ctrl *test_pattern;
+};
+
+struct ov5640_dev {
+ struct i2c_client *i2c_client;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_fwnode_endpoint ep; /* the parsed DT endpoint info */
+ struct clk *xclk; /* system clock to OV5640 */
+ u32 xclk_freq;
+
+ struct regulator_bulk_data supplies[OV5640_NUM_SUPPLIES];
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *pwdn_gpio;
+
+ /* lock to protect all members below */
+ struct mutex lock;
+
+ int power_count;
+
+ struct v4l2_mbus_framefmt fmt;
+
+ const struct ov5640_mode_info *current_mode;
+ enum ov5640_frame_rate current_fr;
+ struct v4l2_fract frame_interval;
+
+ struct ov5640_ctrls ctrls;
+
+ u32 prev_sysclk, prev_hts;
+ u32 ae_low, ae_high, ae_target;
+
+ bool pending_mode_change;
+ bool streaming;
+};
+
+static inline struct ov5640_dev *to_ov5640_dev(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ov5640_dev, sd);
+}
+
+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct ov5640_dev,
+ ctrls.handler)->sd;
+}
+
+/*
+ * FIXME: all of these register tables are likely filled with
+ * entries that set the register to their power-on default values,
+ * and which are otherwise not touched by this driver. Those entries
+ * should be identified and removed to speed register load time
+ * over i2c.
+ */
+
+static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
+ {0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0},
+ {0x3103, 0x03, 0, 0}, {0x3017, 0x00, 0, 0}, {0x3018, 0x00, 0, 0},
+ {0x3034, 0x18, 0, 0}, {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0},
+ {0x3037, 0x13, 0, 0}, {0x3108, 0x01, 0, 0}, {0x3630, 0x36, 0, 0},
+ {0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0},
+ {0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0},
+ {0x3715, 0x78, 0, 0}, {0x3717, 0x01, 0, 0}, {0x370b, 0x60, 0, 0},
+ {0x3705, 0x1a, 0, 0}, {0x3905, 0x02, 0, 0}, {0x3906, 0x10, 0, 0},
+ {0x3901, 0x0a, 0, 0}, {0x3731, 0x12, 0, 0}, {0x3600, 0x08, 0, 0},
+ {0x3601, 0x33, 0, 0}, {0x302d, 0x60, 0, 0}, {0x3620, 0x52, 0, 0},
+ {0x371b, 0x20, 0, 0}, {0x471c, 0x50, 0, 0}, {0x3a13, 0x43, 0, 0},
+ {0x3a18, 0x00, 0, 0}, {0x3a19, 0xf8, 0, 0}, {0x3635, 0x13, 0, 0},
+ {0x3636, 0x03, 0, 0}, {0x3634, 0x40, 0, 0}, {0x3622, 0x01, 0, 0},
+ {0x3c01, 0xa4, 0, 0}, {0x3c04, 0x28, 0, 0}, {0x3c05, 0x98, 0, 0},
+ {0x3c06, 0x00, 0, 0}, {0x3c07, 0x08, 0, 0}, {0x3c08, 0x00, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
+ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
+ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
+ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x3000, 0x00, 0, 0},
+ {0x3002, 0x1c, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3006, 0xc3, 0, 0},
+ {0x300e, 0x45, 0, 0}, {0x302e, 0x08, 0, 0}, {0x4300, 0x3f, 0, 0},
+ {0x501f, 0x00, 0, 0}, {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0},
+ {0x440e, 0x00, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+ {0x4837, 0x0a, 0, 0}, {0x4800, 0x04, 0, 0}, {0x3824, 0x02, 0, 0},
+ {0x5000, 0xa7, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x5180, 0xff, 0, 0},
+ {0x5181, 0xf2, 0, 0}, {0x5182, 0x00, 0, 0}, {0x5183, 0x14, 0, 0},
+ {0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0}, {0x5186, 0x09, 0, 0},
+ {0x5187, 0x09, 0, 0}, {0x5188, 0x09, 0, 0}, {0x5189, 0x88, 0, 0},
+ {0x518a, 0x54, 0, 0}, {0x518b, 0xee, 0, 0}, {0x518c, 0xb2, 0, 0},
+ {0x518d, 0x50, 0, 0}, {0x518e, 0x34, 0, 0}, {0x518f, 0x6b, 0, 0},
+ {0x5190, 0x46, 0, 0}, {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0},
+ {0x5193, 0x70, 0, 0}, {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0},
+ {0x5196, 0x03, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0},
+ {0x5199, 0x6c, 0, 0}, {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0},
+ {0x519c, 0x09, 0, 0}, {0x519d, 0x2b, 0, 0}, {0x519e, 0x38, 0, 0},
+ {0x5381, 0x1e, 0, 0}, {0x5382, 0x5b, 0, 0}, {0x5383, 0x08, 0, 0},
+ {0x5384, 0x0a, 0, 0}, {0x5385, 0x7e, 0, 0}, {0x5386, 0x88, 0, 0},
+ {0x5387, 0x7c, 0, 0}, {0x5388, 0x6c, 0, 0}, {0x5389, 0x10, 0, 0},
+ {0x538a, 0x01, 0, 0}, {0x538b, 0x98, 0, 0}, {0x5300, 0x08, 0, 0},
+ {0x5301, 0x30, 0, 0}, {0x5302, 0x10, 0, 0}, {0x5303, 0x00, 0, 0},
+ {0x5304, 0x08, 0, 0}, {0x5305, 0x30, 0, 0}, {0x5306, 0x08, 0, 0},
+ {0x5307, 0x16, 0, 0}, {0x5309, 0x08, 0, 0}, {0x530a, 0x30, 0, 0},
+ {0x530b, 0x04, 0, 0}, {0x530c, 0x06, 0, 0}, {0x5480, 0x01, 0, 0},
+ {0x5481, 0x08, 0, 0}, {0x5482, 0x14, 0, 0}, {0x5483, 0x28, 0, 0},
+ {0x5484, 0x51, 0, 0}, {0x5485, 0x65, 0, 0}, {0x5486, 0x71, 0, 0},
+ {0x5487, 0x7d, 0, 0}, {0x5488, 0x87, 0, 0}, {0x5489, 0x91, 0, 0},
+ {0x548a, 0x9a, 0, 0}, {0x548b, 0xaa, 0, 0}, {0x548c, 0xb8, 0, 0},
+ {0x548d, 0xcd, 0, 0}, {0x548e, 0xdd, 0, 0}, {0x548f, 0xea, 0, 0},
+ {0x5490, 0x1d, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5583, 0x40, 0, 0},
+ {0x5584, 0x10, 0, 0}, {0x5589, 0x10, 0, 0}, {0x558a, 0x00, 0, 0},
+ {0x558b, 0xf8, 0, 0}, {0x5800, 0x23, 0, 0}, {0x5801, 0x14, 0, 0},
+ {0x5802, 0x0f, 0, 0}, {0x5803, 0x0f, 0, 0}, {0x5804, 0x12, 0, 0},
+ {0x5805, 0x26, 0, 0}, {0x5806, 0x0c, 0, 0}, {0x5807, 0x08, 0, 0},
+ {0x5808, 0x05, 0, 0}, {0x5809, 0x05, 0, 0}, {0x580a, 0x08, 0, 0},
+ {0x580b, 0x0d, 0, 0}, {0x580c, 0x08, 0, 0}, {0x580d, 0x03, 0, 0},
+ {0x580e, 0x00, 0, 0}, {0x580f, 0x00, 0, 0}, {0x5810, 0x03, 0, 0},
+ {0x5811, 0x09, 0, 0}, {0x5812, 0x07, 0, 0}, {0x5813, 0x03, 0, 0},
+ {0x5814, 0x00, 0, 0}, {0x5815, 0x01, 0, 0}, {0x5816, 0x03, 0, 0},
+ {0x5817, 0x08, 0, 0}, {0x5818, 0x0d, 0, 0}, {0x5819, 0x08, 0, 0},
+ {0x581a, 0x05, 0, 0}, {0x581b, 0x06, 0, 0}, {0x581c, 0x08, 0, 0},
+ {0x581d, 0x0e, 0, 0}, {0x581e, 0x29, 0, 0}, {0x581f, 0x17, 0, 0},
+ {0x5820, 0x11, 0, 0}, {0x5821, 0x11, 0, 0}, {0x5822, 0x15, 0, 0},
+ {0x5823, 0x28, 0, 0}, {0x5824, 0x46, 0, 0}, {0x5825, 0x26, 0, 0},
+ {0x5826, 0x08, 0, 0}, {0x5827, 0x26, 0, 0}, {0x5828, 0x64, 0, 0},
+ {0x5829, 0x26, 0, 0}, {0x582a, 0x24, 0, 0}, {0x582b, 0x22, 0, 0},
+ {0x582c, 0x24, 0, 0}, {0x582d, 0x24, 0, 0}, {0x582e, 0x06, 0, 0},
+ {0x582f, 0x22, 0, 0}, {0x5830, 0x40, 0, 0}, {0x5831, 0x42, 0, 0},
+ {0x5832, 0x24, 0, 0}, {0x5833, 0x26, 0, 0}, {0x5834, 0x24, 0, 0},
+ {0x5835, 0x22, 0, 0}, {0x5836, 0x22, 0, 0}, {0x5837, 0x26, 0, 0},
+ {0x5838, 0x44, 0, 0}, {0x5839, 0x24, 0, 0}, {0x583a, 0x26, 0, 0},
+ {0x583b, 0x28, 0, 0}, {0x583c, 0x42, 0, 0}, {0x583d, 0xce, 0, 0},
+ {0x5025, 0x00, 0, 0}, {0x3a0f, 0x30, 0, 0}, {0x3a10, 0x28, 0, 0},
+ {0x3a1b, 0x30, 0, 0}, {0x3a1e, 0x26, 0, 0}, {0x3a11, 0x60, 0, 0},
+ {0x3a1f, 0x14, 0, 0}, {0x3008, 0x02, 0, 0}, {0x3c00, 0x04, 0, 300},
+};
+
+static const struct reg_value ov5640_setting_30fps_VGA_640_480[] = {
+ {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
+ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
+ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
+ {0x380e, 0x04, 0, 0}, {0x380f, 0x38, 0, 0}, {0x3810, 0x00, 0, 0},
+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x0e, 0, 0},
+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x3503, 0x00, 0, 0},
+};
+
+static const struct reg_value ov5640_setting_15fps_VGA_640_480[] = {
+ {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
+ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
+ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
+ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
+};
+
+static const struct reg_value ov5640_setting_30fps_XGA_1024_768[] = {
+ {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
+ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
+ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
+ {0x380e, 0x04, 0, 0}, {0x380f, 0x38, 0, 0}, {0x3810, 0x00, 0, 0},
+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x0e, 0, 0},
+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x3503, 0x00, 0, 0},
+ {0x3808, 0x04, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x03, 0, 0},
+ {0x380b, 0x00, 0, 0}, {0x3035, 0x12, 0, 0},
+};
+
+static const struct reg_value ov5640_setting_15fps_XGA_1024_768[] = {
+ {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
+ {0x3808, 0x02, 0, 0}, {0x3809, 0x80, 0, 0}, {0x380a, 0x01, 0, 0},
+ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
+ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x3808, 0x04, 0, 0},
+ {0x3809, 0x00, 0, 0}, {0x380a, 0x03, 0, 0}, {0x380b, 0x00, 0, 0},
+};
+
+static const struct reg_value ov5640_setting_30fps_QVGA_320_240[] = {
+ {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
+ {0x3808, 0x01, 0, 0}, {0x3809, 0x40, 0, 0}, {0x380a, 0x00, 0, 0},
+ {0x380b, 0xf0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
+ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
+};
+
+static const struct reg_value ov5640_setting_15fps_QVGA_320_240[] = {
+ {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
+ {0x3808, 0x01, 0, 0}, {0x3809, 0x40, 0, 0}, {0x380a, 0x00, 0, 0},
+ {0x380b, 0xf0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
+ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
+};
+
+static const struct reg_value ov5640_setting_30fps_QCIF_176_144[] = {
+ {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
+ {0x3808, 0x00, 0, 0}, {0x3809, 0xb0, 0, 0}, {0x380a, 0x00, 0, 0},
+ {0x380b, 0x90, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
+ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
+};
+
+static const struct reg_value ov5640_setting_15fps_QCIF_176_144[] = {
+ {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
+ {0x3808, 0x00, 0, 0}, {0x3809, 0xb0, 0, 0}, {0x380a, 0x00, 0, 0},
+ {0x380b, 0x90, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
+ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
+};
+
+static const struct reg_value ov5640_setting_30fps_NTSC_720_480[] = {
+ {0x3035, 0x12, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
+ {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0}, {0x380a, 0x01, 0, 0},
+ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
+ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x3c, 0, 0},
+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
+};
+
+static const struct reg_value ov5640_setting_15fps_NTSC_720_480[] = {
+ {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
+ {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0}, {0x380a, 0x01, 0, 0},
+ {0x380b, 0xe0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
+ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x3c, 0, 0},
+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
+};
+
+static const struct reg_value ov5640_setting_30fps_PAL_720_576[] = {
+ {0x3035, 0x12, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
+ {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0}, {0x380a, 0x02, 0, 0},
+ {0x380b, 0x40, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
+ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
+ {0x3811, 0x38, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
+};
+
+static const struct reg_value ov5640_setting_15fps_PAL_720_576[] = {
+ {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
+ {0x3808, 0x02, 0, 0}, {0x3809, 0xd0, 0, 0}, {0x380a, 0x02, 0, 0},
+ {0x380b, 0x40, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x68, 0, 0},
+ {0x380e, 0x03, 0, 0}, {0x380f, 0xd8, 0, 0}, {0x3810, 0x00, 0, 0},
+ {0x3811, 0x38, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
+};
+
+static const struct reg_value ov5640_setting_30fps_720P_1280_720[] = {
+ {0x3008, 0x42, 0, 0},
+ {0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x07, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
+ {0x3802, 0x00, 0, 0}, {0x3803, 0xfa, 0, 0}, {0x3804, 0x0a, 0, 0},
+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0}, {0x3807, 0xa9, 0, 0},
+ {0x3808, 0x05, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x02, 0, 0},
+ {0x380b, 0xd0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x64, 0, 0},
+ {0x380e, 0x02, 0, 0}, {0x380f, 0xe4, 0, 0}, {0x3810, 0x00, 0, 0},
+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x02, 0, 0},
+ {0x3a03, 0xe4, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0xbc, 0, 0},
+ {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x72, 0, 0}, {0x3a0e, 0x01, 0, 0},
+ {0x3a0d, 0x02, 0, 0}, {0x3a14, 0x02, 0, 0}, {0x3a15, 0xe4, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x02, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0},
+ {0x3824, 0x04, 0, 0}, {0x5001, 0x83, 0, 0}, {0x4005, 0x1a, 0, 0},
+ {0x3008, 0x02, 0, 0}, {0x3503, 0, 0, 0},
+};
+
+static const struct reg_value ov5640_setting_15fps_720P_1280_720[] = {
+ {0x3035, 0x41, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x07, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
+ {0x3802, 0x00, 0, 0}, {0x3803, 0xfa, 0, 0}, {0x3804, 0x0a, 0, 0},
+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0}, {0x3807, 0xa9, 0, 0},
+ {0x3808, 0x05, 0, 0}, {0x3809, 0x00, 0, 0}, {0x380a, 0x02, 0, 0},
+ {0x380b, 0xd0, 0, 0}, {0x380c, 0x07, 0, 0}, {0x380d, 0x64, 0, 0},
+ {0x380e, 0x02, 0, 0}, {0x380f, 0xe4, 0, 0}, {0x3810, 0x00, 0, 0},
+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x02, 0, 0},
+ {0x3a03, 0xe4, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0xbc, 0, 0},
+ {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x72, 0, 0}, {0x3a0e, 0x01, 0, 0},
+ {0x3a0d, 0x02, 0, 0}, {0x3a14, 0x02, 0, 0}, {0x3a15, 0xe4, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x02, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0},
+ {0x3824, 0x04, 0, 0}, {0x5001, 0x83, 0, 0},
+};
+
+static const struct reg_value ov5640_setting_30fps_1080P_1920_1080[] = {
+ {0x3008, 0x42, 0, 0},
+ {0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x08, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0}, {0x3814, 0x11, 0, 0},
+ {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
+ {0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
+ {0x3808, 0x0a, 0, 0}, {0x3809, 0x20, 0, 0}, {0x380a, 0x07, 0, 0},
+ {0x380b, 0x98, 0, 0}, {0x380c, 0x0b, 0, 0}, {0x380d, 0x1c, 0, 0},
+ {0x380e, 0x07, 0, 0}, {0x380f, 0xb0, 0, 0}, {0x3810, 0x00, 0, 0},
+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
+ {0x3618, 0x04, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x21, 0, 0},
+ {0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x03, 0, 0},
+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 0}, {0x3035, 0x11, 0, 0},
+ {0x3036, 0x54, 0, 0}, {0x3c07, 0x07, 0, 0}, {0x3c08, 0x00, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3800, 0x01, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3802, 0x01, 0, 0},
+ {0x3803, 0xb2, 0, 0}, {0x3804, 0x08, 0, 0}, {0x3805, 0xef, 0, 0},
+ {0x3806, 0x05, 0, 0}, {0x3807, 0xf1, 0, 0}, {0x3808, 0x07, 0, 0},
+ {0x3809, 0x80, 0, 0}, {0x380a, 0x04, 0, 0}, {0x380b, 0x38, 0, 0},
+ {0x380c, 0x09, 0, 0}, {0x380d, 0xc4, 0, 0}, {0x380e, 0x04, 0, 0},
+ {0x380f, 0x60, 0, 0}, {0x3612, 0x2b, 0, 0}, {0x3708, 0x64, 0, 0},
+ {0x3a02, 0x04, 0, 0}, {0x3a03, 0x60, 0, 0}, {0x3a08, 0x01, 0, 0},
+ {0x3a09, 0x50, 0, 0}, {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x18, 0, 0},
+ {0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x04, 0, 0},
+ {0x3a15, 0x60, 0, 0}, {0x4713, 0x02, 0, 0}, {0x4407, 0x04, 0, 0},
+ {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3824, 0x04, 0, 0},
+ {0x4005, 0x1a, 0, 0}, {0x3008, 0x02, 0, 0},
+ {0x3503, 0, 0, 0},
+};
+
+static const struct reg_value ov5640_setting_15fps_1080P_1920_1080[] = {
+ {0x3008, 0x42, 0, 0},
+ {0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x08, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0}, {0x3814, 0x11, 0, 0},
+ {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
+ {0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
+ {0x3808, 0x0a, 0, 0}, {0x3809, 0x20, 0, 0}, {0x380a, 0x07, 0, 0},
+ {0x380b, 0x98, 0, 0}, {0x380c, 0x0b, 0, 0}, {0x380d, 0x1c, 0, 0},
+ {0x380e, 0x07, 0, 0}, {0x380f, 0xb0, 0, 0}, {0x3810, 0x00, 0, 0},
+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
+ {0x3618, 0x04, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x21, 0, 0},
+ {0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x03, 0, 0},
+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 0}, {0x3035, 0x21, 0, 0},
+ {0x3036, 0x54, 0, 1}, {0x3c07, 0x07, 0, 0}, {0x3c08, 0x00, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3800, 0x01, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3802, 0x01, 0, 0},
+ {0x3803, 0xb2, 0, 0}, {0x3804, 0x08, 0, 0}, {0x3805, 0xef, 0, 0},
+ {0x3806, 0x05, 0, 0}, {0x3807, 0xf1, 0, 0}, {0x3808, 0x07, 0, 0},
+ {0x3809, 0x80, 0, 0}, {0x380a, 0x04, 0, 0}, {0x380b, 0x38, 0, 0},
+ {0x380c, 0x09, 0, 0}, {0x380d, 0xc4, 0, 0}, {0x380e, 0x04, 0, 0},
+ {0x380f, 0x60, 0, 0}, {0x3612, 0x2b, 0, 0}, {0x3708, 0x64, 0, 0},
+ {0x3a02, 0x04, 0, 0}, {0x3a03, 0x60, 0, 0}, {0x3a08, 0x01, 0, 0},
+ {0x3a09, 0x50, 0, 0}, {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x18, 0, 0},
+ {0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x04, 0, 0},
+ {0x3a15, 0x60, 0, 0}, {0x4713, 0x02, 0, 0}, {0x4407, 0x04, 0, 0},
+ {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3824, 0x04, 0, 0},
+ {0x4005, 0x1a, 0, 0}, {0x3008, 0x02, 0, 0}, {0x3503, 0, 0, 0},
+};
+
+static const struct reg_value ov5640_setting_15fps_QSXGA_2592_1944[] = {
+ {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0},
+ {0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x08, 0, 0},
+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
+ {0x3820, 0x40, 0, 0}, {0x3821, 0x06, 0, 0}, {0x3814, 0x11, 0, 0},
+ {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
+ {0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
+ {0x3808, 0x0a, 0, 0}, {0x3809, 0x20, 0, 0}, {0x380a, 0x07, 0, 0},
+ {0x380b, 0x98, 0, 0}, {0x380c, 0x0b, 0, 0}, {0x380d, 0x1c, 0, 0},
+ {0x380e, 0x07, 0, 0}, {0x380f, 0xb0, 0, 0}, {0x3810, 0x00, 0, 0},
+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
+ {0x3618, 0x04, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x21, 0, 0},
+ {0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x03, 0, 0},
+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
+ {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0}, {0x4713, 0x03, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 70},
+};
+
+/* power-on sensor init reg table */
+static const struct ov5640_mode_info ov5640_mode_init_data = {
+ 0, SUBSAMPLING, 640, 480, ov5640_init_setting_30fps_VGA,
+ ARRAY_SIZE(ov5640_init_setting_30fps_VGA),
+};
+
+static const struct ov5640_mode_info
+ov5640_mode_data[OV5640_NUM_FRAMERATES][OV5640_NUM_MODES] = {
+ {
+ {OV5640_MODE_QCIF_176_144, SUBSAMPLING, 176, 144,
+ ov5640_setting_15fps_QCIF_176_144,
+ ARRAY_SIZE(ov5640_setting_15fps_QCIF_176_144)},
+ {OV5640_MODE_QVGA_320_240, SUBSAMPLING, 320, 240,
+ ov5640_setting_15fps_QVGA_320_240,
+ ARRAY_SIZE(ov5640_setting_15fps_QVGA_320_240)},
+ {OV5640_MODE_VGA_640_480, SUBSAMPLING, 640, 480,
+ ov5640_setting_15fps_VGA_640_480,
+ ARRAY_SIZE(ov5640_setting_15fps_VGA_640_480)},
+ {OV5640_MODE_NTSC_720_480, SUBSAMPLING, 720, 480,
+ ov5640_setting_15fps_NTSC_720_480,
+ ARRAY_SIZE(ov5640_setting_15fps_NTSC_720_480)},
+ {OV5640_MODE_PAL_720_576, SUBSAMPLING, 720, 576,
+ ov5640_setting_15fps_PAL_720_576,
+ ARRAY_SIZE(ov5640_setting_15fps_PAL_720_576)},
+ {OV5640_MODE_XGA_1024_768, SUBSAMPLING, 1024, 768,
+ ov5640_setting_15fps_XGA_1024_768,
+ ARRAY_SIZE(ov5640_setting_15fps_XGA_1024_768)},
+ {OV5640_MODE_720P_1280_720, SUBSAMPLING, 1280, 720,
+ ov5640_setting_15fps_720P_1280_720,
+ ARRAY_SIZE(ov5640_setting_15fps_720P_1280_720)},
+ {OV5640_MODE_1080P_1920_1080, SCALING, 1920, 1080,
+ ov5640_setting_15fps_1080P_1920_1080,
+ ARRAY_SIZE(ov5640_setting_15fps_1080P_1920_1080)},
+ {OV5640_MODE_QSXGA_2592_1944, SCALING, 2592, 1944,
+ ov5640_setting_15fps_QSXGA_2592_1944,
+ ARRAY_SIZE(ov5640_setting_15fps_QSXGA_2592_1944)},
+ }, {
+ {OV5640_MODE_QCIF_176_144, SUBSAMPLING, 176, 144,
+ ov5640_setting_30fps_QCIF_176_144,
+ ARRAY_SIZE(ov5640_setting_30fps_QCIF_176_144)},
+ {OV5640_MODE_QVGA_320_240, SUBSAMPLING, 320, 240,
+ ov5640_setting_30fps_QVGA_320_240,
+ ARRAY_SIZE(ov5640_setting_30fps_QVGA_320_240)},
+ {OV5640_MODE_VGA_640_480, SUBSAMPLING, 640, 480,
+ ov5640_setting_30fps_VGA_640_480,
+ ARRAY_SIZE(ov5640_setting_30fps_VGA_640_480)},
+ {OV5640_MODE_NTSC_720_480, SUBSAMPLING, 720, 480,
+ ov5640_setting_30fps_NTSC_720_480,
+ ARRAY_SIZE(ov5640_setting_30fps_NTSC_720_480)},
+ {OV5640_MODE_PAL_720_576, SUBSAMPLING, 720, 576,
+ ov5640_setting_30fps_PAL_720_576,
+ ARRAY_SIZE(ov5640_setting_30fps_PAL_720_576)},
+ {OV5640_MODE_XGA_1024_768, SUBSAMPLING, 1024, 768,
+ ov5640_setting_30fps_XGA_1024_768,
+ ARRAY_SIZE(ov5640_setting_30fps_XGA_1024_768)},
+ {OV5640_MODE_720P_1280_720, SUBSAMPLING, 1280, 720,
+ ov5640_setting_30fps_720P_1280_720,
+ ARRAY_SIZE(ov5640_setting_30fps_720P_1280_720)},
+ {OV5640_MODE_1080P_1920_1080, SCALING, 1920, 1080,
+ ov5640_setting_30fps_1080P_1920_1080,
+ ARRAY_SIZE(ov5640_setting_30fps_1080P_1920_1080)},
+ {OV5640_MODE_QSXGA_2592_1944, -1, 0, 0, NULL, 0},
+ },
+};
+
+static int ov5640_init_slave_id(struct ov5640_dev *sensor)
+{
+ struct i2c_client *client = sensor->i2c_client;
+ struct i2c_msg msg;
+ u8 buf[3];
+ int ret;
+
+ if (client->addr == OV5640_DEFAULT_SLAVE_ID)
+ return 0;
+
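+ /*
+ * The sensor answers only at its power-on default address, so the
+ * new slave ID must be programmed through that default address
+ * before client->addr can be used.
+ */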
+ buf[0] = OV5640_REG_SLAVE_ID >> 8;
+ buf[1] = OV5640_REG_SLAVE_ID & 0xff;
+ buf[2] = client->addr << 1;
+
+ msg.addr = OV5640_DEFAULT_SLAVE_ID;
+ msg.flags = 0;
+ msg.buf = buf;
+ msg.len = sizeof(buf);
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret < 0) {
+ dev_err(&client->dev, "%s: failed with %d\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ov5640_write_reg(struct ov5640_dev *sensor, u16 reg, u8 val)
+{
+ struct i2c_client *client = sensor->i2c_client;
+ struct i2c_msg msg;
+ u8 buf[3];
+ int ret;
+
+ buf[0] = reg >> 8;
+ buf[1] = reg & 0xff;
+ buf[2] = val;
+
+ msg.addr = client->addr;
+ msg.flags = client->flags;
+ msg.buf = buf;
+ msg.len = sizeof(buf);
+
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret < 0) {
+ v4l2_err(&sensor->sd, "%s: error: reg=%x, val=%x\n",
+ __func__, reg, val);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ov5640_read_reg(struct ov5640_dev *sensor, u16 reg, u8 *val)
+{
+ struct i2c_client *client = sensor->i2c_client;
+ struct i2c_msg msg[2];
+ u8 buf[2];
+ int ret;
+
+ buf[0] = reg >> 8;
+ buf[1] = reg & 0xff;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = client->flags;
+ msg[0].buf = buf;
+ msg[0].len = sizeof(buf);
+
+ msg[1].addr = client->addr;
+ msg[1].flags = client->flags | I2C_M_RD;
+ msg[1].buf = buf;
+ msg[1].len = 1;
+
+ ret = i2c_transfer(client->adapter, msg, 2);
+ if (ret < 0)
+ return ret;
+
+ *val = buf[0];
+ return 0;
+}
+
+static int ov5640_read_reg16(struct ov5640_dev *sensor, u16 reg, u16 *val)
+{
+ u8 hi, lo;
+ int ret;
+
+ ret = ov5640_read_reg(sensor, reg, &hi);
+ if (ret)
+ return ret;
+ ret = ov5640_read_reg(sensor, reg + 1, &lo);
+ if (ret)
+ return ret;
+
+ *val = ((u16)hi << 8) | (u16)lo;
+ return 0;
+}
+
+static int ov5640_write_reg16(struct ov5640_dev *sensor, u16 reg, u16 val)
+{
+ int ret;
+
+ ret = ov5640_write_reg(sensor, reg, val >> 8);
+ if (ret)
+ return ret;
+
+ return ov5640_write_reg(sensor, reg + 1, val & 0xff);
+}
+
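+/* read-modify-write: update only the bits selected by @mask */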
+static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg,
+ u8 mask, u8 val)
+{
+ u8 readval;
+ int ret;
+
+ ret = ov5640_read_reg(sensor, reg, &readval);
+ if (ret)
+ return ret;
+
+ readval &= ~mask;
+ val &= mask;
+ val |= readval;
+
+ return ov5640_write_reg(sensor, reg, val);
+}
+
+/* download ov5640 settings to sensor through i2c */
+static int ov5640_load_regs(struct ov5640_dev *sensor,
+ const struct ov5640_mode_info *mode)
+{
+ const struct reg_value *regs = mode->reg_data;
+ unsigned int i;
+ u32 delay_ms;
+ u16 reg_addr;
+ u8 mask, val;
+ int ret = 0;
+
+ for (i = 0; i < mode->reg_data_size; ++i, ++regs) {
+ delay_ms = regs->delay_ms;
+ reg_addr = regs->reg_addr;
+ val = regs->val;
+ mask = regs->mask;
+
+ if (mask)
+ ret = ov5640_mod_reg(sensor, reg_addr, mask, val);
+ else
+ ret = ov5640_write_reg(sensor, reg_addr, val);
+ if (ret)
+ break;
+
+ if (delay_ms)
+ usleep_range(1000 * delay_ms, 1000 * delay_ms + 100);
+ }
+
+ return ret;
+}
+
+/* read exposure, in number of line periods */
+static int ov5640_get_exposure(struct ov5640_dev *sensor)
+{
+ int exp, ret;
+ u8 temp;
+
+ ret = ov5640_read_reg(sensor, OV5640_REG_AEC_PK_EXPOSURE_HI, &temp);
+ if (ret)
+ return ret;
+ exp = ((int)temp & 0x0f) << 16;
+ ret = ov5640_read_reg(sensor, OV5640_REG_AEC_PK_EXPOSURE_MED, &temp);
+ if (ret)
+ return ret;
+ exp |= ((int)temp << 8);
+ ret = ov5640_read_reg(sensor, OV5640_REG_AEC_PK_EXPOSURE_LO, &temp);
+ if (ret)
+ return ret;
+ exp |= (int)temp;
+
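+ /* the registers hold the exposure in units of 1/16 line period */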
+ return exp >> 4;
+}
+
+/* write exposure, given number of line periods */
+static int ov5640_set_exposure(struct ov5640_dev *sensor, u32 exposure)
+{
+ int ret;
+
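+ /* the AEC PK EXPOSURE registers take the value in 1/16-line units */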
+ exposure <<= 4;
+
+ ret = ov5640_write_reg(sensor,
+ OV5640_REG_AEC_PK_EXPOSURE_LO,
+ exposure & 0xff);
+ if (ret)
+ return ret;
+ ret = ov5640_write_reg(sensor,
+ OV5640_REG_AEC_PK_EXPOSURE_MED,
+ (exposure >> 8) & 0xff);
+ if (ret)
+ return ret;
+ return ov5640_write_reg(sensor,
+ OV5640_REG_AEC_PK_EXPOSURE_HI,
+ (exposure >> 16) & 0x0f);
+}
+
+static int ov5640_get_gain(struct ov5640_dev *sensor)
+{
+ u16 gain;
+ int ret;
+
+ ret = ov5640_read_reg16(sensor, OV5640_REG_AEC_PK_REAL_GAIN, &gain);
+ if (ret)
+ return ret;
+
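+ /* the gain value is held in the low 10 bits of the register pair */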
+ return gain & 0x3ff;
+}
+
+static int ov5640_set_stream(struct ov5640_dev *sensor, bool on)
+{
+ int ret;
+
+ ret = ov5640_mod_reg(sensor, OV5640_REG_MIPI_CTRL00, BIT(5),
+ on ? 0 : BIT(5));
+ if (ret)
+ return ret;
+ ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00,
+ on ? 0x00 : 0x70);
+ if (ret)
+ return ret;
+
+ return ov5640_write_reg(sensor, OV5640_REG_FRAME_CTRL01,
+ on ? 0x00 : 0x0f);
+}
+
+static int ov5640_get_sysclk(struct ov5640_dev *sensor)
+{
+ /* calculate sysclk */
+ u32 xvclk = sensor->xclk_freq / 10000;
+ u32 multiplier, prediv, VCO, sysdiv, pll_rdiv;
+ u32 sclk_rdiv_map[] = {1, 2, 4, 8};
+ u32 bit_div2x = 1, sclk_rdiv, sysclk;
+ u8 temp1, temp2;
+ int ret;
+
+ ret = ov5640_read_reg(sensor, OV5640_REG_SC_PLL_CTRL0, &temp1);
+ if (ret)
+ return ret;
+ temp2 = temp1 & 0x0f;
+ if (temp2 == 8 || temp2 == 10)
+ bit_div2x = temp2 / 2;
+
+ ret = ov5640_read_reg(sensor, OV5640_REG_SC_PLL_CTRL1, &temp1);
+ if (ret)
+ return ret;
+ sysdiv = temp1 >> 4;
+ if (sysdiv == 0)
+ sysdiv = 16;
+
+ ret = ov5640_read_reg(sensor, OV5640_REG_SC_PLL_CTRL2, &temp1);
+ if (ret)
+ return ret;
+ multiplier = temp1;
+
+ ret = ov5640_read_reg(sensor, OV5640_REG_SC_PLL_CTRL3, &temp1);
+ if (ret)
+ return ret;
+ prediv = temp1 & 0x0f;
+ pll_rdiv = ((temp1 >> 4) & 0x01) + 1;
+
+ ret = ov5640_read_reg(sensor, OV5640_REG_SYS_ROOT_DIVIDER, &temp1);
+ if (ret)
+ return ret;
+ temp2 = temp1 & 0x03;
+ sclk_rdiv = sclk_rdiv_map[temp2];
+
+ if (!prediv || !sysdiv || !pll_rdiv || !bit_div2x)
+ return -EINVAL;
+
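+ /*
+ * VCO = xvclk * PLL multiplier / pre-divider; sysclk is then derived
+ * through the system divider, PLL root divider, bit divider and SCLK
+ * root divider. xvclk is in 10 kHz units, so the returned sysclk is
+ * in 10 kHz units as well.
+ */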
+ VCO = xvclk * multiplier / prediv;
+
+ sysclk = VCO / sysdiv / pll_rdiv * 2 / bit_div2x / sclk_rdiv;
+
+ return sysclk;
+}
+
+static int ov5640_set_night_mode(struct ov5640_dev *sensor)
+{
+ /* clear bit 2 of AEC CTRL00 to disable night mode */
+ u8 mode;
+ int ret;
+
+ ret = ov5640_read_reg(sensor, OV5640_REG_AEC_CTRL00, &mode);
+ if (ret)
+ return ret;
+ mode &= 0xfb;
+ return ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL00, mode);
+}
+
+static int ov5640_get_hts(struct ov5640_dev *sensor)
+{
+ /* read HTS from register settings */
+ u16 hts;
+ int ret;
+
+ ret = ov5640_read_reg16(sensor, OV5640_REG_TIMING_HTS, &hts);
+ if (ret)
+ return ret;
+ return hts;
+}
+
+static int ov5640_get_vts(struct ov5640_dev *sensor)
+{
+ u16 vts;
+ int ret;
+
+ ret = ov5640_read_reg16(sensor, OV5640_REG_TIMING_VTS, &vts);
+ if (ret)
+ return ret;
+ return vts;
+}
+
+static int ov5640_set_vts(struct ov5640_dev *sensor, int vts)
+{
+ return ov5640_write_reg16(sensor, OV5640_REG_TIMING_VTS, vts);
+}
+
+static int ov5640_get_light_freq(struct ov5640_dev *sensor)
+{
+ /* get banding filter value */
+ int ret, light_freq = 0;
+ u8 temp, temp1;
+
+ ret = ov5640_read_reg(sensor, OV5640_REG_HZ5060_CTRL01, &temp);
+ if (ret)
+ return ret;
+
+ if (temp & 0x80) {
+ /* manual */
+ ret = ov5640_read_reg(sensor, OV5640_REG_HZ5060_CTRL00,
+ &temp1);
+ if (ret)
+ return ret;
+ if (temp1 & 0x04) {
+ /* 50Hz */
+ light_freq = 50;
+ } else {
+ /* 60Hz */
+ light_freq = 60;
+ }
+ } else {
+ /* auto */
+ ret = ov5640_read_reg(sensor, OV5640_REG_SIGMADELTA_CTRL0C,
+ &temp1);
+ if (ret)
+ return ret;
+
+ if (temp1 & 0x01) {
+ /* 50Hz */
+ light_freq = 50;
+ } else {
+ /* 60Hz */
+ light_freq = 60;
+ }
+ }
+
+ return light_freq;
+}
+
+static int ov5640_set_bandingfilter(struct ov5640_dev *sensor)
+{
+ u32 band_step60, max_band60, band_step50, max_band50, prev_vts;
+ int ret;
+
+ /* read preview PCLK */
+ ret = ov5640_get_sysclk(sensor);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -EINVAL;
+ sensor->prev_sysclk = ret;
+ /* read preview HTS */
+ ret = ov5640_get_hts(sensor);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -EINVAL;
+ sensor->prev_hts = ret;
+
+ /* read preview VTS */
+ ret = ov5640_get_vts(sensor);
+ if (ret < 0)
+ return ret;
+ prev_vts = ret;
+
+ /* calculate banding filter */
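+ /*
+ * The band step is the number of scan lines exposed during one
+ * light flicker period (1/120 s for 60 Hz mains, 1/100 s for 50 Hz);
+ * prev_sysclk is in 10 kHz units, hence the * 100 scale factors.
+ */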
+ /* 60Hz */
+ band_step60 = sensor->prev_sysclk * 100 / sensor->prev_hts * 100 / 120;
+ ret = ov5640_write_reg16(sensor, OV5640_REG_AEC_B60_STEP, band_step60);
+ if (ret)
+ return ret;
+ if (!band_step60)
+ return -EINVAL;
+ max_band60 = (int)((prev_vts - 4) / band_step60);
+ ret = ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL0D, max_band60);
+ if (ret)
+ return ret;
+
+ /* 50Hz */
+ band_step50 = sensor->prev_sysclk * 100 / sensor->prev_hts;
+ ret = ov5640_write_reg16(sensor, OV5640_REG_AEC_B50_STEP, band_step50);
+ if (ret)
+ return ret;
+ if (!band_step50)
+ return -EINVAL;
+ max_band50 = (int)((prev_vts - 4) / band_step50);
+ return ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL0E, max_band50);
+}
+
+static int ov5640_set_ae_target(struct ov5640_dev *sensor, int target)
+{
+ /* stable in high */
+ u32 fast_high, fast_low;
+ int ret;
+
+ sensor->ae_low = target * 23 / 25; /* 0.92 */
+ sensor->ae_high = target * 27 / 25; /* 1.08 */
+
+ fast_high = sensor->ae_high << 1;
+ if (fast_high > 255)
+ fast_high = 255;
+
+ fast_low = sensor->ae_low >> 1;
+
+ ret = ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL0F, sensor->ae_high);
+ if (ret)
+ return ret;
+ ret = ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL10, sensor->ae_low);
+ if (ret)
+ return ret;
+ ret = ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL1B, sensor->ae_high);
+ if (ret)
+ return ret;
+ ret = ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL1E, sensor->ae_low);
+ if (ret)
+ return ret;
+ ret = ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL11, fast_high);
+ if (ret)
+ return ret;
+ return ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL1F, fast_low);
+}
+
+static int ov5640_binning_on(struct ov5640_dev *sensor)
+{
+ u8 temp;
+ int ret;
+
+ ret = ov5640_read_reg(sensor, OV5640_REG_TIMING_TC_REG21, &temp);
+ if (ret)
+ return ret;
+ /* horizontal binning is enabled by bit 0 of TIMING TC REG21 */
+ return !!(temp & BIT(0));
+}
+
+static int ov5640_set_virtual_channel(struct ov5640_dev *sensor)
+{
+ u8 temp, channel = virtual_channel;
+ int ret;
+
+ if (channel > 3)
+ return -EINVAL;
+
+ ret = ov5640_read_reg(sensor, OV5640_REG_DEBUG_MODE, &temp);
+ if (ret)
+ return ret;
+ temp &= ~(3 << 6);
+ temp |= (channel << 6);
+ return ov5640_write_reg(sensor, OV5640_REG_DEBUG_MODE, temp);
+}
+
+static const struct ov5640_mode_info *
+ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr,
+ int width, int height, bool nearest)
+{
+ const struct ov5640_mode_info *mode = NULL;
+ int i;
+
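+ /*
+ * Go through the modes from largest to smallest; with @nearest, the
+ * first mode that fits inside the requested size wins, falling back
+ * to the smallest mode if nothing fits.
+ */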
+ for (i = OV5640_NUM_MODES - 1; i >= 0; i--) {
+ mode = &ov5640_mode_data[fr][i];
+
+ if (!mode->reg_data)
+ continue;
+
+ if ((nearest && mode->width <= width &&
+ mode->height <= height) ||
+ (!nearest && mode->width == width &&
+ mode->height == height))
+ break;
+ }
+
+ if (nearest && i < 0)
+ mode = &ov5640_mode_data[fr][0];
+
+ return mode;
+}
+
+/*
+ * sensor changes between scaling and subsampling, go through
+ * exposure calculation
+ */
+static int ov5640_set_mode_exposure_calc(
+ struct ov5640_dev *sensor, const struct ov5640_mode_info *mode)
+{
+ u32 prev_shutter, prev_gain16;
+ u32 cap_shutter, cap_gain16;
+ u32 cap_sysclk, cap_hts, cap_vts;
+ u32 light_freq, cap_bandfilt, cap_maxband;
+ u32 cap_gain16_shutter;
+ u8 average;
+ int ret;
+
+ if (mode->reg_data == NULL)
+ return -EINVAL;
+
+ /* read preview shutter */
+ ret = ov5640_get_exposure(sensor);
+ if (ret < 0)
+ return ret;
+ prev_shutter = ret;
+ ret = ov5640_binning_on(sensor);
+ if (ret < 0)
+ return ret;
+ if (ret && mode->id != OV5640_MODE_720P_1280_720 &&
+ mode->id != OV5640_MODE_1080P_1920_1080)
+ prev_shutter *= 2;
+
+ /* read preview gain */
+ ret = ov5640_get_gain(sensor);
+ if (ret < 0)
+ return ret;
+ prev_gain16 = ret;
+
+ /* get average */
+ ret = ov5640_read_reg(sensor, OV5640_REG_AVG_READOUT, &average);
+ if (ret)
+ return ret;
+
+ /* turn off night mode for capture */
+ ret = ov5640_set_night_mode(sensor);
+ if (ret < 0)
+ return ret;
+
+ /* Write capture setting */
+ ret = ov5640_load_regs(sensor, mode);
+ if (ret < 0)
+ return ret;
+
+ /* read capture VTS */
+ ret = ov5640_get_vts(sensor);
+ if (ret < 0)
+ return ret;
+ cap_vts = ret;
+ ret = ov5640_get_hts(sensor);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -EINVAL;
+ cap_hts = ret;
+
+ ret = ov5640_get_sysclk(sensor);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -EINVAL;
+ cap_sysclk = ret;
+
+ /* calculate capture banding filter */
+ ret = ov5640_get_light_freq(sensor);
+ if (ret < 0)
+ return ret;
+ light_freq = ret;
+
+ if (light_freq == 60) {
+ /* 60Hz */
+ cap_bandfilt = cap_sysclk * 100 / cap_hts * 100 / 120;
+ } else {
+ /* 50Hz */
+ cap_bandfilt = cap_sysclk * 100 / cap_hts;
+ }
+
+ if (!sensor->prev_sysclk) {
+ ret = ov5640_get_sysclk(sensor);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -EINVAL;
+ sensor->prev_sysclk = ret;
+ }
+
+ if (!cap_bandfilt)
+ return -EINVAL;
+
+ cap_maxband = (int)((cap_vts - 4) / cap_bandfilt);
+
+ /* calculate capture shutter/gain16 */
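+ /*
+ * The combined gain16 * shutter product is carried over from the
+ * preview mode, rescaled by the capture/preview sysclk and HTS
+ * ratios; if the measured average is already inside the stable AE
+ * window, it is additionally scaled toward the AE target.
+ */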
+ if (average > sensor->ae_low && average < sensor->ae_high) {
+ /* in stable range */
+ cap_gain16_shutter =
+ prev_gain16 * prev_shutter *
+ cap_sysclk / sensor->prev_sysclk *
+ sensor->prev_hts / cap_hts *
+ sensor->ae_target / average;
+ } else {
+ cap_gain16_shutter =
+ prev_gain16 * prev_shutter *
+ cap_sysclk / sensor->prev_sysclk *
+ sensor->prev_hts / cap_hts;
+ }
+
+ /* gain to shutter */
+ if (cap_gain16_shutter < (cap_bandfilt * 16)) {
+ /* shutter < 1/100 */
+ cap_shutter = cap_gain16_shutter / 16;
+ if (cap_shutter < 1)
+ cap_shutter = 1;
+
+ cap_gain16 = cap_gain16_shutter / cap_shutter;
+ if (cap_gain16 < 16)
+ cap_gain16 = 16;
+ } else {
+ if (cap_gain16_shutter > (cap_bandfilt * cap_maxband * 16)) {
+ /* exposure reach max */
+ cap_shutter = cap_bandfilt * cap_maxband;
+ if (!cap_shutter)
+ return -EINVAL;
+
+ cap_gain16 = cap_gain16_shutter / cap_shutter;
+ } else {
+ /* 1/100 < (cap_shutter = n/100) <= max */
+ cap_shutter =
+ ((int)(cap_gain16_shutter / 16 / cap_bandfilt))
+ * cap_bandfilt;
+ if (!cap_shutter)
+ return -EINVAL;
+
+ cap_gain16 = cap_gain16_shutter / cap_shutter;
+ }
+ }
+
+ /* set capture gain */
+ ret = __v4l2_ctrl_s_ctrl(sensor->ctrls.gain, cap_gain16);
+ if (ret)
+ return ret;
+
+ /* write capture shutter */
+ if (cap_shutter > (cap_vts - 4)) {
+ cap_vts = cap_shutter + 4;
+ ret = ov5640_set_vts(sensor, cap_vts);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* set exposure */
+ return __v4l2_ctrl_s_ctrl(sensor->ctrls.exposure, cap_shutter);
+}
+
+/*
+ * if sensor changes inside scaling or subsampling
+ * change mode directly
+ */
+static int ov5640_set_mode_direct(struct ov5640_dev *sensor,
+ const struct ov5640_mode_info *mode)
+{
+ int ret;
+
+ if (mode->reg_data == NULL)
+ return -EINVAL;
+
+ /* Write capture setting */
+ ret = ov5640_load_regs(sensor, mode);
+ if (ret < 0)
+ return ret;
+
+ /* turn auto gain/exposure back on for direct mode */
+ ret = __v4l2_ctrl_s_ctrl(sensor->ctrls.auto_gain, 1);
+ if (ret)
+ return ret;
+ return __v4l2_ctrl_s_ctrl(sensor->ctrls.auto_exp, V4L2_EXPOSURE_AUTO);
+}
+
+static int ov5640_set_mode(struct ov5640_dev *sensor,
+ const struct ov5640_mode_info *orig_mode)
+{
+ const struct ov5640_mode_info *mode = sensor->current_mode;
+ enum ov5640_downsize_mode dn_mode, orig_dn_mode;
+ int ret;
+
+ dn_mode = mode->dn_mode;
+ orig_dn_mode = orig_mode->dn_mode;
+
+ /* auto gain and exposure must be turned off when changing modes */
+ ret = __v4l2_ctrl_s_ctrl(sensor->ctrls.auto_gain, 0);
+ if (ret)
+ return ret;
+ ret = __v4l2_ctrl_s_ctrl(sensor->ctrls.auto_exp, V4L2_EXPOSURE_MANUAL);
+ if (ret)
+ return ret;
+
+ if ((dn_mode == SUBSAMPLING && orig_dn_mode == SCALING) ||
+ (dn_mode == SCALING && orig_dn_mode == SUBSAMPLING)) {
+ /*
+ * change between subsampling and scaling
+ * go through exposure calculation
+ */
+ ret = ov5640_set_mode_exposure_calc(sensor, mode);
+ } else {
+ /*
+ * change inside subsampling or scaling
+ * download firmware directly
+ */
+ ret = ov5640_set_mode_direct(sensor, mode);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ ret = ov5640_set_ae_target(sensor, sensor->ae_target);
+ if (ret < 0)
+ return ret;
+ ret = ov5640_get_light_freq(sensor);
+ if (ret < 0)
+ return ret;
+ ret = ov5640_set_bandingfilter(sensor);
+ if (ret < 0)
+ return ret;
+ ret = ov5640_set_virtual_channel(sensor);
+ if (ret < 0)
+ return ret;
+
+ sensor->pending_mode_change = false;
+
+ return 0;
+}
+
+/* restore the last set video mode after chip power-on */
+static int ov5640_restore_mode(struct ov5640_dev *sensor)
+{
+ int ret;
+
+ /* first load the initial register values */
+ ret = ov5640_load_regs(sensor, &ov5640_mode_init_data);
+ if (ret < 0)
+ return ret;
+
+ /* now restore the last capture mode */
+ return ov5640_set_mode(sensor, &ov5640_mode_init_data);
+}
+
+static void ov5640_power(struct ov5640_dev *sensor, bool enable)
+{
+ if (sensor->pwdn_gpio)
+ gpiod_set_value(sensor->pwdn_gpio, enable ? 0 : 1);
+}
+
+static void ov5640_reset(struct ov5640_dev *sensor)
+{
+ if (!sensor->reset_gpio)
+ return;
+
+ gpiod_set_value(sensor->reset_gpio, 0);
+
+ /* camera power cycle */
+ ov5640_power(sensor, false);
+ usleep_range(5000, 10000);
+ ov5640_power(sensor, true);
+ usleep_range(5000, 10000);
+
+ gpiod_set_value(sensor->reset_gpio, 1);
+ usleep_range(1000, 2000);
+
+ gpiod_set_value(sensor->reset_gpio, 0);
+ usleep_range(5000, 10000);
+}
+
+static int ov5640_set_power(struct ov5640_dev *sensor, bool on)
+{
+ int ret = 0;
+
+ if (on) {
+ clk_prepare_enable(sensor->xclk);
+
+ ret = regulator_bulk_enable(OV5640_NUM_SUPPLIES,
+ sensor->supplies);
+ if (ret)
+ goto xclk_off;
+
+ ov5640_reset(sensor);
+ ov5640_power(sensor, true);
+
+ ret = ov5640_init_slave_id(sensor);
+ if (ret)
+ goto power_off;
+
+ ret = ov5640_restore_mode(sensor);
+ if (ret)
+ goto power_off;
+
+ /*
+ * start streaming briefly followed by stream off in
+ * order to coax the clock lane into LP-11 state.
+ */
+ ret = ov5640_set_stream(sensor, true);
+ if (ret)
+ goto power_off;
+ usleep_range(1000, 2000);
+ ret = ov5640_set_stream(sensor, false);
+ if (ret)
+ goto power_off;
+
+ return 0;
+ }
+
+power_off:
+ ov5640_power(sensor, false);
+ regulator_bulk_disable(OV5640_NUM_SUPPLIES, sensor->supplies);
+xclk_off:
+ clk_disable_unprepare(sensor->xclk);
+ return ret;
+}
+
+/* --------------- Subdev Operations --------------- */
+
+static int ov5640_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ int ret = 0;
+
+ mutex_lock(&sensor->lock);
+
+ /*
+ * If the power count is modified from 0 to != 0 or from != 0 to 0,
+ * update the power state.
+ */
+ if (sensor->power_count == !on) {
+ ret = ov5640_set_power(sensor, !!on);
+ if (ret)
+ goto out;
+ }
+
+ /* Update the power count. */
+ sensor->power_count += on ? 1 : -1;
+ WARN_ON(sensor->power_count < 0);
+out:
+ mutex_unlock(&sensor->lock);
+
+ if (on && !ret && sensor->power_count == 1) {
+ /* restore controls */
+ ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
+ }
+
+ return ret;
+}
+
+static int ov5640_try_frame_interval(struct ov5640_dev *sensor,
+ struct v4l2_fract *fi,
+ u32 width, u32 height)
+{
+ const struct ov5640_mode_info *mode;
+ u32 minfps, maxfps, fps;
+ int ret;
+
+ minfps = ov5640_framerates[OV5640_15_FPS];
+ maxfps = ov5640_framerates[OV5640_30_FPS];
+
+ if (fi->numerator == 0) {
+ fi->denominator = maxfps;
+ fi->numerator = 1;
+ return OV5640_30_FPS;
+ }
+
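+ /* clamp and round the requested rate to the closest supported one (15 or 30 fps) */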
+ fps = DIV_ROUND_CLOSEST(fi->denominator, fi->numerator);
+
+ fi->numerator = 1;
+ if (fps > maxfps)
+ fi->denominator = maxfps;
+ else if (fps < minfps)
+ fi->denominator = minfps;
+ else if (2 * fps >= 2 * minfps + (maxfps - minfps))
+ fi->denominator = maxfps;
+ else
+ fi->denominator = minfps;
+
+ ret = (fi->denominator == minfps) ? OV5640_15_FPS : OV5640_30_FPS;
+
+ mode = ov5640_find_mode(sensor, ret, width, height, false);
+ return mode ? ret : -EINVAL;
+}
+
+static int ov5640_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ struct v4l2_mbus_framefmt *fmt;
+
+ if (format->pad != 0)
+ return -EINVAL;
+
+ mutex_lock(&sensor->lock);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt = v4l2_subdev_get_try_format(&sensor->sd, cfg,
+ format->pad);
+ else
+ fmt = &sensor->fmt;
+
+ format->format = *fmt;
+
+ mutex_unlock(&sensor->lock);
+
+ return 0;
+}
+
+static int ov5640_try_fmt_internal(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *fmt,
+ enum ov5640_frame_rate fr,
+ const struct ov5640_mode_info **new_mode)
+{
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ const struct ov5640_mode_info *mode;
+
+ mode = ov5640_find_mode(sensor, fr, fmt->width, fmt->height, true);
+ if (!mode)
+ return -EINVAL;
+
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = sensor->fmt.code;
+
+ if (new_mode)
+ *new_mode = mode;
+ return 0;
+}
+
+static int ov5640_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ const struct ov5640_mode_info *new_mode;
+ int ret;
+
+ if (format->pad != 0)
+ return -EINVAL;
+
+ mutex_lock(&sensor->lock);
+
+ if (sensor->streaming) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = ov5640_try_fmt_internal(sd, &format->format,
+ sensor->current_fr, &new_mode);
+ if (ret)
+ goto out;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ struct v4l2_mbus_framefmt *fmt =
+ v4l2_subdev_get_try_format(sd, cfg, 0);
+
+ *fmt = format->format;
+ goto out;
+ }
+
+ sensor->current_mode = new_mode;
+ sensor->fmt = format->format;
+ sensor->pending_mode_change = true;
+out:
+ mutex_unlock(&sensor->lock);
+ return ret;
+}
+
+/*
+ * Sensor Controls.
+ */
+
+static int ov5640_set_ctrl_hue(struct ov5640_dev *sensor, int value)
+{
+ int ret;
+
+ if (value) {
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SDE_CTRL0,
+ BIT(0), BIT(0));
+ if (ret)
+ return ret;
+ ret = ov5640_write_reg16(sensor, OV5640_REG_SDE_CTRL1, value);
+ } else {
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SDE_CTRL0, BIT(0), 0);
+ }
+
+ return ret;
+}
+
+static int ov5640_set_ctrl_contrast(struct ov5640_dev *sensor, int value)
+{
+ int ret;
+
+ if (value) {
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SDE_CTRL0,
+ BIT(2), BIT(2));
+ if (ret)
+ return ret;
+ ret = ov5640_write_reg(sensor, OV5640_REG_SDE_CTRL5,
+ value & 0xff);
+ } else {
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SDE_CTRL0, BIT(2), 0);
+ }
+
+ return ret;
+}
+
+static int ov5640_set_ctrl_saturation(struct ov5640_dev *sensor, int value)
+{
+ int ret;
+
+ if (value) {
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SDE_CTRL0,
+ BIT(1), BIT(1));
+ if (ret)
+ return ret;
+ ret = ov5640_write_reg(sensor, OV5640_REG_SDE_CTRL3,
+ value & 0xff);
+ if (ret)
+ return ret;
+ ret = ov5640_write_reg(sensor, OV5640_REG_SDE_CTRL4,
+ value & 0xff);
+ } else {
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SDE_CTRL0, BIT(1), 0);
+ }
+
+ return ret;
+}
+
+static int ov5640_set_ctrl_white_balance(struct ov5640_dev *sensor, int awb)
+{
+ int ret;
+
+ ret = ov5640_mod_reg(sensor, OV5640_REG_AWB_MANUAL_CTRL,
+ BIT(0), awb ? 0 : 1);
+ if (ret)
+ return ret;
+
+ if (!awb) {
+ u16 red = (u16)sensor->ctrls.red_balance->val;
+ u16 blue = (u16)sensor->ctrls.blue_balance->val;
+
+ ret = ov5640_write_reg16(sensor, OV5640_REG_AWB_R_GAIN, red);
+ if (ret)
+ return ret;
+ ret = ov5640_write_reg16(sensor, OV5640_REG_AWB_B_GAIN, blue);
+ }
+
+ return ret;
+}
+
+static int ov5640_set_ctrl_exposure(struct ov5640_dev *sensor, int exp)
+{
+ struct ov5640_ctrls *ctrls = &sensor->ctrls;
+ bool auto_exposure = (exp == V4L2_EXPOSURE_AUTO);
+ int ret = 0;
+
+ if (ctrls->auto_exp->is_new) {
+ ret = ov5640_mod_reg(sensor, OV5640_REG_AEC_PK_MANUAL,
+ BIT(0), auto_exposure ? 0 : BIT(0));
+ if (ret)
+ return ret;
+ }
+
+ if (!auto_exposure && ctrls->exposure->is_new) {
+ u16 max_exp;
+
+ ret = ov5640_read_reg16(sensor, OV5640_REG_AEC_PK_VTS,
+ &max_exp);
+ if (ret)
+ return ret;
+ ret = ov5640_get_vts(sensor);
+ if (ret < 0)
+ return ret;
+ max_exp += ret;
+
+ if (ctrls->exposure->val < max_exp)
+ ret = ov5640_set_exposure(sensor, ctrls->exposure->val);
+ }
+
+ return ret;
+}
+
+static int ov5640_set_ctrl_gain(struct ov5640_dev *sensor, int auto_gain)
+{
+ struct ov5640_ctrls *ctrls = &sensor->ctrls;
+ int ret = 0;
+
+ if (ctrls->auto_gain->is_new) {
+ ret = ov5640_mod_reg(sensor, OV5640_REG_AEC_PK_MANUAL,
+ BIT(1), ctrls->auto_gain->val ? 0 : BIT(1));
+ if (ret)
+ return ret;
+ }
+
+ if (!auto_gain && ctrls->gain->is_new) {
+ u16 gain = (u16)ctrls->gain->val;
+
+ ret = ov5640_write_reg16(sensor, OV5640_REG_AEC_PK_REAL_GAIN,
+ gain & 0x3ff);
+ }
+
+ return ret;
+}
+
+static int ov5640_set_ctrl_test_pattern(struct ov5640_dev *sensor, int value)
+{
+ return ov5640_mod_reg(sensor, OV5640_REG_PRE_ISP_TEST_SET1,
+ 0xa4, value ? 0xa4 : 0);
+}
+
+static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ int val;
+
+ /* v4l2_ctrl_lock() locks our own mutex */
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
+ if (!ctrl->val)
+ return 0;
+ val = ov5640_get_gain(sensor);
+ if (val < 0)
+ return val;
+ sensor->ctrls.gain->val = val;
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ if (ctrl->val == V4L2_EXPOSURE_MANUAL)
+ return 0;
+ val = ov5640_get_exposure(sensor);
+ if (val < 0)
+ return val;
+ sensor->ctrls.exposure->val = val;
+ break;
+ }
+
+ return 0;
+}
+
+static int ov5640_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ int ret;
+
+ /* v4l2_ctrl_lock() locks our own mutex */
+
+ /*
+ * If the device is not powered up by the host driver, do
+ * not apply any controls to H/W at this time. Instead,
+ * the controls will be restored right after power-up.
+ */
+ if (sensor->power_count == 0)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_AUTOGAIN:
+ ret = ov5640_set_ctrl_gain(sensor, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE_AUTO:
+ ret = ov5640_set_ctrl_exposure(sensor, ctrl->val);
+ break;
+ case V4L2_CID_AUTO_WHITE_BALANCE:
+ ret = ov5640_set_ctrl_white_balance(sensor, ctrl->val);
+ break;
+ case V4L2_CID_HUE:
+ ret = ov5640_set_ctrl_hue(sensor, ctrl->val);
+ break;
+ case V4L2_CID_CONTRAST:
+ ret = ov5640_set_ctrl_contrast(sensor, ctrl->val);
+ break;
+ case V4L2_CID_SATURATION:
+ ret = ov5640_set_ctrl_saturation(sensor, ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov5640_set_ctrl_test_pattern(sensor, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov5640_ctrl_ops = {
+ .g_volatile_ctrl = ov5640_g_volatile_ctrl,
+ .s_ctrl = ov5640_s_ctrl,
+};
+
+static const char * const test_pattern_menu[] = {
+ "Disabled",
+ "Color bars",
+};
+
+static int ov5640_init_controls(struct ov5640_dev *sensor)
+{
+ const struct v4l2_ctrl_ops *ops = &ov5640_ctrl_ops;
+ struct ov5640_ctrls *ctrls = &sensor->ctrls;
+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+ int ret;
+
+ v4l2_ctrl_handler_init(hdl, 32);
+
+ /* we can use our own mutex for the ctrl lock */
+ hdl->lock = &sensor->lock;
+
+ /* Auto/manual white balance */
+ ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops,
+ V4L2_CID_AUTO_WHITE_BALANCE,
+ 0, 1, 1, 1);
+ ctrls->blue_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BLUE_BALANCE,
+ 0, 4095, 1, 0);
+ ctrls->red_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_RED_BALANCE,
+ 0, 4095, 1, 0);
+ /* Auto/manual exposure */
+ ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops,
+ V4L2_CID_EXPOSURE_AUTO,
+ V4L2_EXPOSURE_MANUAL, 0,
+ V4L2_EXPOSURE_AUTO);
+ ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
+ 0, 65535, 1, 0);
+ /* Auto/manual gain */
+ ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
+ 0, 1, 1, 1);
+ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
+ 0, 1023, 1, 0);
+
+ ctrls->saturation = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION,
+ 0, 255, 1, 64);
+ ctrls->hue = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HUE,
+ 0, 359, 1, 0);
+ ctrls->contrast = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST,
+ 0, 255, 1, 0);
+ ctrls->test_pattern =
+ v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(test_pattern_menu) - 1,
+ 0, 0, test_pattern_menu);
+
+ if (hdl->error) {
+ ret = hdl->error;
+ goto free_ctrls;
+ }
+
+ ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ v4l2_ctrl_auto_cluster(3, &ctrls->auto_wb, 0, false);
+ v4l2_ctrl_auto_cluster(2, &ctrls->auto_gain, 0, true);
+ v4l2_ctrl_auto_cluster(2, &ctrls->auto_exp, 1, true);
+
+ sensor->sd.ctrl_handler = hdl;
+ return 0;
+
+free_ctrls:
+ v4l2_ctrl_handler_free(hdl);
+ return ret;
+}
+
+static int ov5640_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->pad != 0)
+ return -EINVAL;
+ if (fse->index >= OV5640_NUM_MODES)
+ return -EINVAL;
+
+ fse->min_width = fse->max_width =
+ ov5640_mode_data[0][fse->index].width;
+ fse->min_height = fse->max_height =
+ ov5640_mode_data[0][fse->index].height;
+
+ return 0;
+}
+
+static int ov5640_enum_frame_interval(
+ struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ struct v4l2_fract tpf;
+ int ret;
+
+ if (fie->pad != 0)
+ return -EINVAL;
+ if (fie->index >= OV5640_NUM_FRAMERATES)
+ return -EINVAL;
+
+ tpf.numerator = 1;
+ tpf.denominator = ov5640_framerates[fie->index];
+
+ ret = ov5640_try_frame_interval(sensor, &tpf,
+ fie->width, fie->height);
+ if (ret < 0)
+ return -EINVAL;
+
+ fie->interval = tpf;
+ return 0;
+}
+
+static int ov5640_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+
+ mutex_lock(&sensor->lock);
+ fi->interval = sensor->frame_interval;
+ mutex_unlock(&sensor->lock);
+
+ return 0;
+}
+
+static int ov5640_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ const struct ov5640_mode_info *mode;
+ int frame_rate, ret = 0;
+
+ if (fi->pad != 0)
+ return -EINVAL;
+
+ mutex_lock(&sensor->lock);
+
+ if (sensor->streaming) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ mode = sensor->current_mode;
+
+ frame_rate = ov5640_try_frame_interval(sensor, &fi->interval,
+ mode->width, mode->height);
+ if (frame_rate < 0)
+ frame_rate = OV5640_15_FPS;
+
+ sensor->current_fr = frame_rate;
+ sensor->frame_interval = fi->interval;
+ sensor->pending_mode_change = true;
+out:
+ mutex_unlock(&sensor->lock);
+ return ret;
+}
+
+static int ov5640_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+
+ if (code->pad != 0)
+ return -EINVAL;
+ if (code->index != 0)
+ return -EINVAL;
+
+ code->code = sensor->fmt.code;
+
+ return 0;
+}
+
+static int ov5640_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+ int ret = 0;
+
+ mutex_lock(&sensor->lock);
+
+ if (sensor->streaming == !enable) {
+ if (enable && sensor->pending_mode_change) {
+ ret = ov5640_set_mode(sensor, sensor->current_mode);
+ if (ret)
+ goto out;
+ }
+
+ ret = ov5640_set_stream(sensor, enable);
+ if (!ret)
+ sensor->streaming = enable;
+ }
+out:
+ mutex_unlock(&sensor->lock);
+ return ret;
+}
+
+static const struct v4l2_subdev_core_ops ov5640_core_ops = {
+ .s_power = ov5640_s_power,
+};
+
+static const struct v4l2_subdev_video_ops ov5640_video_ops = {
+ .g_frame_interval = ov5640_g_frame_interval,
+ .s_frame_interval = ov5640_s_frame_interval,
+ .s_stream = ov5640_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov5640_pad_ops = {
+ .enum_mbus_code = ov5640_enum_mbus_code,
+ .get_fmt = ov5640_get_fmt,
+ .set_fmt = ov5640_set_fmt,
+ .enum_frame_size = ov5640_enum_frame_size,
+ .enum_frame_interval = ov5640_enum_frame_interval,
+};
+
+static const struct v4l2_subdev_ops ov5640_subdev_ops = {
+ .core = &ov5640_core_ops,
+ .video = &ov5640_video_ops,
+ .pad = &ov5640_pad_ops,
+};
+
+static int ov5640_get_regulators(struct ov5640_dev *sensor)
+{
+ int i;
+
+ for (i = 0; i < OV5640_NUM_SUPPLIES; i++)
+ sensor->supplies[i].supply = ov5640_supply_name[i];
+
+ return devm_regulator_bulk_get(&sensor->i2c_client->dev,
+ OV5640_NUM_SUPPLIES,
+ sensor->supplies);
+}
+
+static int ov5640_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct fwnode_handle *endpoint;
+ struct ov5640_dev *sensor;
+ int ret;
+
+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ sensor->i2c_client = client;
+ sensor->fmt.code = MEDIA_BUS_FMT_UYVY8_2X8;
+ sensor->fmt.width = 640;
+ sensor->fmt.height = 480;
+ sensor->fmt.field = V4L2_FIELD_NONE;
+ sensor->frame_interval.numerator = 1;
+ sensor->frame_interval.denominator = ov5640_framerates[OV5640_30_FPS];
+ sensor->current_fr = OV5640_30_FPS;
+ sensor->current_mode =
+ &ov5640_mode_data[OV5640_30_FPS][OV5640_MODE_VGA_640_480];
+ sensor->pending_mode_change = true;
+
+ sensor->ae_target = 52;
+
+ endpoint = fwnode_graph_get_next_endpoint(
+ of_fwnode_handle(client->dev.of_node), NULL);
+ if (!endpoint) {
+ dev_err(dev, "endpoint node not found\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(endpoint, &sensor->ep);
+ fwnode_handle_put(endpoint);
+ if (ret) {
+ dev_err(dev, "Could not parse endpoint\n");
+ return ret;
+ }
+
+ if (sensor->ep.bus_type != V4L2_MBUS_CSI2) {
+ dev_err(dev, "invalid bus type, must be MIPI CSI2\n");
+ return -EINVAL;
+ }
+
+ /* get system clock (xclk) */
+ sensor->xclk = devm_clk_get(dev, "xclk");
+ if (IS_ERR(sensor->xclk)) {
+ dev_err(dev, "failed to get xclk\n");
+ return PTR_ERR(sensor->xclk);
+ }
+
+ sensor->xclk_freq = clk_get_rate(sensor->xclk);
+ if (sensor->xclk_freq < OV5640_XCLK_MIN ||
+ sensor->xclk_freq > OV5640_XCLK_MAX) {
+ dev_err(dev, "xclk frequency out of range: %d Hz\n",
+ sensor->xclk_freq);
+ return -EINVAL;
+ }
+
+ /* request optional power down pin */
+ sensor->pwdn_gpio = devm_gpiod_get_optional(dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ /* request optional reset pin */
+ sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+
+ v4l2_i2c_subdev_init(&sensor->sd, client, &ov5640_subdev_ops);
+
+ sensor->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
+ sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ret = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad);
+ if (ret)
+ return ret;
+
+ ret = ov5640_get_regulators(sensor);
+ if (ret)
+ return ret;
+
+ mutex_init(&sensor->lock);
+
+ ret = ov5640_init_controls(sensor);
+ if (ret)
+ goto entity_cleanup;
+
+ ret = v4l2_async_register_subdev(&sensor->sd);
+ if (ret)
+ goto free_ctrls;
+
+ return 0;
+
+free_ctrls:
+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+entity_cleanup:
+ mutex_destroy(&sensor->lock);
+ media_entity_cleanup(&sensor->sd.entity);
+ return ret;
+}
+
+static int ov5640_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
+
+ v4l2_async_unregister_subdev(&sensor->sd);
+ mutex_destroy(&sensor->lock);
+ media_entity_cleanup(&sensor->sd.entity);
+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+
+ return 0;
+}
+
+static const struct i2c_device_id ov5640_id[] = {
+ {"ov5640", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ov5640_id);
+
+static const struct of_device_id ov5640_dt_ids[] = {
+ { .compatible = "ovti,ov5640" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ov5640_dt_ids);
+
+static struct i2c_driver ov5640_i2c_driver = {
+ .driver = {
+ .name = "ov5640",
+ .of_match_table = ov5640_dt_ids,
+ },
+ .id_table = ov5640_id,
+ .probe = ov5640_probe,
+ .remove = ov5640_remove,
+};
+
+module_i2c_driver(ov5640_i2c_driver);
+
+MODULE_DESCRIPTION("OV5640 MIPI Camera Subdev Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index 57bd591ea54b..d1e844f7f03f 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -39,7 +39,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
#define OV5645_VOLTAGE_ANALOG 2800000
@@ -87,7 +87,7 @@ struct ov5645 {
struct device *dev;
struct v4l2_subdev sd;
struct media_pad pad;
- struct v4l2_of_endpoint ep;
+ struct v4l2_fwnode_endpoint ep;
struct v4l2_mbus_framefmt fmt;
struct v4l2_rect crop;
struct clk *xclk;
@@ -1102,7 +1102,8 @@ static int ov5645_probe(struct i2c_client *client,
return -EINVAL;
}
- ret = v4l2_of_parse_endpoint(endpoint, &ov5645->ep);
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint),
+ &ov5645->ep);
if (ret < 0) {
dev_err(dev, "parsing endpoint node failed\n");
return ret;
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index f57a0b354cf6..95ce90fdb876 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -25,12 +25,13 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_graph.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-image-sizes.h>
#include <media/v4l2-mediabus.h>
-#include <media/v4l2-of.h>
#define SENSOR_NAME "ov5647"
@@ -510,7 +511,7 @@ static const struct v4l2_subdev_internal_ops ov5647_subdev_internal_ops = {
static int ov5647_parse_dt(struct device_node *np)
{
- struct v4l2_of_endpoint bus_cfg;
+ struct v4l2_fwnode_endpoint bus_cfg;
struct device_node *ep;
int ret;
@@ -519,7 +520,7 @@ static int ov5647_parse_dt(struct device_node *np)
if (!ep)
return -EINVAL;
- ret = v4l2_of_parse_endpoint(ep, &bus_cfg);
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &bus_cfg);
of_node_put(ep);
return ret;
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index 3844853ab0a0..f434fb2ee6fc 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -24,6 +24,7 @@
#include <linux/media.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -35,7 +36,7 @@
#include <media/v4l2-subdev.h>
#include <media/v4l2-mediabus.h>
#include <media/i2c/s5c73m3.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
#include "s5c73m3.h"
@@ -1602,7 +1603,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state)
const struct s5c73m3_platform_data *pdata = dev->platform_data;
struct device_node *node = dev->of_node;
struct device_node *node_ep;
- struct v4l2_of_endpoint ep;
+ struct v4l2_fwnode_endpoint ep;
int ret;
if (!node) {
@@ -1639,7 +1640,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state)
return 0;
}
- ret = v4l2_of_parse_endpoint(node_ep, &ep);
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(node_ep), &ep);
of_node_put(node_ep);
if (ret)
return ret;
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index db82ed05792e..962051b9939d 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -30,7 +30,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-mediabus.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
static int debug;
module_param(debug, int, 0644);
@@ -1841,7 +1841,7 @@ static int s5k5baf_parse_device_node(struct s5k5baf *state, struct device *dev)
{
struct device_node *node = dev->of_node;
struct device_node *node_ep;
- struct v4l2_of_endpoint ep;
+ struct v4l2_fwnode_endpoint ep;
int ret;
if (!node) {
@@ -1868,7 +1868,7 @@ static int s5k5baf_parse_device_node(struct s5k5baf *state, struct device *dev)
return -EINVAL;
}
- ret = v4l2_of_parse_endpoint(node_ep, &ep);
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(node_ep), &ep);
of_node_put(node_ep);
if (ret)
return ret;
diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
index faee11383cb7..9fd254a8e20d 100644
--- a/drivers/media/i2c/s5k6aa.c
+++ b/drivers/media/i2c/s5k6aa.c
@@ -838,7 +838,7 @@ static int __s5k6aa_power_on(struct s5k6aa *s5k6aa)
if (s5k6aa->s_power)
ret = s5k6aa->s_power(1);
- usleep_range(4000, 4000);
+ usleep_range(4000, 5000);
if (s5k6aa_gpio_deassert(s5k6aa, RST))
msleep(20);
diff --git a/drivers/media/i2c/smiapp/Kconfig b/drivers/media/i2c/smiapp/Kconfig
index 3149cda1d0db..f59718d8e51e 100644
--- a/drivers/media/i2c/smiapp/Kconfig
+++ b/drivers/media/i2c/smiapp/Kconfig
@@ -3,5 +3,6 @@ config VIDEO_SMIAPP
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAVE_CLK
depends on MEDIA_CAMERA_SUPPORT
select VIDEO_SMIAPP_PLL
+ select V4L2_FWNODE
---help---
This is a generic driver for SMIA++/SMIA camera modules.
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index f4e92bdfe192..e0b0c032c4ac 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -27,12 +27,13 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/smiapp.h>
#include <linux/v4l2-mediabus.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-device.h>
-#include <media/v4l2-of.h>
#include "smiapp.h"
@@ -2784,19 +2785,20 @@ static int __maybe_unused smiapp_resume(struct device *dev)
static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
{
struct smiapp_hwconfig *hwcfg;
- struct v4l2_of_endpoint *bus_cfg;
- struct device_node *ep;
+ struct v4l2_fwnode_endpoint *bus_cfg;
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
int i;
int rval;
- if (!dev->of_node)
+ if (!fwnode)
return dev->platform_data;
- ep = of_graph_get_next_endpoint(dev->of_node, NULL);
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
return NULL;
- bus_cfg = v4l2_of_alloc_parse_endpoint(ep);
+ bus_cfg = v4l2_fwnode_endpoint_alloc_parse(ep);
if (IS_ERR(bus_cfg))
goto out_err;
@@ -2817,11 +2819,10 @@ static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
dev_dbg(dev, "lanes %u\n", hwcfg->lanes);
/* NVM size is not mandatory */
- of_property_read_u32(dev->of_node, "nokia,nvm-size",
- &hwcfg->nvm_size);
+ fwnode_property_read_u32(fwnode, "nokia,nvm-size", &hwcfg->nvm_size);
- rval = of_property_read_u32(dev->of_node, "clock-frequency",
- &hwcfg->ext_clk);
+ rval = fwnode_property_read_u32(fwnode, "clock-frequency",
+ &hwcfg->ext_clk);
if (rval) {
dev_warn(dev, "can't get clock-frequency\n");
goto out_err;
@@ -2846,13 +2847,13 @@ static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
dev_dbg(dev, "freq %d: %lld\n", i, hwcfg->op_sys_clock[i]);
}
- v4l2_of_free_endpoint(bus_cfg);
- of_node_put(ep);
+ v4l2_fwnode_endpoint_free(bus_cfg);
+ fwnode_handle_put(ep);
return hwcfg;
out_err:
- v4l2_of_free_endpoint(bus_cfg);
- of_node_put(ep);
+ v4l2_fwnode_endpoint_free(bus_cfg);
+ fwnode_handle_put(ep);
return NULL;
}
diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/soc_camera/ov6650.c
index dbd6d92c589f..d2be64d54b22 100644
--- a/drivers/media/i2c/soc_camera/ov6650.c
+++ b/drivers/media/i2c/soc_camera/ov6650.c
@@ -709,6 +709,7 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
switch (mf->code) {
case MEDIA_BUS_FMT_Y10_1X10:
mf->code = MEDIA_BUS_FMT_Y8_1X8;
+ /* fall through */
case MEDIA_BUS_FMT_Y8_1X8:
case MEDIA_BUS_FMT_YVYU8_2X8:
case MEDIA_BUS_FMT_YUYV8_2X8:
@@ -718,6 +719,7 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
break;
default:
mf->code = MEDIA_BUS_FMT_SBGGR8_1X8;
+ /* fall through */
case MEDIA_BUS_FMT_SBGGR8_1X8:
mf->colorspace = V4L2_COLORSPACE_SRGB;
break;
diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c
index 0f7b9d1b9c57..806383500313 100644
--- a/drivers/media/i2c/soc_camera/ov772x.c
+++ b/drivers/media/i2c/soc_camera/ov772x.c
@@ -1047,11 +1047,13 @@ static int ov772x_probe(struct i2c_client *client,
return -EINVAL;
}
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_PROTOCOL_MANGLING)) {
dev_err(&adapter->dev,
- "I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE_DATA\n");
+ "I2C-Adapter doesn't support SMBUS_BYTE_DATA or PROTOCOL_MANGLING\n");
return -EIO;
}
+ client->flags |= I2C_CLIENT_SCCB;
priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 3251cba89e8f..5788af238b86 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -33,6 +33,8 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/of_graph.h>
#include <linux/videodev2.h>
#include <linux/workqueue.h>
#include <linux/v4l2-dv-timings.h>
@@ -41,7 +43,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
#include <media/i2c/tc358743.h>
#include "tc358743_regs.h"
@@ -61,6 +63,8 @@ MODULE_LICENSE("GPL");
#define I2C_MAX_XFER_SIZE (EDID_BLOCK_SIZE + 2)
+#define POLL_INTERVAL_MS 1000
+
static const struct v4l2_dv_timings_cap tc358743_timings_cap = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
@@ -76,7 +80,7 @@ static const struct v4l2_dv_timings_cap tc358743_timings_cap = {
struct tc358743_state {
struct tc358743_platform_data pdata;
- struct v4l2_of_bus_mipi_csi2 bus;
+ struct v4l2_fwnode_bus_mipi_csi2 bus;
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler hdl;
@@ -91,6 +95,9 @@ struct tc358743_state {
struct delayed_work delayed_work_enable_hotplug;
+ struct timer_list timer;
+ struct work_struct work_i2c_poll;
+
/* edid */
u8 edid_blocks_written;
@@ -1296,7 +1303,6 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
tc358743_csi_err_int_handler(sd, handled);
i2c_wr16(sd, INTSTATUS, MASK_CSI_INT);
- intstatus &= ~MASK_CSI_INT;
}
intstatus = i2c_rd16(sd, INTSTATUS);
@@ -1319,6 +1325,24 @@ static irqreturn_t tc358743_irq_handler(int irq, void *dev_id)
return handled ? IRQ_HANDLED : IRQ_NONE;
}
+static void tc358743_irq_poll_timer(unsigned long arg)
+{
+ struct tc358743_state *state = (struct tc358743_state *)arg;
+
+ schedule_work(&state->work_i2c_poll);
+
+ mod_timer(&state->timer, jiffies + msecs_to_jiffies(POLL_INTERVAL_MS));
+}
+
+static void tc358743_work_i2c_poll(struct work_struct *work)
+{
+ struct tc358743_state *state = container_of(work,
+ struct tc358743_state, work_i2c_poll);
+ bool handled;
+
+ tc358743_isr(&state->sd, 0, &handled);
+}
+
static int tc358743_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
struct v4l2_event_subscription *sub)
{
@@ -1473,6 +1497,23 @@ static int tc358743_s_stream(struct v4l2_subdev *sd, int enable)
/* --------------- PAD OPS --------------- */
+static int tc358743_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ switch (code->index) {
+ case 0:
+ code->code = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ case 1:
+ code->code = MEDIA_BUS_FMT_UYVY8_1X16;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int tc358743_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *format)
@@ -1642,6 +1683,7 @@ static const struct v4l2_subdev_video_ops tc358743_video_ops = {
};
static const struct v4l2_subdev_pad_ops tc358743_pad_ops = {
+ .enum_mbus_code = tc358743_enum_mbus_code,
.set_fmt = tc358743_set_fmt,
.get_fmt = tc358743_get_fmt,
.get_edid = tc358743_g_edid,
@@ -1695,7 +1737,7 @@ static void tc358743_gpio_reset(struct tc358743_state *state)
static int tc358743_probe_of(struct tc358743_state *state)
{
struct device *dev = &state->i2c_client->dev;
- struct v4l2_of_endpoint *endpoint;
+ struct v4l2_fwnode_endpoint *endpoint;
struct device_node *ep;
struct clk *refclk;
u32 bps_pr_lane;
@@ -1715,7 +1757,7 @@ static int tc358743_probe_of(struct tc358743_state *state)
return -EINVAL;
}
- endpoint = v4l2_of_alloc_parse_endpoint(ep);
+ endpoint = v4l2_fwnode_endpoint_alloc_parse(of_fwnode_handle(ep));
if (IS_ERR(endpoint)) {
dev_err(dev, "failed to parse endpoint\n");
return PTR_ERR(endpoint);
@@ -1730,7 +1772,11 @@ static int tc358743_probe_of(struct tc358743_state *state)
state->bus = endpoint->bus.mipi_csi2;
- clk_prepare_enable(refclk);
+ ret = clk_prepare_enable(refclk);
+ if (ret) {
+ dev_err(dev, "Failed! to enable clock\n");
+ goto free_endpoint;
+ }
state->pdata.refclk_hz = clk_get_rate(refclk);
state->pdata.ddc5v_delay = DDC5V_DELAY_100_MS;
@@ -1803,7 +1849,7 @@ static int tc358743_probe_of(struct tc358743_state *state)
disable_clk:
clk_disable_unprepare(refclk);
free_endpoint:
- v4l2_of_free_endpoint(endpoint);
+ v4l2_fwnode_endpoint_free(endpoint);
return ret;
}
#else
@@ -1887,6 +1933,8 @@ static int tc358743_probe(struct i2c_client *client,
if (err < 0)
goto err_hdl;
+ state->mbus_fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
+
sd->dev = &client->dev;
err = v4l2_async_register_subdev(sd);
if (err < 0)
@@ -1901,7 +1949,6 @@ static int tc358743_probe(struct i2c_client *client,
tc358743_s_dv_timings(sd, &default_timing);
- state->mbus_fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
tc358743_set_csi_color_space(sd);
tc358743_init_interrupts(sd);
@@ -1914,6 +1961,14 @@ static int tc358743_probe(struct i2c_client *client,
"tc358743", state);
if (err)
goto err_work_queues;
+ } else {
+ INIT_WORK(&state->work_i2c_poll,
+ tc358743_work_i2c_poll);
+ state->timer.data = (unsigned long)state;
+ state->timer.function = tc358743_irq_poll_timer;
+ state->timer.expires = jiffies +
+ msecs_to_jiffies(POLL_INTERVAL_MS);
+ add_timer(&state->timer);
}
tc358743_enable_interrupts(sd, tx_5v_power_present(sd));
@@ -1929,6 +1984,8 @@ static int tc358743_probe(struct i2c_client *client,
return 0;
err_work_queues:
+ if (!state->i2c_client->irq)
+ flush_work(&state->work_i2c_poll);
cancel_delayed_work(&state->delayed_work_enable_hotplug);
mutex_destroy(&state->confctl_mutex);
err_hdl:
@@ -1942,6 +1999,10 @@ static int tc358743_remove(struct i2c_client *client)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct tc358743_state *state = to_state(sd);
+ if (!state->i2c_client->irq) {
+ del_timer_sync(&state->timer);
+ flush_work(&state->work_i2c_poll);
+ }
cancel_delayed_work(&state->delayed_work_enable_hotplug);
v4l2_async_unregister_subdev(sd);
v4l2_device_unregister_subdev(sd);
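
Editor's note: the tc358743 changes above add a polling fallback for boards that do not wire up the interrupt line — a periodic timer schedules a work item, and the work item calls the existing ISR from process context so it may sleep on I2C. A condensed sketch of that shape, using the pre-4.15 timer API this patch targets (the mydrv_* names are illustrative only):

#define POLL_INTERVAL_MS 1000

static void mydrv_poll_timer(unsigned long arg)
{
	struct mydrv_state *state = (struct mydrv_state *)arg;

	/* I2C access sleeps, so defer the real work to process context. */
	schedule_work(&state->work_i2c_poll);
	mod_timer(&state->timer, jiffies + msecs_to_jiffies(POLL_INTERVAL_MS));
}

static void mydrv_work_i2c_poll(struct work_struct *work)
{
	struct mydrv_state *state =
		container_of(work, struct mydrv_state, work_i2c_poll);
	bool handled;

	mydrv_isr(&state->sd, 0, &handled);	/* same handler the IRQ path uses */
}

/* In probe(), only when client->irq == 0: */
INIT_WORK(&state->work_i2c_poll, mydrv_work_i2c_poll);
setup_timer(&state->timer, mydrv_poll_timer, (unsigned long)state);
mod_timer(&state->timer, jiffies + msecs_to_jiffies(POLL_INTERVAL_MS));

/* In remove(), mirror the setup: */
del_timer_sync(&state->timer);
flush_work(&state->work_i2c_poll);
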
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index 07853d2252aa..ad2df998f9c5 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -38,7 +38,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>
#include <media/v4l2-mediabus.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-ctrls.h>
#include <media/i2c/tvp514x.h>
#include <media/media-entity.h>
@@ -998,7 +998,7 @@ static struct tvp514x_platform_data *
tvp514x_get_pdata(struct i2c_client *client)
{
struct tvp514x_platform_data *pdata = NULL;
- struct v4l2_of_endpoint bus_cfg;
+ struct v4l2_fwnode_endpoint bus_cfg;
struct device_node *endpoint;
unsigned int flags;
@@ -1009,7 +1009,7 @@ tvp514x_get_pdata(struct i2c_client *client)
if (!endpoint)
return NULL;
- if (v4l2_of_parse_endpoint(endpoint, &bus_cfg))
+ if (v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint), &bus_cfg))
goto done;
pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 04e96b3057bb..9da4bf4f2c7a 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -12,10 +12,11 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/of_graph.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-mc.h>
#include "tvp5150_reg.h"
@@ -1358,7 +1359,7 @@ static int tvp5150_init(struct i2c_client *c)
static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
{
- struct v4l2_of_endpoint bus_cfg;
+ struct v4l2_fwnode_endpoint bus_cfg;
struct device_node *ep;
#ifdef CONFIG_MEDIA_CONTROLLER
struct device_node *connectors, *child;
@@ -1373,7 +1374,7 @@ static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
if (!ep)
return -EINVAL;
- ret = v4l2_of_parse_endpoint(ep, &bus_cfg);
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &bus_cfg);
if (ret)
goto err;
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index 4c1190127c85..a26c1a3f7183 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -33,7 +33,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
#include "tvp7002_reg.h"
@@ -889,7 +889,7 @@ static const struct v4l2_subdev_ops tvp7002_ops = {
static struct tvp7002_config *
tvp7002_get_pdata(struct i2c_client *client)
{
- struct v4l2_of_endpoint bus_cfg;
+ struct v4l2_fwnode_endpoint bus_cfg;
struct tvp7002_config *pdata = NULL;
struct device_node *endpoint;
unsigned int flags;
@@ -901,7 +901,7 @@ tvp7002_get_pdata(struct i2c_client *client)
if (!endpoint)
return NULL;
- if (v4l2_of_parse_endpoint(endpoint, &bus_cfg))
+ if (v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint), &bus_cfg))
goto done;
pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index bc44193efa47..dd0f0ead9516 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -18,6 +18,7 @@
#include <linux/bitmap.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <media/media-entity.h>
#include <media/media-device.h>
@@ -386,6 +387,41 @@ struct media_entity *media_graph_walk_next(struct media_graph *graph)
}
EXPORT_SYMBOL_GPL(media_graph_walk_next);
+int media_entity_get_fwnode_pad(struct media_entity *entity,
+ struct fwnode_handle *fwnode,
+ unsigned long direction_flags)
+{
+ struct fwnode_endpoint endpoint;
+ unsigned int i;
+ int ret;
+
+ if (!entity->ops || !entity->ops->get_fwnode_pad) {
+ for (i = 0; i < entity->num_pads; i++) {
+ if (entity->pads[i].flags & direction_flags)
+ return i;
+ }
+
+ return -ENXIO;
+ }
+
+ ret = fwnode_graph_parse_endpoint(fwnode, &endpoint);
+ if (ret)
+ return ret;
+
+ ret = entity->ops->get_fwnode_pad(&endpoint);
+ if (ret < 0)
+ return ret;
+
+ if (ret >= entity->num_pads)
+ return -ENXIO;
+
+ if (!(entity->pads[ret].flags & direction_flags))
+ return -ENXIO;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(media_entity_get_fwnode_pad);
+
/* -----------------------------------------------------------------------------
* Pipeline management
*/
@@ -530,8 +566,13 @@ void __media_pipeline_stop(struct media_entity *entity)
struct media_graph *graph = &entity->pipe->graph;
struct media_pipeline *pipe = entity->pipe;
+ /*
+ * If the following check fails, the driver has performed an
+ * unbalanced call to media_pipeline_stop()
+ */
+ if (WARN_ON(!pipe))
+ return;
- WARN_ON(!pipe->streaming_count);
media_graph_walk_start(graph, entity);
while ((entity = media_graph_walk_next(graph))) {
diff --git a/drivers/media/pci/bt8xx/dst_ca.c b/drivers/media/pci/bt8xx/dst_ca.c
index 04d06c564602..90f4263452d3 100644
--- a/drivers/media/pci/bt8xx/dst_ca.c
+++ b/drivers/media/pci/bt8xx/dst_ca.c
@@ -637,6 +637,7 @@ static long dst_ca_ioctl(struct file *file, unsigned int cmd, unsigned long ioct
goto free_mem_and_exit;
}
dprintk(verbose, DST_CA_INFO, 1, " -->CA_SET_PID Success !");
+ break;
default:
result = -EOPNOTSUPP;
}
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index d5c911c09e2b..f8e173f3e9e2 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -205,6 +205,8 @@ void cobalt_pcie_status_show(struct cobalt *cobalt)
offset = pci_find_capability(pci_dev, PCI_CAP_ID_EXP);
bus_offset = pci_find_capability(pci_bus_dev, PCI_CAP_ID_EXP);
+ if (!offset || !bus_offset)
+ return;
/* Device */
pci_read_config_dword(pci_dev, offset + PCI_EXP_DEVCAP, &capa);
diff --git a/drivers/media/pci/cx18/cx18-alsa-pcm.c b/drivers/media/pci/cx18/cx18-alsa-pcm.c
index 205a98da877c..f68ee57a9ae2 100644
--- a/drivers/media/pci/cx18/cx18-alsa-pcm.c
+++ b/drivers/media/pci/cx18/cx18-alsa-pcm.c
@@ -257,14 +257,16 @@ static int snd_cx18_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct snd_cx18_card *cxsc = snd_pcm_substream_chip(substream);
unsigned long flags;
+ unsigned char *dma_area = NULL;
spin_lock_irqsave(&cxsc->slock, flags);
if (substream->runtime->dma_area) {
dprintk("freeing pcm capture region\n");
- vfree(substream->runtime->dma_area);
+ dma_area = substream->runtime->dma_area;
substream->runtime->dma_area = NULL;
}
spin_unlock_irqrestore(&cxsc->slock, flags);
+ vfree(dma_area);
return 0;
}
diff --git a/drivers/media/pci/cx18/cx18-dvb.c b/drivers/media/pci/cx18/cx18-dvb.c
index d130d65828b0..53f4d6bf81fb 100644
--- a/drivers/media/pci/cx18/cx18-dvb.c
+++ b/drivers/media/pci/cx18/cx18-dvb.c
@@ -151,7 +151,7 @@ static int yuan_mpc718_mt352_reqfw(struct cx18_stream *stream,
}
if (ret) {
- CX18_ERR("The MPC718 board variant with the MT352 DVB-Tdemodualtor will not work without it\n");
+ CX18_ERR("The MPC718 board variant with the MT352 DVB-T demodulator will not work without it\n");
CX18_ERR("Run 'linux/Documentation/dvb/get_dvb_firmware mpc718' if you need the firmware\n");
}
return ret;
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 9e39aea85df6..c48fa8e25a70 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -2081,7 +2081,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts2->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
- /* break omitted intentionally */
+ /* fall-through */
case CX23885_BOARD_DVICO_FUSIONHDTV_5_EXP:
ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
@@ -2238,6 +2238,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
/* Currently only enabled for the integrated IR controller */
if (!enable_885_ir)
break;
+ /* fall-through */
case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_HAUPPAUGE_HVR1800:
case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE:
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 73cc7a67a8bc..6df21b29ea17 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -3681,7 +3681,14 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
core->nr = nr;
sprintf(core->name, "cx88[%d]", core->nr);
- core->tvnorm = V4L2_STD_NTSC_M;
+ /*
+ * Note: Setting initial standard here would cause first call to
+ * cx88_set_tvnorm() to return without programming any registers. Leave
+ * it blank at this point; it will get set later in cx8800_initdev().
+ */
+ core->tvnorm = 0;
+
core->width = 320;
core->height = 240;
core->field = V4L2_FIELD_INTERLACED;
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index c7d4e87ccb64..7d25ecd4404b 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1420,7 +1420,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
request_module("rtc-isl1208");
core->i2c_rtc = i2c_new_device(&core->i2c_adap, &rtc_info);
}
- /* break intentionally omitted */
+ /* fall-through */
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
request_module("ir-kbd-i2c");
}
@@ -1435,7 +1435,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
/* initial device configuration */
mutex_lock(&core->lock);
- cx88_set_tvnorm(core, core->tvnorm);
+ cx88_set_tvnorm(core, V4L2_STD_NTSC_M);
v4l2_ctrl_handler_setup(&core->video_hdl);
v4l2_ctrl_handler_setup(&core->audio_hdl);
cx88_video_mux(core, 0);
diff --git a/drivers/media/pci/ddbridge/Kconfig b/drivers/media/pci/ddbridge/Kconfig
index 44e5dc15e60a..ffed78c2ffb4 100644
--- a/drivers/media/pci/ddbridge/Kconfig
+++ b/drivers/media/pci/ddbridge/Kconfig
@@ -6,6 +6,9 @@ config DVB_DDBRIDGE
select DVB_STV090x if MEDIA_SUBDRV_AUTOSELECT
select DVB_DRXK if MEDIA_SUBDRV_AUTOSELECT
select DVB_TDA18271C2DD if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_STV0367 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_CXD2841ER if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_TDA18212 if MEDIA_SUBDRV_AUTOSELECT
---help---
Support for cards with the Digital Devices PCI express bridge:
- Octopus PCIe Bridge
@@ -14,5 +17,8 @@ config DVB_DDBRIDGE
- DuoFlex S2 Octopus
- DuoFlex CT Octopus
- cineS2(v6)
+ - CineCTv6 and DuoFlex CT (STV0367-based)
+ - CineCTv7 and DuoFlex CT2/C2T2/C2T2I (Sony CXD28xx-based)
+ - MaxA8 series
Say Y if you own such a card and want to use it.
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index 340cff02dee2..9420479bee9a 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -39,6 +39,14 @@
#include "stv090x.h"
#include "lnbh24.h"
#include "drxk.h"
+#include "stv0367.h"
+#include "stv0367_priv.h"
+#include "cxd2841er.h"
+#include "tda18212.h"
+
+static int xo2_speed = 2;
+module_param(xo2_speed, int, 0444);
+MODULE_PARM_DESC(xo2_speed, "default transfer speed for xo2 based duoflex, 0=55,1=75,2=90,3=104 MBit/s, default=2, use attribute to change for individual cards");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
@@ -47,6 +55,24 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
/******************************************************************************/
+static int i2c_io(struct i2c_adapter *adapter, u8 adr,
+ u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
+{
+ struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
+ .buf = wbuf, .len = wlen },
+ {.addr = adr, .flags = I2C_M_RD,
+ .buf = rbuf, .len = rlen } };
+ return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
+}
+
+static int i2c_write(struct i2c_adapter *adap, u8 adr, u8 *data, int len)
+{
+ struct i2c_msg msg = {.addr = adr, .flags = 0,
+ .buf = data, .len = len};
+
+ return (i2c_transfer(adap, &msg, 1) == 1) ? 0 : -1;
+}
+
static int i2c_read(struct i2c_adapter *adapter, u8 adr, u8 *val)
{
struct i2c_msg msgs[1] = {{.addr = adr, .flags = I2C_M_RD,
@@ -54,15 +80,21 @@ static int i2c_read(struct i2c_adapter *adapter, u8 adr, u8 *val)
return (i2c_transfer(adapter, msgs, 1) == 1) ? 0 : -1;
}
-static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr, u8 reg, u8 *val)
+static int i2c_read_regs(struct i2c_adapter *adapter,
+ u8 adr, u8 reg, u8 *val, u8 len)
{
struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
.buf = &reg, .len = 1 },
{.addr = adr, .flags = I2C_M_RD,
- .buf = val, .len = 1 } };
+ .buf = val, .len = len } };
return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
}
+static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr, u8 reg, u8 *val)
+{
+ return i2c_read_regs(adapter, adr, reg, val, 1);
+}
+
static int i2c_read_reg16(struct i2c_adapter *adapter, u8 adr,
u16 reg, u8 *val)
{
@@ -74,6 +106,14 @@ static int i2c_read_reg16(struct i2c_adapter *adapter, u8 adr,
return (i2c_transfer(adapter, msgs, 2) == 2) ? 0 : -1;
}
+static int i2c_write_reg(struct i2c_adapter *adap, u8 adr,
+ u8 reg, u8 val)
+{
+ u8 msg[2] = {reg, val};
+
+ return i2c_write(adap, adr, msg, 2);
+}
+
static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd)
{
struct ddb *dev = i2c->dev;
@@ -609,6 +649,151 @@ static int tuner_attach_tda18271(struct ddb_input *input)
/******************************************************************************/
/******************************************************************************/
+static struct stv0367_config ddb_stv0367_config[] = {
+ {
+ .demod_address = 0x1f,
+ .xtal = 27000000,
+ .if_khz = 0,
+ .if_iq_mode = FE_TER_NORMAL_IF_TUNER,
+ .ts_mode = STV0367_SERIAL_PUNCT_CLOCK,
+ .clk_pol = STV0367_CLOCKPOLARITY_DEFAULT,
+ }, {
+ .demod_address = 0x1e,
+ .xtal = 27000000,
+ .if_khz = 0,
+ .if_iq_mode = FE_TER_NORMAL_IF_TUNER,
+ .ts_mode = STV0367_SERIAL_PUNCT_CLOCK,
+ .clk_pol = STV0367_CLOCKPOLARITY_DEFAULT,
+ },
+};
+
+static int demod_attach_stv0367(struct ddb_input *input)
+{
+ struct i2c_adapter *i2c = &input->port->i2c->adap;
+
+ /* attach frontend */
+ input->fe = dvb_attach(stv0367ddb_attach,
+ &ddb_stv0367_config[(input->nr & 1)], i2c);
+
+ if (!input->fe) {
+ printk(KERN_ERR "stv0367ddb_attach failed (not found?)\n");
+ return -ENODEV;
+ }
+
+ input->fe->sec_priv = input;
+ input->gate_ctrl = input->fe->ops.i2c_gate_ctrl;
+ input->fe->ops.i2c_gate_ctrl = drxk_gate_ctrl;
+
+ return 0;
+}
+
+static int tuner_tda18212_ping(struct ddb_input *input, unsigned short adr)
+{
+ struct i2c_adapter *adapter = &input->port->i2c->adap;
+ u8 tda_id[2];
+ u8 subaddr = 0x00;
+
+ printk(KERN_DEBUG "stv0367-tda18212 tuner ping\n");
+ if (input->fe->ops.i2c_gate_ctrl)
+ input->fe->ops.i2c_gate_ctrl(input->fe, 1);
+
+ if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0)
+ printk(KERN_DEBUG "tda18212 ping 1 fail\n");
+ if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0)
+ printk(KERN_DEBUG "tda18212 ping 2 fail\n");
+
+ if (input->fe->ops.i2c_gate_ctrl)
+ input->fe->ops.i2c_gate_ctrl(input->fe, 0);
+
+ return 0;
+}
+
+static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24)
+{
+ struct i2c_adapter *i2c = &input->port->i2c->adap;
+ struct cxd2841er_config cfg;
+
+ /* the cxd2841er driver expects 8bit/shifted I2C addresses */
+ cfg.i2c_addr = ((input->nr & 1) ? 0x6d : 0x6c) << 1;
+
+ cfg.xtal = osc24 ? SONY_XTAL_24000 : SONY_XTAL_20500;
+ cfg.flags = CXD2841ER_AUTO_IFHZ | CXD2841ER_EARLY_TUNE |
+ CXD2841ER_NO_WAIT_LOCK | CXD2841ER_NO_AGCNEG |
+ CXD2841ER_TSBITS;
+
+ if (!par)
+ cfg.flags |= CXD2841ER_TS_SERIAL;
+
+ /* attach frontend */
+ input->fe = dvb_attach(cxd2841er_attach_t_c, &cfg, i2c);
+
+ if (!input->fe) {
+ printk(KERN_ERR "No Sony CXD28xx found!\n");
+ return -ENODEV;
+ }
+
+ input->fe->sec_priv = input;
+ input->gate_ctrl = input->fe->ops.i2c_gate_ctrl;
+ input->fe->ops.i2c_gate_ctrl = drxk_gate_ctrl;
+
+ return 0;
+}
+
+static int tuner_attach_tda18212(struct ddb_input *input, u32 porttype)
+{
+ struct i2c_adapter *adapter = &input->port->i2c->adap;
+ struct i2c_client *client;
+ struct tda18212_config config = {
+ .fe = input->fe,
+ .if_dvbt_6 = 3550,
+ .if_dvbt_7 = 3700,
+ .if_dvbt_8 = 4150,
+ .if_dvbt2_6 = 3250,
+ .if_dvbt2_7 = 4000,
+ .if_dvbt2_8 = 4000,
+ .if_dvbc = 5000,
+ };
+ struct i2c_board_info board_info = {
+ .type = "tda18212",
+ .platform_data = &config,
+ };
+
+ if (input->nr & 1)
+ board_info.addr = 0x63;
+ else
+ board_info.addr = 0x60;
+
+ /* due to a hardware quirk with the I2C gate on the stv0367+tda18212
+ * combo, the tda18212 must be probed by reading its id _twice_ when
+ * cold started, or it very likely will fail.
+ */
+ if (porttype == DDB_TUNER_DVBCT_ST)
+ tuner_tda18212_ping(input, board_info.addr);
+
+ request_module(board_info.type);
+
+ /* perform tuner init/attach */
+ client = i2c_new_device(adapter, &board_info);
+ if (client == NULL || client->dev.driver == NULL)
+ goto err;
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ goto err;
+ }
+
+ input->i2c_client[0] = client;
+
+ return 0;
+err:
+ printk(KERN_INFO "TDA18212 tuner not found. Device is not fully operational.\n");
+ return -ENODEV;
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/******************************************************************************/
+
static struct stv090x_config stv0900 = {
.device = STV0900,
.demod_mode = STV090x_DUAL,
@@ -779,19 +964,28 @@ static void dvb_input_detach(struct ddb_input *input)
{
struct dvb_adapter *adap = &input->adap;
struct dvb_demux *dvbdemux = &input->demux;
+ struct i2c_client *client;
switch (input->attached) {
case 5:
- if (input->fe2)
+ client = input->i2c_client[0];
+ if (client) {
+ module_put(client->dev.driver->owner);
+ i2c_unregister_device(client);
+ }
+ if (input->fe2) {
dvb_unregister_frontend(input->fe2);
+ input->fe2 = NULL;
+ }
if (input->fe) {
dvb_unregister_frontend(input->fe);
dvb_frontend_detach(input->fe);
input->fe = NULL;
}
+ /* fall-through */
case 4:
dvb_net_release(&input->dvbnet);
-
+ /* fall-through */
case 3:
dvbdemux->dmx.close(&dvbdemux->dmx);
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
@@ -799,10 +993,10 @@ static void dvb_input_detach(struct ddb_input *input)
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx,
&input->mem_frontend);
dvb_dmxdev_release(&input->dmxdev);
-
+ /* fall-through */
case 2:
dvb_dmx_release(&input->demux);
-
+ /* fall-through */
case 1:
dvb_unregister_adapter(adap);
}
@@ -815,6 +1009,7 @@ static int dvb_input_attach(struct ddb_input *input)
struct ddb_port *port = input->port;
struct dvb_adapter *adap = &input->adap;
struct dvb_demux *dvbdemux = &input->demux;
+ int sony_osc24 = 0, sony_tspar = 0;
ret = dvb_register_adapter(adap, "DDBridge", THIS_MODULE,
&input->port->dev->pdev->dev,
@@ -882,7 +1077,56 @@ static int dvb_input_attach(struct ddb_input *input)
sizeof(struct dvb_tuner_ops));
}
break;
+ case DDB_TUNER_DVBCT_ST:
+ if (demod_attach_stv0367(input) < 0)
+ return -ENODEV;
+ if (tuner_attach_tda18212(input, port->type) < 0)
+ return -ENODEV;
+ if (input->fe) {
+ if (dvb_register_frontend(adap, input->fe) < 0)
+ return -ENODEV;
+ }
+ break;
+ case DDB_TUNER_DVBC2T2I_SONY_P:
+ case DDB_TUNER_DVBCT2_SONY_P:
+ case DDB_TUNER_DVBC2T2_SONY_P:
+ case DDB_TUNER_ISDBT_SONY_P:
+ if (port->type == DDB_TUNER_DVBC2T2I_SONY_P)
+ sony_osc24 = 1;
+ if (input->port->dev->info->ts_quirks & TS_QUIRK_ALT_OSC)
+ sony_osc24 = 0;
+ if (input->port->dev->info->ts_quirks & TS_QUIRK_SERIAL)
+ sony_tspar = 0;
+ else
+ sony_tspar = 1;
+
+ if (demod_attach_cxd28xx(input, sony_tspar, sony_osc24) < 0)
+ return -ENODEV;
+ if (tuner_attach_tda18212(input, port->type) < 0)
+ return -ENODEV;
+ if (input->fe) {
+ if (dvb_register_frontend(adap, input->fe) < 0)
+ return -ENODEV;
+ }
+ break;
+ case DDB_TUNER_XO2_DVBC2T2I_SONY:
+ case DDB_TUNER_XO2_DVBCT2_SONY:
+ case DDB_TUNER_XO2_DVBC2T2_SONY:
+ case DDB_TUNER_XO2_ISDBT_SONY:
+ if (port->type == DDB_TUNER_XO2_DVBC2T2I_SONY)
+ sony_osc24 = 1;
+
+ if (demod_attach_cxd28xx(input, 0, sony_osc24) < 0)
+ return -ENODEV;
+ if (tuner_attach_tda18212(input, port->type) < 0)
+ return -ENODEV;
+ if (input->fe) {
+ if (dvb_register_frontend(adap, input->fe) < 0)
+ return -ENODEV;
+ }
+ break;
}
+
input->attached = 5;
return 0;
}
@@ -1130,6 +1374,70 @@ static void ddb_ports_detach(struct ddb *dev)
/****************************************************************************/
/****************************************************************************/
+static int init_xo2(struct ddb_port *port)
+{
+ struct i2c_adapter *i2c = &port->i2c->adap;
+ u8 val, data[2];
+ int res;
+
+ res = i2c_read_regs(i2c, 0x10, 0x04, data, 2);
+ if (res < 0)
+ return res;
+
+ if (data[0] != 0x01) {
+ pr_info("Port %d: invalid XO2\n", port->nr);
+ return -1;
+ }
+
+ i2c_read_reg(i2c, 0x10, 0x08, &val);
+ if (val != 0) {
+ i2c_write_reg(i2c, 0x10, 0x08, 0x00);
+ msleep(100);
+ }
+ /* Enable tuner power, disable pll, reset demods */
+ i2c_write_reg(i2c, 0x10, 0x08, 0x04);
+ usleep_range(2000, 3000);
+ /* Release demod resets */
+ i2c_write_reg(i2c, 0x10, 0x08, 0x07);
+
+ /* speed: 0=55,1=75,2=90,3=104 MBit/s */
+ i2c_write_reg(i2c, 0x10, 0x09,
+ ((xo2_speed >= 0 && xo2_speed <= 3) ? xo2_speed : 2));
+
+ i2c_write_reg(i2c, 0x10, 0x0a, 0x01);
+ i2c_write_reg(i2c, 0x10, 0x0b, 0x01);
+
+ usleep_range(2000, 3000);
+ /* Start XO2 PLL */
+ i2c_write_reg(i2c, 0x10, 0x08, 0x87);
+
+ return 0;
+}
+
+static int port_has_xo2(struct ddb_port *port, u8 *type, u8 *id)
+{
+ u8 probe[1] = { 0x00 }, data[4];
+
+ *type = DDB_XO2_TYPE_NONE;
+
+ if (i2c_io(&port->i2c->adap, 0x10, probe, 1, data, 4))
+ return 0;
+ if (data[0] == 'D' && data[1] == 'F') {
+ *id = data[2];
+ *type = DDB_XO2_TYPE_DUOFLEX;
+ return 1;
+ }
+ if (data[0] == 'C' && data[1] == 'I') {
+ *id = data[2];
+ *type = DDB_XO2_TYPE_CI;
+ return 1;
+ }
+ return 0;
+}
+
+/****************************************************************************/
+/****************************************************************************/
+
static int port_has_ci(struct ddb_port *port)
{
u8 val;
@@ -1162,10 +1470,39 @@ static int port_has_drxks(struct ddb_port *port)
return 1;
}
+static int port_has_stv0367(struct ddb_port *port)
+{
+ u8 val;
+ if (i2c_read_reg16(&port->i2c->adap, 0x1e, 0xf000, &val) < 0)
+ return 0;
+ if (val != 0x60)
+ return 0;
+ if (i2c_read_reg16(&port->i2c->adap, 0x1f, 0xf000, &val) < 0)
+ return 0;
+ if (val != 0x60)
+ return 0;
+ return 1;
+}
+
+static int port_has_cxd28xx(struct ddb_port *port, u8 *id)
+{
+ struct i2c_adapter *i2c = &port->i2c->adap;
+ int status;
+
+ status = i2c_write_reg(&port->i2c->adap, 0x6e, 0, 0);
+ if (status)
+ return 0;
+ status = i2c_read_reg(i2c, 0x6e, 0xfd, id);
+ if (status)
+ return 0;
+ return 1;
+}
+
static void ddb_port_probe(struct ddb_port *port)
{
struct ddb *dev = port->dev;
char *modname = "NO MODULE";
+ u8 xo2_type, xo2_id, cxd_id;
port->class = DDB_PORT_NONE;
@@ -1173,6 +1510,85 @@ static void ddb_port_probe(struct ddb_port *port)
modname = "CI";
port->class = DDB_PORT_CI;
ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
+ } else if (port_has_xo2(port, &xo2_type, &xo2_id)) {
+ printk(KERN_INFO "Port %d (TAB %d): XO2 type: %d, id: %d\n",
+ port->nr, port->nr+1, xo2_type, xo2_id);
+
+ ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
+
+ switch (xo2_type) {
+ case DDB_XO2_TYPE_DUOFLEX:
+ init_xo2(port);
+ switch (xo2_id >> 2) {
+ case 0:
+ modname = "DUAL DVB-S2 (unsupported)";
+ port->class = DDB_PORT_NONE;
+ port->type = DDB_TUNER_XO2_DVBS_STV0910;
+ break;
+ case 1:
+ modname = "DUAL DVB-C/T/T2";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_XO2_DVBCT2_SONY;
+ break;
+ case 2:
+ modname = "DUAL DVB-ISDBT";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_XO2_ISDBT_SONY;
+ break;
+ case 3:
+ modname = "DUAL DVB-C/C2/T/T2";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_XO2_DVBC2T2_SONY;
+ break;
+ case 4:
+ modname = "DUAL ATSC (unsupported)";
+ port->class = DDB_PORT_NONE;
+ port->type = DDB_TUNER_XO2_ATSC_ST;
+ break;
+ case 5:
+ modname = "DUAL DVB-C/C2/T/T2/ISDBT";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_XO2_DVBC2T2I_SONY;
+ break;
+ default:
+ modname = "Unknown XO2 DuoFlex module\n";
+ break;
+ }
+ break;
+ case DDB_XO2_TYPE_CI:
+ printk(KERN_INFO "DuoFlex CI modules not supported\n");
+ break;
+ default:
+ printk(KERN_INFO "Unknown XO2 DuoFlex module\n");
+ break;
+ }
+ } else if (port_has_cxd28xx(port, &cxd_id)) {
+ switch (cxd_id) {
+ case 0xa4:
+ modname = "DUAL DVB-C2T2 CXD2843";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_DVBC2T2_SONY_P;
+ break;
+ case 0xb1:
+ modname = "DUAL DVB-CT2 CXD2837";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_DVBCT2_SONY_P;
+ break;
+ case 0xb0:
+ modname = "DUAL ISDB-T CXD2838";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_ISDBT_SONY_P;
+ break;
+ case 0xc1:
+ modname = "DUAL DVB-C2T2 ISDB-T CXD2854";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_DVBC2T2I_SONY_P;
+ break;
+ default:
+ modname = "Unknown CXD28xx tuner";
+ break;
+ }
+ ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
} else if (port_has_stv0900(port)) {
modname = "DUAL DVB-S2";
port->class = DDB_PORT_TUNER;
@@ -1188,7 +1604,13 @@ static void ddb_port_probe(struct ddb_port *port)
port->class = DDB_PORT_TUNER;
port->type = DDB_TUNER_DVBCT_TR;
ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
+ } else if (port_has_stv0367(port)) {
+ modname = "DUAL DVB-C/T";
+ port->class = DDB_PORT_TUNER;
+ port->type = DDB_TUNER_DVBCT_ST;
+ ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING);
}
+
printk(KERN_INFO "Port %d (TAB %d): %s\n",
port->nr, port->nr+1, modname);
}
@@ -1601,6 +2023,19 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ddbwritel(0xfff0f, INTERRUPT_ENABLE);
ddbwritel(0, MSI1_ENABLE);
+ /* board control */
+ if (dev->info->board_control) {
+ ddbwritel(0, DDB_LINK_TAG(0) | BOARD_CONTROL);
+ msleep(100);
+ ddbwritel(dev->info->board_control_2,
+ DDB_LINK_TAG(0) | BOARD_CONTROL);
+ usleep_range(2000, 3000);
+ ddbwritel(dev->info->board_control_2
+ | dev->info->board_control,
+ DDB_LINK_TAG(0) | BOARD_CONTROL);
+ usleep_range(2000, 3000);
+ }
+
if (ddb_i2c_init(dev) < 0)
goto fail1;
ddb_ports_init(dev);
@@ -1655,6 +2090,12 @@ static const struct ddb_info ddb_octopus_le = {
.port_num = 2,
};
+static const struct ddb_info ddb_octopus_oem = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices Octopus OEM",
+ .port_num = 4,
+};
+
static const struct ddb_info ddb_octopus_mini = {
.type = DDB_OCTOPUS,
.name = "Digital Devices Octopus Mini",
@@ -1678,6 +2119,14 @@ static const struct ddb_info ddb_dvbct = {
.port_num = 3,
};
+static const struct ddb_info ddb_ctv7 = {
+ .type = DDB_OCTOPUS,
+ .name = "Digital Devices Cine CT V7 DVB adapter",
+ .port_num = 4,
+ .board_control = 3,
+ .board_control_2 = 4,
+};
+
static const struct ddb_info ddb_satixS2v3 = {
.type = DDB_OCTOPUS,
.name = "Mystique SaTiX-S2 V3 DVB adapter",
@@ -1690,6 +2139,55 @@ static const struct ddb_info ddb_octopusv3 = {
.port_num = 4,
};
+/*** MaxA8 adapters ***********************************************************/
+
+static struct ddb_info ddb_ct2_8 = {
+ .type = DDB_OCTOPUS_MAX_CT,
+ .name = "Digital Devices MAX A8 CT2",
+ .port_num = 4,
+ .board_control = 0x0ff,
+ .board_control_2 = 0xf00,
+ .ts_quirks = TS_QUIRK_SERIAL,
+};
+
+static struct ddb_info ddb_c2t2_8 = {
+ .type = DDB_OCTOPUS_MAX_CT,
+ .name = "Digital Devices MAX A8 C2T2",
+ .port_num = 4,
+ .board_control = 0x0ff,
+ .board_control_2 = 0xf00,
+ .ts_quirks = TS_QUIRK_SERIAL,
+};
+
+static struct ddb_info ddb_isdbt_8 = {
+ .type = DDB_OCTOPUS_MAX_CT,
+ .name = "Digital Devices MAX A8 ISDBT",
+ .port_num = 4,
+ .board_control = 0x0ff,
+ .board_control_2 = 0xf00,
+ .ts_quirks = TS_QUIRK_SERIAL,
+};
+
+static struct ddb_info ddb_c2t2i_v0_8 = {
+ .type = DDB_OCTOPUS_MAX_CT,
+ .name = "Digital Devices MAX A8 C2T2I V0",
+ .port_num = 4,
+ .board_control = 0x0ff,
+ .board_control_2 = 0xf00,
+ .ts_quirks = TS_QUIRK_SERIAL | TS_QUIRK_ALT_OSC,
+};
+
+static struct ddb_info ddb_c2t2i_8 = {
+ .type = DDB_OCTOPUS_MAX_CT,
+ .name = "Digital Devices MAX A8 C2T2I",
+ .port_num = 4,
+ .board_control = 0x0ff,
+ .board_control_2 = 0xf00,
+ .ts_quirks = TS_QUIRK_SERIAL,
+};
+
+/******************************************************************************/
+
#define DDVID 0xdd01 /* Digital Devices Vendor ID */
#define DDB_ID(_vend, _dev, _subvend, _subdev, _driverdata) { \
@@ -1700,15 +2198,34 @@ static const struct ddb_info ddb_octopusv3 = {
static const struct pci_device_id ddb_id_tbl[] = {
DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
+ DDB_ID(DDVID, 0x0005, DDVID, 0x0004, ddb_octopusv3),
DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
+ DDB_ID(DDVID, 0x0003, DDVID, 0x0003, ddb_octopus_oem),
DDB_ID(DDVID, 0x0003, DDVID, 0x0010, ddb_octopus_mini),
+ DDB_ID(DDVID, 0x0005, DDVID, 0x0011, ddb_octopus_mini),
DDB_ID(DDVID, 0x0003, DDVID, 0x0020, ddb_v6),
DDB_ID(DDVID, 0x0003, DDVID, 0x0021, ddb_v6_5),
DDB_ID(DDVID, 0x0003, DDVID, 0x0030, ddb_dvbct),
DDB_ID(DDVID, 0x0003, DDVID, 0xdb03, ddb_satixS2v3),
- DDB_ID(DDVID, 0x0005, DDVID, 0x0004, ddb_octopusv3),
+ DDB_ID(DDVID, 0x0006, DDVID, 0x0031, ddb_ctv7),
+ DDB_ID(DDVID, 0x0006, DDVID, 0x0032, ddb_ctv7),
+ DDB_ID(DDVID, 0x0006, DDVID, 0x0033, ddb_ctv7),
+ DDB_ID(DDVID, 0x0008, DDVID, 0x0034, ddb_ct2_8),
+ DDB_ID(DDVID, 0x0008, DDVID, 0x0035, ddb_c2t2_8),
+ DDB_ID(DDVID, 0x0008, DDVID, 0x0036, ddb_isdbt_8),
+ DDB_ID(DDVID, 0x0008, DDVID, 0x0037, ddb_c2t2i_v0_8),
+ DDB_ID(DDVID, 0x0008, DDVID, 0x0038, ddb_c2t2i_8),
+ DDB_ID(DDVID, 0x0006, DDVID, 0x0039, ddb_ctv7),
/* in case sub-ids got deleted in flash */
DDB_ID(DDVID, 0x0003, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
+ DDB_ID(DDVID, 0x0005, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
+ DDB_ID(DDVID, 0x0006, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
+ DDB_ID(DDVID, 0x0007, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
+ DDB_ID(DDVID, 0x0008, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
+ DDB_ID(DDVID, 0x0011, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
+ DDB_ID(DDVID, 0x0013, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
+ DDB_ID(DDVID, 0x0201, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
+ DDB_ID(DDVID, 0x0320, PCI_ANY_ID, PCI_ANY_ID, ddb_none),
{0}
};
MODULE_DEVICE_TABLE(pci, ddb_id_tbl);
diff --git a/drivers/media/pci/ddbridge/ddbridge-regs.h b/drivers/media/pci/ddbridge/ddbridge-regs.h
index 6ae810324b4e..98cebb97d64f 100644
--- a/drivers/media/pci/ddbridge/ddbridge-regs.h
+++ b/drivers/media/pci/ddbridge/ddbridge-regs.h
@@ -34,6 +34,10 @@
/* ------------------------------------------------------------------------- */
+#define BOARD_CONTROL 0x30
+
+/* ------------------------------------------------------------------------- */
+
/* Interrupt controller */
/* How many MSI's are available depends on HW (Min 2 max 8) */
/* How many are usable also depends on Host platform */
diff --git a/drivers/media/pci/ddbridge/ddbridge.h b/drivers/media/pci/ddbridge/ddbridge.h
index 185b423818d3..4a0e3283d646 100644
--- a/drivers/media/pci/ddbridge/ddbridge.h
+++ b/drivers/media/pci/ddbridge/ddbridge.h
@@ -43,14 +43,29 @@
#define DDB_MAX_PORT 4
#define DDB_MAX_INPUT 8
#define DDB_MAX_OUTPUT 4
+#define DDB_MAX_LINK 4
+#define DDB_LINK_SHIFT 28
+
+#define DDB_LINK_TAG(_x) (_x << DDB_LINK_SHIFT)
+
+#define DDB_XO2_TYPE_NONE 0
+#define DDB_XO2_TYPE_DUOFLEX 1
+#define DDB_XO2_TYPE_CI 2
struct ddb_info {
int type;
-#define DDB_NONE 0
-#define DDB_OCTOPUS 1
+#define DDB_NONE 0
+#define DDB_OCTOPUS 1
+#define DDB_OCTOPUS_MAX_CT 6
char *name;
int port_num;
u32 port_type[DDB_MAX_PORT];
+ u32 board_control;
+ u32 board_control_2;
+ u8 ts_quirks;
+#define TS_QUIRK_SERIAL 1
+#define TS_QUIRK_REVERSED 2
+#define TS_QUIRK_ALT_OSC 8
};
/* DMA_SIZE MUST be divisible by 188 and 128 !!! */
@@ -86,6 +101,7 @@ struct ddb_input {
struct dvb_adapter adap;
struct dvb_device *dev;
+ struct i2c_client *i2c_client[1];
struct dvb_frontend *fe;
struct dvb_frontend *fe2;
struct dmxdev dmxdev;
@@ -138,11 +154,22 @@ struct ddb_port {
#define DDB_PORT_CI 1
#define DDB_PORT_TUNER 2
u32 type;
-#define DDB_TUNER_NONE 0
-#define DDB_TUNER_DVBS_ST 1
-#define DDB_TUNER_DVBS_ST_AA 2
-#define DDB_TUNER_DVBCT_TR 16
-#define DDB_TUNER_DVBCT_ST 17
+#define DDB_TUNER_NONE 0
+#define DDB_TUNER_DVBS_ST 1
+#define DDB_TUNER_DVBS_ST_AA 2
+#define DDB_TUNER_DVBCT2_SONY_P 7
+#define DDB_TUNER_DVBC2T2_SONY_P 8
+#define DDB_TUNER_ISDBT_SONY_P 9
+#define DDB_TUNER_DVBC2T2I_SONY_P 15
+#define DDB_TUNER_DVBCT_TR 16
+#define DDB_TUNER_DVBCT_ST 17
+#define DDB_TUNER_XO2_DVBS_STV0910 32
+#define DDB_TUNER_XO2_DVBCT2_SONY 33
+#define DDB_TUNER_XO2_ISDBT_SONY 34
+#define DDB_TUNER_XO2_DVBC2T2_SONY 35
+#define DDB_TUNER_XO2_ATSC_ST 36
+#define DDB_TUNER_XO2_DVBC2T2I_SONY 37
+
u32 adr;
struct ddb_input *input[2];
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
index 807ead20d212..417d03da01f0 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
@@ -262,14 +262,16 @@ static int snd_ivtv_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct snd_ivtv_card *itvsc = snd_pcm_substream_chip(substream);
unsigned long flags;
+ unsigned char *dma_area = NULL;
spin_lock_irqsave(&itvsc->slock, flags);
if (substream->runtime->dma_area) {
dprintk("freeing pcm capture region\n");
- vfree(substream->runtime->dma_area);
+ dma_area = substream->runtime->dma_area;
substream->runtime->dma_area = NULL;
}
spin_unlock_irqrestore(&itvsc->slock, flags);
+ vfree(dma_area);
return 0;
}
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 9444483fb942..5c0a4e614413 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -122,7 +122,8 @@ static void netup_unidvb_queue_cleanup(struct netup_dma *dma);
static struct cxd2841er_config demod_config = {
.i2c_addr = 0xc8,
- .xtal = SONY_XTAL_24000
+ .xtal = SONY_XTAL_24000,
+ .flags = CXD2841ER_USE_GATECTRL | CXD2841ER_ASCOT
};
static struct horus3a_config horus3a_conf = {
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index f79380faf499..9965d3531c80 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -7806,7 +7806,7 @@ int saa7134_board_init2(struct saa7134_dev *dev)
dev->name, saa7134_boards[dev->board].name);
break;
}
- /* break intentionally omitted */
+ /* fall-through */
case SAA7134_BOARD_VIDEOMATE_DVBT_300:
case SAA7134_BOARD_ASUS_EUROPA2_HYBRID:
case SAA7134_BOARD_ASUS_EUROPA_HYBRID:
@@ -7864,7 +7864,7 @@ int saa7134_board_init2(struct saa7134_dev *dev)
break;
case SAA7134_BOARD_HAUPPAUGE_HVR1110:
hauppauge_eeprom(dev, dev->eedata+0x80);
- /* break intentionally omitted */
+ /* fall-through */
case SAA7134_BOARD_PINNACLE_PCTV_310i:
case SAA7134_BOARD_KWORLD_DVBT_210:
case SAA7134_BOARD_TEVION_DVBT_220RF:
diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
index b2ff82fa7116..ecfeac5cdbed 100644
--- a/drivers/media/pci/saa7164/saa7164-bus.c
+++ b/drivers/media/pci/saa7164/saa7164-bus.c
@@ -389,11 +389,11 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
+ memcpy(msg, &msg_tmp, sizeof(*msg));
/* No need to update the read positions, because this was a peek */
/* If the caller specifically want to peek, return */
if (peekonly) {
- memcpy(msg, &msg_tmp, sizeof(*msg));
goto peekout;
}
@@ -438,21 +438,15 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
space_rem = bus->m_dwSizeGetRing - curr_grp;
if (space_rem < sizeof(*msg)) {
- /* msg wraps around the ring */
- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem);
- memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing,
- sizeof(*msg) - space_rem);
if (buf)
memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
space_rem, buf_size);
} else if (space_rem == sizeof(*msg)) {
- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
if (buf)
memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
} else {
/* Additional data wraps around the ring */
- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
if (buf) {
memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
sizeof(*msg), space_rem - sizeof(*msg));
@@ -465,15 +459,10 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
} else {
/* No wrapping */
- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
if (buf)
memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
buf_size);
}
- /* Convert from little endian to CPU */
- msg->size = le16_to_cpu((__force __le16)msg->size);
- msg->command = le32_to_cpu((__force __le32)msg->command);
- msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
/* Update the read positions, adjusting the ring */
saa7164_writel(bus->m_dwGetReadPos, new_grp);
diff --git a/drivers/media/pci/saa7164/saa7164-cmd.c b/drivers/media/pci/saa7164/saa7164-cmd.c
index 175015ca79f2..dfebd77ada59 100644
--- a/drivers/media/pci/saa7164/saa7164-cmd.c
+++ b/drivers/media/pci/saa7164/saa7164-cmd.c
@@ -506,6 +506,8 @@ int saa7164_cmd_send(struct saa7164_dev *dev, u8 id, enum tmComResCmd command,
dprintk(DBGLVL_CMD,
"%s() UNKNOWN OR INVALID CONTROL\n",
__func__);
+ ret = SAA_ERR_NOT_SUPPORTED;
+ break;
default:
dprintk(DBGLVL_CMD, "%s() UNKNOWN\n", __func__);
ret = SAA_ERR_NOT_SUPPORTED;
diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
index f50d07229236..ca0873e47bea 100644
--- a/drivers/media/pci/solo6x10/solo6x10-core.c
+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
@@ -511,6 +511,7 @@ static int solo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
default:
dev_warn(&pdev->dev, "Invalid chip_id 0x%02x, assuming 4 ch\n",
chip_id);
+ /* fall through */
case 5:
solo_dev->nr_chans = 4;
solo_dev->nr_ext = 1;
diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
index 36e93540bb49..3ca947092775 100644
--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
+++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
@@ -223,9 +223,9 @@ static snd_pcm_uframes_t snd_solo_pcm_pointer(struct snd_pcm_substream *ss)
return idx * G723_FRAMES_PER_PAGE;
}
-static int snd_solo_pcm_copy(struct snd_pcm_substream *ss, int channel,
- snd_pcm_uframes_t pos, void __user *dst,
- snd_pcm_uframes_t count)
+static int __snd_solo_pcm_copy(struct snd_pcm_substream *ss,
+ unsigned long pos, void *dst,
+ unsigned long count, bool in_kernel)
{
struct solo_snd_pcm *solo_pcm = snd_pcm_substream_chip(ss);
struct solo_dev *solo_dev = solo_pcm->solo_dev;
@@ -242,16 +242,31 @@ static int snd_solo_pcm_copy(struct snd_pcm_substream *ss, int channel,
if (err)
return err;
- err = copy_to_user(dst + (i * G723_PERIOD_BYTES),
- solo_pcm->g723_buf, G723_PERIOD_BYTES);
-
- if (err)
+ if (in_kernel)
+ memcpy(dst, solo_pcm->g723_buf, G723_PERIOD_BYTES);
+ else if (copy_to_user((void __user *)dst,
+ solo_pcm->g723_buf, G723_PERIOD_BYTES))
return -EFAULT;
+ dst += G723_PERIOD_BYTES;
}
return 0;
}
+static int snd_solo_pcm_copy_user(struct snd_pcm_substream *ss, int channel,
+ unsigned long pos, void __user *dst,
+ unsigned long count)
+{
+ return __snd_solo_pcm_copy(ss, pos, (void *)dst, count, false);
+}
+
+static int snd_solo_pcm_copy_kernel(struct snd_pcm_substream *ss, int channel,
+ unsigned long pos, void *dst,
+ unsigned long count)
+{
+ return __snd_solo_pcm_copy(ss, pos, dst, count, true);
+}
+
static const struct snd_pcm_ops snd_solo_pcm_ops = {
.open = snd_solo_pcm_open,
.close = snd_solo_pcm_close,
@@ -261,7 +276,8 @@ static const struct snd_pcm_ops snd_solo_pcm_ops = {
.prepare = snd_solo_pcm_prepare,
.trigger = snd_solo_pcm_trigger,
.pointer = snd_solo_pcm_pointer,
- .copy = snd_solo_pcm_copy,
+ .copy_user = snd_solo_pcm_copy_user,
+ .copy_kernel = snd_solo_pcm_copy_kernel,
};
static int snd_solo_capture_volume_info(struct snd_kcontrol *kcontrol,
diff --git a/drivers/media/pci/solo6x10/solo6x10-i2c.c b/drivers/media/pci/solo6x10/solo6x10-i2c.c
index e83bb79f9349..89f2f2a493c2 100644
--- a/drivers/media/pci/solo6x10/solo6x10-i2c.c
+++ b/drivers/media/pci/solo6x10/solo6x10-i2c.c
@@ -192,6 +192,7 @@ int solo_i2c_isr(struct solo_dev *solo_dev)
}
solo_dev->i2c_state = IIC_STATE_WRITE;
+ /* fall through */
case IIC_STATE_WRITE:
ret = solo_i2c_handle_write(solo_dev);
break;
diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
index df9395c87178..f2905bd80366 100644
--- a/drivers/media/pci/ttpci/av7110.c
+++ b/drivers/media/pci/ttpci/av7110.c
@@ -336,6 +336,7 @@ static int DvbDmxFilterCallback(u8 *buffer1, size_t buffer1_len,
av7110_p2t_write(buffer1, buffer1_len,
dvbdmxfilter->feed->pid,
&av7110->p2t_filter[dvbdmxfilter->index]);
+ return 0;
default:
return 0;
}
@@ -451,8 +452,12 @@ static void debiirq(unsigned long cookie)
case DATA_CI_PUT:
dprintk(4, "debi DATA_CI_PUT\n");
+ xfer = TX_BUFF;
+ break;
case DATA_MPEG_PLAY:
dprintk(4, "debi DATA_MPEG_PLAY\n");
+ xfer = TX_BUFF;
+ break;
case DATA_BMP_LOAD:
dprintk(4, "debi DATA_BMP_LOAD\n");
xfer = TX_BUFF;
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index 180f3d7af3e1..a11cb501c550 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -534,6 +534,7 @@ static int zoran_v4l_queue_frame(struct zoran_fh *fh, int num)
KERN_WARNING
"%s: %s - queueing buffer %d in state DONE!?\n",
ZR_DEVNAME(zr), __func__, num);
+ /* fall through */
case BUZ_STATE_USER:
/* since there is at least one unused buffer there's room for at least
* one more pend[] entry */
@@ -693,6 +694,7 @@ static int zoran_jpg_queue_frame(struct zoran_fh *fh, int num,
KERN_WARNING
"%s: %s - queing frame in BUZ_STATE_DONE state!?\n",
ZR_DEVNAME(zr), __func__);
+ /* fall through */
case BUZ_STATE_USER:
/* since there is at least one unused buffer there's room for at
*least one more pend[] entry */
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 041cb80a26b1..1313cd533436 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -74,6 +74,13 @@ config VIDEO_M32R_AR_M64278
To compile this driver as a module, choose M here: the
module will be called arv.
+config VIDEO_MUX
+ tristate "Video Multiplexer"
+ depends on OF && VIDEO_V4L2_SUBDEV_API && MEDIA_CONTROLLER
+ select REGMAP
+ help
+ This driver provides support for N:1 video bus multiplexers.
+
config VIDEO_OMAP3
tristate "OMAP 3 Camera support"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API && ARCH_OMAP3
@@ -82,6 +89,7 @@ config VIDEO_OMAP3
select ARM_DMA_USE_IOMMU
select VIDEOBUF2_DMA_CONTIG
select MFD_SYSCON
+ select V4L2_FWNODE
---help---
Driver for an OMAP 3 camera controller.
@@ -97,6 +105,7 @@ config VIDEO_PXA27x
depends on PXA27x || COMPILE_TEST
select VIDEOBUF2_DMA_SG
select SG_SPLIT
+ select V4L2_FWNODE
---help---
This is a v4l2 driver for the PXA27x Quick Capture Interface
@@ -114,6 +123,19 @@ config VIDEO_S3C_CAMIF
To compile this driver as a module, choose M here: the module
will be called s3c-camif.
+config VIDEO_STM32_DCMI
+ tristate "STM32 Digital Camera Memory Interface (DCMI) support"
+ depends on VIDEO_V4L2 && OF && HAS_DMA
+ depends on ARCH_STM32 || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ ---help---
+ This module makes the STM32 Digital Camera Memory Interface (DCMI)
+ available as a v4l2 device.
+
+ To compile this driver as a module, choose M here: the module
+ will be called stm32-dcmi.
+
source "drivers/media/platform/soc_camera/Kconfig"
source "drivers/media/platform/exynos4-is/Kconfig"
source "drivers/media/platform/am437x/Kconfig"
@@ -127,6 +149,7 @@ config VIDEO_TI_CAL
depends on SOC_DRA7XX || COMPILE_TEST
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
default n
---help---
Support for the TI CAL (Camera Adaptation Layer) block
@@ -448,6 +471,20 @@ config VIDEO_TI_VPE_DEBUG
---help---
Enable debug messages on VPE driver.
+config VIDEO_QCOM_VENUS
+ tristate "Qualcomm Venus V4L2 encoder/decoder driver"
+ depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
+ depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
+ select QCOM_MDT_LOADER if (ARM || ARM64)
+ select QCOM_SCM if (ARM || ARM64)
+ select VIDEOBUF2_DMA_SG
+ select V4L2_MEM2MEM_DEV
+ ---help---
+ This is a V4L2 driver for Qualcomm Venus video accelerator
+ hardware. It accelerates encoding and decoding operations
+ on various Qualcomm SoCs.
+ To compile this driver as a module, choose M here.
+
endif # V4L_MEM2MEM_DRIVERS
# TI VIDEO PORT Helper Modules
@@ -521,4 +558,41 @@ config VIDEO_STI_HDMI_CEC
CEC bus is present in the HDMI connector and enables communication
between compatible devices.
+config VIDEO_STM32_HDMI_CEC
+ tristate "STMicroelectronics STM32 HDMI CEC driver"
+ depends on ARCH_STM32 || COMPILE_TEST
+ select REGMAP
+ select REGMAP_MMIO
+ select CEC_CORE
+ ---help---
+ This is a driver for the STM32 HDMI CEC interface. It uses the
+ generic CEC framework interface.
+ CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
endif #CEC_PLATFORM_DRIVERS
+
+menuconfig SDR_PLATFORM_DRIVERS
+ bool "SDR platform devices"
+ depends on MEDIA_SDR_SUPPORT
+ default n
+ ---help---
+ Say Y here to enable support for platform-specific SDR Drivers.
+
+if SDR_PLATFORM_DRIVERS
+
+config VIDEO_RCAR_DRIF
+ tristate "Renesas Digitial Radio Interface (DRIF)"
+ depends on VIDEO_V4L2 && HAS_DMA
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select VIDEOBUF2_VMALLOC
+ ---help---
+ Say Y if you want to enable R-Car Gen3 DRIF support. DRIF is a Digital
+ Radio Interface that interfaces with an RF front-end chip. It is a
+ receiver of digital data which uses DMA to transfer received data to
+ a configured location for an application to use.
+
+ To compile this driver as a module, choose M here; the module
+ will be called rcar_drif.
+
+endif # SDR_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 63303d63c64c..9beadc760467 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -28,6 +28,8 @@ obj-$(CONFIG_VIDEO_SH_VEU) += sh_veu.o
obj-$(CONFIG_VIDEO_MEM2MEM_DEINTERLACE) += m2m-deinterlace.o
+obj-$(CONFIG_VIDEO_MUX) += video-mux.o
+
obj-$(CONFIG_VIDEO_S3C_CAMIF) += s3c-camif/
obj-$(CONFIG_VIDEO_SAMSUNG_EXYNOS4_IS) += exynos4-is/
obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg/
@@ -44,14 +46,17 @@ obj-$(CONFIG_VIDEO_STI_HDMI_CEC) += sti/cec/
obj-$(CONFIG_VIDEO_STI_DELTA) += sti/delta/
-obj-$(CONFIG_BLACKFIN) += blackfin/
+obj-y += stm32/
+
+obj-y += blackfin/
-obj-$(CONFIG_ARCH_DAVINCI) += davinci/
+obj-y += davinci/
obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
obj-$(CONFIG_SOC_CAMERA) += soc_camera/
+obj-$(CONFIG_VIDEO_RCAR_DRIF) += rcar_drif.o
obj-$(CONFIG_VIDEO_RENESAS_FCP) += rcar-fcp.o
obj-$(CONFIG_VIDEO_RENESAS_FDP1) += rcar_fdp1.o
obj-$(CONFIG_VIDEO_RENESAS_JPU) += rcar_jpu.o
@@ -68,6 +73,8 @@ obj-$(CONFIG_VIDEO_RCAR_VIN) += rcar-vin/
obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel/
obj-$(CONFIG_VIDEO_ATMEL_ISI) += atmel/
+obj-$(CONFIG_VIDEO_STM32_DCMI) += stm32/
+
ccflags-y += -I$(srctree)/drivers/media/i2c
obj-$(CONFIG_VIDEO_MEDIATEK_VPU) += mtk-vpu/
@@ -77,3 +84,5 @@ obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec/
obj-$(CONFIG_VIDEO_MEDIATEK_MDP) += mtk-mdp/
obj-$(CONFIG_VIDEO_MEDIATEK_JPEG) += mtk-jpeg/
+
+obj-$(CONFIG_VIDEO_QCOM_VENUS) += qcom/venus/
diff --git a/drivers/media/platform/am437x/Kconfig b/drivers/media/platform/am437x/Kconfig
index 42d9c186710a..160e77e9a0fb 100644
--- a/drivers/media/platform/am437x/Kconfig
+++ b/drivers/media/platform/am437x/Kconfig
@@ -3,6 +3,7 @@ config VIDEO_AM437X_VPFE
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
depends on SOC_AM43XX || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
help
Support for AM437x Video Processing Front End based Video
Capture Driver.
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index 05489a401c5c..466aba8b0e00 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -36,7 +37,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
#include "am437x-vpfe.h"
@@ -2303,7 +2304,8 @@ vpfe_async_bound(struct v4l2_async_notifier *notifier,
vpfe_dbg(1, vpfe, "vpfe_async_bound\n");
for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
- if (vpfe->cfg->asd[i]->match.of.node == asd[i].match.of.node) {
+ if (vpfe->cfg->asd[i]->match.fwnode.fwnode ==
+ asd[i].match.fwnode.fwnode) {
sdinfo = &vpfe->cfg->sub_devs[i];
vpfe->sd[i] = subdev;
vpfe->sd[i]->grp_id = sdinfo->grp_id;
@@ -2419,7 +2421,7 @@ static struct vpfe_config *
vpfe_get_pdata(struct platform_device *pdev)
{
struct device_node *endpoint = NULL;
- struct v4l2_of_endpoint bus_cfg;
+ struct v4l2_fwnode_endpoint bus_cfg;
struct vpfe_subdev_info *sdinfo;
struct vpfe_config *pdata;
unsigned int flags;
@@ -2463,7 +2465,8 @@ vpfe_get_pdata(struct platform_device *pdev)
sdinfo->vpfe_param.if_type = VPFE_RAW_BAYER;
}
- err = v4l2_of_parse_endpoint(endpoint, &bus_cfg);
+ err = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint),
+ &bus_cfg);
if (err) {
dev_err(&pdev->dev, "Could not parse the endpoint\n");
goto done;
@@ -2501,8 +2504,8 @@ vpfe_get_pdata(struct platform_device *pdev)
goto done;
}
- pdata->asd[i]->match_type = V4L2_ASYNC_MATCH_OF;
- pdata->asd[i]->match.of.node = rem;
+ pdata->asd[i]->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ pdata->asd[i]->match.fwnode.fwnode = of_fwnode_handle(rem);
of_node_put(rem);
}
diff --git a/drivers/media/platform/atmel/Kconfig b/drivers/media/platform/atmel/Kconfig
index 9bd0f19b127f..55de751e5f51 100644
--- a/drivers/media/platform/atmel/Kconfig
+++ b/drivers/media/platform/atmel/Kconfig
@@ -4,6 +4,7 @@ config VIDEO_ATMEL_ISC
depends on ARCH_AT91 || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select REGMAP_MMIO
+ select V4L2_FWNODE
help
This module makes the ATMEL Image Sensor Controller available
as a v4l2 device.
@@ -13,6 +14,7 @@ config VIDEO_ATMEL_ISI
depends on VIDEO_V4L2 && OF && HAS_DMA
depends on ARCH_AT91 || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
---help---
This module makes the ATMEL Image Sensor Interface available
as a v4l2 device.
diff --git a/drivers/media/platform/atmel/atmel-isc.c b/drivers/media/platform/atmel/atmel-isc.c
index c4b2115559a5..d6534252cdcd 100644
--- a/drivers/media/platform/atmel/atmel-isc.c
+++ b/drivers/media/platform/atmel/atmel-isc.c
@@ -32,6 +32,7 @@
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -42,7 +43,7 @@
#include <media/v4l2-event.h>
#include <media/v4l2-image-sizes.h>
#include <media/v4l2-ioctl.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-dma-contig.h>
@@ -239,13 +240,11 @@ static struct isc_format isc_formats[] = {
{ V4L2_PIX_FMT_YUV420, 0x0, 12,
ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_YYCC,
- ISC_DCFG_IMODE_YC420P | ISC_DCFG_YMBSIZE_BEATS8 |
- ISC_DCFG_CMBSIZE_BEATS8, ISC_DCTRL_DVIEW_PLANAR, 0x7fb,
+ ISC_DCFG_IMODE_YC420P, ISC_DCTRL_DVIEW_PLANAR, 0x7fb,
false, false },
{ V4L2_PIX_FMT_YUV422P, 0x0, 16,
ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_YYCC,
- ISC_DCFG_IMODE_YC422P | ISC_DCFG_YMBSIZE_BEATS8 |
- ISC_DCFG_CMBSIZE_BEATS8, ISC_DCTRL_DVIEW_PLANAR, 0x3fb,
+ ISC_DCFG_IMODE_YC422P, ISC_DCTRL_DVIEW_PLANAR, 0x3fb,
false, false },
{ V4L2_PIX_FMT_RGB565, MEDIA_BUS_FMT_RGB565_2X8_LE, 16,
ISC_PFE_CFG0_BPS_EIGHT, ISC_BAY_CFG_BGBG, ISC_RLP_CFG_MODE_RGB565,
@@ -700,8 +699,10 @@ static void isc_set_histogram(struct isc_device *isc)
}
static inline void isc_get_param(const struct isc_format *fmt,
- u32 *rlp_mode, u32 *dcfg_imode)
+ u32 *rlp_mode, u32 *dcfg)
{
+ *dcfg = ISC_DCFG_YMBSIZE_BEATS8;
+
switch (fmt->fourcc) {
case V4L2_PIX_FMT_SBGGR10:
case V4L2_PIX_FMT_SGBRG10:
@@ -712,11 +713,11 @@ static inline void isc_get_param(const struct isc_format *fmt,
case V4L2_PIX_FMT_SGRBG12:
case V4L2_PIX_FMT_SRGGB12:
*rlp_mode = fmt->reg_rlp_mode;
- *dcfg_imode = fmt->reg_dcfg_imode;
+ *dcfg |= fmt->reg_dcfg_imode;
break;
default:
*rlp_mode = ISC_RLP_CFG_MODE_DAT8;
- *dcfg_imode = ISC_DCFG_IMODE_PACKED8;
+ *dcfg |= ISC_DCFG_IMODE_PACKED8;
break;
}
}
@@ -726,18 +727,19 @@ static int isc_configure(struct isc_device *isc)
struct regmap *regmap = isc->regmap;
const struct isc_format *current_fmt = isc->current_fmt;
struct isc_subdev_entity *subdev = isc->current_subdev;
- u32 pfe_cfg0, rlp_mode, dcfg_imode, mask, pipeline;
+ u32 pfe_cfg0, rlp_mode, dcfg, mask, pipeline;
if (sensor_is_preferred(current_fmt)) {
pfe_cfg0 = current_fmt->reg_bps;
pipeline = 0x0;
- isc_get_param(current_fmt, &rlp_mode, &dcfg_imode);
+ isc_get_param(current_fmt, &rlp_mode, &dcfg);
isc->ctrls.hist_stat = HIST_INIT;
} else {
pfe_cfg0 = isc->raw_fmt->reg_bps;
pipeline = current_fmt->pipeline;
rlp_mode = current_fmt->reg_rlp_mode;
- dcfg_imode = current_fmt->reg_dcfg_imode;
+ dcfg = current_fmt->reg_dcfg_imode | ISC_DCFG_YMBSIZE_BEATS8 |
+ ISC_DCFG_CMBSIZE_BEATS8;
}
pfe_cfg0 |= subdev->pfe_cfg0 | ISC_PFE_CFG0_MODE_PROGRESSIVE;
@@ -750,7 +752,7 @@ static int isc_configure(struct isc_device *isc)
regmap_update_bits(regmap, ISC_RLP_CFG, ISC_RLP_CFG_MODE_MASK,
rlp_mode);
- regmap_update_bits(regmap, ISC_DCFG, ISC_DCFG_IMODE_MASK, dcfg_imode);
+ regmap_write(regmap, ISC_DCFG, dcfg);
/* Set the pipeline */
isc_set_pipeline(isc, pipeline);
@@ -1684,7 +1686,7 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
{
struct device_node *np = dev->of_node;
struct device_node *epn = NULL, *rem;
- struct v4l2_of_endpoint v4l2_epn;
+ struct v4l2_fwnode_endpoint v4l2_epn;
struct isc_subdev_entity *subdev_entity;
unsigned int flags;
int ret;
@@ -1703,7 +1705,8 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
continue;
}
- ret = v4l2_of_parse_endpoint(epn, &v4l2_epn);
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
+ &v4l2_epn);
if (ret) {
of_node_put(rem);
ret = -EINVAL;
@@ -1738,8 +1741,9 @@ static int isc_parse_dt(struct device *dev, struct isc_device *isc)
if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_PPOL_LOW;
- subdev_entity->asd->match_type = V4L2_ASYNC_MATCH_OF;
- subdev_entity->asd->match.of.node = rem;
+ subdev_entity->asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ subdev_entity->asd->match.fwnode.fwnode =
+ of_fwnode_handle(rem);
list_add_tail(&subdev_entity->list, &isc->subdev_entities);
}
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index e4867f84514c..891fa2505efa 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -30,14 +31,14 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-image-sizes.h>
#include "atmel-isi.h"
-#define MAX_SUPPORT_WIDTH 2048
-#define MAX_SUPPORT_HEIGHT 2048
+#define MAX_SUPPORT_WIDTH 2048U
+#define MAX_SUPPORT_HEIGHT 2048U
#define MIN_FRAME_RATE 15
#define FRAME_INTERVAL_MILLI_SEC (1000 / MIN_FRAME_RATE)
@@ -424,6 +425,8 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
struct frame_buffer *buf, *node;
int ret;
+ pm_runtime_get_sync(isi->dev);
+
/* Enable stream on the sub device */
ret = v4l2_subdev_call(isi->entity.subdev, video, s_stream, 1);
if (ret && ret != -ENOIOCTLCMD) {
@@ -431,8 +434,6 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
goto err_start_stream;
}
- pm_runtime_get_sync(isi->dev);
-
/* Reset ISI */
ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET);
if (ret < 0) {
@@ -455,10 +456,11 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
return 0;
err_reset:
- pm_runtime_put(isi->dev);
v4l2_subdev_call(isi->entity.subdev, video, s_stream, 0);
err_start_stream:
+ pm_runtime_put(isi->dev);
+
spin_lock_irq(&isi->irqlock);
isi->active = NULL;
/* Release all active buffers */
@@ -566,20 +568,15 @@ static int isi_try_fmt(struct atmel_isi *isi, struct v4l2_format *f,
};
int ret;
- if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
isi_fmt = find_format_by_fourcc(isi, pixfmt->pixelformat);
if (!isi_fmt) {
isi_fmt = isi->user_formats[isi->num_user_formats - 1];
pixfmt->pixelformat = isi_fmt->fourcc;
}
- /* Limit to Atmel ISC hardware capabilities */
- if (pixfmt->width > MAX_SUPPORT_WIDTH)
- pixfmt->width = MAX_SUPPORT_WIDTH;
- if (pixfmt->height > MAX_SUPPORT_HEIGHT)
- pixfmt->height = MAX_SUPPORT_HEIGHT;
+ /* Limit to Atmel ISI hardware capabilities */
+ pixfmt->width = clamp(pixfmt->width, 0U, MAX_SUPPORT_WIDTH);
+ pixfmt->height = clamp(pixfmt->height, 0U, MAX_SUPPORT_HEIGHT);
v4l2_fill_mbus_format(&format.format, pixfmt, isi_fmt->mbus_code);
ret = v4l2_subdev_call(isi->entity.subdev, pad, set_fmt,
@@ -801,7 +798,7 @@ static int atmel_isi_parse_dt(struct atmel_isi *isi,
struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct v4l2_of_endpoint ep;
+ struct v4l2_fwnode_endpoint ep;
int err;
/* Default settings for ISI */
@@ -814,7 +811,7 @@ static int atmel_isi_parse_dt(struct atmel_isi *isi,
return -EINVAL;
}
- err = v4l2_of_parse_endpoint(np, &ep);
+ err = v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &ep);
of_node_put(np);
if (err) {
dev_err(&pdev->dev, "Could not parse the endpoint\n");
@@ -1058,7 +1055,7 @@ static int isi_graph_notify_complete(struct v4l2_async_notifier *notifier)
struct atmel_isi *isi = notifier_to_isi(notifier);
int ret;
- isi->vdev->ctrl_handler = isi->entity.subdev->ctrl_handler;
+ isi->vdev->ctrl_handler = isi->entity.subdev->ctrl_handler;
ret = isi_formats_init(isi);
if (ret) {
dev_err(isi->dev, "No supported mediabus format found\n");
@@ -1126,8 +1123,8 @@ static int isi_graph_parse(struct atmel_isi *isi, struct device_node *node)
/* Remote node to connect */
isi->entity.node = remote;
- isi->entity.asd.match_type = V4L2_ASYNC_MATCH_OF;
- isi->entity.asd.match.of.node = remote;
+ isi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ isi->entity.asd.match.fwnode.fwnode = of_fwnode_handle(remote);
return 0;
}
}
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 403214e00e95..25cbf9e5ac5a 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -427,14 +427,16 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx,
/* Register frame buffers in the parameter buffer */
for (i = 0; i < ctx->num_internal_frames; i++) {
- u32 y, cb, cr;
+ u32 y, cb, cr, mvcol;
/* Start addresses of Y, Cb, Cr planes */
y = ctx->internal_frames[i].paddr;
cb = y + ysize;
cr = y + ysize + ysize/4;
+ mvcol = y + ysize + ysize/4 + ysize/4;
if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP) {
cb = round_up(cb, 4096);
+ mvcol = cb + ysize/2;
cr = 0;
/* Packed 20-bit MSB of base addresses */
/* YYYYYCCC, CCyyyyyc, cccc.... */
@@ -448,9 +450,7 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx,
/* mvcol buffer for h.264 */
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 &&
dev->devtype->product != CODA_DX6)
- coda_parabuf_write(ctx, 96 + i,
- ctx->internal_frames[i].paddr +
- ysize + ysize/4 + ysize/4);
+ coda_parabuf_write(ctx, 96 + i, mvcol);
}
/* mvcol buffer for mpeg4 */
@@ -1247,12 +1247,18 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
dst_buf->sequence = ctx->osequence;
ctx->osequence++;
+ force_ipicture = ctx->params.force_ipicture;
+ if (force_ipicture)
+ ctx->params.force_ipicture = false;
+ else if ((src_buf->sequence % ctx->params.gop_size) == 0)
+ force_ipicture = 1;
+
/*
* Workaround coda firmware BUG that only marks the first
* frame as IDR. This is a problem for some decoders that can't
* recover when a frame is lost.
*/
- if (src_buf->sequence % ctx->params.gop_size) {
+ if (!force_ipicture) {
src_buf->flags |= V4L2_BUF_FLAG_PFRAME;
src_buf->flags &= ~V4L2_BUF_FLAG_KEYFRAME;
} else {
@@ -1264,10 +1270,10 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
coda_set_gdi_regs(ctx);
/*
- * Copy headers at the beginning of the first frame for H.264 only.
- * In MPEG4 they are already copied by the coda.
+ * Copy headers in front of the first frame and forced I frames for
+ * H.264 only. In MPEG4 they are already copied by the CODA.
*/
- if (src_buf->sequence == 0) {
+ if (src_buf->sequence == 0 || force_ipicture) {
pic_stream_buffer_addr =
vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0) +
ctx->vpu_header_size[0] +
@@ -1291,8 +1297,7 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
pic_stream_buffer_size = q_data_dst->sizeimage;
}
- if (src_buf->flags & V4L2_BUF_FLAG_KEYFRAME) {
- force_ipicture = 1;
+ if (force_ipicture) {
switch (dst_fourcc) {
case V4L2_PIX_FMT_H264:
quant_param = ctx->params.h264_intra_qp;
@@ -1309,7 +1314,6 @@ static int coda_prepare_encode(struct coda_ctx *ctx)
break;
}
} else {
- force_ipicture = 0;
switch (dst_fourcc) {
case V4L2_PIX_FMT_H264:
quant_param = ctx->params.h264_inter_qp;
@@ -1382,7 +1386,8 @@ static void coda_finish_encode(struct coda_ctx *ctx)
wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
/* Calculate bytesused field */
- if (dst_buf->sequence == 0) {
+ if (dst_buf->sequence == 0 ||
+ src_buf->flags & V4L2_BUF_FLAG_KEYFRAME) {
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr +
ctx->vpu_header_size[0] +
ctx->vpu_header_size[1] +
@@ -2193,12 +2198,32 @@ static void coda_finish_decode(struct coda_ctx *ctx)
ctx->display_idx = display_idx;
}
+static void coda_error_decode(struct coda_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *dst_buf;
+
+ /*
+ * For now this only handles the case where we would deadlock with
+ * userspace, i.e. userspace issued DEC_CMD_STOP and waits for EOS,
+ * but after a failed decode run we would hold the context and wait for
+ * userspace to queue more buffers.
+ */
+ if (!(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))
+ return;
+
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ dst_buf->sequence = ctx->qsequence - 1;
+
+ coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_ERROR);
+}
+
const struct coda_context_ops coda_bit_decode_ops = {
.queue_init = coda_decoder_queue_init,
.reqbufs = coda_decoder_reqbufs,
.start_streaming = coda_start_decoding,
.prepare_run = coda_prepare_decode,
.finish_run = coda_finish_decode,
+ .error_run = coda_error_decode,
.seq_end_work = coda_seq_end_work,
.release = coda_bit_release,
};
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index d523e990d509..f92cc7df58fb 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -430,10 +430,10 @@ static int coda_g_fmt(struct file *file, void *priv,
f->fmt.pix.bytesperline = q_data->bytesperline;
f->fmt.pix.sizeimage = q_data->sizeimage;
- if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
- f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
- else
- f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quantization;
return 0;
}
@@ -599,6 +599,9 @@ static int coda_try_fmt_vid_cap(struct file *file, void *priv,
}
f->fmt.pix.colorspace = ctx->colorspace;
+ f->fmt.pix.xfer_func = ctx->xfer_func;
+ f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
+ f->fmt.pix.quantization = ctx->quantization;
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
@@ -612,7 +615,6 @@ static int coda_try_fmt_vid_cap(struct file *file, void *priv,
/* The h.264 decoder only returns complete 16x16 macroblocks */
if (codec && codec->src_fourcc == V4L2_PIX_FMT_H264) {
- f->fmt.pix.width = f->fmt.pix.width;
f->fmt.pix.height = round_up(f->fmt.pix.height, 16);
f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
@@ -635,6 +637,23 @@ static int coda_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
+static void coda_set_default_colorspace(struct v4l2_pix_format *fmt)
+{
+ enum v4l2_colorspace colorspace;
+
+ if (fmt->pixelformat == V4L2_PIX_FMT_JPEG)
+ colorspace = V4L2_COLORSPACE_JPEG;
+ else if (fmt->width <= 720 && fmt->height <= 576)
+ colorspace = V4L2_COLORSPACE_SMPTE170M;
+ else
+ colorspace = V4L2_COLORSPACE_REC709;
+
+ fmt->colorspace = colorspace;
+ fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
+}
+
static int coda_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
@@ -648,16 +667,8 @@ static int coda_try_fmt_vid_out(struct file *file, void *priv,
if (ret < 0)
return ret;
- switch (f->fmt.pix.colorspace) {
- case V4L2_COLORSPACE_REC709:
- case V4L2_COLORSPACE_JPEG:
- break;
- default:
- if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
- f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
- else
- f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
- }
+ if (f->fmt.pix.colorspace == V4L2_COLORSPACE_DEFAULT)
+ coda_set_default_colorspace(&f->fmt.pix);
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
codec = coda_find_codec(dev, f->fmt.pix.pixelformat, q_data_dst->fourcc);
@@ -772,6 +783,9 @@ static int coda_s_fmt_vid_out(struct file *file, void *priv,
return ret;
ctx->colorspace = f->fmt.pix.colorspace;
+ ctx->xfer_func = f->fmt.pix.xfer_func;
+ ctx->ycbcr_enc = f->fmt.pix.ycbcr_enc;
+ ctx->quantization = f->fmt.pix.quantization;
memset(&f_cap, 0, sizeof(f_cap));
f_cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -1149,6 +1163,9 @@ static void coda_pic_run_work(struct work_struct *work)
ctx->hold = true;
coda_hw_reset(ctx);
+
+ if (ctx->ops->error_run)
+ ctx->ops->error_run(ctx);
} else if (!ctx->aborting) {
ctx->ops->finish_run(ctx);
}
@@ -1282,7 +1299,13 @@ static void set_default_params(struct coda_ctx *ctx)
csize = coda_estimate_sizeimage(ctx, usize, max_w, max_h);
ctx->params.codec_mode = ctx->codec->mode;
- ctx->colorspace = V4L2_COLORSPACE_REC709;
+ if (ctx->cvd->src_formats[0] == V4L2_PIX_FMT_JPEG)
+ ctx->colorspace = V4L2_COLORSPACE_JPEG;
+ else
+ ctx->colorspace = V4L2_COLORSPACE_REC709;
+ ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
ctx->params.framerate = 30;
/* Default formats for output and input queues */
@@ -1680,6 +1703,9 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
ctx->params.intra_refresh = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
+ ctx->params.force_ipicture = true;
+ break;
case V4L2_CID_JPEG_COMPRESSION_QUALITY:
coda_set_jpeg_compression_quality(ctx, ctrl->val);
break;
@@ -2063,8 +2089,7 @@ static int coda_hw_init(struct coda_dev *dev)
if (ret)
goto err_clk_ahb;
- if (dev->rstc)
- reset_control_reset(dev->rstc);
+ reset_control_reset(dev->rstc);
/*
* Copy the first CODA_ISRAM_SIZE in the internal SRAM.
@@ -2448,13 +2473,8 @@ static int coda_probe(struct platform_device *pdev)
dev->rstc = devm_reset_control_get_optional(&pdev->dev, NULL);
if (IS_ERR(dev->rstc)) {
ret = PTR_ERR(dev->rstc);
- if (ret == -ENOENT || ret == -ENOTSUPP) {
- dev->rstc = NULL;
- } else {
- dev_err(&pdev->dev, "failed get reset control: %d\n",
- ret);
- return ret;
- }
+ dev_err(&pdev->dev, "failed get reset control: %d\n", ret);
+ return ret;
}
/* Get IRAM pool from device tree or platform data */
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 20222befb9b2..40fe22f0d757 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -135,6 +135,7 @@ struct coda_params {
u32 vbv_size;
u32 slice_max_bits;
u32 slice_max_mb;
+ bool force_ipicture;
};
struct coda_buffer_meta {
@@ -182,6 +183,7 @@ struct coda_context_ops {
int (*start_streaming)(struct coda_ctx *ctx);
int (*prepare_run)(struct coda_ctx *ctx);
void (*finish_run)(struct coda_ctx *ctx);
+ void (*error_run)(struct coda_ctx *ctx);
void (*seq_end_work)(struct work_struct *work);
void (*release)(struct coda_ctx *ctx);
};
@@ -206,6 +208,9 @@ struct coda_ctx {
enum coda_inst_type inst_type;
const struct coda_codec *codec;
enum v4l2_colorspace colorspace;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
struct coda_params params;
struct v4l2_ctrl_handler ctrls;
struct v4l2_fh fh;
diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c
index 669a4c82f1ff..df9b71621420 100644
--- a/drivers/media/platform/coda/imx-vdoa.c
+++ b/drivers/media/platform/coda/imx-vdoa.c
@@ -101,6 +101,8 @@ struct vdoa_ctx {
struct vdoa_data *vdoa;
struct completion completion;
struct vdoa_q_data q_data[2];
+ unsigned int submitted_job;
+ unsigned int completed_job;
};
static irqreturn_t vdoa_irq_handler(int irq, void *data)
@@ -114,7 +116,7 @@ static irqreturn_t vdoa_irq_handler(int irq, void *data)
curr_ctx = vdoa->curr_ctx;
if (!curr_ctx) {
- dev_dbg(vdoa->dev,
+ dev_warn(vdoa->dev,
"Instance released before the end of transaction\n");
return IRQ_HANDLED;
}
@@ -127,19 +129,44 @@ static irqreturn_t vdoa_irq_handler(int irq, void *data)
} else if (!(val & VDOAIST_EOT)) {
dev_warn(vdoa->dev, "Spurious interrupt\n");
}
+ curr_ctx->completed_job++;
complete(&curr_ctx->completion);
return IRQ_HANDLED;
}
+int vdoa_wait_for_completion(struct vdoa_ctx *ctx)
+{
+ struct vdoa_data *vdoa = ctx->vdoa;
+
+ if (ctx->submitted_job == ctx->completed_job)
+ return 0;
+
+ if (!wait_for_completion_timeout(&ctx->completion,
+ msecs_to_jiffies(300))) {
+ dev_err(vdoa->dev,
+ "Timeout waiting for transfer result\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(vdoa_wait_for_completion);
+
void vdoa_device_run(struct vdoa_ctx *ctx, dma_addr_t dst, dma_addr_t src)
{
struct vdoa_q_data *src_q_data, *dst_q_data;
struct vdoa_data *vdoa = ctx->vdoa;
u32 val;
+ if (vdoa->curr_ctx)
+ vdoa_wait_for_completion(vdoa->curr_ctx);
+
vdoa->curr_ctx = ctx;
+ reinit_completion(&ctx->completion);
+ ctx->submitted_job++;
+
src_q_data = &ctx->q_data[V4L2_M2M_SRC];
dst_q_data = &ctx->q_data[V4L2_M2M_DST];
@@ -177,21 +204,6 @@ void vdoa_device_run(struct vdoa_ctx *ctx, dma_addr_t dst, dma_addr_t src)
}
EXPORT_SYMBOL(vdoa_device_run);
-int vdoa_wait_for_completion(struct vdoa_ctx *ctx)
-{
- struct vdoa_data *vdoa = ctx->vdoa;
-
- if (!wait_for_completion_timeout(&ctx->completion,
- msecs_to_jiffies(300))) {
- dev_err(vdoa->dev,
- "Timeout waiting for transfer result\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(vdoa_wait_for_completion);
-
struct vdoa_ctx *vdoa_context_create(struct vdoa_data *vdoa)
{
struct vdoa_ctx *ctx;
@@ -218,6 +230,11 @@ void vdoa_context_destroy(struct vdoa_ctx *ctx)
{
struct vdoa_data *vdoa = ctx->vdoa;
+ if (vdoa->curr_ctx == ctx) {
+ vdoa_wait_for_completion(vdoa->curr_ctx);
+ vdoa->curr_ctx = NULL;
+ }
+
clk_disable_unprepare(vdoa->vdoa_clk);
kfree(ctx);
}
diff --git a/drivers/media/platform/davinci/Kconfig b/drivers/media/platform/davinci/Kconfig
index 554e710de487..55982e681d77 100644
--- a/drivers/media/platform/davinci/Kconfig
+++ b/drivers/media/platform/davinci/Kconfig
@@ -22,6 +22,7 @@ config VIDEO_DAVINCI_VPIF_CAPTURE
depends on HAS_DMA
depends on I2C
select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
help
Enables Davinci VPIF module used for capture devices.
This module is used for capture on TI DM6467/DA850/OMAPL138
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
index 1b02a6363f77..07e89a4985a6 100644
--- a/drivers/media/platform/davinci/vpif.c
+++ b/drivers/media/platform/davinci/vpif.c
@@ -26,6 +26,7 @@
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <linux/v4l2-dv-timings.h>
+#include <linux/of_graph.h>
#include "vpif.h"
@@ -423,7 +424,9 @@ EXPORT_SYMBOL(vpif_channel_getfid);
static int vpif_probe(struct platform_device *pdev)
{
- static struct resource *res;
+ static struct resource *res, *res_irq;
+ struct platform_device *pdev_capture, *pdev_display;
+ struct device_node *endpoint = NULL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
vpif_base = devm_ioremap_resource(&pdev->dev, res);
@@ -435,6 +438,58 @@ static int vpif_probe(struct platform_device *pdev)
spin_lock_init(&vpif_lock);
dev_info(&pdev->dev, "vpif probe success\n");
+
+ /*
+ * If VPIF Node has endpoints, assume "new" DT support,
+ * where capture and display drivers don't have DT nodes
+ * so their devices need to be registered manually here
+ * for their legacy platform_drivers to work.
+ */
+ endpoint = of_graph_get_next_endpoint(pdev->dev.of_node,
+ endpoint);
+ if (!endpoint)
+ return 0;
+
+ /*
+ * For DT platforms, manually create platform_devices for
+ * capture/display drivers.
+ */
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq) {
+ dev_warn(&pdev->dev, "Missing IRQ resource.\n");
+ return -EINVAL;
+ }
+
+ pdev_capture = devm_kzalloc(&pdev->dev, sizeof(*pdev_capture),
+ GFP_KERNEL);
+ if (pdev_capture) {
+ pdev_capture->name = "vpif_capture";
+ pdev_capture->id = -1;
+ pdev_capture->resource = res_irq;
+ pdev_capture->num_resources = 1;
+ pdev_capture->dev.dma_mask = pdev->dev.dma_mask;
+ pdev_capture->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
+ pdev_capture->dev.parent = &pdev->dev;
+ platform_device_register(pdev_capture);
+ } else {
+ dev_warn(&pdev->dev, "Unable to allocate memory for pdev_capture.\n");
+ }
+
+ pdev_display = devm_kzalloc(&pdev->dev, sizeof(*pdev_display),
+ GFP_KERNEL);
+ if (pdev_display) {
+ pdev_display->name = "vpif_display";
+ pdev_display->id = -1;
+ pdev_display->resource = res_irq;
+ pdev_display->num_resources = 1;
+ pdev_display->dev.dma_mask = pdev->dev.dma_mask;
+ pdev_display->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
+ pdev_display->dev.parent = &pdev->dev;
+ platform_device_register(pdev_display);
+ } else {
+ dev_warn(&pdev->dev, "Unable to allocate memory for pdev_display.\n");
+ }
+
return 0;
}
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 44f702752d3a..d78580f9e431 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -18,10 +18,16 @@
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-ioctl.h>
+#include <media/i2c/tvp514x.h>
+#include <media/v4l2-mediabus.h>
+
+#include <linux/videodev2.h>
#include "vpif.h"
#include "vpif_capture.h"
@@ -385,7 +391,8 @@ static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
common = &ch->common[i];
/* skip If streaming is not started in this channel */
/* Check the field format */
- if (1 == ch->vpifparams.std_info.frm_fmt) {
+ if (1 == ch->vpifparams.std_info.frm_fmt ||
+ common->fmt.fmt.pix.field == V4L2_FIELD_NONE) {
/* Progressive mode */
spin_lock(&common->irqlock);
if (list_empty(&common->dma_queue)) {
@@ -466,9 +473,38 @@ static int vpif_update_std_info(struct channel_obj *ch)
struct vpif_channel_config_params *std_info = &vpifparams->std_info;
struct video_obj *vid_ch = &ch->video;
int index;
+ struct v4l2_pix_format *pixfmt = &common->fmt.fmt.pix;
vpif_dbg(2, debug, "vpif_update_std_info\n");
+ /*
+ * if called after try_fmt or g_fmt, there will already be a size
+ * so use that by default.
+ */
+ if (pixfmt->width && pixfmt->height) {
+ if (pixfmt->field == V4L2_FIELD_ANY ||
+ pixfmt->field == V4L2_FIELD_NONE)
+ pixfmt->field = V4L2_FIELD_NONE;
+
+ vpifparams->iface.if_type = VPIF_IF_BT656;
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_SGRBG10 ||
+ pixfmt->pixelformat == V4L2_PIX_FMT_SBGGR8)
+ vpifparams->iface.if_type = VPIF_IF_RAW_BAYER;
+
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_SGRBG10)
+ vpifparams->params.data_sz = 1; /* 10 bits/pixel. */
+
+ /*
+ * For raw formats from camera sensors, we don't need
+ * the std_info from table lookup, so nothing else to do here.
+ */
+ if (vpifparams->iface.if_type == VPIF_IF_RAW_BAYER) {
+ memset(std_info, 0, sizeof(struct vpif_channel_config_params));
+ vpifparams->std_info.capture_format = 1; /* CCD/raw mode */
+ return 0;
+ }
+ }
+
for (index = 0; index < vpif_ch_params_count; index++) {
config = &vpif_ch_params[index];
if (config->hd_sd == 0) {
@@ -513,7 +549,7 @@ static int vpif_update_std_info(struct channel_obj *ch)
if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER)
common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8;
else
- common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV422P;
+ common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_NV16;
common->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
@@ -655,7 +691,7 @@ static int vpif_input_to_subdev(
/* loop through the sub device list to get the sub device info */
for (i = 0; i < vpif_cfg->subdev_count; i++) {
subdev_info = &vpif_cfg->subdev_info[i];
- if (!strcmp(subdev_info->name, subdev_name))
+ if (subdev_info && !strcmp(subdev_info->name, subdev_name))
return i;
}
return -1;
@@ -917,8 +953,8 @@ static int vpif_enum_fmt_vid_cap(struct file *file, void *priv,
fmt->pixelformat = V4L2_PIX_FMT_SBGGR8;
} else {
fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- strcpy(fmt->description, "YCbCr4:2:2 YC Planar");
- fmt->pixelformat = V4L2_PIX_FMT_YUV422P;
+ strcpy(fmt->description, "YCbCr4:2:2 Semi-Planar");
+ fmt->pixelformat = V4L2_PIX_FMT_NV16;
}
return 0;
}
@@ -936,22 +972,8 @@ static int vpif_try_fmt_vid_cap(struct file *file, void *priv,
struct channel_obj *ch = video_get_drvdata(vdev);
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]);
- struct vpif_params *vpif_params = &ch->vpifparams;
-
- /*
- * to supress v4l-compliance warnings silently correct
- * the pixelformat
- */
- if (vpif_params->iface.if_type == VPIF_IF_RAW_BAYER) {
- if (pixfmt->pixelformat != V4L2_PIX_FMT_SBGGR8)
- pixfmt->pixelformat = V4L2_PIX_FMT_SBGGR8;
- } else {
- if (pixfmt->pixelformat != V4L2_PIX_FMT_YUV422P)
- pixfmt->pixelformat = V4L2_PIX_FMT_YUV422P;
- }
-
- common->fmt.fmt.pix.pixelformat = pixfmt->pixelformat;
+ common->fmt = *fmt;
vpif_update_std_info(ch);
pixfmt->field = common->fmt.fmt.pix.field;
@@ -960,8 +982,17 @@ static int vpif_try_fmt_vid_cap(struct file *file, void *priv,
pixfmt->width = common->fmt.fmt.pix.width;
pixfmt->height = common->fmt.fmt.pix.height;
pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height * 2;
+ if (pixfmt->pixelformat == V4L2_PIX_FMT_SGRBG10) {
+ pixfmt->bytesperline = common->fmt.fmt.pix.width * 2;
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+ }
pixfmt->priv = 0;
+ dev_dbg(vpif_dev, "%s: %d x %d; pitch=%d pixelformat=0x%08x, field=%d, size=%d\n", __func__,
+ pixfmt->width, pixfmt->height,
+ pixfmt->bytesperline, pixfmt->pixelformat,
+ pixfmt->field, pixfmt->sizeimage);
+
return 0;
}
@@ -978,13 +1009,47 @@ static int vpif_g_fmt_vid_cap(struct file *file, void *priv,
struct video_device *vdev = video_devdata(file);
struct channel_obj *ch = video_get_drvdata(vdev);
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+ struct v4l2_pix_format *pix_fmt = &fmt->fmt.pix;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ struct v4l2_mbus_framefmt *mbus_fmt = &format.format;
+ int ret;
/* Check the validity of the buffer type */
if (common->fmt.type != fmt->type)
return -EINVAL;
- /* Fill in the information about format */
+ /* By default, use currently set fmt */
*fmt = common->fmt;
+
+ /* If subdev has get_fmt, use that to override */
+ ret = v4l2_subdev_call(ch->sd, pad, get_fmt, NULL, &format);
+ if (!ret && mbus_fmt->code) {
+ v4l2_fill_pix_format(pix_fmt, mbus_fmt);
+ pix_fmt->bytesperline = pix_fmt->width;
+ if (mbus_fmt->code == MEDIA_BUS_FMT_SGRBG10_1X10) {
+ /* e.g. mt9v032 */
+ pix_fmt->pixelformat = V4L2_PIX_FMT_SGRBG10;
+ pix_fmt->bytesperline = pix_fmt->width * 2;
+ } else if (mbus_fmt->code == MEDIA_BUS_FMT_UYVY8_2X8) {
+ /* e.g. tvp514x */
+ pix_fmt->pixelformat = V4L2_PIX_FMT_NV16;
+ pix_fmt->bytesperline = pix_fmt->width * 2;
+ } else {
+ dev_warn(vpif_dev, "%s: Unhandled media-bus format 0x%x\n",
+ __func__, mbus_fmt->code);
+ }
+ pix_fmt->sizeimage = pix_fmt->bytesperline * pix_fmt->height;
+ dev_dbg(vpif_dev, "%s: %d x %d; pitch=%d, pixelformat=0x%08x, code=0x%x, field=%d, size=%d\n", __func__,
+ pix_fmt->width, pix_fmt->height,
+ pix_fmt->bytesperline, pix_fmt->pixelformat,
+ mbus_fmt->code, pix_fmt->field, pix_fmt->sizeimage);
+
+ common->fmt = *fmt;
+ vpif_update_std_info(ch);
+ }
+
return 0;
}
@@ -1323,6 +1388,22 @@ static int vpif_async_bound(struct v4l2_async_notifier *notifier,
{
int i;
+ for (i = 0; i < vpif_obj.config->asd_sizes[0]; i++) {
+ struct v4l2_async_subdev *_asd = vpif_obj.config->asd[i];
+ const struct fwnode_handle *fwnode = _asd->match.fwnode.fwnode;
+
+ if (fwnode == subdev->fwnode) {
+ vpif_obj.sd[i] = subdev;
+ vpif_obj.config->chan_config->inputs[i].subdev_name =
+ (char *)to_of_node(subdev->fwnode)->full_name;
+ vpif_dbg(2, debug,
+ "%s: setting input %d subdev_name = %s\n",
+ __func__, i,
+ to_of_node(subdev->fwnode)->full_name);
+ return 0;
+ }
+ }
+
for (i = 0; i < vpif_obj.config->subdev_count; i++)
if (!strcmp(vpif_obj.config->subdev_info[i].name,
subdev->name)) {
@@ -1356,6 +1437,7 @@ static int vpif_probe_complete(void)
/* set initial format */
ch->video.stdid = V4L2_STD_525_60;
memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings));
+ common->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
vpif_update_std_info(ch);
/* Initialize vb2 queue */
@@ -1418,6 +1500,106 @@ static int vpif_async_complete(struct v4l2_async_notifier *notifier)
return vpif_probe_complete();
}
+static struct vpif_capture_config *
+vpif_capture_get_pdata(struct platform_device *pdev)
+{
+ struct device_node *endpoint = NULL;
+ struct v4l2_fwnode_endpoint bus_cfg;
+ struct vpif_capture_config *pdata;
+ struct vpif_subdev_info *sdinfo;
+ struct vpif_capture_chan_config *chan;
+ unsigned int i;
+
+ /*
+ * DT boot: OF node from parent device contains
+ * video ports & endpoints data.
+ */
+ if (pdev->dev.parent && pdev->dev.parent->of_node)
+ pdev->dev.of_node = pdev->dev.parent->of_node;
+ if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
+ return pdev->dev.platform_data;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+ pdata->subdev_info =
+ devm_kzalloc(&pdev->dev, sizeof(*pdata->subdev_info) *
+ VPIF_CAPTURE_NUM_CHANNELS, GFP_KERNEL);
+
+ if (!pdata->subdev_info)
+ return NULL;
+
+ for (i = 0; i < VPIF_CAPTURE_NUM_CHANNELS; i++) {
+ struct device_node *rem;
+ unsigned int flags;
+ int err;
+
+ endpoint = of_graph_get_next_endpoint(pdev->dev.of_node,
+ endpoint);
+ if (!endpoint)
+ break;
+
+ sdinfo = &pdata->subdev_info[i];
+ chan = &pdata->chan_config[i];
+ chan->inputs = devm_kzalloc(&pdev->dev,
+ sizeof(*chan->inputs) *
+ VPIF_CAPTURE_NUM_CHANNELS,
+ GFP_KERNEL);
+
+ chan->input_count++;
+ chan->inputs[i].input.type = V4L2_INPUT_TYPE_CAMERA;
+ chan->inputs[i].input.std = V4L2_STD_ALL;
+ chan->inputs[i].input.capabilities = V4L2_IN_CAP_STD;
+
+ err = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint),
+ &bus_cfg);
+ if (err) {
+ dev_err(&pdev->dev, "Could not parse the endpoint\n");
+ goto done;
+ }
+ dev_dbg(&pdev->dev, "Endpoint %s, bus_width = %d\n",
+ endpoint->full_name, bus_cfg.bus.parallel.bus_width);
+ flags = bus_cfg.bus.parallel.flags;
+
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ chan->vpif_if.hd_pol = 1;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ chan->vpif_if.vd_pol = 1;
+
+ rem = of_graph_get_remote_port_parent(endpoint);
+ if (!rem) {
+ dev_dbg(&pdev->dev, "Remote device at %s not found\n",
+ endpoint->full_name);
+ goto done;
+ }
+
+ dev_dbg(&pdev->dev, "Remote device %s, %s found\n",
+ rem->name, rem->full_name);
+ sdinfo->name = rem->full_name;
+
+ pdata->asd[i] = devm_kzalloc(&pdev->dev,
+ sizeof(struct v4l2_async_subdev),
+ GFP_KERNEL);
+ if (!pdata->asd[i]) {
+ of_node_put(rem);
+ pdata = NULL;
+ goto done;
+ }
+
+ pdata->asd[i]->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ pdata->asd[i]->match.fwnode.fwnode = of_fwnode_handle(rem);
+ of_node_put(rem);
+ }
+
+done:
+ pdata->asd_sizes[0] = i;
+ pdata->subdev_count = i;
+ pdata->card_name = "DA850/OMAP-L138 Video Capture";
+
+ return pdata;
+}
+
/**
* vpif_probe : This function probes the vpif capture driver
* @pdev: platform device pointer
@@ -1434,6 +1616,12 @@ static __init int vpif_probe(struct platform_device *pdev)
int res_idx = 0;
int i, err;
+ pdev->dev.platform_data = vpif_capture_get_pdata(pdev);
+ if (!pdev->dev.platform_data) {
+ dev_warn(&pdev->dev, "Missing platform data. Giving up.\n");
+ return -EINVAL;
+ }
+
if (!pdev->dev.platform_data) {
dev_warn(&pdev->dev, "Missing platform data. Giving up.\n");
return -EINVAL;
@@ -1474,7 +1662,7 @@ static __init int vpif_probe(struct platform_device *pdev)
goto vpif_unregister;
}
- if (!vpif_obj.config->asd_sizes) {
+ if (!vpif_obj.config->asd_sizes[0]) {
int i2c_id = vpif_obj.config->i2c_adapter_id;
i2c_adap = i2c_get_adapter(i2c_id);
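
The vpif changes above replace name-based subdev matching with fwnode matching: vpif_capture_get_pdata() records the remote port parent of each endpoint as a V4L2_ASYNC_MATCH_FWNODE descriptor, and vpif_async_bound() later picks the input slot whose stored fwnode pointer equals the fwnode of the subdevice that registered. A minimal stand-alone model of that pointer-identity matching, using illustrative types and names rather than the kernel's v4l2_async structures:

#include <stdio.h>

struct fwnode { const char *name; };             /* stand-in for struct fwnode_handle */
struct asd    { const struct fwnode *fwnode; };  /* stand-in for struct v4l2_async_subdev */
struct subdev { const char *name; const struct fwnode *fwnode; };

static int match_subdev(const struct asd *asd, int count, const struct subdev *sd)
{
	int i;

	for (i = 0; i < count; i++)
		if (asd[i].fwnode == sd->fwnode)  /* pointer identity, not a name compare */
			return i;
	return -1;
}

int main(void)
{
	struct fwnode cam0 = { "camera@0" }, cam1 = { "camera@1" };
	struct asd asds[] = { { &cam0 }, { &cam1 } };
	struct subdev sd = { "decoder", &cam1 };  /* illustrative subdev */

	printf("matched input index: %d\n", match_subdev(asds, 2, &sd));
	return 0;
}

The same match-by-fwnode pattern recurs in the exynos4-is, omap3isp and pxa_camera conversions further down in this series.
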
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 7e5cf9923c8d..b5ac6ce626b3 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -1250,6 +1250,11 @@ static __init int vpif_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (!pdev->dev.platform_data) {
+ dev_warn(&pdev->dev, "Missing platform data. Giving up.\n");
+ return -EINVAL;
+ }
+
vpif_dev = &pdev->dev;
err = initialize_vpif();
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 59a634201830..43801509dabb 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -454,6 +454,7 @@ int gsc_try_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f)
} else {
min_w = variant->pix_min->target_rot_dis_w;
min_h = variant->pix_min->target_rot_dis_h;
+ pix_mp->colorspace = ctx->out_colorspace;
}
pr_debug("mod_x: %d, mod_y: %d, max_w: %d, max_h = %d",
@@ -472,10 +473,8 @@ int gsc_try_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f)
pix_mp->num_planes = fmt->num_planes;
- if (pix_mp->width >= 1280) /* HD */
- pix_mp->colorspace = V4L2_COLORSPACE_REC709;
- else /* SD */
- pix_mp->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ if (V4L2_TYPE_IS_OUTPUT(f->type))
+ ctx->out_colorspace = pix_mp->colorspace;
for (i = 0; i < pix_mp->num_planes; ++i) {
struct v4l2_plane_pix_format *plane_fmt = &pix_mp->plane_fmt[i];
@@ -519,8 +518,8 @@ int gsc_g_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f)
pix_mp->height = frame->f_height;
pix_mp->field = V4L2_FIELD_NONE;
pix_mp->pixelformat = frame->fmt->pixelformat;
- pix_mp->colorspace = V4L2_COLORSPACE_REC709;
pix_mp->num_planes = frame->fmt->num_planes;
+ pix_mp->colorspace = ctx->out_colorspace;
for (i = 0; i < pix_mp->num_planes; ++i) {
pix_mp->plane_fmt[i].bytesperline = (frame->f_width *
@@ -569,9 +568,9 @@ int gsc_try_crop(struct gsc_ctx *ctx, struct v4l2_crop *cr)
}
pr_debug("user put w: %d, h: %d", cr->c.width, cr->c.height);
- if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
f = &ctx->d_frame;
- else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
f = &ctx->s_frame;
else
return -EINVAL;
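
The gsc-core change above stops deriving the colorspace from the frame width (REC709 for widths of 1280 and above, SMPTE170M otherwise) and instead records the colorspace given on the OUTPUT (source) queue and reports it back on the CAPTURE queue. A hedged user-space sketch of the observable behaviour, assuming the GScaler m2m video node is at the placeholder path /dev/video0:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_format fmt;
	int fd = open("/dev/video0", O_RDWR);  /* placeholder m2m node */

	if (fd < 0)
		return 1;

	/* Set the source format and colorspace on the OUTPUT queue. */
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	fmt.fmt.pix_mp.width = 1920;
	fmt.fmt.pix_mp.height = 1080;
	fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12M;
	fmt.fmt.pix_mp.num_planes = 2;
	fmt.fmt.pix_mp.colorspace = V4L2_COLORSPACE_SMPTE170M;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt))
		perror("S_FMT(OUTPUT)");

	/* The CAPTURE queue now reports the propagated colorspace. */
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	if (ioctl(fd, VIDIOC_G_FMT, &fmt))
		perror("G_FMT(CAPTURE)");
	else
		printf("capture colorspace: %u\n", fmt.fmt.pix_mp.colorspace);

	close(fd);
	return 0;
}
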
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index 696217e9af66..715d9c9d8d30 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -376,6 +376,7 @@ struct gsc_ctx {
struct v4l2_ctrl_handler ctrl_handler;
struct gsc_ctrls gsc_ctrls;
bool ctrls_rdy;
+ enum v4l2_colorspace out_colorspace;
};
void gsc_set_prefbuf(struct gsc_dev *gsc, struct gsc_frame *frm);
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index 82505025d96c..33611a46ce35 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -460,8 +460,8 @@ static int gsc_m2m_g_selection(struct file *file, void *fh,
struct gsc_frame *frame;
struct gsc_ctx *ctx = fh_to_ctx(fh);
- if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
- (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE))
+ if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
+ (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
return -EINVAL;
frame = ctx_get_frame(ctx, s->type);
@@ -503,8 +503,8 @@ static int gsc_m2m_s_selection(struct file *file, void *fh,
cr.type = s->type;
cr.c = s->r;
- if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
- (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE))
+ if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
+ (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
return -EINVAL;
ret = gsc_try_crop(ctx, &cr);
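
Both gsc-m2m hunks above switch the selection ioctls to the single-planar buffer types, matching how the selection API is normally exercised from user space even on multi-planar queues. A short sketch, again with /dev/video0 as a placeholder for the m2m node:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_selection sel;
	int fd = open("/dev/video0", O_RDWR);  /* placeholder m2m node */

	if (fd < 0)
		return 1;

	memset(&sel, 0, sizeof(sel));
	sel.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;  /* not ..._CAPTURE_MPLANE */
	sel.target = V4L2_SEL_TGT_COMPOSE;
	if (ioctl(fd, VIDIOC_G_SELECTION, &sel))
		perror("VIDIOC_G_SELECTION");
	else
		printf("compose: %ux%u@(%d,%d)\n",
		       sel.r.width, sel.r.height, sel.r.left, sel.r.top);

	close(fd);
	return 0;
}
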
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
index 57d42c6172c5..c480efb755f5 100644
--- a/drivers/media/platform/exynos4-is/Kconfig
+++ b/drivers/media/platform/exynos4-is/Kconfig
@@ -4,6 +4,7 @@ config VIDEO_SAMSUNG_EXYNOS4_IS
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
depends on OF && COMMON_CLK
+ select V4L2_FWNODE
help
Say Y here to enable camera host interface devices for
Samsung S5P and EXYNOS SoC series.
@@ -32,6 +33,7 @@ config VIDEO_S5P_MIPI_CSIS
tristate "S5P/EXYNOS MIPI-CSI2 receiver (MIPI-CSIS) driver"
depends on REGULATOR
select GENERIC_PHY
+ select V4L2_FWNODE
help
This is a V4L2 driver for Samsung S5P and EXYNOS4 SoC MIPI-CSI2
receiver (MIPI-CSIS) devices.
diff --git a/drivers/media/platform/exynos4-is/fimc-capture.c b/drivers/media/platform/exynos4-is/fimc-capture.c
index 8a7cd07dbe28..948fe01f6c96 100644
--- a/drivers/media/platform/exynos4-is/fimc-capture.c
+++ b/drivers/media/platform/exynos4-is/fimc-capture.c
@@ -1270,13 +1270,14 @@ static int fimc_cap_g_selection(struct file *file, void *fh,
struct fimc_ctx *ctx = fimc->vid_cap.ctx;
struct fimc_frame *f = &ctx->s_frame;
- if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
switch (s->target) {
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
f = &ctx->d_frame;
+ /* fall through */
case V4L2_SEL_TGT_CROP_BOUNDS:
case V4L2_SEL_TGT_CROP_DEFAULT:
s->r.left = 0;
@@ -1287,6 +1288,7 @@ static int fimc_cap_g_selection(struct file *file, void *fh,
case V4L2_SEL_TGT_COMPOSE:
f = &ctx->d_frame;
+ /* fall through */
case V4L2_SEL_TGT_CROP:
s->r.left = f->offs_h;
s->r.top = f->offs_v;
@@ -1320,7 +1322,7 @@ static int fimc_cap_s_selection(struct file *file, void *fh,
struct fimc_frame *f;
unsigned long flags;
- if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (s->target == V4L2_SEL_TGT_COMPOSE)
@@ -1610,6 +1612,7 @@ static int fimc_subdev_get_selection(struct v4l2_subdev *sd,
switch (sel->target) {
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
f = &ctx->d_frame;
+ /* fall through */
case V4L2_SEL_TGT_CROP_BOUNDS:
r->width = f->o_width;
r->height = f->o_height;
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 7f92144a1de3..340d906db370 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -854,7 +854,7 @@ static int fimc_is_probe(struct platform_device *pdev)
vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
- ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ ret = devm_of_platform_populate(dev);
if (ret < 0)
goto err_pm;
@@ -864,7 +864,7 @@ static int fimc_is_probe(struct platform_device *pdev)
*/
ret = fimc_is_register_subdevs(is);
if (ret < 0)
- goto err_of_dep;
+ goto err_pm;
ret = fimc_is_debugfs_create(is);
if (ret < 0)
@@ -883,8 +883,6 @@ err_dfs:
fimc_is_debugfs_remove(is);
err_sd:
fimc_is_unregister_subdevs(is);
-err_of_dep:
- of_platform_depopulate(dev);
err_pm:
if (!pm_runtime_enabled(dev))
fimc_is_runtime_suspend(dev);
@@ -946,7 +944,6 @@ static int fimc_is_remove(struct platform_device *pdev)
if (!pm_runtime_status_suspended(dev))
fimc_is_runtime_suspend(dev);
free_irq(is->irq, is);
- of_platform_depopulate(dev);
fimc_is_unregister_subdevs(is);
vb2_dma_contig_clear_max_seg_size(dev);
fimc_is_put_clocks(is);
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index b4c4a33784c4..7d3ec5cc6608 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -901,7 +901,7 @@ static int fimc_lite_g_selection(struct file *file, void *fh,
struct fimc_lite *fimc = video_drvdata(file);
struct flite_frame *f = &fimc->out_frame;
- if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
switch (sel->target) {
@@ -929,7 +929,7 @@ static int fimc_lite_s_selection(struct file *file, void *fh,
struct v4l2_rect rect = sel->r;
unsigned long flags;
- if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
sel->target != V4L2_SEL_TGT_COMPOSE)
return -EINVAL;
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index e82450e90a67..7d1cf78846c4 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -29,7 +29,7 @@
#include <linux/slab.h>
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
#include <media/media-device.h>
#include <media/drv-intf/exynos-fimc.h>
@@ -388,7 +388,7 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
{
struct fimc_source_info *pd = &fmd->sensor[index].pdata;
struct device_node *rem, *ep, *np;
- struct v4l2_of_endpoint endpoint;
+ struct v4l2_fwnode_endpoint endpoint;
int ret;
/* Assume here a port node can have only one endpoint node. */
@@ -396,7 +396,7 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
if (!ep)
return 0;
- ret = v4l2_of_parse_endpoint(ep, &endpoint);
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &endpoint);
if (ret) {
of_node_put(ep);
return ret;
@@ -453,8 +453,8 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
return -EINVAL;
}
- fmd->sensor[index].asd.match_type = V4L2_ASYNC_MATCH_OF;
- fmd->sensor[index].asd.match.of.node = rem;
+ fmd->sensor[index].asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ fmd->sensor[index].asd.match.fwnode.fwnode = of_fwnode_handle(rem);
fmd->async_subdevs[index] = &fmd->sensor[index].asd;
fmd->num_sensors++;
@@ -1361,7 +1361,8 @@ static int subdev_notifier_bound(struct v4l2_async_notifier *notifier,
/* Find platform data for this sensor subdev */
for (i = 0; i < ARRAY_SIZE(fmd->sensor); i++)
- if (fmd->sensor[i].asd.match.of.node == subdev->dev->of_node)
+ if (fmd->sensor[i].asd.match.fwnode.fwnode ==
+ of_fwnode_handle(subdev->dev->of_node))
si = &fmd->sensor[i];
if (si == NULL)
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index f819b29efc38..98c89873c2dc 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -30,7 +30,7 @@
#include <linux/spinlock.h>
#include <linux/videodev2.h>
#include <media/drv-intf/exynos-fimc.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
#include "mipi-csis.h"
@@ -718,7 +718,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
struct csis_state *state)
{
struct device_node *node = pdev->dev.of_node;
- struct v4l2_of_endpoint endpoint;
+ struct v4l2_fwnode_endpoint endpoint;
int ret;
if (of_property_read_u32(node, "clock-frequency",
@@ -735,7 +735,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
return -EINVAL;
}
/* Get port node and validate MIPI-CSI channel id. */
- ret = v4l2_of_parse_endpoint(node, &endpoint);
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &endpoint);
if (ret)
goto err;
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index a8bda6679422..8cac2f202099 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -393,6 +393,7 @@ static int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime)
dma_free_coherent(cam->dev, cam->dma_buf_size,
cam->dma_bufs[0], cam->dma_handles[0]);
cam->nbufs = 0;
+ /* fall-through */
case 0:
cam_err(cam, "Insufficient DMA buffers, cannot operate\n");
return -ENOMEM;
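
The one-line /* fall-through */ comments added here and in the fimc hunks above mark deliberate case fall-through, so newer compilers' -Wimplicit-fallthrough does not flag it as an accidental omission. A small stand-alone illustration of the pattern (the buffer-count scenario is made up):

#include <stdio.h>

static const char *classify(int nbufs)
{
	switch (nbufs) {
	case 1:
		puts("freeing the single DMA buffer");
		/* fall through */
	case 0:
		return "insufficient buffers";
	default:
		return "ok";
	}
}

int main(void)
{
	printf("%s\n", classify(1));
	return 0;
}
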
diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
index 9e4eb7dcc424..81347558b24a 100644
--- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
+++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
@@ -103,7 +103,7 @@ static int mtk_mdp_probe(struct platform_device *pdev)
{
struct mtk_mdp_dev *mdp;
struct device *dev = &pdev->dev;
- struct device_node *node;
+ struct device_node *node, *parent;
int i, ret = 0;
mdp = devm_kzalloc(dev, sizeof(*mdp), GFP_KERNEL);
@@ -117,8 +117,16 @@ static int mtk_mdp_probe(struct platform_device *pdev)
mutex_init(&mdp->lock);
mutex_init(&mdp->vpulock);
+ /* Old dts had the components as child nodes */
+ if (of_get_next_child(dev->of_node, NULL)) {
+ parent = dev->of_node;
+ dev_warn(dev, "device tree is out of date\n");
+ } else {
+ parent = dev->of_node->parent;
+ }
+
/* Iterate over sibling MDP function blocks */
- for_each_child_of_node(dev->of_node, node) {
+ for_each_child_of_node(parent, node) {
const struct of_device_id *of_id;
enum mtk_mdp_comp_type comp_type;
int comp_id;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index a60b538686ea..843510979ad8 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -278,7 +278,7 @@ static void mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
clean_free_buffer(ctx);
}
-static void mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
+static int mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
{
unsigned int dpbsize = 0;
int ret;
@@ -288,7 +288,7 @@ static void mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
&ctx->last_decoded_picinfo)) {
mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR",
ctx->id);
- return;
+ return -EINVAL;
}
if (ctx->last_decoded_picinfo.pic_w == 0 ||
@@ -296,12 +296,12 @@ static void mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
ctx->last_decoded_picinfo.buf_w == 0 ||
ctx->last_decoded_picinfo.buf_h == 0) {
mtk_v4l2_err("Cannot get correct pic info");
- return;
+ return -EINVAL;
}
if ((ctx->last_decoded_picinfo.pic_w == ctx->picinfo.pic_w) ||
(ctx->last_decoded_picinfo.pic_h == ctx->picinfo.pic_h))
- return;
+ return 0;
mtk_v4l2_debug(1,
"[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)",
@@ -316,6 +316,8 @@ static void mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
mtk_v4l2_err("Incorrect dpb size, ret=%d", ret);
ctx->dpb_size = dpbsize;
+
+ return ret;
}
static void mtk_vdec_worker(struct work_struct *work)
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
index 237e144c194f..06c254f5c171 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
@@ -32,6 +32,15 @@ extern int mtk_v4l2_dbg_level;
extern bool mtk_vcodec_dbg;
+#define mtk_v4l2_err(fmt, args...) \
+ pr_err("[MTK_V4L2][ERROR] %s:%d: " fmt "\n", __func__, __LINE__, \
+ ##args)
+
+#define mtk_vcodec_err(h, fmt, args...) \
+ pr_err("[MTK_VCODEC][ERROR][%d]: %s() " fmt "\n", \
+ ((struct mtk_vcodec_ctx *)h->ctx)->id, __func__, ##args)
+
+
#if defined(DEBUG)
#define mtk_v4l2_debug(level, fmt, args...) \
@@ -41,11 +50,6 @@ extern bool mtk_vcodec_dbg;
level, __func__, __LINE__, ##args); \
} while (0)
-#define mtk_v4l2_err(fmt, args...) \
- pr_err("[MTK_V4L2][ERROR] %s:%d: " fmt "\n", __func__, __LINE__, \
- ##args)
-
-
#define mtk_v4l2_debug_enter() mtk_v4l2_debug(3, "+")
#define mtk_v4l2_debug_leave() mtk_v4l2_debug(3, "-")
@@ -57,22 +61,16 @@ extern bool mtk_vcodec_dbg;
__func__, ##args); \
} while (0)
-#define mtk_vcodec_err(h, fmt, args...) \
- pr_err("[MTK_VCODEC][ERROR][%d]: %s() " fmt "\n", \
- ((struct mtk_vcodec_ctx *)h->ctx)->id, __func__, ##args)
-
#define mtk_vcodec_debug_enter(h) mtk_vcodec_debug(h, "+")
#define mtk_vcodec_debug_leave(h) mtk_vcodec_debug(h, "-")
#else
#define mtk_v4l2_debug(level, fmt, args...) {}
-#define mtk_v4l2_err(fmt, args...) {}
#define mtk_v4l2_debug_enter() {}
#define mtk_v4l2_debug_leave() {}
#define mtk_vcodec_debug(h, fmt, args...) {}
-#define mtk_vcodec_err(h, fmt, args...) {}
#define mtk_vcodec_debug_enter(h) {}
#define mtk_vcodec_debug_leave(h) {}
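
The reshuffle above makes the two error macros unconditional, so error messages are emitted even in non-DEBUG builds, while the debug macros remain compiled out. A stand-alone sketch of that split, using local macro names rather than the driver's:

#include <stdio.h>

/* Always defined: errors should never be silenced. */
#define sketch_err(fmt, args...) \
	fprintf(stderr, "[ERROR] %s:%d: " fmt "\n", __func__, __LINE__, ##args)

#if defined(DEBUG)
#define sketch_dbg(fmt, args...) \
	fprintf(stderr, "[DEBUG] %s: " fmt "\n", __func__, ##args)
#else
#define sketch_dbg(fmt, args...) do { } while (0)
#endif

int main(void)
{
	sketch_dbg("only visible when built with -DDEBUG");
	sketch_err("always visible, ret=%d", -22);
	return 0;
}
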
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 0d984a28a003..9df64c189883 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -55,6 +55,7 @@
#include <linux/module.h>
#include <linux/omap-iommu.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/sched.h>
@@ -63,9 +64,9 @@
#include <asm/dma-iommu.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mc.h>
-#include <media/v4l2-of.h>
#include "isp.h"
#include "ispreg.h"
@@ -2007,20 +2008,20 @@ enum isp_of_phy {
ISP_OF_PHY_CSIPHY2,
};
-static int isp_of_parse_node(struct device *dev, struct device_node *node,
- struct isp_async_subdev *isd)
+static int isp_fwnode_parse(struct device *dev, struct fwnode_handle *fwnode,
+ struct isp_async_subdev *isd)
{
struct isp_bus_cfg *buscfg = &isd->bus;
- struct v4l2_of_endpoint vep;
+ struct v4l2_fwnode_endpoint vep;
unsigned int i;
int ret;
- ret = v4l2_of_parse_endpoint(node, &vep);
+ ret = v4l2_fwnode_endpoint_parse(fwnode, &vep);
if (ret)
return ret;
- dev_dbg(dev, "parsing endpoint %s, interface %u\n", node->full_name,
- vep.base.port);
+ dev_dbg(dev, "parsing endpoint %s, interface %u\n",
+ to_of_node(fwnode)->full_name, vep.base.port);
switch (vep.base.port) {
case ISP_OF_PHY_PARALLEL:
@@ -2077,18 +2078,18 @@ static int isp_of_parse_node(struct device *dev, struct device_node *node,
break;
default:
- dev_warn(dev, "%s: invalid interface %u\n", node->full_name,
- vep.base.port);
+ dev_warn(dev, "%s: invalid interface %u\n",
+ to_of_node(fwnode)->full_name, vep.base.port);
break;
}
return 0;
}
-static int isp_of_parse_nodes(struct device *dev,
- struct v4l2_async_notifier *notifier)
+static int isp_fwnodes_parse(struct device *dev,
+ struct v4l2_async_notifier *notifier)
{
- struct device_node *node = NULL;
+ struct fwnode_handle *fwnode = NULL;
notifier->subdevs = devm_kcalloc(
dev, ISP_MAX_SUBDEVS, sizeof(*notifier->subdevs), GFP_KERNEL);
@@ -2096,7 +2097,8 @@ static int isp_of_parse_nodes(struct device *dev,
return -ENOMEM;
while (notifier->num_subdevs < ISP_MAX_SUBDEVS &&
- (node = of_graph_get_next_endpoint(dev->of_node, node))) {
+ (fwnode = fwnode_graph_get_next_endpoint(
+ of_fwnode_handle(dev->of_node), fwnode))) {
struct isp_async_subdev *isd;
isd = devm_kzalloc(dev, sizeof(*isd), GFP_KERNEL);
@@ -2105,23 +2107,24 @@ static int isp_of_parse_nodes(struct device *dev,
notifier->subdevs[notifier->num_subdevs] = &isd->asd;
- if (isp_of_parse_node(dev, node, isd))
+ if (isp_fwnode_parse(dev, fwnode, isd))
goto error;
- isd->asd.match.of.node = of_graph_get_remote_port_parent(node);
- if (!isd->asd.match.of.node) {
+ isd->asd.match.fwnode.fwnode =
+ fwnode_graph_get_remote_port_parent(fwnode);
+ if (!isd->asd.match.fwnode.fwnode) {
dev_warn(dev, "bad remote port parent\n");
goto error;
}
- isd->asd.match_type = V4L2_ASYNC_MATCH_OF;
+ isd->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
notifier->num_subdevs++;
}
return notifier->num_subdevs;
error:
- of_node_put(node);
+ fwnode_handle_put(fwnode);
return -EINVAL;
}
@@ -2192,8 +2195,8 @@ static int isp_probe(struct platform_device *pdev)
return -ENOMEM;
}
- ret = of_property_read_u32(pdev->dev.of_node, "ti,phy-type",
- &isp->phy_type);
+ ret = fwnode_property_read_u32(of_fwnode_handle(pdev->dev.of_node),
+ "ti,phy-type", &isp->phy_type);
if (ret)
return ret;
@@ -2202,12 +2205,12 @@ static int isp_probe(struct platform_device *pdev)
if (IS_ERR(isp->syscon))
return PTR_ERR(isp->syscon);
- ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 1,
- &isp->syscon_offset);
+ ret = of_property_read_u32_index(pdev->dev.of_node,
+ "syscon", 1, &isp->syscon_offset);
if (ret)
return ret;
- ret = isp_of_parse_nodes(&pdev->dev, &isp->notifier);
+ ret = isp_fwnodes_parse(&pdev->dev, &isp->notifier);
if (ret < 0)
return ret;
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index 929006f65cc7..399095170b6e 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -25,6 +25,7 @@
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/time.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
@@ -37,9 +38,11 @@
#include <media/v4l2-async.h>
#include <media/v4l2-clk.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
#include <media/videobuf2-dma-sg.h>
@@ -345,6 +348,36 @@ static const struct pxa_mbus_lookup mbus_fmt[] = {
.layout = PXA_MBUS_LAYOUT_PACKED,
},
}, {
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .name = "Bayer 8 GBRG",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_NONE,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .name = "Bayer 8 GRBG",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_NONE,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .fmt = {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .name = "Bayer 8 RGGB",
+ .bits_per_sample = 8,
+ .packing = PXA_MBUS_PACKING_NONE,
+ .order = PXA_MBUS_ORDER_LE,
+ .layout = PXA_MBUS_LAYOUT_PACKED,
+ },
+}, {
.code = MEDIA_BUS_FMT_SBGGR10_1X10,
.fmt = {
.fourcc = V4L2_PIX_FMT_SBGGR10,
@@ -445,16 +478,6 @@ static const struct pxa_mbus_lookup mbus_fmt[] = {
.layout = PXA_MBUS_LAYOUT_PACKED,
},
}, {
- .code = MEDIA_BUS_FMT_SGRBG8_1X8,
- .fmt = {
- .fourcc = V4L2_PIX_FMT_SGRBG8,
- .name = "Bayer 8 GRBG",
- .bits_per_sample = 8,
- .packing = PXA_MBUS_PACKING_NONE,
- .order = PXA_MBUS_ORDER_LE,
- .layout = PXA_MBUS_LAYOUT_PACKED,
- },
-}, {
.code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
.fmt = {
.fourcc = V4L2_PIX_FMT_SGRBG10DPCM8,
@@ -555,6 +578,9 @@ static s32 pxa_mbus_bytes_per_line(u32 width, const struct pxa_mbus_pixelfmt *mf
static s32 pxa_mbus_image_size(const struct pxa_mbus_pixelfmt *mf,
u32 bytes_per_line, u32 height)
{
+ if (mf->layout == PXA_MBUS_LAYOUT_PACKED)
+ return bytes_per_line * height;
+
switch (mf->packing) {
case PXA_MBUS_PACKING_2X8_PADHI:
return bytes_per_line * height * 2;
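
The early return added above handles packed layouts directly as bytesperline * height, so formats with PXA_MBUS_PACKING_NONE (such as the Bayer-8 entries added earlier in this diff) get a valid size without falling into the packing switch. A small worked example with made-up enum names:

#include <stdio.h>

enum layout  { LAYOUT_PACKED, LAYOUT_PLANAR };
enum packing { PACKING_NONE, PACKING_2X8_PADHI };

static long image_size(enum layout l, enum packing p,
		       long bytes_per_line, long height)
{
	if (l == LAYOUT_PACKED)
		return bytes_per_line * height;     /* the new early return */
	if (p == PACKING_2X8_PADHI)
		return bytes_per_line * height * 2;
	return -1;                                  /* mirrors the driver's error case */
}

int main(void)
{
	/* Bayer 8, 640x480, 640 bytes per line */
	printf("packed: %ld\n", image_size(LAYOUT_PACKED, PACKING_NONE, 640, 480));
	/* planar format with 2X8_PADHI packing */
	printf("padhi:  %ld\n", image_size(LAYOUT_PLANAR, PACKING_2X8_PADHI, 640, 480));
	return 0;
}
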
@@ -1099,7 +1125,7 @@ static u32 mclk_get_divisor(struct platform_device *pdev,
/* mclk <= ciclk / 4 (27.4.2) */
if (mclk > lcdclk / 4) {
mclk = lcdclk / 4;
- dev_warn(pcdev_to_dev(pcdev),
+ dev_warn(&pdev->dev,
"Limiting master clock to %lu\n", mclk);
}
@@ -1110,7 +1136,7 @@ static u32 mclk_get_divisor(struct platform_device *pdev,
if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN)
pcdev->mclk = lcdclk / (2 * (div + 1));
- dev_dbg(pcdev_to_dev(pcdev), "LCD clock %luHz, target freq %luHz, divisor %u\n",
+ dev_dbg(&pdev->dev, "LCD clock %luHz, target freq %luHz, divisor %u\n",
lcdclk, mclk, div);
return div;
@@ -1291,6 +1317,7 @@ static void pxa_camera_setup_cicr(struct pxa_camera_dev *pcdev,
* transformation. Note that UYVY is the only format that
* should be used if pxa framebuffer Overlay2 is used.
*/
+ /* fall through */
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_VYUY:
case V4L2_PIX_FMT_YUYV:
@@ -2066,6 +2093,8 @@ static const struct v4l2_ioctl_ops pxa_camera_ioctl_ops = {
.vidioc_g_register = pxac_vidioc_g_register,
.vidioc_s_register = pxac_vidioc_s_register,
#endif
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static struct v4l2_clk_ops pxa_camera_mclk_ops = {
@@ -2177,6 +2206,12 @@ static void pxa_camera_sensor_unbind(struct v4l2_async_notifier *notifier,
pxa_dma_stop_channels(pcdev);
pxa_camera_destroy_formats(pcdev);
+
+ if (pcdev->mclk_clk) {
+ v4l2_clk_unregister(pcdev->mclk_clk);
+ pcdev->mclk_clk = NULL;
+ }
+
video_unregister_device(&pcdev->vdev);
pcdev->sensor = NULL;
@@ -2236,7 +2271,7 @@ static int pxa_camera_pdata_from_dt(struct device *dev,
{
u32 mclk_rate;
struct device_node *remote, *np = dev->of_node;
- struct v4l2_of_endpoint ep;
+ struct v4l2_fwnode_endpoint ep;
int err = of_property_read_u32(np, "clock-frequency",
&mclk_rate);
if (!err) {
@@ -2250,7 +2285,7 @@ static int pxa_camera_pdata_from_dt(struct device *dev,
return -EINVAL;
}
- err = v4l2_of_parse_endpoint(np, &ep);
+ err = v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &ep);
if (err) {
dev_err(dev, "could not parse endpoint\n");
goto out;
@@ -2287,10 +2322,10 @@ static int pxa_camera_pdata_from_dt(struct device *dev,
if (ep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
pcdev->platform_flags |= PXA_CAMERA_PCLK_EN;
- asd->match_type = V4L2_ASYNC_MATCH_OF;
+ asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
remote = of_graph_get_remote_port(np);
if (remote) {
- asd->match.of.node = remote;
+ asd->match.fwnode.fwnode = of_fwnode_handle(remote);
of_node_put(remote);
} else {
dev_notice(dev, "no remote for %s\n", of_node_full_name(np));
@@ -2501,7 +2536,13 @@ static int pxa_camera_remove(struct platform_device *pdev)
dma_release_channel(pcdev->dma_chans[1]);
dma_release_channel(pcdev->dma_chans[2]);
- v4l2_clk_unregister(pcdev->mclk_clk);
+ v4l2_async_notifier_unregister(&pcdev->notifier);
+
+ if (pcdev->mclk_clk) {
+ v4l2_clk_unregister(pcdev->mclk_clk);
+ pcdev->mclk_clk = NULL;
+ }
+
v4l2_device_unregister(&pcdev->v4l2_dev);
dev_info(&pdev->dev, "PXA Camera driver unloaded\n");
diff --git a/drivers/media/platform/qcom/venus/Makefile b/drivers/media/platform/qcom/venus/Makefile
new file mode 100644
index 000000000000..0fe9afb83697
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/Makefile
@@ -0,0 +1,11 @@
+# Makefile for Qualcomm Venus driver
+
+venus-core-objs += core.o helpers.o firmware.o \
+ hfi_venus.o hfi_msgs.o hfi_cmds.o hfi.o
+
+venus-dec-objs += vdec.o vdec_ctrls.o
+venus-enc-objs += venc.o venc_ctrls.o
+
+obj-$(CONFIG_VIDEO_QCOM_VENUS) += venus-core.o
+obj-$(CONFIG_VIDEO_QCOM_VENUS) += venus-dec.o
+obj-$(CONFIG_VIDEO_QCOM_VENUS) += venus-enc.o
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
new file mode 100644
index 000000000000..776d2bae6979
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pm_runtime.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-ioctl.h>
+
+#include "core.h"
+#include "vdec.h"
+#include "venc.h"
+#include "firmware.h"
+
+static void venus_event_notify(struct venus_core *core, u32 event)
+{
+ struct venus_inst *inst;
+
+ switch (event) {
+ case EVT_SYS_WATCHDOG_TIMEOUT:
+ case EVT_SYS_ERROR:
+ break;
+ default:
+ return;
+ }
+
+ mutex_lock(&core->lock);
+ core->sys_error = true;
+ list_for_each_entry(inst, &core->instances, list)
+ inst->ops->event_notify(inst, EVT_SESSION_ERROR, NULL);
+ mutex_unlock(&core->lock);
+
+ disable_irq_nosync(core->irq);
+
+ /*
+ * Delay recovery to ensure venus has completed any pending cache
+ * operations. Without this sleep, we see device reset when firmware is
+ * unloaded after a system error.
+ */
+ schedule_delayed_work(&core->work, msecs_to_jiffies(100));
+}
+
+static const struct hfi_core_ops venus_core_ops = {
+ .event_notify = venus_event_notify,
+};
+
+static void venus_sys_error_handler(struct work_struct *work)
+{
+ struct venus_core *core =
+ container_of(work, struct venus_core, work.work);
+ int ret = 0;
+
+ dev_warn(core->dev, "system error has occurred, starting recovery!\n");
+
+ pm_runtime_get_sync(core->dev);
+
+ hfi_core_deinit(core, true);
+ hfi_destroy(core);
+ mutex_lock(&core->lock);
+ venus_shutdown(&core->dev_fw);
+
+ pm_runtime_put_sync(core->dev);
+
+ ret |= hfi_create(core, &venus_core_ops);
+
+ pm_runtime_get_sync(core->dev);
+
+ ret |= venus_boot(core->dev, &core->dev_fw, core->res->fwname);
+
+ ret |= hfi_core_resume(core, true);
+
+ enable_irq(core->irq);
+
+ mutex_unlock(&core->lock);
+
+ ret |= hfi_core_init(core);
+
+ pm_runtime_put_sync(core->dev);
+
+ if (ret) {
+ disable_irq_nosync(core->irq);
+ dev_warn(core->dev, "recovery failed (%d)\n", ret);
+ schedule_delayed_work(&core->work, msecs_to_jiffies(10));
+ return;
+ }
+
+ mutex_lock(&core->lock);
+ core->sys_error = false;
+ mutex_unlock(&core->lock);
+}
+
+static int venus_clks_get(struct venus_core *core)
+{
+ const struct venus_resources *res = core->res;
+ struct device *dev = core->dev;
+ unsigned int i;
+
+ for (i = 0; i < res->clks_num; i++) {
+ core->clks[i] = devm_clk_get(dev, res->clks[i]);
+ if (IS_ERR(core->clks[i]))
+ return PTR_ERR(core->clks[i]);
+ }
+
+ return 0;
+}
+
+static int venus_clks_enable(struct venus_core *core)
+{
+ const struct venus_resources *res = core->res;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < res->clks_num; i++) {
+ ret = clk_prepare_enable(core->clks[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ while (i--)
+ clk_disable_unprepare(core->clks[i]);
+
+ return ret;
+}
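
The error path above unwinds only the clocks that were successfully enabled, testing the index before decrementing so that index 0 is included and the failing clock is skipped. A stand-alone model of that cleanup order (the failing index is arbitrary):

#include <stdio.h>

#define NUM_CLKS 3

static int enable_clk(unsigned int i)
{
	printf("enable  %u\n", i);
	return i == 2 ? -1 : 0;   /* pretend the last clock fails */
}

static void disable_clk(unsigned int i)
{
	printf("disable %u\n", i);
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < NUM_CLKS; i++)
		if (enable_clk(i))
			goto err;
	return 0;
err:
	while (i--)               /* disables i-1 .. 0, never the one that failed */
		disable_clk(i);
	return 1;
}
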
+
+static void venus_clks_disable(struct venus_core *core)
+{
+ const struct venus_resources *res = core->res;
+ unsigned int i = res->clks_num;
+
+ while (i--)
+ clk_disable_unprepare(core->clks[i]);
+}
+
+static int venus_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct venus_core *core;
+ struct resource *r;
+ int ret;
+
+ core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
+ if (!core)
+ return -ENOMEM;
+
+ core->dev = dev;
+ platform_set_drvdata(pdev, core);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ core->base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(core->base))
+ return PTR_ERR(core->base);
+
+ core->irq = platform_get_irq(pdev, 0);
+ if (core->irq < 0)
+ return core->irq;
+
+ core->res = of_device_get_match_data(dev);
+ if (!core->res)
+ return -ENODEV;
+
+ ret = venus_clks_get(core);
+ if (ret)
+ return ret;
+
+ ret = dma_set_mask_and_coherent(dev, core->res->dma_mask);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&core->instances);
+ mutex_init(&core->lock);
+ INIT_DELAYED_WORK(&core->work, venus_sys_error_handler);
+
+ ret = devm_request_threaded_irq(dev, core->irq, hfi_isr, hfi_isr_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "venus", core);
+ if (ret)
+ return ret;
+
+ ret = hfi_create(core, &venus_core_ops);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ goto err_runtime_disable;
+
+ ret = venus_boot(dev, &core->dev_fw, core->res->fwname);
+ if (ret)
+ goto err_runtime_disable;
+
+ ret = hfi_core_resume(core, true);
+ if (ret)
+ goto err_venus_shutdown;
+
+ ret = hfi_core_init(core);
+ if (ret)
+ goto err_venus_shutdown;
+
+ ret = v4l2_device_register(dev, &core->v4l2_dev);
+ if (ret)
+ goto err_core_deinit;
+
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret)
+ goto err_dev_unregister;
+
+ ret = pm_runtime_put_sync(dev);
+ if (ret)
+ goto err_dev_unregister;
+
+ return 0;
+
+err_dev_unregister:
+ v4l2_device_unregister(&core->v4l2_dev);
+err_core_deinit:
+ hfi_core_deinit(core, false);
+err_venus_shutdown:
+ venus_shutdown(&core->dev_fw);
+err_runtime_disable:
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
+ hfi_destroy(core);
+ return ret;
+}
+
+static int venus_remove(struct platform_device *pdev)
+{
+ struct venus_core *core = platform_get_drvdata(pdev);
+ struct device *dev = core->dev;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ WARN_ON(ret < 0);
+
+ ret = hfi_core_deinit(core, true);
+ WARN_ON(ret);
+
+ hfi_destroy(core);
+ venus_shutdown(&core->dev_fw);
+ of_platform_depopulate(dev);
+
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
+ v4l2_device_unregister(&core->v4l2_dev);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int venus_runtime_suspend(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ int ret;
+
+ ret = hfi_core_suspend(core);
+
+ venus_clks_disable(core);
+
+ return ret;
+}
+
+static int venus_runtime_resume(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ int ret;
+
+ ret = venus_clks_enable(core);
+ if (ret)
+ return ret;
+
+ ret = hfi_core_resume(core, false);
+ if (ret)
+ goto err_clks_disable;
+
+ return 0;
+
+err_clks_disable:
+ venus_clks_disable(core);
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops venus_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(venus_runtime_suspend, venus_runtime_resume, NULL)
+};
+
+static const struct freq_tbl msm8916_freq_table[] = {
+ { 352800, 228570000 }, /* 1920x1088 @ 30 + 1280x720 @ 30 */
+ { 244800, 160000000 }, /* 1920x1088 @ 30 */
+ { 108000, 100000000 }, /* 1280x720 @ 30 */
+};
+
+static const struct reg_val msm8916_reg_preset[] = {
+ { 0xe0020, 0x05555556 },
+ { 0xe0024, 0x05555556 },
+ { 0x80124, 0x00000003 },
+};
+
+static const struct venus_resources msm8916_res = {
+ .freq_tbl = msm8916_freq_table,
+ .freq_tbl_size = ARRAY_SIZE(msm8916_freq_table),
+ .reg_tbl = msm8916_reg_preset,
+ .reg_tbl_size = ARRAY_SIZE(msm8916_reg_preset),
+ .clks = { "core", "iface", "bus", },
+ .clks_num = 3,
+ .max_load = 352800, /* 720p@30 + 1080p@30 */
+ .hfi_version = HFI_VERSION_1XX,
+ .vmem_id = VIDC_RESOURCE_NONE,
+ .vmem_size = 0,
+ .vmem_addr = 0,
+ .dma_mask = 0xddc00000 - 1,
+ .fwname = "qcom/venus-1.8/venus.mdt",
+};
+
+static const struct freq_tbl msm8996_freq_table[] = {
+ { 1944000, 490000000 }, /* 4k UHD @ 60 */
+ { 972000, 320000000 }, /* 4k UHD @ 30 */
+ { 489600, 150000000 }, /* 1080p @ 60 */
+ { 244800, 75000000 }, /* 1080p @ 30 */
+};
+
+static const struct reg_val msm8996_reg_preset[] = {
+ { 0x80010, 0xffffffff },
+ { 0x80018, 0x00001556 },
+ { 0x8001C, 0x00001556 },
+};
+
+static const struct venus_resources msm8996_res = {
+ .freq_tbl = msm8996_freq_table,
+ .freq_tbl_size = ARRAY_SIZE(msm8996_freq_table),
+ .reg_tbl = msm8996_reg_preset,
+ .reg_tbl_size = ARRAY_SIZE(msm8996_reg_preset),
+ .clks = { "core", "iface", "bus", "mbus" },
+ .clks_num = 4,
+ .max_load = 2563200,
+ .hfi_version = HFI_VERSION_3XX,
+ .vmem_id = VIDC_RESOURCE_NONE,
+ .vmem_size = 0,
+ .vmem_addr = 0,
+ .dma_mask = 0xddc00000 - 1,
+ .fwname = "qcom/venus-4.2/venus.mdt",
+};
+
+static const struct of_device_id venus_dt_match[] = {
+ { .compatible = "qcom,msm8916-venus", .data = &msm8916_res, },
+ { .compatible = "qcom,msm8996-venus", .data = &msm8996_res, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, venus_dt_match);
+
+static struct platform_driver qcom_venus_driver = {
+ .probe = venus_probe,
+ .remove = venus_remove,
+ .driver = {
+ .name = "qcom-venus",
+ .of_match_table = venus_dt_match,
+ .pm = &venus_pm_ops,
+ },
+};
+module_platform_driver(qcom_venus_driver);
+
+MODULE_ALIAS("platform:qcom-venus");
+MODULE_DESCRIPTION("Qualcomm Venus video encoder and decoder driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
new file mode 100644
index 000000000000..e542700eee32
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -0,0 +1,324 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VENUS_CORE_H_
+#define __VENUS_CORE_H_
+
+#include <linux/list.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#include "hfi.h"
+
+#define VIDC_CLKS_NUM_MAX 4
+
+struct freq_tbl {
+ unsigned int load;
+ unsigned long freq;
+};
+
+struct reg_val {
+ u32 reg;
+ u32 value;
+};
+
+struct venus_resources {
+ u64 dma_mask;
+ const struct freq_tbl *freq_tbl;
+ unsigned int freq_tbl_size;
+ const struct reg_val *reg_tbl;
+ unsigned int reg_tbl_size;
+ const char * const clks[VIDC_CLKS_NUM_MAX];
+ unsigned int clks_num;
+ enum hfi_version hfi_version;
+ u32 max_load;
+ unsigned int vmem_id;
+ u32 vmem_size;
+ u32 vmem_addr;
+ const char *fwname;
+};
+
+struct venus_format {
+ u32 pixfmt;
+ unsigned int num_planes;
+ u32 type;
+};
+
+/**
+ * struct venus_core - holds core parameters valid for all instances
+ *
+ * @base: IO memory base address
+ * @irq: Venus irq
+ * @clks: an array of struct clk pointers
+ * @core0_clk: a struct clk pointer for core0
+ * @core1_clk: a struct clk pointer for core1
+ * @vdev_dec: a reference to video device structure for decoder instances
+ * @vdev_enc: a reference to video device structure for encoder instances
+ * @v4l2_dev: a holder for v4l2 device structure
+ * @res: a reference to venus resources structure
+ * @dev: convenience struct device pointer
+ * @dev_dec: convenience struct device pointer for decoder device
+ * @dev_enc: convenience struct device pointer for encoder device
+ * @lock: a lock for this structure
+ * @instances: a list_head of all instances
+ * @insts_count: num of instances
+ * @state: the state of the venus core
+ * @done: a completion for sync HFI operations
+ * @error: an error returned during the last HFI sync operation
+ * @sys_error: an error flag that signals a system error event
+ * @core_ops: the core operations
+ * @enc_codecs: encoders supported by this core
+ * @dec_codecs: decoders supported by this core
+ * @max_sessions_supported: holds the maximum number of sessions
+ * @core_caps: core capabilities
+ * @priv: a private field for HFI operations
+ * @ops: the core HFI operations
+ * @work: a delayed work for handling system fatal error
+ */
+struct venus_core {
+ void __iomem *base;
+ int irq;
+ struct clk *clks[VIDC_CLKS_NUM_MAX];
+ struct clk *core0_clk;
+ struct clk *core1_clk;
+ struct video_device *vdev_dec;
+ struct video_device *vdev_enc;
+ struct v4l2_device v4l2_dev;
+ const struct venus_resources *res;
+ struct device *dev;
+ struct device *dev_dec;
+ struct device *dev_enc;
+ struct device dev_fw;
+ struct mutex lock;
+ struct list_head instances;
+ atomic_t insts_count;
+ unsigned int state;
+ struct completion done;
+ unsigned int error;
+ bool sys_error;
+ const struct hfi_core_ops *core_ops;
+ u32 enc_codecs;
+ u32 dec_codecs;
+ unsigned int max_sessions_supported;
+#define ENC_ROTATION_CAPABILITY 0x1
+#define ENC_SCALING_CAPABILITY 0x2
+#define ENC_DEINTERLACE_CAPABILITY 0x4
+#define DEC_MULTI_STREAM_CAPABILITY 0x8
+ unsigned int core_caps;
+ void *priv;
+ const struct hfi_ops *ops;
+ struct delayed_work work;
+};
+
+struct vdec_controls {
+ u32 post_loop_deb_mode;
+ u32 profile;
+ u32 level;
+};
+
+struct venc_controls {
+ u16 gop_size;
+ u32 num_p_frames;
+ u32 num_b_frames;
+ u32 bitrate_mode;
+ u32 bitrate;
+ u32 bitrate_peak;
+
+ u32 h264_i_period;
+ u32 h264_entropy_mode;
+ u32 h264_i_qp;
+ u32 h264_p_qp;
+ u32 h264_b_qp;
+ u32 h264_min_qp;
+ u32 h264_max_qp;
+ u32 h264_loop_filter_mode;
+ u32 h264_loop_filter_alpha;
+ u32 h264_loop_filter_beta;
+
+ u32 vp8_min_qp;
+ u32 vp8_max_qp;
+
+ u32 multi_slice_mode;
+ u32 multi_slice_max_bytes;
+ u32 multi_slice_max_mb;
+
+ u32 header_mode;
+
+ struct {
+ u32 mpeg4;
+ u32 h264;
+ u32 vpx;
+ } profile;
+ struct {
+ u32 mpeg4;
+ u32 h264;
+ } level;
+};
+
+struct venus_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+ dma_addr_t dma_addr;
+ u32 size;
+ struct list_head reg_list;
+ u32 flags;
+ struct list_head ref_list;
+};
+
+#define to_venus_buffer(ptr) container_of(ptr, struct venus_buffer, vb)
+
+/**
+ * struct venus_inst - holds per instance parameters
+ *
+ * @list: used to attach an instance to the core
+ * @lock: instance lock
+ * @core: a reference to the core struct
+ * @internalbufs: a list of internal buffers
+ * @registeredbufs: a list of registered capture buffers
+ * @delayed_process: a list of delayed buffers
+ * @delayed_process_work: a work_struct for processing delayed buffers
+ * @ctrl_handler: v4l control handler
+ * @controls: a union of decoder and encoder control parameters
+ * @fh: a holder of v4l file handle structure
+ * @streamon_cap: stream on flag for capture queue
+ * @streamon_out: stream on flag for output queue
+ * @cmd_stop: a flag to signal encoder/decoder commands
+ * @width: current capture width
+ * @height: current capture height
+ * @out_width: current output width
+ * @out_height: current output height
+ * @colorspace: current color space
+ * @quantization: current quantization
+ * @xfer_func: current xfer function
+ * @fps: holds current FPS
+ * @timeperframe: holds current time per frame structure
+ * @fmt_out: a reference to output format structure
+ * @fmt_cap: a reference to capture format structure
+ * @num_input_bufs: holds number of input buffers
+ * @num_output_bufs: holds number of output buffers
+ * @input_buf_size: holds input buffer size
+ * @output_buf_size: holds output buffer size
+ * @reconfig: a flag raised by decoder when the stream resolution changed
+ * @reconfig_width: holds the new width
+ * @reconfig_height: holds the new height
+ * @sequence_cap: a sequence counter for capture queue
+ * @sequence_out: a sequence counter for output queue
+ * @m2m_dev: a reference to m2m device structure
+ * @m2m_ctx: a reference to m2m context structure
+ * @state: current state of the instance
+ * @done: a completion for sync HFI operation
+ * @error: an error returned during last HFI sync operation
+ * @session_error: a flag raised by the HFI interface in case of session error
+ * @ops: HFI operations
+ * @priv: a private field for HFI operations callbacks
+ * @session_type: the type of the session (decoder or encoder)
+ * @hprop: a union used as a holder by get property
+ * @cap_width: width capability
+ * @cap_height: height capability
+ * @cap_mbs_per_frame: macroblocks per frame capability
+ * @cap_mbs_per_sec: macroblocks per second capability
+ * @cap_framerate: framerate capability
+ * @cap_scale_x: horizontal scaling capability
+ * @cap_scale_y: vertical scaling capability
+ * @cap_bitrate: bitrate capability
+ * @cap_hier_p: hier capability
+ * @cap_ltr_count: LTR count capability
+ * @cap_secure_output2_threshold: secure OUTPUT2 threshold capability
+ * @cap_bufs_mode_static: buffers allocation mode capability
+ * @cap_bufs_mode_dynamic: buffers allocation mode capability
+ * @pl_count: count of supported profiles/levels
+ * @pl: supported profiles/levels
+ * @bufreq: holds buffer requirements
+ */
+struct venus_inst {
+ struct list_head list;
+ struct mutex lock;
+ struct venus_core *core;
+ struct list_head internalbufs;
+ struct list_head registeredbufs;
+ struct list_head delayed_process;
+ struct work_struct delayed_process_work;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ union {
+ struct vdec_controls dec;
+ struct venc_controls enc;
+ } controls;
+ struct v4l2_fh fh;
+ unsigned int streamon_cap, streamon_out;
+ bool cmd_stop;
+ u32 width;
+ u32 height;
+ u32 out_width;
+ u32 out_height;
+ u32 colorspace;
+ u8 ycbcr_enc;
+ u8 quantization;
+ u8 xfer_func;
+ u64 fps;
+ struct v4l2_fract timeperframe;
+ const struct venus_format *fmt_out;
+ const struct venus_format *fmt_cap;
+ unsigned int num_input_bufs;
+ unsigned int num_output_bufs;
+ unsigned int input_buf_size;
+ unsigned int output_buf_size;
+ bool reconfig;
+ u32 reconfig_width;
+ u32 reconfig_height;
+ u32 sequence_cap;
+ u32 sequence_out;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ unsigned int state;
+ struct completion done;
+ unsigned int error;
+ bool session_error;
+ const struct hfi_inst_ops *ops;
+ u32 session_type;
+ union hfi_get_property hprop;
+ struct hfi_capability cap_width;
+ struct hfi_capability cap_height;
+ struct hfi_capability cap_mbs_per_frame;
+ struct hfi_capability cap_mbs_per_sec;
+ struct hfi_capability cap_framerate;
+ struct hfi_capability cap_scale_x;
+ struct hfi_capability cap_scale_y;
+ struct hfi_capability cap_bitrate;
+ struct hfi_capability cap_hier_p;
+ struct hfi_capability cap_ltr_count;
+ struct hfi_capability cap_secure_output2_threshold;
+ bool cap_bufs_mode_static;
+ bool cap_bufs_mode_dynamic;
+ unsigned int pl_count;
+ struct hfi_profile_level pl[HFI_MAX_PROFILE_COUNT];
+ struct hfi_buffer_requirements bufreq[HFI_BUFFER_TYPE_MAX];
+};
+
+#define ctrl_to_inst(ctrl) \
+ container_of((ctrl)->handler, struct venus_inst, ctrl_handler)
+
+static inline struct venus_inst *to_inst(struct file *filp)
+{
+ return container_of(filp->private_data, struct venus_inst, fh);
+}
+
+static inline void *to_hfi_priv(struct venus_core *core)
+{
+ return core->priv;
+}
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
new file mode 100644
index 000000000000..1b1a4f355918
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/slab.h>
+#include <linux/qcom_scm.h>
+#include <linux/soc/qcom/mdt_loader.h>
+
+#include "firmware.h"
+
+#define VENUS_PAS_ID 9
+#define VENUS_FW_MEM_SIZE SZ_8M
+
+static void device_release_dummy(struct device *dev)
+{
+ of_reserved_mem_device_release(dev);
+}
+
+int venus_boot(struct device *parent, struct device *fw_dev, const char *fwname)
+{
+ const struct firmware *mdt;
+ phys_addr_t mem_phys;
+ ssize_t fw_size;
+ size_t mem_size;
+ void *mem_va;
+ int ret;
+
+ if (!qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
+ fw_dev->parent = parent;
+ fw_dev->release = device_release_dummy;
+
+ ret = dev_set_name(fw_dev, "%s:%s", dev_name(parent), "firmware");
+ if (ret)
+ return ret;
+
+ ret = device_register(fw_dev);
+ if (ret < 0)
+ return ret;
+
+ ret = of_reserved_mem_device_init_by_idx(fw_dev, parent->of_node, 0);
+ if (ret)
+ goto err_unreg_device;
+
+ mem_size = VENUS_FW_MEM_SIZE;
+
+ mem_va = dmam_alloc_coherent(fw_dev, mem_size, &mem_phys, GFP_KERNEL);
+ if (!mem_va) {
+ ret = -ENOMEM;
+ goto err_unreg_device;
+ }
+
+ ret = request_firmware(&mdt, fwname, fw_dev);
+ if (ret < 0)
+ goto err_unreg_device;
+
+ fw_size = qcom_mdt_get_size(mdt);
+ if (fw_size < 0) {
+ ret = fw_size;
+ release_firmware(mdt);
+ goto err_unreg_device;
+ }
+
+ ret = qcom_mdt_load(fw_dev, mdt, fwname, VENUS_PAS_ID, mem_va, mem_phys,
+ mem_size);
+
+ release_firmware(mdt);
+
+ if (ret)
+ goto err_unreg_device;
+
+ ret = qcom_scm_pas_auth_and_reset(VENUS_PAS_ID);
+ if (ret)
+ goto err_unreg_device;
+
+ return 0;
+
+err_unreg_device:
+ device_unregister(fw_dev);
+ return ret;
+}
+
+int venus_shutdown(struct device *fw_dev)
+{
+ int ret;
+
+ ret = qcom_scm_pas_shutdown(VENUS_PAS_ID);
+ device_unregister(fw_dev);
+ memset(fw_dev, 0, sizeof(*fw_dev));
+
+ return ret;
+}
diff --git a/drivers/media/platform/qcom/venus/firmware.h b/drivers/media/platform/qcom/venus/firmware.h
new file mode 100644
index 000000000000..f81a98979798
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/firmware.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __VENUS_FIRMWARE_H__
+#define __VENUS_FIRMWARE_H__
+
+struct device;
+
+int venus_boot(struct device *parent, struct device *fw_dev,
+ const char *fwname);
+int venus_shutdown(struct device *fw_dev);
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
new file mode 100644
index 000000000000..5f4434c0a8f1
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -0,0 +1,725 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <media/videobuf2-dma-sg.h>
+#include <media/v4l2-mem2mem.h>
+#include <asm/div64.h>
+
+#include "core.h"
+#include "helpers.h"
+#include "hfi_helper.h"
+
+struct intbuf {
+ struct list_head list;
+ u32 type;
+ size_t size;
+ void *va;
+ dma_addr_t da;
+ unsigned long attrs;
+};
+
+static int intbufs_set_buffer(struct venus_inst *inst, u32 type)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev;
+ struct hfi_buffer_requirements bufreq;
+ struct hfi_buffer_desc bd;
+ struct intbuf *buf;
+ unsigned int i;
+ int ret;
+
+ ret = venus_helper_get_bufreq(inst, type, &bufreq);
+ if (ret)
+ return 0;
+
+ if (!bufreq.size)
+ return 0;
+
+ for (i = 0; i < bufreq.count_actual; i++) {
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ buf->type = bufreq.type;
+ buf->size = bufreq.size;
+ buf->attrs = DMA_ATTR_WRITE_COMBINE |
+ DMA_ATTR_NO_KERNEL_MAPPING;
+ buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
+ buf->attrs);
+ if (!buf->va) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ memset(&bd, 0, sizeof(bd));
+ bd.buffer_size = buf->size;
+ bd.buffer_type = buf->type;
+ bd.num_buffers = 1;
+ bd.device_addr = buf->da;
+
+ ret = hfi_session_set_buffers(inst, &bd);
+ if (ret) {
+ dev_err(dev, "set session buffers failed\n");
+ goto dma_free;
+ }
+
+ list_add_tail(&buf->list, &inst->internalbufs);
+ }
+
+ return 0;
+
+dma_free:
+ dma_free_attrs(dev, buf->size, buf->va, buf->da, buf->attrs);
+fail:
+ kfree(buf);
+ return ret;
+}
+
+static int intbufs_unset_buffers(struct venus_inst *inst)
+{
+ struct hfi_buffer_desc bd = {0};
+ struct intbuf *buf, *n;
+ int ret = 0;
+
+ list_for_each_entry_safe(buf, n, &inst->internalbufs, list) {
+ bd.buffer_size = buf->size;
+ bd.buffer_type = buf->type;
+ bd.num_buffers = 1;
+ bd.device_addr = buf->da;
+ bd.response_required = true;
+
+ ret = hfi_session_unset_buffers(inst, &bd);
+
+ list_del_init(&buf->list);
+ dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
+ buf->attrs);
+ kfree(buf);
+ }
+
+ return ret;
+}
+
+static const unsigned int intbuf_types[] = {
+ HFI_BUFFER_INTERNAL_SCRATCH,
+ HFI_BUFFER_INTERNAL_SCRATCH_1,
+ HFI_BUFFER_INTERNAL_SCRATCH_2,
+ HFI_BUFFER_INTERNAL_PERSIST,
+ HFI_BUFFER_INTERNAL_PERSIST_1,
+};
+
+static int intbufs_alloc(struct venus_inst *inst)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(intbuf_types); i++) {
+ ret = intbufs_set_buffer(inst, intbuf_types[i]);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ intbufs_unset_buffers(inst);
+ return ret;
+}
+
+static int intbufs_free(struct venus_inst *inst)
+{
+ return intbufs_unset_buffers(inst);
+}
+
+static u32 load_per_instance(struct venus_inst *inst)
+{
+ u32 mbs;
+
+ if (!inst || !(inst->state >= INST_INIT && inst->state < INST_STOP))
+ return 0;
+
+ mbs = (ALIGN(inst->width, 16) / 16) * (ALIGN(inst->height, 16) / 16);
+
+ return mbs * inst->fps;
+}
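
load_per_instance() converts the coded resolution and frame rate into 16x16 macroblocks per second, the unit used by the frequency tables in core.c. A quick stand-alone check of the arithmetic against the table comments (1920x1088 at 30 fps gives 244800):

#include <stdio.h>

#define ALIGN16(x) (((x) + 15u) & ~15u)

static unsigned long load(unsigned int w, unsigned int h, unsigned int fps)
{
	return (unsigned long)(ALIGN16(w) / 16) * (ALIGN16(h) / 16) * fps;
}

int main(void)
{
	printf("1920x1088 @ 30: %lu mbs/s\n", load(1920, 1088, 30)); /* 244800 */
	printf("1280x720  @ 30: %lu mbs/s\n", load(1280, 720, 30));  /* 108000 */
	printf("3840x2160 @ 30: %lu mbs/s\n", load(3840, 2160, 30)); /* 972000 */
	return 0;
}
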
+
+static u32 load_per_type(struct venus_core *core, u32 session_type)
+{
+ struct venus_inst *inst = NULL;
+ u32 mbs_per_sec = 0;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ if (inst->session_type != session_type)
+ continue;
+
+ mbs_per_sec += load_per_instance(inst);
+ }
+ mutex_unlock(&core->lock);
+
+ return mbs_per_sec;
+}
+
+static int load_scale_clocks(struct venus_core *core)
+{
+ const struct freq_tbl *table = core->res->freq_tbl;
+ unsigned int num_rows = core->res->freq_tbl_size;
+ unsigned long freq = table[0].freq;
+ struct clk *clk = core->clks[0];
+ struct device *dev = core->dev;
+ u32 mbs_per_sec;
+ unsigned int i;
+ int ret;
+
+ mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) +
+ load_per_type(core, VIDC_SESSION_TYPE_DEC);
+
+ if (mbs_per_sec > core->res->max_load)
+ dev_warn(dev, "HW is overloaded, needed: %d max: %d\n",
+ mbs_per_sec, core->res->max_load);
+
+ if (!mbs_per_sec && num_rows > 1) {
+ freq = table[num_rows - 1].freq;
+ goto set_freq;
+ }
+
+ for (i = 0; i < num_rows; i++) {
+ if (mbs_per_sec > table[i].load)
+ break;
+ freq = table[i].freq;
+ }
+
+set_freq:
+
+ if (core->res->hfi_version == HFI_VERSION_3XX) {
+ ret = clk_set_rate(clk, freq);
+ ret |= clk_set_rate(core->core0_clk, freq);
+ ret |= clk_set_rate(core->core1_clk, freq);
+ } else {
+ ret = clk_set_rate(clk, freq);
+ }
+
+ if (ret) {
+ dev_err(dev, "failed to set clock rate %lu (%d)\n", freq, ret);
+ return ret;
+ }
+
+ return 0;
+}
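
The table walk above starts from the highest row and keeps lowering the frequency while the row's rated load still covers the demand, falling back to the lowest rate when idle; the tables are ordered by descending load. A stand-alone sketch using the msm8916 values from core.c:

#include <stdio.h>

struct freq_row { unsigned int load; unsigned long freq; };

/* msm8916 table, ordered by descending load */
static const struct freq_row tbl[] = {
	{ 352800, 228570000 },
	{ 244800, 160000000 },
	{ 108000, 100000000 },
};

static unsigned long pick_freq(unsigned int mbs_per_sec)
{
	unsigned long freq = tbl[0].freq;
	unsigned int i;

	if (!mbs_per_sec)
		return tbl[2].freq;       /* idle: lowest rate */

	for (i = 0; i < 3; i++) {
		if (mbs_per_sec > tbl[i].load)
			break;            /* previous row was the last one that fits */
		freq = tbl[i].freq;
	}
	return freq;
}

int main(void)
{
	printf("244800 mbs/s -> %lu Hz\n", pick_freq(244800)); /* 160000000 */
	printf("352800 mbs/s -> %lu Hz\n", pick_freq(352800)); /* 228570000 */
	return 0;
}
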
+
+static void fill_buffer_desc(const struct venus_buffer *buf,
+ struct hfi_buffer_desc *bd, bool response)
+{
+ memset(bd, 0, sizeof(*bd));
+ bd->buffer_type = HFI_BUFFER_OUTPUT;
+ bd->buffer_size = buf->size;
+ bd->num_buffers = 1;
+ bd->device_addr = buf->dma_addr;
+ bd->response_required = response;
+}
+
+static void return_buf_error(struct venus_inst *inst,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+
+ if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, vbuf);
+ else
+ v4l2_m2m_dst_buf_remove_by_buf(m2m_ctx, vbuf);
+
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+}
+
+static int
+session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
+{
+ struct venus_buffer *buf = to_venus_buffer(vbuf);
+ struct vb2_buffer *vb = &vbuf->vb2_buf;
+ unsigned int type = vb->type;
+ struct hfi_frame_data fdata;
+ int ret;
+
+ memset(&fdata, 0, sizeof(fdata));
+ fdata.alloc_len = buf->size;
+ fdata.device_addr = buf->dma_addr;
+ fdata.timestamp = vb->timestamp;
+ do_div(fdata.timestamp, NSEC_PER_USEC);
+ fdata.flags = 0;
+ fdata.clnt_data = vbuf->vb2_buf.index;
+
+ if (!fdata.timestamp)
+ fdata.flags |= HFI_BUFFERFLAG_TIMESTAMPINVALID;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ fdata.buffer_type = HFI_BUFFER_INPUT;
+ fdata.filled_len = vb2_get_plane_payload(vb, 0);
+ fdata.offset = vb->planes[0].data_offset;
+
+ if (vbuf->flags & V4L2_BUF_FLAG_LAST || !fdata.filled_len)
+ fdata.flags |= HFI_BUFFERFLAG_EOS;
+ } else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ fdata.buffer_type = HFI_BUFFER_OUTPUT;
+ fdata.filled_len = 0;
+ fdata.offset = 0;
+ }
+
+ ret = hfi_session_process_buf(inst, &fdata);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
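+/*
+ * Tell whether capture buffers must be explicitly registered with the
+ * firmware: 3xx decoders, and 1xx decoders using dynamic buffer mode,
+ * do not need it.
+ */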
+static inline int is_reg_unreg_needed(struct venus_inst *inst)
+{
+ if (inst->session_type == VIDC_SESSION_TYPE_DEC &&
+ inst->core->res->hfi_version == HFI_VERSION_3XX)
+ return 0;
+
+ if (inst->session_type == VIDC_SESSION_TYPE_DEC &&
+ inst->cap_bufs_mode_dynamic &&
+ inst->core->res->hfi_version == HFI_VERSION_1XX)
+ return 0;
+
+ return 1;
+}
+
+static int session_unregister_bufs(struct venus_inst *inst)
+{
+ struct venus_buffer *buf, *n;
+ struct hfi_buffer_desc bd;
+ int ret = 0;
+
+ if (!is_reg_unreg_needed(inst))
+ return 0;
+
+ list_for_each_entry_safe(buf, n, &inst->registeredbufs, reg_list) {
+ fill_buffer_desc(buf, &bd, true);
+ ret = hfi_session_unset_buffers(inst, &bd);
+ list_del_init(&buf->reg_list);
+ }
+
+ return ret;
+}
+
+static int session_register_bufs(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev;
+ struct hfi_buffer_desc bd;
+ struct venus_buffer *buf;
+ int ret = 0;
+
+ if (!is_reg_unreg_needed(inst))
+ return 0;
+
+ list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
+ fill_buffer_desc(buf, &bd, false);
+ ret = hfi_session_set_buffers(inst, &bd);
+ if (ret) {
+ dev_err(dev, "%s: set buffer failed\n", __func__);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
+ struct hfi_buffer_requirements *req)
+{
+ u32 ptype = HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS;
+ union hfi_get_property hprop;
+ unsigned int i;
+ int ret;
+
+ if (req)
+ memset(req, 0, sizeof(*req));
+
+ ret = hfi_session_get_property(inst, ptype, &hprop);
+ if (ret)
+ return ret;
+
+ ret = -EINVAL;
+
+ for (i = 0; i < HFI_BUFFER_TYPE_MAX; i++) {
+ if (hprop.bufreq[i].type != type)
+ continue;
+
+ if (req)
+ memcpy(req, &hprop.bufreq[i], sizeof(*req));
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(venus_helper_get_bufreq);
+
+int venus_helper_set_input_resolution(struct venus_inst *inst,
+ unsigned int width, unsigned int height)
+{
+ u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
+ struct hfi_framesize fs;
+
+ fs.buffer_type = HFI_BUFFER_INPUT;
+ fs.width = width;
+ fs.height = height;
+
+ return hfi_session_set_property(inst, ptype, &fs);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_input_resolution);
+
+int venus_helper_set_output_resolution(struct venus_inst *inst,
+ unsigned int width, unsigned int height)
+{
+ u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
+ struct hfi_framesize fs;
+
+ fs.buffer_type = HFI_BUFFER_OUTPUT;
+ fs.width = width;
+ fs.height = height;
+
+ return hfi_session_set_property(inst, ptype, &fs);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_output_resolution);
+
+int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
+ unsigned int output_bufs)
+{
+ u32 ptype = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
+ struct hfi_buffer_count_actual buf_count;
+ int ret;
+
+ buf_count.type = HFI_BUFFER_INPUT;
+ buf_count.count_actual = input_bufs;
+
+ ret = hfi_session_set_property(inst, ptype, &buf_count);
+ if (ret)
+ return ret;
+
+ buf_count.type = HFI_BUFFER_OUTPUT;
+ buf_count.count_actual = output_bufs;
+
+ return hfi_session_set_property(inst, ptype, &buf_count);
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_num_bufs);
+
+int venus_helper_set_color_format(struct venus_inst *inst, u32 pixfmt)
+{
+ struct hfi_uncompressed_format_select fmt;
+ u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
+ int ret;
+
+ if (inst->session_type == VIDC_SESSION_TYPE_DEC)
+ fmt.buffer_type = HFI_BUFFER_OUTPUT;
+ else if (inst->session_type == VIDC_SESSION_TYPE_ENC)
+ fmt.buffer_type = HFI_BUFFER_INPUT;
+ else
+ return -EINVAL;
+
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_NV12:
+ fmt.format = HFI_COLOR_FORMAT_NV12;
+ break;
+ case V4L2_PIX_FMT_NV21:
+ fmt.format = HFI_COLOR_FORMAT_NV21;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hfi_session_set_property(inst, ptype, &fmt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(venus_helper_set_color_format);
+
+static void delayed_process_buf_func(struct work_struct *work)
+{
+ struct venus_buffer *buf, *n;
+ struct venus_inst *inst;
+ int ret;
+
+ inst = container_of(work, struct venus_inst, delayed_process_work);
+
+ mutex_lock(&inst->lock);
+
+ if (!(inst->streamon_out & inst->streamon_cap))
+ goto unlock;
+
+ list_for_each_entry_safe(buf, n, &inst->delayed_process, ref_list) {
+ if (buf->flags & HFI_BUFFERFLAG_READONLY)
+ continue;
+
+ ret = session_process_buf(inst, &buf->vb);
+ if (ret)
+ return_buf_error(inst, &buf->vb);
+
+ list_del_init(&buf->ref_list);
+ }
+unlock:
+ mutex_unlock(&inst->lock);
+}
+
+void venus_helper_release_buf_ref(struct venus_inst *inst, unsigned int idx)
+{
+ struct venus_buffer *buf;
+
+ list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
+ if (buf->vb.vb2_buf.index == idx) {
+ buf->flags &= ~HFI_BUFFERFLAG_READONLY;
+ schedule_work(&inst->delayed_process_work);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(venus_helper_release_buf_ref);
+
+void venus_helper_acquire_buf_ref(struct vb2_v4l2_buffer *vbuf)
+{
+ struct venus_buffer *buf = to_venus_buffer(vbuf);
+
+ buf->flags |= HFI_BUFFERFLAG_READONLY;
+}
+EXPORT_SYMBOL_GPL(venus_helper_acquire_buf_ref);
+
+static int is_buf_refed(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
+{
+ struct venus_buffer *buf = to_venus_buffer(vbuf);
+
+ if (buf->flags & HFI_BUFFERFLAG_READONLY) {
+ list_add_tail(&buf->ref_list, &inst->delayed_process);
+ schedule_work(&inst->delayed_process_work);
+ return 1;
+ }
+
+ return 0;
+}
+
+struct vb2_v4l2_buffer *
+venus_helper_find_buf(struct venus_inst *inst, unsigned int type, u32 idx)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return v4l2_m2m_src_buf_remove_by_idx(m2m_ctx, idx);
+ else
+ return v4l2_m2m_dst_buf_remove_by_idx(m2m_ctx, idx);
+}
+EXPORT_SYMBOL_GPL(venus_helper_find_buf);
+
+int venus_helper_vb2_buf_init(struct vb2_buffer *vb)
+{
+ struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct venus_buffer *buf = to_venus_buffer(vbuf);
+ struct sg_table *sgt;
+
+ sgt = vb2_dma_sg_plane_desc(vb, 0);
+ if (!sgt)
+ return -EFAULT;
+
+ buf->size = vb2_plane_size(vb, 0);
+ buf->dma_addr = sg_dma_address(sgt->sgl);
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ list_add_tail(&buf->reg_list, &inst->registeredbufs);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_init);
+
+int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb)
+{
+ struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ vb2_plane_size(vb, 0) < inst->output_buf_size)
+ return -EINVAL;
+ if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ vb2_plane_size(vb, 0) < inst->input_buf_size)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_prepare);
+
+void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ int ret;
+
+ mutex_lock(&inst->lock);
+
+ if (inst->cmd_stop) {
+ vbuf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+ inst->cmd_stop = false;
+ goto unlock;
+ }
+
+ v4l2_m2m_buf_queue(m2m_ctx, vbuf);
+
+ if (!(inst->streamon_out & inst->streamon_cap))
+ goto unlock;
+
+ ret = is_buf_refed(inst, vbuf);
+ if (ret)
+ goto unlock;
+
+ ret = session_process_buf(inst, vbuf);
+ if (ret)
+ return_buf_error(inst, vbuf);
+
+unlock:
+ mutex_unlock(&inst->lock);
+}
+EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_queue);
+
+void venus_helper_buffers_done(struct venus_inst *inst,
+ enum vb2_buffer_state state)
+{
+ struct vb2_v4l2_buffer *buf;
+
+ while ((buf = v4l2_m2m_src_buf_remove(inst->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, state);
+ while ((buf = v4l2_m2m_dst_buf_remove(inst->m2m_ctx)))
+ v4l2_m2m_buf_done(buf, state);
+}
+EXPORT_SYMBOL_GPL(venus_helper_buffers_done);
+
+void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
+{
+ struct venus_inst *inst = vb2_get_drv_priv(q);
+ struct venus_core *core = inst->core;
+ int ret;
+
+ mutex_lock(&inst->lock);
+
+ if (inst->streamon_out & inst->streamon_cap) {
+ ret = hfi_session_stop(inst);
+ ret |= hfi_session_unload_res(inst);
+ ret |= session_unregister_bufs(inst);
+ ret |= intbufs_free(inst);
+ ret |= hfi_session_deinit(inst);
+
+ if (inst->session_error || core->sys_error)
+ ret = -EIO;
+
+ if (ret)
+ hfi_session_abort(inst);
+
+ load_scale_clocks(core);
+ }
+
+ venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ inst->streamon_out = 0;
+ else
+ inst->streamon_cap = 0;
+
+ mutex_unlock(&inst->lock);
+}
+EXPORT_SYMBOL_GPL(venus_helper_vb2_stop_streaming);
+
+int venus_helper_vb2_start_streaming(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ int ret;
+
+ ret = intbufs_alloc(inst);
+ if (ret)
+ return ret;
+
+ ret = session_register_bufs(inst);
+ if (ret)
+ goto err_bufs_free;
+
+ load_scale_clocks(core);
+
+ ret = hfi_session_load_res(inst);
+ if (ret)
+ goto err_unreg_bufs;
+
+ ret = hfi_session_start(inst);
+ if (ret)
+ goto err_unload_res;
+
+ return 0;
+
+err_unload_res:
+ hfi_session_unload_res(inst);
+err_unreg_bufs:
+ session_unregister_bufs(inst);
+err_bufs_free:
+ intbufs_free(inst);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(venus_helper_vb2_start_streaming);
+
+void venus_helper_m2m_device_run(void *priv)
+{
+ struct venus_inst *inst = priv;
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct v4l2_m2m_buffer *buf, *n;
+ int ret;
+
+ mutex_lock(&inst->lock);
+
+ v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) {
+ ret = session_process_buf(inst, &buf->vb);
+ if (ret)
+ return_buf_error(inst, &buf->vb);
+ }
+
+ v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
+ ret = session_process_buf(inst, &buf->vb);
+ if (ret)
+ return_buf_error(inst, &buf->vb);
+ }
+
+ mutex_unlock(&inst->lock);
+}
+EXPORT_SYMBOL_GPL(venus_helper_m2m_device_run);
+
+void venus_helper_m2m_job_abort(void *priv)
+{
+ struct venus_inst *inst = priv;
+
+ v4l2_m2m_job_finish(inst->m2m_dev, inst->m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(venus_helper_m2m_job_abort);
+
+void venus_helper_init_instance(struct venus_inst *inst)
+{
+ if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
+ INIT_LIST_HEAD(&inst->delayed_process);
+ INIT_WORK(&inst->delayed_process_work,
+ delayed_process_buf_func);
+ }
+}
+EXPORT_SYMBOL_GPL(venus_helper_init_instance);
diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h
new file mode 100644
index 000000000000..6a061b417a93
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/helpers.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __VENUS_HELPERS_H__
+#define __VENUS_HELPERS_H__
+
+#include <media/videobuf2-v4l2.h>
+
+struct venus_inst;
+
+struct vb2_v4l2_buffer *venus_helper_find_buf(struct venus_inst *inst,
+ unsigned int type, u32 idx);
+void venus_helper_buffers_done(struct venus_inst *inst,
+ enum vb2_buffer_state state);
+int venus_helper_vb2_buf_init(struct vb2_buffer *vb);
+int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb);
+void venus_helper_vb2_buf_queue(struct vb2_buffer *vb);
+void venus_helper_vb2_stop_streaming(struct vb2_queue *q);
+int venus_helper_vb2_start_streaming(struct venus_inst *inst);
+void venus_helper_m2m_device_run(void *priv);
+void venus_helper_m2m_job_abort(void *priv);
+int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
+ struct hfi_buffer_requirements *req);
+int venus_helper_set_input_resolution(struct venus_inst *inst,
+ unsigned int width, unsigned int height);
+int venus_helper_set_output_resolution(struct venus_inst *inst,
+ unsigned int width, unsigned int height);
+int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
+ unsigned int output_bufs);
+int venus_helper_set_color_format(struct venus_inst *inst, u32 fmt);
+void venus_helper_acquire_buf_ref(struct vb2_v4l2_buffer *vbuf);
+void venus_helper_release_buf_ref(struct venus_inst *inst, unsigned int idx);
+void venus_helper_init_instance(struct venus_inst *inst);
+#endif
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
new file mode 100644
index 000000000000..c09490876516
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -0,0 +1,522 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+
+#include "core.h"
+#include "hfi.h"
+#include "hfi_cmds.h"
+#include "hfi_venus.h"
+
+#define TIMEOUT msecs_to_jiffies(1000)
+
+static u32 to_codec_type(u32 pixfmt)
+{
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_H264:
+ case V4L2_PIX_FMT_H264_NO_SC:
+ return HFI_VIDEO_CODEC_H264;
+ case V4L2_PIX_FMT_H263:
+ return HFI_VIDEO_CODEC_H263;
+ case V4L2_PIX_FMT_MPEG1:
+ return HFI_VIDEO_CODEC_MPEG1;
+ case V4L2_PIX_FMT_MPEG2:
+ return HFI_VIDEO_CODEC_MPEG2;
+ case V4L2_PIX_FMT_MPEG4:
+ return HFI_VIDEO_CODEC_MPEG4;
+ case V4L2_PIX_FMT_VC1_ANNEX_G:
+ case V4L2_PIX_FMT_VC1_ANNEX_L:
+ return HFI_VIDEO_CODEC_VC1;
+ case V4L2_PIX_FMT_VP8:
+ return HFI_VIDEO_CODEC_VP8;
+ case V4L2_PIX_FMT_VP9:
+ return HFI_VIDEO_CODEC_VP9;
+ case V4L2_PIX_FMT_XVID:
+ return HFI_VIDEO_CODEC_DIVX;
+ default:
+ return 0;
+ }
+}
+
+int hfi_core_init(struct venus_core *core)
+{
+ int ret = 0;
+
+ mutex_lock(&core->lock);
+
+ if (core->state >= CORE_INIT)
+ goto unlock;
+
+ reinit_completion(&core->done);
+
+ ret = core->ops->core_init(core);
+ if (ret)
+ goto unlock;
+
+ ret = wait_for_completion_timeout(&core->done, TIMEOUT);
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ goto unlock;
+ }
+
+ ret = 0;
+
+ if (core->error != HFI_ERR_NONE) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ core->state = CORE_INIT;
+unlock:
+ mutex_unlock(&core->lock);
+ return ret;
+}
+
+static int core_deinit_wait_atomic_t(atomic_t *p)
+{
+ schedule();
+ return 0;
+}
+
+int hfi_core_deinit(struct venus_core *core, bool blocking)
+{
+ int ret = 0, empty;
+
+ mutex_lock(&core->lock);
+
+ if (core->state == CORE_UNINIT)
+ goto unlock;
+
+ empty = list_empty(&core->instances);
+
+ if (!empty && !blocking) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (!empty) {
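+ /* wait for all instances to be destroyed before deinitializing the core */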
+ mutex_unlock(&core->lock);
+ wait_on_atomic_t(&core->insts_count, core_deinit_wait_atomic_t,
+ TASK_UNINTERRUPTIBLE);
+ mutex_lock(&core->lock);
+ }
+
+ ret = core->ops->core_deinit(core);
+
+ if (!ret)
+ core->state = CORE_UNINIT;
+
+unlock:
+ mutex_unlock(&core->lock);
+ return ret;
+}
+
+int hfi_core_suspend(struct venus_core *core)
+{
+ if (core->state != CORE_INIT)
+ return 0;
+
+ return core->ops->suspend(core);
+}
+
+int hfi_core_resume(struct venus_core *core, bool force)
+{
+ if (!force && core->state != CORE_INIT)
+ return 0;
+
+ return core->ops->resume(core);
+}
+
+int hfi_core_trigger_ssr(struct venus_core *core, u32 type)
+{
+ return core->ops->core_trigger_ssr(core, type);
+}
+
+int hfi_core_ping(struct venus_core *core)
+{
+ int ret;
+
+ mutex_lock(&core->lock);
+
+ ret = core->ops->core_ping(core, 0xbeef);
+ if (ret)
+ goto unlock;
+
+ ret = wait_for_completion_timeout(&core->done, TIMEOUT);
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ goto unlock;
+ }
+ ret = 0;
+ if (core->error != HFI_ERR_NONE)
+ ret = -ENODEV;
+unlock:
+ mutex_unlock(&core->lock);
+ return ret;
+}
+
+static int wait_session_msg(struct venus_inst *inst)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&inst->done, TIMEOUT);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ if (inst->error != HFI_ERR_NONE)
+ return -EIO;
+
+ return 0;
+}
+
+int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops)
+{
+ struct venus_core *core = inst->core;
+
+ if (!ops)
+ return -EINVAL;
+
+ inst->state = INST_UNINIT;
+ init_completion(&inst->done);
+ inst->ops = ops;
+
+ mutex_lock(&core->lock);
+ list_add_tail(&inst->list, &core->instances);
+ atomic_inc(&core->insts_count);
+ mutex_unlock(&core->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hfi_session_create);
+
+int hfi_session_init(struct venus_inst *inst, u32 pixfmt)
+{
+ struct venus_core *core = inst->core;
+ const struct hfi_ops *ops = core->ops;
+ u32 codec;
+ int ret;
+
+ codec = to_codec_type(pixfmt);
+ reinit_completion(&inst->done);
+
+ ret = ops->session_init(inst, inst->session_type, codec);
+ if (ret)
+ return ret;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ inst->state = INST_INIT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hfi_session_init);
+
+void hfi_session_destroy(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+
+ mutex_lock(&core->lock);
+ list_del_init(&inst->list);
+ atomic_dec(&core->insts_count);
+ wake_up_atomic_t(&core->insts_count);
+ mutex_unlock(&core->lock);
+}
+EXPORT_SYMBOL_GPL(hfi_session_destroy);
+
+int hfi_session_deinit(struct venus_inst *inst)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+ int ret;
+
+ if (inst->state == INST_UNINIT)
+ return 0;
+
+ if (inst->state < INST_INIT)
+ return -EINVAL;
+
+ reinit_completion(&inst->done);
+
+ ret = ops->session_end(inst);
+ if (ret)
+ return ret;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ inst->state = INST_UNINIT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hfi_session_deinit);
+
+int hfi_session_start(struct venus_inst *inst)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+ int ret;
+
+ if (inst->state != INST_LOAD_RESOURCES)
+ return -EINVAL;
+
+ reinit_completion(&inst->done);
+
+ ret = ops->session_start(inst);
+ if (ret)
+ return ret;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ inst->state = INST_START;
+
+ return 0;
+}
+
+int hfi_session_stop(struct venus_inst *inst)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+ int ret;
+
+ if (inst->state != INST_START)
+ return -EINVAL;
+
+ reinit_completion(&inst->done);
+
+ ret = ops->session_stop(inst);
+ if (ret)
+ return ret;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ inst->state = INST_STOP;
+
+ return 0;
+}
+
+int hfi_session_continue(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+
+ if (core->res->hfi_version != HFI_VERSION_3XX)
+ return 0;
+
+ return core->ops->session_continue(inst);
+}
+EXPORT_SYMBOL_GPL(hfi_session_continue);
+
+int hfi_session_abort(struct venus_inst *inst)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+ int ret;
+
+ reinit_completion(&inst->done);
+
+ ret = ops->session_abort(inst);
+ if (ret)
+ return ret;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int hfi_session_load_res(struct venus_inst *inst)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+ int ret;
+
+ if (inst->state != INST_INIT)
+ return -EINVAL;
+
+ reinit_completion(&inst->done);
+
+ ret = ops->session_load_res(inst);
+ if (ret)
+ return ret;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ inst->state = INST_LOAD_RESOURCES;
+
+ return 0;
+}
+
+int hfi_session_unload_res(struct venus_inst *inst)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+ int ret;
+
+ if (inst->state != INST_STOP)
+ return -EINVAL;
+
+ reinit_completion(&inst->done);
+
+ ret = ops->session_release_res(inst);
+ if (ret)
+ return ret;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ inst->state = INST_RELEASE_RESOURCES;
+
+ return 0;
+}
+
+int hfi_session_flush(struct venus_inst *inst)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+ int ret;
+
+ reinit_completion(&inst->done);
+
+ ret = ops->session_flush(inst, HFI_FLUSH_ALL);
+ if (ret)
+ return ret;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hfi_session_flush);
+
+int hfi_session_set_buffers(struct venus_inst *inst, struct hfi_buffer_desc *bd)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+
+ return ops->session_set_buffers(inst, bd);
+}
+
+int hfi_session_unset_buffers(struct venus_inst *inst,
+ struct hfi_buffer_desc *bd)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+ int ret;
+
+ reinit_completion(&inst->done);
+
+ ret = ops->session_unset_buffers(inst, bd);
+ if (ret)
+ return ret;
+
+ if (!bd->response_required)
+ return 0;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int hfi_session_get_property(struct venus_inst *inst, u32 ptype,
+ union hfi_get_property *hprop)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+ int ret;
+
+ if (inst->state < INST_INIT || inst->state >= INST_STOP)
+ return -EINVAL;
+
+ reinit_completion(&inst->done);
+
+ ret = ops->session_get_property(inst, ptype);
+ if (ret)
+ return ret;
+
+ ret = wait_session_msg(inst);
+ if (ret)
+ return ret;
+
+ *hprop = inst->hprop;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hfi_session_get_property);
+
+int hfi_session_set_property(struct venus_inst *inst, u32 ptype, void *pdata)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+
+ if (inst->state < INST_INIT || inst->state >= INST_STOP)
+ return -EINVAL;
+
+ return ops->session_set_property(inst, ptype, pdata);
+}
+EXPORT_SYMBOL_GPL(hfi_session_set_property);
+
+int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *fd)
+{
+ const struct hfi_ops *ops = inst->core->ops;
+
+ if (fd->buffer_type == HFI_BUFFER_INPUT)
+ return ops->session_etb(inst, fd);
+ else if (fd->buffer_type == HFI_BUFFER_OUTPUT)
+ return ops->session_ftb(inst, fd);
+
+ return -EINVAL;
+}
+
+irqreturn_t hfi_isr_thread(int irq, void *dev_id)
+{
+ struct venus_core *core = dev_id;
+
+ return core->ops->isr_thread(core);
+}
+
+irqreturn_t hfi_isr(int irq, void *dev)
+{
+ struct venus_core *core = dev;
+
+ return core->ops->isr(core);
+}
+
+int hfi_create(struct venus_core *core, const struct hfi_core_ops *ops)
+{
+ int ret;
+
+ if (!ops)
+ return -EINVAL;
+
+ atomic_set(&core->insts_count, 0);
+ core->core_ops = ops;
+ core->state = CORE_UNINIT;
+ init_completion(&core->done);
+ pkt_set_version(core->res->hfi_version);
+ ret = venus_hfi_create(core);
+
+ return ret;
+}
+
+void hfi_destroy(struct venus_core *core)
+{
+ venus_hfi_destroy(core);
+}
diff --git a/drivers/media/platform/qcom/venus/hfi.h b/drivers/media/platform/qcom/venus/hfi.h
new file mode 100644
index 000000000000..5466b7d60dd0
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __HFI_H__
+#define __HFI_H__
+
+#include <linux/interrupt.h>
+
+#include "hfi_helper.h"
+
+#define VIDC_SESSION_TYPE_VPE 0
+#define VIDC_SESSION_TYPE_ENC 1
+#define VIDC_SESSION_TYPE_DEC 2
+
+#define VIDC_RESOURCE_NONE 0
+#define VIDC_RESOURCE_OCMEM 1
+#define VIDC_RESOURCE_VMEM 2
+
+struct hfi_buffer_desc {
+ u32 buffer_type;
+ u32 buffer_size;
+ u32 num_buffers;
+ u32 device_addr;
+ u32 extradata_addr;
+ u32 extradata_size;
+ u32 response_required;
+};
+
+struct hfi_frame_data {
+ u32 buffer_type;
+ u32 device_addr;
+ u32 extradata_addr;
+ u64 timestamp;
+ u32 flags;
+ u32 offset;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 mark_target;
+ u32 mark_data;
+ u32 clnt_data;
+ u32 extradata_size;
+};
+
+union hfi_get_property {
+ struct hfi_profile_level profile_level;
+ struct hfi_buffer_requirements bufreq[HFI_BUFFER_TYPE_MAX];
+};
+
+/* HFI events */
+#define EVT_SYS_EVENT_CHANGE 1
+#define EVT_SYS_WATCHDOG_TIMEOUT 2
+#define EVT_SYS_ERROR 3
+#define EVT_SESSION_ERROR 4
+
+/* HFI event callback structure */
+struct hfi_event_data {
+ u32 error;
+ u32 height;
+ u32 width;
+ u32 event_type;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 tag;
+ u32 profile;
+ u32 level;
+};
+
+/* define core states */
+#define CORE_UNINIT 0
+#define CORE_INIT 1
+
+/* define instance states */
+#define INST_UNINIT 2
+#define INST_INIT 3
+#define INST_LOAD_RESOURCES 4
+#define INST_START 5
+#define INST_STOP 6
+#define INST_RELEASE_RESOURCES 7
+
+struct venus_core;
+struct venus_inst;
+
+struct hfi_core_ops {
+ void (*event_notify)(struct venus_core *core, u32 event);
+};
+
+struct hfi_inst_ops {
+ void (*buf_done)(struct venus_inst *inst, unsigned int buf_type,
+ u32 tag, u32 bytesused, u32 data_offset, u32 flags,
+ u32 hfi_flags, u64 timestamp_us);
+ void (*event_notify)(struct venus_inst *inst, u32 event,
+ struct hfi_event_data *data);
+};
+
+struct hfi_ops {
+ int (*core_init)(struct venus_core *core);
+ int (*core_deinit)(struct venus_core *core);
+ int (*core_ping)(struct venus_core *core, u32 cookie);
+ int (*core_trigger_ssr)(struct venus_core *core, u32 trigger_type);
+
+ int (*session_init)(struct venus_inst *inst, u32 session_type,
+ u32 codec);
+ int (*session_end)(struct venus_inst *inst);
+ int (*session_abort)(struct venus_inst *inst);
+ int (*session_flush)(struct venus_inst *inst, u32 flush_mode);
+ int (*session_start)(struct venus_inst *inst);
+ int (*session_stop)(struct venus_inst *inst);
+ int (*session_continue)(struct venus_inst *inst);
+ int (*session_etb)(struct venus_inst *inst, struct hfi_frame_data *fd);
+ int (*session_ftb)(struct venus_inst *inst, struct hfi_frame_data *fd);
+ int (*session_set_buffers)(struct venus_inst *inst,
+ struct hfi_buffer_desc *bd);
+ int (*session_unset_buffers)(struct venus_inst *inst,
+ struct hfi_buffer_desc *bd);
+ int (*session_load_res)(struct venus_inst *inst);
+ int (*session_release_res)(struct venus_inst *inst);
+ int (*session_parse_seq_hdr)(struct venus_inst *inst, u32 seq_hdr,
+ u32 seq_hdr_len);
+ int (*session_get_seq_hdr)(struct venus_inst *inst, u32 seq_hdr,
+ u32 seq_hdr_len);
+ int (*session_set_property)(struct venus_inst *inst, u32 ptype,
+ void *pdata);
+ int (*session_get_property)(struct venus_inst *inst, u32 ptype);
+
+ int (*resume)(struct venus_core *core);
+ int (*suspend)(struct venus_core *core);
+
+ /* interrupt operations */
+ irqreturn_t (*isr)(struct venus_core *core);
+ irqreturn_t (*isr_thread)(struct venus_core *core);
+};
+
+int hfi_create(struct venus_core *core, const struct hfi_core_ops *ops);
+void hfi_destroy(struct venus_core *core);
+
+int hfi_core_init(struct venus_core *core);
+int hfi_core_deinit(struct venus_core *core, bool blocking);
+int hfi_core_suspend(struct venus_core *core);
+int hfi_core_resume(struct venus_core *core, bool force);
+int hfi_core_trigger_ssr(struct venus_core *core, u32 type);
+int hfi_core_ping(struct venus_core *core);
+int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops);
+void hfi_session_destroy(struct venus_inst *inst);
+int hfi_session_init(struct venus_inst *inst, u32 pixfmt);
+int hfi_session_deinit(struct venus_inst *inst);
+int hfi_session_start(struct venus_inst *inst);
+int hfi_session_stop(struct venus_inst *inst);
+int hfi_session_continue(struct venus_inst *inst);
+int hfi_session_abort(struct venus_inst *inst);
+int hfi_session_load_res(struct venus_inst *inst);
+int hfi_session_unload_res(struct venus_inst *inst);
+int hfi_session_flush(struct venus_inst *inst);
+int hfi_session_set_buffers(struct venus_inst *inst,
+ struct hfi_buffer_desc *bd);
+int hfi_session_unset_buffers(struct venus_inst *inst,
+ struct hfi_buffer_desc *bd);
+int hfi_session_get_property(struct venus_inst *inst, u32 ptype,
+ union hfi_get_property *hprop);
+int hfi_session_set_property(struct venus_inst *inst, u32 ptype, void *pdata);
+int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *f);
+irqreturn_t hfi_isr_thread(int irq, void *dev_id);
+irqreturn_t hfi_isr(int irq, void *dev);
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
new file mode 100644
index 000000000000..b83c5b8ddccb
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
@@ -0,0 +1,1259 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/hash.h>
+
+#include "hfi_cmds.h"
+
+static enum hfi_version hfi_ver;
+
+void pkt_sys_init(struct hfi_sys_init_pkt *pkt, u32 arch_type)
+{
+ pkt->hdr.size = sizeof(*pkt);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_INIT;
+ pkt->arch_type = arch_type;
+}
+
+void pkt_sys_pc_prep(struct hfi_sys_pc_prep_pkt *pkt)
+{
+ pkt->hdr.size = sizeof(*pkt);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_PC_PREP;
+}
+
+void pkt_sys_idle_indicator(struct hfi_sys_set_property_pkt *pkt, u32 enable)
+{
+ struct hfi_enable *hfi = (struct hfi_enable *)&pkt->data[1];
+
+ pkt->hdr.size = sizeof(*pkt) + sizeof(*hfi) + sizeof(u32);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+ pkt->num_properties = 1;
+ pkt->data[0] = HFI_PROPERTY_SYS_IDLE_INDICATOR;
+ hfi->enable = enable;
+}
+
+void pkt_sys_debug_config(struct hfi_sys_set_property_pkt *pkt, u32 mode,
+ u32 config)
+{
+ struct hfi_debug_config *hfi;
+
+ pkt->hdr.size = sizeof(*pkt) + sizeof(*hfi) + sizeof(u32);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+ pkt->num_properties = 1;
+ pkt->data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG;
+ hfi = (struct hfi_debug_config *)&pkt->data[1];
+ hfi->config = config;
+ hfi->mode = mode;
+}
+
+void pkt_sys_coverage_config(struct hfi_sys_set_property_pkt *pkt, u32 mode)
+{
+ pkt->hdr.size = sizeof(*pkt) + sizeof(u32);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+ pkt->num_properties = 1;
+ pkt->data[0] = HFI_PROPERTY_SYS_CONFIG_COVERAGE;
+ pkt->data[1] = mode;
+}
+
+int pkt_sys_set_resource(struct hfi_sys_set_resource_pkt *pkt, u32 id, u32 size,
+ u32 addr, void *cookie)
+{
+ pkt->hdr.size = sizeof(*pkt);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_SET_RESOURCE;
+ pkt->resource_handle = hash32_ptr(cookie);
+
+ switch (id) {
+ case VIDC_RESOURCE_OCMEM:
+ case VIDC_RESOURCE_VMEM: {
+ struct hfi_resource_ocmem *res =
+ (struct hfi_resource_ocmem *)&pkt->resource_data[0];
+
+ res->size = size;
+ res->mem = addr;
+ pkt->resource_type = HFI_RESOURCE_OCMEM;
+ pkt->hdr.size += sizeof(*res) - sizeof(u32);
+ break;
+ }
+ case VIDC_RESOURCE_NONE:
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+int pkt_sys_unset_resource(struct hfi_sys_release_resource_pkt *pkt, u32 id,
+ u32 size, void *cookie)
+{
+ pkt->hdr.size = sizeof(*pkt);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_RELEASE_RESOURCE;
+ pkt->resource_handle = hash32_ptr(cookie);
+
+ switch (id) {
+ case VIDC_RESOURCE_OCMEM:
+ case VIDC_RESOURCE_VMEM:
+ pkt->resource_type = HFI_RESOURCE_OCMEM;
+ break;
+ case VIDC_RESOURCE_NONE:
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+void pkt_sys_ping(struct hfi_sys_ping_pkt *pkt, u32 cookie)
+{
+ pkt->hdr.size = sizeof(*pkt);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_PING;
+ pkt->client_data = cookie;
+}
+
+void pkt_sys_power_control(struct hfi_sys_set_property_pkt *pkt, u32 enable)
+{
+ struct hfi_enable *hfi = (struct hfi_enable *)&pkt->data[1];
+
+ pkt->hdr.size = sizeof(*pkt) + sizeof(*hfi) + sizeof(u32);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+ pkt->num_properties = 1;
+ pkt->data[0] = HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL;
+ hfi->enable = enable;
+}
+
+int pkt_sys_ssr_cmd(struct hfi_sys_test_ssr_pkt *pkt, u32 trigger_type)
+{
+ switch (trigger_type) {
+ case HFI_TEST_SSR_SW_ERR_FATAL:
+ case HFI_TEST_SSR_SW_DIV_BY_ZERO:
+ case HFI_TEST_SSR_HW_WDOG_IRQ:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pkt->hdr.size = sizeof(*pkt);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_TEST_SSR;
+ pkt->trigger_type = trigger_type;
+
+ return 0;
+}
+
+void pkt_sys_image_version(struct hfi_sys_get_property_pkt *pkt)
+{
+ pkt->hdr.size = sizeof(*pkt);
+ pkt->hdr.pkt_type = HFI_CMD_SYS_GET_PROPERTY;
+ pkt->num_properties = 1;
+ pkt->data[0] = HFI_PROPERTY_SYS_IMAGE_VERSION;
+}
+
+int pkt_session_init(struct hfi_session_init_pkt *pkt, void *cookie,
+ u32 session_type, u32 codec)
+{
+ if (!pkt || !cookie || !codec)
+ return -EINVAL;
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SYS_SESSION_INIT;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->session_domain = session_type;
+ pkt->session_codec = codec;
+
+ return 0;
+}
+
+void pkt_session_cmd(struct hfi_session_pkt *pkt, u32 pkt_type, void *cookie)
+{
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = pkt_type;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+}
+
+int pkt_session_set_buffers(struct hfi_session_set_buffers_pkt *pkt,
+ void *cookie, struct hfi_buffer_desc *bd)
+{
+ unsigned int i;
+
+ if (!cookie || !pkt || !bd)
+ return -EINVAL;
+
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_BUFFERS;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->buffer_size = bd->buffer_size;
+ pkt->min_buffer_size = bd->buffer_size;
+ pkt->num_buffers = bd->num_buffers;
+
+ if (bd->buffer_type == HFI_BUFFER_OUTPUT ||
+ bd->buffer_type == HFI_BUFFER_OUTPUT2) {
+ struct hfi_buffer_info *bi;
+
+ pkt->extradata_size = bd->extradata_size;
+ pkt->shdr.hdr.size = sizeof(*pkt) - sizeof(u32) +
+ (bd->num_buffers * sizeof(*bi));
+ bi = (struct hfi_buffer_info *)pkt->buffer_info;
+ for (i = 0; i < pkt->num_buffers; i++) {
+ bi->buffer_addr = bd->device_addr;
+ bi->extradata_addr = bd->extradata_addr;
+ }
+ } else {
+ pkt->extradata_size = 0;
+ pkt->shdr.hdr.size = sizeof(*pkt) +
+ ((bd->num_buffers - 1) * sizeof(u32));
+ for (i = 0; i < pkt->num_buffers; i++)
+ pkt->buffer_info[i] = bd->device_addr;
+ }
+
+ pkt->buffer_type = bd->buffer_type;
+
+ return 0;
+}
+
+int pkt_session_unset_buffers(struct hfi_session_release_buffer_pkt *pkt,
+ void *cookie, struct hfi_buffer_desc *bd)
+{
+ unsigned int i;
+
+ if (!cookie || !pkt || !bd)
+ return -EINVAL;
+
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_RELEASE_BUFFERS;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->buffer_size = bd->buffer_size;
+ pkt->num_buffers = bd->num_buffers;
+
+ if (bd->buffer_type == HFI_BUFFER_OUTPUT ||
+ bd->buffer_type == HFI_BUFFER_OUTPUT2) {
+ struct hfi_buffer_info *bi;
+
+ bi = (struct hfi_buffer_info *)pkt->buffer_info;
+ for (i = 0; i < pkt->num_buffers; i++) {
+ bi->buffer_addr = bd->device_addr;
+ bi->extradata_addr = bd->extradata_addr;
+ }
+ pkt->shdr.hdr.size =
+ sizeof(struct hfi_session_set_buffers_pkt) -
+ sizeof(u32) + (bd->num_buffers * sizeof(*bi));
+ } else {
+ for (i = 0; i < pkt->num_buffers; i++)
+ pkt->buffer_info[i] = bd->device_addr;
+
+ pkt->extradata_size = 0;
+ pkt->shdr.hdr.size =
+ sizeof(struct hfi_session_set_buffers_pkt) +
+ ((bd->num_buffers - 1) * sizeof(u32));
+ }
+
+ pkt->response_req = bd->response_required;
+ pkt->buffer_type = bd->buffer_type;
+
+ return 0;
+}
+
+int pkt_session_etb_decoder(struct hfi_session_empty_buffer_compressed_pkt *pkt,
+ void *cookie, struct hfi_frame_data *in_frame)
+{
+ if (!cookie || !in_frame->device_addr)
+ return -EINVAL;
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->time_stamp_hi = upper_32_bits(in_frame->timestamp);
+ pkt->time_stamp_lo = lower_32_bits(in_frame->timestamp);
+ pkt->flags = in_frame->flags;
+ pkt->mark_target = in_frame->mark_target;
+ pkt->mark_data = in_frame->mark_data;
+ pkt->offset = in_frame->offset;
+ pkt->alloc_len = in_frame->alloc_len;
+ pkt->filled_len = in_frame->filled_len;
+ pkt->input_tag = in_frame->clnt_data;
+ pkt->packet_buffer = in_frame->device_addr;
+
+ return 0;
+}
+
+int pkt_session_etb_encoder(
+ struct hfi_session_empty_buffer_uncompressed_plane0_pkt *pkt,
+ void *cookie, struct hfi_frame_data *in_frame)
+{
+ if (!cookie || !in_frame->device_addr)
+ return -EINVAL;
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->view_id = 0;
+ pkt->time_stamp_hi = upper_32_bits(in_frame->timestamp);
+ pkt->time_stamp_lo = lower_32_bits(in_frame->timestamp);
+ pkt->flags = in_frame->flags;
+ pkt->mark_target = in_frame->mark_target;
+ pkt->mark_data = in_frame->mark_data;
+ pkt->offset = in_frame->offset;
+ pkt->alloc_len = in_frame->alloc_len;
+ pkt->filled_len = in_frame->filled_len;
+ pkt->input_tag = in_frame->clnt_data;
+ pkt->packet_buffer = in_frame->device_addr;
+ pkt->extradata_buffer = in_frame->extradata_addr;
+
+ return 0;
+}
+
+int pkt_session_ftb(struct hfi_session_fill_buffer_pkt *pkt, void *cookie,
+ struct hfi_frame_data *out_frame)
+{
+ if (!cookie || !out_frame || !out_frame->device_addr)
+ return -EINVAL;
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_FILL_BUFFER;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+
+ if (out_frame->buffer_type == HFI_BUFFER_OUTPUT)
+ pkt->stream_id = 0;
+ else if (out_frame->buffer_type == HFI_BUFFER_OUTPUT2)
+ pkt->stream_id = 1;
+
+ pkt->output_tag = out_frame->clnt_data;
+ pkt->packet_buffer = out_frame->device_addr;
+ pkt->extradata_buffer = out_frame->extradata_addr;
+ pkt->alloc_len = out_frame->alloc_len;
+ pkt->filled_len = out_frame->filled_len;
+ pkt->offset = out_frame->offset;
+ pkt->data[0] = out_frame->extradata_size;
+
+ return 0;
+}
+
+int pkt_session_parse_seq_header(
+ struct hfi_session_parse_sequence_header_pkt *pkt,
+ void *cookie, u32 seq_hdr, u32 seq_hdr_len)
+{
+ if (!cookie || !seq_hdr || !seq_hdr_len)
+ return -EINVAL;
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->header_len = seq_hdr_len;
+ pkt->packet_buffer = seq_hdr;
+
+ return 0;
+}
+
+int pkt_session_get_seq_hdr(struct hfi_session_get_sequence_header_pkt *pkt,
+ void *cookie, u32 seq_hdr, u32 seq_hdr_len)
+{
+ if (!cookie || !seq_hdr || !seq_hdr_len)
+ return -EINVAL;
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_SEQUENCE_HEADER;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->buffer_len = seq_hdr_len;
+ pkt->packet_buffer = seq_hdr;
+
+ return 0;
+}
+
+int pkt_session_flush(struct hfi_session_flush_pkt *pkt, void *cookie, u32 type)
+{
+ switch (type) {
+ case HFI_FLUSH_INPUT:
+ case HFI_FLUSH_OUTPUT:
+ case HFI_FLUSH_OUTPUT2:
+ case HFI_FLUSH_ALL:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_FLUSH;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->flush_type = type;
+
+ return 0;
+}
+
+static int pkt_session_get_property_1x(struct hfi_session_get_property_pkt *pkt,
+ void *cookie, u32 ptype)
+{
+ switch (ptype) {
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
+ case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_PROPERTY;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->num_properties = 1;
+ pkt->data[0] = ptype;
+
+ return 0;
+}
+
+static int pkt_session_set_property_1x(struct hfi_session_set_property_pkt *pkt,
+ void *cookie, u32 ptype, void *pdata)
+{
+ void *prop_data;
+ int ret = 0;
+
+ if (!pkt || !cookie || !pdata)
+ return -EINVAL;
+
+ prop_data = &pkt->data[1];
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->num_properties = 1;
+
+ switch (ptype) {
+ case HFI_PROPERTY_CONFIG_FRAME_RATE: {
+ struct hfi_framerate *in = pdata, *frate = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_CONFIG_FRAME_RATE;
+ frate->buffer_type = in->buffer_type;
+ frate->framerate = in->framerate;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*frate);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT: {
+ struct hfi_uncompressed_format_select *in = pdata;
+ struct hfi_uncompressed_format_select *hfi = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
+ hfi->buffer_type = in->buffer_type;
+ hfi->format = in->format;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hfi);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_FRAME_SIZE: {
+ struct hfi_framesize *in = pdata, *fsize = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_FRAME_SIZE;
+ fsize->buffer_type = in->buffer_type;
+ fsize->height = in->height;
+ fsize->width = in->width;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*fsize);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_REALTIME: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_CONFIG_REALTIME;
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) * 2;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL: {
+ struct hfi_buffer_count_actual *in = pdata, *count = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
+ count->count_actual = in->count_actual;
+ count->type = in->type;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL: {
+ struct hfi_buffer_size_actual *in = pdata, *sz = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL;
+ sz->size = in->size;
+ sz->type = in->type;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*sz);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_BUFFER_DISPLAY_HOLD_COUNT_ACTUAL: {
+ struct hfi_buffer_display_hold_count_actual *in = pdata;
+ struct hfi_buffer_display_hold_count_actual *count = prop_data;
+
+ pkt->data[0] =
+ HFI_PROPERTY_PARAM_BUFFER_DISPLAY_HOLD_COUNT_ACTUAL;
+ count->hold_count = in->hold_count;
+ count->type = in->type;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT: {
+ struct hfi_nal_stream_format_select *in = pdata;
+ struct hfi_nal_stream_format_select *fmt = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT;
+ fmt->format = in->format;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*fmt);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER: {
+ u32 *in = pdata;
+
+ switch (*in) {
+ case HFI_OUTPUT_ORDER_DECODE:
+ case HFI_OUTPUT_ORDER_DISPLAY:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER;
+ pkt->data[1] = *in;
+ pkt->shdr.hdr.size += sizeof(u32) * 2;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE: {
+ struct hfi_enable_picture *in = pdata, *en = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE;
+ en->picture_type = in->picture_type;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ pkt->data[0] =
+ HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO;
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER: {
+ struct hfi_enable *in = pdata;
+ struct hfi_enable *en = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER;
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM: {
+ struct hfi_multi_stream *in = pdata, *multi = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
+ multi->buffer_type = in->buffer_type;
+ multi->enable = in->enable;
+ multi->width = in->width;
+ multi->height = in->height;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*multi);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT: {
+ struct hfi_display_picture_buffer_count *in = pdata;
+ struct hfi_display_picture_buffer_count *count = prop_data;
+
+ pkt->data[0] =
+ HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT;
+ count->count = in->count;
+ count->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_DIVX_FORMAT: {
+ u32 *in = pdata;
+
+ switch (*in) {
+ case HFI_DIVX_FORMAT_4:
+ case HFI_DIVX_FORMAT_5:
+ case HFI_DIVX_FORMAT_6:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_DIVX_FORMAT;
+ pkt->data[1] = *in;
+ pkt->shdr.hdr.size += sizeof(u32) * 2;
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING;
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER;
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE;
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ pkt->data[0] =
+ HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER;
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME:
+ pkt->data[0] = HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME;
+ pkt->shdr.hdr.size += sizeof(u32);
+ break;
+ case HFI_PROPERTY_PARAM_VENC_MPEG4_SHORT_HEADER:
+ break;
+ case HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION:
+ break;
+ case HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE: {
+ struct hfi_bitrate *in = pdata, *brate = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE;
+ brate->bitrate = in->bitrate;
+ brate->layer_id = in->layer_id;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*brate);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE: {
+ struct hfi_bitrate *in = pdata, *hfi = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE;
+ hfi->bitrate = in->bitrate;
+ hfi->layer_id = in->layer_id;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hfi);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT: {
+ struct hfi_profile_level *in = pdata, *pl = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
+ pl->level = in->level;
+ pl->profile = in->profile;
+ if (pl->profile <= 0)
+ /* Profile not supported, falling back to high */
+ pl->profile = HFI_H264_PROFILE_HIGH;
+
+ if (!pl->level)
+ /* Level not supported, falling back to 1 */
+ pl->level = 1;
+
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*pl);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL: {
+ struct hfi_h264_entropy_control *in = pdata, *hfi = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL;
+ hfi->entropy_mode = in->entropy_mode;
+ if (hfi->entropy_mode == HFI_H264_ENTROPY_CABAC)
+ hfi->cabac_model = in->cabac_model;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hfi);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_RATE_CONTROL: {
+ u32 *in = pdata;
+
+ switch (*in) {
+ case HFI_RATE_CONTROL_OFF:
+ case HFI_RATE_CONTROL_CBR_CFR:
+ case HFI_RATE_CONTROL_CBR_VFR:
+ case HFI_RATE_CONTROL_VBR_CFR:
+ case HFI_RATE_CONTROL_VBR_VFR:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_RATE_CONTROL;
+ pkt->data[1] = *in;
+ pkt->shdr.hdr.size += sizeof(u32) * 2;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION: {
+ struct hfi_mpeg4_time_resolution *in = pdata, *res = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION;
+ res->time_increment_resolution = in->time_increment_resolution;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*res);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION: {
+ struct hfi_mpeg4_header_extension *in = pdata, *ext = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION;
+ ext->header_extension = in->header_extension;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ext);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL: {
+ struct hfi_h264_db_control *in = pdata, *db = prop_data;
+
+ switch (in->mode) {
+ case HFI_H264_DB_MODE_DISABLE:
+ case HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY:
+ case HFI_H264_DB_MODE_ALL_BOUNDARY:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL;
+ db->mode = in->mode;
+ db->slice_alpha_offset = in->slice_alpha_offset;
+ db->slice_beta_offset = in->slice_beta_offset;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*db);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_SESSION_QP: {
+ struct hfi_quantization *in = pdata, *quant = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_SESSION_QP;
+ quant->qp_i = in->qp_i;
+ quant->qp_p = in->qp_p;
+ quant->qp_b = in->qp_b;
+ quant->layer_id = in->layer_id;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*quant);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE: {
+ struct hfi_quantization_range *in = pdata, *range = prop_data;
+ u32 min_qp, max_qp;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE;
+ min_qp = in->min_qp;
+ max_qp = in->max_qp;
+
+ /* We'll be packing in the qp, so make sure we
+ * won't be losing data when masking
+ */
+ if (min_qp > 0xff || max_qp > 0xff) {
+ ret = -ERANGE;
+ break;
+ }
+
+ /* When creating the packet, pack the qp value as
+ * 0xiippbb, where ii = qp range for I-frames,
+ * pp = qp range for P-frames, etc.
+ */
+ range->min_qp = min_qp | min_qp << 8 | min_qp << 16;
+ range->max_qp = max_qp | max_qp << 8 | max_qp << 16;
+ range->layer_id = in->layer_id;
+
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*range);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_VC1_PERF_CFG: {
+ struct hfi_vc1e_perf_cfg_type *in = pdata, *perf = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_VC1_PERF_CFG;
+
+ memcpy(perf->search_range_x_subsampled,
+ in->search_range_x_subsampled,
+ sizeof(perf->search_range_x_subsampled));
+ memcpy(perf->search_range_y_subsampled,
+ in->search_range_y_subsampled,
+ sizeof(perf->search_range_y_subsampled));
+
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*perf);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES: {
+ struct hfi_max_num_b_frames *bframes = prop_data;
+ u32 *in = pdata;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES;
+ bframes->max_num_b_frames = *in;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*bframes);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD: {
+ struct hfi_intra_period *in = pdata, *intra = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD;
+ intra->pframes = in->pframes;
+ intra->bframes = in->bframes;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*intra);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD: {
+ struct hfi_idr_period *in = pdata, *idr = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD;
+ idr->idr_period = in->idr_period;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*idr);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR: {
+ struct hfi_conceal_color *color = prop_data;
+ u32 *in = pdata;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR;
+ color->conceal_color = *in;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*color);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VPE_OPERATIONS: {
+ struct hfi_operations_type *in = pdata, *ops = prop_data;
+
+ switch (in->rotation) {
+ case HFI_ROTATE_NONE:
+ case HFI_ROTATE_90:
+ case HFI_ROTATE_180:
+ case HFI_ROTATE_270:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ switch (in->flip) {
+ case HFI_FLIP_NONE:
+ case HFI_FLIP_HORIZONTAL:
+ case HFI_FLIP_VERTICAL:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pkt->data[0] = HFI_PROPERTY_CONFIG_VPE_OPERATIONS;
+ ops->rotation = in->rotation;
+ ops->flip = in->flip;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ops);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH: {
+ struct hfi_intra_refresh *in = pdata, *intra = prop_data;
+
+ switch (in->mode) {
+ case HFI_INTRA_REFRESH_NONE:
+ case HFI_INTRA_REFRESH_ADAPTIVE:
+ case HFI_INTRA_REFRESH_CYCLIC:
+ case HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE:
+ case HFI_INTRA_REFRESH_RANDOM:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH;
+ intra->mode = in->mode;
+ intra->air_mbs = in->air_mbs;
+ intra->air_ref = in->air_ref;
+ intra->cir_mbs = in->cir_mbs;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*intra);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL: {
+ struct hfi_multi_slice_control *in = pdata, *multi = prop_data;
+
+ switch (in->multi_slice) {
+ case HFI_MULTI_SLICE_OFF:
+ case HFI_MULTI_SLICE_GOB:
+ case HFI_MULTI_SLICE_BY_MB_COUNT:
+ case HFI_MULTI_SLICE_BY_BYTE_COUNT:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL;
+ multi->multi_slice = in->multi_slice;
+ multi->slice_size = in->slice_size;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*multi);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE;
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO: {
+ struct hfi_h264_vui_timing_info *in = pdata, *vui = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO;
+ vui->enable = in->enable;
+ vui->fixed_framerate = in->fixed_framerate;
+ vui->time_scale = in->time_scale;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*vui);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VPE_DEINTERLACE: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_CONFIG_VPE_DEINTERLACE;
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL;
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE: {
+ struct hfi_buffer_alloc_mode *in = pdata, *mode = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE;
+ mode->type = in->type;
+ mode->mode = in->mode;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*mode);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY;
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ pkt->data[0] =
+ HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC;
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY;
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_SCS_THRESHOLD: {
+ struct hfi_scs_threshold *thres = prop_data;
+ u32 *in = pdata;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_SCS_THRESHOLD;
+ thres->threshold_value = *in;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*thres);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT: {
+ struct hfi_mvc_buffer_layout_descp_type *in = pdata;
+ struct hfi_mvc_buffer_layout_descp_type *mvc = prop_data;
+
+ switch (in->layout_type) {
+ case HFI_MVC_BUFFER_LAYOUT_TOP_BOTTOM:
+ case HFI_MVC_BUFFER_LAYOUT_SEQ:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT;
+ mvc->layout_type = in->layout_type;
+ mvc->bright_view_first = in->bright_view_first;
+ mvc->ngap = in->ngap;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*mvc);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_LTRMODE: {
+ struct hfi_ltr_mode *in = pdata, *ltr = prop_data;
+
+ switch (in->ltr_mode) {
+ case HFI_LTR_MODE_DISABLE:
+ case HFI_LTR_MODE_MANUAL:
+ case HFI_LTR_MODE_PERIODIC:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_LTRMODE;
+ ltr->ltr_mode = in->ltr_mode;
+ ltr->ltr_count = in->ltr_count;
+ ltr->trust_mode = in->trust_mode;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ltr);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_USELTRFRAME: {
+ struct hfi_ltr_use *in = pdata, *ltr_use = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_CONFIG_VENC_USELTRFRAME;
+ ltr_use->frames = in->frames;
+ ltr_use->ref_ltr = in->ref_ltr;
+ ltr_use->use_constrnt = in->use_constrnt;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ltr_use);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME: {
+ struct hfi_ltr_mark *in = pdata, *ltr_mark = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME;
+ ltr_mark->mark_frame = in->mark_frame;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ltr_mark);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER: {
+ u32 *in = pdata;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER;
+ pkt->data[1] = *in;
+ pkt->shdr.hdr.size += sizeof(u32) * 2;
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER: {
+ u32 *in = pdata;
+
+ pkt->data[0] = HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER;
+ pkt->data[1] = *in;
+ pkt->shdr.hdr.size += sizeof(u32) * 2;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_DISABLE_RC_TIMESTAMP: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_DISABLE_RC_TIMESTAMP;
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_INITIAL_QP: {
+ struct hfi_initial_quantization *in = pdata, *quant = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_INITIAL_QP;
+ quant->init_qp_enable = in->init_qp_enable;
+ quant->qp_i = in->qp_i;
+ quant->qp_p = in->qp_p;
+ quant->qp_b = in->qp_b;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*quant);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VPE_COLOR_SPACE_CONVERSION: {
+ struct hfi_vpe_color_space_conversion *in = pdata;
+ struct hfi_vpe_color_space_conversion *csc = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VPE_COLOR_SPACE_CONVERSION;
+ memcpy(csc->csc_matrix, in->csc_matrix,
+ sizeof(csc->csc_matrix));
+ memcpy(csc->csc_bias, in->csc_bias, sizeof(csc->csc_bias));
+ memcpy(csc->csc_limit, in->csc_limit, sizeof(csc->csc_limit));
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*csc);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ pkt->data[0] =
+ HFI_PROPERTY_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE;
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT;
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_CONFIG_VENC_PERF_MODE: {
+ u32 *in = pdata;
+
+ pkt->data[0] = HFI_PROPERTY_CONFIG_VENC_PERF_MODE;
+ pkt->data[1] = *in;
+ pkt->shdr.hdr.size += sizeof(u32) * 2;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_HIER_B_MAX_NUM_ENH_LAYER: {
+ u32 *in = pdata;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_HIER_B_MAX_NUM_ENH_LAYER;
+ pkt->data[1] = *in;
+ pkt->shdr.hdr.size += sizeof(u32) * 2;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_NONCP_OUTPUT2: {
+ struct hfi_enable *in = pdata, *en = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_NONCP_OUTPUT2;
+ en->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE: {
+ struct hfi_hybrid_hierp *in = pdata, *hierp = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE;
+ hierp->layers = in->layers;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hierp);
+ break;
+ }
+
+	/* the following properties are not yet implemented in the core */
+ case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+ case HFI_PROPERTY_CONFIG_PRIORITY:
+ case HFI_PROPERTY_CONFIG_BATCH_INFO:
+ case HFI_PROPERTY_SYS_IDLE_INDICATOR:
+ case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
+ case HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED:
+ case HFI_PROPERTY_PARAM_CHROMA_SITE:
+ case HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED:
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED:
+ case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED:
+ case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED:
+ case HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT:
+ case HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE:
+ case HFI_PROPERTY_PARAM_CODEC_SUPPORTED:
+ case HFI_PROPERTY_PARAM_VDEC_MULTI_VIEW_SELECT:
+ case HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION:
+ case HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB:
+ case HFI_PROPERTY_PARAM_VDEC_H264_ENTROPY_SWITCHING:
+ case HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO:
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int
+pkt_session_get_property_3xx(struct hfi_session_get_property_pkt *pkt,
+ void *cookie, u32 ptype)
+{
+ int ret = 0;
+
+ if (!pkt || !cookie)
+ return -EINVAL;
+
+ pkt->shdr.hdr.size = sizeof(struct hfi_session_get_property_pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_PROPERTY;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->num_properties = 1;
+
+ switch (ptype) {
+ case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
+ pkt->data[0] = HFI_PROPERTY_CONFIG_VDEC_ENTROPY;
+ break;
+ default:
+ ret = pkt_session_get_property_1x(pkt, cookie, ptype);
+ break;
+ }
+
+ return ret;
+}
+
+static int
+pkt_session_set_property_3xx(struct hfi_session_set_property_pkt *pkt,
+ void *cookie, u32 ptype, void *pdata)
+{
+ void *prop_data;
+ int ret = 0;
+
+ if (!pkt || !cookie || !pdata)
+ return -EINVAL;
+
+ prop_data = &pkt->data[1];
+
+ pkt->shdr.hdr.size = sizeof(*pkt);
+ pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY;
+ pkt->shdr.session_id = hash32_ptr(cookie);
+ pkt->num_properties = 1;
+
+ /*
+	 * Any session set property whose packetization differs for the 3XX
+	 * firmware should be added as a new case below. All unchanged session
+	 * set properties are handled by the default case.
+ */
+ switch (ptype) {
+ case HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM: {
+ struct hfi_multi_stream *in = pdata;
+ struct hfi_multi_stream_3x *multi = prop_data;
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
+ multi->buffer_type = in->buffer_type;
+ multi->enable = in->enable;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*multi);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH: {
+ struct hfi_intra_refresh *in = pdata;
+ struct hfi_intra_refresh_3x *intra = prop_data;
+
+ switch (in->mode) {
+ case HFI_INTRA_REFRESH_NONE:
+ case HFI_INTRA_REFRESH_ADAPTIVE:
+ case HFI_INTRA_REFRESH_CYCLIC:
+ case HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE:
+ case HFI_INTRA_REFRESH_RANDOM:
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pkt->data[0] = HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH;
+ intra->mode = in->mode;
+ intra->mbs = in->cir_mbs;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*intra);
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER:
+		/* with 3xx firmware, session_continue is used instead */
+ break;
+ default:
+ ret = pkt_session_set_property_1x(pkt, cookie, ptype, pdata);
+ break;
+ }
+
+ return ret;
+}
+
+int pkt_session_get_property(struct hfi_session_get_property_pkt *pkt,
+ void *cookie, u32 ptype)
+{
+ if (hfi_ver == HFI_VERSION_1XX)
+ return pkt_session_get_property_1x(pkt, cookie, ptype);
+
+ return pkt_session_get_property_3xx(pkt, cookie, ptype);
+}
+
+int pkt_session_set_property(struct hfi_session_set_property_pkt *pkt,
+ void *cookie, u32 ptype, void *pdata)
+{
+ if (hfi_ver == HFI_VERSION_1XX)
+ return pkt_session_set_property_1x(pkt, cookie, ptype, pdata);
+
+ return pkt_session_set_property_3xx(pkt, cookie, ptype, pdata);
+}
+
+void pkt_set_version(enum hfi_version version)
+{
+ hfi_ver = version;
+}
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.h b/drivers/media/platform/qcom/venus/hfi_cmds.h
new file mode 100644
index 000000000000..f7617cf59914
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __VENUS_HFI_CMDS_H__
+#define __VENUS_HFI_CMDS_H__
+
+#include "hfi.h"
+
+/* commands */
+#define HFI_CMD_SYS_INIT 0x10001
+#define HFI_CMD_SYS_PC_PREP 0x10002
+#define HFI_CMD_SYS_SET_RESOURCE 0x10003
+#define HFI_CMD_SYS_RELEASE_RESOURCE 0x10004
+#define HFI_CMD_SYS_SET_PROPERTY 0x10005
+#define HFI_CMD_SYS_GET_PROPERTY 0x10006
+#define HFI_CMD_SYS_SESSION_INIT 0x10007
+#define HFI_CMD_SYS_SESSION_END 0x10008
+#define HFI_CMD_SYS_SET_BUFFERS 0x10009
+#define HFI_CMD_SYS_TEST_SSR 0x10101
+
+#define HFI_CMD_SESSION_SET_PROPERTY 0x11001
+#define HFI_CMD_SESSION_SET_BUFFERS 0x11002
+#define HFI_CMD_SESSION_GET_SEQUENCE_HEADER 0x11003
+
+#define HFI_CMD_SYS_SESSION_ABORT 0x210001
+#define HFI_CMD_SYS_PING 0x210002
+
+#define HFI_CMD_SESSION_LOAD_RESOURCES 0x211001
+#define HFI_CMD_SESSION_START 0x211002
+#define HFI_CMD_SESSION_STOP 0x211003
+#define HFI_CMD_SESSION_EMPTY_BUFFER 0x211004
+#define HFI_CMD_SESSION_FILL_BUFFER 0x211005
+#define HFI_CMD_SESSION_SUSPEND 0x211006
+#define HFI_CMD_SESSION_RESUME 0x211007
+#define HFI_CMD_SESSION_FLUSH 0x211008
+#define HFI_CMD_SESSION_GET_PROPERTY 0x211009
+#define HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER 0x21100a
+#define HFI_CMD_SESSION_RELEASE_BUFFERS 0x21100b
+#define HFI_CMD_SESSION_RELEASE_RESOURCES 0x21100c
+#define HFI_CMD_SESSION_CONTINUE 0x21100d
+#define HFI_CMD_SESSION_SYNC 0x21100e
+
+/* command packets */
+struct hfi_sys_init_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 arch_type;
+};
+
+struct hfi_sys_pc_prep_pkt {
+ struct hfi_pkt_hdr hdr;
+};
+
+struct hfi_sys_set_resource_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 resource_handle;
+ u32 resource_type;
+ u32 resource_data[1];
+};
+
+struct hfi_sys_release_resource_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 resource_type;
+ u32 resource_handle;
+};
+
+struct hfi_sys_set_property_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 num_properties;
+ u32 data[1];
+};
+
+struct hfi_sys_get_property_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 num_properties;
+ u32 data[1];
+};
+
+struct hfi_sys_set_buffers_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 buffer_type;
+ u32 buffer_size;
+ u32 num_buffers;
+ u32 buffer_addr[1];
+};
+
+struct hfi_sys_ping_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 client_data;
+};
+
+struct hfi_session_init_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 session_domain;
+ u32 session_codec;
+};
+
+struct hfi_session_end_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+struct hfi_session_abort_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+struct hfi_session_set_property_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 num_properties;
+ u32 data[0];
+};
+
+struct hfi_session_set_buffers_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 buffer_type;
+ u32 buffer_size;
+ u32 extradata_size;
+ u32 min_buffer_size;
+ u32 num_buffers;
+ u32 buffer_info[1];
+};
+
+struct hfi_session_get_sequence_header_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 buffer_len;
+ u32 packet_buffer;
+};
+
+struct hfi_session_load_resources_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+struct hfi_session_start_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+struct hfi_session_stop_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+struct hfi_session_empty_buffer_compressed_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 offset;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 input_tag;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data[1];
+};
+
+struct hfi_session_empty_buffer_uncompressed_plane0_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 view_id;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u32 input_tag;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data[1];
+};
+
+struct hfi_session_empty_buffer_uncompressed_plane1_pkt {
+ u32 flags;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u32 packet_buffer2;
+ u32 data[1];
+};
+
+struct hfi_session_empty_buffer_uncompressed_plane2_pkt {
+ u32 flags;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u32 packet_buffer3;
+ u32 data[1];
+};
+
+struct hfi_session_fill_buffer_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 stream_id;
+ u32 offset;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 output_tag;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data[1];
+};
+
+struct hfi_session_flush_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 flush_type;
+};
+
+struct hfi_session_suspend_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+struct hfi_session_resume_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+struct hfi_session_get_property_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 num_properties;
+ u32 data[1];
+};
+
+struct hfi_session_release_buffer_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 buffer_type;
+ u32 buffer_size;
+ u32 extradata_size;
+ u32 response_req;
+ u32 num_buffers;
+ u32 buffer_info[1];
+};
+
+struct hfi_session_release_resources_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+struct hfi_session_parse_sequence_header_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 header_len;
+ u32 packet_buffer;
+};
+
+struct hfi_sfr {
+ u32 buf_size;
+ u8 data[1];
+};
+
+struct hfi_sys_test_ssr_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 trigger_type;
+};
+
+void pkt_set_version(enum hfi_version version);
+
+void pkt_sys_init(struct hfi_sys_init_pkt *pkt, u32 arch_type);
+void pkt_sys_pc_prep(struct hfi_sys_pc_prep_pkt *pkt);
+void pkt_sys_idle_indicator(struct hfi_sys_set_property_pkt *pkt, u32 enable);
+void pkt_sys_power_control(struct hfi_sys_set_property_pkt *pkt, u32 enable);
+int pkt_sys_set_resource(struct hfi_sys_set_resource_pkt *pkt, u32 id, u32 size,
+ u32 addr, void *cookie);
+int pkt_sys_unset_resource(struct hfi_sys_release_resource_pkt *pkt, u32 id,
+ u32 size, void *cookie);
+void pkt_sys_debug_config(struct hfi_sys_set_property_pkt *pkt, u32 mode,
+ u32 config);
+void pkt_sys_coverage_config(struct hfi_sys_set_property_pkt *pkt, u32 mode);
+void pkt_sys_ping(struct hfi_sys_ping_pkt *pkt, u32 cookie);
+void pkt_sys_image_version(struct hfi_sys_get_property_pkt *pkt);
+int pkt_sys_ssr_cmd(struct hfi_sys_test_ssr_pkt *pkt, u32 trigger_type);
+int pkt_session_init(struct hfi_session_init_pkt *pkt, void *cookie,
+ u32 session_type, u32 codec);
+void pkt_session_cmd(struct hfi_session_pkt *pkt, u32 pkt_type, void *cookie);
+int pkt_session_set_buffers(struct hfi_session_set_buffers_pkt *pkt,
+ void *cookie, struct hfi_buffer_desc *bd);
+int pkt_session_unset_buffers(struct hfi_session_release_buffer_pkt *pkt,
+ void *cookie, struct hfi_buffer_desc *bd);
+int pkt_session_etb_decoder(struct hfi_session_empty_buffer_compressed_pkt *pkt,
+ void *cookie, struct hfi_frame_data *input_frame);
+int pkt_session_etb_encoder(
+ struct hfi_session_empty_buffer_uncompressed_plane0_pkt *pkt,
+ void *cookie, struct hfi_frame_data *input_frame);
+int pkt_session_ftb(struct hfi_session_fill_buffer_pkt *pkt,
+ void *cookie, struct hfi_frame_data *output_frame);
+int pkt_session_parse_seq_header(
+ struct hfi_session_parse_sequence_header_pkt *pkt,
+ void *cookie, u32 seq_hdr, u32 seq_hdr_len);
+int pkt_session_get_seq_hdr(struct hfi_session_get_sequence_header_pkt *pkt,
+ void *cookie, u32 seq_hdr, u32 seq_hdr_len);
+int pkt_session_flush(struct hfi_session_flush_pkt *pkt, void *cookie,
+ u32 flush_mode);
+int pkt_session_get_property(struct hfi_session_get_property_pkt *pkt,
+ void *cookie, u32 ptype);
+int pkt_session_set_property(struct hfi_session_set_property_pkt *pkt,
+ void *cookie, u32 ptype, void *pdata);
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
new file mode 100644
index 000000000000..8d282dba9e57
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -0,0 +1,1050 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __VENUS_HFI_HELPER_H__
+#define __VENUS_HFI_HELPER_H__
+
+#define HFI_DOMAIN_BASE_COMMON 0
+
+#define HFI_DOMAIN_BASE_VDEC 0x1000000
+#define HFI_DOMAIN_BASE_VENC 0x2000000
+#define HFI_DOMAIN_BASE_VPE 0x3000000
+
+#define HFI_VIDEO_ARCH_OX 0x1
+
+#define HFI_ARCH_COMMON_OFFSET 0
+#define HFI_ARCH_OX_OFFSET 0x200000
+
+#define HFI_OX_BASE 0x1000000
+
+#define HFI_CMD_START_OFFSET 0x10000
+#define HFI_MSG_START_OFFSET 0x20000
+
+#define HFI_ERR_NONE 0x0
+#define HFI_ERR_SYS_FATAL 0x1
+#define HFI_ERR_SYS_INVALID_PARAMETER 0x2
+#define HFI_ERR_SYS_VERSION_MISMATCH 0x3
+#define HFI_ERR_SYS_INSUFFICIENT_RESOURCES 0x4
+#define HFI_ERR_SYS_MAX_SESSIONS_REACHED 0x5
+#define HFI_ERR_SYS_UNSUPPORTED_CODEC 0x6
+#define HFI_ERR_SYS_SESSION_IN_USE 0x7
+#define HFI_ERR_SYS_SESSION_ID_OUT_OF_RANGE 0x8
+#define HFI_ERR_SYS_UNSUPPORTED_DOMAIN 0x9
+
+#define HFI_ERR_SESSION_FATAL 0x1001
+#define HFI_ERR_SESSION_INVALID_PARAMETER 0x1002
+#define HFI_ERR_SESSION_BAD_POINTER 0x1003
+#define HFI_ERR_SESSION_INVALID_SESSION_ID 0x1004
+#define HFI_ERR_SESSION_INVALID_STREAM_ID 0x1005
+#define HFI_ERR_SESSION_INCORRECT_STATE_OPERATION 0x1006
+#define HFI_ERR_SESSION_UNSUPPORTED_PROPERTY 0x1007
+#define HFI_ERR_SESSION_UNSUPPORTED_SETTING 0x1008
+#define HFI_ERR_SESSION_INSUFFICIENT_RESOURCES 0x1009
+#define HFI_ERR_SESSION_STREAM_CORRUPT_OUTPUT_STALLED 0x100a
+#define HFI_ERR_SESSION_STREAM_CORRUPT 0x100b
+#define HFI_ERR_SESSION_ENC_OVERFLOW 0x100c
+#define HFI_ERR_SESSION_UNSUPPORTED_STREAM 0x100d
+#define HFI_ERR_SESSION_CMDSIZE 0x100e
+#define HFI_ERR_SESSION_UNSUPPORT_CMD 0x100f
+#define HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE 0x1010
+#define HFI_ERR_SESSION_BUFFERCOUNT_TOOSMALL 0x1011
+#define HFI_ERR_SESSION_INVALID_SCALE_FACTOR 0x1012
+#define HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED 0x1013
+
+#define HFI_EVENT_SYS_ERROR 0x1
+#define HFI_EVENT_SESSION_ERROR 0x2
+
+#define HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES 0x1000001
+#define HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUF_RESOURCES 0x1000002
+#define HFI_EVENT_SESSION_SEQUENCE_CHANGED 0x1000003
+#define HFI_EVENT_SESSION_PROPERTY_CHANGED 0x1000004
+#define HFI_EVENT_SESSION_LTRUSE_FAILED 0x1000005
+#define HFI_EVENT_RELEASE_BUFFER_REFERENCE 0x1000006
+
+#define HFI_BUFFERFLAG_EOS 0x00000001
+#define HFI_BUFFERFLAG_STARTTIME 0x00000002
+#define HFI_BUFFERFLAG_DECODEONLY 0x00000004
+#define HFI_BUFFERFLAG_DATACORRUPT 0x00000008
+#define HFI_BUFFERFLAG_ENDOFFRAME 0x00000010
+#define HFI_BUFFERFLAG_SYNCFRAME 0x00000020
+#define HFI_BUFFERFLAG_EXTRADATA 0x00000040
+#define HFI_BUFFERFLAG_CODECCONFIG 0x00000080
+#define HFI_BUFFERFLAG_TIMESTAMPINVALID 0x00000100
+#define HFI_BUFFERFLAG_READONLY 0x00000200
+#define HFI_BUFFERFLAG_ENDOFSUBFRAME 0x00000400
+#define HFI_BUFFERFLAG_EOSEQ 0x00200000
+#define HFI_BUFFERFLAG_MBAFF 0x08000000
+#define HFI_BUFFERFLAG_VPE_YUV_601_709_CSC_CLAMP 0x10000000
+#define HFI_BUFFERFLAG_DROP_FRAME 0x20000000
+#define HFI_BUFFERFLAG_TEI 0x40000000
+#define HFI_BUFFERFLAG_DISCONTINUITY 0x80000000
+
+#define HFI_ERR_SESSION_EMPTY_BUFFER_DONE_OUTPUT_PENDING 0x1001001
+#define HFI_ERR_SESSION_SAME_STATE_OPERATION 0x1001002
+#define HFI_ERR_SESSION_SYNC_FRAME_NOT_DETECTED 0x1001003
+#define HFI_ERR_SESSION_START_CODE_NOT_FOUND 0x1001004
+
+#define HFI_FLUSH_INPUT 0x1000001
+#define HFI_FLUSH_OUTPUT 0x1000002
+#define HFI_FLUSH_OUTPUT2 0x1000003
+#define HFI_FLUSH_ALL 0x1000004
+
+#define HFI_EXTRADATA_NONE 0x00000000
+#define HFI_EXTRADATA_MB_QUANTIZATION 0x00000001
+#define HFI_EXTRADATA_INTERLACE_VIDEO 0x00000002
+#define HFI_EXTRADATA_VC1_FRAMEDISP 0x00000003
+#define HFI_EXTRADATA_VC1_SEQDISP 0x00000004
+#define HFI_EXTRADATA_TIMESTAMP 0x00000005
+#define HFI_EXTRADATA_S3D_FRAME_PACKING 0x00000006
+#define HFI_EXTRADATA_FRAME_RATE 0x00000007
+#define HFI_EXTRADATA_PANSCAN_WINDOW 0x00000008
+#define HFI_EXTRADATA_RECOVERY_POINT_SEI 0x00000009
+#define HFI_EXTRADATA_MPEG2_SEQDISP 0x0000000d
+#define HFI_EXTRADATA_STREAM_USERDATA 0x0000000e
+#define HFI_EXTRADATA_FRAME_QP 0x0000000f
+#define HFI_EXTRADATA_FRAME_BITS_INFO 0x00000010
+#define HFI_EXTRADATA_MULTISLICE_INFO 0x7f100000
+#define HFI_EXTRADATA_NUM_CONCEALED_MB 0x7f100001
+#define HFI_EXTRADATA_INDEX 0x7f100002
+#define HFI_EXTRADATA_METADATA_LTR 0x7f100004
+#define HFI_EXTRADATA_METADATA_FILLER 0x7fe00002
+
+#define HFI_INDEX_EXTRADATA_INPUT_CROP 0x0700000e
+#define HFI_INDEX_EXTRADATA_DIGITAL_ZOOM 0x07000010
+#define HFI_INDEX_EXTRADATA_ASPECT_RATIO 0x7f100003
+
+#define HFI_INTERLACE_FRAME_PROGRESSIVE 0x01
+#define HFI_INTERLACE_INTERLEAVE_FRAME_TOPFIELDFIRST 0x02
+#define HFI_INTERLACE_INTERLEAVE_FRAME_BOTTOMFIELDFIRST 0x04
+#define HFI_INTERLACE_FRAME_TOPFIELDFIRST 0x08
+#define HFI_INTERLACE_FRAME_BOTTOMFIELDFIRST 0x10
+
+/*
+ * HFI_PROPERTY_PARAM_OX_START
+ * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x1000
+ */
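+/*
+ * For example, HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL below is
+ * 0x0 (HFI_DOMAIN_BASE_COMMON) + 0x200000 (HFI_ARCH_OX_OFFSET) +
+ * 0x1000 + 0x1 = 0x201001.
+ */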
+#define HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL 0x201001
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO 0x201002
+#define HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED 0x201003
+#define HFI_PROPERTY_PARAM_CHROMA_SITE 0x201004
+#define HFI_PROPERTY_PARAM_EXTRA_DATA_HEADER_CONFIG 0x201005
+#define HFI_PROPERTY_PARAM_INDEX_EXTRADATA 0x201006
+#define HFI_PROPERTY_PARAM_DIVX_FORMAT 0x201007
+#define HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE 0x201008
+#define HFI_PROPERTY_PARAM_S3D_FRAME_PACKING_EXTRADATA 0x201009
+#define HFI_PROPERTY_PARAM_ERR_DETECTION_CODE_EXTRADATA 0x20100a
+#define HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED 0x20100b
+#define HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL 0x20100c
+#define HFI_PROPERTY_PARAM_BUFFER_DISPLAY_HOLD_COUNT_ACTUAL 0x20100d
+
+/*
+ * HFI_PROPERTY_CONFIG_OX_START
+ * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_OX_OFFSET + 0x2000
+ */
+#define HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS 0x202001
+#define HFI_PROPERTY_CONFIG_REALTIME 0x202002
+#define HFI_PROPERTY_CONFIG_PRIORITY 0x202003
+#define HFI_PROPERTY_CONFIG_BATCH_INFO 0x202004
+
+/*
+ * HFI_PROPERTY_PARAM_VDEC_OX_START
+ * HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x3000
+ */
+#define HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER 0x1203001
+#define HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT 0x1203002
+#define HFI_PROPERTY_PARAM_VDEC_MULTI_VIEW_SELECT 0x1203003
+#define HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE 0x1203004
+#define HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER 0x1203005
+#define HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION 0x1203006
+#define HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB 0x1203007
+#define HFI_PROPERTY_PARAM_VDEC_H264_ENTROPY_SWITCHING 0x1203008
+#define HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO 0x1203009
+#define HFI_PROPERTY_PARAM_VDEC_FRAME_RATE_EXTRADATA 0x120300a
+#define HFI_PROPERTY_PARAM_VDEC_PANSCAN_WNDW_EXTRADATA 0x120300b
+#define HFI_PROPERTY_PARAM_VDEC_RECOVERY_POINT_SEI_EXTRADATA 0x120300c
+#define HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE 0x120300d
+#define HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY 0x120300e
+#define HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA 0x1203011
+#define HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA 0x1203012
+#define HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA 0x1203013
+#define HFI_PROPERTY_PARAM_VDEC_INTERLACE_VIDEO_EXTRADATA 0x1203014
+#define HFI_PROPERTY_PARAM_VDEC_AVC_SESSION_SELECT 0x1203015
+#define HFI_PROPERTY_PARAM_VDEC_MPEG2_SEQDISP_EXTRADATA 0x1203016
+#define HFI_PROPERTY_PARAM_VDEC_STREAM_USERDATA_EXTRADATA 0x1203017
+#define HFI_PROPERTY_PARAM_VDEC_FRAME_QP_EXTRADATA 0x1203018
+#define HFI_PROPERTY_PARAM_VDEC_FRAME_BITS_INFO_EXTRADATA 0x1203019
+#define HFI_PROPERTY_PARAM_VDEC_SCS_THRESHOLD 0x120301a
+
+/*
+ * HFI_PROPERTY_CONFIG_VDEC_OX_START
+ * HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x0000
+ */
+#define HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER 0x1200001
+#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING 0x1200002
+#define HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP 0x1200003
+
+#define HFI_PROPERTY_CONFIG_VDEC_ENTROPY 0x1204004
+
+/*
+ * HFI_PROPERTY_PARAM_VENC_OX_START
+ * HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x5000
+ */
+#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO 0x2205001
+#define HFI_PROPERTY_PARAM_VENC_H264_IDR_S3D_FRAME_PACKING_NAL 0x2205002
+#define HFI_PROPERTY_PARAM_VENC_LTR_INFO 0x2205003
+#define HFI_PROPERTY_PARAM_VENC_MBI_DUMPING 0x2205005
+
+/*
+ * HFI_PROPERTY_CONFIG_VENC_OX_START
+ * HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x6000
+ */
+#define HFI_PROPERTY_CONFIG_VENC_FRAME_QP 0x2206001
+
+/*
+ * HFI_PROPERTY_PARAM_VPE_OX_START
+ * HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x7000
+ */
+#define HFI_PROPERTY_PARAM_VPE_COLOR_SPACE_CONVERSION 0x3207001
+
+#define HFI_PROPERTY_CONFIG_VPE_OX_START \
+ (HFI_DOMAIN_BASE_VPE + HFI_ARCH_OX_OFFSET + 0x8000)
+
+#define HFI_CHROMA_SITE_0 0x1000001
+#define HFI_CHROMA_SITE_1 0x1000002
+#define HFI_CHROMA_SITE_2 0x1000003
+#define HFI_CHROMA_SITE_3 0x1000004
+#define HFI_CHROMA_SITE_4 0x1000005
+#define HFI_CHROMA_SITE_5 0x1000006
+
+#define HFI_PRIORITY_LOW 10
+#define HFI_PRIOIRTY_MEDIUM 20
+#define HFI_PRIORITY_HIGH 30
+
+#define HFI_OUTPUT_ORDER_DISPLAY 0x1000001
+#define HFI_OUTPUT_ORDER_DECODE 0x1000002
+
+#define HFI_RATE_CONTROL_OFF 0x1000001
+#define HFI_RATE_CONTROL_VBR_VFR 0x1000002
+#define HFI_RATE_CONTROL_VBR_CFR 0x1000003
+#define HFI_RATE_CONTROL_CBR_VFR 0x1000004
+#define HFI_RATE_CONTROL_CBR_CFR 0x1000005
+
+#define HFI_VIDEO_CODEC_H264 0x00000002
+#define HFI_VIDEO_CODEC_H263 0x00000004
+#define HFI_VIDEO_CODEC_MPEG1 0x00000008
+#define HFI_VIDEO_CODEC_MPEG2 0x00000010
+#define HFI_VIDEO_CODEC_MPEG4 0x00000020
+#define HFI_VIDEO_CODEC_DIVX_311 0x00000040
+#define HFI_VIDEO_CODEC_DIVX 0x00000080
+#define HFI_VIDEO_CODEC_VC1 0x00000100
+#define HFI_VIDEO_CODEC_SPARK 0x00000200
+#define HFI_VIDEO_CODEC_VP8 0x00001000
+#define HFI_VIDEO_CODEC_HEVC 0x00002000
+#define HFI_VIDEO_CODEC_VP9 0x00004000
+#define HFI_VIDEO_CODEC_HEVC_HYBRID 0x80000000
+
+#define HFI_H264_PROFILE_BASELINE 0x00000001
+#define HFI_H264_PROFILE_MAIN 0x00000002
+#define HFI_H264_PROFILE_HIGH 0x00000004
+#define HFI_H264_PROFILE_STEREO_HIGH 0x00000008
+#define HFI_H264_PROFILE_MULTIVIEW_HIGH 0x00000010
+#define HFI_H264_PROFILE_CONSTRAINED_BASE 0x00000020
+#define HFI_H264_PROFILE_CONSTRAINED_HIGH 0x00000040
+
+#define HFI_H264_LEVEL_1 0x00000001
+#define HFI_H264_LEVEL_1b 0x00000002
+#define HFI_H264_LEVEL_11 0x00000004
+#define HFI_H264_LEVEL_12 0x00000008
+#define HFI_H264_LEVEL_13 0x00000010
+#define HFI_H264_LEVEL_2 0x00000020
+#define HFI_H264_LEVEL_21 0x00000040
+#define HFI_H264_LEVEL_22 0x00000080
+#define HFI_H264_LEVEL_3 0x00000100
+#define HFI_H264_LEVEL_31 0x00000200
+#define HFI_H264_LEVEL_32 0x00000400
+#define HFI_H264_LEVEL_4 0x00000800
+#define HFI_H264_LEVEL_41 0x00001000
+#define HFI_H264_LEVEL_42 0x00002000
+#define HFI_H264_LEVEL_5 0x00004000
+#define HFI_H264_LEVEL_51 0x00008000
+#define HFI_H264_LEVEL_52 0x00010000
+
+#define HFI_H263_PROFILE_BASELINE 0x00000001
+
+#define HFI_H263_LEVEL_10 0x00000001
+#define HFI_H263_LEVEL_20 0x00000002
+#define HFI_H263_LEVEL_30 0x00000004
+#define HFI_H263_LEVEL_40 0x00000008
+#define HFI_H263_LEVEL_45 0x00000010
+#define HFI_H263_LEVEL_50 0x00000020
+#define HFI_H263_LEVEL_60 0x00000040
+#define HFI_H263_LEVEL_70 0x00000080
+
+#define HFI_MPEG2_PROFILE_SIMPLE 0x00000001
+#define HFI_MPEG2_PROFILE_MAIN 0x00000002
+#define HFI_MPEG2_PROFILE_422 0x00000004
+#define HFI_MPEG2_PROFILE_SNR 0x00000008
+#define HFI_MPEG2_PROFILE_SPATIAL 0x00000010
+#define HFI_MPEG2_PROFILE_HIGH 0x00000020
+
+#define HFI_MPEG2_LEVEL_LL 0x00000001
+#define HFI_MPEG2_LEVEL_ML 0x00000002
+#define HFI_MPEG2_LEVEL_H14 0x00000004
+#define HFI_MPEG2_LEVEL_HL 0x00000008
+
+#define HFI_MPEG4_PROFILE_SIMPLE 0x00000001
+#define HFI_MPEG4_PROFILE_ADVANCEDSIMPLE 0x00000002
+
+#define HFI_MPEG4_LEVEL_0 0x00000001
+#define HFI_MPEG4_LEVEL_0b 0x00000002
+#define HFI_MPEG4_LEVEL_1 0x00000004
+#define HFI_MPEG4_LEVEL_2 0x00000008
+#define HFI_MPEG4_LEVEL_3 0x00000010
+#define HFI_MPEG4_LEVEL_4 0x00000020
+#define HFI_MPEG4_LEVEL_4a 0x00000040
+#define HFI_MPEG4_LEVEL_5 0x00000080
+#define HFI_MPEG4_LEVEL_6 0x00000100
+#define HFI_MPEG4_LEVEL_7 0x00000200
+#define HFI_MPEG4_LEVEL_8 0x00000400
+#define HFI_MPEG4_LEVEL_9 0x00000800
+#define HFI_MPEG4_LEVEL_3b 0x00001000
+
+#define HFI_VC1_PROFILE_SIMPLE 0x00000001
+#define HFI_VC1_PROFILE_MAIN 0x00000002
+#define HFI_VC1_PROFILE_ADVANCED 0x00000004
+
+#define HFI_VC1_LEVEL_LOW 0x00000001
+#define HFI_VC1_LEVEL_MEDIUM 0x00000002
+#define HFI_VC1_LEVEL_HIGH 0x00000004
+#define HFI_VC1_LEVEL_0 0x00000008
+#define HFI_VC1_LEVEL_1 0x00000010
+#define HFI_VC1_LEVEL_2 0x00000020
+#define HFI_VC1_LEVEL_3 0x00000040
+#define HFI_VC1_LEVEL_4 0x00000080
+
+#define HFI_VPX_PROFILE_SIMPLE 0x00000001
+#define HFI_VPX_PROFILE_ADVANCED 0x00000002
+#define HFI_VPX_PROFILE_VERSION_0 0x00000004
+#define HFI_VPX_PROFILE_VERSION_1 0x00000008
+#define HFI_VPX_PROFILE_VERSION_2 0x00000010
+#define HFI_VPX_PROFILE_VERSION_3 0x00000020
+
+#define HFI_DIVX_FORMAT_4 0x1
+#define HFI_DIVX_FORMAT_5 0x2
+#define HFI_DIVX_FORMAT_6 0x3
+
+#define HFI_DIVX_PROFILE_QMOBILE 0x00000001
+#define HFI_DIVX_PROFILE_MOBILE 0x00000002
+#define HFI_DIVX_PROFILE_MT 0x00000004
+#define HFI_DIVX_PROFILE_HT 0x00000008
+#define HFI_DIVX_PROFILE_HD 0x00000010
+
+#define HFI_HEVC_PROFILE_MAIN 0x00000001
+#define HFI_HEVC_PROFILE_MAIN10 0x00000002
+#define HFI_HEVC_PROFILE_MAIN_STILL_PIC 0x00000004
+
+#define HFI_HEVC_LEVEL_1 0x00000001
+#define HFI_HEVC_LEVEL_2 0x00000002
+#define HFI_HEVC_LEVEL_21 0x00000004
+#define HFI_HEVC_LEVEL_3 0x00000008
+#define HFI_HEVC_LEVEL_31 0x00000010
+#define HFI_HEVC_LEVEL_4 0x00000020
+#define HFI_HEVC_LEVEL_41 0x00000040
+#define HFI_HEVC_LEVEL_5 0x00000080
+#define HFI_HEVC_LEVEL_51 0x00000100
+#define HFI_HEVC_LEVEL_52 0x00000200
+#define HFI_HEVC_LEVEL_6 0x00000400
+#define HFI_HEVC_LEVEL_61 0x00000800
+#define HFI_HEVC_LEVEL_62 0x00001000
+
+#define HFI_HEVC_TIER_MAIN 0x1
+#define HFI_HEVC_TIER_HIGH0 0x2
+
+#define HFI_BUFFER_INPUT 0x1
+#define HFI_BUFFER_OUTPUT 0x2
+#define HFI_BUFFER_OUTPUT2 0x3
+#define HFI_BUFFER_INTERNAL_PERSIST 0x4
+#define HFI_BUFFER_INTERNAL_PERSIST_1 0x5
+#define HFI_BUFFER_INTERNAL_SCRATCH 0x1000001
+#define HFI_BUFFER_EXTRADATA_INPUT 0x1000002
+#define HFI_BUFFER_EXTRADATA_OUTPUT 0x1000003
+#define HFI_BUFFER_EXTRADATA_OUTPUT2 0x1000004
+#define HFI_BUFFER_INTERNAL_SCRATCH_1 0x1000005
+#define HFI_BUFFER_INTERNAL_SCRATCH_2 0x1000006
+
+#define HFI_BUFFER_TYPE_MAX 11
+
+#define HFI_BUFFER_MODE_STATIC 0x1000001
+#define HFI_BUFFER_MODE_RING 0x1000002
+#define HFI_BUFFER_MODE_DYNAMIC 0x1000003
+
+#define HFI_VENC_PERFMODE_MAX_QUALITY 0x1
+#define HFI_VENC_PERFMODE_POWER_SAVE 0x2
+
+/*
+ * HFI_PROPERTY_SYS_COMMON_START
+ * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x0000
+ */
+#define HFI_PROPERTY_SYS_DEBUG_CONFIG 0x1
+#define HFI_PROPERTY_SYS_RESOURCE_OCMEM_REQUIREMENT_INFO 0x2
+#define HFI_PROPERTY_SYS_CONFIG_VCODEC_CLKFREQ 0x3
+#define HFI_PROPERTY_SYS_IDLE_INDICATOR 0x4
+#define HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL 0x5
+#define HFI_PROPERTY_SYS_IMAGE_VERSION 0x6
+#define HFI_PROPERTY_SYS_CONFIG_COVERAGE 0x7
+
+/*
+ * HFI_PROPERTY_PARAM_COMMON_START
+ * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x1000
+ */
+#define HFI_PROPERTY_PARAM_FRAME_SIZE 0x1001
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO 0x1002
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT 0x1003
+#define HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED 0x1004
+#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT 0x1005
+#define HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED 0x1006
+#define HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED 0x1007
+#define HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED 0x1008
+#define HFI_PROPERTY_PARAM_CODEC_SUPPORTED 0x1009
+#define HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED 0x100a
+#define HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT 0x100b
+#define HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT 0x100c
+#define HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE 0x100d
+#define HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED 0x100e
+#define HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT 0x100f
+#define HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED 0x1010
+
+/*
+ * HFI_PROPERTY_CONFIG_COMMON_START
+ * HFI_DOMAIN_BASE_COMMON + HFI_ARCH_COMMON_OFFSET + 0x2000
+ */
+#define HFI_PROPERTY_CONFIG_FRAME_RATE 0x2001
+
+/*
+ * HFI_PROPERTY_PARAM_VDEC_COMMON_START
+ * HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x3000
+ */
+#define HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM 0x1003001
+#define HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR 0x1003002
+#define HFI_PROPERTY_PARAM_VDEC_NONCP_OUTPUT2 0x1003003
+
+/*
+ * HFI_PROPERTY_CONFIG_VDEC_COMMON_START
+ * HFI_DOMAIN_BASE_VDEC + HFI_ARCH_COMMON_OFFSET + 0x4000
+ */
+
+/*
+ * HFI_PROPERTY_PARAM_VENC_COMMON_START
+ * HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x5000
+ */
+#define HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE 0x2005001
+#define HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL 0x2005002
+#define HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL 0x2005003
+#define HFI_PROPERTY_PARAM_VENC_RATE_CONTROL 0x2005004
+#define HFI_PROPERTY_PARAM_VENC_H264_PICORDER_CNT_TYPE 0x2005005
+#define HFI_PROPERTY_PARAM_VENC_SESSION_QP 0x2005006
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION 0x2005007
+#define HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE 0x2005008
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION 0x2005009
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_SHORT_HEADER 0x200500a
+#define HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION 0x200500b
+#define HFI_PROPERTY_PARAM_VENC_OPEN_GOP 0x200500c
+#define HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH 0x200500d
+#define HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL 0x200500e
+#define HFI_PROPERTY_PARAM_VENC_VBV_HRD_BUF_SIZE 0x200500f
+#define HFI_PROPERTY_PARAM_VENC_QUALITY_VS_SPEED 0x2005010
+#define HFI_PROPERTY_PARAM_VENC_ADVANCED 0x2005012
+#define HFI_PROPERTY_PARAM_VENC_H264_SPS_ID 0x2005014
+#define HFI_PROPERTY_PARAM_VENC_H264_PPS_ID 0x2005015
+#define HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL 0x2005016
+#define HFI_PROPERTY_PARAM_VENC_ASPECT_RATIO 0x2005017
+#define HFI_PROPERTY_PARAM_VENC_NUMREF 0x2005018
+#define HFI_PROPERTY_PARAM_VENC_MULTIREF_P 0x2005019
+#define HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT 0x200501b
+#define HFI_PROPERTY_PARAM_VENC_LTRMODE 0x200501c
+#define HFI_PROPERTY_PARAM_VENC_VIDEO_FULL_RANGE 0x200501d
+#define HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO 0x200501e
+#define HFI_PROPERTY_PARAM_VENC_VC1_PERF_CFG 0x200501f
+#define HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES 0x2005020
+#define HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC 0x2005021
+#define HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY 0x2005023
+#define HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER 0x2005026
+#define HFI_PROPERTY_PARAM_VENC_DISABLE_RC_TIMESTAMP 0x2005027
+#define HFI_PROPERTY_PARAM_VENC_INITIAL_QP 0x2005028
+#define HFI_PROPERTY_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE 0x2005029
+#define HFI_PROPERTY_PARAM_VENC_HIER_B_MAX_NUM_ENH_LAYER 0x200502c
+#define HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE 0x200502f
+
+/*
+ * HFI_PROPERTY_CONFIG_VENC_COMMON_START
+ * HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000
+ */
+#define HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE 0x2006001
+#define HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD 0x2006002
+#define HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD 0x2006003
+#define HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME 0x2006004
+#define HFI_PROPERTY_CONFIG_VENC_SLICE_SIZE 0x2006005
+#define HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE 0x2006007
+#define HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER 0x2006008
+#define HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME 0x2006009
+#define HFI_PROPERTY_CONFIG_VENC_USELTRFRAME 0x200600a
+#define HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER 0x200600b
+#define HFI_PROPERTY_CONFIG_VENC_LTRPERIOD 0x200600c
+#define HFI_PROPERTY_CONFIG_VENC_PERF_MODE 0x200600e
+
+/*
+ * HFI_PROPERTY_PARAM_VPE_COMMON_START
+ * HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x7000
+ */
+
+/*
+ * HFI_PROPERTY_CONFIG_VPE_COMMON_START
+ * HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x8000
+ */
+#define HFI_PROPERTY_CONFIG_VPE_DEINTERLACE 0x3008001
+#define HFI_PROPERTY_CONFIG_VPE_OPERATIONS 0x3008002
+
+enum hfi_version {
+ HFI_VERSION_1XX,
+ HFI_VERSION_3XX,
+};
+
+struct hfi_buffer_info {
+ u32 buffer_addr;
+ u32 extradata_addr;
+};
+
+struct hfi_bitrate {
+ u32 bitrate;
+ u32 layer_id;
+};
+
+#define HFI_CAPABILITY_FRAME_WIDTH 0x01
+#define HFI_CAPABILITY_FRAME_HEIGHT 0x02
+#define HFI_CAPABILITY_MBS_PER_FRAME 0x03
+#define HFI_CAPABILITY_MBS_PER_SECOND 0x04
+#define HFI_CAPABILITY_FRAMERATE 0x05
+#define HFI_CAPABILITY_SCALE_X 0x06
+#define HFI_CAPABILITY_SCALE_Y 0x07
+#define HFI_CAPABILITY_BITRATE 0x08
+#define HFI_CAPABILITY_BFRAME 0x09
+#define HFI_CAPABILITY_PEAKBITRATE 0x0a
+#define HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS 0x10
+#define HFI_CAPABILITY_ENC_LTR_COUNT 0x11
+#define HFI_CAPABILITY_CP_OUTPUT2_THRESH 0x12
+#define HFI_CAPABILITY_HIER_B_NUM_ENH_LAYERS 0x13
+#define HFI_CAPABILITY_LCU_SIZE 0x14
+#define HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS 0x15
+#define HFI_CAPABILITY_MBS_PER_SECOND_POWERSAVE 0x16
+
+struct hfi_capability {
+ u32 capability_type;
+ u32 min;
+ u32 max;
+ u32 step_size;
+};
+
+struct hfi_capabilities {
+ u32 num_capabilities;
+ struct hfi_capability data[1];
+};
+
+#define HFI_DEBUG_MSG_LOW 0x01
+#define HFI_DEBUG_MSG_MEDIUM 0x02
+#define HFI_DEBUG_MSG_HIGH 0x04
+#define HFI_DEBUG_MSG_ERROR 0x08
+#define HFI_DEBUG_MSG_FATAL 0x10
+#define HFI_DEBUG_MSG_PERF 0x20
+
+#define HFI_DEBUG_MODE_QUEUE 0x01
+#define HFI_DEBUG_MODE_QDSS 0x02
+
+struct hfi_debug_config {
+ u32 config;
+ u32 mode;
+};
+
+struct hfi_enable {
+ u32 enable;
+};
+
+#define HFI_H264_DB_MODE_DISABLE 0x1
+#define HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY 0x2
+#define HFI_H264_DB_MODE_ALL_BOUNDARY 0x3
+
+struct hfi_h264_db_control {
+ u32 mode;
+ u32 slice_alpha_offset;
+ u32 slice_beta_offset;
+};
+
+#define HFI_H264_ENTROPY_CAVLC 0x1
+#define HFI_H264_ENTROPY_CABAC 0x2
+
+#define HFI_H264_CABAC_MODEL_0 0x1
+#define HFI_H264_CABAC_MODEL_1 0x2
+#define HFI_H264_CABAC_MODEL_2 0x3
+
+struct hfi_h264_entropy_control {
+ u32 entropy_mode;
+ u32 cabac_model;
+};
+
+struct hfi_framerate {
+ u32 buffer_type;
+ u32 framerate;
+};
+
+#define HFI_INTRA_REFRESH_NONE 0x1
+#define HFI_INTRA_REFRESH_CYCLIC 0x2
+#define HFI_INTRA_REFRESH_ADAPTIVE 0x3
+#define HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE 0x4
+#define HFI_INTRA_REFRESH_RANDOM 0x5
+
+struct hfi_intra_refresh {
+ u32 mode;
+ u32 air_mbs;
+ u32 air_ref;
+ u32 cir_mbs;
+};
+
+struct hfi_intra_refresh_3x {
+ u32 mode;
+ u32 mbs;
+};
+
+struct hfi_idr_period {
+ u32 idr_period;
+};
+
+struct hfi_operations_type {
+ u32 rotation;
+ u32 flip;
+};
+
+struct hfi_max_num_b_frames {
+ u32 max_num_b_frames;
+};
+
+struct hfi_vc1e_perf_cfg_type {
+ u32 search_range_x_subsampled[3];
+ u32 search_range_y_subsampled[3];
+};
+
+struct hfi_conceal_color {
+ u32 conceal_color;
+};
+
+struct hfi_intra_period {
+ u32 pframes;
+ u32 bframes;
+};
+
+struct hfi_mpeg4_header_extension {
+ u32 header_extension;
+};
+
+struct hfi_mpeg4_time_resolution {
+ u32 time_increment_resolution;
+};
+
+struct hfi_multi_stream {
+ u32 buffer_type;
+ u32 enable;
+ u32 width;
+ u32 height;
+};
+
+struct hfi_multi_stream_3x {
+ u32 buffer_type;
+ u32 enable;
+};
+
+struct hfi_multi_view_format {
+ u32 views;
+ u32 view_order[1];
+};
+
+#define HFI_MULTI_SLICE_OFF 0x1
+#define HFI_MULTI_SLICE_BY_MB_COUNT 0x2
+#define HFI_MULTI_SLICE_BY_BYTE_COUNT 0x3
+#define HFI_MULTI_SLICE_GOB 0x4
+
+struct hfi_multi_slice_control {
+ u32 multi_slice;
+ u32 slice_size;
+};
+
+#define HFI_NAL_FORMAT_STARTCODES 0x01
+#define HFI_NAL_FORMAT_ONE_NAL_PER_BUFFER 0x02
+#define HFI_NAL_FORMAT_ONE_BYTE_LENGTH 0x04
+#define HFI_NAL_FORMAT_TWO_BYTE_LENGTH 0x08
+#define HFI_NAL_FORMAT_FOUR_BYTE_LENGTH 0x10
+
+struct hfi_nal_stream_format {
+ u32 format;
+};
+
+struct hfi_nal_stream_format_select {
+ u32 format;
+};
+
+#define HFI_PICTURE_TYPE_I 0x01
+#define HFI_PICTURE_TYPE_P 0x02
+#define HFI_PICTURE_TYPE_B 0x04
+#define HFI_PICTURE_TYPE_IDR 0x08
+
+struct hfi_profile_level {
+ u32 profile;
+ u32 level;
+};
+
+#define HFI_MAX_PROFILE_COUNT 16
+
+struct hfi_profile_level_supported {
+ u32 profile_count;
+ struct hfi_profile_level profile_level[1];
+};
+
+struct hfi_quality_vs_speed {
+ u32 quality_vs_speed;
+};
+
+struct hfi_quantization {
+ u32 qp_i;
+ u32 qp_p;
+ u32 qp_b;
+ u32 layer_id;
+};
+
+struct hfi_initial_quantization {
+ u32 qp_i;
+ u32 qp_p;
+ u32 qp_b;
+ u32 init_qp_enable;
+};
+
+struct hfi_quantization_range {
+ u32 min_qp;
+ u32 max_qp;
+ u32 layer_id;
+};
+
+#define HFI_LTR_MODE_DISABLE 0x0
+#define HFI_LTR_MODE_MANUAL 0x1
+#define HFI_LTR_MODE_PERIODIC 0x2
+
+struct hfi_ltr_mode {
+ u32 ltr_mode;
+ u32 ltr_count;
+ u32 trust_mode;
+};
+
+struct hfi_ltr_use {
+ u32 ref_ltr;
+ u32 use_constrnt;
+ u32 frames;
+};
+
+struct hfi_ltr_mark {
+ u32 mark_frame;
+};
+
+struct hfi_framesize {
+ u32 buffer_type;
+ u32 width;
+ u32 height;
+};
+
+struct hfi_h264_vui_timing_info {
+ u32 enable;
+ u32 fixed_framerate;
+ u32 time_scale;
+};
+
+#define HFI_COLOR_FORMAT_MONOCHROME 0x01
+#define HFI_COLOR_FORMAT_NV12 0x02
+#define HFI_COLOR_FORMAT_NV21 0x03
+#define HFI_COLOR_FORMAT_NV12_4x4TILE 0x04
+#define HFI_COLOR_FORMAT_NV21_4x4TILE 0x05
+#define HFI_COLOR_FORMAT_YUYV 0x06
+#define HFI_COLOR_FORMAT_YVYU 0x07
+#define HFI_COLOR_FORMAT_UYVY 0x08
+#define HFI_COLOR_FORMAT_VYUY 0x09
+#define HFI_COLOR_FORMAT_RGB565 0x0a
+#define HFI_COLOR_FORMAT_BGR565 0x0b
+#define HFI_COLOR_FORMAT_RGB888 0x0c
+#define HFI_COLOR_FORMAT_BGR888 0x0d
+#define HFI_COLOR_FORMAT_YUV444 0x0e
+#define HFI_COLOR_FORMAT_RGBA8888 0x10
+
+#define HFI_COLOR_FORMAT_UBWC_BASE 0x8000
+#define HFI_COLOR_FORMAT_10_BIT_BASE 0x4000
+
+#define HFI_COLOR_FORMAT_YUV420_TP10 0x4002
+#define HFI_COLOR_FORMAT_NV12_UBWC 0x8002
+#define HFI_COLOR_FORMAT_YUV420_TP10_UBWC 0xc002
+#define HFI_COLOR_FORMAT_RGBA8888_UBWC 0x8010
+
+struct hfi_uncompressed_format_select {
+ u32 buffer_type;
+ u32 format;
+};
+
+struct hfi_uncompressed_format_supported {
+ u32 buffer_type;
+ u32 format_entries;
+ u32 format_info[1];
+};
+
+struct hfi_uncompressed_plane_actual {
+ int actual_stride;
+ u32 actual_plane_buffer_height;
+};
+
+struct hfi_uncompressed_plane_actual_info {
+ u32 buffer_type;
+ u32 num_planes;
+ struct hfi_uncompressed_plane_actual plane_format[1];
+};
+
+struct hfi_uncompressed_plane_constraints {
+ u32 stride_multiples;
+ u32 max_stride;
+ u32 min_plane_buffer_height_multiple;
+ u32 buffer_alignment;
+};
+
+struct hfi_uncompressed_plane_info {
+ u32 format;
+ u32 num_planes;
+ struct hfi_uncompressed_plane_constraints plane_format[1];
+};
+
+struct hfi_uncompressed_plane_actual_constraints_info {
+ u32 buffer_type;
+ u32 num_planes;
+ struct hfi_uncompressed_plane_constraints plane_format[1];
+};
+
+struct hfi_codec_supported {
+ u32 dec_codecs;
+ u32 enc_codecs;
+};
+
+struct hfi_properties_supported {
+ u32 num_properties;
+ u32 properties[1];
+};
+
+struct hfi_max_sessions_supported {
+ u32 max_sessions;
+};
+
+#define HFI_MAX_MATRIX_COEFFS 9
+#define HFI_MAX_BIAS_COEFFS 3
+#define HFI_MAX_LIMIT_COEFFS 6
+
+struct hfi_vpe_color_space_conversion {
+ u32 csc_matrix[HFI_MAX_MATRIX_COEFFS];
+ u32 csc_bias[HFI_MAX_BIAS_COEFFS];
+ u32 csc_limit[HFI_MAX_LIMIT_COEFFS];
+};
+
+#define HFI_ROTATE_NONE 0x1
+#define HFI_ROTATE_90 0x2
+#define HFI_ROTATE_180 0x3
+#define HFI_ROTATE_270 0x4
+
+#define HFI_FLIP_NONE 0x1
+#define HFI_FLIP_HORIZONTAL 0x2
+#define HFI_FLIP_VERTICAL 0x3
+
+struct hfi_operations {
+ u32 rotate;
+ u32 flip;
+};
+
+#define HFI_RESOURCE_OCMEM 0x1
+
+struct hfi_resource_ocmem {
+ u32 size;
+ u32 mem;
+};
+
+struct hfi_resource_ocmem_requirement {
+ u32 session_domain;
+ u32 width;
+ u32 height;
+ u32 size;
+};
+
+struct hfi_resource_ocmem_requirement_info {
+ u32 num_entries;
+ struct hfi_resource_ocmem_requirement requirements[1];
+};
+
+struct hfi_property_sys_image_version_info_type {
+ u32 string_size;
+ u8 str_image_version[1];
+};
+
+struct hfi_codec_mask_supported {
+ u32 codecs;
+ u32 video_domains;
+};
+
+struct hfi_seq_header_info {
+ u32 max_hader_len;
+};
+
+struct hfi_aspect_ratio {
+ u32 aspect_width;
+ u32 aspect_height;
+};
+
+#define HFI_MVC_BUFFER_LAYOUT_TOP_BOTTOM 0
+#define HFI_MVC_BUFFER_LAYOUT_SIDEBYSIDE 1
+#define HFI_MVC_BUFFER_LAYOUT_SEQ 2
+
+struct hfi_mvc_buffer_layout_descp_type {
+ u32 layout_type;
+ u32 bright_view_first;
+ u32 ngap;
+};
+
+struct hfi_scs_threshold {
+ u32 threshold_value;
+};
+
+#define HFI_TEST_SSR_SW_ERR_FATAL 0x1
+#define HFI_TEST_SSR_SW_DIV_BY_ZERO 0x2
+#define HFI_TEST_SSR_HW_WDOG_IRQ 0x3
+
+struct hfi_buffer_alloc_mode {
+ u32 type;
+ u32 mode;
+};
+
+struct hfi_index_extradata_config {
+ u32 enable;
+ u32 index_extra_data_id;
+};
+
+struct hfi_extradata_header {
+ u32 size;
+ u32 version;
+ u32 port_index;
+ u32 type;
+ u32 data_size;
+ u8 data[1];
+};
+
+struct hfi_batch_info {
+ u32 input_batch_count;
+ u32 output_batch_count;
+};
+
+struct hfi_buffer_count_actual {
+ u32 type;
+ u32 count_actual;
+};
+
+struct hfi_buffer_size_actual {
+ u32 type;
+ u32 size;
+};
+
+struct hfi_buffer_display_hold_count_actual {
+ u32 type;
+ u32 hold_count;
+};
+
+struct hfi_buffer_requirements {
+ u32 type;
+ u32 size;
+ u32 region_size;
+ u32 hold_count;
+ u32 count_min;
+ u32 count_actual;
+ u32 contiguous;
+ u32 alignment;
+};
+
+struct hfi_data_payload {
+ u32 size;
+ u8 data[1];
+};
+
+struct hfi_enable_picture {
+ u32 picture_type;
+};
+
+struct hfi_display_picture_buffer_count {
+ int enable;
+ u32 count;
+};
+
+struct hfi_extra_data_header_config {
+ u32 type;
+ u32 buffer_type;
+ u32 version;
+ u32 port_index;
+ u32 client_extra_data_id;
+};
+
+struct hfi_interlace_format_supported {
+ u32 buffer_type;
+ u32 format;
+};
+
+struct hfi_buffer_alloc_mode_supported {
+ u32 buffer_type;
+ u32 num_entries;
+ u32 data[1];
+};
+
+struct hfi_mb_error_map {
+ u32 error_map_size;
+ u8 error_map[1];
+};
+
+struct hfi_metadata_pass_through {
+ int enable;
+ u32 size;
+};
+
+struct hfi_multi_view_select {
+ u32 view_index;
+};
+
+struct hfi_hybrid_hierp {
+ u32 layers;
+};
+
+struct hfi_pkt_hdr {
+ u32 size;
+ u32 pkt_type;
+};
+
+struct hfi_session_hdr_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 session_id;
+};
+
+struct hfi_session_pkt {
+ struct hfi_session_hdr_pkt shdr;
+};
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
new file mode 100644
index 000000000000..f8841713e417
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -0,0 +1,1052 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/hash.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "core.h"
+#include "hfi.h"
+#include "hfi_helper.h"
+#include "hfi_msgs.h"
+
+static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
+ struct hfi_msg_event_notify_pkt *pkt)
+{
+ struct hfi_event_data event = {0};
+ int num_properties_changed;
+ struct hfi_framesize *frame_sz;
+ struct hfi_profile_level *profile_level;
+ u8 *data_ptr;
+ u32 ptype;
+
+ inst->error = HFI_ERR_NONE;
+
+ switch (pkt->event_data1) {
+ case HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES:
+ case HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUF_RESOURCES:
+ break;
+ default:
+ inst->error = HFI_ERR_SESSION_INVALID_PARAMETER;
+ goto done;
+ }
+
+ event.event_type = pkt->event_data1;
+
+ num_properties_changed = pkt->event_data2;
+ if (!num_properties_changed) {
+ inst->error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES;
+ goto done;
+ }
+
+ data_ptr = (u8 *)&pkt->ext_event_data[0];
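+	/* each changed property is a u32 property id followed by its payload */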
+ do {
+ ptype = *((u32 *)data_ptr);
+ switch (ptype) {
+ case HFI_PROPERTY_PARAM_FRAME_SIZE:
+ data_ptr += sizeof(u32);
+ frame_sz = (struct hfi_framesize *)data_ptr;
+ event.width = frame_sz->width;
+ event.height = frame_sz->height;
+			data_ptr += sizeof(*frame_sz);
+ break;
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
+ data_ptr += sizeof(u32);
+ profile_level = (struct hfi_profile_level *)data_ptr;
+ event.profile = profile_level->profile;
+ event.level = profile_level->level;
+			data_ptr += sizeof(*profile_level);
+ break;
+ default:
+ break;
+ }
+ num_properties_changed--;
+ } while (num_properties_changed > 0);
+
+done:
+ inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event);
+}
+
+static void event_release_buffer_ref(struct venus_core *core,
+ struct venus_inst *inst,
+ struct hfi_msg_event_notify_pkt *pkt)
+{
+ struct hfi_event_data event = {0};
+ struct hfi_msg_event_release_buffer_ref_pkt *data;
+
+ data = (struct hfi_msg_event_release_buffer_ref_pkt *)
+ pkt->ext_event_data;
+
+ event.event_type = HFI_EVENT_RELEASE_BUFFER_REFERENCE;
+ event.packet_buffer = data->packet_buffer;
+ event.extradata_buffer = data->extradata_buffer;
+ event.tag = data->output_tag;
+
+ inst->error = HFI_ERR_NONE;
+ inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event);
+}
+
+static void event_sys_error(struct venus_core *core, u32 event,
+ struct hfi_msg_event_notify_pkt *pkt)
+{
+ if (pkt)
+ dev_dbg(core->dev,
+ "sys error (session id:%x, data1:%x, data2:%x)\n",
+ pkt->shdr.session_id, pkt->event_data1,
+ pkt->event_data2);
+
+ core->core_ops->event_notify(core, event);
+}
+
+static void
+event_session_error(struct venus_core *core, struct venus_inst *inst,
+ struct hfi_msg_event_notify_pkt *pkt)
+{
+ struct device *dev = core->dev;
+
+ dev_dbg(dev, "session error: event id:%x, session id:%x\n",
+ pkt->event_data1, pkt->shdr.session_id);
+
+ if (!inst)
+ return;
+
+ switch (pkt->event_data1) {
+	/* non-fatal session errors */
+ case HFI_ERR_SESSION_INVALID_SCALE_FACTOR:
+ case HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE:
+ case HFI_ERR_SESSION_UNSUPPORTED_SETTING:
+ case HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED:
+ inst->error = HFI_ERR_NONE;
+ break;
+ default:
+ dev_err(dev, "session error: event id:%x (%x), session id:%x\n",
+ pkt->event_data1, pkt->event_data2,
+ pkt->shdr.session_id);
+
+ inst->error = pkt->event_data1;
+ inst->ops->event_notify(inst, EVT_SESSION_ERROR, NULL);
+ break;
+ }
+}
+
+static void hfi_event_notify(struct venus_core *core, struct venus_inst *inst,
+ void *packet)
+{
+ struct hfi_msg_event_notify_pkt *pkt = packet;
+
+ if (!packet)
+ return;
+
+ switch (pkt->event_id) {
+ case HFI_EVENT_SYS_ERROR:
+ event_sys_error(core, EVT_SYS_ERROR, pkt);
+ break;
+ case HFI_EVENT_SESSION_ERROR:
+ event_session_error(core, inst, pkt);
+ break;
+ case HFI_EVENT_SESSION_SEQUENCE_CHANGED:
+ event_seq_changed(core, inst, pkt);
+ break;
+ case HFI_EVENT_RELEASE_BUFFER_REFERENCE:
+ event_release_buffer_ref(core, inst, pkt);
+ break;
+ case HFI_EVENT_SESSION_PROPERTY_CHANGED:
+ break;
+ default:
+ break;
+ }
+}
+
+static void hfi_sys_init_done(struct venus_core *core, struct venus_inst *inst,
+ void *packet)
+{
+ struct hfi_msg_sys_init_done_pkt *pkt = packet;
+ u32 rem_bytes, read_bytes = 0, num_properties;
+ u32 error, ptype;
+ u8 *data;
+
+ error = pkt->error_type;
+ if (error != HFI_ERR_NONE)
+ goto err_no_prop;
+
+ num_properties = pkt->num_properties;
+
+ if (!num_properties) {
+ error = HFI_ERR_SYS_INVALID_PARAMETER;
+ goto err_no_prop;
+ }
+
+ rem_bytes = pkt->hdr.size - sizeof(*pkt) + sizeof(u32);
+
+ if (!rem_bytes) {
+ /* missing property data */
+ error = HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+ goto err_no_prop;
+ }
+
+ data = (u8 *)&pkt->data[0];
+
+ if (core->res->hfi_version == HFI_VERSION_3XX)
+ goto err_no_prop;
+
+ while (num_properties && rem_bytes >= sizeof(u32)) {
+ ptype = *((u32 *)data);
+ data += sizeof(u32);
+
+ switch (ptype) {
+ case HFI_PROPERTY_PARAM_CODEC_SUPPORTED: {
+ struct hfi_codec_supported *prop;
+
+ prop = (struct hfi_codec_supported *)data;
+
+ if (rem_bytes < sizeof(*prop)) {
+ error = HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+ break;
+ }
+
+ read_bytes += sizeof(*prop) + sizeof(u32);
+ core->dec_codecs = prop->dec_codecs;
+ core->enc_codecs = prop->enc_codecs;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED: {
+ struct hfi_max_sessions_supported *prop;
+
+ if (rem_bytes < sizeof(*prop)) {
+ error = HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
+ break;
+ }
+
+ prop = (struct hfi_max_sessions_supported *)data;
+ read_bytes += sizeof(*prop) + sizeof(u32);
+ core->max_sessions_supported = prop->max_sessions;
+ break;
+ }
+ default:
+ error = HFI_ERR_SYS_INVALID_PARAMETER;
+ break;
+ }
+
+ if (!error) {
+ rem_bytes -= read_bytes;
+ data += read_bytes;
+ num_properties--;
+ }
+ }
+
+err_no_prop:
+ core->error = error;
+ complete(&core->done);
+}
+
+static void
+sys_get_prop_image_version(struct device *dev,
+ struct hfi_msg_sys_property_info_pkt *pkt)
+{
+ int req_bytes;
+
+ req_bytes = pkt->hdr.size - sizeof(*pkt);
+
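+	/* the image version property is expected to carry at least 128 bytes */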
+ if (req_bytes < 128 || !pkt->data[1] || pkt->num_properties > 1)
+ /* bad packet */
+ return;
+
+ dev_dbg(dev, "F/W version: %s\n", (u8 *)&pkt->data[1]);
+}
+
+static void hfi_sys_property_info(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_sys_property_info_pkt *pkt = packet;
+ struct device *dev = core->dev;
+
+ if (!pkt->num_properties) {
+ dev_dbg(dev, "%s: no properties\n", __func__);
+ return;
+ }
+
+ switch (pkt->data[0]) {
+ case HFI_PROPERTY_SYS_IMAGE_VERSION:
+ sys_get_prop_image_version(dev, pkt);
+ break;
+ default:
+ dev_dbg(dev, "%s: unknown property data\n", __func__);
+ break;
+ }
+}
+
+static void hfi_sys_rel_resource_done(struct venus_core *core,
+ struct venus_inst *inst,
+ void *packet)
+{
+ struct hfi_msg_sys_release_resource_done_pkt *pkt = packet;
+
+ core->error = pkt->error_type;
+ complete(&core->done);
+}
+
+static void hfi_sys_ping_done(struct venus_core *core, struct venus_inst *inst,
+ void *packet)
+{
+ struct hfi_msg_sys_ping_ack_pkt *pkt = packet;
+
+ core->error = HFI_ERR_NONE;
+
+ if (pkt->client_data != 0xbeef)
+ core->error = HFI_ERR_SYS_FATAL;
+
+ complete(&core->done);
+}
+
+static void hfi_sys_idle_done(struct venus_core *core, struct venus_inst *inst,
+ void *packet)
+{
+ dev_dbg(core->dev, "sys idle\n");
+}
+
+static void hfi_sys_pc_prepare_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_sys_pc_prep_done_pkt *pkt = packet;
+
+ dev_dbg(core->dev, "pc prepare done (error %x)\n", pkt->error_type);
+}
+
+static void
+hfi_copy_cap_prop(struct hfi_capability *in, struct venus_inst *inst)
+{
+ if (!in || !inst)
+ return;
+
+ switch (in->capability_type) {
+ case HFI_CAPABILITY_FRAME_WIDTH:
+ inst->cap_width = *in;
+ break;
+ case HFI_CAPABILITY_FRAME_HEIGHT:
+ inst->cap_height = *in;
+ break;
+ case HFI_CAPABILITY_MBS_PER_FRAME:
+ inst->cap_mbs_per_frame = *in;
+ break;
+ case HFI_CAPABILITY_MBS_PER_SECOND:
+ inst->cap_mbs_per_sec = *in;
+ break;
+ case HFI_CAPABILITY_FRAMERATE:
+ inst->cap_framerate = *in;
+ break;
+ case HFI_CAPABILITY_SCALE_X:
+ inst->cap_scale_x = *in;
+ break;
+ case HFI_CAPABILITY_SCALE_Y:
+ inst->cap_scale_y = *in;
+ break;
+ case HFI_CAPABILITY_BITRATE:
+ inst->cap_bitrate = *in;
+ break;
+ case HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS:
+ inst->cap_hier_p = *in;
+ break;
+ case HFI_CAPABILITY_ENC_LTR_COUNT:
+ inst->cap_ltr_count = *in;
+ break;
+ case HFI_CAPABILITY_CP_OUTPUT2_THRESH:
+ inst->cap_secure_output2_threshold = *in;
+ break;
+ default:
+ break;
+ }
+}
+
+static unsigned int
+session_get_prop_profile_level(struct hfi_msg_session_property_info_pkt *pkt,
+ struct hfi_profile_level *profile_level)
+{
+ struct hfi_profile_level *hfi;
+ u32 req_bytes;
+
+ req_bytes = pkt->shdr.hdr.size - sizeof(*pkt);
+
+ if (!req_bytes || req_bytes % sizeof(struct hfi_profile_level))
+ /* bad packet */
+ return HFI_ERR_SESSION_INVALID_PARAMETER;
+
+ hfi = (struct hfi_profile_level *)&pkt->data[1];
+ profile_level->profile = hfi->profile;
+ profile_level->level = hfi->level;
+
+ return HFI_ERR_NONE;
+}
+
+static unsigned int
+session_get_prop_buf_req(struct hfi_msg_session_property_info_pkt *pkt,
+ struct hfi_buffer_requirements *bufreq)
+{
+ struct hfi_buffer_requirements *buf_req;
+ u32 req_bytes;
+ unsigned int idx = 0;
+
+ req_bytes = pkt->shdr.hdr.size - sizeof(*pkt);
+
+ if (!req_bytes || req_bytes % sizeof(*buf_req) || !pkt->data[1])
+ /* bad packet */
+ return HFI_ERR_SESSION_INVALID_PARAMETER;
+
+ buf_req = (struct hfi_buffer_requirements *)&pkt->data[1];
+ if (!buf_req)
+ return HFI_ERR_SESSION_INVALID_PARAMETER;
+
+ while (req_bytes) {
+ memcpy(&bufreq[idx], buf_req, sizeof(*bufreq));
+ idx++;
+
+ if (idx > HFI_BUFFER_TYPE_MAX)
+ return HFI_ERR_SESSION_INVALID_PARAMETER;
+
+ req_bytes -= sizeof(struct hfi_buffer_requirements);
+ buf_req++;
+ }
+
+ return HFI_ERR_NONE;
+}
+
+static void hfi_session_prop_info(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_property_info_pkt *pkt = packet;
+ struct device *dev = core->dev;
+ union hfi_get_property *hprop = &inst->hprop;
+ unsigned int error = HFI_ERR_NONE;
+
+ if (!pkt->num_properties) {
+ error = HFI_ERR_SESSION_INVALID_PARAMETER;
+ dev_err(dev, "%s: no properties\n", __func__);
+ goto done;
+ }
+
+ switch (pkt->data[0]) {
+ case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
+ memset(hprop->bufreq, 0, sizeof(hprop->bufreq));
+ error = session_get_prop_buf_req(pkt, hprop->bufreq);
+ break;
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
+ memset(&hprop->profile_level, 0, sizeof(hprop->profile_level));
+ error = session_get_prop_profile_level(pkt,
+ &hprop->profile_level);
+ break;
+ case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
+ break;
+ default:
+ dev_dbg(dev, "%s: unknown property id:%x\n", __func__,
+ pkt->data[0]);
+ return;
+ }
+
+done:
+ inst->error = error;
+ complete(&inst->done);
+}
+
+static u32 init_done_read_prop(struct venus_core *core, struct venus_inst *inst,
+ struct hfi_msg_session_init_done_pkt *pkt)
+{
+ struct device *dev = core->dev;
+ u32 rem_bytes, num_props;
+ u32 ptype, next_offset = 0;
+ u32 err;
+ u8 *data;
+
+ rem_bytes = pkt->shdr.hdr.size - sizeof(*pkt) + sizeof(u32);
+ if (!rem_bytes) {
+ dev_err(dev, "%s: missing property info\n", __func__);
+ return HFI_ERR_SESSION_INSUFFICIENT_RESOURCES;
+ }
+
+ err = pkt->error_type;
+ if (err)
+ return err;
+
+ data = (u8 *)&pkt->data[0];
+ num_props = pkt->num_properties;
+
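+ /*
+ * The payload is a sequence of property blocks, each starting with
+ * a u32 property type followed by a type-specific structure;
+ * consume them until all advertised properties are parsed or the
+ * remaining bytes run out.
+ */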
+ while (err == HFI_ERR_NONE && num_props && rem_bytes >= sizeof(u32)) {
+ ptype = *((u32 *)data);
+ next_offset = sizeof(u32);
+
+ switch (ptype) {
+ case HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED: {
+ struct hfi_codec_mask_supported *masks =
+ (struct hfi_codec_mask_supported *)
+ (data + next_offset);
+
+ next_offset += sizeof(*masks);
+ num_props--;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED: {
+ struct hfi_capabilities *caps;
+ struct hfi_capability *cap;
+ u32 num_caps;
+
+ if ((rem_bytes - next_offset) < sizeof(*cap)) {
+ err = HFI_ERR_SESSION_INVALID_PARAMETER;
+ break;
+ }
+
+ caps = (struct hfi_capabilities *)(data + next_offset);
+
+ num_caps = caps->num_capabilities;
+ cap = &caps->data[0];
+ next_offset += sizeof(u32);
+
+ while (num_caps &&
+ (rem_bytes - next_offset) >= sizeof(u32)) {
+ hfi_copy_cap_prop(cap, inst);
+ cap++;
+ next_offset += sizeof(*cap);
+ num_caps--;
+ }
+ num_props--;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED: {
+ struct hfi_uncompressed_format_supported *prop =
+ (struct hfi_uncompressed_format_supported *)
+ (data + next_offset);
+ u32 num_fmt_entries;
+ u8 *fmt;
+ struct hfi_uncompressed_plane_info *inf;
+
+ if ((rem_bytes - next_offset) < sizeof(*prop)) {
+ err = HFI_ERR_SESSION_INVALID_PARAMETER;
+ break;
+ }
+
+ num_fmt_entries = prop->format_entries;
+ next_offset = sizeof(*prop) - sizeof(u32);
+ fmt = (u8 *)&prop->format_info[0];
+
+ dev_dbg(dev, "uncomm format support num entries:%u\n",
+ num_fmt_entries);
+
+ while (num_fmt_entries) {
+ struct hfi_uncompressed_plane_constraints *cnts;
+ u32 bytes_to_skip;
+
+ inf = (struct hfi_uncompressed_plane_info *)fmt;
+
+ if ((rem_bytes - next_offset) < sizeof(*inf)) {
+ err = HFI_ERR_SESSION_INVALID_PARAMETER;
+ break;
+ }
+
+ dev_dbg(dev, "plane info: fmt:%x, planes:%x\n",
+ inf->format, inf->num_planes);
+
+ cnts = &inf->plane_format[0];
+ dev_dbg(dev, "%u %u %u %u\n",
+ cnts->stride_multiples,
+ cnts->max_stride,
+ cnts->min_plane_buffer_height_multiple,
+ cnts->buffer_alignment);
+
+ bytes_to_skip = sizeof(*inf) - sizeof(*cnts) +
+ inf->num_planes * sizeof(*cnts);
+
+ fmt += bytes_to_skip;
+ next_offset += bytes_to_skip;
+ num_fmt_entries--;
+ }
+ num_props--;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED: {
+ struct hfi_properties_supported *prop =
+ (struct hfi_properties_supported *)
+ (data + next_offset);
+
+ next_offset += sizeof(*prop) - sizeof(u32)
+ + prop->num_properties * sizeof(u32);
+ num_props--;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED: {
+ struct hfi_profile_level_supported *prop =
+ (struct hfi_profile_level_supported *)
+ (data + next_offset);
+ struct hfi_profile_level *pl;
+ unsigned int prop_count = 0;
+ unsigned int count = 0;
+ u8 *ptr;
+
+ ptr = (u8 *)&prop->profile_level[0];
+ prop_count = prop->profile_count;
+
+ if (prop_count > HFI_MAX_PROFILE_COUNT)
+ prop_count = HFI_MAX_PROFILE_COUNT;
+
+ while (prop_count) {
+ ptr++;
+ pl = (struct hfi_profile_level *)ptr;
+
+ inst->pl[count].profile = pl->profile;
+ inst->pl[count].level = pl->level;
+ prop_count--;
+ count++;
+ ptr += sizeof(*pl) / sizeof(u32);
+ }
+
+ inst->pl_count = count;
+ next_offset += sizeof(*prop) - sizeof(*pl) +
+ prop->profile_count * sizeof(*pl);
+
+ num_props--;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED: {
+ next_offset +=
+ sizeof(struct hfi_interlace_format_supported);
+ num_props--;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED: {
+ struct hfi_nal_stream_format *nal =
+ (struct hfi_nal_stream_format *)
+ (data + next_offset);
+ dev_dbg(dev, "NAL format: %x\n", nal->format);
+ next_offset += sizeof(*nal);
+ num_props--;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT: {
+ next_offset += sizeof(u32);
+ num_props--;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE: {
+ u32 *max_seq_sz = (u32 *)(data + next_offset);
+
+ dev_dbg(dev, "max seq header sz: %x\n", *max_seq_sz);
+ next_offset += sizeof(u32);
+ num_props--;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH: {
+ next_offset += sizeof(struct hfi_intra_refresh);
+ num_props--;
+ break;
+ }
+ case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED: {
+ struct hfi_buffer_alloc_mode_supported *prop =
+ (struct hfi_buffer_alloc_mode_supported *)
+ (data + next_offset);
+ unsigned int i;
+
+ for (i = 0; i < prop->num_entries; i++) {
+ if (prop->buffer_type == HFI_BUFFER_OUTPUT ||
+ prop->buffer_type == HFI_BUFFER_OUTPUT2) {
+ switch (prop->data[i]) {
+ case HFI_BUFFER_MODE_STATIC:
+ inst->cap_bufs_mode_static = 1;
+ break;
+ case HFI_BUFFER_MODE_DYNAMIC:
+ inst->cap_bufs_mode_dynamic = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ next_offset += sizeof(*prop) -
+ sizeof(u32) + prop->num_entries * sizeof(u32);
+ num_props--;
+ break;
+ }
+ default:
+ dev_dbg(dev, "%s: default case %#x\n", __func__, ptype);
+ break;
+ }
+
+ rem_bytes -= next_offset;
+ data += next_offset;
+ }
+
+ return err;
+}
+
+static void hfi_session_init_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_init_done_pkt *pkt = packet;
+ unsigned int error;
+
+ error = pkt->error_type;
+ if (error != HFI_ERR_NONE)
+ goto done;
+
+ if (core->res->hfi_version != HFI_VERSION_1XX)
+ goto done;
+
+ error = init_done_read_prop(core, inst, pkt);
+
+done:
+ inst->error = error;
+ complete(&inst->done);
+}
+
+static void hfi_session_load_res_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_load_resources_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ complete(&inst->done);
+}
+
+static void hfi_session_flush_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_flush_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ complete(&inst->done);
+}
+
+static void hfi_session_etb_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_empty_buffer_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ inst->ops->buf_done(inst, HFI_BUFFER_INPUT, pkt->input_tag,
+ pkt->filled_len, pkt->offset, 0, 0, 0);
+}
+
+static void hfi_session_ftb_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ u32 session_type = inst->session_type;
+ u64 timestamp_us = 0;
+ u32 timestamp_hi = 0, timestamp_lo = 0;
+ unsigned int error;
+ u32 flags = 0, hfi_flags = 0, offset = 0, filled_len = 0;
+ u32 pic_type = 0, buffer_type = 0, output_tag = -1;
+
+ if (session_type == VIDC_SESSION_TYPE_ENC) {
+ struct hfi_msg_session_fbd_compressed_pkt *pkt = packet;
+
+ timestamp_hi = pkt->time_stamp_hi;
+ timestamp_lo = pkt->time_stamp_lo;
+ hfi_flags = pkt->flags;
+ offset = pkt->offset;
+ filled_len = pkt->filled_len;
+ pic_type = pkt->picture_type;
+ output_tag = pkt->output_tag;
+ buffer_type = HFI_BUFFER_OUTPUT;
+
+ error = pkt->error_type;
+ } else if (session_type == VIDC_SESSION_TYPE_DEC) {
+ struct hfi_msg_session_fbd_uncompressed_plane0_pkt *pkt =
+ packet;
+
+ timestamp_hi = pkt->time_stamp_hi;
+ timestamp_lo = pkt->time_stamp_lo;
+ hfi_flags = pkt->flags;
+ offset = pkt->offset;
+ filled_len = pkt->filled_len;
+ pic_type = pkt->picture_type;
+ output_tag = pkt->output_tag;
+
+ if (pkt->stream_id == 0)
+ buffer_type = HFI_BUFFER_OUTPUT;
+ else if (pkt->stream_id == 1)
+ buffer_type = HFI_BUFFER_OUTPUT2;
+
+ error = pkt->error_type;
+ } else {
+ error = HFI_ERR_SESSION_INVALID_PARAMETER;
+ }
+
+ if (buffer_type != HFI_BUFFER_OUTPUT)
+ goto done;
+
+ if (hfi_flags & HFI_BUFFERFLAG_EOS)
+ flags |= V4L2_BUF_FLAG_LAST;
+
+ switch (pic_type) {
+ case HFI_PICTURE_IDR:
+ case HFI_PICTURE_I:
+ flags |= V4L2_BUF_FLAG_KEYFRAME;
+ break;
+ case HFI_PICTURE_P:
+ flags |= V4L2_BUF_FLAG_PFRAME;
+ break;
+ case HFI_PICTURE_B:
+ flags |= V4L2_BUF_FLAG_BFRAME;
+ break;
+ case HFI_FRAME_NOTCODED:
+ case HFI_UNUSED_PICT:
+ case HFI_FRAME_YUV:
+ default:
+ break;
+ }
+
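+ /* assemble the 64-bit timestamp from the two 32-bit halves */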
+ if (!(hfi_flags & HFI_BUFFERFLAG_TIMESTAMPINVALID) && filled_len) {
+ timestamp_us = timestamp_hi;
+ timestamp_us = (timestamp_us << 32) | timestamp_lo;
+ }
+
+done:
+ inst->error = error;
+ inst->ops->buf_done(inst, buffer_type, output_tag, filled_len,
+ offset, flags, hfi_flags, timestamp_us);
+}
+
+static void hfi_session_start_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_start_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ complete(&inst->done);
+}
+
+static void hfi_session_stop_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_stop_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ complete(&inst->done);
+}
+
+static void hfi_session_rel_res_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_release_resources_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ complete(&inst->done);
+}
+
+static void hfi_session_rel_buf_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_release_buffers_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ complete(&inst->done);
+}
+
+static void hfi_session_end_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_end_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ complete(&inst->done);
+}
+
+static void hfi_session_abort_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_sys_session_abort_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ complete(&inst->done);
+}
+
+static void hfi_session_get_seq_hdr_done(struct venus_core *core,
+ struct venus_inst *inst, void *packet)
+{
+ struct hfi_msg_session_get_sequence_hdr_done_pkt *pkt = packet;
+
+ inst->error = pkt->error_type;
+ complete(&inst->done);
+}
+
+struct hfi_done_handler {
+ u32 pkt;
+ u32 pkt_sz;
+ u32 pkt_sz2;
+ void (*done)(struct venus_core *, struct venus_inst *, void *);
+ bool is_sys_pkt;
+};
+
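+/*
+ * Dispatch table mapping HFI message types to their done() handlers.
+ * pkt_sz (and pkt_sz2 where a message has two packet layouts, e.g.
+ * FILL_BUFFER) is the minimum acceptable packet size; is_sys_pkt marks
+ * messages that are not tied to a particular session instance.
+ */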
+static const struct hfi_done_handler handlers[] = {
+ {.pkt = HFI_MSG_EVENT_NOTIFY,
+ .pkt_sz = sizeof(struct hfi_msg_event_notify_pkt),
+ .done = hfi_event_notify,
+ },
+ {.pkt = HFI_MSG_SYS_INIT,
+ .pkt_sz = sizeof(struct hfi_msg_sys_init_done_pkt),
+ .done = hfi_sys_init_done,
+ .is_sys_pkt = true,
+ },
+ {.pkt = HFI_MSG_SYS_PROPERTY_INFO,
+ .pkt_sz = sizeof(struct hfi_msg_sys_property_info_pkt),
+ .done = hfi_sys_property_info,
+ .is_sys_pkt = true,
+ },
+ {.pkt = HFI_MSG_SYS_RELEASE_RESOURCE,
+ .pkt_sz = sizeof(struct hfi_msg_sys_release_resource_done_pkt),
+ .done = hfi_sys_rel_resource_done,
+ .is_sys_pkt = true,
+ },
+ {.pkt = HFI_MSG_SYS_PING_ACK,
+ .pkt_sz = sizeof(struct hfi_msg_sys_ping_ack_pkt),
+ .done = hfi_sys_ping_done,
+ .is_sys_pkt = true,
+ },
+ {.pkt = HFI_MSG_SYS_IDLE,
+ .pkt_sz = sizeof(struct hfi_msg_sys_idle_pkt),
+ .done = hfi_sys_idle_done,
+ .is_sys_pkt = true,
+ },
+ {.pkt = HFI_MSG_SYS_PC_PREP,
+ .pkt_sz = sizeof(struct hfi_msg_sys_pc_prep_done_pkt),
+ .done = hfi_sys_pc_prepare_done,
+ .is_sys_pkt = true,
+ },
+ {.pkt = HFI_MSG_SYS_SESSION_INIT,
+ .pkt_sz = sizeof(struct hfi_msg_session_init_done_pkt),
+ .done = hfi_session_init_done,
+ },
+ {.pkt = HFI_MSG_SYS_SESSION_END,
+ .pkt_sz = sizeof(struct hfi_msg_session_end_done_pkt),
+ .done = hfi_session_end_done,
+ },
+ {.pkt = HFI_MSG_SESSION_LOAD_RESOURCES,
+ .pkt_sz = sizeof(struct hfi_msg_session_load_resources_done_pkt),
+ .done = hfi_session_load_res_done,
+ },
+ {.pkt = HFI_MSG_SESSION_START,
+ .pkt_sz = sizeof(struct hfi_msg_session_start_done_pkt),
+ .done = hfi_session_start_done,
+ },
+ {.pkt = HFI_MSG_SESSION_STOP,
+ .pkt_sz = sizeof(struct hfi_msg_session_stop_done_pkt),
+ .done = hfi_session_stop_done,
+ },
+ {.pkt = HFI_MSG_SYS_SESSION_ABORT,
+ .pkt_sz = sizeof(struct hfi_msg_sys_session_abort_done_pkt),
+ .done = hfi_session_abort_done,
+ },
+ {.pkt = HFI_MSG_SESSION_EMPTY_BUFFER,
+ .pkt_sz = sizeof(struct hfi_msg_session_empty_buffer_done_pkt),
+ .done = hfi_session_etb_done,
+ },
+ {.pkt = HFI_MSG_SESSION_FILL_BUFFER,
+ .pkt_sz = sizeof(struct hfi_msg_session_fbd_uncompressed_plane0_pkt),
+ .pkt_sz2 = sizeof(struct hfi_msg_session_fbd_compressed_pkt),
+ .done = hfi_session_ftb_done,
+ },
+ {.pkt = HFI_MSG_SESSION_FLUSH,
+ .pkt_sz = sizeof(struct hfi_msg_session_flush_done_pkt),
+ .done = hfi_session_flush_done,
+ },
+ {.pkt = HFI_MSG_SESSION_PROPERTY_INFO,
+ .pkt_sz = sizeof(struct hfi_msg_session_property_info_pkt),
+ .done = hfi_session_prop_info,
+ },
+ {.pkt = HFI_MSG_SESSION_RELEASE_RESOURCES,
+ .pkt_sz = sizeof(struct hfi_msg_session_release_resources_done_pkt),
+ .done = hfi_session_rel_res_done,
+ },
+ {.pkt = HFI_MSG_SESSION_GET_SEQUENCE_HEADER,
+ .pkt_sz = sizeof(struct hfi_msg_session_get_sequence_hdr_done_pkt),
+ .done = hfi_session_get_seq_hdr_done,
+ },
+ {.pkt = HFI_MSG_SESSION_RELEASE_BUFFERS,
+ .pkt_sz = sizeof(struct hfi_msg_session_release_buffers_done_pkt),
+ .done = hfi_session_rel_buf_done,
+ },
+};
+
+void hfi_process_watchdog_timeout(struct venus_core *core)
+{
+ event_sys_error(core, EVT_SYS_WATCHDOG_TIMEOUT, NULL);
+}
+
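+/*
+ * The firmware identifies a session by the 32-bit hash of the instance
+ * pointer (hash32_ptr()), so find the instance whose hash matches the
+ * session_id carried in the packet.
+ */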
+static struct venus_inst *to_instance(struct venus_core *core, u32 session_id)
+{
+ struct venus_inst *inst;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list)
+ if (hash32_ptr(inst) == session_id) {
+ mutex_unlock(&core->lock);
+ return inst;
+ }
+ mutex_unlock(&core->lock);
+
+ return NULL;
+}
+
+u32 hfi_process_msg_packet(struct venus_core *core, struct hfi_pkt_hdr *hdr)
+{
+ const struct hfi_done_handler *handler;
+ struct device *dev = core->dev;
+ struct venus_inst *inst;
+ bool found = false;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+ handler = &handlers[i];
+ if (handler->pkt != hdr->pkt_type)
+ continue;
+ found = true;
+ break;
+ }
+
+ if (!found)
+ return hdr->pkt_type;
+
+ if (hdr->size && hdr->size < handler->pkt_sz &&
+ hdr->size < handler->pkt_sz2) {
+ dev_err(dev, "bad packet size (%d should be %d, pkt type:%x)\n",
+ hdr->size, handler->pkt_sz, hdr->pkt_type);
+
+ return hdr->pkt_type;
+ }
+
+ if (handler->is_sys_pkt) {
+ inst = NULL;
+ } else {
+ struct hfi_session_pkt *pkt;
+
+ pkt = (struct hfi_session_pkt *)hdr;
+ inst = to_instance(core, pkt->shdr.session_id);
+
+ if (!inst)
+ dev_warn(dev, "no valid instance(pkt session_id:%x, pkt:%x)\n",
+ pkt->shdr.session_id,
+ handler ? handler->pkt : 0);
+
+ /*
+ * An event of type HFI_EVENT_SYS_ERROR will not have any
+ * session associated with it.
+ */
+ if (!inst && hdr->pkt_type != HFI_MSG_EVENT_NOTIFY) {
+ dev_err(dev, "got invalid session id:%x\n",
+ pkt->shdr.session_id);
+ goto invalid_session;
+ }
+ }
+
+ handler->done(core, inst, hdr);
+
+invalid_session:
+ return hdr->pkt_type;
+}
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.h b/drivers/media/platform/qcom/venus/hfi_msgs.h
new file mode 100644
index 000000000000..14d9a3979b14
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __VENUS_HFI_MSGS_H__
+#define __VENUS_HFI_MSGS_H__
+
+/* message calls */
+#define HFI_MSG_SYS_INIT 0x20001
+#define HFI_MSG_SYS_PC_PREP 0x20002
+#define HFI_MSG_SYS_RELEASE_RESOURCE 0x20003
+#define HFI_MSG_SYS_DEBUG 0x20004
+#define HFI_MSG_SYS_SESSION_INIT 0x20006
+#define HFI_MSG_SYS_SESSION_END 0x20007
+#define HFI_MSG_SYS_IDLE 0x20008
+#define HFI_MSG_SYS_COV 0x20009
+#define HFI_MSG_SYS_PROPERTY_INFO 0x2000a
+
+#define HFI_MSG_EVENT_NOTIFY 0x21001
+#define HFI_MSG_SESSION_GET_SEQUENCE_HEADER 0x21002
+
+#define HFI_MSG_SYS_PING_ACK 0x220002
+#define HFI_MSG_SYS_SESSION_ABORT 0x220004
+
+#define HFI_MSG_SESSION_LOAD_RESOURCES 0x221001
+#define HFI_MSG_SESSION_START 0x221002
+#define HFI_MSG_SESSION_STOP 0x221003
+#define HFI_MSG_SESSION_SUSPEND 0x221004
+#define HFI_MSG_SESSION_RESUME 0x221005
+#define HFI_MSG_SESSION_FLUSH 0x221006
+#define HFI_MSG_SESSION_EMPTY_BUFFER 0x221007
+#define HFI_MSG_SESSION_FILL_BUFFER 0x221008
+#define HFI_MSG_SESSION_PROPERTY_INFO 0x221009
+#define HFI_MSG_SESSION_RELEASE_RESOURCES 0x22100a
+#define HFI_MSG_SESSION_PARSE_SEQUENCE_HEADER 0x22100b
+#define HFI_MSG_SESSION_RELEASE_BUFFERS 0x22100c
+
+#define HFI_PICTURE_I 0x00000001
+#define HFI_PICTURE_P 0x00000002
+#define HFI_PICTURE_B 0x00000004
+#define HFI_PICTURE_IDR 0x00000008
+#define HFI_FRAME_NOTCODED 0x7f002000
+#define HFI_FRAME_YUV 0x7f004000
+#define HFI_UNUSED_PICT 0x10000000
+
+/* message packets */
+struct hfi_msg_event_notify_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 event_id;
+ u32 event_data1;
+ u32 event_data2;
+ u32 ext_event_data[1];
+};
+
+struct hfi_msg_event_release_buffer_ref_pkt {
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 output_tag;
+};
+
+struct hfi_msg_sys_init_done_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 error_type;
+ u32 num_properties;
+ u32 data[1];
+};
+
+struct hfi_msg_sys_pc_prep_done_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 error_type;
+};
+
+struct hfi_msg_sys_release_resource_done_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 resource_handle;
+ u32 error_type;
+};
+
+struct hfi_msg_session_init_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+ u32 num_properties;
+ u32 data[1];
+};
+
+struct hfi_msg_session_end_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+};
+
+struct hfi_msg_session_get_sequence_hdr_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+ u32 header_len;
+ u32 sequence_header;
+};
+
+struct hfi_msg_sys_session_abort_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+};
+
+struct hfi_msg_sys_idle_pkt {
+ struct hfi_pkt_hdr hdr;
+};
+
+struct hfi_msg_sys_ping_ack_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 client_data;
+};
+
+struct hfi_msg_sys_property_info_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 num_properties;
+ u32 data[1];
+};
+
+struct hfi_msg_session_load_resources_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+};
+
+struct hfi_msg_session_start_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+};
+
+struct hfi_msg_session_stop_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+};
+
+struct hfi_msg_session_suspend_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+};
+
+struct hfi_msg_session_resume_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+};
+
+struct hfi_msg_session_flush_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+ u32 flush_type;
+};
+
+struct hfi_msg_session_empty_buffer_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+ u32 offset;
+ u32 filled_len;
+ u32 input_tag;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data[0];
+};
+
+struct hfi_msg_session_fbd_compressed_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u32 error_type;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 stats;
+ u32 offset;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 input_tag;
+ u32 output_tag;
+ u32 picture_type;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data[0];
+};
+
+struct hfi_msg_session_fbd_uncompressed_plane0_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 stream_id;
+ u32 view_id;
+ u32 error_type;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u32 flags;
+ u32 mark_target;
+ u32 mark_data;
+ u32 stats;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u32 frame_width;
+ u32 frame_height;
+ u32 start_x_coord;
+ u32 start_y_coord;
+ u32 input_tag;
+ u32 input_tag2;
+ u32 output_tag;
+ u32 picture_type;
+ u32 packet_buffer;
+ u32 extradata_buffer;
+ u32 data[0];
+};
+
+struct hfi_msg_session_fbd_uncompressed_plane1_pkt {
+ u32 flags;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u32 packet_buffer2;
+ u32 data[0];
+};
+
+struct hfi_msg_session_fbd_uncompressed_plane2_pkt {
+ u32 flags;
+ u32 alloc_len;
+ u32 filled_len;
+ u32 offset;
+ u32 packet_buffer3;
+ u32 data[0];
+};
+
+struct hfi_msg_session_parse_sequence_header_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+ u32 num_properties;
+ u32 data[1];
+};
+
+struct hfi_msg_session_property_info_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 num_properties;
+ u32 data[1];
+};
+
+struct hfi_msg_session_release_resources_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+};
+
+struct hfi_msg_session_release_buffers_done_pkt {
+ struct hfi_session_hdr_pkt shdr;
+ u32 error_type;
+ u32 num_buffers;
+ u32 buffer_info[1];
+};
+
+struct hfi_msg_sys_debug_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 msg_type;
+ u32 msg_size;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u8 msg_data[1];
+};
+
+struct hfi_msg_sys_coverage_pkt {
+ struct hfi_pkt_hdr hdr;
+ u32 msg_size;
+ u32 time_stamp_hi;
+ u32 time_stamp_lo;
+ u8 msg_data[1];
+};
+
+struct venus_core;
+struct hfi_pkt_hdr;
+
+void hfi_process_watchdog_timeout(struct venus_core *core);
+u32 hfi_process_msg_packet(struct venus_core *core, struct hfi_pkt_hdr *hdr);
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
new file mode 100644
index 000000000000..1caae8feaa36
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -0,0 +1,1572 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/qcom_scm.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include "hfi_cmds.h"
+#include "hfi_msgs.h"
+#include "hfi_venus.h"
+#include "hfi_venus_io.h"
+
+#define HFI_MASK_QHDR_TX_TYPE 0xff000000
+#define HFI_MASK_QHDR_RX_TYPE 0x00ff0000
+#define HFI_MASK_QHDR_PRI_TYPE 0x0000ff00
+#define HFI_MASK_QHDR_ID_TYPE 0x000000ff
+
+#define HFI_HOST_TO_CTRL_CMD_Q 0
+#define HFI_CTRL_TO_HOST_MSG_Q 1
+#define HFI_CTRL_TO_HOST_DBG_Q 2
+#define HFI_MASK_QHDR_STATUS 0x000000ff
+
+#define IFACEQ_NUM 3
+#define IFACEQ_CMD_IDX 0
+#define IFACEQ_MSG_IDX 1
+#define IFACEQ_DBG_IDX 2
+#define IFACEQ_MAX_BUF_COUNT 50
+#define IFACEQ_MAX_PARALLEL_CLNTS 16
+#define IFACEQ_DFLT_QHDR 0x01010000
+
+#define POLL_INTERVAL_US 50
+
+#define IFACEQ_MAX_PKT_SIZE 1024
+#define IFACEQ_MED_PKT_SIZE 768
+#define IFACEQ_MIN_PKT_SIZE 8
+#define IFACEQ_VAR_SMALL_PKT_SIZE 100
+#define IFACEQ_VAR_LARGE_PKT_SIZE 512
+#define IFACEQ_VAR_HUGE_PKT_SIZE (1024 * 12)
+
+enum tzbsp_video_state {
+ TZBSP_VIDEO_STATE_SUSPEND = 0,
+ TZBSP_VIDEO_STATE_RESUME
+};
+
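+/*
+ * Host/firmware shared memory layout: a queue table header followed by
+ * one hfi_queue_header per interface queue (command, message, debug),
+ * with the queue ring buffers placed after the table. Queue sizes and
+ * read/write indices are expressed in dwords.
+ */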
+struct hfi_queue_table_header {
+ u32 version;
+ u32 size;
+ u32 qhdr0_offset;
+ u32 qhdr_size;
+ u32 num_q;
+ u32 num_active_q;
+};
+
+struct hfi_queue_header {
+ u32 status;
+ u32 start_addr;
+ u32 type;
+ u32 q_size;
+ u32 pkt_size;
+ u32 pkt_drop_cnt;
+ u32 rx_wm;
+ u32 tx_wm;
+ u32 rx_req;
+ u32 tx_req;
+ u32 rx_irq_status;
+ u32 tx_irq_status;
+ u32 read_idx;
+ u32 write_idx;
+};
+
+#define IFACEQ_TABLE_SIZE \
+ (sizeof(struct hfi_queue_table_header) + \
+ sizeof(struct hfi_queue_header) * IFACEQ_NUM)
+
+#define IFACEQ_QUEUE_SIZE (IFACEQ_MAX_PKT_SIZE * \
+ IFACEQ_MAX_BUF_COUNT * IFACEQ_MAX_PARALLEL_CLNTS)
+
+#define IFACEQ_GET_QHDR_START_ADDR(ptr, i) \
+ (void *)(((ptr) + sizeof(struct hfi_queue_table_header)) + \
+ ((i) * sizeof(struct hfi_queue_header)))
+
+#define QDSS_SIZE SZ_4K
+#define SFR_SIZE SZ_4K
+#define QUEUE_SIZE \
+ (IFACEQ_TABLE_SIZE + (IFACEQ_QUEUE_SIZE * IFACEQ_NUM))
+
+#define ALIGNED_QDSS_SIZE ALIGN(QDSS_SIZE, SZ_4K)
+#define ALIGNED_SFR_SIZE ALIGN(SFR_SIZE, SZ_4K)
+#define ALIGNED_QUEUE_SIZE ALIGN(QUEUE_SIZE, SZ_4K)
+#define SHARED_QSIZE ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
+ ALIGNED_QDSS_SIZE, SZ_1M)
+
+struct mem_desc {
+ dma_addr_t da; /* device address */
+ void *kva; /* kernel virtual address */
+ u32 size;
+ unsigned long attrs;
+};
+
+struct iface_queue {
+ struct hfi_queue_header *qhdr;
+ struct mem_desc qmem;
+};
+
+enum venus_state {
+ VENUS_STATE_DEINIT = 1,
+ VENUS_STATE_INIT,
+};
+
+struct venus_hfi_device {
+ struct venus_core *core;
+ u32 irq_status;
+ u32 last_packet_type;
+ bool power_enabled;
+ bool suspended;
+ enum venus_state state;
+ /* serialize read / write to the shared memory */
+ struct mutex lock;
+ struct completion pwr_collapse_prep;
+ struct completion release_resource;
+ struct mem_desc ifaceq_table;
+ struct mem_desc sfr;
+ struct iface_queue queues[IFACEQ_NUM];
+ u8 pkt_buf[IFACEQ_VAR_HUGE_PKT_SIZE];
+ u8 dbg_buf[IFACEQ_VAR_HUGE_PKT_SIZE];
+};
+
+static bool venus_pkt_debug;
+static int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL;
+static bool venus_sys_idle_indicator;
+static bool venus_fw_low_power_mode = true;
+static int venus_hw_rsp_timeout = 1000;
+static bool venus_fw_coverage;
+
+static void venus_set_state(struct venus_hfi_device *hdev,
+ enum venus_state state)
+{
+ mutex_lock(&hdev->lock);
+ hdev->state = state;
+ mutex_unlock(&hdev->lock);
+}
+
+static bool venus_is_valid_state(struct venus_hfi_device *hdev)
+{
+ return hdev->state != VENUS_STATE_DEINIT;
+}
+
+static void venus_dump_packet(struct venus_hfi_device *hdev, const void *packet)
+{
+ size_t pkt_size = *(u32 *)packet;
+
+ if (!venus_pkt_debug)
+ return;
+
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1, packet,
+ pkt_size, true);
+}
+
+static int venus_write_queue(struct venus_hfi_device *hdev,
+ struct iface_queue *queue,
+ void *packet, u32 *rx_req)
+{
+ struct hfi_queue_header *qhdr;
+ u32 dwords, new_wr_idx;
+ u32 empty_space, rd_idx, wr_idx, qsize;
+ u32 *wr_ptr;
+
+ if (!queue->qmem.kva)
+ return -EINVAL;
+
+ qhdr = queue->qhdr;
+ if (!qhdr)
+ return -EINVAL;
+
+ venus_dump_packet(hdev, packet);
+
+ dwords = (*(u32 *)packet) >> 2;
+ if (!dwords)
+ return -EINVAL;
+
+ rd_idx = qhdr->read_idx;
+ wr_idx = qhdr->write_idx;
+ qsize = qhdr->q_size;
+ /* ensure the rd/wr indices are read from memory */
+ rmb();
+
+ if (wr_idx >= rd_idx)
+ empty_space = qsize - (wr_idx - rd_idx);
+ else
+ empty_space = rd_idx - wr_idx;
+
+ if (empty_space <= dwords) {
+ qhdr->tx_req = 1;
+ /* ensure tx_req is updated in memory */
+ wmb();
+ return -ENOSPC;
+ }
+
+ qhdr->tx_req = 0;
+ /* ensure tx_req is updated in memory */
+ wmb();
+
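+ /*
+ * Copy the packet into the ring buffer; when it would run past the
+ * end of the queue, split the copy and wrap around to the start.
+ */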
+ new_wr_idx = wr_idx + dwords;
+ wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
+ if (new_wr_idx < qsize) {
+ memcpy(wr_ptr, packet, dwords << 2);
+ } else {
+ size_t len;
+
+ new_wr_idx -= qsize;
+ len = (dwords - new_wr_idx) << 2;
+ memcpy(wr_ptr, packet, len);
+ memcpy(queue->qmem.kva, packet + len, new_wr_idx << 2);
+ }
+
+ /* make sure packet is written before updating the write index */
+ wmb();
+
+ qhdr->write_idx = new_wr_idx;
+ *rx_req = qhdr->rx_req ? 1 : 0;
+
+ /* make sure write index is updated before an interrupt is raised */
+ mb();
+
+ return 0;
+}
+
+static int venus_read_queue(struct venus_hfi_device *hdev,
+ struct iface_queue *queue, void *pkt, u32 *tx_req)
+{
+ struct hfi_queue_header *qhdr;
+ u32 dwords, new_rd_idx;
+ u32 rd_idx, wr_idx, type, qsize;
+ u32 *rd_ptr;
+ u32 recv_request = 0;
+ int ret = 0;
+
+ if (!queue->qmem.kva)
+ return -EINVAL;
+
+ qhdr = queue->qhdr;
+ if (!qhdr)
+ return -EINVAL;
+
+ type = qhdr->type;
+ rd_idx = qhdr->read_idx;
+ wr_idx = qhdr->write_idx;
+ qsize = qhdr->q_size;
+
+ /* make sure data is valid before using it */
+ rmb();
+
+ /*
+ * Do not set the receive request for the debug queue; if set, Venus
+ * generates an interrupt for debug messages even when there is no
+ * response message available. In general the debug queue will not
+ * become full as it is emptied on every interrupt from Venus, and
+ * Venus generates an interrupt anyway once it does become full.
+ */
+ if (type & HFI_CTRL_TO_HOST_MSG_Q)
+ recv_request = 1;
+
+ if (rd_idx == wr_idx) {
+ qhdr->rx_req = recv_request;
+ *tx_req = 0;
+ /* update rx_req field in memory */
+ wmb();
+ return -ENODATA;
+ }
+
+ rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
+ dwords = *rd_ptr >> 2;
+ if (!dwords)
+ return -EINVAL;
+
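+ /*
+ * Copy the packet out of the ring buffer, wrapping to the start of
+ * the queue if it straddles the end. Oversized packets are dropped
+ * by advancing read_idx to write_idx.
+ */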
+ new_rd_idx = rd_idx + dwords;
+ if (((dwords << 2) <= IFACEQ_VAR_HUGE_PKT_SIZE) && rd_idx <= qsize) {
+ if (new_rd_idx < qsize) {
+ memcpy(pkt, rd_ptr, dwords << 2);
+ } else {
+ size_t len;
+
+ new_rd_idx -= qsize;
+ len = (dwords - new_rd_idx) << 2;
+ memcpy(pkt, rd_ptr, len);
+ memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2);
+ }
+ } else {
+ /* bad packet received, dropping */
+ new_rd_idx = qhdr->write_idx;
+ ret = -EBADMSG;
+ }
+
+ /* ensure the packet is read before updating read index */
+ rmb();
+
+ qhdr->read_idx = new_rd_idx;
+ /* ensure the read index is updated in memory */
+ wmb();
+
+ rd_idx = qhdr->read_idx;
+ wr_idx = qhdr->write_idx;
+ /* ensure rd/wr indices are read from memory */
+ rmb();
+
+ if (rd_idx != wr_idx)
+ qhdr->rx_req = 0;
+ else
+ qhdr->rx_req = recv_request;
+
+ *tx_req = qhdr->tx_req ? 1 : 0;
+
+ /* ensure rx_req is stored to memory and tx_req is loaded from memory */
+ mb();
+
+ venus_dump_packet(hdev, pkt);
+
+ return ret;
+}
+
+static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc,
+ u32 size)
+{
+ struct device *dev = hdev->core->dev;
+
+ desc->attrs = DMA_ATTR_WRITE_COMBINE;
+ desc->size = ALIGN(size, SZ_4K);
+
+ desc->kva = dma_alloc_attrs(dev, size, &desc->da, GFP_KERNEL,
+ desc->attrs);
+ if (!desc->kva)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void venus_free(struct venus_hfi_device *hdev, struct mem_desc *mem)
+{
+ struct device *dev = hdev->core->dev;
+
+ dma_free_attrs(dev, mem->size, mem->kva, mem->da, mem->attrs);
+}
+
+static void venus_writel(struct venus_hfi_device *hdev, u32 reg, u32 value)
+{
+ writel(value, hdev->core->base + reg);
+}
+
+static u32 venus_readl(struct venus_hfi_device *hdev, u32 reg)
+{
+ return readl(hdev->core->base + reg);
+}
+
+static void venus_set_registers(struct venus_hfi_device *hdev)
+{
+ const struct venus_resources *res = hdev->core->res;
+ const struct reg_val *tbl = res->reg_tbl;
+ unsigned int count = res->reg_tbl_size;
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ venus_writel(hdev, tbl[i].reg, tbl[i].value);
+}
+
+static void venus_soft_int(struct venus_hfi_device *hdev)
+{
+ venus_writel(hdev, CPU_IC_SOFTINT, BIT(CPU_IC_SOFTINT_H2A_SHIFT));
+}
+
+static int venus_iface_cmdq_write_nolock(struct venus_hfi_device *hdev,
+ void *pkt)
+{
+ struct device *dev = hdev->core->dev;
+ struct hfi_pkt_hdr *cmd_packet;
+ struct iface_queue *queue;
+ u32 rx_req;
+ int ret;
+
+ if (!venus_is_valid_state(hdev))
+ return -EINVAL;
+
+ cmd_packet = (struct hfi_pkt_hdr *)pkt;
+ hdev->last_packet_type = cmd_packet->pkt_type;
+
+ queue = &hdev->queues[IFACEQ_CMD_IDX];
+
+ ret = venus_write_queue(hdev, queue, pkt, &rx_req);
+ if (ret) {
+ dev_err(dev, "write to iface cmd queue failed (%d)\n", ret);
+ return ret;
+ }
+
+ if (rx_req)
+ venus_soft_int(hdev);
+
+ return 0;
+}
+
+static int venus_iface_cmdq_write(struct venus_hfi_device *hdev, void *pkt)
+{
+ int ret;
+
+ mutex_lock(&hdev->lock);
+ ret = venus_iface_cmdq_write_nolock(hdev, pkt);
+ mutex_unlock(&hdev->lock);
+
+ return ret;
+}
+
+static int venus_hfi_core_set_resource(struct venus_core *core, u32 id,
+ u32 size, u32 addr, void *cookie)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ struct hfi_sys_set_resource_pkt *pkt;
+ u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
+ int ret;
+
+ if (id == VIDC_RESOURCE_NONE)
+ return 0;
+
+ pkt = (struct hfi_sys_set_resource_pkt *)packet;
+
+ ret = pkt_sys_set_resource(pkt, id, size, addr, cookie);
+ if (ret)
+ return ret;
+
+ ret = venus_iface_cmdq_write(hdev, pkt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int venus_boot_core(struct venus_hfi_device *hdev)
+{
+ struct device *dev = hdev->core->dev;
+ static const unsigned int max_tries = 100;
+ u32 ctrl_status = 0;
+ unsigned int count = 0;
+ int ret = 0;
+
+ venus_writel(hdev, VIDC_CTRL_INIT, BIT(VIDC_CTRL_INIT_CTRL_SHIFT));
+ venus_writel(hdev, WRAPPER_INTR_MASK, WRAPPER_INTR_MASK_A2HVCODEC_MASK);
+ venus_writel(hdev, CPU_CS_SCIACMDARG3, 1);
+
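+ /*
+ * Poll until the firmware reports a non-zero control status (boot
+ * complete), bailing out early on an invalid UC_REGION setting.
+ */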
+ while (!ctrl_status && count < max_tries) {
+ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
+ if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) {
+ dev_err(dev, "invalid setting for UC_REGION\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ usleep_range(500, 1000);
+ count++;
+ }
+
+ if (count >= max_tries)
+ ret = -ETIMEDOUT;
+
+ return ret;
+}
+
+static u32 venus_hwversion(struct venus_hfi_device *hdev)
+{
+ struct device *dev = hdev->core->dev;
+ u32 ver = venus_readl(hdev, WRAPPER_HW_VERSION);
+ u32 major, minor, step;
+
+ major = ver & WRAPPER_HW_VERSION_MAJOR_VERSION_MASK;
+ major = major >> WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT;
+ minor = ver & WRAPPER_HW_VERSION_MINOR_VERSION_MASK;
+ minor = minor >> WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT;
+ step = ver & WRAPPER_HW_VERSION_STEP_VERSION_MASK;
+
+ dev_dbg(dev, "venus hw version %x.%x.%x\n", major, minor, step);
+
+ return major;
+}
+
+static int venus_run(struct venus_hfi_device *hdev)
+{
+ struct device *dev = hdev->core->dev;
+ int ret;
+
+ /*
+ * Re-program all of the registers that get reset as a result of
+ * regulator_disable() and _enable()
+ */
+ venus_set_registers(hdev);
+
+ venus_writel(hdev, UC_REGION_ADDR, hdev->ifaceq_table.da);
+ venus_writel(hdev, UC_REGION_SIZE, SHARED_QSIZE);
+ venus_writel(hdev, CPU_CS_SCIACMDARG2, hdev->ifaceq_table.da);
+ venus_writel(hdev, CPU_CS_SCIACMDARG1, 0x01);
+ if (hdev->sfr.da)
+ venus_writel(hdev, SFR_ADDR, hdev->sfr.da);
+
+ ret = venus_boot_core(hdev);
+ if (ret) {
+ dev_err(dev, "failed to reset venus core\n");
+ return ret;
+ }
+
+ venus_hwversion(hdev);
+
+ return 0;
+}
+
+static int venus_halt_axi(struct venus_hfi_device *hdev)
+{
+ void __iomem *base = hdev->core->base;
+ struct device *dev = hdev->core->dev;
+ u32 val;
+ int ret;
+
+ /* Halt AXI and AXI IMEM VBIF Access */
+ val = venus_readl(hdev, VBIF_AXI_HALT_CTRL0);
+ val |= VBIF_AXI_HALT_CTRL0_HALT_REQ;
+ venus_writel(hdev, VBIF_AXI_HALT_CTRL0, val);
+
+ /* Request for AXI bus port halt */
+ ret = readl_poll_timeout(base + VBIF_AXI_HALT_CTRL1, val,
+ val & VBIF_AXI_HALT_CTRL1_HALT_ACK,
+ POLL_INTERVAL_US,
+ VBIF_AXI_HALT_ACK_TIMEOUT_US);
+ if (ret) {
+ dev_err(dev, "AXI bus port halt timeout\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int venus_power_off(struct venus_hfi_device *hdev)
+{
+ int ret;
+
+ if (!hdev->power_enabled)
+ return 0;
+
+ ret = qcom_scm_set_remote_state(TZBSP_VIDEO_STATE_SUSPEND, 0);
+ if (ret)
+ return ret;
+
+ ret = venus_halt_axi(hdev);
+ if (ret)
+ return ret;
+
+ hdev->power_enabled = false;
+
+ return 0;
+}
+
+static int venus_power_on(struct venus_hfi_device *hdev)
+{
+ int ret;
+
+ if (hdev->power_enabled)
+ return 0;
+
+ ret = qcom_scm_set_remote_state(TZBSP_VIDEO_STATE_RESUME, 0);
+ if (ret)
+ goto err;
+
+ ret = venus_run(hdev);
+ if (ret)
+ goto err_suspend;
+
+ hdev->power_enabled = true;
+
+ return 0;
+
+err_suspend:
+ qcom_scm_set_remote_state(TZBSP_VIDEO_STATE_SUSPEND, 0);
+err:
+ hdev->power_enabled = false;
+ return ret;
+}
+
+static int venus_iface_msgq_read_nolock(struct venus_hfi_device *hdev,
+ void *pkt)
+{
+ struct iface_queue *queue;
+ u32 tx_req;
+ int ret;
+
+ if (!venus_is_valid_state(hdev))
+ return -EINVAL;
+
+ queue = &hdev->queues[IFACEQ_MSG_IDX];
+
+ ret = venus_read_queue(hdev, queue, pkt, &tx_req);
+ if (ret)
+ return ret;
+
+ if (tx_req)
+ venus_soft_int(hdev);
+
+ return 0;
+}
+
+static int venus_iface_msgq_read(struct venus_hfi_device *hdev, void *pkt)
+{
+ int ret;
+
+ mutex_lock(&hdev->lock);
+ ret = venus_iface_msgq_read_nolock(hdev, pkt);
+ mutex_unlock(&hdev->lock);
+
+ return ret;
+}
+
+static int venus_iface_dbgq_read_nolock(struct venus_hfi_device *hdev,
+ void *pkt)
+{
+ struct iface_queue *queue;
+ u32 tx_req;
+ int ret;
+
+ ret = venus_is_valid_state(hdev);
+ if (!ret)
+ return -EINVAL;
+
+ queue = &hdev->queues[IFACEQ_DBG_IDX];
+
+ ret = venus_read_queue(hdev, queue, pkt, &tx_req);
+ if (ret)
+ return ret;
+
+ if (tx_req)
+ venus_soft_int(hdev);
+
+ return 0;
+}
+
+static int venus_iface_dbgq_read(struct venus_hfi_device *hdev, void *pkt)
+{
+ int ret;
+
+ if (!pkt)
+ return -EINVAL;
+
+ mutex_lock(&hdev->lock);
+ ret = venus_iface_dbgq_read_nolock(hdev, pkt);
+ mutex_unlock(&hdev->lock);
+
+ return ret;
+}
+
+static void venus_set_qhdr_defaults(struct hfi_queue_header *qhdr)
+{
+ qhdr->status = 1;
+ qhdr->type = IFACEQ_DFLT_QHDR;
+ qhdr->q_size = IFACEQ_QUEUE_SIZE / 4;
+ qhdr->pkt_size = 0;
+ qhdr->rx_wm = 1;
+ qhdr->tx_wm = 1;
+ qhdr->rx_req = 1;
+ qhdr->tx_req = 0;
+ qhdr->rx_irq_status = 0;
+ qhdr->tx_irq_status = 0;
+ qhdr->read_idx = 0;
+ qhdr->write_idx = 0;
+}
+
+static void venus_interface_queues_release(struct venus_hfi_device *hdev)
+{
+ mutex_lock(&hdev->lock);
+
+ venus_free(hdev, &hdev->ifaceq_table);
+ venus_free(hdev, &hdev->sfr);
+
+ memset(hdev->queues, 0, sizeof(hdev->queues));
+ memset(&hdev->ifaceq_table, 0, sizeof(hdev->ifaceq_table));
+ memset(&hdev->sfr, 0, sizeof(hdev->sfr));
+
+ mutex_unlock(&hdev->lock);
+}
+
+static int venus_interface_queues_init(struct venus_hfi_device *hdev)
+{
+ struct hfi_queue_table_header *tbl_hdr;
+ struct iface_queue *queue;
+ struct hfi_sfr *sfr;
+ struct mem_desc desc = {0};
+ unsigned int offset;
+ unsigned int i;
+ int ret;
+
+ ret = venus_alloc(hdev, &desc, ALIGNED_QUEUE_SIZE);
+ if (ret)
+ return ret;
+
+ hdev->ifaceq_table.kva = desc.kva;
+ hdev->ifaceq_table.da = desc.da;
+ hdev->ifaceq_table.size = IFACEQ_TABLE_SIZE;
+ offset = hdev->ifaceq_table.size;
+
+ for (i = 0; i < IFACEQ_NUM; i++) {
+ queue = &hdev->queues[i];
+ queue->qmem.da = desc.da + offset;
+ queue->qmem.kva = desc.kva + offset;
+ queue->qmem.size = IFACEQ_QUEUE_SIZE;
+ offset += queue->qmem.size;
+ queue->qhdr =
+ IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);
+
+ venus_set_qhdr_defaults(queue->qhdr);
+
+ queue->qhdr->start_addr = queue->qmem.da;
+
+ if (i == IFACEQ_CMD_IDX)
+ queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
+ else if (i == IFACEQ_MSG_IDX)
+ queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
+ else if (i == IFACEQ_DBG_IDX)
+ queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
+ }
+
+ tbl_hdr = hdev->ifaceq_table.kva;
+ tbl_hdr->version = 0;
+ tbl_hdr->size = IFACEQ_TABLE_SIZE;
+ tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
+ tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
+ tbl_hdr->num_q = IFACEQ_NUM;
+ tbl_hdr->num_active_q = IFACEQ_NUM;
+
+ /*
+ * Set the receive request to zero on the debug queue as there is
+ * no need for an interrupt from the video hardware for debug
+ * messages.
+ */
+ queue = &hdev->queues[IFACEQ_DBG_IDX];
+ queue->qhdr->rx_req = 0;
+
+ ret = venus_alloc(hdev, &desc, ALIGNED_SFR_SIZE);
+ if (ret) {
+ hdev->sfr.da = 0;
+ } else {
+ hdev->sfr.da = desc.da;
+ hdev->sfr.kva = desc.kva;
+ hdev->sfr.size = ALIGNED_SFR_SIZE;
+ sfr = hdev->sfr.kva;
+ sfr->buf_size = ALIGNED_SFR_SIZE;
+ }
+
+ /* ensure table and queue header structs are settled in memory */
+ wmb();
+
+ return 0;
+}
+
+static int venus_sys_set_debug(struct venus_hfi_device *hdev, u32 debug)
+{
+ struct hfi_sys_set_property_pkt *pkt;
+ u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
+ int ret;
+
+ pkt = (struct hfi_sys_set_property_pkt *)packet;
+
+ pkt_sys_debug_config(pkt, HFI_DEBUG_MODE_QUEUE, debug);
+
+ ret = venus_iface_cmdq_write(hdev, pkt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int venus_sys_set_coverage(struct venus_hfi_device *hdev, u32 mode)
+{
+ struct hfi_sys_set_property_pkt *pkt;
+ u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
+ int ret;
+
+ pkt = (struct hfi_sys_set_property_pkt *)packet;
+
+ pkt_sys_coverage_config(pkt, mode);
+
+ ret = venus_iface_cmdq_write(hdev, pkt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int venus_sys_set_idle_message(struct venus_hfi_device *hdev,
+ bool enable)
+{
+ struct hfi_sys_set_property_pkt *pkt;
+ u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
+ int ret;
+
+ if (!enable)
+ return 0;
+
+ pkt = (struct hfi_sys_set_property_pkt *)packet;
+
+ pkt_sys_idle_indicator(pkt, enable);
+
+ ret = venus_iface_cmdq_write(hdev, pkt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int venus_sys_set_power_control(struct venus_hfi_device *hdev,
+ bool enable)
+{
+ struct hfi_sys_set_property_pkt *pkt;
+ u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
+ int ret;
+
+ pkt = (struct hfi_sys_set_property_pkt *)packet;
+
+ pkt_sys_power_control(pkt, enable);
+
+ ret = venus_iface_cmdq_write(hdev, pkt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int venus_get_queue_size(struct venus_hfi_device *hdev,
+ unsigned int index)
+{
+ struct hfi_queue_header *qhdr;
+
+ if (index >= IFACEQ_NUM)
+ return -EINVAL;
+
+ qhdr = hdev->queues[index].qhdr;
+ if (!qhdr)
+ return -EINVAL;
+
+ return abs(qhdr->read_idx - qhdr->write_idx);
+}
+
+static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
+{
+ struct device *dev = hdev->core->dev;
+ int ret;
+
+ ret = venus_sys_set_debug(hdev, venus_fw_debug);
+ if (ret)
+ dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret);
+
+ ret = venus_sys_set_idle_message(hdev, venus_sys_idle_indicator);
+ if (ret)
+ dev_warn(dev, "setting idle response ON failed (%d)\n", ret);
+
+ ret = venus_sys_set_power_control(hdev, venus_fw_low_power_mode);
+ if (ret)
+ dev_warn(dev, "setting hw power collapse ON failed (%d)\n",
+ ret);
+
+ return ret;
+}
+
+static int venus_session_cmd(struct venus_inst *inst, u32 pkt_type)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_pkt pkt;
+
+ pkt_session_cmd(&pkt, pkt_type, inst);
+
+ return venus_iface_cmdq_write(hdev, &pkt);
+}
+
+static void venus_flush_debug_queue(struct venus_hfi_device *hdev)
+{
+ struct device *dev = hdev->core->dev;
+ void *packet = hdev->dbg_buf;
+
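+ /*
+ * Drain the debug queue, printing firmware debug messages and
+ * silently discarding coverage packets.
+ */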
+ while (!venus_iface_dbgq_read(hdev, packet)) {
+ struct hfi_msg_sys_coverage_pkt *pkt = packet;
+
+ if (pkt->hdr.pkt_type != HFI_MSG_SYS_COV) {
+ struct hfi_msg_sys_debug_pkt *pkt = packet;
+
+ dev_dbg(dev, "%s", pkt->msg_data);
+ }
+ }
+}
+
+static int venus_prepare_power_collapse(struct venus_hfi_device *hdev,
+ bool wait)
+{
+ unsigned long timeout = msecs_to_jiffies(venus_hw_rsp_timeout);
+ struct hfi_sys_pc_prep_pkt pkt;
+ int ret;
+
+ init_completion(&hdev->pwr_collapse_prep);
+
+ pkt_sys_pc_prep(&pkt);
+
+ ret = venus_iface_cmdq_write(hdev, &pkt);
+ if (ret)
+ return ret;
+
+ if (!wait)
+ return 0;
+
+ ret = wait_for_completion_timeout(&hdev->pwr_collapse_prep, timeout);
+ if (!ret) {
+ venus_flush_debug_queue(hdev);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int venus_are_queues_empty(struct venus_hfi_device *hdev)
+{
+ int ret1, ret2;
+
+ ret1 = venus_get_queue_size(hdev, IFACEQ_MSG_IDX);
+ if (ret1 < 0)
+ return ret1;
+
+ ret2 = venus_get_queue_size(hdev, IFACEQ_CMD_IDX);
+ if (ret2 < 0)
+ return ret2;
+
+ if (!ret1 && !ret2)
+ return 1;
+
+ return 0;
+}
+
+static void venus_sfr_print(struct venus_hfi_device *hdev)
+{
+ struct device *dev = hdev->core->dev;
+ struct hfi_sfr *sfr = hdev->sfr.kva;
+ void *p;
+
+ if (!sfr)
+ return;
+
+ p = memchr(sfr->data, '\0', sfr->buf_size);
+ /*
+ * The SFR buffer isn't guaranteed to be NUL-terminated, since
+ * SYS_ERROR indicates that Venus is in the process of crashing.
+ */
+ if (!p)
+ sfr->data[sfr->buf_size - 1] = '\0';
+
+ dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data);
+}
+
+static void venus_process_msg_sys_error(struct venus_hfi_device *hdev,
+ void *packet)
+{
+ struct hfi_msg_event_notify_pkt *event_pkt = packet;
+
+ if (event_pkt->event_id != HFI_EVENT_SYS_ERROR)
+ return;
+
+ venus_set_state(hdev, VENUS_STATE_DEINIT);
+
+ /*
+ * Once SYS_ERROR is received from the HW, it is safe to halt the
+ * AXI: the Venus FW may have crashed while the HW is still active
+ * and issuing unnecessary transactions. Hence stop all AXI
+ * transactions from the venus subsystem.
+ */
+ venus_halt_axi(hdev);
+ venus_sfr_print(hdev);
+}
+
+static irqreturn_t venus_isr_thread(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ const struct venus_resources *res;
+ void *pkt;
+ u32 msg_ret;
+
+ if (!hdev)
+ return IRQ_NONE;
+
+ res = hdev->core->res;
+ pkt = hdev->pkt_buf;
+
+ if (hdev->irq_status & WRAPPER_INTR_STATUS_A2HWD_MASK) {
+ venus_sfr_print(hdev);
+ hfi_process_watchdog_timeout(core);
+ }
+
+ while (!venus_iface_msgq_read(hdev, pkt)) {
+ msg_ret = hfi_process_msg_packet(core, pkt);
+ switch (msg_ret) {
+ case HFI_MSG_EVENT_NOTIFY:
+ venus_process_msg_sys_error(hdev, pkt);
+ break;
+ case HFI_MSG_SYS_INIT:
+ venus_hfi_core_set_resource(core, res->vmem_id,
+ res->vmem_size,
+ res->vmem_addr,
+ hdev);
+ break;
+ case HFI_MSG_SYS_RELEASE_RESOURCE:
+ complete(&hdev->release_resource);
+ break;
+ case HFI_MSG_SYS_PC_PREP:
+ complete(&hdev->pwr_collapse_prep);
+ break;
+ default:
+ break;
+ }
+ }
+
+ venus_flush_debug_queue(hdev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t venus_isr(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ u32 status;
+
+ if (!hdev)
+ return IRQ_NONE;
+
+ status = venus_readl(hdev, WRAPPER_INTR_STATUS);
+
+ if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
+ status & WRAPPER_INTR_STATUS_A2HWD_MASK ||
+ status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
+ hdev->irq_status = status;
+
+ venus_writel(hdev, CPU_CS_A2HSOFTINTCLR, 1);
+ venus_writel(hdev, WRAPPER_INTR_CLEAR, status);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int venus_core_init(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ struct device *dev = core->dev;
+ struct hfi_sys_get_property_pkt version_pkt;
+ struct hfi_sys_init_pkt pkt;
+ int ret;
+
+ pkt_sys_init(&pkt, HFI_VIDEO_ARCH_OX);
+
+ venus_set_state(hdev, VENUS_STATE_INIT);
+
+ ret = venus_iface_cmdq_write(hdev, &pkt);
+ if (ret)
+ return ret;
+
+ pkt_sys_image_version(&version_pkt);
+
+ ret = venus_iface_cmdq_write(hdev, &version_pkt);
+ if (ret)
+ dev_warn(dev, "failed to send image version pkt to fw\n");
+
+ return 0;
+}
+
+static int venus_core_deinit(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+
+ venus_set_state(hdev, VENUS_STATE_DEINIT);
+ hdev->suspended = true;
+ hdev->power_enabled = false;
+
+ return 0;
+}
+
+static int venus_core_ping(struct venus_core *core, u32 cookie)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ struct hfi_sys_ping_pkt pkt;
+
+ pkt_sys_ping(&pkt, cookie);
+
+ return venus_iface_cmdq_write(hdev, &pkt);
+}
+
+static int venus_core_trigger_ssr(struct venus_core *core, u32 trigger_type)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ struct hfi_sys_test_ssr_pkt pkt;
+ int ret;
+
+ ret = pkt_sys_ssr_cmd(&pkt, trigger_type);
+ if (ret)
+ return ret;
+
+ return venus_iface_cmdq_write(hdev, &pkt);
+}
+
+static int venus_session_init(struct venus_inst *inst, u32 session_type,
+ u32 codec)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_init_pkt pkt;
+ int ret;
+
+ ret = venus_sys_set_default_properties(hdev);
+ if (ret)
+ return ret;
+
+ ret = pkt_session_init(&pkt, inst, session_type, codec);
+ if (ret)
+ goto err;
+
+ ret = venus_iface_cmdq_write(hdev, &pkt);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ venus_flush_debug_queue(hdev);
+ return ret;
+}
+
+static int venus_session_end(struct venus_inst *inst)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct device *dev = hdev->core->dev;
+
+ if (venus_fw_coverage) {
+ if (venus_sys_set_coverage(hdev, venus_fw_coverage))
+ dev_warn(dev, "fw coverage msg ON failed\n");
+ }
+
+ return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_END);
+}
+
+static int venus_session_abort(struct venus_inst *inst)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+
+ venus_flush_debug_queue(hdev);
+
+ return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_ABORT);
+}
+
+static int venus_session_flush(struct venus_inst *inst, u32 flush_mode)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_flush_pkt pkt;
+ int ret;
+
+ ret = pkt_session_flush(&pkt, inst, flush_mode);
+ if (ret)
+ return ret;
+
+ return venus_iface_cmdq_write(hdev, &pkt);
+}
+
+static int venus_session_start(struct venus_inst *inst)
+{
+ return venus_session_cmd(inst, HFI_CMD_SESSION_START);
+}
+
+static int venus_session_stop(struct venus_inst *inst)
+{
+ return venus_session_cmd(inst, HFI_CMD_SESSION_STOP);
+}
+
+static int venus_session_continue(struct venus_inst *inst)
+{
+ return venus_session_cmd(inst, HFI_CMD_SESSION_CONTINUE);
+}
+
+static int venus_session_etb(struct venus_inst *inst,
+ struct hfi_frame_data *in_frame)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ u32 session_type = inst->session_type;
+ int ret;
+
+ if (session_type == VIDC_SESSION_TYPE_DEC) {
+ struct hfi_session_empty_buffer_compressed_pkt pkt;
+
+ ret = pkt_session_etb_decoder(&pkt, inst, in_frame);
+ if (ret)
+ return ret;
+
+ ret = venus_iface_cmdq_write(hdev, &pkt);
+ } else if (session_type == VIDC_SESSION_TYPE_ENC) {
+ struct hfi_session_empty_buffer_uncompressed_plane0_pkt pkt;
+
+ ret = pkt_session_etb_encoder(&pkt, inst, in_frame);
+ if (ret)
+ return ret;
+
+ ret = venus_iface_cmdq_write(hdev, &pkt);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int venus_session_ftb(struct venus_inst *inst,
+ struct hfi_frame_data *out_frame)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_fill_buffer_pkt pkt;
+ int ret;
+
+ ret = pkt_session_ftb(&pkt, inst, out_frame);
+ if (ret)
+ return ret;
+
+ return venus_iface_cmdq_write(hdev, &pkt);
+}
+
+static int venus_session_set_buffers(struct venus_inst *inst,
+ struct hfi_buffer_desc *bd)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_set_buffers_pkt *pkt;
+ u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
+ int ret;
+
+ if (bd->buffer_type == HFI_BUFFER_INPUT)
+ return 0;
+
+ pkt = (struct hfi_session_set_buffers_pkt *)packet;
+
+ ret = pkt_session_set_buffers(pkt, inst, bd);
+ if (ret)
+ return ret;
+
+ return venus_iface_cmdq_write(hdev, pkt);
+}
+
+static int venus_session_unset_buffers(struct venus_inst *inst,
+ struct hfi_buffer_desc *bd)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_release_buffer_pkt *pkt;
+ u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
+ int ret;
+
+ if (bd->buffer_type == HFI_BUFFER_INPUT)
+ return 0;
+
+ pkt = (struct hfi_session_release_buffer_pkt *)packet;
+
+ ret = pkt_session_unset_buffers(pkt, inst, bd);
+ if (ret)
+ return ret;
+
+ return venus_iface_cmdq_write(hdev, pkt);
+}
+
+static int venus_session_load_res(struct venus_inst *inst)
+{
+ return venus_session_cmd(inst, HFI_CMD_SESSION_LOAD_RESOURCES);
+}
+
+static int venus_session_release_res(struct venus_inst *inst)
+{
+ return venus_session_cmd(inst, HFI_CMD_SESSION_RELEASE_RESOURCES);
+}
+
+static int venus_session_parse_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
+ u32 seq_hdr_len)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_parse_sequence_header_pkt *pkt;
+ u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
+ int ret;
+
+ pkt = (struct hfi_session_parse_sequence_header_pkt *)packet;
+
+ ret = pkt_session_parse_seq_header(pkt, inst, seq_hdr, seq_hdr_len);
+ if (ret)
+ return ret;
+
+ ret = venus_iface_cmdq_write(hdev, pkt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int venus_session_get_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
+ u32 seq_hdr_len)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_get_sequence_header_pkt *pkt;
+ u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
+ int ret;
+
+ pkt = (struct hfi_session_get_sequence_header_pkt *)packet;
+
+ ret = pkt_session_get_seq_hdr(pkt, inst, seq_hdr, seq_hdr_len);
+ if (ret)
+ return ret;
+
+ return venus_iface_cmdq_write(hdev, pkt);
+}
+
+static int venus_session_set_property(struct venus_inst *inst, u32 ptype,
+ void *pdata)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_set_property_pkt *pkt;
+ u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
+ int ret;
+
+ pkt = (struct hfi_session_set_property_pkt *)packet;
+
+ ret = pkt_session_set_property(pkt, inst, ptype, pdata);
+ if (ret)
+ return ret;
+
+ return venus_iface_cmdq_write(hdev, pkt);
+}
+
+static int venus_session_get_property(struct venus_inst *inst, u32 ptype)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
+ struct hfi_session_get_property_pkt pkt;
+ int ret;
+
+ ret = pkt_session_get_property(&pkt, inst, ptype);
+ if (ret)
+ return ret;
+
+ return venus_iface_cmdq_write(hdev, &pkt);
+}
+
+static int venus_resume(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ int ret = 0;
+
+ mutex_lock(&hdev->lock);
+
+ if (!hdev->suspended)
+ goto unlock;
+
+ ret = venus_power_on(hdev);
+
+unlock:
+ if (!ret)
+ hdev->suspended = false;
+
+ mutex_unlock(&hdev->lock);
+
+ return ret;
+}
+
+static int venus_suspend_1xx(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ struct device *dev = core->dev;
+ u32 ctrl_status;
+ int ret;
+
+ if (!hdev->power_enabled || hdev->suspended)
+ return 0;
+
+ mutex_lock(&hdev->lock);
+ ret = venus_is_valid_state(hdev);
+ mutex_unlock(&hdev->lock);
+
+ if (!ret) {
+ dev_err(dev, "bad state, cannot suspend\n");
+ return -EINVAL;
+ }
+
+ ret = venus_prepare_power_collapse(hdev, true);
+ if (ret) {
+ dev_err(dev, "prepare for power collapse failed (%d)\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&hdev->lock);
+
+ if (hdev->last_packet_type != HFI_CMD_SYS_PC_PREP) {
+ mutex_unlock(&hdev->lock);
+ return -EINVAL;
+ }
+
+ ret = venus_are_queues_empty(hdev);
+ if (ret < 0 || !ret) {
+ mutex_unlock(&hdev->lock);
+ return -EINVAL;
+ }
+
+ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
+ if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) {
+ mutex_unlock(&hdev->lock);
+ return -EINVAL;
+ }
+
+ ret = venus_power_off(hdev);
+ if (ret) {
+ mutex_unlock(&hdev->lock);
+ return ret;
+ }
+
+ hdev->suspended = true;
+
+ mutex_unlock(&hdev->lock);
+
+ return 0;
+}
+
+static int venus_suspend_3xx(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+ struct device *dev = core->dev;
+ u32 ctrl_status, wfi_status;
+ int ret;
+ int cnt = 100;
+
+ if (!hdev->power_enabled || hdev->suspended)
+ return 0;
+
+ mutex_lock(&hdev->lock);
+ ret = venus_is_valid_state(hdev);
+ mutex_unlock(&hdev->lock);
+
+ if (!ret) {
+ dev_err(dev, "bad state, cannot suspend\n");
+ return -EINVAL;
+ }
+
+ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
+ if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) {
+ wfi_status = venus_readl(hdev, WRAPPER_CPU_STATUS);
+ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
+
+ ret = venus_prepare_power_collapse(hdev, false);
+ if (ret) {
+ dev_err(dev, "prepare for power collapse failed (%d)\n",
+ ret);
+ return ret;
+ }
+
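+ /* poll until the firmware CPU reports WFI and PC_READY, or time out */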
+ cnt = 100;
+ while (cnt--) {
+ wfi_status = venus_readl(hdev, WRAPPER_CPU_STATUS);
+ ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
+ if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY &&
+ wfi_status & BIT(0))
+ break;
+ usleep_range(1000, 1500);
+ }
+ }
+
+ mutex_lock(&hdev->lock);
+
+ ret = venus_power_off(hdev);
+ if (ret) {
+ dev_err(dev, "venus_power_off failed (%d)\n", ret);
+ mutex_unlock(&hdev->lock);
+ return ret;
+ }
+
+ hdev->suspended = true;
+
+ mutex_unlock(&hdev->lock);
+
+ return 0;
+}
+
+static int venus_suspend(struct venus_core *core)
+{
+ if (core->res->hfi_version == HFI_VERSION_3XX)
+ return venus_suspend_3xx(core);
+
+ return venus_suspend_1xx(core);
+}
+
+static const struct hfi_ops venus_hfi_ops = {
+ .core_init = venus_core_init,
+ .core_deinit = venus_core_deinit,
+ .core_ping = venus_core_ping,
+ .core_trigger_ssr = venus_core_trigger_ssr,
+
+ .session_init = venus_session_init,
+ .session_end = venus_session_end,
+ .session_abort = venus_session_abort,
+ .session_flush = venus_session_flush,
+ .session_start = venus_session_start,
+ .session_stop = venus_session_stop,
+ .session_continue = venus_session_continue,
+ .session_etb = venus_session_etb,
+ .session_ftb = venus_session_ftb,
+ .session_set_buffers = venus_session_set_buffers,
+ .session_unset_buffers = venus_session_unset_buffers,
+ .session_load_res = venus_session_load_res,
+ .session_release_res = venus_session_release_res,
+ .session_parse_seq_hdr = venus_session_parse_seq_hdr,
+ .session_get_seq_hdr = venus_session_get_seq_hdr,
+ .session_set_property = venus_session_set_property,
+ .session_get_property = venus_session_get_property,
+
+ .resume = venus_resume,
+ .suspend = venus_suspend,
+
+ .isr = venus_isr,
+ .isr_thread = venus_isr_thread,
+};
+
+void venus_hfi_destroy(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev = to_hfi_priv(core);
+
+ venus_interface_queues_release(hdev);
+ mutex_destroy(&hdev->lock);
+ kfree(hdev);
+ core->priv = NULL;
+ core->ops = NULL;
+}
+
+int venus_hfi_create(struct venus_core *core)
+{
+ struct venus_hfi_device *hdev;
+ int ret;
+
+ hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ mutex_init(&hdev->lock);
+
+ hdev->core = core;
+ hdev->suspended = true;
+ core->priv = hdev;
+ core->ops = &venus_hfi_ops;
+ core->core_caps = ENC_ROTATION_CAPABILITY | ENC_SCALING_CAPABILITY |
+ ENC_DEINTERLACE_CAPABILITY |
+ DEC_MULTI_STREAM_CAPABILITY;
+
+ ret = venus_interface_queues_init(hdev);
+ if (ret)
+ goto err_kfree;
+
+ return 0;
+
+err_kfree:
+ kfree(hdev);
+ core->priv = NULL;
+ core->ops = NULL;
+ return ret;
+}
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.h b/drivers/media/platform/qcom/venus/hfi_venus.h
new file mode 100644
index 000000000000..885923354033
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_venus.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __VENUS_HFI_VENUS_H__
+#define __VENUS_HFI_VENUS_H__
+
+struct venus_core;
+
+void venus_hfi_destroy(struct venus_core *core);
+int venus_hfi_create(struct venus_core *core);
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_venus_io.h b/drivers/media/platform/qcom/venus/hfi_venus_io.h
new file mode 100644
index 000000000000..98cc350113ab
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/hfi_venus_io.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __VENUS_HFI_VENUS_IO_H__
+#define __VENUS_HFI_VENUS_IO_H__
+
+#define VBIF_BASE 0x80000
+
+#define VBIF_AXI_HALT_CTRL0 (VBIF_BASE + 0x208)
+#define VBIF_AXI_HALT_CTRL1 (VBIF_BASE + 0x20c)
+
+#define VBIF_AXI_HALT_CTRL0_HALT_REQ BIT(0)
+#define VBIF_AXI_HALT_CTRL1_HALT_ACK BIT(0)
+#define VBIF_AXI_HALT_ACK_TIMEOUT_US 500000
+
+#define CPU_BASE 0xc0000
+#define CPU_CS_BASE (CPU_BASE + 0x12000)
+#define CPU_IC_BASE (CPU_BASE + 0x1f000)
+
+#define CPU_CS_A2HSOFTINTCLR (CPU_CS_BASE + 0x1c)
+
+#define VIDC_CTRL_INIT (CPU_CS_BASE + 0x48)
+#define VIDC_CTRL_INIT_RESERVED_BITS31_1_MASK 0xfffffffe
+#define VIDC_CTRL_INIT_RESERVED_BITS31_1_SHIFT 1
+#define VIDC_CTRL_INIT_CTRL_MASK 0x1
+#define VIDC_CTRL_INIT_CTRL_SHIFT 0
+
+/* HFI control status */
+#define CPU_CS_SCIACMDARG0 (CPU_CS_BASE + 0x4c)
+#define CPU_CS_SCIACMDARG0_MASK 0xff
+#define CPU_CS_SCIACMDARG0_SHIFT 0x0
+#define CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK 0xfe
+#define CPU_CS_SCIACMDARG0_ERROR_STATUS_SHIFT 0x1
+#define CPU_CS_SCIACMDARG0_INIT_STATUS_MASK 0x1
+#define CPU_CS_SCIACMDARG0_INIT_STATUS_SHIFT 0x0
+#define CPU_CS_SCIACMDARG0_PC_READY BIT(8)
+#define CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK BIT(30)
+
+/* HFI queue table info */
+#define CPU_CS_SCIACMDARG1 (CPU_CS_BASE + 0x50)
+
+/* HFI queue table address */
+#define CPU_CS_SCIACMDARG2 (CPU_CS_BASE + 0x54)
+
+/* Venus cpu */
+#define CPU_CS_SCIACMDARG3 (CPU_CS_BASE + 0x58)
+
+#define SFR_ADDR (CPU_CS_BASE + 0x5c)
+#define MMAP_ADDR (CPU_CS_BASE + 0x60)
+#define UC_REGION_ADDR (CPU_CS_BASE + 0x64)
+#define UC_REGION_SIZE (CPU_CS_BASE + 0x68)
+
+#define CPU_IC_SOFTINT (CPU_IC_BASE + 0x18)
+#define CPU_IC_SOFTINT_H2A_MASK 0x8000
+#define CPU_IC_SOFTINT_H2A_SHIFT 0xf
+
+/* Venus wrapper */
+#define WRAPPER_BASE 0x000e0000
+
+#define WRAPPER_HW_VERSION (WRAPPER_BASE + 0x00)
+#define WRAPPER_HW_VERSION_MAJOR_VERSION_MASK 0x78000000
+#define WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT 28
+#define WRAPPER_HW_VERSION_MINOR_VERSION_MASK 0xfff0000
+#define WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT 16
+#define WRAPPER_HW_VERSION_STEP_VERSION_MASK 0xffff
+
+#define WRAPPER_CLOCK_CONFIG (WRAPPER_BASE + 0x04)
+
+#define WRAPPER_INTR_STATUS (WRAPPER_BASE + 0x0c)
+#define WRAPPER_INTR_STATUS_A2HWD_MASK 0x10
+#define WRAPPER_INTR_STATUS_A2HWD_SHIFT 0x4
+#define WRAPPER_INTR_STATUS_A2H_MASK 0x4
+#define WRAPPER_INTR_STATUS_A2H_SHIFT 0x2
+
+#define WRAPPER_INTR_MASK (WRAPPER_BASE + 0x10)
+#define WRAPPER_INTR_MASK_A2HWD_BASK 0x10
+#define WRAPPER_INTR_MASK_A2HWD_SHIFT 0x4
+#define WRAPPER_INTR_MASK_A2HVCODEC_MASK 0x8
+#define WRAPPER_INTR_MASK_A2HVCODEC_SHIFT 0x3
+#define WRAPPER_INTR_MASK_A2HCPU_MASK 0x4
+#define WRAPPER_INTR_MASK_A2HCPU_SHIFT 0x2
+
+#define WRAPPER_INTR_CLEAR (WRAPPER_BASE + 0x14)
+#define WRAPPER_INTR_CLEAR_A2HWD_MASK 0x10
+#define WRAPPER_INTR_CLEAR_A2HWD_SHIFT 0x4
+#define WRAPPER_INTR_CLEAR_A2H_MASK 0x4
+#define WRAPPER_INTR_CLEAR_A2H_SHIFT 0x2
+
+#define WRAPPER_POWER_STATUS (WRAPPER_BASE + 0x44)
+#define WRAPPER_VDEC_VCODEC_POWER_CONTROL (WRAPPER_BASE + 0x48)
+#define WRAPPER_VENC_VCODEC_POWER_CONTROL (WRAPPER_BASE + 0x4c)
+#define WRAPPER_VDEC_VENC_AHB_BRIDGE_SYNC_RESET (WRAPPER_BASE + 0x64)
+
+#define WRAPPER_CPU_CLOCK_CONFIG (WRAPPER_BASE + 0x2000)
+#define WRAPPER_CPU_AXI_HALT (WRAPPER_BASE + 0x2008)
+#define WRAPPER_CPU_AXI_HALT_STATUS (WRAPPER_BASE + 0x200c)
+
+#define WRAPPER_CPU_CGC_DIS (WRAPPER_BASE + 0x2010)
+#define WRAPPER_CPU_STATUS (WRAPPER_BASE + 0x2014)
+#define WRAPPER_SW_RESET (WRAPPER_BASE + 0x3000)
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
new file mode 100644
index 000000000000..eb0c1c51cfef
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -0,0 +1,1162 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "hfi_venus_io.h"
+#include "core.h"
+#include "helpers.h"
+#include "vdec.h"
+
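+/* NV12 frame size with 128-byte aligned strides and extra padding, rounded up to 4K */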
+static u32 get_framesize_uncompressed(unsigned int plane, u32 width, u32 height)
+{
+ u32 y_stride, uv_stride, y_plane;
+ u32 y_sclines, uv_sclines, uv_plane;
+ u32 size;
+
+ y_stride = ALIGN(width, 128);
+ uv_stride = ALIGN(width, 128);
+ y_sclines = ALIGN(height, 32);
+ uv_sclines = ALIGN(((height + 1) >> 1), 16);
+
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines + SZ_4K;
+ size = y_plane + uv_plane + SZ_8K;
+
+ return ALIGN(size, SZ_4K);
+}
+
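+/* compressed bitstream buffer size heuristic: half of an NV12 frame plus a small headroom */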
+static u32 get_framesize_compressed(unsigned int width, unsigned int height)
+{
+ return ((width * height * 3 / 2) / 2) + 128;
+}
+
+/*
+ * Three reasons to keep the MPLANE formats (even though only one plane is
+ * currently used):
+ * - the MPLANE formats allow a single plane to be used
+ * - the downstream driver uses MPLANE formats too
+ * - future firmware versions could add support for more than one plane
+ */
+static const struct venus_format vdec_formats[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_NV12,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG4,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG2,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_H263,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_VC1_ANNEX_G,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_VC1_ANNEX_L,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_VP8,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_VP9,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_XVID,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ },
+};
+
+static const struct venus_format *find_format(u32 pixfmt, u32 type)
+{
+ const struct venus_format *fmt = vdec_formats;
+ unsigned int size = ARRAY_SIZE(vdec_formats);
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ if (fmt[i].pixfmt == pixfmt)
+ break;
+ }
+
+ if (i == size || fmt[i].type != type)
+ return NULL;
+
+ return &fmt[i];
+}
+
+static const struct venus_format *
+find_format_by_index(unsigned int index, u32 type)
+{
+ const struct venus_format *fmt = vdec_formats;
+ unsigned int size = ARRAY_SIZE(vdec_formats);
+ unsigned int i, k = 0;
+
+ if (index > size)
+ return NULL;
+
+ for (i = 0; i < size; i++) {
+ if (fmt[i].type != type)
+ continue;
+ if (k == index)
+ break;
+ k++;
+ }
+
+ if (i == size)
+ return NULL;
+
+ return &fmt[i];
+}
+
+static const struct venus_format *
+vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt;
+ const struct venus_format *fmt;
+ unsigned int p;
+
+ memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved));
+ memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
+
+ fmt = find_format(pixmp->pixelformat, f->type);
+ if (!fmt) {
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ pixmp->pixelformat = V4L2_PIX_FMT_NV12;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ pixmp->pixelformat = V4L2_PIX_FMT_H264;
+ else
+ return NULL;
+ fmt = find_format(pixmp->pixelformat, f->type);
+ pixmp->width = 1280;
+ pixmp->height = 720;
+ }
+
+ pixmp->width = clamp(pixmp->width, inst->cap_width.min,
+ inst->cap_width.max);
+ pixmp->height = clamp(pixmp->height, inst->cap_height.min,
+ inst->cap_height.max);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ pixmp->height = ALIGN(pixmp->height, 32);
+
+ if (pixmp->field == V4L2_FIELD_ANY)
+ pixmp->field = V4L2_FIELD_NONE;
+ pixmp->num_planes = fmt->num_planes;
+ pixmp->flags = 0;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ for (p = 0; p < pixmp->num_planes; p++) {
+ pfmt[p].sizeimage =
+ get_framesize_uncompressed(p, pixmp->width,
+ pixmp->height);
+ pfmt[p].bytesperline = ALIGN(pixmp->width, 128);
+ }
+ } else {
+ pfmt[0].sizeimage = get_framesize_compressed(pixmp->width,
+ pixmp->height);
+ pfmt[0].bytesperline = 0;
+ }
+
+ return fmt;
+}
+
+static int vdec_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct venus_inst *inst = to_inst(file);
+
+ vdec_try_fmt_common(inst, f);
+
+ return 0;
+}
+
+static int vdec_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct venus_inst *inst = to_inst(file);
+ const struct venus_format *fmt = NULL;
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ fmt = inst->fmt_cap;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ fmt = inst->fmt_out;
+
+ if (inst->reconfig) {
+ struct v4l2_format format = {};
+
+ inst->out_width = inst->reconfig_width;
+ inst->out_height = inst->reconfig_height;
+ inst->reconfig = false;
+
+ format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ format.fmt.pix_mp.pixelformat = inst->fmt_cap->pixfmt;
+ format.fmt.pix_mp.width = inst->out_width;
+ format.fmt.pix_mp.height = inst->out_height;
+
+ vdec_try_fmt_common(inst, &format);
+
+ inst->width = format.fmt.pix_mp.width;
+ inst->height = format.fmt.pix_mp.height;
+ }
+
+ pixmp->pixelformat = fmt->pixfmt;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ pixmp->width = inst->width;
+ pixmp->height = inst->height;
+ pixmp->colorspace = inst->colorspace;
+ pixmp->ycbcr_enc = inst->ycbcr_enc;
+ pixmp->quantization = inst->quantization;
+ pixmp->xfer_func = inst->xfer_func;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ pixmp->width = inst->out_width;
+ pixmp->height = inst->out_height;
+ }
+
+ vdec_try_fmt_common(inst, f);
+
+ return 0;
+}
+
+static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct venus_inst *inst = to_inst(file);
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ struct v4l2_pix_format_mplane orig_pixmp;
+ const struct venus_format *fmt;
+ struct v4l2_format format;
+ u32 pixfmt_out = 0, pixfmt_cap = 0;
+
+ orig_pixmp = *pixmp;
+
+ fmt = vdec_try_fmt_common(inst, f);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ pixfmt_out = pixmp->pixelformat;
+ pixfmt_cap = inst->fmt_cap->pixfmt;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ pixfmt_cap = pixmp->pixelformat;
+ pixfmt_out = inst->fmt_out->pixfmt;
+ }
+
+ memset(&format, 0, sizeof(format));
+
+ format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ format.fmt.pix_mp.pixelformat = pixfmt_out;
+ format.fmt.pix_mp.width = orig_pixmp.width;
+ format.fmt.pix_mp.height = orig_pixmp.height;
+ vdec_try_fmt_common(inst, &format);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ inst->out_width = format.fmt.pix_mp.width;
+ inst->out_height = format.fmt.pix_mp.height;
+ inst->colorspace = pixmp->colorspace;
+ inst->ycbcr_enc = pixmp->ycbcr_enc;
+ inst->quantization = pixmp->quantization;
+ inst->xfer_func = pixmp->xfer_func;
+ }
+
+ memset(&format, 0, sizeof(format));
+
+ format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ format.fmt.pix_mp.pixelformat = pixfmt_cap;
+ format.fmt.pix_mp.width = orig_pixmp.width;
+ format.fmt.pix_mp.height = orig_pixmp.height;
+ vdec_try_fmt_common(inst, &format);
+
+ inst->width = format.fmt.pix_mp.width;
+ inst->height = format.fmt.pix_mp.height;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ inst->fmt_out = fmt;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ inst->fmt_cap = fmt;
+
+ return 0;
+}
+
+static int
+vdec_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct venus_inst *inst = to_inst(file);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ s->r.width = inst->out_width;
+ s->r.height = inst->out_height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_PADDED:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ s->r.width = inst->width;
+ s->r.height = inst->height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ s->r.width = inst->out_width;
+ s->r.height = inst->out_height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ s->r.top = 0;
+ s->r.left = 0;
+
+ return 0;
+}
+
+static int
+vdec_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, "qcom-venus", sizeof(cap->driver));
+ strlcpy(cap->card, "Qualcomm Venus video decoder", sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:qcom-venus", sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int vdec_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ const struct venus_format *fmt;
+
+ memset(f->reserved, 0, sizeof(f->reserved));
+
+ fmt = find_format_by_index(f->index, f->type);
+ if (!fmt)
+ return -EINVAL;
+
+ f->pixelformat = fmt->pixfmt;
+
+ return 0;
+}
+
+static int vdec_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct venus_inst *inst = to_inst(file);
+ struct v4l2_captureparm *cap = &a->parm.capture;
+ struct v4l2_fract *timeperframe = &cap->timeperframe;
+ u64 us_per_frame, fps;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ memset(cap->reserved, 0, sizeof(cap->reserved));
+ if (!timeperframe->denominator)
+ timeperframe->denominator = inst->timeperframe.denominator;
+ if (!timeperframe->numerator)
+ timeperframe->numerator = inst->timeperframe.numerator;
+ cap->readbuffers = 0;
+ cap->extendedmode = 0;
+ cap->capability = V4L2_CAP_TIMEPERFRAME;
+ us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC;
+ do_div(us_per_frame, timeperframe->denominator);
+
+ if (!us_per_frame)
+ return -EINVAL;
+
+ fps = (u64)USEC_PER_SEC;
+ do_div(fps, us_per_frame);
+
+ inst->fps = fps;
+ inst->timeperframe = *timeperframe;
+
+ return 0;
+}
+
+static int vdec_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct venus_inst *inst = to_inst(file);
+ const struct venus_format *fmt;
+
+ fmt = find_format(fsize->pixel_format,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (!fmt) {
+ fmt = find_format(fsize->pixel_format,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (!fmt)
+ return -EINVAL;
+ }
+
+ if (fsize->index)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+
+ fsize->stepwise.min_width = inst->cap_width.min;
+ fsize->stepwise.max_width = inst->cap_width.max;
+ fsize->stepwise.step_width = inst->cap_width.step_size;
+ fsize->stepwise.min_height = inst->cap_height.min;
+ fsize->stepwise.max_height = inst->cap_height.max;
+ fsize->stepwise.step_height = inst->cap_height.step_size;
+
+ return 0;
+}
+
+static int vdec_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 2, NULL);
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subscribe(fh, sub);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+vdec_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
+{
+ if (cmd->cmd != V4L2_DEC_CMD_STOP)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
+{
+ struct venus_inst *inst = to_inst(file);
+ int ret;
+
+ ret = vdec_try_decoder_cmd(file, fh, cmd);
+ if (ret)
+ return ret;
+
+ mutex_lock(&inst->lock);
+ inst->cmd_stop = true;
+ mutex_unlock(&inst->lock);
+
+ hfi_session_flush(inst);
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
+ .vidioc_querycap = vdec_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = vdec_enum_fmt,
+ .vidioc_enum_fmt_vid_out_mplane = vdec_enum_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = vdec_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = vdec_s_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = vdec_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = vdec_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = vdec_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = vdec_try_fmt,
+ .vidioc_g_selection = vdec_g_selection,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_s_parm = vdec_s_parm,
+ .vidioc_enum_framesizes = vdec_enum_framesizes,
+ .vidioc_subscribe_event = vdec_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_try_decoder_cmd = vdec_try_decoder_cmd,
+ .vidioc_decoder_cmd = vdec_decoder_cmd,
+};
+
+static int vdec_set_properties(struct venus_inst *inst)
+{
+ struct vdec_controls *ctr = &inst->controls.dec;
+ struct venus_core *core = inst->core;
+ struct hfi_enable en = { .enable = 1 };
+ u32 ptype;
+ int ret;
+
+ if (core->res->hfi_version == HFI_VERSION_1XX) {
+ ptype = HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER;
+ ret = hfi_session_set_property(inst, ptype, &en);
+ if (ret)
+ return ret;
+ }
+
+ if (core->res->hfi_version == HFI_VERSION_3XX ||
+ inst->cap_bufs_mode_dynamic) {
+ struct hfi_buffer_alloc_mode mode;
+
+ ptype = HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE;
+ mode.type = HFI_BUFFER_OUTPUT;
+ mode.mode = HFI_BUFFER_MODE_DYNAMIC;
+
+ ret = hfi_session_set_property(inst, ptype, &mode);
+ if (ret)
+ return ret;
+ }
+
+ if (ctr->post_loop_deb_mode) {
+ ptype = HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER;
+ en.enable = 1;
+ ret = hfi_session_set_property(inst, ptype, &en);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vdec_init_session(struct venus_inst *inst)
+{
+ int ret;
+
+ ret = hfi_session_init(inst, inst->fmt_out->pixfmt);
+ if (ret)
+ return ret;
+
+ ret = venus_helper_set_input_resolution(inst, inst->out_width,
+ inst->out_height);
+ if (ret)
+ goto deinit;
+
+ ret = venus_helper_set_color_format(inst, inst->fmt_cap->pixfmt);
+ if (ret)
+ goto deinit;
+
+ return 0;
+deinit:
+ hfi_session_deinit(inst);
+ return ret;
+}
+
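+/* query the number of capture buffers the firmware requires via a temporary session */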
+static int vdec_cap_num_buffers(struct venus_inst *inst, unsigned int *num)
+{
+ struct hfi_buffer_requirements bufreq;
+ int ret;
+
+ ret = vdec_init_session(inst);
+ if (ret)
+ return ret;
+
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
+
+ *num = bufreq.count_actual;
+
+ hfi_session_deinit(inst);
+
+ return ret;
+}
+
+static int vdec_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct venus_inst *inst = vb2_get_drv_priv(q);
+ unsigned int p, num;
+ int ret = 0;
+
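+ /* non-zero *num_planes: validate a caller-provided configuration (CREATE_BUFS path) */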
+ if (*num_planes) {
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ *num_planes != inst->fmt_out->num_planes)
+ return -EINVAL;
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ *num_planes != inst->fmt_cap->num_planes)
+ return -EINVAL;
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ sizes[0] < inst->input_buf_size)
+ return -EINVAL;
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ sizes[0] < inst->output_buf_size)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ *num_planes = inst->fmt_out->num_planes;
+ sizes[0] = get_framesize_compressed(inst->out_width,
+ inst->out_height);
+ inst->input_buf_size = sizes[0];
+ inst->num_input_bufs = *num_buffers;
+
+ ret = vdec_cap_num_buffers(inst, &num);
+ if (ret)
+ break;
+
+ inst->num_output_bufs = num;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ *num_planes = inst->fmt_cap->num_planes;
+
+ ret = vdec_cap_num_buffers(inst, &num);
+ if (ret)
+ break;
+
+ *num_buffers = max(*num_buffers, num);
+
+ for (p = 0; p < *num_planes; p++)
+ sizes[p] = get_framesize_uncompressed(p, inst->width,
+ inst->height);
+
+ inst->num_output_bufs = *num_buffers;
+ inst->output_buf_size = sizes[0];
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int vdec_verify_conf(struct venus_inst *inst)
+{
+ struct hfi_buffer_requirements bufreq;
+ int ret;
+
+ if (!inst->num_input_bufs || !inst->num_output_bufs)
+ return -EINVAL;
+
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
+ if (ret)
+ return ret;
+
+ if (inst->num_output_bufs < bufreq.count_actual ||
+ inst->num_output_bufs < bufreq.count_min)
+ return -EINVAL;
+
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
+ if (ret)
+ return ret;
+
+ if (inst->num_input_bufs < bufreq.count_min)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct venus_inst *inst = vb2_get_drv_priv(q);
+ struct venus_core *core = inst->core;
+ u32 ptype;
+ int ret;
+
+ mutex_lock(&inst->lock);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ inst->streamon_out = 1;
+ else
+ inst->streamon_cap = 1;
+
+ if (!(inst->streamon_out & inst->streamon_cap)) {
+ mutex_unlock(&inst->lock);
+ return 0;
+ }
+
+ venus_helper_init_instance(inst);
+
+ inst->reconfig = false;
+ inst->sequence_cap = 0;
+ inst->sequence_out = 0;
+ inst->cmd_stop = false;
+
+ ret = vdec_init_session(inst);
+ if (ret)
+ goto bufs_done;
+
+ ret = vdec_set_properties(inst);
+ if (ret)
+ goto deinit_sess;
+
+ if (core->res->hfi_version == HFI_VERSION_3XX) {
+ struct hfi_buffer_size_actual buf_sz;
+
+ ptype = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL;
+ buf_sz.type = HFI_BUFFER_OUTPUT;
+ buf_sz.size = inst->output_buf_size;
+
+ ret = hfi_session_set_property(inst, ptype, &buf_sz);
+ if (ret)
+ goto deinit_sess;
+ }
+
+ ret = vdec_verify_conf(inst);
+ if (ret)
+ goto deinit_sess;
+
+ ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs,
+ VB2_MAX_FRAME);
+ if (ret)
+ goto deinit_sess;
+
+ ret = venus_helper_vb2_start_streaming(inst);
+ if (ret)
+ goto deinit_sess;
+
+ mutex_unlock(&inst->lock);
+
+ return 0;
+
+deinit_sess:
+ hfi_session_deinit(inst);
+bufs_done:
+ venus_helper_buffers_done(inst, VB2_BUF_STATE_QUEUED);
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ inst->streamon_out = 0;
+ else
+ inst->streamon_cap = 0;
+ mutex_unlock(&inst->lock);
+ return ret;
+}
+
+static const struct vb2_ops vdec_vb2_ops = {
+ .queue_setup = vdec_queue_setup,
+ .buf_init = venus_helper_vb2_buf_init,
+ .buf_prepare = venus_helper_vb2_buf_prepare,
+ .start_streaming = vdec_start_streaming,
+ .stop_streaming = venus_helper_vb2_stop_streaming,
+ .buf_queue = venus_helper_vb2_buf_queue,
+};
+
+static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
+ u32 tag, u32 bytesused, u32 data_offset, u32 flags,
+ u32 hfi_flags, u64 timestamp_us)
+{
+ enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
+ struct vb2_v4l2_buffer *vbuf;
+ struct vb2_buffer *vb;
+ unsigned int type;
+
+ if (buf_type == HFI_BUFFER_INPUT)
+ type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ else
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+
+ vbuf = venus_helper_find_buf(inst, type, tag);
+ if (!vbuf)
+ return;
+
+ vbuf->flags = flags;
+ vbuf->field = V4L2_FIELD_NONE;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ vb = &vbuf->vb2_buf;
+ vb->planes[0].bytesused =
+ max_t(unsigned int, inst->output_buf_size, bytesused);
+ vb->planes[0].data_offset = data_offset;
+ vb->timestamp = timestamp_us * NSEC_PER_USEC;
+ vbuf->sequence = inst->sequence_cap++;
+
+ if (inst->cmd_stop) {
+ vbuf->flags |= V4L2_BUF_FLAG_LAST;
+ inst->cmd_stop = false;
+ }
+
+ if (vbuf->flags & V4L2_BUF_FLAG_LAST) {
+ const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
+
+ v4l2_event_queue_fh(&inst->fh, &ev);
+ }
+ } else {
+ vbuf->sequence = inst->sequence_out++;
+ }
+
+ if (hfi_flags & HFI_BUFFERFLAG_READONLY)
+ venus_helper_acquire_buf_ref(vbuf);
+
+ if (hfi_flags & HFI_BUFFERFLAG_DATACORRUPT)
+ state = VB2_BUF_STATE_ERROR;
+
+ v4l2_m2m_buf_done(vbuf, state);
+}
+
+static void vdec_event_notify(struct venus_inst *inst, u32 event,
+ struct hfi_event_data *data)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev_dec;
+ static const struct v4l2_event ev = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION };
+
+ switch (event) {
+ case EVT_SESSION_ERROR:
+ inst->session_error = true;
+ dev_err(dev, "dec: event session error %x\n", inst->error);
+ break;
+ case EVT_SYS_EVENT_CHANGE:
+ switch (data->event_type) {
+ case HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES:
+ hfi_session_continue(inst);
+ dev_dbg(dev, "event sufficient resources\n");
+ break;
+ case HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUF_RESOURCES:
+ inst->reconfig_height = data->height;
+ inst->reconfig_width = data->width;
+ inst->reconfig = true;
+
+ v4l2_event_queue_fh(&inst->fh, &ev);
+
+ dev_dbg(dev, "event insufficient resources (%ux%u)\n",
+ data->width, data->height);
+ break;
+ case HFI_EVENT_RELEASE_BUFFER_REFERENCE:
+ venus_helper_release_buf_ref(inst, data->tag);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static const struct hfi_inst_ops vdec_hfi_ops = {
+ .buf_done = vdec_buf_done,
+ .event_notify = vdec_event_notify,
+};
+
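+/* defaults: H.264 bitstream on the output queue, NV12 capture, 1280x720 at 30 fps */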
+static void vdec_inst_init(struct venus_inst *inst)
+{
+ inst->fmt_out = &vdec_formats[6];
+ inst->fmt_cap = &vdec_formats[0];
+ inst->width = 1280;
+ inst->height = ALIGN(720, 32);
+ inst->out_width = 1280;
+ inst->out_height = 720;
+ inst->fps = 30;
+ inst->timeperframe.numerator = 1;
+ inst->timeperframe.denominator = 30;
+
+ inst->cap_width.min = 64;
+ inst->cap_width.max = 1920;
+ if (inst->core->res->hfi_version == HFI_VERSION_3XX)
+ inst->cap_width.max = 3840;
+ inst->cap_width.step_size = 1;
+ inst->cap_height.min = 64;
+ inst->cap_height.max = ALIGN(1080, 32);
+ if (inst->core->res->hfi_version == HFI_VERSION_3XX)
+ inst->cap_height.max = ALIGN(2160, 32);
+ inst->cap_height.step_size = 1;
+ inst->cap_framerate.min = 1;
+ inst->cap_framerate.max = 30;
+ inst->cap_framerate.step_size = 1;
+ inst->cap_mbs_per_frame.min = 16;
+ inst->cap_mbs_per_frame.max = 8160;
+}
+
+static const struct v4l2_m2m_ops vdec_m2m_ops = {
+ .device_run = venus_helper_m2m_device_run,
+ .job_abort = venus_helper_m2m_job_abort,
+};
+
+static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct venus_inst *inst = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->ops = &vdec_vb2_ops;
+ src_vq->mem_ops = &vb2_dma_sg_memops;
+ src_vq->drv_priv = inst;
+ src_vq->buf_struct_size = sizeof(struct venus_buffer);
+ src_vq->allow_zero_bytesused = 1;
+ src_vq->min_buffers_needed = 1;
+ src_vq->dev = inst->core->dev;
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->ops = &vdec_vb2_ops;
+ dst_vq->mem_ops = &vb2_dma_sg_memops;
+ dst_vq->drv_priv = inst;
+ dst_vq->buf_struct_size = sizeof(struct venus_buffer);
+ dst_vq->allow_zero_bytesused = 1;
+ dst_vq->min_buffers_needed = 1;
+ dst_vq->dev = inst->core->dev;
+ ret = vb2_queue_init(dst_vq);
+ if (ret) {
+ vb2_queue_release(src_vq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vdec_open(struct file *file)
+{
+ struct venus_core *core = video_drvdata(file);
+ struct venus_inst *inst;
+ int ret;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&inst->registeredbufs);
+ INIT_LIST_HEAD(&inst->internalbufs);
+ INIT_LIST_HEAD(&inst->list);
+ mutex_init(&inst->lock);
+
+ inst->core = core;
+ inst->session_type = VIDC_SESSION_TYPE_DEC;
+ inst->num_output_bufs = 1;
+
+ venus_helper_init_instance(inst);
+
+ ret = pm_runtime_get_sync(core->dev_dec);
+ if (ret < 0)
+ goto err_free_inst;
+
+ ret = vdec_ctrl_init(inst);
+ if (ret)
+ goto err_put_sync;
+
+ ret = hfi_session_create(inst, &vdec_hfi_ops);
+ if (ret)
+ goto err_ctrl_deinit;
+
+ vdec_inst_init(inst);
+
+ /*
+ * Create an m2m device for every instance; m2m context scheduling
+ * is handled by the firmware, so the driver does not need to do it.
+ */
+ inst->m2m_dev = v4l2_m2m_init(&vdec_m2m_ops);
+ if (IS_ERR(inst->m2m_dev)) {
+ ret = PTR_ERR(inst->m2m_dev);
+ goto err_session_destroy;
+ }
+
+ inst->m2m_ctx = v4l2_m2m_ctx_init(inst->m2m_dev, inst, m2m_queue_init);
+ if (IS_ERR(inst->m2m_ctx)) {
+ ret = PTR_ERR(inst->m2m_ctx);
+ goto err_m2m_release;
+ }
+
+ v4l2_fh_init(&inst->fh, core->vdev_dec);
+
+ inst->fh.ctrl_handler = &inst->ctrl_handler;
+ v4l2_fh_add(&inst->fh);
+ inst->fh.m2m_ctx = inst->m2m_ctx;
+ file->private_data = &inst->fh;
+
+ return 0;
+
+err_m2m_release:
+ v4l2_m2m_release(inst->m2m_dev);
+err_session_destroy:
+ hfi_session_destroy(inst);
+err_ctrl_deinit:
+ vdec_ctrl_deinit(inst);
+err_put_sync:
+ pm_runtime_put_sync(core->dev_dec);
+err_free_inst:
+ kfree(inst);
+ return ret;
+}
+
+static int vdec_close(struct file *file)
+{
+ struct venus_inst *inst = to_inst(file);
+
+ v4l2_m2m_ctx_release(inst->m2m_ctx);
+ v4l2_m2m_release(inst->m2m_dev);
+ vdec_ctrl_deinit(inst);
+ hfi_session_destroy(inst);
+ mutex_destroy(&inst->lock);
+ v4l2_fh_del(&inst->fh);
+ v4l2_fh_exit(&inst->fh);
+
+ pm_runtime_put_sync(inst->core->dev_dec);
+
+ kfree(inst);
+ return 0;
+}
+
+static const struct v4l2_file_operations vdec_fops = {
+ .owner = THIS_MODULE,
+ .open = vdec_open,
+ .release = vdec_close,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = v4l2_m2m_fop_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = v4l2_compat_ioctl32,
+#endif
+};
+
+static int vdec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct video_device *vdev;
+ struct venus_core *core;
+ int ret;
+
+ if (!dev->parent)
+ return -EPROBE_DEFER;
+
+ core = dev_get_drvdata(dev->parent);
+ if (!core)
+ return -EPROBE_DEFER;
+
+ if (core->res->hfi_version == HFI_VERSION_3XX) {
+ core->core0_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(core->core0_clk))
+ return PTR_ERR(core->core0_clk);
+ }
+
+ platform_set_drvdata(pdev, core);
+
+ vdev = video_device_alloc();
+ if (!vdev)
+ return -ENOMEM;
+
+ vdev->release = video_device_release;
+ vdev->fops = &vdec_fops;
+ vdev->ioctl_ops = &vdec_ioctl_ops;
+ vdev->vfl_dir = VFL_DIR_M2M;
+ vdev->v4l2_dev = &core->v4l2_dev;
+ vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto err_vdev_release;
+
+ core->vdev_dec = vdev;
+ core->dev_dec = dev;
+
+ video_set_drvdata(vdev, core);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+err_vdev_release:
+ video_device_release(vdev);
+ return ret;
+}
+
+static int vdec_remove(struct platform_device *pdev)
+{
+ struct venus_core *core = dev_get_drvdata(pdev->dev.parent);
+
+ video_unregister_device(core->vdev_dec);
+ pm_runtime_disable(core->dev_dec);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int vdec_runtime_suspend(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+
+ if (core->res->hfi_version == HFI_VERSION_1XX)
+ return 0;
+
+ writel(0, core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL);
+ clk_disable_unprepare(core->core0_clk);
+ writel(1, core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL);
+
+ return 0;
+}
+
+static int vdec_runtime_resume(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ int ret;
+
+ if (core->res->hfi_version == HFI_VERSION_1XX)
+ return 0;
+
+ writel(0, core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL);
+ ret = clk_prepare_enable(core->core0_clk);
+ writel(1, core->base + WRAPPER_VDEC_VCODEC_POWER_CONTROL);
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops vdec_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(vdec_runtime_suspend, vdec_runtime_resume, NULL)
+};
+
+static const struct of_device_id vdec_dt_match[] = {
+ { .compatible = "venus-decoder" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, vdec_dt_match);
+
+static struct platform_driver qcom_venus_dec_driver = {
+ .probe = vdec_probe,
+ .remove = vdec_remove,
+ .driver = {
+ .name = "qcom-venus-decoder",
+ .of_match_table = vdec_dt_match,
+ .pm = &vdec_pm_ops,
+ },
+};
+module_platform_driver(qcom_venus_dec_driver);
+
+MODULE_ALIAS("platform:qcom-venus-decoder");
+MODULE_DESCRIPTION("Qualcomm Venus video decoder driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/qcom/venus/vdec.h b/drivers/media/platform/qcom/venus/vdec.h
new file mode 100644
index 000000000000..84b672c54d02
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/vdec.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __VENUS_VDEC_H__
+#define __VENUS_VDEC_H__
+
+struct venus_inst;
+
+int vdec_ctrl_init(struct venus_inst *inst);
+void vdec_ctrl_deinit(struct venus_inst *inst);
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/vdec_ctrls.c b/drivers/media/platform/qcom/venus/vdec_ctrls.c
new file mode 100644
index 000000000000..032839bbc967
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/vdec_ctrls.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/types.h>
+#include <media/v4l2-ctrls.h>
+
+#include "core.h"
+#include "vdec.h"
+
+static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct venus_inst *inst = ctrl_to_inst(ctrl);
+ struct vdec_controls *ctr = &inst->controls.dec;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
+ ctr->post_loop_deb_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:
+ ctr->profile = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ ctr->level = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct venus_inst *inst = ctrl_to_inst(ctrl);
+ struct vdec_controls *ctr = &inst->controls.dec;
+ union hfi_get_property hprop;
+ u32 ptype = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
+ int ret;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:
+ ret = hfi_session_get_property(inst, ptype, &hprop);
+ if (!ret)
+ ctr->profile = hprop.profile_level.profile;
+ ctrl->val = ctr->profile;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ ret = hfi_session_get_property(inst, ptype, &hprop);
+ if (!ret)
+ ctr->level = hprop.profile_level.level;
+ ctrl->val = ctr->level;
+ break;
+ case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
+ ctrl->val = ctr->post_loop_deb_mode;
+ break;
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ ctrl->val = inst->num_output_bufs;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vdec_ctrl_ops = {
+ .s_ctrl = vdec_op_s_ctrl,
+ .g_volatile_ctrl = vdec_op_g_volatile_ctrl,
+};
+
+int vdec_ctrl_init(struct venus_inst *inst)
+{
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 7);
+ if (ret)
+ return ret;
+
+ ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY,
+ ~((1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE) |
+ (1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE)),
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
+ V4L2_MPEG_VIDEO_MPEG4_LEVEL_5,
+ 0, V4L2_MPEG_VIDEO_MPEG4_LEVEL_0);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH,
+ ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH)),
+ V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_5_1,
+ 0, V4L2_MPEG_VIDEO_H264_LEVEL_1_0);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VPX_PROFILE, 0, 3, 1, 0);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER, 0, 1, 1, 0);
+
+ ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 1);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+ ret = inst->ctrl_handler.error;
+ if (ret) {
+ v4l2_ctrl_handler_free(&inst->ctrl_handler);
+ return ret;
+ }
+
+ return 0;
+}
+
+void vdec_ctrl_deinit(struct venus_inst *inst)
+{
+ v4l2_ctrl_handler_free(&inst->ctrl_handler);
+}
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
new file mode 100644
index 000000000000..39748e7a08e4
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -0,0 +1,1283 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-sg.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ctrls.h>
+
+#include "hfi_venus_io.h"
+#include "core.h"
+#include "helpers.h"
+#include "venc.h"
+
+#define NUM_B_FRAMES_MAX 4
+
+static u32 get_framesize_uncompressed(unsigned int plane, u32 width, u32 height)
+{
+ u32 y_stride, uv_stride, y_plane;
+ u32 y_sclines, uv_sclines, uv_plane;
+ u32 size;
+
+ y_stride = ALIGN(width, 128);
+ uv_stride = ALIGN(width, 128);
+ y_sclines = ALIGN(height, 32);
+ uv_sclines = ALIGN(((height + 1) >> 1), 16);
+
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines + SZ_4K;
+ size = y_plane + uv_plane + SZ_8K;
+ size = ALIGN(size, SZ_4K);
+
+ return size;
+}
+
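+/* compressed bitstream buffer size: half of a 32-aligned NV12 frame, rounded up to 4K */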
+static u32 get_framesize_compressed(u32 width, u32 height)
+{
+ u32 sz = ALIGN(height, 32) * ALIGN(width, 32) * 3 / 2 / 2;
+
+ return ALIGN(sz, SZ_4K);
+}
+
+/*
+ * Three reasons to keep the MPLANE formats (even though only one plane is
+ * currently used):
+ * - the MPLANE formats allow a single plane to be used
+ * - the downstream driver uses MPLANE formats too
+ * - future firmware versions could add support for more than one plane
+ */
+static const struct venus_format venc_formats[] = {
+ {
+ .pixfmt = V4L2_PIX_FMT_NV12,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_MPEG4,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_H263,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_H264,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_VP8,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ }, {
+ .pixfmt = V4L2_PIX_FMT_VP9,
+ .num_planes = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+ },
+};
+
+static const struct venus_format *find_format(u32 pixfmt, u32 type)
+{
+ const struct venus_format *fmt = venc_formats;
+ unsigned int size = ARRAY_SIZE(venc_formats);
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ if (fmt[i].pixfmt == pixfmt)
+ break;
+ }
+
+ if (i == size || fmt[i].type != type)
+ return NULL;
+
+ return &fmt[i];
+}
+
+static const struct venus_format *
+find_format_by_index(unsigned int index, u32 type)
+{
+ const struct venus_format *fmt = venc_formats;
+ unsigned int size = ARRAY_SIZE(venc_formats);
+ unsigned int i, k = 0;
+
+ if (index > size)
+ return NULL;
+
+ for (i = 0; i < size; i++) {
+ if (fmt[i].type != type)
+ continue;
+ if (k == index)
+ break;
+ k++;
+ }
+
+ if (i == size)
+ return NULL;
+
+ return &fmt[i];
+}
+
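+/* translate V4L2 codec control values (profile, level, entropy mode) into HFI enums */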
+static int venc_v4l2_to_hfi(int id, int value)
+{
+ switch (id) {
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ switch (value) {
+ case V4L2_MPEG_VIDEO_MPEG4_LEVEL_0:
+ default:
+ return HFI_MPEG4_LEVEL_0;
+ case V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B:
+ return HFI_MPEG4_LEVEL_0b;
+ case V4L2_MPEG_VIDEO_MPEG4_LEVEL_1:
+ return HFI_MPEG4_LEVEL_1;
+ case V4L2_MPEG_VIDEO_MPEG4_LEVEL_2:
+ return HFI_MPEG4_LEVEL_2;
+ case V4L2_MPEG_VIDEO_MPEG4_LEVEL_3:
+ return HFI_MPEG4_LEVEL_3;
+ case V4L2_MPEG_VIDEO_MPEG4_LEVEL_4:
+ return HFI_MPEG4_LEVEL_4;
+ case V4L2_MPEG_VIDEO_MPEG4_LEVEL_5:
+ return HFI_MPEG4_LEVEL_5;
+ }
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ switch (value) {
+ case V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE:
+ default:
+ return HFI_MPEG4_PROFILE_SIMPLE;
+ case V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE:
+ return HFI_MPEG4_PROFILE_ADVANCEDSIMPLE;
+ }
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ switch (value) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ return HFI_H264_PROFILE_BASELINE;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
+ return HFI_H264_PROFILE_CONSTRAINED_BASE;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ return HFI_H264_PROFILE_MAIN;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ default:
+ return HFI_H264_PROFILE_HIGH;
+ }
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ switch (value) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ return HFI_H264_LEVEL_1;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ return HFI_H264_LEVEL_1b;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ return HFI_H264_LEVEL_11;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ return HFI_H264_LEVEL_12;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ return HFI_H264_LEVEL_13;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ return HFI_H264_LEVEL_2;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ return HFI_H264_LEVEL_21;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ return HFI_H264_LEVEL_22;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ return HFI_H264_LEVEL_3;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ return HFI_H264_LEVEL_31;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ return HFI_H264_LEVEL_32;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ return HFI_H264_LEVEL_4;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ return HFI_H264_LEVEL_41;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return HFI_H264_LEVEL_42;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ default:
+ return HFI_H264_LEVEL_5;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ return HFI_H264_LEVEL_51;
+ }
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ switch (value) {
+ case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC:
+ default:
+ return HFI_H264_ENTROPY_CAVLC;
+ case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC:
+ return HFI_H264_ENTROPY_CABAC;
+ }
+ case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:
+ switch (value) {
+ case 0:
+ default:
+ return HFI_VPX_PROFILE_VERSION_0;
+ case 1:
+ return HFI_VPX_PROFILE_VERSION_1;
+ case 2:
+ return HFI_VPX_PROFILE_VERSION_2;
+ case 3:
+ return HFI_VPX_PROFILE_VERSION_3;
+ }
+ }
+
+ return 0;
+}
+
+static int
+venc_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, "qcom-venus", sizeof(cap->driver));
+ strlcpy(cap->card, "Qualcomm Venus video encoder", sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:qcom-venus", sizeof(cap->bus_info));
+
+ return 0;
+}
+
+static int venc_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+ const struct venus_format *fmt;
+
+ fmt = find_format_by_index(f->index, f->type);
+
+ memset(f->reserved, 0, sizeof(f->reserved));
+
+ if (!fmt)
+ return -EINVAL;
+
+ f->pixelformat = fmt->pixfmt;
+
+ return 0;
+}
+
+static const struct venus_format *
+venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt;
+ const struct venus_format *fmt;
+ unsigned int p;
+
+ memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved));
+ memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
+
+ fmt = find_format(pixmp->pixelformat, f->type);
+ if (!fmt) {
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ pixmp->pixelformat = V4L2_PIX_FMT_H264;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ pixmp->pixelformat = V4L2_PIX_FMT_NV12;
+ else
+ return NULL;
+ fmt = find_format(pixmp->pixelformat, f->type);
+ pixmp->width = 1280;
+ pixmp->height = 720;
+ }
+
+ pixmp->width = clamp(pixmp->width, inst->cap_width.min,
+ inst->cap_width.max);
+ pixmp->height = clamp(pixmp->height, inst->cap_height.min,
+ inst->cap_height.max);
+
+ if (inst->core->res->hfi_version == HFI_VERSION_1XX)
+ pixmp->height = ALIGN(pixmp->height, 32);
+
+ pixmp->width = ALIGN(pixmp->width, 2);
+ pixmp->height = ALIGN(pixmp->height, 2);
+
+ if (pixmp->field == V4L2_FIELD_ANY)
+ pixmp->field = V4L2_FIELD_NONE;
+ pixmp->num_planes = fmt->num_planes;
+ pixmp->flags = 0;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ for (p = 0; p < pixmp->num_planes; p++) {
+ pfmt[p].sizeimage =
+ get_framesize_uncompressed(p, pixmp->width,
+ pixmp->height);
+
+ pfmt[p].bytesperline = ALIGN(pixmp->width, 128);
+ }
+ } else {
+ pfmt[0].sizeimage = get_framesize_compressed(pixmp->width,
+ pixmp->height);
+ pfmt[0].bytesperline = 0;
+ }
+
+ return fmt;
+}
+
+static int venc_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct venus_inst *inst = to_inst(file);
+
+ venc_try_fmt_common(inst, f);
+
+ return 0;
+}
+
+static int venc_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct venus_inst *inst = to_inst(file);
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ struct v4l2_pix_format_mplane orig_pixmp;
+ const struct venus_format *fmt;
+ struct v4l2_format format;
+ u32 pixfmt_out = 0, pixfmt_cap = 0;
+
+ orig_pixmp = *pixmp;
+
+ fmt = venc_try_fmt_common(inst, f);
+ if (!fmt)
+ return -EINVAL;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ pixfmt_out = pixmp->pixelformat;
+ pixfmt_cap = inst->fmt_cap->pixfmt;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ pixfmt_cap = pixmp->pixelformat;
+ pixfmt_out = inst->fmt_out->pixfmt;
+ }
+
+ memset(&format, 0, sizeof(format));
+
+ format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ format.fmt.pix_mp.pixelformat = pixfmt_out;
+ format.fmt.pix_mp.width = orig_pixmp.width;
+ format.fmt.pix_mp.height = orig_pixmp.height;
+ venc_try_fmt_common(inst, &format);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ inst->out_width = format.fmt.pix_mp.width;
+ inst->out_height = format.fmt.pix_mp.height;
+ inst->colorspace = pixmp->colorspace;
+ inst->ycbcr_enc = pixmp->ycbcr_enc;
+ inst->quantization = pixmp->quantization;
+ inst->xfer_func = pixmp->xfer_func;
+ }
+
+ memset(&format, 0, sizeof(format));
+
+ format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ format.fmt.pix_mp.pixelformat = pixfmt_cap;
+ format.fmt.pix_mp.width = orig_pixmp.width;
+ format.fmt.pix_mp.height = orig_pixmp.height;
+ venc_try_fmt_common(inst, &format);
+
+ inst->width = format.fmt.pix_mp.width;
+ inst->height = format.fmt.pix_mp.height;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ inst->fmt_out = fmt;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ inst->fmt_cap = fmt;
+
+ return 0;
+}
+
+static int venc_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
+ struct venus_inst *inst = to_inst(file);
+ const struct venus_format *fmt;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ fmt = inst->fmt_cap;
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ fmt = inst->fmt_out;
+ else
+ return -EINVAL;
+
+ pixmp->pixelformat = fmt->pixfmt;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ pixmp->width = inst->width;
+ pixmp->height = inst->height;
+ pixmp->colorspace = inst->colorspace;
+ pixmp->ycbcr_enc = inst->ycbcr_enc;
+ pixmp->quantization = inst->quantization;
+ pixmp->xfer_func = inst->xfer_func;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ pixmp->width = inst->out_width;
+ pixmp->height = inst->out_height;
+ }
+
+ venc_try_fmt_common(inst, f);
+
+ return 0;
+}
+
+static int
+venc_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct venus_inst *inst = to_inst(file);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ s->r.width = inst->width;
+ s->r.height = inst->height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ s->r.width = inst->out_width;
+ s->r.height = inst->out_height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ s->r.top = 0;
+ s->r.left = 0;
+
+ return 0;
+}
+
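+/* The encoder does not support cropping: only the full frame is accepted. */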
+static int
+venc_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct venus_inst *inst = to_inst(file);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (s->r.width != inst->out_width ||
+ s->r.height != inst->out_height ||
+ s->r.top != 0 || s->r.left != 0)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
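+/*
+ * S_PARM: derive the frame rate from the requested timeperframe
+ * (fps = USEC_PER_SEC / us_per_frame) and store both for later HFI
+ * property programming.
+ */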
+static int venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct venus_inst *inst = to_inst(file);
+ struct v4l2_outputparm *out = &a->parm.output;
+ struct v4l2_fract *timeperframe = &out->timeperframe;
+ u64 us_per_frame, fps;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ memset(out->reserved, 0, sizeof(out->reserved));
+
+ if (!timeperframe->denominator)
+ timeperframe->denominator = inst->timeperframe.denominator;
+ if (!timeperframe->numerator)
+ timeperframe->numerator = inst->timeperframe.numerator;
+
+ out->capability = V4L2_CAP_TIMEPERFRAME;
+
+ us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC;
+ do_div(us_per_frame, timeperframe->denominator);
+
+ if (!us_per_frame)
+ return -EINVAL;
+
+ fps = (u64)USEC_PER_SEC;
+ do_div(fps, us_per_frame);
+
+ inst->timeperframe = *timeperframe;
+ inst->fps = fps;
+
+ return 0;
+}
+
+static int venc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+ struct venus_inst *inst = to_inst(file);
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ a->parm.output.capability |= V4L2_CAP_TIMEPERFRAME;
+ a->parm.output.timeperframe = inst->timeperframe;
+
+ return 0;
+}
+
+static int venc_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct venus_inst *inst = to_inst(file);
+ const struct venus_format *fmt;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+
+ fmt = find_format(fsize->pixel_format,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (!fmt) {
+ fmt = find_format(fsize->pixel_format,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (!fmt)
+ return -EINVAL;
+ }
+
+ if (fsize->index)
+ return -EINVAL;
+
+ fsize->stepwise.min_width = inst->cap_width.min;
+ fsize->stepwise.max_width = inst->cap_width.max;
+ fsize->stepwise.step_width = inst->cap_width.step_size;
+ fsize->stepwise.min_height = inst->cap_height.min;
+ fsize->stepwise.max_height = inst->cap_height.max;
+ fsize->stepwise.step_height = inst->cap_height.step_size;
+
+ return 0;
+}
+
+static int venc_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct venus_inst *inst = to_inst(file);
+ const struct venus_format *fmt;
+
+ fival->type = V4L2_FRMIVAL_TYPE_STEPWISE;
+
+ fmt = find_format(fival->pixel_format,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (!fmt) {
+ fmt = find_format(fival->pixel_format,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (!fmt)
+ return -EINVAL;
+ }
+
+ if (fival->index)
+ return -EINVAL;
+
+ if (!fival->width || !fival->height)
+ return -EINVAL;
+
+ if (fival->width > inst->cap_width.max ||
+ fival->width < inst->cap_width.min ||
+ fival->height > inst->cap_height.max ||
+ fival->height < inst->cap_height.min)
+ return -EINVAL;
+
+ fival->stepwise.min.numerator = 1;
+ fival->stepwise.min.denominator = inst->cap_framerate.max;
+ fival->stepwise.max.numerator = 1;
+ fival->stepwise.max.denominator = inst->cap_framerate.min;
+ fival->stepwise.step.numerator = 1;
+ fival->stepwise.step.denominator = inst->cap_framerate.max;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops venc_ioctl_ops = {
+ .vidioc_querycap = venc_querycap,
+ .vidioc_enum_fmt_vid_cap_mplane = venc_enum_fmt,
+ .vidioc_enum_fmt_vid_out_mplane = venc_enum_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = venc_s_fmt,
+ .vidioc_s_fmt_vid_out_mplane = venc_s_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = venc_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = venc_g_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = venc_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = venc_try_fmt,
+ .vidioc_g_selection = venc_g_selection,
+ .vidioc_s_selection = venc_s_selection,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_s_parm = venc_s_parm,
+ .vidioc_g_parm = venc_g_parm,
+ .vidioc_enum_framesizes = venc_enum_framesizes,
+ .vidioc_enum_frameintervals = venc_enum_frameintervals,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
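+/*
+ * Program the encoder session: frame rate, VUI timing info (H.264 only),
+ * IDR/intra period, rate control mode, target/peak bitrate and
+ * profile/level, each via an HFI set_property call.
+ */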
+static int venc_set_properties(struct venus_inst *inst)
+{
+ struct venc_controls *ctr = &inst->controls.enc;
+ struct hfi_intra_period intra_period;
+ struct hfi_profile_level pl;
+ struct hfi_framerate frate;
+ struct hfi_bitrate brate;
+ struct hfi_idr_period idrp;
+ u32 ptype, rate_control, bitrate, profile = 0, level = 0;
+ int ret;
+
+ ptype = HFI_PROPERTY_CONFIG_FRAME_RATE;
+ frate.buffer_type = HFI_BUFFER_OUTPUT;
+ frate.framerate = inst->fps * (1 << 16);
+
+ ret = hfi_session_set_property(inst, ptype, &frate);
+ if (ret)
+ return ret;
+
+ if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264) {
+ struct hfi_h264_vui_timing_info info;
+
+ ptype = HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO;
+ info.enable = 1;
+ info.fixed_framerate = 1;
+ info.time_scale = NSEC_PER_SEC;
+
+ ret = hfi_session_set_property(inst, ptype, &info);
+ if (ret)
+ return ret;
+ }
+
+ ptype = HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD;
+ idrp.idr_period = ctr->gop_size;
+ ret = hfi_session_set_property(inst, ptype, &idrp);
+ if (ret)
+ return ret;
+
+ if (ctr->num_b_frames) {
+ u32 max_num_b_frames = NUM_B_FRAMES_MAX;
+
+ ptype = HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES;
+ ret = hfi_session_set_property(inst, ptype, &max_num_b_frames);
+ if (ret)
+ return ret;
+ }
+
+ /* intra_period = pframes + bframes + 1 */
+ if (!ctr->num_p_frames)
+ ctr->num_p_frames = 2 * 15 - 1;

+
+ ptype = HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD;
+ intra_period.pframes = ctr->num_p_frames;
+ intra_period.bframes = ctr->num_b_frames;
+
+ ret = hfi_session_set_property(inst, ptype, &intra_period);
+ if (ret)
+ return ret;
+
+ if (ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
+ rate_control = HFI_RATE_CONTROL_VBR_CFR;
+ else
+ rate_control = HFI_RATE_CONTROL_CBR_CFR;
+
+ ptype = HFI_PROPERTY_PARAM_VENC_RATE_CONTROL;
+ ret = hfi_session_set_property(inst, ptype, &rate_control);
+ if (ret)
+ return ret;
+
+ if (!ctr->bitrate)
+ bitrate = 64000;
+ else
+ bitrate = ctr->bitrate;
+
+ ptype = HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE;
+ brate.bitrate = bitrate;
+ brate.layer_id = 0;
+
+ ret = hfi_session_set_property(inst, ptype, &brate);
+ if (ret)
+ return ret;
+
+ if (!ctr->bitrate_peak)
+ bitrate *= 2;
+ else
+ bitrate = ctr->bitrate_peak;
+
+ ptype = HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE;
+ brate.bitrate = bitrate;
+ brate.layer_id = 0;
+
+ ret = hfi_session_set_property(inst, ptype, &brate);
+ if (ret)
+ return ret;
+
+ if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264) {
+ profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ ctr->profile.h264);
+ level = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ ctr->level.h264);
+ } else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_VP8) {
+ profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_VPX_PROFILE,
+ ctr->profile.vpx);
+ level = 0;
+ } else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_MPEG4) {
+ profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
+ ctr->profile.mpeg4);
+ level = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
+ ctr->level.mpeg4);
+ } else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H263) {
+ profile = 0;
+ level = 0;
+ }
+
+ ptype = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
+ pl.profile = profile;
+ pl.level = level;
+
+ ret = hfi_session_set_property(inst, ptype, &pl);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int venc_init_session(struct venus_inst *inst)
+{
+ int ret;
+
+ ret = hfi_session_init(inst, inst->fmt_cap->pixfmt);
+ if (ret)
+ return ret;
+
+ ret = venus_helper_set_input_resolution(inst, inst->out_width,
+ inst->out_height);
+ if (ret)
+ goto deinit;
+
+ ret = venus_helper_set_output_resolution(inst, inst->width,
+ inst->height);
+ if (ret)
+ goto deinit;
+
+ ret = venus_helper_set_color_format(inst, inst->fmt_out->pixfmt);
+ if (ret)
+ goto deinit;
+
+ return 0;
+deinit:
+ hfi_session_deinit(inst);
+ return ret;
+}
+
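+/*
+ * Ask the firmware how many input buffers it needs by opening a temporary
+ * session, querying the buffer requirements and tearing the session down
+ * again.
+ */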
+static int venc_out_num_buffers(struct venus_inst *inst, unsigned int *num)
+{
+ struct hfi_buffer_requirements bufreq;
+ int ret;
+
+ ret = venc_init_session(inst);
+ if (ret)
+ return ret;
+
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
+
+ *num = bufreq.count_actual;
+
+ hfi_session_deinit(inst);
+
+ return ret;
+}
+
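+/*
+ * vb2 queue_setup: validate caller-provided plane counts and sizes, or
+ * compute the buffer count and plane sizes for the raw (OUTPUT) and
+ * bitstream (CAPTURE) queues.
+ */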
+static int venc_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct venus_inst *inst = vb2_get_drv_priv(q);
+ unsigned int p, num, min = 4;
+ int ret = 0;
+
+ if (*num_planes) {
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ *num_planes != inst->fmt_out->num_planes)
+ return -EINVAL;
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ *num_planes != inst->fmt_cap->num_planes)
+ return -EINVAL;
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+ sizes[0] < inst->input_buf_size)
+ return -EINVAL;
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
+ sizes[0] < inst->output_buf_size)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ *num_planes = inst->fmt_out->num_planes;
+
+ ret = venc_out_num_buffers(inst, &num);
+ if (ret)
+ break;
+
+ num = max(num, min);
+ *num_buffers = max(*num_buffers, num);
+ inst->num_input_bufs = *num_buffers;
+
+ for (p = 0; p < *num_planes; ++p)
+ sizes[p] = get_framesize_uncompressed(p, inst->width,
+ inst->height);
+ inst->input_buf_size = sizes[0];
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ *num_planes = inst->fmt_cap->num_planes;
+ *num_buffers = max(*num_buffers, min);
+ inst->num_output_bufs = *num_buffers;
+ sizes[0] = get_framesize_compressed(inst->width, inst->height);
+ inst->output_buf_size = sizes[0];
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int venc_verify_conf(struct venus_inst *inst)
+{
+ struct hfi_buffer_requirements bufreq;
+ int ret;
+
+ if (!inst->num_input_bufs || !inst->num_output_bufs)
+ return -EINVAL;
+
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
+ if (ret)
+ return ret;
+
+ if (inst->num_output_bufs < bufreq.count_actual ||
+ inst->num_output_bufs < bufreq.count_min)
+ return -EINVAL;
+
+ ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
+ if (ret)
+ return ret;
+
+ if (inst->num_input_bufs < bufreq.count_actual ||
+ inst->num_input_bufs < bufreq.count_min)
+ return -EINVAL;
+
+ return 0;
+}
+
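+/*
+ * Start streaming only once both queues are streaming; then initialize the
+ * HFI session, apply the encoder properties and verify the negotiated
+ * buffer counts before handing buffers to the m2m engine.
+ */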
+static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct venus_inst *inst = vb2_get_drv_priv(q);
+ int ret;
+
+ mutex_lock(&inst->lock);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ inst->streamon_out = 1;
+ else
+ inst->streamon_cap = 1;
+
+ if (!(inst->streamon_out & inst->streamon_cap)) {
+ mutex_unlock(&inst->lock);
+ return 0;
+ }
+
+ venus_helper_init_instance(inst);
+
+ inst->sequence_cap = 0;
+ inst->sequence_out = 0;
+
+ ret = venc_init_session(inst);
+ if (ret)
+ goto bufs_done;
+
+ ret = venc_set_properties(inst);
+ if (ret)
+ goto deinit_sess;
+
+ ret = venc_verify_conf(inst);
+ if (ret)
+ goto deinit_sess;
+
+ ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs,
+ inst->num_output_bufs);
+ if (ret)
+ goto deinit_sess;
+
+ ret = venus_helper_vb2_start_streaming(inst);
+ if (ret)
+ goto deinit_sess;
+
+ mutex_unlock(&inst->lock);
+
+ return 0;
+
+deinit_sess:
+ hfi_session_deinit(inst);
+bufs_done:
+ venus_helper_buffers_done(inst, VB2_BUF_STATE_QUEUED);
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ inst->streamon_out = 0;
+ else
+ inst->streamon_cap = 0;
+ mutex_unlock(&inst->lock);
+ return ret;
+}
+
+static const struct vb2_ops venc_vb2_ops = {
+ .queue_setup = venc_queue_setup,
+ .buf_init = venus_helper_vb2_buf_init,
+ .buf_prepare = venus_helper_vb2_buf_prepare,
+ .start_streaming = venc_start_streaming,
+ .stop_streaming = venus_helper_vb2_stop_streaming,
+ .buf_queue = venus_helper_vb2_buf_queue,
+};
+
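+/*
+ * HFI buffer-done callback: translate the HFI buffer type back to a vb2
+ * queue, find the matching buffer by tag, fill in payload, timestamp and
+ * sequence, and complete the buffer.
+ */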
+static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type,
+ u32 tag, u32 bytesused, u32 data_offset, u32 flags,
+ u32 hfi_flags, u64 timestamp_us)
+{
+ struct vb2_v4l2_buffer *vbuf;
+ struct vb2_buffer *vb;
+ unsigned int type;
+
+ if (buf_type == HFI_BUFFER_INPUT)
+ type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ else
+ type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+
+ vbuf = venus_helper_find_buf(inst, type, tag);
+ if (!vbuf)
+ return;
+
+ vb = &vbuf->vb2_buf;
+ vb->planes[0].bytesused = bytesused;
+ vb->planes[0].data_offset = data_offset;
+
+ vbuf->flags = flags;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ vb->timestamp = timestamp_us * NSEC_PER_USEC;
+ vbuf->sequence = inst->sequence_cap++;
+ } else {
+ vbuf->sequence = inst->sequence_out++;
+ }
+
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+}
+
+static void venc_event_notify(struct venus_inst *inst, u32 event,
+ struct hfi_event_data *data)
+{
+ struct device *dev = inst->core->dev_enc;
+
+ if (event == EVT_SESSION_ERROR) {
+ inst->session_error = true;
+ dev_err(dev, "enc: event session error %x\n", inst->error);
+ }
+}
+
+static const struct hfi_inst_ops venc_hfi_ops = {
+ .buf_done = venc_buf_done,
+ .event_notify = venc_event_notify,
+};
+
+static const struct v4l2_m2m_ops venc_m2m_ops = {
+ .device_run = venus_helper_m2m_device_run,
+ .job_abort = venus_helper_m2m_job_abort,
+};
+
+static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct venus_inst *inst = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->ops = &venc_vb2_ops;
+ src_vq->mem_ops = &vb2_dma_sg_memops;
+ src_vq->drv_priv = inst;
+ src_vq->buf_struct_size = sizeof(struct venus_buffer);
+ src_vq->allow_zero_bytesused = 1;
+ src_vq->min_buffers_needed = 1;
+ src_vq->dev = inst->core->dev;
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->ops = &venc_vb2_ops;
+ dst_vq->mem_ops = &vb2_dma_sg_memops;
+ dst_vq->drv_priv = inst;
+ dst_vq->buf_struct_size = sizeof(struct venus_buffer);
+ dst_vq->allow_zero_bytesused = 1;
+ dst_vq->min_buffers_needed = 1;
+ dst_vq->dev = inst->core->dev;
+ ret = vb2_queue_init(dst_vq);
+ if (ret) {
+ vb2_queue_release(src_vq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void venc_inst_init(struct venus_inst *inst)
+{
+ inst->fmt_cap = &venc_formats[2];
+ inst->fmt_out = &venc_formats[0];
+ inst->width = 1280;
+ inst->height = ALIGN(720, 32);
+ inst->out_width = 1280;
+ inst->out_height = 720;
+ inst->fps = 15;
+ inst->timeperframe.numerator = 1;
+ inst->timeperframe.denominator = 15;
+
+ inst->cap_width.min = 96;
+ inst->cap_width.max = 1920;
+ if (inst->core->res->hfi_version == HFI_VERSION_3XX)
+ inst->cap_width.max = 3840;
+ inst->cap_width.step_size = 2;
+ inst->cap_height.min = 64;
+ inst->cap_height.max = ALIGN(1080, 32);
+ if (inst->core->res->hfi_version == HFI_VERSION_3XX)
+ inst->cap_height.max = ALIGN(2160, 32);
+ inst->cap_height.step_size = 2;
+ inst->cap_framerate.min = 1;
+ inst->cap_framerate.max = 30;
+ inst->cap_framerate.step_size = 1;
+ inst->cap_mbs_per_frame.min = 24;
+ inst->cap_mbs_per_frame.max = 8160;
+}
+
+static int venc_open(struct file *file)
+{
+ struct venus_core *core = video_drvdata(file);
+ struct venus_inst *inst;
+ int ret;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&inst->registeredbufs);
+ INIT_LIST_HEAD(&inst->internalbufs);
+ INIT_LIST_HEAD(&inst->list);
+ mutex_init(&inst->lock);
+
+ inst->core = core;
+ inst->session_type = VIDC_SESSION_TYPE_ENC;
+
+ venus_helper_init_instance(inst);
+
+ ret = pm_runtime_get_sync(core->dev_enc);
+ if (ret < 0)
+ goto err_free_inst;
+
+ ret = venc_ctrl_init(inst);
+ if (ret)
+ goto err_put_sync;
+
+ ret = hfi_session_create(inst, &venc_hfi_ops);
+ if (ret)
+ goto err_ctrl_deinit;
+
+ venc_inst_init(inst);
+
+ /*
+ * Create an m2m device for every instance; m2m context scheduling
+ * is handled by the firmware, so the driver does not need to care
+ * about it.
+ */
+ inst->m2m_dev = v4l2_m2m_init(&venc_m2m_ops);
+ if (IS_ERR(inst->m2m_dev)) {
+ ret = PTR_ERR(inst->m2m_dev);
+ goto err_session_destroy;
+ }
+
+ inst->m2m_ctx = v4l2_m2m_ctx_init(inst->m2m_dev, inst, m2m_queue_init);
+ if (IS_ERR(inst->m2m_ctx)) {
+ ret = PTR_ERR(inst->m2m_ctx);
+ goto err_m2m_release;
+ }
+
+ v4l2_fh_init(&inst->fh, core->vdev_enc);
+
+ inst->fh.ctrl_handler = &inst->ctrl_handler;
+ v4l2_fh_add(&inst->fh);
+ inst->fh.m2m_ctx = inst->m2m_ctx;
+ file->private_data = &inst->fh;
+
+ return 0;
+
+err_m2m_release:
+ v4l2_m2m_release(inst->m2m_dev);
+err_session_destroy:
+ hfi_session_destroy(inst);
+err_ctrl_deinit:
+ venc_ctrl_deinit(inst);
+err_put_sync:
+ pm_runtime_put_sync(core->dev_enc);
+err_free_inst:
+ kfree(inst);
+ return ret;
+}
+
+static int venc_close(struct file *file)
+{
+ struct venus_inst *inst = to_inst(file);
+
+ v4l2_m2m_ctx_release(inst->m2m_ctx);
+ v4l2_m2m_release(inst->m2m_dev);
+ venc_ctrl_deinit(inst);
+ hfi_session_destroy(inst);
+ mutex_destroy(&inst->lock);
+ v4l2_fh_del(&inst->fh);
+ v4l2_fh_exit(&inst->fh);
+
+ pm_runtime_put_sync(inst->core->dev_enc);
+
+ kfree(inst);
+ return 0;
+}
+
+static const struct v4l2_file_operations venc_fops = {
+ .owner = THIS_MODULE,
+ .open = venc_open,
+ .release = venc_close,
+ .unlocked_ioctl = video_ioctl2,
+ .poll = v4l2_m2m_fop_poll,
+ .mmap = v4l2_m2m_fop_mmap,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = v4l2_compat_ioctl32,
+#endif
+};
+
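+/*
+ * Probe the encoder sub-device: defer until the Venus core is ready, grab
+ * the optional core clock (HFI 3xx), register the video device and enable
+ * runtime PM.
+ */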
+static int venc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct video_device *vdev;
+ struct venus_core *core;
+ int ret;
+
+ if (!dev->parent)
+ return -EPROBE_DEFER;
+
+ core = dev_get_drvdata(dev->parent);
+ if (!core)
+ return -EPROBE_DEFER;
+
+ if (core->res->hfi_version == HFI_VERSION_3XX) {
+ core->core1_clk = devm_clk_get(dev, "core");
+ if (IS_ERR(core->core1_clk))
+ return PTR_ERR(core->core1_clk);
+ }
+
+ platform_set_drvdata(pdev, core);
+
+ vdev = video_device_alloc();
+ if (!vdev)
+ return -ENOMEM;
+
+ vdev->release = video_device_release;
+ vdev->fops = &venc_fops;
+ vdev->ioctl_ops = &venc_ioctl_ops;
+ vdev->vfl_dir = VFL_DIR_M2M;
+ vdev->v4l2_dev = &core->v4l2_dev;
+ vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto err_vdev_release;
+
+ core->vdev_enc = vdev;
+ core->dev_enc = dev;
+
+ video_set_drvdata(vdev, core);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+err_vdev_release:
+ video_device_release(vdev);
+ return ret;
+}
+
+static int venc_remove(struct platform_device *pdev)
+{
+ struct venus_core *core = dev_get_drvdata(pdev->dev.parent);
+
+ video_unregister_device(core->vdev_enc);
+ pm_runtime_disable(core->dev_enc);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
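+/*
+ * Runtime PM: HFI 1xx needs no per-encoder handling; on newer HFI versions
+ * the encoder core clock is gated/ungated, with the wrapper power-control
+ * register toggled around the clock operation.
+ */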
+static int venc_runtime_suspend(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+
+ if (core->res->hfi_version == HFI_VERSION_1XX)
+ return 0;
+
+ writel(0, core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL);
+ clk_disable_unprepare(core->core1_clk);
+ writel(1, core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL);
+
+ return 0;
+}
+
+static int venc_runtime_resume(struct device *dev)
+{
+ struct venus_core *core = dev_get_drvdata(dev);
+ int ret;
+
+ if (core->res->hfi_version == HFI_VERSION_1XX)
+ return 0;
+
+ writel(0, core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL);
+ ret = clk_prepare_enable(core->core1_clk);
+ writel(1, core->base + WRAPPER_VENC_VCODEC_POWER_CONTROL);
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops venc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(venc_runtime_suspend, venc_runtime_resume, NULL)
+};
+
+static const struct of_device_id venc_dt_match[] = {
+ { .compatible = "venus-encoder" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, venc_dt_match);
+
+static struct platform_driver qcom_venus_enc_driver = {
+ .probe = venc_probe,
+ .remove = venc_remove,
+ .driver = {
+ .name = "qcom-venus-encoder",
+ .of_match_table = venc_dt_match,
+ .pm = &venc_pm_ops,
+ },
+};
+module_platform_driver(qcom_venus_enc_driver);
+
+MODULE_ALIAS("platform:qcom-venus-encoder");
+MODULE_DESCRIPTION("Qualcomm Venus video encoder driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/qcom/venus/venc.h b/drivers/media/platform/qcom/venus/venc.h
new file mode 100644
index 000000000000..9daca669f307
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/venc.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __VENUS_VENC_H__
+#define __VENUS_VENC_H__
+
+struct venus_inst;
+
+int venc_ctrl_init(struct venus_inst *inst);
+void venc_ctrl_deinit(struct venus_inst *inst);
+
+#endif
diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
new file mode 100644
index 000000000000..ab0fe51ff0f7
--- /dev/null
+++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/types.h>
+#include <media/v4l2-ctrls.h>
+
+#include "core.h"
+#include "venc.h"
+
+#define BITRATE_MIN 32000
+#define BITRATE_MAX 160000000
+#define BITRATE_DEFAULT 1000000
+#define BITRATE_DEFAULT_PEAK (BITRATE_DEFAULT * 2)
+#define BITRATE_STEP 100
+#define SLICE_BYTE_SIZE_MAX 1024
+#define SLICE_BYTE_SIZE_MIN 1024
+#define SLICE_MB_SIZE_MAX 300
+#define INTRA_REFRESH_MBS_MAX 300
+#define AT_SLICE_BOUNDARY \
+ V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
+
+static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct venus_inst *inst = ctrl_to_inst(ctrl);
+ struct venc_controls *ctr = &inst->controls.enc;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
+ ctr->bitrate_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE:
+ ctr->bitrate = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
+ ctr->bitrate_peak = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
+ ctr->h264_entropy_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
+ ctr->profile.mpeg4 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
+ ctr->profile.h264 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:
+ ctr->profile.vpx = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
+ ctr->level.mpeg4 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
+ ctr->level.h264 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
+ ctr->h264_i_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
+ ctr->h264_p_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
+ ctr->h264_b_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
+ ctr->h264_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
+ ctr->h264_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
+ ctr->multi_slice_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
+ ctr->multi_slice_max_bytes = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
+ ctr->multi_slice_max_mb = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
+ ctr->h264_loop_filter_alpha = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
+ ctr->h264_loop_filter_beta = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
+ ctr->h264_loop_filter_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
+ ctr->header_mode = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
+ break;
+ case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
+ ctr->gop_size = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:
+ ctr->h264_i_period = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP:
+ ctr->vp8_min_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP:
+ ctr->vp8_max_qp = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_B_FRAMES:
+ ctr->num_b_frames = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops venc_ctrl_ops = {
+ .s_ctrl = venc_op_s_ctrl,
+};
+
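+/*
+ * Register the encoder V4L2 controls (bitrate, GOP, profile/level, QP
+ * ranges, slicing, ...) and apply their default values to the instance.
+ */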
+int venc_ctrl_init(struct venus_inst *inst)
+{
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 27);
+ if (ret)
+ return ret;
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+ ~((1 << V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) |
+ (1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)),
+ V4L2_MPEG_VIDEO_BITRATE_MODE_VBR);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
+ V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
+ 0, V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY,
+ ~((1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE) |
+ (1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE)),
+ V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
+ V4L2_MPEG_VIDEO_MPEG4_LEVEL_5,
+ 0, V4L2_MPEG_VIDEO_MPEG4_LEVEL_0);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH,
+ ~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) |
+ (1 << V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH)),
+ V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+ V4L2_MPEG_VIDEO_H264_LEVEL_5_1,
+ 0, V4L2_MPEG_VIDEO_H264_LEVEL_1_0);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
+ AT_SLICE_BOUNDARY,
+ 0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_HEADER_MODE,
+ V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ 1 << V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
+ V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE);
+
+ v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
+ V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES,
+ 0, V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE, BITRATE_MIN, BITRATE_MAX,
+ BITRATE_STEP, BITRATE_DEFAULT);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, BITRATE_MIN, BITRATE_MAX,
+ BITRATE_STEP, BITRATE_DEFAULT_PEAK);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VPX_PROFILE, 0, 3, 1, 0);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 1, 51, 1, 26);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, 1, 51, 1, 28);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP, 1, 51, 1, 30);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MIN_QP, 1, 51, 1, 1);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 1, 51, 1, 51);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES, SLICE_BYTE_SIZE_MIN,
+ SLICE_BYTE_SIZE_MAX, 1, SLICE_BYTE_SIZE_MIN);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB, 1,
+ SLICE_MB_SIZE_MAX, 1, 1);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, -6, 6, 1, 0);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, -6, 6, 1, 0);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB,
+ 0, INTRA_REFRESH_MBS_MAX, 1, 0);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0, (1 << 16) - 1, 1, 12);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VPX_MIN_QP, 1, 128, 1, 1);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VPX_MAX_QP, 1, 128, 1, 128);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_B_FRAMES, 0, 4, 1, 0);
+
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_I_PERIOD, 0, (1 << 16) - 1, 1, 0);
+
+ ret = inst->ctrl_handler.error;
+ if (ret)
+ goto err;
+
+ ret = v4l2_ctrl_handler_setup(&inst->ctrl_handler);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ v4l2_ctrl_handler_free(&inst->ctrl_handler);
+ return ret;
+}
+
+void venc_ctrl_deinit(struct venus_inst *inst)
+{
+ v4l2_ctrl_handler_free(&inst->ctrl_handler);
+}
diff --git a/drivers/media/platform/rcar-vin/Kconfig b/drivers/media/platform/rcar-vin/Kconfig
index 111d2a151f6a..af4c98b44d2e 100644
--- a/drivers/media/platform/rcar-vin/Kconfig
+++ b/drivers/media/platform/rcar-vin/Kconfig
@@ -3,6 +3,7 @@ config VIDEO_RCAR_VIN
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF && HAS_DMA && MEDIA_CONTROLLER
depends on ARCH_RENESAS || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
---help---
Support for Renesas R-Car Video Input (VIN) driver.
Supports R-Car Gen2 SoCs.
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 098a0b1cc10a..77dff047c41c 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -21,7 +21,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
#include "rcar-vin.h"
@@ -31,6 +31,20 @@
#define notifier_to_vin(n) container_of(n, struct rvin_dev, notifier)
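+/*
+ * Return the first pad with the requested direction flag, 0 for
+ * single-pad subdevices, or -EINVAL if no pad matches.
+ */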
+static int rvin_find_pad(struct v4l2_subdev *sd, int direction)
+{
+ unsigned int pad;
+
+ if (sd->entity.num_pads <= 1)
+ return 0;
+
+ for (pad = 0; pad < sd->entity.num_pads; pad++)
+ if (sd->entity.pads[pad].flags & direction)
+ return pad;
+
+ return -EINVAL;
+}
+
static bool rvin_mbus_supported(struct rvin_graph_entity *entity)
{
struct v4l2_subdev *sd = entity->subdev;
@@ -39,6 +53,7 @@ static bool rvin_mbus_supported(struct rvin_graph_entity *entity)
};
code.index = 0;
+ code.pad = entity->source_pad;
while (!v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code)) {
code.index++;
switch (code.code) {
@@ -86,14 +101,9 @@ static void rvin_digital_notify_unbind(struct v4l2_async_notifier *notifier,
{
struct rvin_dev *vin = notifier_to_vin(notifier);
- if (vin->digital.subdev == subdev) {
- vin_dbg(vin, "unbind digital subdev %s\n", subdev->name);
- rvin_v4l2_remove(vin);
- vin->digital.subdev = NULL;
- return;
- }
-
- vin_err(vin, "no entity for subdev %s to unbind\n", subdev->name);
+ vin_dbg(vin, "unbind digital subdev %s\n", subdev->name);
+ rvin_v4l2_remove(vin);
+ vin->digital.subdev = NULL;
}
static int rvin_digital_notify_bound(struct v4l2_async_notifier *notifier,
@@ -101,27 +111,37 @@ static int rvin_digital_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_async_subdev *asd)
{
struct rvin_dev *vin = notifier_to_vin(notifier);
+ int ret;
v4l2_set_subdev_hostdata(subdev, vin);
- if (vin->digital.asd.match.of.node == subdev->dev->of_node) {
- vin_dbg(vin, "bound digital subdev %s\n", subdev->name);
- vin->digital.subdev = subdev;
- return 0;
- }
+ /* Find source and sink pad of remote subdevice */
- vin_err(vin, "no entity for subdev %s to bind\n", subdev->name);
- return -EINVAL;
+ ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SOURCE);
+ if (ret < 0)
+ return ret;
+ vin->digital.source_pad = ret;
+
+ ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SINK);
+ vin->digital.sink_pad = ret < 0 ? 0 : ret;
+
+ vin->digital.subdev = subdev;
+
+ vin_dbg(vin, "bound subdev %s source pad: %u sink pad: %u\n",
+ subdev->name, vin->digital.source_pad,
+ vin->digital.sink_pad);
+
+ return 0;
}
static int rvin_digitial_parse_v4l2(struct rvin_dev *vin,
struct device_node *ep,
struct v4l2_mbus_config *mbus_cfg)
{
- struct v4l2_of_endpoint v4l2_ep;
+ struct v4l2_fwnode_endpoint v4l2_ep;
int ret;
- ret = v4l2_of_parse_endpoint(ep, &v4l2_ep);
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &v4l2_ep);
if (ret) {
vin_err(vin, "Could not parse v4l2 endpoint\n");
return -EINVAL;
@@ -151,7 +171,7 @@ static int rvin_digital_graph_parse(struct rvin_dev *vin)
struct device_node *ep, *np;
int ret;
- vin->digital.asd.match.of.node = NULL;
+ vin->digital.asd.match.fwnode.fwnode = NULL;
vin->digital.subdev = NULL;
/*
@@ -175,8 +195,8 @@ static int rvin_digital_graph_parse(struct rvin_dev *vin)
if (ret)
return ret;
- vin->digital.asd.match.of.node = np;
- vin->digital.asd.match_type = V4L2_ASYNC_MATCH_OF;
+ vin->digital.asd.match.fwnode.fwnode = of_fwnode_handle(np);
+ vin->digital.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
return 0;
}
@@ -190,7 +210,7 @@ static int rvin_digital_graph_init(struct rvin_dev *vin)
if (ret)
return ret;
- if (!vin->digital.asd.match.of.node) {
+ if (!vin->digital.asd.match.fwnode.fwnode) {
vin_dbg(vin, "No digital subdevice found\n");
return -ENODEV;
}
@@ -203,7 +223,7 @@ static int rvin_digital_graph_init(struct rvin_dev *vin)
subdevs[0] = &vin->digital.asd;
vin_dbg(vin, "Found digital subdevice %s\n",
- of_node_full_name(subdevs[0]->match.of.node));
+ of_node_full_name(to_of_node(subdevs[0]->match.fwnode.fwnode)));
vin->notifier.num_subdevs = 1;
vin->notifier.subdevs = subdevs;
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index 9ccd5ff55e19..b136844499f6 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -119,6 +119,15 @@
#define VNDMR2_FTEV (1 << 17)
#define VNDMR2_VLV(n) ((n & 0xf) << 12)
+struct rvin_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+#define to_buf_list(vb2_buffer) (&container_of(vb2_buffer, \
+ struct rvin_buffer, \
+ vb)->list)
+
static void rvin_write(struct rvin_dev *vin, u32 value, u32 offset)
{
iowrite32(value, vin->base + offset);
@@ -269,48 +278,6 @@ static int rvin_setup(struct rvin_dev *vin)
return 0;
}
-static void rvin_capture_on(struct rvin_dev *vin)
-{
- vin_dbg(vin, "Capture on in %s mode\n",
- vin->continuous ? "continuous" : "single");
-
- if (vin->continuous)
- /* Continuous Frame Capture Mode */
- rvin_write(vin, VNFC_C_FRAME, VNFC_REG);
- else
- /* Single Frame Capture Mode */
- rvin_write(vin, VNFC_S_FRAME, VNFC_REG);
-}
-
-static void rvin_capture_off(struct rvin_dev *vin)
-{
- /* Set continuous & single transfer off */
- rvin_write(vin, 0, VNFC_REG);
-}
-
-static int rvin_capture_start(struct rvin_dev *vin)
-{
- int ret;
-
- rvin_crop_scale_comp(vin);
-
- ret = rvin_setup(vin);
- if (ret)
- return ret;
-
- rvin_capture_on(vin);
-
- return 0;
-}
-
-static void rvin_capture_stop(struct rvin_dev *vin)
-{
- rvin_capture_off(vin);
-
- /* Disable module */
- rvin_write(vin, rvin_read(vin, VNMC_REG) & ~VNMC_ME, VNMC_REG);
-}
-
static void rvin_disable_interrupts(struct rvin_dev *vin)
{
rvin_write(vin, 0, VNIE_REG);
@@ -377,6 +344,99 @@ static void rvin_set_slot_addr(struct rvin_dev *vin, int slot, dma_addr_t addr)
rvin_write(vin, offset, VNMB_REG(slot));
}
+/* Moves a buffer from the queue to the HW slots */
+static bool rvin_fill_hw_slot(struct rvin_dev *vin, int slot)
+{
+ struct rvin_buffer *buf;
+ struct vb2_v4l2_buffer *vbuf;
+ dma_addr_t phys_addr_top;
+
+ if (vin->queue_buf[slot] != NULL)
+ return true;
+
+ if (list_empty(&vin->buf_list))
+ return false;
+
+ vin_dbg(vin, "Filling HW slot: %d\n", slot);
+
+ /* Keep track of buffer we give to HW */
+ buf = list_entry(vin->buf_list.next, struct rvin_buffer, list);
+ vbuf = &buf->vb;
+ list_del_init(to_buf_list(vbuf));
+ vin->queue_buf[slot] = vbuf;
+
+ /* Setup DMA */
+ phys_addr_top = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
+ rvin_set_slot_addr(vin, slot, phys_addr_top);
+
+ return true;
+}
+
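+/* Fill every HW slot in continuous mode, or only the first one otherwise. */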
+static bool rvin_fill_hw(struct rvin_dev *vin)
+{
+ int slot, limit;
+
+ limit = vin->continuous ? HW_BUFFER_NUM : 1;
+
+ for (slot = 0; slot < limit; slot++)
+ if (!rvin_fill_hw_slot(vin, slot))
+ return false;
+ return true;
+}
+
+static void rvin_capture_on(struct rvin_dev *vin)
+{
+ vin_dbg(vin, "Capture on in %s mode\n",
+ vin->continuous ? "continuous" : "single");
+
+ if (vin->continuous)
+ /* Continuous Frame Capture Mode */
+ rvin_write(vin, VNFC_C_FRAME, VNFC_REG);
+ else
+ /* Single Frame Capture Mode */
+ rvin_write(vin, VNFC_S_FRAME, VNFC_REG);
+}
+
+static int rvin_capture_start(struct rvin_dev *vin)
+{
+ struct rvin_buffer *buf, *node;
+ int bufs, ret;
+
+ /* Count number of free buffers */
+ bufs = 0;
+ list_for_each_entry_safe(buf, node, &vin->buf_list, list)
+ bufs++;
+
+ /* Continuous capture requires more buffers than there are HW slots */
+ vin->continuous = bufs > HW_BUFFER_NUM;
+
+ if (!rvin_fill_hw(vin)) {
+ vin_err(vin, "HW not ready to start, not enough buffers available\n");
+ return -EINVAL;
+ }
+
+ rvin_crop_scale_comp(vin);
+
+ ret = rvin_setup(vin);
+ if (ret)
+ return ret;
+
+ rvin_capture_on(vin);
+
+ vin->state = RUNNING;
+
+ return 0;
+}
+
+static void rvin_capture_stop(struct rvin_dev *vin)
+{
+ /* Set continuous & single transfer off */
+ rvin_write(vin, 0, VNFC_REG);
+
+ /* Disable module */
+ rvin_write(vin, rvin_read(vin, VNMC_REG) & ~VNMC_ME, VNMC_REG);
+}
+
/* -----------------------------------------------------------------------------
* Crop and Scaling Gen2
*/
@@ -839,61 +899,12 @@ void rvin_scale_try(struct rvin_dev *vin, struct v4l2_pix_format *pix,
#define RVIN_TIMEOUT_MS 100
#define RVIN_RETRIES 10
-struct rvin_buffer {
- struct vb2_v4l2_buffer vb;
- struct list_head list;
-};
-
-#define to_buf_list(vb2_buffer) (&container_of(vb2_buffer, \
- struct rvin_buffer, \
- vb)->list)
-
-/* Moves a buffer from the queue to the HW slots */
-static bool rvin_fill_hw_slot(struct rvin_dev *vin, int slot)
-{
- struct rvin_buffer *buf;
- struct vb2_v4l2_buffer *vbuf;
- dma_addr_t phys_addr_top;
-
- if (vin->queue_buf[slot] != NULL)
- return true;
-
- if (list_empty(&vin->buf_list))
- return false;
-
- vin_dbg(vin, "Filling HW slot: %d\n", slot);
-
- /* Keep track of buffer we give to HW */
- buf = list_entry(vin->buf_list.next, struct rvin_buffer, list);
- vbuf = &buf->vb;
- list_del_init(to_buf_list(vbuf));
- vin->queue_buf[slot] = vbuf;
-
- /* Setup DMA */
- phys_addr_top = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
- rvin_set_slot_addr(vin, slot, phys_addr_top);
-
- return true;
-}
-
-static bool rvin_fill_hw(struct rvin_dev *vin)
-{
- int slot, limit;
-
- limit = vin->continuous ? HW_BUFFER_NUM : 1;
-
- for (slot = 0; slot < limit; slot++)
- if (!rvin_fill_hw_slot(vin, slot))
- return false;
- return true;
-}
-
static irqreturn_t rvin_irq(int irq, void *data)
{
struct rvin_dev *vin = data;
u32 int_status, vnms;
int slot;
- unsigned int sequence, handled = 0;
+ unsigned int i, sequence, handled = 0;
unsigned long flags;
spin_lock_irqsave(&vin->qlock, flags);
@@ -955,8 +966,20 @@ static irqreturn_t rvin_irq(int irq, void *data)
* the VnMBm registers.
*/
if (vin->continuous) {
- rvin_capture_off(vin);
+ rvin_capture_stop(vin);
vin_dbg(vin, "IRQ %02d: hw not ready stop\n", sequence);
+
+ /* Maybe we can continue in single capture mode */
+ for (i = 0; i < HW_BUFFER_NUM; i++) {
+ if (vin->queue_buf[i]) {
+ list_add(to_buf_list(vin->queue_buf[i]),
+ &vin->buf_list);
+ vin->queue_buf[i] = NULL;
+ }
+ }
+
+ if (!list_empty(&vin->buf_list))
+ rvin_capture_start(vin);
}
} else {
/*
@@ -1041,8 +1064,7 @@ static void rvin_buffer_queue(struct vb2_buffer *vb)
* capturing if HW is ready to continue.
*/
if (vin->state == STALLED)
- if (rvin_fill_hw(vin))
- rvin_capture_on(vin);
+ rvin_capture_start(vin);
spin_unlock_irqrestore(&vin->qlock, flags);
}
@@ -1059,25 +1081,9 @@ static int rvin_start_streaming(struct vb2_queue *vq, unsigned int count)
spin_lock_irqsave(&vin->qlock, flags);
- vin->state = RUNNING;
vin->sequence = 0;
- /* Continuous capture requires more buffers then there are HW slots */
- vin->continuous = count > HW_BUFFER_NUM;
-
- /*
- * This should never happen but if we don't have enough
- * buffers for HW bail out
- */
- if (!rvin_fill_hw(vin)) {
- vin_err(vin, "HW not ready to start, not enough buffers available\n");
- ret = -EINVAL;
- goto out;
- }
-
ret = rvin_capture_start(vin);
-out:
- /* Return all buffers if something went wrong */
if (ret) {
return_all_buffers(vin, VB2_BUF_STATE_QUEUED);
v4l2_subdev_call(sd, video, s_stream, 0);
@@ -1183,7 +1189,7 @@ int rvin_dma_probe(struct rvin_dev *vin, int irq)
q->ops = &rvin_qops;
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 2;
+ q->min_buffers_needed = 1;
q->dev = vin->dev;
ret = vb2_queue_init(q);
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index 2bbe6d495fa6..dd37ea811680 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -111,7 +111,7 @@ static int rvin_reset_format(struct rvin_dev *vin)
struct v4l2_mbus_framefmt *mf = &fmt.format;
int ret;
- fmt.pad = vin->src_pad_idx;
+ fmt.pad = vin->digital.source_pad;
ret = v4l2_subdev_call(vin_to_source(vin), pad, get_fmt, NULL, &fmt);
if (ret)
@@ -151,6 +151,9 @@ static int rvin_reset_format(struct rvin_dev *vin)
rvin_reset_crop_compose(vin);
+ vin->format.bytesperline = rvin_format_bytesperline(&vin->format);
+ vin->format.sizeimage = rvin_format_sizeimage(&vin->format);
+
return 0;
}
@@ -175,7 +178,7 @@ static int __rvin_try_format_source(struct rvin_dev *vin,
if (pad_cfg == NULL)
return -ENOMEM;
- format.pad = vin->src_pad_idx;
+ format.pad = vin->digital.source_pad;
field = pix->field;
@@ -203,8 +206,8 @@ static int __rvin_try_format(struct rvin_dev *vin,
struct v4l2_pix_format *pix,
struct rvin_source_fmt *source)
{
- const struct rvin_video_format *info;
u32 rwidth, rheight, walign;
+ int ret;
/* Requested */
rwidth = pix->width;
@@ -214,17 +217,11 @@ static int __rvin_try_format(struct rvin_dev *vin,
if (pix->field == V4L2_FIELD_ANY)
pix->field = vin->format.field;
- /*
- * Retrieve format information and select the current format if the
- * requested format isn't supported.
- */
- info = rvin_format_from_pixel(pix->pixelformat);
- if (!info) {
- vin_dbg(vin, "Format %x not found, keeping %x\n",
- pix->pixelformat, vin->format.pixelformat);
- *pix = vin->format;
- pix->width = rwidth;
- pix->height = rheight;
+ /* If requested format is not supported fallback to the default */
+ if (!rvin_format_from_pixel(pix->pixelformat)) {
+ vin_dbg(vin, "Format 0x%x not found, using default 0x%x\n",
+ pix->pixelformat, RVIN_DEFAULT_FORMAT);
+ pix->pixelformat = RVIN_DEFAULT_FORMAT;
}
/* Always recalculate */
@@ -232,7 +229,9 @@ static int __rvin_try_format(struct rvin_dev *vin,
pix->sizeimage = 0;
/* Limit to source capabilities */
- __rvin_try_format_source(vin, which, pix, source);
+ ret = __rvin_try_format_source(vin, which, pix, source);
+ if (ret)
+ return ret;
switch (pix->field) {
case V4L2_FIELD_TOP:
@@ -480,10 +479,14 @@ static int rvin_enum_input(struct file *file, void *priv,
return ret;
i->type = V4L2_INPUT_TYPE_CAMERA;
- i->std = vin->vdev.tvnorms;
- if (v4l2_subdev_has_op(sd, pad, dv_timings_cap))
+ if (v4l2_subdev_has_op(sd, pad, dv_timings_cap)) {
i->capabilities = V4L2_IN_CAP_DV_TIMINGS;
+ i->std = 0;
+ } else {
+ i->capabilities = V4L2_IN_CAP_STD;
+ i->std = vin->vdev.tvnorms;
+ }
strlcpy(i->name, "Camera", sizeof(i->name));
@@ -547,14 +550,16 @@ static int rvin_enum_dv_timings(struct file *file, void *priv_fh,
{
struct rvin_dev *vin = video_drvdata(file);
struct v4l2_subdev *sd = vin_to_source(vin);
- int pad, ret;
+ int ret;
+
+ if (timings->pad)
+ return -EINVAL;
- pad = timings->pad;
- timings->pad = vin->sink_pad_idx;
+ timings->pad = vin->digital.sink_pad;
ret = v4l2_subdev_call(sd, pad, enum_dv_timings, timings);
- timings->pad = pad;
+ timings->pad = 0;
return ret;
}
@@ -570,12 +575,8 @@ static int rvin_s_dv_timings(struct file *file, void *priv_fh,
if (ret)
return ret;
- vin->source.width = timings->bt.width;
- vin->source.height = timings->bt.height;
- vin->format.width = timings->bt.width;
- vin->format.height = timings->bt.height;
-
- return 0;
+ /* Changing the timings will change the width/height */
+ return rvin_reset_format(vin);
}
static int rvin_g_dv_timings(struct file *file, void *priv_fh,
@@ -601,14 +602,16 @@ static int rvin_dv_timings_cap(struct file *file, void *priv_fh,
{
struct rvin_dev *vin = video_drvdata(file);
struct v4l2_subdev *sd = vin_to_source(vin);
- int pad, ret;
+ int ret;
+
+ if (cap->pad)
+ return -EINVAL;
- pad = cap->pad;
- cap->pad = vin->sink_pad_idx;
+ cap->pad = vin->digital.sink_pad;
ret = v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
- cap->pad = pad;
+ cap->pad = 0;
return ret;
}
@@ -617,17 +620,16 @@ static int rvin_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
{
struct rvin_dev *vin = video_drvdata(file);
struct v4l2_subdev *sd = vin_to_source(vin);
- int input, ret;
+ int ret;
if (edid->pad)
return -EINVAL;
- input = edid->pad;
- edid->pad = vin->sink_pad_idx;
+ edid->pad = vin->digital.sink_pad;
ret = v4l2_subdev_call(sd, pad, get_edid, edid);
- edid->pad = input;
+ edid->pad = 0;
return ret;
}
@@ -636,17 +638,16 @@ static int rvin_s_edid(struct file *file, void *fh, struct v4l2_edid *edid)
{
struct rvin_dev *vin = video_drvdata(file);
struct v4l2_subdev *sd = vin_to_source(vin);
- int input, ret;
+ int ret;
if (edid->pad)
return -EINVAL;
- input = edid->pad;
- edid->pad = vin->sink_pad_idx;
+ edid->pad = vin->digital.sink_pad;
ret = v4l2_subdev_call(sd, pad, set_edid, edid);
- edid->pad = input;
+ edid->pad = 0;
return ret;
}
@@ -869,7 +870,7 @@ int rvin_v4l2_probe(struct rvin_dev *vin)
{
struct video_device *vdev = &vin->vdev;
struct v4l2_subdev *sd = vin_to_source(vin);
- int pad_idx, ret;
+ int ret;
v4l2_set_subdev_hostdata(sd, vin);
@@ -915,22 +916,6 @@ int rvin_v4l2_probe(struct rvin_dev *vin)
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
V4L2_CAP_READWRITE;
- vin->src_pad_idx = 0;
- for (pad_idx = 0; pad_idx < sd->entity.num_pads; pad_idx++)
- if (sd->entity.pads[pad_idx].flags == MEDIA_PAD_FL_SOURCE)
- break;
- if (pad_idx >= sd->entity.num_pads)
- return -EINVAL;
-
- vin->src_pad_idx = pad_idx;
-
- vin->sink_pad_idx = 0;
- for (pad_idx = 0; pad_idx < sd->entity.num_pads; pad_idx++)
- if (sd->entity.pads[pad_idx].flags == MEDIA_PAD_FL_SINK) {
- vin->sink_pad_idx = pad_idx;
- break;
- }
-
vin->format.pixelformat = RVIN_DEFAULT_FORMAT;
rvin_reset_format(vin);
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
index 727e215c0871..9bfb5a7c4dc4 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -74,6 +74,8 @@ struct rvin_video_format {
* @subdev: subdevice matched using async framework
* @code: Media bus format from source
* @mbus_cfg: Media bus format from DT
+ * @source_pad: source pad of remote subdevice
+ * @sink_pad: sink pad of remote subdevice
*/
struct rvin_graph_entity {
struct v4l2_async_subdev asd;
@@ -81,6 +83,9 @@ struct rvin_graph_entity {
u32 code;
struct v4l2_mbus_config mbus_cfg;
+
+ unsigned int source_pad;
+ unsigned int sink_pad;
};
/**
@@ -91,8 +96,6 @@ struct rvin_graph_entity {
*
* @vdev: V4L2 video device associated with VIN
* @v4l2_dev: V4L2 device
- * @src_pad_idx: source pad index for media controller drivers
- * @sink_pad_idx: sink pad index for media controller drivers
* @ctrl_handler: V4L2 control handler
* @notifier: V4L2 asynchronous subdevs notifier
* @digital: entity in the DT for local digital subdevice
@@ -121,8 +124,6 @@ struct rvin_dev {
struct video_device vdev;
struct v4l2_device v4l2_dev;
- int src_pad_idx;
- int sink_pad_idx;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_async_notifier notifier;
struct rvin_graph_entity digital;
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
new file mode 100644
index 000000000000..522364ff0d5d
--- /dev/null
+++ b/drivers/media/platform/rcar_drif.c
@@ -0,0 +1,1498 @@
+/*
+ * R-Car Gen3 Digital Radio Interface (DRIF) driver
+ *
+ * Copyright (C) 2017 Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * The R-Car DRIF is a receive-only, MSIOF-like controller with an
+ * external master device driving the SCK. It receives data into a FIFO,
+ * then this driver uses the SYS-DMAC engine to move the data from
+ * the device to memory.
+ *
+ * Each DRIF channel DRIFx (as per datasheet) contains two internal
+ * channels DRIFx0 & DRIFx1 within itself with each having its own resources
+ * like module clk, register set, irq and dma. These internal channels share
+ * common CLK & SYNC from master. The two data pins D0 & D1 shall be
+ * considered to represent the two internal channels. This internal split
+ * is not visible to the master device.
+ *
+ * Depending on the master device, a DRIF channel can use
+ * (1) both internal channels (D0 & D1) to receive data in parallel (or)
+ * (2) one internal channel (D0 or D1) to receive data
+ *
+ * The primary design goal of this controller is to act as a Digital Radio
+ * Interface that receives digital samples from a tuner device. Hence the
+ * driver exposes the device as a V4L2 SDR device. In order to qualify as
+ * a V4L2 SDR device, it should possess a tuner interface as mandated by the
+ * framework. This driver expects a tuner driver (sub-device) to bind
+ * asynchronously with this device and the combined drivers shall expose
+ * a V4L2 compliant SDR device. The DRIF driver is independent of the
+ * tuner vendor.
+ *
+ * The DRIF h/w can support I2S mode and Frame start synchronization pulse mode.
+ * This driver is tested for I2S mode only because of the availability of
+ * suitable master devices. Hence, not all configurable options of DRIF h/w
+ * like lsb/msb first, syncdl, dtdl etc. are exposed via DT and I2S defaults
+ * are used. These can be exposed later if needed after testing.
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/ioctl.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-vmalloc.h>
+
+/* DRIF register offsets */
+#define RCAR_DRIF_SITMDR1 0x00
+#define RCAR_DRIF_SITMDR2 0x04
+#define RCAR_DRIF_SITMDR3 0x08
+#define RCAR_DRIF_SIRMDR1 0x10
+#define RCAR_DRIF_SIRMDR2 0x14
+#define RCAR_DRIF_SIRMDR3 0x18
+#define RCAR_DRIF_SICTR 0x28
+#define RCAR_DRIF_SIFCTR 0x30
+#define RCAR_DRIF_SISTR 0x40
+#define RCAR_DRIF_SIIER 0x44
+#define RCAR_DRIF_SIRFDR 0x60
+
+#define RCAR_DRIF_RFOVF BIT(3) /* Receive FIFO overflow */
+#define RCAR_DRIF_RFUDF BIT(4) /* Receive FIFO underflow */
+#define RCAR_DRIF_RFSERR BIT(5) /* Receive frame sync error */
+#define RCAR_DRIF_REOF BIT(7) /* Frame reception end */
+#define RCAR_DRIF_RDREQ BIT(12) /* Receive data xfer req */
+#define RCAR_DRIF_RFFUL BIT(13) /* Receive FIFO full */
+
+/* SIRMDR1 */
+#define RCAR_DRIF_SIRMDR1_SYNCMD_FRAME (0 << 28)
+#define RCAR_DRIF_SIRMDR1_SYNCMD_LR (3 << 28)
+
+#define RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH (0 << 25)
+#define RCAR_DRIF_SIRMDR1_SYNCAC_POL_LOW (1 << 25)
+
+#define RCAR_DRIF_SIRMDR1_MSB_FIRST (0 << 24)
+#define RCAR_DRIF_SIRMDR1_LSB_FIRST (1 << 24)
+
+#define RCAR_DRIF_SIRMDR1_DTDL_0 (0 << 20)
+#define RCAR_DRIF_SIRMDR1_DTDL_1 (1 << 20)
+#define RCAR_DRIF_SIRMDR1_DTDL_2 (2 << 20)
+#define RCAR_DRIF_SIRMDR1_DTDL_0PT5 (5 << 20)
+#define RCAR_DRIF_SIRMDR1_DTDL_1PT5 (6 << 20)
+
+#define RCAR_DRIF_SIRMDR1_SYNCDL_0 (0 << 20)
+#define RCAR_DRIF_SIRMDR1_SYNCDL_1 (1 << 20)
+#define RCAR_DRIF_SIRMDR1_SYNCDL_2 (2 << 20)
+#define RCAR_DRIF_SIRMDR1_SYNCDL_3 (3 << 20)
+#define RCAR_DRIF_SIRMDR1_SYNCDL_0PT5 (5 << 20)
+#define RCAR_DRIF_SIRMDR1_SYNCDL_1PT5 (6 << 20)
+
+#define RCAR_DRIF_MDR_GRPCNT(n) (((n) - 1) << 30)
+#define RCAR_DRIF_MDR_BITLEN(n) (((n) - 1) << 24)
+#define RCAR_DRIF_MDR_WDCNT(n) (((n) - 1) << 16)
+
+/* Hidden Transmit register that controls CLK & SYNC */
+#define RCAR_DRIF_SITMDR1_PCON BIT(30)
+
+#define RCAR_DRIF_SICTR_RX_RISING_EDGE BIT(26)
+#define RCAR_DRIF_SICTR_RX_EN BIT(8)
+#define RCAR_DRIF_SICTR_RESET BIT(0)
+
+/* Constants */
+#define RCAR_DRIF_NUM_HWBUFS 32
+#define RCAR_DRIF_MAX_DEVS 4
+#define RCAR_DRIF_DEFAULT_NUM_HWBUFS 16
+#define RCAR_DRIF_DEFAULT_HWBUF_SIZE (4 * PAGE_SIZE)
+#define RCAR_DRIF_MAX_CHANNEL 2
+#define RCAR_SDR_BUFFER_SIZE SZ_64K
+
+/* Internal buffer status flags */
+#define RCAR_DRIF_BUF_DONE BIT(0) /* DMA completed */
+#define RCAR_DRIF_BUF_OVERFLOW BIT(1) /* Overflow detected */
+
+#define to_rcar_drif_buf_pair(sdr, ch_num, idx) \
+ (&((sdr)->ch[!(ch_num)]->buf[(idx)]))
+
+#define for_each_rcar_drif_channel(ch, ch_mask) \
+ for_each_set_bit(ch, ch_mask, RCAR_DRIF_MAX_CHANNEL)
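+
+/*
+ * to_rcar_drif_buf_pair() returns the buffer at the same index belonging to
+ * the *other* internal channel (note the !(ch_num)), so a channel's DMA
+ * callback can inspect its pair. for_each_rcar_drif_channel() walks the set
+ * bits of a channel mask, which holds at most RCAR_DRIF_MAX_CHANNEL bits.
+ */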
+
+/* Debug */
+#define rdrif_dbg(sdr, fmt, arg...) \
+ dev_dbg(sdr->v4l2_dev.dev, fmt, ## arg)
+
+#define rdrif_err(sdr, fmt, arg...) \
+ dev_err(sdr->v4l2_dev.dev, fmt, ## arg)
+
+/* Stream formats */
+struct rcar_drif_format {
+ u32 pixelformat;
+ u32 buffersize;
+ u32 bitlen;
+ u32 wdcnt;
+ u32 num_ch;
+};
+
+/* Format descriptions for capture */
+static const struct rcar_drif_format formats[] = {
+ {
+ .pixelformat = V4L2_SDR_FMT_PCU16BE,
+ .buffersize = RCAR_SDR_BUFFER_SIZE,
+ .bitlen = 16,
+ .wdcnt = 1,
+ .num_ch = 2,
+ },
+ {
+ .pixelformat = V4L2_SDR_FMT_PCU18BE,
+ .buffersize = RCAR_SDR_BUFFER_SIZE,
+ .bitlen = 18,
+ .wdcnt = 1,
+ .num_ch = 2,
+ },
+ {
+ .pixelformat = V4L2_SDR_FMT_PCU20BE,
+ .buffersize = RCAR_SDR_BUFFER_SIZE,
+ .bitlen = 20,
+ .wdcnt = 1,
+ .num_ch = 2,
+ },
+};
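+
+/*
+ * All formats above need both internal channels (num_ch = 2);
+ * rcar_drif_set_default_format() picks the first entry whose num_ch matches
+ * the number of channels enabled in DT, so both internal channels have to
+ * be enabled for a default format to be found.
+ */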
+
+/* Buffer for a received frame from one or both internal channels */
+struct rcar_drif_frame_buf {
+ /* Common v4l buffer stuff -- must be first */
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+/* OF graph endpoint's V4L2 async data */
+struct rcar_drif_graph_ep {
+ struct v4l2_subdev *subdev; /* Async matched subdev */
+ struct v4l2_async_subdev asd; /* Async sub-device descriptor */
+};
+
+/* DMA buffer */
+struct rcar_drif_hwbuf {
+ void *addr; /* CPU-side address */
+ unsigned int status; /* Buffer status flags */
+};
+
+/* Internal channel */
+struct rcar_drif {
+ struct rcar_drif_sdr *sdr; /* Group device */
+ struct platform_device *pdev; /* Channel's pdev */
+ void __iomem *base; /* Base register address */
+ resource_size_t start; /* I/O resource offset */
+ struct dma_chan *dmach; /* Reserved DMA channel */
+ struct clk *clk; /* Module clock */
+ struct rcar_drif_hwbuf buf[RCAR_DRIF_NUM_HWBUFS]; /* H/W bufs */
+ dma_addr_t dma_handle; /* Handle for all bufs */
+ unsigned int num; /* Channel number */
+ bool acting_sdr; /* Channel acting as SDR device */
+};
+
+/* DRIF V4L2 SDR */
+struct rcar_drif_sdr {
+ struct device *dev; /* Platform device */
+ struct video_device *vdev; /* V4L2 SDR device */
+ struct v4l2_device v4l2_dev; /* V4L2 device */
+
+ /* Videobuf2 queue and queued buffers list */
+ struct vb2_queue vb_queue;
+ struct list_head queued_bufs;
+ spinlock_t queued_bufs_lock; /* Protects queued_bufs */
+ spinlock_t dma_lock; /* To serialize DMA cb of channels */
+
+ struct mutex v4l2_mutex; /* To serialize ioctls */
+ struct mutex vb_queue_mutex; /* To serialize streaming ioctls */
+ struct v4l2_ctrl_handler ctrl_hdl; /* SDR control handler */
+ struct v4l2_async_notifier notifier; /* For subdev (tuner) */
+ struct rcar_drif_graph_ep ep; /* Endpoint V4L2 async data */
+
+ /* Current V4L2 SDR format ptr */
+ const struct rcar_drif_format *fmt;
+
+ /* Device tree SYNC properties */
+ u32 mdr1;
+
+ /* Internals */
+ struct rcar_drif *ch[RCAR_DRIF_MAX_CHANNEL]; /* DRIFx0,1 */
+ unsigned long hw_ch_mask; /* Enabled channels per DT */
+ unsigned long cur_ch_mask; /* Used channels for an SDR FMT */
+ u32 num_hw_ch; /* Num of DT enabled channels */
+ u32 num_cur_ch; /* Num of used channels */
+ u32 hwbuf_size; /* Each DMA buffer size */
+ u32 produced; /* Buffers produced by sdr dev */
+};
+
+/* Register access functions */
+static void rcar_drif_write(struct rcar_drif *ch, u32 offset, u32 data)
+{
+ writel(data, ch->base + offset);
+}
+
+static u32 rcar_drif_read(struct rcar_drif *ch, u32 offset)
+{
+ return readl(ch->base + offset);
+}
+
+/* Release DMA channels */
+static void rcar_drif_release_dmachannels(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask)
+ if (sdr->ch[i]->dmach) {
+ dma_release_channel(sdr->ch[i]->dmach);
+ sdr->ch[i]->dmach = NULL;
+ }
+}
+
+/* Allocate DMA channels */
+static int rcar_drif_alloc_dmachannels(struct rcar_drif_sdr *sdr)
+{
+ struct dma_slave_config dma_cfg;
+ unsigned int i;
+ int ret = -ENODEV;
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ struct rcar_drif *ch = sdr->ch[i];
+
+ ch->dmach = dma_request_slave_channel(&ch->pdev->dev, "rx");
+ if (!ch->dmach) {
+ rdrif_err(sdr, "ch%u: dma channel req failed\n", i);
+ goto dmach_error;
+ }
+
+ /* Configure slave */
+ memset(&dma_cfg, 0, sizeof(dma_cfg));
+ dma_cfg.src_addr = (phys_addr_t)(ch->start + RCAR_DRIF_SIRFDR);
+ dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ ret = dmaengine_slave_config(ch->dmach, &dma_cfg);
+ if (ret) {
+ rdrif_err(sdr, "ch%u: dma slave config failed\n", i);
+ goto dmach_error;
+ }
+ }
+ return 0;
+
+dmach_error:
+ rcar_drif_release_dmachannels(sdr);
+ return ret;
+}
+
+/* Release queued vb2 buffers */
+static void rcar_drif_release_queued_bufs(struct rcar_drif_sdr *sdr,
+ enum vb2_buffer_state state)
+{
+ struct rcar_drif_frame_buf *fbuf, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdr->queued_bufs_lock, flags);
+ list_for_each_entry_safe(fbuf, tmp, &sdr->queued_bufs, list) {
+ list_del(&fbuf->list);
+ vb2_buffer_done(&fbuf->vb.vb2_buf, state);
+ }
+ spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);
+}
+
+/* Set MDR defaults */
+static inline void rcar_drif_set_mdr1(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+
+ /* Set defaults for enabled internal channels */
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ /* Refer to the MSIOF section in the manual for this register setting */
+ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SITMDR1,
+ RCAR_DRIF_SITMDR1_PCON);
+
+ /* Setup MDR1 value */
+ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SIRMDR1, sdr->mdr1);
+
+ rdrif_dbg(sdr, "ch%u: mdr1 = 0x%08x",
+ i, rcar_drif_read(sdr->ch[i], RCAR_DRIF_SIRMDR1));
+ }
+}
+
+/* Set DRIF receive format */
+static int rcar_drif_set_format(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+
+ rdrif_dbg(sdr, "setfmt: bitlen %u wdcnt %u num_ch %u\n",
+ sdr->fmt->bitlen, sdr->fmt->wdcnt, sdr->fmt->num_ch);
+
+ /* Sanity check */
+ if (sdr->fmt->num_ch > sdr->num_cur_ch) {
+ rdrif_err(sdr, "fmt num_ch %u cur_ch %u mismatch\n",
+ sdr->fmt->num_ch, sdr->num_cur_ch);
+ return -EINVAL;
+ }
+
+ /* Setup group, bitlen & wdcnt */
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ u32 mdr;
+
+ /* Two groups */
+ mdr = RCAR_DRIF_MDR_GRPCNT(2) |
+ RCAR_DRIF_MDR_BITLEN(sdr->fmt->bitlen) |
+ RCAR_DRIF_MDR_WDCNT(sdr->fmt->wdcnt);
+ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SIRMDR2, mdr);
+
+ mdr = RCAR_DRIF_MDR_BITLEN(sdr->fmt->bitlen) |
+ RCAR_DRIF_MDR_WDCNT(sdr->fmt->wdcnt);
+ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SIRMDR3, mdr);
+
+ rdrif_dbg(sdr, "ch%u: new mdr[2,3] = 0x%08x, 0x%08x\n",
+ i, rcar_drif_read(sdr->ch[i], RCAR_DRIF_SIRMDR2),
+ rcar_drif_read(sdr->ch[i], RCAR_DRIF_SIRMDR3));
+ }
+ return 0;
+}
+
+/* Release DMA buffers */
+static void rcar_drif_release_buf(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ struct rcar_drif *ch = sdr->ch[i];
+
+ /* First entry contains the dma buf ptr */
+ if (ch->buf[0].addr) {
+ dma_free_coherent(&ch->pdev->dev,
+ sdr->hwbuf_size * RCAR_DRIF_NUM_HWBUFS,
+ ch->buf[0].addr, ch->dma_handle);
+ ch->buf[0].addr = NULL;
+ }
+ }
+}
+
+/* Request DMA buffers */
+static int rcar_drif_request_buf(struct rcar_drif_sdr *sdr)
+{
+ int ret = -ENOMEM;
+ unsigned int i, j;
+ void *addr;
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ struct rcar_drif *ch = sdr->ch[i];
+
+ /* Allocate DMA buffers */
+ addr = dma_alloc_coherent(&ch->pdev->dev,
+ sdr->hwbuf_size * RCAR_DRIF_NUM_HWBUFS,
+ &ch->dma_handle, GFP_KERNEL);
+ if (!addr) {
+ rdrif_err(sdr,
+ "ch%u: dma alloc failed. num hwbufs %u size %u\n",
+ i, RCAR_DRIF_NUM_HWBUFS, sdr->hwbuf_size);
+ goto error;
+ }
+
+ /* Split the chunk and populate bufctxt */
+ for (j = 0; j < RCAR_DRIF_NUM_HWBUFS; j++) {
+ ch->buf[j].addr = addr + (j * sdr->hwbuf_size);
+ ch->buf[j].status = 0;
+ }
+ }
+ return 0;
+error:
+ return ret;
+}
+
+/* Setup vb_queue minimum buffer requirements */
+static int rcar_drif_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq);
+
+ /* Need at least 16 buffers */
+ if (vq->num_buffers + *num_buffers < 16)
+ *num_buffers = 16 - vq->num_buffers;
+
+ *num_planes = 1;
+ sizes[0] = PAGE_ALIGN(sdr->fmt->buffersize);
+ rdrif_dbg(sdr, "num_bufs %d sizes[0] %d\n", *num_buffers, sizes[0]);
+
+ return 0;
+}
+
+/* Enqueue buffer */
+static void rcar_drif_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vb->vb2_queue);
+ struct rcar_drif_frame_buf *fbuf =
+ container_of(vbuf, struct rcar_drif_frame_buf, vb);
+ unsigned long flags;
+
+ rdrif_dbg(sdr, "buf_queue idx %u\n", vb->index);
+ spin_lock_irqsave(&sdr->queued_bufs_lock, flags);
+ list_add_tail(&fbuf->list, &sdr->queued_bufs);
+ spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);
+}
+
+/* Get a frame buf from list */
+static struct rcar_drif_frame_buf *
+rcar_drif_get_fbuf(struct rcar_drif_sdr *sdr)
+{
+ struct rcar_drif_frame_buf *fbuf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdr->queued_bufs_lock, flags);
+ fbuf = list_first_entry_or_null(&sdr->queued_bufs, struct
+ rcar_drif_frame_buf, list);
+ if (!fbuf) {
+ /*
+ * The app is late in enqueueing buffers. Samples are lost and
+ * there will be a gap in the sequence numbers when the app recovers
+ */
+ rdrif_dbg(sdr, "\napp late: prod %u\n", sdr->produced);
+ spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);
+ return NULL;
+ }
+ list_del(&fbuf->list);
+ spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);
+
+ return fbuf;
+}
+
+/* Helpers to set/clear buf pair status */
+static inline bool rcar_drif_bufs_done(struct rcar_drif_hwbuf **buf)
+{
+ return (buf[0]->status & buf[1]->status & RCAR_DRIF_BUF_DONE);
+}
+
+static inline bool rcar_drif_bufs_overflow(struct rcar_drif_hwbuf **buf)
+{
+ return ((buf[0]->status | buf[1]->status) & RCAR_DRIF_BUF_OVERFLOW);
+}
+
+static inline void rcar_drif_bufs_clear(struct rcar_drif_hwbuf **buf,
+ unsigned int bit)
+{
+ unsigned int i;
+
+ for (i = 0; i < RCAR_DRIF_MAX_CHANNEL; i++)
+ buf[i]->status &= ~bit;
+}
+
+/* Channel DMA complete */
+static void rcar_drif_channel_complete(struct rcar_drif *ch, u32 idx)
+{
+ u32 str;
+
+ ch->buf[idx].status |= RCAR_DRIF_BUF_DONE;
+
+ /* Check for DRIF errors */
+ str = rcar_drif_read(ch, RCAR_DRIF_SISTR);
+ if (unlikely(str & RCAR_DRIF_RFOVF)) {
+ /* Writing the same clears it */
+ rcar_drif_write(ch, RCAR_DRIF_SISTR, str);
+
+ /* Overflow: some samples are lost */
+ ch->buf[idx].status |= RCAR_DRIF_BUF_OVERFLOW;
+ }
+}
+
+/* DMA callback for each stage */
+static void rcar_drif_dma_complete(void *dma_async_param)
+{
+ struct rcar_drif *ch = dma_async_param;
+ struct rcar_drif_sdr *sdr = ch->sdr;
+ struct rcar_drif_hwbuf *buf[RCAR_DRIF_MAX_CHANNEL];
+ struct rcar_drif_frame_buf *fbuf;
+ bool overflow = false;
+ u32 idx, produced;
+ unsigned int i;
+
+ spin_lock(&sdr->dma_lock);
+
+ /* DMA may have been terminated while the callback was waiting on the lock */
+ if (!vb2_is_streaming(&sdr->vb_queue)) {
+ spin_unlock(&sdr->dma_lock);
+ return;
+ }
+
+ idx = sdr->produced % RCAR_DRIF_NUM_HWBUFS;
+ rcar_drif_channel_complete(ch, idx);
+
+ if (sdr->num_cur_ch == RCAR_DRIF_MAX_CHANNEL) {
+ buf[0] = ch->num ? to_rcar_drif_buf_pair(sdr, ch->num, idx) :
+ &ch->buf[idx];
+ buf[1] = ch->num ? &ch->buf[idx] :
+ to_rcar_drif_buf_pair(sdr, ch->num, idx);
+
+ /* Check if both DMA buffers are done */
+ if (!rcar_drif_bufs_done(buf)) {
+ spin_unlock(&sdr->dma_lock);
+ return;
+ }
+
+ /* Clear buf done status */
+ rcar_drif_bufs_clear(buf, RCAR_DRIF_BUF_DONE);
+
+ if (rcar_drif_bufs_overflow(buf)) {
+ overflow = true;
+ /* Clear the flag in status */
+ rcar_drif_bufs_clear(buf, RCAR_DRIF_BUF_OVERFLOW);
+ }
+ } else {
+ buf[0] = &ch->buf[idx];
+ if (buf[0]->status & RCAR_DRIF_BUF_OVERFLOW) {
+ overflow = true;
+ /* Clear the flag in status */
+ buf[0]->status &= ~RCAR_DRIF_BUF_OVERFLOW;
+ }
+ }
+
+ /* Buffer produced for consumption */
+ produced = sdr->produced++;
+ spin_unlock(&sdr->dma_lock);
+
+ rdrif_dbg(sdr, "ch%u: prod %u\n", ch->num, produced);
+
+ /* Get fbuf */
+ fbuf = rcar_drif_get_fbuf(sdr);
+ if (!fbuf)
+ return;
+
+ for (i = 0; i < RCAR_DRIF_MAX_CHANNEL; i++)
+ memcpy(vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0) +
+ i * sdr->hwbuf_size, buf[i]->addr, sdr->hwbuf_size);
+
+ fbuf->vb.field = V4L2_FIELD_NONE;
+ fbuf->vb.sequence = produced;
+ fbuf->vb.vb2_buf.timestamp = ktime_get_ns();
+ vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, sdr->fmt->buffersize);
+
+ /* Set error state on overflow */
+ vb2_buffer_done(&fbuf->vb.vb2_buf,
+ overflow ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+}
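+
+/*
+ * Note on the memcpy above: with both internal channels in use, the vb2
+ * plane holds the two hwbufs back to back (channel 0 data at offset 0,
+ * channel 1 data at offset hwbuf_size), which is why hwbuf_size is set to
+ * half the format buffersize in rcar_drif_start_streaming().
+ */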
+
+static int rcar_drif_qbuf(struct rcar_drif *ch)
+{
+ struct rcar_drif_sdr *sdr = ch->sdr;
+ dma_addr_t addr = ch->dma_handle;
+ struct dma_async_tx_descriptor *rxd;
+ dma_cookie_t cookie;
+ int ret = -EIO;
+
+ /* Setup cyclic DMA with given buffers */
+ rxd = dmaengine_prep_dma_cyclic(ch->dmach, addr,
+ sdr->hwbuf_size * RCAR_DRIF_NUM_HWBUFS,
+ sdr->hwbuf_size, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxd) {
+ rdrif_err(sdr, "ch%u: prep dma cyclic failed\n", ch->num);
+ return ret;
+ }
+
+ /* Submit descriptor */
+ rxd->callback = rcar_drif_dma_complete;
+ rxd->callback_param = ch;
+ cookie = dmaengine_submit(rxd);
+ if (dma_submit_error(cookie)) {
+ rdrif_err(sdr, "ch%u: dma submit failed\n", ch->num);
+ return ret;
+ }
+
+ dma_async_issue_pending(ch->dmach);
+ return 0;
+}
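+
+/*
+ * The cyclic descriptor above spans all RCAR_DRIF_NUM_HWBUFS hwbufs with a
+ * period of one hwbuf, so rcar_drif_dma_complete() runs once per filled
+ * hwbuf while the hardware keeps wrapping around the same coherent area.
+ */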
+
+/* Enable reception */
+static int rcar_drif_enable_rx(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+ u32 ctr;
+ int ret;
+
+ /*
+ * When both internal channels are enabled, they can be synchronized
+ * only by the master
+ */
+
+ /* Enable receive */
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ ctr = rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR);
+ ctr |= (RCAR_DRIF_SICTR_RX_RISING_EDGE |
+ RCAR_DRIF_SICTR_RX_EN);
+ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SICTR, ctr);
+ }
+
+ /* Check receive enabled */
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ ret = readl_poll_timeout(sdr->ch[i]->base + RCAR_DRIF_SICTR,
+ ctr, ctr & RCAR_DRIF_SICTR_RX_EN, 7, 100000);
+ if (ret) {
+ rdrif_err(sdr, "ch%u: rx en failed. ctr 0x%08x\n", i,
+ rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR));
+ break;
+ }
+ }
+ return ret;
+}
+
+/* Disable reception */
+static void rcar_drif_disable_rx(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+ u32 ctr;
+ int ret;
+
+ /* Disable receive */
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ ctr = rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR);
+ ctr &= ~RCAR_DRIF_SICTR_RX_EN;
+ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SICTR, ctr);
+ }
+
+ /* Check receive disabled */
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ ret = readl_poll_timeout(sdr->ch[i]->base + RCAR_DRIF_SICTR,
+ ctr, !(ctr & RCAR_DRIF_SICTR_RX_EN), 7, 100000);
+ if (ret)
+ dev_warn(&sdr->vdev->dev,
+ "ch%u: failed to disable rx. ctr 0x%08x\n",
+ i, rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR));
+ }
+}
+
+/* Stop channel */
+static void rcar_drif_stop_channel(struct rcar_drif *ch)
+{
+ /* Disable DMA receive interrupt */
+ rcar_drif_write(ch, RCAR_DRIF_SIIER, 0x00000000);
+
+ /* Terminate all DMA transfers */
+ dmaengine_terminate_sync(ch->dmach);
+}
+
+/* Stop receive operation */
+static void rcar_drif_stop(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+
+ /* Disable Rx */
+ rcar_drif_disable_rx(sdr);
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask)
+ rcar_drif_stop_channel(sdr->ch[i]);
+}
+
+/* Start channel */
+static int rcar_drif_start_channel(struct rcar_drif *ch)
+{
+ struct rcar_drif_sdr *sdr = ch->sdr;
+ u32 ctr, str;
+ int ret;
+
+ /* Reset receive */
+ rcar_drif_write(ch, RCAR_DRIF_SICTR, RCAR_DRIF_SICTR_RESET);
+ ret = readl_poll_timeout(ch->base + RCAR_DRIF_SICTR, ctr,
+ !(ctr & RCAR_DRIF_SICTR_RESET), 7, 100000);
+ if (ret) {
+ rdrif_err(sdr, "ch%u: failed to reset rx. ctr 0x%08x\n",
+ ch->num, rcar_drif_read(ch, RCAR_DRIF_SICTR));
+ return ret;
+ }
+
+ /* Queue buffers for DMA */
+ ret = rcar_drif_qbuf(ch);
+ if (ret)
+ return ret;
+
+ /* Clear status register flags */
+ str = RCAR_DRIF_RFFUL | RCAR_DRIF_REOF | RCAR_DRIF_RFSERR |
+ RCAR_DRIF_RFUDF | RCAR_DRIF_RFOVF;
+ rcar_drif_write(ch, RCAR_DRIF_SISTR, str);
+
+ /* Enable DMA receive interrupt */
+ rcar_drif_write(ch, RCAR_DRIF_SIIER, 0x00009000);
+
+ return ret;
+}
+
+/* Start receive operation */
+static int rcar_drif_start(struct rcar_drif_sdr *sdr)
+{
+ unsigned long enabled = 0;
+ unsigned int i;
+ int ret;
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ ret = rcar_drif_start_channel(sdr->ch[i]);
+ if (ret)
+ goto start_error;
+ enabled |= BIT(i);
+ }
+
+ ret = rcar_drif_enable_rx(sdr);
+ if (ret)
+ goto enable_error;
+
+ sdr->produced = 0;
+ return ret;
+
+enable_error:
+ rcar_drif_disable_rx(sdr);
+start_error:
+ for_each_rcar_drif_channel(i, &enabled)
+ rcar_drif_stop_channel(sdr->ch[i]);
+
+ return ret;
+}
+
+/* Start streaming */
+static int rcar_drif_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq);
+ unsigned long enabled = 0;
+ unsigned int i;
+ int ret;
+
+ mutex_lock(&sdr->v4l2_mutex);
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
+ ret = clk_prepare_enable(sdr->ch[i]->clk);
+ if (ret)
+ goto error;
+ enabled |= BIT(i);
+ }
+
+ /* Set default MDRx settings */
+ rcar_drif_set_mdr1(sdr);
+
+ /* Set new format */
+ ret = rcar_drif_set_format(sdr);
+ if (ret)
+ goto error;
+
+ if (sdr->num_cur_ch == RCAR_DRIF_MAX_CHANNEL)
+ sdr->hwbuf_size = sdr->fmt->buffersize / RCAR_DRIF_MAX_CHANNEL;
+ else
+ sdr->hwbuf_size = sdr->fmt->buffersize;
+
+ rdrif_dbg(sdr, "num hwbufs %u, hwbuf_size %u\n",
+ RCAR_DRIF_NUM_HWBUFS, sdr->hwbuf_size);
+
+ /* Alloc DMA channel */
+ ret = rcar_drif_alloc_dmachannels(sdr);
+ if (ret)
+ goto error;
+
+ /* Request buffers */
+ ret = rcar_drif_request_buf(sdr);
+ if (ret)
+ goto error;
+
+ /* Start Rx */
+ ret = rcar_drif_start(sdr);
+ if (ret)
+ goto error;
+
+ mutex_unlock(&sdr->v4l2_mutex);
+
+ return ret;
+
+error:
+ rcar_drif_release_queued_bufs(sdr, VB2_BUF_STATE_QUEUED);
+ rcar_drif_release_buf(sdr);
+ rcar_drif_release_dmachannels(sdr);
+ for_each_rcar_drif_channel(i, &enabled)
+ clk_disable_unprepare(sdr->ch[i]->clk);
+
+ mutex_unlock(&sdr->v4l2_mutex);
+
+ return ret;
+}
+
+/* Stop streaming */
+static void rcar_drif_stop_streaming(struct vb2_queue *vq)
+{
+ struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq);
+ unsigned int i;
+
+ mutex_lock(&sdr->v4l2_mutex);
+
+ /* Stop hardware streaming */
+ rcar_drif_stop(sdr);
+
+ /* Return all queued buffers to vb2 */
+ rcar_drif_release_queued_bufs(sdr, VB2_BUF_STATE_ERROR);
+
+ /* Release buf */
+ rcar_drif_release_buf(sdr);
+
+ /* Release DMA channel resources */
+ rcar_drif_release_dmachannels(sdr);
+
+ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask)
+ clk_disable_unprepare(sdr->ch[i]->clk);
+
+ mutex_unlock(&sdr->v4l2_mutex);
+}
+
+/* Vb2 ops */
+static const struct vb2_ops rcar_drif_vb2_ops = {
+ .queue_setup = rcar_drif_queue_setup,
+ .buf_queue = rcar_drif_buf_queue,
+ .start_streaming = rcar_drif_start_streaming,
+ .stop_streaming = rcar_drif_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int rcar_drif_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strlcpy(cap->card, sdr->vdev->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ sdr->vdev->name);
+
+ return 0;
+}
+
+static int rcar_drif_set_default_format(struct rcar_drif_sdr *sdr)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ /* The first format matching the number of enabled channels is the default */
+ if (sdr->num_hw_ch == formats[i].num_ch) {
+ sdr->fmt = &formats[i];
+ sdr->cur_ch_mask = sdr->hw_ch_mask;
+ sdr->num_cur_ch = sdr->num_hw_ch;
+ dev_dbg(sdr->dev, "default fmt[%u]: mask %lu num %u\n",
+ i, sdr->cur_ch_mask, sdr->num_cur_ch);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static int rcar_drif_enum_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ f->pixelformat = formats[f->index].pixelformat;
+
+ return 0;
+}
+
+static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
+ f->fmt.sdr.buffersize = sdr->fmt->buffersize;
+
+ return 0;
+}
+
+static int rcar_drif_s_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+ struct vb2_queue *q = &sdr->vb_queue;
+ unsigned int i;
+
+ if (vb2_is_busy(q))
+ return -EBUSY;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].pixelformat == f->fmt.sdr.pixelformat)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(formats))
+ i = 0; /* Set the 1st format as default on no match */
+
+ sdr->fmt = &formats[i];
+ f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
+ f->fmt.sdr.buffersize = formats[i].buffersize;
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+
+ /*
+ * If a format demands one channel only out of two
+ * enabled channels, pick the 0th channel.
+ */
+ if (formats[i].num_ch < sdr->num_hw_ch) {
+ sdr->cur_ch_mask = BIT(0);
+ sdr->num_cur_ch = formats[i].num_ch;
+ } else {
+ sdr->cur_ch_mask = sdr->hw_ch_mask;
+ sdr->num_cur_ch = sdr->num_hw_ch;
+ }
+
+ rdrif_dbg(sdr, "cur: idx %u mask %lu num %u\n",
+ i, sdr->cur_ch_mask, sdr->num_cur_ch);
+
+ return 0;
+}
+
+static int rcar_drif_try_fmt_sdr_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
+ f->fmt.sdr.buffersize = formats[i].buffersize;
+ return 0;
+ }
+ }
+
+ f->fmt.sdr.pixelformat = formats[0].pixelformat;
+ f->fmt.sdr.buffersize = formats[0].buffersize;
+ memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
+
+ return 0;
+}
+
+/* Tuner subdev ioctls */
+static int rcar_drif_enum_freq_bands(struct file *file, void *priv,
+ struct v4l2_frequency_band *band)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ return v4l2_subdev_call(sdr->ep.subdev, tuner, enum_freq_bands, band);
+}
+
+static int rcar_drif_g_frequency(struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ return v4l2_subdev_call(sdr->ep.subdev, tuner, g_frequency, f);
+}
+
+static int rcar_drif_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *f)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ return v4l2_subdev_call(sdr->ep.subdev, tuner, s_frequency, f);
+}
+
+static int rcar_drif_g_tuner(struct file *file, void *priv,
+ struct v4l2_tuner *vt)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ return v4l2_subdev_call(sdr->ep.subdev, tuner, g_tuner, vt);
+}
+
+static int rcar_drif_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *vt)
+{
+ struct rcar_drif_sdr *sdr = video_drvdata(file);
+
+ return v4l2_subdev_call(sdr->ep.subdev, tuner, s_tuner, vt);
+}
+
+static const struct v4l2_ioctl_ops rcar_drif_ioctl_ops = {
+ .vidioc_querycap = rcar_drif_querycap,
+
+ .vidioc_enum_fmt_sdr_cap = rcar_drif_enum_fmt_sdr_cap,
+ .vidioc_g_fmt_sdr_cap = rcar_drif_g_fmt_sdr_cap,
+ .vidioc_s_fmt_sdr_cap = rcar_drif_s_fmt_sdr_cap,
+ .vidioc_try_fmt_sdr_cap = rcar_drif_try_fmt_sdr_cap,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_s_frequency = rcar_drif_s_frequency,
+ .vidioc_g_frequency = rcar_drif_g_frequency,
+ .vidioc_s_tuner = rcar_drif_s_tuner,
+ .vidioc_g_tuner = rcar_drif_g_tuner,
+ .vidioc_enum_freq_bands = rcar_drif_enum_freq_bands,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+};
+
+static const struct v4l2_file_operations rcar_drif_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static int rcar_drif_sdr_register(struct rcar_drif_sdr *sdr)
+{
+ int ret;
+
+ /* Init video_device structure */
+ sdr->vdev = video_device_alloc();
+ if (!sdr->vdev)
+ return -ENOMEM;
+
+ snprintf(sdr->vdev->name, sizeof(sdr->vdev->name), "R-Car DRIF");
+ sdr->vdev->fops = &rcar_drif_fops;
+ sdr->vdev->ioctl_ops = &rcar_drif_ioctl_ops;
+ sdr->vdev->release = video_device_release;
+ sdr->vdev->lock = &sdr->v4l2_mutex;
+ sdr->vdev->queue = &sdr->vb_queue;
+ sdr->vdev->queue->lock = &sdr->vb_queue_mutex;
+ sdr->vdev->ctrl_handler = &sdr->ctrl_hdl;
+ sdr->vdev->v4l2_dev = &sdr->v4l2_dev;
+ sdr->vdev->device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER |
+ V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
+ video_set_drvdata(sdr->vdev, sdr);
+
+ /* Register V4L2 SDR device */
+ ret = video_register_device(sdr->vdev, VFL_TYPE_SDR, -1);
+ if (ret) {
+ video_device_release(sdr->vdev);
+ sdr->vdev = NULL;
+ dev_err(sdr->dev, "failed video_register_device (%d)\n", ret);
+ }
+
+ return ret;
+}
+
+static void rcar_drif_sdr_unregister(struct rcar_drif_sdr *sdr)
+{
+ video_unregister_device(sdr->vdev);
+ sdr->vdev = NULL;
+}
+
+/* Sub-device bound callback */
+static int rcar_drif_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rcar_drif_sdr *sdr =
+ container_of(notifier, struct rcar_drif_sdr, notifier);
+
+ if (sdr->ep.asd.match.fwnode.fwnode !=
+ of_fwnode_handle(subdev->dev->of_node)) {
+ rdrif_err(sdr, "subdev %s cannot bind\n", subdev->name);
+ return -EINVAL;
+ }
+
+ v4l2_set_subdev_hostdata(subdev, sdr);
+ sdr->ep.subdev = subdev;
+ rdrif_dbg(sdr, "bound asd %s\n", subdev->name);
+
+ return 0;
+}
+
+/* Sub-device unbind callback */
+static void rcar_drif_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rcar_drif_sdr *sdr =
+ container_of(notifier, struct rcar_drif_sdr, notifier);
+
+ if (sdr->ep.subdev != subdev) {
+ rdrif_err(sdr, "subdev %s is not bound\n", subdev->name);
+ return;
+ }
+
+ /* Free ctrl handler if initialized */
+ v4l2_ctrl_handler_free(&sdr->ctrl_hdl);
+ sdr->v4l2_dev.ctrl_handler = NULL;
+ sdr->ep.subdev = NULL;
+
+ rcar_drif_sdr_unregister(sdr);
+ rdrif_dbg(sdr, "unbind asd %s\n", subdev->name);
+}
+
+/* Sub-device registered notification callback */
+static int rcar_drif_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct rcar_drif_sdr *sdr =
+ container_of(notifier, struct rcar_drif_sdr, notifier);
+ int ret;
+
+ /*
+ * The subdev tested at this point uses 4 controls. Use 10 as a
+ * worst-case hint. When fewer controls are needed there will be some
+ * unused memory, and when more controls are needed the framework uses
+ * a hash to manage the controls within this number.
+ */
+ ret = v4l2_ctrl_handler_init(&sdr->ctrl_hdl, 10);
+ if (ret)
+ return -ENOMEM;
+
+ sdr->v4l2_dev.ctrl_handler = &sdr->ctrl_hdl;
+ ret = v4l2_device_register_subdev_nodes(&sdr->v4l2_dev);
+ if (ret) {
+ rdrif_err(sdr, "failed: register subdev nodes ret %d\n", ret);
+ goto error;
+ }
+
+ ret = v4l2_ctrl_add_handler(&sdr->ctrl_hdl,
+ sdr->ep.subdev->ctrl_handler, NULL);
+ if (ret) {
+ rdrif_err(sdr, "failed: ctrl add hdlr ret %d\n", ret);
+ goto error;
+ }
+
+ ret = rcar_drif_sdr_register(sdr);
+ if (ret)
+ goto error;
+
+ return ret;
+
+error:
+ v4l2_ctrl_handler_free(&sdr->ctrl_hdl);
+
+ return ret;
+}
+
+/* Read endpoint properties */
+static void rcar_drif_get_ep_properties(struct rcar_drif_sdr *sdr,
+ struct fwnode_handle *fwnode)
+{
+ u32 val;
+
+ /* Set the I2S defaults for SIRMDR1 */
+ sdr->mdr1 = RCAR_DRIF_SIRMDR1_SYNCMD_LR | RCAR_DRIF_SIRMDR1_MSB_FIRST |
+ RCAR_DRIF_SIRMDR1_DTDL_1 | RCAR_DRIF_SIRMDR1_SYNCDL_0;
+
+ /* Parse sync polarity from endpoint */
+ if (!fwnode_property_read_u32(fwnode, "sync-active", &val))
+ sdr->mdr1 |= val ? RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH :
+ RCAR_DRIF_SIRMDR1_SYNCAC_POL_LOW;
+ else
+ sdr->mdr1 |= RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH; /* default */
+
+ dev_dbg(sdr->dev, "mdr1 0x%08x\n", sdr->mdr1);
+}
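+
+/*
+ * Only the sync polarity ("sync-active") is taken from the endpoint; the
+ * remaining SIRMDR1 fields (sync mode, bit order, dtdl, syncdl) keep the
+ * I2S defaults set above, in line with the limitation noted at the top of
+ * this file.
+ */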
+
+/* Parse sub-devs (tuner) to find a matching device */
+static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
+{
+ struct v4l2_async_notifier *notifier = &sdr->notifier;
+ struct fwnode_handle *fwnode, *ep;
+
+ notifier->subdevs = devm_kzalloc(sdr->dev, sizeof(*notifier->subdevs),
+ GFP_KERNEL);
+ if (!notifier->subdevs)
+ return -ENOMEM;
+
+ ep = fwnode_graph_get_next_endpoint(of_fwnode_handle(sdr->dev->of_node),
+ NULL);
+ if (!ep)
+ return 0;
+
+ notifier->subdevs[notifier->num_subdevs] = &sdr->ep.asd;
+ fwnode = fwnode_graph_get_remote_port_parent(ep);
+ if (!fwnode) {
+ dev_warn(sdr->dev, "bad remote port parent\n");
+ fwnode_handle_put(ep);
+ return -EINVAL;
+ }
+
+ sdr->ep.asd.match.fwnode.fwnode = fwnode;
+ sdr->ep.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ notifier->num_subdevs++;
+
+ /* Get the endpoint properties */
+ rcar_drif_get_ep_properties(sdr, ep);
+
+ fwnode_handle_put(fwnode);
+ fwnode_handle_put(ep);
+
+ return 0;
+}
+
+/* Check if the given device is the primary bond */
+static bool rcar_drif_primary_bond(struct platform_device *pdev)
+{
+ return of_property_read_bool(pdev->dev.of_node, "renesas,primary-bond");
+}
+
+/* Check if both devices of the bond are enabled */
+static struct device_node *rcar_drif_bond_enabled(struct platform_device *p)
+{
+ struct device_node *np;
+
+ np = of_parse_phandle(p->dev.of_node, "renesas,bonding", 0);
+ if (np && of_device_is_available(np))
+ return np;
+
+ return NULL;
+}
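+
+/*
+ * Bonding overview: each internal channel is a separate DT node. The two
+ * nodes reference each other through the "renesas,bonding" phandle and one
+ * of them (channel 0) carries "renesas,primary-bond". Only the primary
+ * device allocates the SDR instance and registers the V4L2 device; the
+ * secondary probe just records its resources and returns early (see
+ * rcar_drif_probe() below).
+ */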
+
+/* Check if the bonded device is probed */
+static int rcar_drif_bond_available(struct rcar_drif_sdr *sdr,
+ struct device_node *np)
+{
+ struct platform_device *pdev;
+ struct rcar_drif *ch;
+ int ret = 0;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ dev_err(sdr->dev, "failed to get bonded device from node\n");
+ return -ENODEV;
+ }
+
+ device_lock(&pdev->dev);
+ ch = platform_get_drvdata(pdev);
+ if (ch) {
+ /* Update sdr data in the bonded device */
+ ch->sdr = sdr;
+
+ /* Update sdr with bonded device data */
+ sdr->ch[ch->num] = ch;
+ sdr->hw_ch_mask |= BIT(ch->num);
+ } else {
+ /* Defer */
+ dev_info(sdr->dev, "defer probe\n");
+ ret = -EPROBE_DEFER;
+ }
+ device_unlock(&pdev->dev);
+
+ put_device(&pdev->dev);
+
+ return ret;
+}
+
+/* V4L2 SDR device probe */
+static int rcar_drif_sdr_probe(struct rcar_drif_sdr *sdr)
+{
+ int ret;
+
+ /* Validate any supported format for enabled channels */
+ ret = rcar_drif_set_default_format(sdr);
+ if (ret) {
+ dev_err(sdr->dev, "failed to set default format\n");
+ return ret;
+ }
+
+ /* Set defaults */
+ sdr->hwbuf_size = RCAR_DRIF_DEFAULT_HWBUF_SIZE;
+
+ mutex_init(&sdr->v4l2_mutex);
+ mutex_init(&sdr->vb_queue_mutex);
+ spin_lock_init(&sdr->queued_bufs_lock);
+ spin_lock_init(&sdr->dma_lock);
+ INIT_LIST_HEAD(&sdr->queued_bufs);
+
+ /* Init videobuf2 queue structure */
+ sdr->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
+ sdr->vb_queue.io_modes = VB2_READ | VB2_MMAP | VB2_DMABUF;
+ sdr->vb_queue.drv_priv = sdr;
+ sdr->vb_queue.buf_struct_size = sizeof(struct rcar_drif_frame_buf);
+ sdr->vb_queue.ops = &rcar_drif_vb2_ops;
+ sdr->vb_queue.mem_ops = &vb2_vmalloc_memops;
+ sdr->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
+ /* Init videobuf2 queue */
+ ret = vb2_queue_init(&sdr->vb_queue);
+ if (ret) {
+ dev_err(sdr->dev, "failed: vb2_queue_init ret %d\n", ret);
+ return ret;
+ }
+
+ /* Register the v4l2_device */
+ ret = v4l2_device_register(sdr->dev, &sdr->v4l2_dev);
+ if (ret) {
+ dev_err(sdr->dev, "failed: v4l2_device_register ret %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Parse subdevs after v4l2_device_register because if the subdev is
+ * already probed, the bound and complete callbacks are called immediately
+ */
+ ret = rcar_drif_parse_subdevs(sdr);
+ if (ret)
+ goto error;
+
+ sdr->notifier.bound = rcar_drif_notify_bound;
+ sdr->notifier.unbind = rcar_drif_notify_unbind;
+ sdr->notifier.complete = rcar_drif_notify_complete;
+
+ /* Register notifier */
+ ret = v4l2_async_notifier_register(&sdr->v4l2_dev, &sdr->notifier);
+ if (ret < 0) {
+ dev_err(sdr->dev, "failed: notifier register ret %d\n", ret);
+ goto error;
+ }
+
+ return ret;
+
+error:
+ v4l2_device_unregister(&sdr->v4l2_dev);
+
+ return ret;
+}
+
+/* V4L2 SDR device remove */
+static void rcar_drif_sdr_remove(struct rcar_drif_sdr *sdr)
+{
+ v4l2_async_notifier_unregister(&sdr->notifier);
+ v4l2_device_unregister(&sdr->v4l2_dev);
+}
+
+/* DRIF channel probe */
+static int rcar_drif_probe(struct platform_device *pdev)
+{
+ struct rcar_drif_sdr *sdr;
+ struct device_node *np;
+ struct rcar_drif *ch;
+ struct resource *res;
+ int ret;
+
+ /* Reserve memory for enabled channel */
+ ch = devm_kzalloc(&pdev->dev, sizeof(*ch), GFP_KERNEL);
+ if (!ch)
+ return -ENOMEM;
+
+ ch->pdev = pdev;
+
+ /* Module clock */
+ ch->clk = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(ch->clk)) {
+ ret = PTR_ERR(ch->clk);
+ dev_err(&pdev->dev, "clk get failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* Register map */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ch->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ch->base)) {
+ ret = PTR_ERR(ch->base);
+ dev_err(&pdev->dev, "ioremap failed (%d)\n", ret);
+ return ret;
+ }
+ ch->start = res->start;
+ platform_set_drvdata(pdev, ch);
+
+ /* Check if both channels of the bond are enabled */
+ np = rcar_drif_bond_enabled(pdev);
+ if (np) {
+ /* Check if the current channel is acting as the primary bond */
+ if (!rcar_drif_primary_bond(pdev)) {
+ ch->num = 1; /* The primary bond is always channel 0 */
+ of_node_put(np);
+ return 0;
+ }
+ }
+
+ /* Reserve memory for SDR structure */
+ sdr = devm_kzalloc(&pdev->dev, sizeof(*sdr), GFP_KERNEL);
+ if (!sdr) {
+ of_node_put(np);
+ return -ENOMEM;
+ }
+ ch->sdr = sdr;
+ sdr->dev = &pdev->dev;
+
+ /* Establish links between SDR and channel(s) */
+ sdr->ch[ch->num] = ch;
+ sdr->hw_ch_mask = BIT(ch->num);
+ if (np) {
+ /* Check if bonded device is ready */
+ ret = rcar_drif_bond_available(sdr, np);
+ of_node_put(np);
+ if (ret)
+ return ret;
+ }
+ sdr->num_hw_ch = hweight_long(sdr->hw_ch_mask);
+
+ return rcar_drif_sdr_probe(sdr);
+}
+
+/* DRIF channel remove */
+static int rcar_drif_remove(struct platform_device *pdev)
+{
+ struct rcar_drif *ch = platform_get_drvdata(pdev);
+ struct rcar_drif_sdr *sdr = ch->sdr;
+
+ /* Channel 0 will be the SDR instance */
+ if (ch->num)
+ return 0;
+
+ /* SDR instance */
+ rcar_drif_sdr_remove(sdr);
+
+ return 0;
+}
+
+/* FIXME: Implement suspend/resume support */
+static int __maybe_unused rcar_drif_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int __maybe_unused rcar_drif_resume(struct device *dev)
+{
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rcar_drif_pm_ops, rcar_drif_suspend,
+ rcar_drif_resume);
+
+static const struct of_device_id rcar_drif_of_table[] = {
+ { .compatible = "renesas,rcar-gen3-drif" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rcar_drif_of_table);
+
+#define RCAR_DRIF_DRV_NAME "rcar_drif"
+static struct platform_driver rcar_drif_driver = {
+ .driver = {
+ .name = RCAR_DRIF_DRV_NAME,
+ .of_match_table = of_match_ptr(rcar_drif_of_table),
+ .pm = &rcar_drif_pm_ops,
+ },
+ .probe = rcar_drif_probe,
+ .remove = rcar_drif_remove,
+};
+
+module_platform_driver(rcar_drif_driver);
+
+MODULE_DESCRIPTION("Renesas R-Car Gen3 DRIF driver");
+MODULE_ALIAS("platform:" RCAR_DRIF_DRV_NAME);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ramesh Shanmugasundaram <[email protected]>");
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index 42f25d241edd..3ee51fc3bb50 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -1,5 +1,5 @@
/*
- * Renesas RCar Fine Display Processor
+ * Renesas R-Car Fine Display Processor
*
* Video format converter and frame deinterlacer device.
*
@@ -258,8 +258,9 @@ MODULE_PARM_DESC(debug, "activate debug info");
/* Internal Data (HW Version) */
#define FD1_IP_INTDATA 0x0800
-#define FD1_IP_H3 0x02010101
+#define FD1_IP_H3_ES1 0x02010101
#define FD1_IP_M3W 0x02010202
+#define FD1_IP_H3 0x02010203
/* LUTs */
#define FD1_LUT_DIF_ADJ 0x1000
@@ -2359,12 +2360,15 @@ static int fdp1_probe(struct platform_device *pdev)
hw_version = fdp1_read(fdp1, FD1_IP_INTDATA);
switch (hw_version) {
- case FD1_IP_H3:
- dprintk(fdp1, "FDP1 Version R-Car H3\n");
+ case FD1_IP_H3_ES1:
+ dprintk(fdp1, "FDP1 Version R-Car H3 ES1\n");
break;
case FD1_IP_M3W:
dprintk(fdp1, "FDP1 Version R-Car M3-W\n");
break;
+ case FD1_IP_H3:
+ dprintk(fdp1, "FDP1 Version R-Car H3\n");
+ break;
default:
dev_err(fdp1->dev, "FDP1 Unidentifiable (0x%08x)\n",
hw_version);
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 1b30be72f4f9..25c7a7d42292 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -80,7 +80,7 @@ static int s3c_camif_hw_init(struct camif_dev *camif, struct camif_vp *vp)
camif_hw_set_test_pattern(camif, camif->test_pattern);
if (variant->has_img_effect)
camif_hw_set_effect(camif, camif->colorfx,
- camif->colorfx_cb, camif->colorfx_cr);
+ camif->colorfx_cr, camif->colorfx_cb);
if (variant->ip_revision == S3C6410_CAMIF_IP_REV)
camif_hw_set_input_path(vp);
camif_cfg_video_path(vp);
@@ -364,7 +364,7 @@ irqreturn_t s3c_camif_irq_handler(int irq, void *priv)
camif_hw_set_test_pattern(camif, camif->test_pattern);
if (camif->variant->has_img_effect)
camif_hw_set_effect(camif, camif->colorfx,
- camif->colorfx_cb, camif->colorfx_cr);
+ camif->colorfx_cr, camif->colorfx_cb);
vp->state &= ~ST_VP_CONFIG;
}
unlock:
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c
index 664937b61fa4..8e06071a7977 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.c
+++ b/drivers/media/platform/s5p-cec/s5p_cec.c
@@ -173,6 +173,7 @@ static int s5p_cec_probe(struct platform_device *pdev)
struct platform_device *hdmi_dev;
struct resource *res;
struct s5p_cec_dev *cec;
+ bool needs_hpd = of_property_read_bool(pdev->dev.of_node, "needs-hpd");
int ret;
np = of_parse_phandle(pdev->dev.of_node, "hdmi-phandle", 0);
@@ -221,7 +222,8 @@ static int s5p_cec_probe(struct platform_device *pdev)
cec->adap = cec_allocate_adapter(&s5p_cec_adap_ops, cec,
CEC_NAME,
CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT |
- CEC_CAP_PASSTHROUGH | CEC_CAP_RC, 1);
+ CEC_CAP_PASSTHROUGH | CEC_CAP_RC |
+ (needs_hpd ? CEC_CAP_NEEDS_HPD : 0), 1);
ret = PTR_ERR_OR_ZERO(cec->adap);
if (ret)
return ret;
@@ -235,7 +237,7 @@ static int s5p_cec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, cec);
pm_runtime_enable(dev);
- dev_dbg(dev, "successfuly probed\n");
+ dev_dbg(dev, "successfully probed\n");
return 0;
err_delete_adapter:
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.h b/drivers/media/platform/s5p-cec/s5p_cec.h
index 7015845c1caa..8bcd8dc1aeb9 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.h
+++ b/drivers/media/platform/s5p-cec/s5p_cec.h
@@ -22,7 +22,6 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/timer.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include <media/cec.h>
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 52dc7941db65..d1e3ebb22577 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1099,10 +1099,10 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
struct s5p_jpeg_ctx *ctx)
{
int c, components = 0, notfound, n_dht = 0, n_dqt = 0;
- unsigned int height, width, word, subsampling = 0, sos = 0, sof = 0,
- sof_len = 0;
- unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER],
- dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER];
+ unsigned int height = 0, width = 0, word, subsampling = 0;
+ unsigned int sos = 0, sof = 0, sof_len = 0;
+ unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER];
+ unsigned int dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER];
long length;
struct s5p_jpeg_buffer jpeg_buffer;
@@ -2642,13 +2642,13 @@ static irqreturn_t s5p_jpeg_irq(int irq, void *dev_id)
if (curr_ctx->mode == S5P_JPEG_ENCODE)
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload_size);
v4l2_m2m_buf_done(dst_buf, state);
- v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
curr_ctx->subsampling = s5p_jpeg_get_subsampling_mode(jpeg->regs);
spin_unlock(&jpeg->slock);
s5p_jpeg_clear_int(jpeg->regs);
+ v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
return IRQ_HANDLED;
}
@@ -2707,11 +2707,12 @@ static irqreturn_t exynos4_jpeg_irq(int irq, void *priv)
v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
}
- v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
if (jpeg->variant->version == SJPEG_EXYNOS4)
curr_ctx->subsampling = exynos4_jpeg_get_frame_fmt(jpeg->regs);
spin_unlock(&jpeg->slock);
+
+ v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
return IRQ_HANDLED;
}
@@ -2770,10 +2771,15 @@ static irqreturn_t exynos3250_jpeg_irq(int irq, void *dev_id)
if (curr_ctx->mode == S5P_JPEG_ENCODE)
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload_size);
v4l2_m2m_buf_done(dst_buf, state);
- v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
curr_ctx->subsampling =
exynos3250_jpeg_get_subsampling_mode(jpeg->regs);
+
+ spin_unlock(&jpeg->slock);
+
+ v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
+ return IRQ_HANDLED;
+
exit_unlock:
spin_unlock(&jpeg->slock);
return IRQ_HANDLED;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index b41ee608c171..0913881219ff 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -1293,7 +1293,7 @@ static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
* First set the output frame buffers
*/
if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
- mfc_err("It seems that not all destionation buffers were mmaped\nMFC requires that all destination are mmaped before starting processing\n");
+ mfc_err("It seems that not all destination buffers were mmaped\nMFC requires that all destination are mmaped before starting processing\n");
return -EAGAIN;
}
if (list_empty(&ctx->src_queue)) {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 85880e9106be..88dbb9c341ec 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -1650,7 +1650,7 @@ static inline int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
* s5p_mfc_alloc_dec_buffers(ctx); */
if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
- mfc_err("It seems that not all destionation buffers were\n"
+ mfc_err("It seems that not all destination buffers were\n"
"mmaped.MFC requires that all destination are mmaped\n"
"before starting processing.\n");
return -EAGAIN;
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index 992d61a8b961..871da2a2a91c 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -229,6 +229,7 @@ static void sh_vou_stream_config(struct sh_vou_device *vou_dev)
break;
case V4L2_PIX_FMT_RGB565:
dataswap ^= 1;
+ /* fall through */
case V4L2_PIX_FMT_RGB565X:
row_coeff = 2;
break;
@@ -815,6 +816,7 @@ static u32 sh_vou_ntsc_mode(enum sh_vou_bus_fmt bus_fmt)
default:
pr_warn("%s(): Invalid bus-format code %d, using default 8-bit\n",
__func__, bus_fmt);
+ /* fall through */
case SH_VOU_BUS_8BIT:
return 1;
case SH_VOU_BUS_16BIT:
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 3c9421f4d8e3..45a0429d75bb 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -23,6 +23,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
@@ -36,7 +37,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-dev.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
#include <media/videobuf2-v4l2.h>
/* Default to VGA resolution */
@@ -1512,8 +1513,8 @@ static int soc_of_bind(struct soc_camera_host *ici,
if (!info)
return -ENOMEM;
- info->sasd.asd.match.of.node = remote;
- info->sasd.asd.match_type = V4L2_ASYNC_MATCH_OF;
+ info->sasd.asd.match.fwnode.fwnode = of_fwnode_handle(remote);
+ info->sasd.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
info->subdev = &info->sasd.asd;
/* Or shall this be managed by the soc-camera device? */
diff --git a/drivers/media/platform/soc_camera/soc_mediabus.c b/drivers/media/platform/soc_camera/soc_mediabus.c
index e3e665e1c503..57581f626f4c 100644
--- a/drivers/media/platform/soc_camera/soc_mediabus.c
+++ b/drivers/media/platform/soc_camera/soc_mediabus.c
@@ -494,6 +494,7 @@ unsigned int soc_mbus_config_compatible(const struct v4l2_mbus_config *cfg,
V4L2_MBUS_HSYNC_ACTIVE_LOW);
vsync = common_flags & (V4L2_MBUS_VSYNC_ACTIVE_HIGH |
V4L2_MBUS_VSYNC_ACTIVE_LOW);
+ /* fall through */
case V4L2_MBUS_BT656:
pclk = common_flags & (V4L2_MBUS_PCLK_SAMPLE_RISING |
V4L2_MBUS_PCLK_SAMPLE_FALLING);
diff --git a/drivers/media/platform/sti/cec/stih-cec.c b/drivers/media/platform/sti/cec/stih-cec.c
index 39ff55145a82..dccbdaebb7a8 100644
--- a/drivers/media/platform/sti/cec/stih-cec.c
+++ b/drivers/media/platform/sti/cec/stih-cec.c
@@ -1,6 +1,6 @@
/*
* STIH4xx CEC driver
- * Copyright (C) STMicroelectronic SA 2016
+ * Copyright (C) STMicroelectronics SA 2016
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -213,7 +213,8 @@ static int stih_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
for (i = 0; i < msg->len; i++)
writeb(msg->msg[i], cec->regs + CEC_TX_DATA_BASE + i);
- /* Start transmission, configure hardware to add start and stop bits
+ /*
+ * Start transmission, configure hardware to add start and stop bits
* Signal free time is handled by the hardware
*/
writel(CEC_TX_AUTO_SOM_EN | CEC_TX_AUTO_EOM_EN | CEC_TX_START |
@@ -225,22 +226,21 @@ static int stih_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
static void stih_tx_done(struct stih_cec *cec, u32 status)
{
if (status & CEC_TX_ERROR) {
- cec_transmit_done(cec->adap, CEC_TX_STATUS_ERROR, 0, 0, 0, 1);
+ cec_transmit_attempt_done(cec->adap, CEC_TX_STATUS_ERROR);
return;
}
if (status & CEC_TX_ARB_ERROR) {
- cec_transmit_done(cec->adap,
- CEC_TX_STATUS_ARB_LOST, 1, 0, 0, 0);
+ cec_transmit_attempt_done(cec->adap, CEC_TX_STATUS_ARB_LOST);
return;
}
if (!(status & CEC_TX_ACK_GET_STS)) {
- cec_transmit_done(cec->adap, CEC_TX_STATUS_NACK, 0, 1, 0, 0);
+ cec_transmit_attempt_done(cec->adap, CEC_TX_STATUS_NACK);
return;
}
- cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
+ cec_transmit_attempt_done(cec->adap, CEC_TX_STATUS_OK);
}
static void stih_rx_done(struct stih_cec *cec, u32 status)
@@ -353,7 +353,7 @@ static int stih_cec_probe(struct platform_device *pdev)
cec->adap = cec_allocate_adapter(&sti_cec_adap_ops, cec,
CEC_NAME,
CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH |
- CEC_CAP_TRANSMIT, 1);
+ CEC_CAP_TRANSMIT, CEC_MAX_LOG_ADDRS);
ret = PTR_ERR_OR_ZERO(cec->adap);
if (ret)
return ret;
diff --git a/drivers/media/platform/stm32/Makefile b/drivers/media/platform/stm32/Makefile
new file mode 100644
index 000000000000..07355091376b
--- /dev/null
+++ b/drivers/media/platform/stm32/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_VIDEO_STM32_DCMI) += stm32-dcmi.o
+obj-$(CONFIG_VIDEO_STM32_HDMI_CEC) += stm32-cec.o
diff --git a/drivers/media/platform/stm32/stm32-cec.c b/drivers/media/platform/stm32/stm32-cec.c
new file mode 100644
index 000000000000..9ab896b01ee8
--- /dev/null
+++ b/drivers/media/platform/stm32/stm32-cec.c
@@ -0,0 +1,362 @@
+/*
+ * STM32 CEC driver
+ * Copyright (C) STMicroelectronics SA 2017
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <media/cec.h>
+
+#define CEC_NAME "stm32-cec"
+
+/* CEC registers */
+#define CEC_CR 0x0000 /* Control Register */
+#define CEC_CFGR 0x0004 /* ConFiGuration Register */
+#define CEC_TXDR 0x0008 /* Tx data Register */
+#define CEC_RXDR 0x000C /* Rx data Register */
+#define CEC_ISR 0x0010 /* Interrupt and status Register */
+#define CEC_IER 0x0014 /* Interrupt enable Register */
+
+#define TXEOM BIT(2)
+#define TXSOM BIT(1)
+#define CECEN BIT(0)
+
+#define LSTN BIT(31)
+#define OAR GENMASK(30, 16)
+#define SFTOP BIT(8)
+#define BRDNOGEN BIT(7)
+#define LBPEGEN BIT(6)
+#define BREGEN BIT(5)
+#define BRESTP BIT(4)
+#define RXTOL BIT(3)
+#define SFT GENMASK(2, 0)
+#define FULL_CFG (LSTN | SFTOP | BRDNOGEN | LBPEGEN | BREGEN | BRESTP \
+ | RXTOL)
+
+#define TXACKE BIT(12)
+#define TXERR BIT(11)
+#define TXUDR BIT(10)
+#define TXEND BIT(9)
+#define TXBR BIT(8)
+#define ARBLST BIT(7)
+#define RXACKE BIT(6)
+#define RXOVR BIT(2)
+#define RXEND BIT(1)
+#define RXBR BIT(0)
+
+#define ALL_TX_IT (TXEND | TXBR | TXACKE | TXERR | TXUDR | ARBLST)
+#define ALL_RX_IT (RXEND | RXBR | RXACKE | RXOVR)
+
+struct stm32_cec {
+ struct cec_adapter *adap;
+ struct device *dev;
+ struct clk *clk_cec;
+ struct clk *clk_hdmi_cec;
+ struct reset_control *rstc;
+ struct regmap *regmap;
+ int irq;
+ u32 irq_status;
+ struct cec_msg rx_msg;
+ struct cec_msg tx_msg;
+ int tx_cnt;
+};
+
+static void cec_hw_init(struct stm32_cec *cec)
+{
+ regmap_update_bits(cec->regmap, CEC_CR, TXEOM | TXSOM | CECEN, 0);
+
+ regmap_update_bits(cec->regmap, CEC_IER, ALL_TX_IT | ALL_RX_IT,
+ ALL_TX_IT | ALL_RX_IT);
+
+ regmap_update_bits(cec->regmap, CEC_CFGR, FULL_CFG, FULL_CFG);
+}
+
+static void stm32_tx_done(struct stm32_cec *cec, u32 status)
+{
+ if (status & (TXERR | TXUDR)) {
+ cec_transmit_done(cec->adap, CEC_TX_STATUS_ERROR,
+ 0, 0, 0, 1);
+ return;
+ }
+
+ if (status & ARBLST) {
+ cec_transmit_done(cec->adap, CEC_TX_STATUS_ARB_LOST,
+ 1, 0, 0, 0);
+ return;
+ }
+
+ if (status & TXACKE) {
+ cec_transmit_done(cec->adap, CEC_TX_STATUS_NACK,
+ 0, 1, 0, 0);
+ return;
+ }
+
+ if (cec->irq_status & TXBR) {
+ /* send next byte */
+ if (cec->tx_cnt < cec->tx_msg.len)
+ regmap_write(cec->regmap, CEC_TXDR,
+ cec->tx_msg.msg[cec->tx_cnt++]);
+
+ /* TXEOM is set to command transmission of the last byte */
+ if (cec->tx_cnt == cec->tx_msg.len)
+ regmap_update_bits(cec->regmap, CEC_CR, TXEOM, TXEOM);
+ }
+
+ if (cec->irq_status & TXEND)
+ cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
+}
+
+static void stm32_rx_done(struct stm32_cec *cec, u32 status)
+{
+ if (cec->irq_status & (RXACKE | RXOVR)) {
+ cec->rx_msg.len = 0;
+ return;
+ }
+
+ if (cec->irq_status & RXBR) {
+ u32 val;
+
+ regmap_read(cec->regmap, CEC_RXDR, &val);
+ cec->rx_msg.msg[cec->rx_msg.len++] = val & 0xFF;
+ }
+
+ if (cec->irq_status & RXEND) {
+ cec_received_msg(cec->adap, &cec->rx_msg);
+ cec->rx_msg.len = 0;
+ }
+}
+
+static irqreturn_t stm32_cec_irq_thread(int irq, void *arg)
+{
+ struct stm32_cec *cec = arg;
+
+ if (cec->irq_status & ALL_TX_IT)
+ stm32_tx_done(cec, cec->irq_status);
+
+ if (cec->irq_status & ALL_RX_IT)
+ stm32_rx_done(cec, cec->irq_status);
+
+ cec->irq_status = 0;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stm32_cec_irq_handler(int irq, void *arg)
+{
+ struct stm32_cec *cec = arg;
+
+ regmap_read(cec->regmap, CEC_ISR, &cec->irq_status);
+
+ regmap_update_bits(cec->regmap, CEC_ISR,
+ ALL_TX_IT | ALL_RX_IT,
+ ALL_TX_IT | ALL_RX_IT);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int stm32_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct stm32_cec *cec = adap->priv;
+ int ret = 0;
+
+ if (enable) {
+ ret = clk_enable(cec->clk_cec);
+ if (ret)
+ dev_err(cec->dev, "fail to enable cec clock\n");
+
+ clk_enable(cec->clk_hdmi_cec);
+ regmap_update_bits(cec->regmap, CEC_CR, CECEN, CECEN);
+ } else {
+ clk_disable(cec->clk_cec);
+ clk_disable(cec->clk_hdmi_cec);
+ regmap_update_bits(cec->regmap, CEC_CR, CECEN, 0);
+ }
+
+ return ret;
+}
+
+static int stm32_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct stm32_cec *cec = adap->priv;
+ u32 oar = (1 << logical_addr) << 16;
+
+ regmap_update_bits(cec->regmap, CEC_CR, CECEN, 0);
+
+ if (logical_addr == CEC_LOG_ADDR_INVALID)
+ regmap_update_bits(cec->regmap, CEC_CFGR, OAR, 0);
+ else
+ regmap_update_bits(cec->regmap, CEC_CFGR, oar, oar);
+
+ regmap_update_bits(cec->regmap, CEC_CR, CECEN, CECEN);
+
+ return 0;
+}
+
+static int stm32_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct stm32_cec *cec = adap->priv;
+
+ /* Copy message */
+ cec->tx_msg = *msg;
+ cec->tx_cnt = 0;
+
+ /*
+ * If the CEC message consists of only one byte,
+ * TXEOM must be set before TXSOM.
+ */
+ if (cec->tx_msg.len == 1)
+ regmap_update_bits(cec->regmap, CEC_CR, TXEOM, TXEOM);
+
+ /* TXSOM is set to command transmission of the first byte */
+ regmap_update_bits(cec->regmap, CEC_CR, TXSOM, TXSOM);
+
+ /* Write the header (first byte of message) */
+ regmap_write(cec->regmap, CEC_TXDR, cec->tx_msg.msg[0]);
+ cec->tx_cnt++;
+
+ return 0;
+}
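+
+/*
+ * Transmit flow (as implemented above and in stm32_tx_done()): only the
+ * header byte is written here; each TXBR interrupt then queues the next
+ * byte of tx_msg, TXEOM is set once the last byte has been queued (or
+ * before TXSOM for a single-byte message), and TXEND finally reports
+ * completion to the CEC core.
+ */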
+
+static const struct cec_adap_ops stm32_cec_adap_ops = {
+ .adap_enable = stm32_cec_adap_enable,
+ .adap_log_addr = stm32_cec_adap_log_addr,
+ .adap_transmit = stm32_cec_adap_transmit,
+};
+
+static const struct regmap_config stm32_cec_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = sizeof(u32),
+ .max_register = 0x14,
+ .fast_io = true,
+};
+
+static int stm32_cec_probe(struct platform_device *pdev)
+{
+ u32 caps = CEC_CAP_LOG_ADDRS | CEC_CAP_PASSTHROUGH |
+ CEC_CAP_TRANSMIT | CEC_CAP_RC | CEC_CAP_PHYS_ADDR |
+ CEC_MODE_MONITOR_ALL;
+ struct resource *res;
+ struct stm32_cec *cec;
+ void __iomem *mmio;
+ int ret;
+
+ cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL);
+ if (!cec)
+ return -ENOMEM;
+
+ cec->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mmio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mmio))
+ return PTR_ERR(mmio);
+
+ cec->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "cec", mmio,
+ &stm32_cec_regmap_cfg);
+
+ if (IS_ERR(cec->regmap))
+ return PTR_ERR(cec->regmap);
+
+ cec->irq = platform_get_irq(pdev, 0);
+ if (cec->irq < 0)
+ return cec->irq;
+
+ ret = devm_request_threaded_irq(&pdev->dev, cec->irq,
+ stm32_cec_irq_handler,
+ stm32_cec_irq_thread,
+ 0,
+ pdev->name, cec);
+ if (ret)
+ return ret;
+
+ cec->clk_cec = devm_clk_get(&pdev->dev, "cec");
+ if (IS_ERR(cec->clk_cec)) {
+ dev_err(&pdev->dev, "Cannot get cec clock\n");
+ return PTR_ERR(cec->clk_cec);
+ }
+
+ ret = clk_prepare(cec->clk_cec);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to prepare cec clock\n");
+ return ret;
+ }
+
+ cec->clk_hdmi_cec = devm_clk_get(&pdev->dev, "hdmi-cec");
+ if (!IS_ERR(cec->clk_hdmi_cec)) {
+ ret = clk_prepare(cec->clk_hdmi_cec);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to prepare hdmi-cec clock\n");
+ return ret;
+ }
+ }
+
+ /*
+ * The CEC_CAP_PHYS_ADDR capability should be removed once a CEC
+ * notifier is available, for example when a DRM driver can provide
+ * the EDID.
+ */
+ cec->adap = cec_allocate_adapter(&stm32_cec_adap_ops, cec,
+ CEC_NAME, caps, CEC_MAX_LOG_ADDRS);
+ ret = PTR_ERR_OR_ZERO(cec->adap);
+ if (ret)
+ return ret;
+
+ ret = cec_register_adapter(cec->adap, &pdev->dev);
+ if (ret) {
+ cec_delete_adapter(cec->adap);
+ return ret;
+ }
+
+ cec_hw_init(cec);
+
+ platform_set_drvdata(pdev, cec);
+
+ return 0;
+}
+
+static int stm32_cec_remove(struct platform_device *pdev)
+{
+ struct stm32_cec *cec = platform_get_drvdata(pdev);
+
+ clk_unprepare(cec->clk_cec);
+ clk_unprepare(cec->clk_hdmi_cec);
+
+ cec_unregister_adapter(cec->adap);
+
+ return 0;
+}
+
+static const struct of_device_id stm32_cec_of_match[] = {
+ { .compatible = "st,stm32-cec" },
+ { /* end node */ }
+};
+MODULE_DEVICE_TABLE(of, stm32_cec_of_match);
+
+static struct platform_driver stm32_cec_driver = {
+ .probe = stm32_cec_probe,
+ .remove = stm32_cec_remove,
+ .driver = {
+ .name = CEC_NAME,
+ .of_match_table = stm32_cec_of_match,
+ },
+};
+
+module_platform_driver(stm32_cec_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <[email protected]>");
+MODULE_AUTHOR("Yannick Fertre <[email protected]>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 Consumer Electronics Control");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
new file mode 100644
index 000000000000..83d32a5d0f40
--- /dev/null
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -0,0 +1,1404 @@
+/*
+ * Driver for STM32 Digital Camera Memory Interface
+ *
+ * Copyright (C) STMicroelectronics SA 2017
+ * Authors: Yannick Fertre <[email protected]>
+ * Hugues Fruchet <[email protected]>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ *
+ * This driver is based on atmel_isi.c
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-image-sizes.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define DRV_NAME "stm32-dcmi"
+
+/* Registers offset for DCMI */
+#define DCMI_CR 0x00 /* Control Register */
+#define DCMI_SR 0x04 /* Status Register */
+#define DCMI_RIS 0x08 /* Raw Interrupt Status register */
+#define DCMI_IER 0x0C /* Interrupt Enable Register */
+#define DCMI_MIS 0x10 /* Masked Interrupt Status register */
+#define DCMI_ICR 0x14 /* Interrupt Clear Register */
+#define DCMI_ESCR 0x18 /* Embedded Synchronization Code Register */
+#define DCMI_ESUR 0x1C /* Embedded Synchronization Unmask Register */
+#define DCMI_CWSTRT 0x20 /* Crop Window STaRT */
+#define DCMI_CWSIZE 0x24 /* Crop Window SIZE */
+#define DCMI_DR 0x28 /* Data Register */
+#define DCMI_IDR 0x2C /* IDentifier Register */
+
+/* Bits definition for control register (DCMI_CR) */
+#define CR_CAPTURE BIT(0)
+#define CR_CM BIT(1)
+#define CR_CROP BIT(2)
+#define CR_JPEG BIT(3)
+#define CR_ESS BIT(4)
+#define CR_PCKPOL BIT(5)
+#define CR_HSPOL BIT(6)
+#define CR_VSPOL BIT(7)
+#define CR_FCRC_0 BIT(8)
+#define CR_FCRC_1 BIT(9)
+#define CR_EDM_0 BIT(10)
+#define CR_EDM_1 BIT(11)
+#define CR_ENABLE BIT(14)
+
+/* Bits definition for status register (DCMI_SR) */
+#define SR_HSYNC BIT(0)
+#define SR_VSYNC BIT(1)
+#define SR_FNE BIT(2)
+
+/*
+ * Bits definition for interrupt registers
+ * (DCMI_RIS, DCMI_IER, DCMI_MIS, DCMI_ICR)
+ */
+#define IT_FRAME BIT(0)
+#define IT_OVR BIT(1)
+#define IT_ERR BIT(2)
+#define IT_VSYNC BIT(3)
+#define IT_LINE BIT(4)
+
+enum state {
+ STOPPED = 0,
+ RUNNING,
+ STOPPING,
+};
+
+#define MIN_WIDTH 16U
+#define MAX_WIDTH 2048U
+#define MIN_HEIGHT 16U
+#define MAX_HEIGHT 2048U
+
+#define TIMEOUT_MS 1000
+
+struct dcmi_graph_entity {
+ struct device_node *node;
+
+ struct v4l2_async_subdev asd;
+ struct v4l2_subdev *subdev;
+};
+
+struct dcmi_format {
+ u32 fourcc;
+ u32 mbus_code;
+ u8 bpp;
+};
+
+struct dcmi_buf {
+ struct vb2_v4l2_buffer vb;
+ bool prepared;
+ dma_addr_t paddr;
+ size_t size;
+ struct list_head list;
+};
+
+struct stm32_dcmi {
+ /* Protects the variables shared with the interrupt handler */
+ spinlock_t irqlock;
+ struct device *dev;
+ void __iomem *regs;
+ struct resource *res;
+ struct reset_control *rstc;
+ int sequence;
+ struct list_head buffers;
+ struct dcmi_buf *active;
+
+ struct v4l2_device v4l2_dev;
+ struct video_device *vdev;
+ struct v4l2_async_notifier notifier;
+ struct dcmi_graph_entity entity;
+ struct v4l2_format fmt;
+
+ const struct dcmi_format **user_formats;
+ unsigned int num_user_formats;
+ const struct dcmi_format *current_fmt;
+
+ /* Protect this data structure */
+ struct mutex lock;
+ struct vb2_queue queue;
+
+ struct v4l2_fwnode_bus_parallel bus;
+ struct completion complete;
+ struct clk *mclk;
+ enum state state;
+ struct dma_chan *dma_chan;
+ dma_cookie_t dma_cookie;
+ u32 misr;
+ int errors_count;
+ int buffers_count;
+};
+
+static inline struct stm32_dcmi *notifier_to_dcmi(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct stm32_dcmi, notifier);
+}
+
+static inline u32 reg_read(void __iomem *base, u32 reg)
+{
+ return readl_relaxed(base + reg);
+}
+
+static inline void reg_write(void __iomem *base, u32 reg, u32 val)
+{
+ writel_relaxed(val, base + reg);
+}
+
+static inline void reg_set(void __iomem *base, u32 reg, u32 mask)
+{
+ reg_write(base, reg, reg_read(base, reg) | mask);
+}
+
+static inline void reg_clear(void __iomem *base, u32 reg, u32 mask)
+{
+ reg_write(base, reg, reg_read(base, reg) & ~mask);
+}
+
+static int dcmi_start_capture(struct stm32_dcmi *dcmi);
+
+static void dcmi_dma_callback(void *param)
+{
+ struct stm32_dcmi *dcmi = (struct stm32_dcmi *)param;
+ struct dma_chan *chan = dcmi->dma_chan;
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ spin_lock(&dcmi->irqlock);
+
+ /* Check DMA status */
+ status = dmaengine_tx_status(chan, dcmi->dma_cookie, &state);
+
+ switch (status) {
+ case DMA_IN_PROGRESS:
+ dev_dbg(dcmi->dev, "%s: Received DMA_IN_PROGRESS\n", __func__);
+ break;
+ case DMA_PAUSED:
+ dev_err(dcmi->dev, "%s: Received DMA_PAUSED\n", __func__);
+ break;
+ case DMA_ERROR:
+ dev_err(dcmi->dev, "%s: Received DMA_ERROR\n", __func__);
+ break;
+ case DMA_COMPLETE:
+ dev_dbg(dcmi->dev, "%s: Received DMA_COMPLETE\n", __func__);
+
+ if (dcmi->active) {
+ struct dcmi_buf *buf = dcmi->active;
+ struct vb2_v4l2_buffer *vbuf = &dcmi->active->vb;
+
+ vbuf->sequence = dcmi->sequence++;
+ vbuf->field = V4L2_FIELD_NONE;
+ vbuf->vb2_buf.timestamp = ktime_get_ns();
+ vb2_set_plane_payload(&vbuf->vb2_buf, 0, buf->size);
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
+ dev_dbg(dcmi->dev, "buffer[%d] done seq=%d\n",
+ vbuf->vb2_buf.index, vbuf->sequence);
+
+ dcmi->buffers_count++;
+ dcmi->active = NULL;
+ }
+
+ /* Restart a new DMA transfer with next buffer */
+ if (dcmi->state == RUNNING) {
+ if (list_empty(&dcmi->buffers)) {
+ dev_err(dcmi->dev, "%s: No more buffer queued, cannot capture buffer",
+ __func__);
+ dcmi->errors_count++;
+ dcmi->active = NULL;
+
+ spin_unlock(&dcmi->irqlock);
+ return;
+ }
+
+ dcmi->active = list_entry(dcmi->buffers.next,
+ struct dcmi_buf, list);
+
+ list_del_init(&dcmi->active->list);
+
+ if (dcmi_start_capture(dcmi)) {
+ dev_err(dcmi->dev, "%s: Cannot restart capture on DMA complete",
+ __func__);
+
+ spin_unlock(&dcmi->irqlock);
+ return;
+ }
+
+ /* Enable capture */
+ reg_set(dcmi->regs, DCMI_CR, CR_CAPTURE);
+ }
+
+ break;
+ default:
+ dev_err(dcmi->dev, "%s: Received unknown status\n", __func__);
+ break;
+ }
+
+ spin_unlock(&dcmi->irqlock);
+}
+
+static int dcmi_start_dma(struct stm32_dcmi *dcmi,
+ struct dcmi_buf *buf)
+{
+ struct dma_async_tx_descriptor *desc = NULL;
+ struct dma_slave_config config;
+ int ret;
+
+ memset(&config, 0, sizeof(config));
+
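+ /* The DMA source is the DCMI data register, read as 32-bit words */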
+ config.src_addr = (dma_addr_t)dcmi->res->start + DCMI_DR;
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ config.dst_maxburst = 4;
+
+ /* Configure DMA channel */
+ ret = dmaengine_slave_config(dcmi->dma_chan, &config);
+ if (ret < 0) {
+ dev_err(dcmi->dev, "%s: DMA channel config failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* Prepare a DMA transaction */
+ desc = dmaengine_prep_slave_single(dcmi->dma_chan, buf->paddr,
+ buf->size,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dcmi->dev, "%s: DMA dmaengine_prep_slave_single failed for buffer size %zu\n",
+ __func__, buf->size);
+ return -EINVAL;
+ }
+
+ /* Set completion callback routine for notification */
+ desc->callback = dcmi_dma_callback;
+ desc->callback_param = dcmi;
+
+ /* Push current DMA transaction in the pending queue */
+ dcmi->dma_cookie = dmaengine_submit(desc);
+
+ dma_async_issue_pending(dcmi->dma_chan);
+
+ return 0;
+}
+
+static int dcmi_start_capture(struct stm32_dcmi *dcmi)
+{
+ int ret;
+ struct dcmi_buf *buf = dcmi->active;
+
+ if (!buf)
+ return -EINVAL;
+
+ ret = dcmi_start_dma(dcmi, buf);
+ if (ret) {
+ dcmi->errors_count++;
+ return ret;
+ }
+
+ /* Enable capture */
+ reg_set(dcmi->regs, DCMI_CR, CR_CAPTURE);
+
+ return 0;
+}
+
+static irqreturn_t dcmi_irq_thread(int irq, void *arg)
+{
+ struct stm32_dcmi *dcmi = arg;
+
+ spin_lock(&dcmi->irqlock);
+
+ /* A capture stop has been requested */
+ if (dcmi->state == STOPPING) {
+ reg_clear(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);
+
+ dcmi->state = STOPPED;
+
+ complete(&dcmi->complete);
+
+ spin_unlock(&dcmi->irqlock);
+ return IRQ_HANDLED;
+ }
+
+ if ((dcmi->misr & IT_OVR) || (dcmi->misr & IT_ERR)) {
+ /*
+ * An overflow or an error has been detected,
+ * stop the current DMA transfer and restart it
+ */
+ dev_warn(dcmi->dev, "%s: Overflow or error detected\n",
+ __func__);
+
+ dcmi->errors_count++;
+ dmaengine_terminate_all(dcmi->dma_chan);
+
+ reg_set(dcmi->regs, DCMI_ICR, IT_FRAME | IT_OVR | IT_ERR);
+
+ dev_dbg(dcmi->dev, "Restarting capture after DCMI error\n");
+
+ if (dcmi_start_capture(dcmi)) {
+ dev_err(dcmi->dev, "%s: Cannot restart capture on overflow or error\n",
+ __func__);
+
+ spin_unlock(&dcmi->irqlock);
+ return IRQ_HANDLED;
+ }
+ }
+
+ spin_unlock(&dcmi->irqlock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dcmi_irq_callback(int irq, void *arg)
+{
+ struct stm32_dcmi *dcmi = arg;
+
+ spin_lock(&dcmi->irqlock);
+
+ dcmi->misr = reg_read(dcmi->regs, DCMI_MIS);
+
+ /* Clear interrupt */
+ reg_set(dcmi->regs, DCMI_ICR, IT_FRAME | IT_OVR | IT_ERR);
+
+ spin_unlock(&dcmi->irqlock);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int dcmi_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
+ unsigned int size;
+
+ size = dcmi->fmt.fmt.pix.sizeimage;
+
+ /* Make sure the image size is large enough */
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ dcmi->active = NULL;
+
+ dev_dbg(dcmi->dev, "Setup queue, count=%d, size=%d\n",
+ *nbuffers, size);
+
+ return 0;
+}
+
+static int dcmi_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
+
+ INIT_LIST_HEAD(&buf->list);
+
+ return 0;
+}
+
+static int dcmi_buf_prepare(struct vb2_buffer *vb)
+{
+ struct stm32_dcmi *dcmi = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
+ unsigned long size;
+
+ size = dcmi->fmt.fmt.pix.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(dcmi->dev, "%s data will not fit into plane (%lu < %lu)\n",
+ __func__, vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ if (!buf->prepared) {
+ /* Get memory addresses */
+ buf->paddr =
+ vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+ buf->size = vb2_plane_size(&buf->vb.vb2_buf, 0);
+ buf->prepared = true;
+
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
+
+ dev_dbg(dcmi->dev, "buffer[%d] phy=0x%pad size=%zu\n",
+ vb->index, &buf->paddr, buf->size);
+ }
+
+ return 0;
+}
+
+static void dcmi_buf_queue(struct vb2_buffer *vb)
+{
+ struct stm32_dcmi *dcmi = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct dcmi_buf *buf = container_of(vbuf, struct dcmi_buf, vb);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&dcmi->irqlock, flags);
+
+ if ((dcmi->state == RUNNING) && (!dcmi->active)) {
+ dcmi->active = buf;
+
+ dev_dbg(dcmi->dev, "Starting capture on buffer[%d] queued\n",
+ buf->vb.vb2_buf.index);
+
+ if (dcmi_start_capture(dcmi)) {
+ dev_err(dcmi->dev, "%s: Cannot restart capture on overflow or error\n",
+ __func__);
+
+ spin_unlock_irqrestore(&dcmi->irqlock, flags);
+ return;
+ }
+ } else {
+ /* Enqueue to video buffers list */
+ list_add_tail(&buf->list, &dcmi->buffers);
+ }
+
+ spin_unlock_irqrestore(&dcmi->irqlock, flags);
+}
+
+static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
+ struct dcmi_buf *buf, *node;
+ u32 val;
+ int ret;
+
+ ret = clk_enable(dcmi->mclk);
+ if (ret) {
+ dev_err(dcmi->dev, "%s: Failed to start streaming, cannot enable clock",
+ __func__);
+ goto err_release_buffers;
+ }
+
+ /* Enable stream on the sub device */
+ ret = v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD) {
+ dev_err(dcmi->dev, "%s: Failed to start streaming, subdev streamon error",
+ __func__);
+ goto err_disable_clock;
+ }
+
+ spin_lock_irq(&dcmi->irqlock);
+
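+ /* Configure bus width, synchronization and pixel clock polarities from the endpoint */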
+ val = reg_read(dcmi->regs, DCMI_CR);
+
+ val &= ~(CR_PCKPOL | CR_HSPOL | CR_VSPOL |
+ CR_EDM_0 | CR_EDM_1 | CR_FCRC_0 |
+ CR_FCRC_1 | CR_JPEG | CR_ESS);
+
+ /* Set bus width */
+ switch (dcmi->bus.bus_width) {
+ case 14:
+ val |= CR_EDM_0 | CR_EDM_1;
+ break;
+ case 12:
+ val |= CR_EDM_1;
+ break;
+ case 10:
+ val |= CR_EDM_0;
+ break;
+ default:
+ /* Set bus width to 8 bits by default */
+ break;
+ }
+
+ /* Set vertical synchronization polarity */
+ if (dcmi->bus.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ val |= CR_VSPOL;
+
+ /* Set horizontal synchronization polarity */
+ if (dcmi->bus.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ val |= CR_HSPOL;
+
+ /* Set pixel clock polarity */
+ if (dcmi->bus.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ val |= CR_PCKPOL;
+
+ reg_write(dcmi->regs, DCMI_CR, val);
+
+ /* Enable dcmi */
+ reg_set(dcmi->regs, DCMI_CR, CR_ENABLE);
+
+ dcmi->state = RUNNING;
+
+ dcmi->sequence = 0;
+ dcmi->errors_count = 0;
+ dcmi->buffers_count = 0;
+ dcmi->active = NULL;
+
+ /*
+ * Start transfer if at least one buffer has been queued,
+ * otherwise the transfer is deferred until a buffer is queued
+ */
+ if (list_empty(&dcmi->buffers)) {
+ dev_dbg(dcmi->dev, "Start streaming is deferred to next buffer queueing\n");
+ spin_unlock_irq(&dcmi->irqlock);
+ return 0;
+ }
+
+ dcmi->active = list_entry(dcmi->buffers.next, struct dcmi_buf, list);
+ list_del_init(&dcmi->active->list);
+
+ dev_dbg(dcmi->dev, "Start streaming, starting capture\n");
+
+ ret = dcmi_start_capture(dcmi);
+ if (ret) {
+ dev_err(dcmi->dev, "%s: Start streaming failed, cannot start capture",
+ __func__);
+
+ spin_unlock_irq(&dcmi->irqlock);
+ goto err_subdev_streamoff;
+ }
+
+ /* Enable interruptions */
+ reg_set(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);
+
+ spin_unlock_irq(&dcmi->irqlock);
+
+ return 0;
+
+err_subdev_streamoff:
+ v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 0);
+
+err_disable_clock:
+ clk_disable(dcmi->mclk);
+
+err_release_buffers:
+ spin_lock_irq(&dcmi->irqlock);
+ /*
+ * Return all buffers to vb2 in QUEUED state.
+ * This will give ownership back to userspace
+ */
+ if (dcmi->active) {
+ buf = dcmi->active;
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ dcmi->active = NULL;
+ }
+ list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
+ list_del_init(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
+ spin_unlock_irq(&dcmi->irqlock);
+
+ return ret;
+}
+
+static void dcmi_stop_streaming(struct vb2_queue *vq)
+{
+ struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
+ struct dcmi_buf *buf, *node;
+ unsigned long time_ms = msecs_to_jiffies(TIMEOUT_MS);
+ long timeout;
+ int ret;
+
+ /* Disable stream on the sub device */
+ ret = v4l2_subdev_call(dcmi->entity.subdev, video, s_stream, 0);
+ if (ret && ret != -ENOIOCTLCMD)
+ dev_err(dcmi->dev, "stream off failed in subdev\n");
+
+ dcmi->state = STOPPING;
+
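+ /* Wait for the interrupt thread to acknowledge the stop request */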
+ timeout = wait_for_completion_interruptible_timeout(&dcmi->complete,
+ time_ms);
+
+ spin_lock_irq(&dcmi->irqlock);
+
+ /* Disable interruptions */
+ reg_clear(dcmi->regs, DCMI_IER, IT_FRAME | IT_OVR | IT_ERR);
+
+ /* Disable DCMI */
+ reg_clear(dcmi->regs, DCMI_CR, CR_ENABLE);
+
+ if (!timeout) {
+ dev_err(dcmi->dev, "Timeout during stop streaming\n");
+ dcmi->state = STOPPED;
+ }
+
+ /* Return all queued buffers to vb2 in ERROR state */
+ if (dcmi->active) {
+ buf = dcmi->active;
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ dcmi->active = NULL;
+ }
+ list_for_each_entry_safe(buf, node, &dcmi->buffers, list) {
+ list_del_init(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ spin_unlock_irq(&dcmi->irqlock);
+
+ /* Stop all pending DMA operations */
+ dmaengine_terminate_all(dcmi->dma_chan);
+
+ clk_disable(dcmi->mclk);
+
+ dev_dbg(dcmi->dev, "Stop streaming, errors=%d buffers=%d\n",
+ dcmi->errors_count, dcmi->buffers_count);
+}
+
+static struct vb2_ops dcmi_video_qops = {
+ .queue_setup = dcmi_queue_setup,
+ .buf_init = dcmi_buf_init,
+ .buf_prepare = dcmi_buf_prepare,
+ .buf_queue = dcmi_buf_queue,
+ .start_streaming = dcmi_start_streaming,
+ .stop_streaming = dcmi_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int dcmi_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+
+ *fmt = dcmi->fmt;
+
+ return 0;
+}
+
+static const struct dcmi_format *find_format_by_fourcc(struct stm32_dcmi *dcmi,
+ unsigned int fourcc)
+{
+ unsigned int num_formats = dcmi->num_user_formats;
+ const struct dcmi_format *fmt;
+ unsigned int i;
+
+ for (i = 0; i < num_formats; i++) {
+ fmt = dcmi->user_formats[i];
+ if (fmt->fourcc == fourcc)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
+ const struct dcmi_format **current_fmt)
+{
+ const struct dcmi_format *dcmi_fmt;
+ struct v4l2_pix_format *pixfmt = &f->fmt.pix;
+ struct v4l2_subdev_pad_config pad_cfg;
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ };
+ int ret;
+
+ dcmi_fmt = find_format_by_fourcc(dcmi, pixfmt->pixelformat);
+ if (!dcmi_fmt) {
+ dcmi_fmt = dcmi->user_formats[dcmi->num_user_formats - 1];
+ pixfmt->pixelformat = dcmi_fmt->fourcc;
+ }
+
+ /* Limit to hardware capabilities */
+ pixfmt->width = clamp(pixfmt->width, MIN_WIDTH, MAX_WIDTH);
+ pixfmt->height = clamp(pixfmt->height, MIN_HEIGHT, MAX_HEIGHT);
+
+ v4l2_fill_mbus_format(&format.format, pixfmt, dcmi_fmt->mbus_code);
+ ret = v4l2_subdev_call(dcmi->entity.subdev, pad, set_fmt,
+ &pad_cfg, &format);
+ if (ret < 0)
+ return ret;
+
+ v4l2_fill_pix_format(pixfmt, &format.format);
+
+ pixfmt->field = V4L2_FIELD_NONE;
+ pixfmt->bytesperline = pixfmt->width * dcmi_fmt->bpp;
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+
+ if (current_fmt)
+ *current_fmt = dcmi_fmt;
+
+ return 0;
+}
+
+static int dcmi_set_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f)
+{
+ struct v4l2_subdev_format format = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ const struct dcmi_format *current_fmt;
+ int ret;
+
+ ret = dcmi_try_fmt(dcmi, f, &current_fmt);
+ if (ret)
+ return ret;
+
+ v4l2_fill_mbus_format(&format.format, &f->fmt.pix,
+ current_fmt->mbus_code);
+ ret = v4l2_subdev_call(dcmi->entity.subdev, pad,
+ set_fmt, NULL, &format);
+ if (ret < 0)
+ return ret;
+
+ dcmi->fmt = *f;
+ dcmi->current_fmt = current_fmt;
+
+ return 0;
+}
+
+static int dcmi_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+
+ if (vb2_is_streaming(&dcmi->queue))
+ return -EBUSY;
+
+ return dcmi_set_fmt(dcmi, f);
+}
+
+static int dcmi_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+
+ return dcmi_try_fmt(dcmi, f, NULL);
+}
+
+static int dcmi_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+
+ if (f->index >= dcmi->num_user_formats)
+ return -EINVAL;
+
+ f->pixelformat = dcmi->user_formats[f->index]->fourcc;
+ return 0;
+}
+
+static int dcmi_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->driver, DRV_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, "STM32 Camera Memory Interface",
+ sizeof(cap->card));
+ strlcpy(cap->bus_info, "platform:dcmi", sizeof(cap->bus_info));
+ return 0;
+}
+
+static int dcmi_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ if (i->index != 0)
+ return -EINVAL;
+
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ strlcpy(i->name, "Camera", sizeof(i->name));
+ return 0;
+}
+
+static int dcmi_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ *i = 0;
+ return 0;
+}
+
+static int dcmi_s_input(struct file *file, void *priv, unsigned int i)
+{
+ if (i > 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int dcmi_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+ const struct dcmi_format *dcmi_fmt;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = fsize->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ dcmi_fmt = find_format_by_fourcc(dcmi, fsize->pixel_format);
+ if (!dcmi_fmt)
+ return -EINVAL;
+
+ fse.code = dcmi_fmt->mbus_code;
+
+ ret = v4l2_subdev_call(dcmi->entity.subdev, pad, enum_frame_size,
+ NULL, &fse);
+ if (ret)
+ return ret;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.max_width;
+ fsize->discrete.height = fse.max_height;
+
+ return 0;
+}
+
+static int dcmi_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+ const struct dcmi_format *dcmi_fmt;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = fival->index,
+ .width = fival->width,
+ .height = fival->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ dcmi_fmt = find_format_by_fourcc(dcmi, fival->pixel_format);
+ if (!dcmi_fmt)
+ return -EINVAL;
+
+ fie.code = dcmi_fmt->mbus_code;
+
+ ret = v4l2_subdev_call(dcmi->entity.subdev, pad,
+ enum_frame_interval, NULL, &fie);
+ if (ret)
+ return ret;
+
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = fie.interval;
+
+ return 0;
+}
+
+static const struct of_device_id stm32_dcmi_of_match[] = {
+ { .compatible = "st,stm32-dcmi"},
+ { /* end node */ },
+};
+MODULE_DEVICE_TABLE(of, stm32_dcmi_of_match);
+
+static int dcmi_open(struct file *file)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+ struct v4l2_subdev *sd = dcmi->entity.subdev;
+ int ret;
+
+ if (mutex_lock_interruptible(&dcmi->lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto unlock;
+
+ if (!v4l2_fh_is_singular_file(file))
+ goto fh_rel;
+
+ ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto fh_rel;
+
+ ret = dcmi_set_fmt(dcmi, &dcmi->fmt);
+ if (ret)
+ v4l2_subdev_call(sd, core, s_power, 0);
+fh_rel:
+ if (ret)
+ v4l2_fh_release(file);
+unlock:
+ mutex_unlock(&dcmi->lock);
+ return ret;
+}
+
+static int dcmi_release(struct file *file)
+{
+ struct stm32_dcmi *dcmi = video_drvdata(file);
+ struct v4l2_subdev *sd = dcmi->entity.subdev;
+ bool fh_singular;
+ int ret;
+
+ mutex_lock(&dcmi->lock);
+
+ fh_singular = v4l2_fh_is_singular_file(file);
+
+ ret = _vb2_fop_release(file, NULL);
+
+ if (fh_singular)
+ v4l2_subdev_call(sd, core, s_power, 0);
+
+ mutex_unlock(&dcmi->lock);
+
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops dcmi_ioctl_ops = {
+ .vidioc_querycap = dcmi_querycap,
+
+ .vidioc_try_fmt_vid_cap = dcmi_try_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = dcmi_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = dcmi_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = dcmi_enum_fmt_vid_cap,
+
+ .vidioc_enum_input = dcmi_enum_input,
+ .vidioc_g_input = dcmi_g_input,
+ .vidioc_s_input = dcmi_s_input,
+
+ .vidioc_enum_framesizes = dcmi_enum_framesizes,
+ .vidioc_enum_frameintervals = dcmi_enum_frameintervals,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_file_operations dcmi_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = dcmi_open,
+ .release = dcmi_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+#ifndef CONFIG_MMU
+ .get_unmapped_area = vb2_fop_get_unmapped_area,
+#endif
+ .read = vb2_fop_read,
+};
+
+static int dcmi_set_default_fmt(struct stm32_dcmi *dcmi)
+{
+ struct v4l2_format f = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .fmt.pix = {
+ .width = CIF_WIDTH,
+ .height = CIF_HEIGHT,
+ .field = V4L2_FIELD_NONE,
+ .pixelformat = dcmi->user_formats[0]->fourcc,
+ },
+ };
+ int ret;
+
+ ret = dcmi_try_fmt(dcmi, &f, NULL);
+ if (ret)
+ return ret;
+ dcmi->current_fmt = dcmi->user_formats[0];
+ dcmi->fmt = f;
+ return 0;
+}
+
+static const struct dcmi_format dcmi_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ }, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .bpp = 2,
+ },
+};
+
+static int dcmi_formats_init(struct stm32_dcmi *dcmi)
+{
+ const struct dcmi_format *dcmi_fmts[ARRAY_SIZE(dcmi_formats)];
+ unsigned int num_fmts = 0, i, j;
+ struct v4l2_subdev *subdev = dcmi->entity.subdev;
+ struct v4l2_subdev_mbus_code_enum mbus_code = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+
+ while (!v4l2_subdev_call(subdev, pad, enum_mbus_code,
+ NULL, &mbus_code)) {
+ for (i = 0; i < ARRAY_SIZE(dcmi_formats); i++) {
+ if (dcmi_formats[i].mbus_code != mbus_code.code)
+ continue;
+
+ /* Code supported, have we got this fourcc yet? */
+ for (j = 0; j < num_fmts; j++)
+ if (dcmi_fmts[j]->fourcc ==
+ dcmi_formats[i].fourcc)
+ /* Already available */
+ break;
+ if (j == num_fmts)
+ /* New */
+ dcmi_fmts[num_fmts++] = dcmi_formats + i;
+ }
+ mbus_code.index++;
+ }
+
+ if (!num_fmts)
+ return -ENXIO;
+
+ dcmi->num_user_formats = num_fmts;
+ dcmi->user_formats = devm_kcalloc(dcmi->dev,
+ num_fmts, sizeof(struct dcmi_format *),
+ GFP_KERNEL);
+ if (!dcmi->user_formats) {
+ dev_err(dcmi->dev, "could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ memcpy(dcmi->user_formats, dcmi_fmts,
+ num_fmts * sizeof(struct dcmi_format *));
+ dcmi->current_fmt = dcmi->user_formats[0];
+
+ return 0;
+}
+
+static int dcmi_graph_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
+ int ret;
+
+ dcmi->vdev->ctrl_handler = dcmi->entity.subdev->ctrl_handler;
+ ret = dcmi_formats_init(dcmi);
+ if (ret) {
+ dev_err(dcmi->dev, "No supported mediabus format found\n");
+ return ret;
+ }
+
+ ret = dcmi_set_default_fmt(dcmi);
+ if (ret) {
+ dev_err(dcmi->dev, "Could not set default format\n");
+ return ret;
+ }
+
+ ret = video_register_device(dcmi->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ dev_err(dcmi->dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ dev_dbg(dcmi->dev, "Device registered as %s\n",
+ video_device_node_name(dcmi->vdev));
+ return 0;
+}
+
+static void dcmi_graph_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
+
+ dev_dbg(dcmi->dev, "Removing %s\n", video_device_node_name(dcmi->vdev));
+
+ /* video_unregister_device() checks internally whether vdev has been registered */
+ video_unregister_device(dcmi->vdev);
+}
+
+static int dcmi_graph_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
+
+ dev_dbg(dcmi->dev, "Subdev %s bound\n", subdev->name);
+
+ dcmi->entity.subdev = subdev;
+
+ return 0;
+}
+
+static int dcmi_graph_parse(struct stm32_dcmi *dcmi, struct device_node *node)
+{
+ struct device_node *ep = NULL;
+ struct device_node *remote;
+
+ while (1) {
+ ep = of_graph_get_next_endpoint(node, ep);
+ if (!ep)
+ return -EINVAL;
+
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ of_node_put(ep);
+ return -EINVAL;
+ }
+
+ /* Remote node to connect */
+ dcmi->entity.node = remote;
+ dcmi->entity.asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ dcmi->entity.asd.match.fwnode.fwnode = of_fwnode_handle(remote);
+ return 0;
+ }
+}
+
+static int dcmi_graph_init(struct stm32_dcmi *dcmi)
+{
+ struct v4l2_async_subdev **subdevs = NULL;
+ int ret;
+
+ /* Parse the graph to extract a list of subdevice DT nodes. */
+ ret = dcmi_graph_parse(dcmi, dcmi->dev->of_node);
+ if (ret < 0) {
+ dev_err(dcmi->dev, "Graph parsing failed\n");
+ return ret;
+ }
+
+ /* Register the subdevices notifier. */
+ subdevs = devm_kzalloc(dcmi->dev, sizeof(*subdevs), GFP_KERNEL);
+ if (!subdevs) {
+ of_node_put(dcmi->entity.node);
+ return -ENOMEM;
+ }
+
+ subdevs[0] = &dcmi->entity.asd;
+
+ dcmi->notifier.subdevs = subdevs;
+ dcmi->notifier.num_subdevs = 1;
+ dcmi->notifier.bound = dcmi_graph_notify_bound;
+ dcmi->notifier.unbind = dcmi_graph_notify_unbind;
+ dcmi->notifier.complete = dcmi_graph_notify_complete;
+
+ ret = v4l2_async_notifier_register(&dcmi->v4l2_dev, &dcmi->notifier);
+ if (ret < 0) {
+ dev_err(dcmi->dev, "Notifier registration failed\n");
+ of_node_put(dcmi->entity.node);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dcmi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match = NULL;
+ struct v4l2_fwnode_endpoint ep;
+ struct stm32_dcmi *dcmi;
+ struct vb2_queue *q;
+ struct dma_chan *chan;
+ struct clk *mclk;
+ int irq;
+ int ret = 0;
+
+ match = of_match_device(of_match_ptr(stm32_dcmi_of_match), &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Could not find a match in devicetree\n");
+ return -ENODEV;
+ }
+
+ dcmi = devm_kzalloc(&pdev->dev, sizeof(struct stm32_dcmi), GFP_KERNEL);
+ if (!dcmi)
+ return -ENOMEM;
+
+ dcmi->rstc = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(dcmi->rstc)) {
+ dev_err(&pdev->dev, "Could not get reset control\n");
+ return -ENODEV;
+ }
+
+ /* Get bus characteristics from devicetree */
+ np = of_graph_get_next_endpoint(np, NULL);
+ if (!np) {
+ dev_err(&pdev->dev, "Could not find the endpoint\n");
+ of_node_put(np);
+ return -ENODEV;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &ep);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not parse the endpoint\n");
+ of_node_put(np);
+ return -ENODEV;
+ }
+
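+ /* Only a parallel bus endpoint is supported */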
+ if (ep.bus_type == V4L2_MBUS_CSI2) {
+ dev_err(&pdev->dev, "CSI bus not supported\n");
+ of_node_put(np);
+ return -ENODEV;
+ }
+ dcmi->bus.flags = ep.bus.parallel.flags;
+ dcmi->bus.bus_width = ep.bus.parallel.bus_width;
+ dcmi->bus.data_shift = ep.bus.parallel.data_shift;
+
+ of_node_put(np);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "Could not get irq\n");
+ return -ENODEV;
+ }
+
+ dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!dcmi->res) {
+ dev_err(&pdev->dev, "Could not get resource\n");
+ return -ENODEV;
+ }
+
+ dcmi->regs = devm_ioremap_resource(&pdev->dev, dcmi->res);
+ if (IS_ERR(dcmi->regs)) {
+ dev_err(&pdev->dev, "Could not map registers\n");
+ return PTR_ERR(dcmi->regs);
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, dcmi_irq_callback,
+ dcmi_irq_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), dcmi);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
+ return -ENODEV;
+ }
+
+ mclk = devm_clk_get(&pdev->dev, "mclk");
+ if (IS_ERR(mclk)) {
+ dev_err(&pdev->dev, "Unable to get mclk\n");
+ return PTR_ERR(mclk);
+ }
+
+ chan = dma_request_slave_channel(&pdev->dev, "tx");
+ if (!chan) {
+ dev_info(&pdev->dev, "Unable to request DMA channel, defer probing\n");
+ return -EPROBE_DEFER;
+ }
+
+ ret = clk_prepare(mclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to prepare mclk %p\n", mclk);
+ goto err_dma_release;
+ }
+
+ spin_lock_init(&dcmi->irqlock);
+ mutex_init(&dcmi->lock);
+ init_completion(&dcmi->complete);
+ INIT_LIST_HEAD(&dcmi->buffers);
+
+ dcmi->dev = &pdev->dev;
+ dcmi->mclk = mclk;
+ dcmi->state = STOPPED;
+ dcmi->dma_chan = chan;
+
+ q = &dcmi->queue;
+
+ /* Initialize the top-level structure */
+ ret = v4l2_device_register(&pdev->dev, &dcmi->v4l2_dev);
+ if (ret)
+ goto err_clk_unprepare;
+
+ dcmi->vdev = video_device_alloc();
+ if (!dcmi->vdev) {
+ ret = -ENOMEM;
+ goto err_device_unregister;
+ }
+
+ /* Video node */
+ dcmi->vdev->fops = &dcmi_fops;
+ dcmi->vdev->v4l2_dev = &dcmi->v4l2_dev;
+ dcmi->vdev->queue = &dcmi->queue;
+ strlcpy(dcmi->vdev->name, KBUILD_MODNAME, sizeof(dcmi->vdev->name));
+ dcmi->vdev->release = video_device_release;
+ dcmi->vdev->ioctl_ops = &dcmi_ioctl_ops;
+ dcmi->vdev->lock = &dcmi->lock;
+ dcmi->vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ video_set_drvdata(dcmi->vdev, dcmi);
+
+ /* Buffer queue */
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
+ q->lock = &dcmi->lock;
+ q->drv_priv = dcmi;
+ q->buf_struct_size = sizeof(struct dcmi_buf);
+ q->ops = &dcmi_video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->min_buffers_needed = 2;
+ q->dev = &pdev->dev;
+
+ ret = vb2_queue_init(q);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to initialize vb2 queue\n");
+ goto err_device_release;
+ }
+
+ ret = dcmi_graph_init(dcmi);
+ if (ret < 0)
+ goto err_device_release;
+
+ /* Reset device */
+ ret = reset_control_assert(dcmi->rstc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to assert the reset line\n");
+ goto err_device_release;
+ }
+
+ usleep_range(3000, 5000);
+
+ ret = reset_control_deassert(dcmi->rstc);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to deassert the reset line\n");
+ goto err_device_release;
+ }
+
+ dev_info(&pdev->dev, "Probe done\n");
+
+ platform_set_drvdata(pdev, dcmi);
+ return 0;
+
+err_device_release:
+ video_device_release(dcmi->vdev);
+err_device_unregister:
+ v4l2_device_unregister(&dcmi->v4l2_dev);
+err_clk_unprepare:
+ clk_unprepare(dcmi->mclk);
+err_dma_release:
+ dma_release_channel(dcmi->dma_chan);
+
+ return ret;
+}
+
+static int dcmi_remove(struct platform_device *pdev)
+{
+ struct stm32_dcmi *dcmi = platform_get_drvdata(pdev);
+
+ v4l2_async_notifier_unregister(&dcmi->notifier);
+ v4l2_device_unregister(&dcmi->v4l2_dev);
+ clk_unprepare(dcmi->mclk);
+ dma_release_channel(dcmi->dma_chan);
+
+ return 0;
+}
+
+static struct platform_driver stm32_dcmi_driver = {
+ .probe = dcmi_probe,
+ .remove = dcmi_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(stm32_dcmi_of_match),
+ },
+};
+
+module_platform_driver(stm32_dcmi_driver);
+
+MODULE_AUTHOR("Yannick Fertre <[email protected]>");
+MODULE_AUTHOR("Hugues Fruchet <[email protected]>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 Digital Camera Memory Interface driver");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("video");
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 7a058b6e03d0..177faa36bc16 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -21,7 +21,7 @@
#include <linux/of_device.h>
#include <linux/of_graph.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-async.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
@@ -270,7 +270,7 @@ struct cal_ctx {
struct video_device vdev;
struct v4l2_async_notifier notifier;
struct v4l2_subdev *sensor;
- struct v4l2_of_endpoint endpoint;
+ struct v4l2_fwnode_endpoint endpoint;
struct v4l2_async_subdev asd;
struct v4l2_async_subdev *asd_list[1];
@@ -608,7 +608,8 @@ static void csi2_lane_config(struct cal_ctx *ctx)
u32 val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
u32 lane_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK;
u32 polarity_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK;
- struct v4l2_of_bus_mipi_csi2 *mipi_csi2 = &ctx->endpoint.bus.mipi_csi2;
+ struct v4l2_fwnode_bus_mipi_csi2 *mipi_csi2 =
+ &ctx->endpoint.bus.mipi_csi2;
int lane;
set_field(&val, mipi_csi2->clock_lane + 1, lane_mask);
@@ -1643,7 +1644,7 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
struct platform_device *pdev = ctx->dev->pdev;
struct device_node *ep_node, *port, *remote_ep,
*sensor_node, *parent;
- struct v4l2_of_endpoint *endpoint;
+ struct v4l2_fwnode_endpoint *endpoint;
struct v4l2_async_subdev *asd;
u32 regval = 0;
int ret, index, found_port = 0, lane;
@@ -1698,15 +1699,15 @@ static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
ctx_dbg(3, ctx, "can't get remote parent\n");
goto cleanup_exit;
}
- asd->match_type = V4L2_ASYNC_MATCH_OF;
- asd->match.of.node = sensor_node;
+ asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ asd->match.fwnode.fwnode = of_fwnode_handle(sensor_node);
remote_ep = of_parse_phandle(ep_node, "remote-endpoint", 0);
if (!remote_ep) {
ctx_dbg(3, ctx, "can't get remote-endpoint\n");
goto cleanup_exit;
}
- v4l2_of_parse_endpoint(remote_ep, endpoint);
+ v4l2_fwnode_endpoint_parse(of_fwnode_handle(remote_ep), endpoint);
if (endpoint->bus_type != V4L2_MBUS_CSI2) {
ctx_err(ctx, "Port:%d sub-device %s is not a CSI2 device\n",
diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
new file mode 100644
index 000000000000..665744716f73
--- /dev/null
+++ b/drivers/media/platform/video-mux.c
@@ -0,0 +1,334 @@
+/*
+ * video stream multiplexer controlled via mux control
+ *
+ * Copyright (C) 2013 Pengutronix, Sascha Hauer <[email protected]>
+ * Copyright (C) 2016-2017 Pengutronix, Philipp Zabel <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+struct video_mux {
+ struct v4l2_subdev subdev;
+ struct media_pad *pads;
+ struct v4l2_mbus_framefmt *format_mbus;
+ struct regmap_field *field;
+ struct mutex lock;
+ int active;
+};
+
+static inline struct video_mux *v4l2_subdev_to_video_mux(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct video_mux, subdev);
+}
+
+static int video_mux_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
+ int ret = 0;
+
+ /*
+ * The mux state is determined by the enabled sink pad link.
+ * Enabling or disabling the source pad link has no effect.
+ */
+ if (local->flags & MEDIA_PAD_FL_SOURCE)
+ return 0;
+
+ dev_dbg(sd->dev, "link setup '%s':%d->'%s':%d[%d]",
+ remote->entity->name, remote->index, local->entity->name,
+ local->index, flags & MEDIA_LNK_FL_ENABLED);
+
+ mutex_lock(&vmux->lock);
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (vmux->active == local->index)
+ goto out;
+
+ if (vmux->active >= 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ dev_dbg(sd->dev, "setting %d active\n", local->index);
+ ret = regmap_field_write(vmux->field, local->index);
+ if (ret < 0)
+ goto out;
+ vmux->active = local->index;
+ } else {
+ if (vmux->active != local->index)
+ goto out;
+
+ dev_dbg(sd->dev, "going inactive\n");
+ vmux->active = -1;
+ }
+
+out:
+ mutex_unlock(&vmux->lock);
+ return ret;
+}
+
+static const struct media_entity_operations video_mux_ops = {
+ .link_setup = video_mux_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int video_mux_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
+ struct v4l2_subdev *upstream_sd;
+ struct media_pad *pad;
+
+ if (vmux->active == -1) {
+ dev_err(sd->dev, "Can not start streaming on inactive mux\n");
+ return -EINVAL;
+ }
+
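+ /* Find the upstream subdev connected to the active sink pad */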
+ pad = media_entity_remote_pad(&sd->entity.pads[vmux->active]);
+ if (!pad) {
+ dev_err(sd->dev, "Failed to find remote source pad\n");
+ return -ENOLINK;
+ }
+
+ if (!is_media_entity_v4l2_subdev(pad->entity)) {
+ dev_err(sd->dev, "Upstream entity is not a v4l2 subdev\n");
+ return -ENODEV;
+ }
+
+ upstream_sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ return v4l2_subdev_call(upstream_sd, video, s_stream, enable);
+}
+
+static const struct v4l2_subdev_video_ops video_mux_subdev_video_ops = {
+ .s_stream = video_mux_s_stream,
+};
+
+static struct v4l2_mbus_framefmt *
+__video_mux_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, u32 which)
+{
+ struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
+
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(sd, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &vmux->format_mbus[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int video_mux_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
+
+ mutex_lock(&vmux->lock);
+
+ sdformat->format = *__video_mux_get_pad_format(sd, cfg, sdformat->pad,
+ sdformat->which);
+
+ mutex_unlock(&vmux->lock);
+
+ return 0;
+}
+
+static int video_mux_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct video_mux *vmux = v4l2_subdev_to_video_mux(sd);
+ struct v4l2_mbus_framefmt *mbusformat;
+ struct media_pad *pad = &vmux->pads[sdformat->pad];
+
+ mbusformat = __video_mux_get_pad_format(sd, cfg, sdformat->pad,
+ sdformat->which);
+ if (!mbusformat)
+ return -EINVAL;
+
+ mutex_lock(&vmux->lock);
+
+ /* Source pad mirrors active sink pad, no limitations on sink pads */
+ if ((pad->flags & MEDIA_PAD_FL_SOURCE) && vmux->active >= 0)
+ sdformat->format = vmux->format_mbus[vmux->active];
+
+ *mbusformat = sdformat->format;
+
+ mutex_unlock(&vmux->lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops video_mux_pad_ops = {
+ .get_fmt = video_mux_get_format,
+ .set_fmt = video_mux_set_format,
+};
+
+static const struct v4l2_subdev_ops video_mux_subdev_ops = {
+ .pad = &video_mux_pad_ops,
+ .video = &video_mux_subdev_video_ops,
+};
+
+static int video_mux_probe_mmio_mux(struct video_mux *vmux)
+{
+ struct device *dev = vmux->subdev.dev;
+ struct of_phandle_args args;
+ struct reg_field field;
+ struct regmap *regmap;
+ u32 reg, mask;
+ int ret;
+
+ ret = of_parse_phandle_with_args(dev->of_node, "mux-controls",
+ "#mux-control-cells", 0, &args);
+ if (ret)
+ return ret;
+
+ if (!of_device_is_compatible(args.np, "mmio-mux"))
+ return -EINVAL;
+
+ regmap = syscon_node_to_regmap(args.np->parent);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
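+ /* mux-reg-masks holds (register, mask) pairs indexed by the mux control */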
+ ret = of_property_read_u32_index(args.np, "mux-reg-masks",
+ 2 * args.args[0], &reg);
+ if (!ret)
+ ret = of_property_read_u32_index(args.np, "mux-reg-masks",
+ 2 * args.args[0] + 1, &mask);
+ if (ret < 0)
+ return ret;
+
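+ /* Derive the regmap_field bit range from the pre-shifted mask */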
+ field.reg = reg;
+ field.msb = fls(mask) - 1;
+ field.lsb = ffs(mask) - 1;
+
+ vmux->field = devm_regmap_field_alloc(dev, regmap, field);
+ if (IS_ERR(vmux->field))
+ return PTR_ERR(vmux->field);
+
+ return 0;
+}
+
+static int video_mux_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *ep;
+ struct video_mux *vmux;
+ unsigned int num_pads = 0;
+ int ret;
+ int i;
+
+ vmux = devm_kzalloc(dev, sizeof(*vmux), GFP_KERNEL);
+ if (!vmux)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, vmux);
+
+ v4l2_subdev_init(&vmux->subdev, &video_mux_subdev_ops);
+ snprintf(vmux->subdev.name, sizeof(vmux->subdev.name), "%s", np->name);
+ vmux->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ vmux->subdev.dev = dev;
+
+ /*
+ * The largest numbered port is the output port. It determines
+ * the total number of pads.
+ */
+ for_each_endpoint_of_node(np, ep) {
+ struct of_endpoint endpoint;
+
+ of_graph_parse_endpoint(ep, &endpoint);
+ num_pads = max(num_pads, endpoint.port + 1);
+ }
+
+ if (num_pads < 2) {
+ dev_err(dev, "Not enough ports %d\n", num_pads);
+ return -EINVAL;
+ }
+
+ ret = video_mux_probe_mmio_mux(vmux);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get mux: %d\n", ret);
+ return ret;
+ }
+
+ mutex_init(&vmux->lock);
+ vmux->active = -1;
+ vmux->pads = devm_kcalloc(dev, num_pads, sizeof(*vmux->pads),
+ GFP_KERNEL);
+ vmux->format_mbus = devm_kcalloc(dev, num_pads,
+ sizeof(*vmux->format_mbus),
+ GFP_KERNEL);
+
+ for (i = 0; i < num_pads - 1; i++)
+ vmux->pads[i].flags = MEDIA_PAD_FL_SINK;
+ vmux->pads[num_pads - 1].flags = MEDIA_PAD_FL_SOURCE;
+
+ vmux->subdev.entity.function = MEDIA_ENT_F_VID_MUX;
+ ret = media_entity_pads_init(&vmux->subdev.entity, num_pads,
+ vmux->pads);
+ if (ret < 0)
+ return ret;
+
+ vmux->subdev.entity.ops = &video_mux_ops;
+
+ return v4l2_async_register_subdev(&vmux->subdev);
+}
+
+static int video_mux_remove(struct platform_device *pdev)
+{
+ struct video_mux *vmux = platform_get_drvdata(pdev);
+ struct v4l2_subdev *sd = &vmux->subdev;
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+
+ return 0;
+}
+
+static const struct of_device_id video_mux_dt_ids[] = {
+ { .compatible = "video-mux", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, video_mux_dt_ids);
+
+static struct platform_driver video_mux_driver = {
+ .probe = video_mux_probe,
+ .remove = video_mux_remove,
+ .driver = {
+ .of_match_table = video_mux_dt_ids,
+ .name = "video-mux",
+ },
+};
+
+module_platform_driver(video_mux_driver);
+
+MODULE_DESCRIPTION("video stream multiplexer");
+MODULE_AUTHOR("Sascha Hauer, Pengutronix");
+MODULE_AUTHOR("Philipp Zabel, Pengutronix");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/Kconfig b/drivers/media/platform/vimc/Kconfig
index a18f6352c422..71c9fe7d3370 100644
--- a/drivers/media/platform/vimc/Kconfig
+++ b/drivers/media/platform/vimc/Kconfig
@@ -2,6 +2,7 @@ config VIDEO_VIMC
tristate "Virtual Media Controller Driver (VIMC)"
depends on VIDEO_DEV && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_VMALLOC
+ select VIDEO_V4L2_TPG
default n
---help---
Skeleton driver for Virtual Media Controller
diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/platform/vimc/Makefile
index c45195e5e05c..68c5d9804c11 100644
--- a/drivers/media/platform/vimc/Makefile
+++ b/drivers/media/platform/vimc/Makefile
@@ -1,3 +1,9 @@
-vimc-objs := vimc-core.o vimc-capture.o vimc-sensor.o
+vimc-objs := vimc-core.o
+vimc_capture-objs := vimc-capture.o
+vimc_common-objs := vimc-common.o
+vimc_debayer-objs := vimc-debayer.o
+vimc_scaler-objs := vimc-scaler.o
+vimc_sensor-objs := vimc-sensor.o
-obj-$(CONFIG_VIDEO_VIMC) += vimc.o
+obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc_capture.o vimc_common.o vimc_debayer.o \
+ vimc_scaler.o vimc_sensor.o
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
index 9adb06d7e13d..14cb32e21130 100644
--- a/drivers/media/platform/vimc/vimc-capture.c
+++ b/drivers/media/platform/vimc/vimc-capture.c
@@ -15,15 +15,21 @@
*
*/
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>
-#include "vimc-capture.h"
+#include "vimc-common.h"
+
+#define VIMC_CAP_DRV_NAME "vimc-capture"
struct vimc_cap_device {
struct vimc_ent_device ved;
struct video_device vdev;
+ struct device *dev;
struct v4l2_pix_format format;
struct vb2_queue queue;
struct list_head buf_list;
@@ -40,6 +46,14 @@ struct vimc_cap_device {
struct media_pipeline pipe;
};
+static const struct v4l2_pix_format fmt_default = {
+ .width = 640,
+ .height = 480,
+ .pixelformat = V4L2_PIX_FMT_RGB24,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_DEFAULT,
+};
+
struct vimc_cap_buffer {
/*
* struct vb2_v4l2_buffer must be the first element
@@ -64,7 +78,16 @@ static int vimc_cap_querycap(struct file *file, void *priv,
return 0;
}
-static int vimc_cap_fmt_vid_cap(struct file *file, void *priv,
+static void vimc_cap_get_format(struct vimc_ent_device *ved,
+ struct v4l2_pix_format *fmt)
+{
+ struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
+ ved);
+
+ *fmt = vcap->format;
+}
+
+static int vimc_cap_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct vimc_cap_device *vcap = video_drvdata(file);
@@ -74,16 +97,98 @@ static int vimc_cap_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
+static int vimc_cap_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format *format = &f->fmt.pix;
+ const struct vimc_pix_map *vpix;
+
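+ /* Clamp dimensions to the supported range and round down to even values */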
+ format->width = clamp_t(u32, format->width, VIMC_FRAME_MIN_WIDTH,
+ VIMC_FRAME_MAX_WIDTH) & ~1;
+ format->height = clamp_t(u32, format->height, VIMC_FRAME_MIN_HEIGHT,
+ VIMC_FRAME_MAX_HEIGHT) & ~1;
+
+ /* Don't accept a pixelformat that is not on the table */
+ vpix = vimc_pix_map_by_pixelformat(format->pixelformat);
+ if (!vpix) {
+ format->pixelformat = fmt_default.pixelformat;
+ vpix = vimc_pix_map_by_pixelformat(format->pixelformat);
+ }
+ /* TODO: Add support for custom bytesperline values */
+ format->bytesperline = format->width * vpix->bpp;
+ format->sizeimage = format->bytesperline * format->height;
+
+ if (format->field == V4L2_FIELD_ANY)
+ format->field = fmt_default.field;
+
+ vimc_colorimetry_clamp(format);
+
+ return 0;
+}
+
+static int vimc_cap_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct vimc_cap_device *vcap = video_drvdata(file);
+
+ /* Do not change the format while stream is on */
+ if (vb2_is_busy(&vcap->queue))
+ return -EBUSY;
+
+ vimc_cap_try_fmt_vid_cap(file, priv, f);
+
+ dev_dbg(vcap->dev, "%s: format update: "
+ "old:%dx%d (0x%x, %d, %d, %d, %d) "
+ "new:%dx%d (0x%x, %d, %d, %d, %d)\n", vcap->vdev.name,
+ /* old */
+ vcap->format.width, vcap->format.height,
+ vcap->format.pixelformat, vcap->format.colorspace,
+ vcap->format.quantization, vcap->format.xfer_func,
+ vcap->format.ycbcr_enc,
+ /* new */
+ f->fmt.pix.width, f->fmt.pix.height,
+ f->fmt.pix.pixelformat, f->fmt.pix.colorspace,
+ f->fmt.pix.quantization, f->fmt.pix.xfer_func,
+ f->fmt.pix.ycbcr_enc);
+
+ vcap->format = f->fmt.pix;
+
+ return 0;
+}
+
static int vimc_cap_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- struct vimc_cap_device *vcap = video_drvdata(file);
+ const struct vimc_pix_map *vpix = vimc_pix_map_by_index(f->index);
+
+ if (!vpix)
+ return -EINVAL;
+
+ f->pixelformat = vpix->pixelformat;
+
+ return 0;
+}
+
+static int vimc_cap_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ const struct vimc_pix_map *vpix;
+
+ if (fsize->index)
+ return -EINVAL;
- if (f->index > 0)
+ /* Only accept code in the pix map table */
+ vpix = vimc_pix_map_by_code(fsize->pixel_format);
+ if (!vpix)
return -EINVAL;
- /* We only support one format for now */
- f->pixelformat = vcap->format.pixelformat;
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = VIMC_FRAME_MIN_WIDTH;
+ fsize->stepwise.max_width = VIMC_FRAME_MAX_WIDTH;
+ fsize->stepwise.min_height = VIMC_FRAME_MIN_HEIGHT;
+ fsize->stepwise.max_height = VIMC_FRAME_MAX_HEIGHT;
+ fsize->stepwise.step_width = 2;
+ fsize->stepwise.step_height = 2;
return 0;
}
@@ -101,10 +206,11 @@ static const struct v4l2_file_operations vimc_cap_fops = {
static const struct v4l2_ioctl_ops vimc_cap_ioctl_ops = {
.vidioc_querycap = vimc_cap_querycap,
- .vidioc_g_fmt_vid_cap = vimc_cap_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vimc_cap_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vimc_cap_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vimc_cap_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vimc_cap_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vimc_cap_try_fmt_vid_cap,
.vidioc_enum_fmt_vid_cap = vimc_cap_enum_fmt_vid_cap,
+ .vidioc_enum_framesizes = vimc_cap_enum_framesizes,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
@@ -132,31 +238,6 @@ static void vimc_cap_return_all_buffers(struct vimc_cap_device *vcap,
spin_unlock(&vcap->qlock);
}
-static int vimc_cap_pipeline_s_stream(struct vimc_cap_device *vcap, int enable)
-{
- struct v4l2_subdev *sd;
- struct media_pad *pad;
- int ret;
-
- /* Start the stream in the subdevice direct connected */
- pad = media_entity_remote_pad(&vcap->vdev.entity.pads[0]);
-
- /*
- * if it is a raw node from vimc-core, there is nothing to activate
- * TODO: remove this when there are no more raw nodes in the
- * core and return error instead
- */
- if (pad->entity->obj_type == MEDIA_ENTITY_TYPE_BASE)
- return 0;
-
- sd = media_entity_to_v4l2_subdev(pad->entity);
- ret = v4l2_subdev_call(sd, video, s_stream, enable);
- if (ret && ret != -ENOIOCTLCMD)
- return ret;
-
- return 0;
-}
-
static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
@@ -173,7 +254,7 @@ static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count)
}
/* Enable streaming from the pipe */
- ret = vimc_cap_pipeline_s_stream(vcap, 1);
+ ret = vimc_pipeline_s_stream(&vcap->vdev.entity, 1);
if (ret) {
media_pipeline_stop(entity);
vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED);
@@ -192,7 +273,7 @@ static void vimc_cap_stop_streaming(struct vb2_queue *vq)
struct vimc_cap_device *vcap = vb2_get_drv_priv(vq);
/* Disable streaming from the pipe */
- vimc_cap_pipeline_s_stream(vcap, 0);
+ vimc_pipeline_s_stream(&vcap->vdev.entity, 0);
/* Stop the media pipeline */
media_pipeline_stop(&vcap->vdev.entity);
@@ -234,8 +315,7 @@ static int vimc_cap_buffer_prepare(struct vb2_buffer *vb)
unsigned long size = vcap->format.sizeimage;
if (vb2_plane_size(vb, 0) < size) {
- dev_err(vcap->vdev.v4l2_dev->dev,
- "%s: buffer too small (%lu < %lu)\n",
+ dev_err(vcap->dev, "%s: buffer too small (%lu < %lu)\n",
vcap->vdev.name, vb2_plane_size(vb, 0), size);
return -EINVAL;
}
@@ -256,78 +336,14 @@ static const struct vb2_ops vimc_cap_qops = {
.wait_finish = vb2_ops_wait_finish,
};
-/*
- * NOTE: this function is a copy of v4l2_subdev_link_validate_get_format
- * maybe the v4l2 function should be public
- */
-static int vimc_cap_v4l2_subdev_link_validate_get_format(struct media_pad *pad,
- struct v4l2_subdev_format *fmt)
-{
- struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(pad->entity);
-
- fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt->pad = pad->index;
-
- return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
-}
-
-static int vimc_cap_link_validate(struct media_link *link)
-{
- struct v4l2_subdev_format source_fmt;
- const struct vimc_pix_map *vpix;
- struct vimc_cap_device *vcap = container_of(link->sink->entity,
- struct vimc_cap_device,
- vdev.entity);
- struct v4l2_pix_format *sink_fmt = &vcap->format;
- int ret;
-
- /*
- * if it is a raw node from vimc-core, ignore the link for now
- * TODO: remove this when there are no more raw nodes in the
- * core and return error instead
- */
- if (link->source->entity->obj_type == MEDIA_ENTITY_TYPE_BASE)
- return 0;
-
- /* Get the the format of the subdev */
- ret = vimc_cap_v4l2_subdev_link_validate_get_format(link->source,
- &source_fmt);
- if (ret)
- return ret;
-
- dev_dbg(vcap->vdev.v4l2_dev->dev,
- "%s: link validate formats src:%dx%d %d sink:%dx%d %d\n",
- vcap->vdev.name,
- source_fmt.format.width, source_fmt.format.height,
- source_fmt.format.code,
- sink_fmt->width, sink_fmt->height,
- sink_fmt->pixelformat);
-
- /* The width, height and code must match. */
- vpix = vimc_pix_map_by_pixelformat(sink_fmt->pixelformat);
- if (source_fmt.format.width != sink_fmt->width
- || source_fmt.format.height != sink_fmt->height
- || vpix->code != source_fmt.format.code)
- return -EPIPE;
-
- /*
- * The field order must match, or the sink field order must be NONE
- * to support interlaced hardware connected to bridges that support
- * progressive formats only.
- */
- if (source_fmt.format.field != sink_fmt->field &&
- sink_fmt->field != V4L2_FIELD_NONE)
- return -EPIPE;
-
- return 0;
-}
-
static const struct media_entity_operations vimc_cap_mops = {
- .link_validate = vimc_cap_link_validate,
+ .link_validate = vimc_link_validate,
};
-static void vimc_cap_destroy(struct vimc_ent_device *ved)
+static void vimc_cap_comp_unbind(struct device *comp, struct device *master,
+ void *master_data)
{
+ struct vimc_ent_device *ved = dev_get_drvdata(comp);
struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device,
ved);
@@ -376,42 +392,35 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved,
vb2_buffer_done(&vimc_buf->vb2.vb2_buf, VB2_BUF_STATE_DONE);
}
-struct vimc_ent_device *vimc_cap_create(struct v4l2_device *v4l2_dev,
- const char *const name,
- u16 num_pads,
- const unsigned long *pads_flag)
+static int vimc_cap_comp_bind(struct device *comp, struct device *master,
+ void *master_data)
{
+ struct v4l2_device *v4l2_dev = master_data;
+ struct vimc_platform_data *pdata = comp->platform_data;
const struct vimc_pix_map *vpix;
struct vimc_cap_device *vcap;
struct video_device *vdev;
struct vb2_queue *q;
int ret;
- /*
- * Check entity configuration params
- * NOTE: we only support a single sink pad
- */
- if (!name || num_pads != 1 || !pads_flag ||
- !(pads_flag[0] & MEDIA_PAD_FL_SINK))
- return ERR_PTR(-EINVAL);
-
/* Allocate the vimc_cap_device struct */
vcap = kzalloc(sizeof(*vcap), GFP_KERNEL);
if (!vcap)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
/* Allocate the pads */
- vcap->ved.pads = vimc_pads_init(num_pads, pads_flag);
+ vcap->ved.pads =
+ vimc_pads_init(1, (const unsigned long[1]) {MEDIA_PAD_FL_SINK});
if (IS_ERR(vcap->ved.pads)) {
ret = PTR_ERR(vcap->ved.pads);
goto err_free_vcap;
}
/* Initialize the media entity */
- vcap->vdev.entity.name = name;
+ vcap->vdev.entity.name = pdata->entity_name;
vcap->vdev.entity.function = MEDIA_ENT_F_IO_V4L;
ret = media_entity_pads_init(&vcap->vdev.entity,
- num_pads, vcap->ved.pads);
+ 1, vcap->ved.pads);
if (ret)
goto err_clean_pads;
@@ -432,9 +441,8 @@ struct vimc_ent_device *vimc_cap_create(struct v4l2_device *v4l2_dev,
ret = vb2_queue_init(q);
if (ret) {
- dev_err(vcap->vdev.v4l2_dev->dev,
- "%s: vb2 queue init failed (err=%d)\n",
- vcap->vdev.name, ret);
+ dev_err(comp, "%s: vb2 queue init failed (err=%d)\n",
+ pdata->entity_name, ret);
goto err_clean_m_ent;
}
@@ -442,23 +450,19 @@ struct vimc_ent_device *vimc_cap_create(struct v4l2_device *v4l2_dev,
INIT_LIST_HEAD(&vcap->buf_list);
spin_lock_init(&vcap->qlock);
- /* Set the frame format (this is hardcoded for now) */
- vcap->format.width = 640;
- vcap->format.height = 480;
- vcap->format.pixelformat = V4L2_PIX_FMT_RGB24;
- vcap->format.field = V4L2_FIELD_NONE;
- vcap->format.colorspace = V4L2_COLORSPACE_SRGB;
-
+ /* Set default frame format */
+ vcap->format = fmt_default;
vpix = vimc_pix_map_by_pixelformat(vcap->format.pixelformat);
-
vcap->format.bytesperline = vcap->format.width * vpix->bpp;
vcap->format.sizeimage = vcap->format.bytesperline *
vcap->format.height;
/* Fill the vimc_ent_device struct */
- vcap->ved.destroy = vimc_cap_destroy;
vcap->ved.ent = &vcap->vdev.entity;
vcap->ved.process_frame = vimc_cap_process_frame;
+ vcap->ved.vdev_get_format = vimc_cap_get_format;
+ dev_set_drvdata(comp, &vcap->ved);
+ vcap->dev = comp;
/* Initialize the video_device struct */
vdev = &vcap->vdev;
@@ -471,19 +475,18 @@ struct vimc_ent_device *vimc_cap_create(struct v4l2_device *v4l2_dev,
vdev->queue = q;
vdev->v4l2_dev = v4l2_dev;
vdev->vfl_dir = VFL_DIR_RX;
- strlcpy(vdev->name, name, sizeof(vdev->name));
+ strlcpy(vdev->name, pdata->entity_name, sizeof(vdev->name));
video_set_drvdata(vdev, &vcap->ved);
/* Register the video_device with the v4l2 and the media framework */
ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
if (ret) {
- dev_err(vcap->vdev.v4l2_dev->dev,
- "%s: video register failed (err=%d)\n",
+ dev_err(comp, "%s: video register failed (err=%d)\n",
vcap->vdev.name, ret);
goto err_release_queue;
}
- return &vcap->ved;
+ return 0;
err_release_queue:
vb2_queue_release(q);
@@ -494,5 +497,45 @@ err_clean_pads:
err_free_vcap:
kfree(vcap);
- return ERR_PTR(ret);
+ return ret;
+}
+
+static const struct component_ops vimc_cap_comp_ops = {
+ .bind = vimc_cap_comp_bind,
+ .unbind = vimc_cap_comp_unbind,
+};
+
+static int vimc_cap_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vimc_cap_comp_ops);
}
+
+static int vimc_cap_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vimc_cap_comp_ops);
+
+ return 0;
+}
+
+static struct platform_driver vimc_cap_pdrv = {
+ .probe = vimc_cap_probe,
+ .remove = vimc_cap_remove,
+ .driver = {
+ .name = VIMC_CAP_DRV_NAME,
+ },
+};
+
+static const struct platform_device_id vimc_cap_driver_ids[] = {
+ {
+ .name = VIMC_CAP_DRV_NAME,
+ },
+ { }
+};
+
+module_platform_driver(vimc_cap_pdrv);
+
+MODULE_DEVICE_TABLE(platform, vimc_cap_driver_ids);
+
+MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Capture");
+MODULE_AUTHOR("Helen Mae Koike Fornazier <[email protected]>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c
new file mode 100644
index 000000000000..9d63c84a9876
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-common.c
@@ -0,0 +1,473 @@
+/*
+ * vimc-common.c Virtual Media Controller Driver
+ *
+ * Copyright (C) 2015-2017 Helen Koike <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include "vimc-common.h"
+
+/*
+ * NOTE: non-bayer formats need to come first (necessary for enum_mbus_code
+ * in the scaler)
+ */
+static const struct vimc_pix_map vimc_pix_map_list[] = {
+ /* TODO: add all missing formats */
+
+ /* RGB formats */
+ {
+ .code = MEDIA_BUS_FMT_BGR888_1X24,
+ .pixelformat = V4L2_PIX_FMT_BGR24,
+ .bpp = 3,
+ .bayer = false,
+ },
+ {
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .pixelformat = V4L2_PIX_FMT_RGB24,
+ .bpp = 3,
+ .bayer = false,
+ },
+ {
+ .code = MEDIA_BUS_FMT_ARGB8888_1X32,
+ .pixelformat = V4L2_PIX_FMT_ARGB32,
+ .bpp = 4,
+ .bayer = false,
+ },
+
+ /* Bayer formats */
+ {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SBGGR8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGBRG8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGRBG8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SRGGB8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SBGGR10,
+ .bpp = 2,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SGBRG10,
+ .bpp = 2,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SGRBG10,
+ .bpp = 2,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .pixelformat = V4L2_PIX_FMT_SRGGB10,
+ .bpp = 2,
+ .bayer = true,
+ },
+
+ /* 10bit raw bayer a-law compressed to 8 bits */
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SBGGR10ALAW8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGBRG10ALAW8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGRBG10ALAW8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SRGGB10ALAW8,
+ .bpp = 1,
+ .bayer = true,
+ },
+
+ /* 10bit raw bayer DPCM compressed to 8 bits */
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SBGGR10DPCM8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGBRG10DPCM8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
+ .pixelformat = V4L2_PIX_FMT_SRGGB10DPCM8,
+ .bpp = 1,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .pixelformat = V4L2_PIX_FMT_SBGGR12,
+ .bpp = 2,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .pixelformat = V4L2_PIX_FMT_SGBRG12,
+ .bpp = 2,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .pixelformat = V4L2_PIX_FMT_SGRBG12,
+ .bpp = 2,
+ .bayer = true,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .pixelformat = V4L2_PIX_FMT_SRGGB12,
+ .bpp = 2,
+ .bayer = true,
+ },
+};
+
+const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i)
+{
+ if (i >= ARRAY_SIZE(vimc_pix_map_list))
+ return NULL;
+
+ return &vimc_pix_map_list[i];
+}
+EXPORT_SYMBOL_GPL(vimc_pix_map_by_index);
+
+const struct vimc_pix_map *vimc_pix_map_by_code(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vimc_pix_map_list); i++) {
+ if (vimc_pix_map_list[i].code == code)
+ return &vimc_pix_map_list[i];
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(vimc_pix_map_by_code);
+
+const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vimc_pix_map_list); i++) {
+ if (vimc_pix_map_list[i].pixelformat == pixelformat)
+ return &vimc_pix_map_list[i];
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat);
+
+int vimc_propagate_frame(struct media_pad *src, const void *frame)
+{
+ struct media_link *link;
+
+ if (!(src->flags & MEDIA_PAD_FL_SOURCE))
+ return -EINVAL;
+
+ /* Send this frame to all sink pads that are direct linked */
+ list_for_each_entry(link, &src->entity->links, list) {
+ if (link->source == src &&
+ (link->flags & MEDIA_LNK_FL_ENABLED)) {
+ struct vimc_ent_device *ved = NULL;
+ struct media_entity *entity = link->sink->entity;
+
+ if (is_media_entity_v4l2_subdev(entity)) {
+ struct v4l2_subdev *sd =
+ container_of(entity, struct v4l2_subdev,
+ entity);
+ ved = v4l2_get_subdevdata(sd);
+ } else if (is_media_entity_v4l2_video_device(entity)) {
+ struct video_device *vdev =
+ container_of(entity,
+ struct video_device,
+ entity);
+ ved = video_get_drvdata(vdev);
+ }
+ if (ved && ved->process_frame)
+ ved->process_frame(ved, link->sink, frame);
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vimc_propagate_frame);
+
+/* Helper function to allocate and initialize pads */
+struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag)
+{
+ struct media_pad *pads;
+ unsigned int i;
+
+ /* Allocate memory for the pads */
+ pads = kcalloc(num_pads, sizeof(*pads), GFP_KERNEL);
+ if (!pads)
+ return ERR_PTR(-ENOMEM);
+
+ /* Initialize the pads */
+ for (i = 0; i < num_pads; i++) {
+ pads[i].index = i;
+ pads[i].flags = pads_flag[i];
+ }
+
+ return pads;
+}
+EXPORT_SYMBOL_GPL(vimc_pads_init);
+
+int vimc_pipeline_s_stream(struct media_entity *ent, int enable)
+{
+ struct v4l2_subdev *sd;
+ struct media_pad *pad;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ent->num_pads; i++) {
+ if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ continue;
+
+ /* Start the stream in the directly connected subdevice */
+ pad = media_entity_remote_pad(&ent->pads[i]);
+
+ if (!is_media_entity_v4l2_subdev(pad->entity))
+ return -EINVAL;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ ret = v4l2_subdev_call(sd, video, s_stream, enable);
+ if (ret && ret != -ENOIOCTLCMD)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vimc_pipeline_s_stream);
+
+static int vimc_get_mbus_format(struct media_pad *pad,
+ struct v4l2_subdev_format *fmt)
+{
+ if (is_media_entity_v4l2_subdev(pad->entity)) {
+ struct v4l2_subdev *sd =
+ media_entity_to_v4l2_subdev(pad->entity);
+ int ret;
+
+ fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt->pad = pad->index;
+
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
+ if (ret)
+ return ret;
+
+ } else if (is_media_entity_v4l2_video_device(pad->entity)) {
+ struct video_device *vdev = container_of(pad->entity,
+ struct video_device,
+ entity);
+ struct vimc_ent_device *ved = video_get_drvdata(vdev);
+ const struct vimc_pix_map *vpix;
+ struct v4l2_pix_format vdev_fmt;
+
+ if (!ved->vdev_get_format)
+ return -ENOIOCTLCMD;
+
+ ved->vdev_get_format(ved, &vdev_fmt);
+ vpix = vimc_pix_map_by_pixelformat(vdev_fmt.pixelformat);
+ v4l2_fill_mbus_format(&fmt->format, &vdev_fmt, vpix->code);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int vimc_link_validate(struct media_link *link)
+{
+ struct v4l2_subdev_format source_fmt, sink_fmt;
+ int ret;
+
+ ret = vimc_get_mbus_format(link->source, &source_fmt);
+ if (ret)
+ return ret;
+
+ ret = vimc_get_mbus_format(link->sink, &sink_fmt);
+ if (ret)
+ return ret;
+
+ pr_info("vimc link validate: "
+ "%s:src:%dx%d (0x%x, %d, %d, %d, %d) "
+ "%s:snk:%dx%d (0x%x, %d, %d, %d, %d)\n",
+ /* src */
+ link->source->entity->name,
+ source_fmt.format.width, source_fmt.format.height,
+ source_fmt.format.code, source_fmt.format.colorspace,
+ source_fmt.format.quantization, source_fmt.format.xfer_func,
+ source_fmt.format.ycbcr_enc,
+ /* sink */
+ link->sink->entity->name,
+ sink_fmt.format.width, sink_fmt.format.height,
+ sink_fmt.format.code, sink_fmt.format.colorspace,
+ sink_fmt.format.quantization, sink_fmt.format.xfer_func,
+ sink_fmt.format.ycbcr_enc);
+
+ /* The width, height and code must match. */
+ if (source_fmt.format.width != sink_fmt.format.width
+ || source_fmt.format.height != sink_fmt.format.height
+ || source_fmt.format.code != sink_fmt.format.code)
+ return -EPIPE;
+
+ /*
+ * The field order must match, or the sink field order must be NONE
+ * to support interlaced hardware connected to bridges that support
+ * progressive formats only.
+ */
+ if (source_fmt.format.field != sink_fmt.format.field &&
+ sink_fmt.format.field != V4L2_FIELD_NONE)
+ return -EPIPE;
+
+ /*
+ * If colorspace is DEFAULT, then assume all the colorimetry is also
+ * DEFAULT, return 0 to skip comparing the other colorimetry parameters
+ */
+ if (source_fmt.format.colorspace == V4L2_COLORSPACE_DEFAULT
+ || sink_fmt.format.colorspace == V4L2_COLORSPACE_DEFAULT)
+ return 0;
+
+ /* Colorspace must match. */
+ if (source_fmt.format.colorspace != sink_fmt.format.colorspace)
+ return -EPIPE;
+
+ /* Colorimetry must match if they are not set to DEFAULT */
+ if (source_fmt.format.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT
+ && sink_fmt.format.ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT
+ && source_fmt.format.ycbcr_enc != sink_fmt.format.ycbcr_enc)
+ return -EPIPE;
+
+ if (source_fmt.format.quantization != V4L2_QUANTIZATION_DEFAULT
+ && sink_fmt.format.quantization != V4L2_QUANTIZATION_DEFAULT
+ && source_fmt.format.quantization != sink_fmt.format.quantization)
+ return -EPIPE;
+
+ if (source_fmt.format.xfer_func != V4L2_XFER_FUNC_DEFAULT
+ && sink_fmt.format.xfer_func != V4L2_XFER_FUNC_DEFAULT
+ && source_fmt.format.xfer_func != sink_fmt.format.xfer_func)
+ return -EPIPE;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vimc_link_validate);
+
+static const struct media_entity_operations vimc_ent_sd_mops = {
+ .link_validate = vimc_link_validate,
+};
+
+int vimc_ent_sd_register(struct vimc_ent_device *ved,
+ struct v4l2_subdev *sd,
+ struct v4l2_device *v4l2_dev,
+ const char *const name,
+ u32 function,
+ u16 num_pads,
+ const unsigned long *pads_flag,
+ const struct v4l2_subdev_ops *sd_ops)
+{
+ int ret;
+
+ /* Allocate the pads */
+ ved->pads = vimc_pads_init(num_pads, pads_flag);
+ if (IS_ERR(ved->pads))
+ return PTR_ERR(ved->pads);
+
+ /* Fill the vimc_ent_device struct */
+ ved->ent = &sd->entity;
+
+ /* Initialize the subdev */
+ v4l2_subdev_init(sd, sd_ops);
+ sd->entity.function = function;
+ sd->entity.ops = &vimc_ent_sd_mops;
+ sd->owner = THIS_MODULE;
+ strlcpy(sd->name, name, sizeof(sd->name));
+ v4l2_set_subdevdata(sd, ved);
+
+ /* Expose this subdev to user space */
+ sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ /* Initialize the media entity */
+ ret = media_entity_pads_init(&sd->entity, num_pads, ved->pads);
+ if (ret)
+ goto err_clean_pads;
+
+ /* Register the subdev with the v4l2 and the media framework */
+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ if (ret) {
+ dev_err(v4l2_dev->dev,
+ "%s: subdev register failed (err=%d)\n",
+ name, ret);
+ goto err_clean_m_ent;
+ }
+
+ return 0;
+
+err_clean_m_ent:
+ media_entity_cleanup(&sd->entity);
+err_clean_pads:
+ vimc_pads_cleanup(ved->pads);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vimc_ent_sd_register);
+
+void vimc_ent_sd_unregister(struct vimc_ent_device *ved, struct v4l2_subdev *sd)
+{
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(ved->ent);
+ vimc_pads_cleanup(ved->pads);
+}
+EXPORT_SYMBOL_GPL(vimc_ent_sd_unregister);
+
+MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Common");
+MODULE_AUTHOR("Helen Koike <[email protected]>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h
new file mode 100644
index 000000000000..dca528a316e7
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-common.h
@@ -0,0 +1,229 @@
+/*
+ * vimc-common.h Virtual Media Controller Driver
+ *
+ * Copyright (C) 2015-2017 Helen Koike <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIMC_COMMON_H_
+#define _VIMC_COMMON_H_
+
+#include <linux/slab.h>
+#include <media/media-device.h>
+#include <media/v4l2-device.h>
+
+#define VIMC_FRAME_MAX_WIDTH 4096
+#define VIMC_FRAME_MAX_HEIGHT 2160
+#define VIMC_FRAME_MIN_WIDTH 16
+#define VIMC_FRAME_MIN_HEIGHT 16
+
+#define VIMC_FRAME_INDEX(lin, col, width, bpp) ((((lin) * (width)) + (col)) * (bpp))
+
+/**
+ * vimc_colorimetry_clamp - Adjust colorimetry parameters
+ *
+ * @fmt: the pointer to struct v4l2_pix_format or
+ * struct v4l2_mbus_framefmt
+ *
+ * Entities must check whether the colorimetry given by userspace is valid;
+ * if it is not, they must reset it to DEFAULT
+ */
+#define vimc_colorimetry_clamp(fmt) \
+do { \
+ if ((fmt)->colorspace == V4L2_COLORSPACE_DEFAULT \
+ || (fmt)->colorspace > V4L2_COLORSPACE_DCI_P3) { \
+ (fmt)->colorspace = V4L2_COLORSPACE_DEFAULT; \
+ (fmt)->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; \
+ (fmt)->quantization = V4L2_QUANTIZATION_DEFAULT; \
+ (fmt)->xfer_func = V4L2_XFER_FUNC_DEFAULT; \
+ } \
+ if ((fmt)->ycbcr_enc > V4L2_YCBCR_ENC_SMPTE240M) \
+ (fmt)->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; \
+ if ((fmt)->quantization > V4L2_QUANTIZATION_LIM_RANGE) \
+ (fmt)->quantization = V4L2_QUANTIZATION_DEFAULT; \
+ if ((fmt)->xfer_func > V4L2_XFER_FUNC_SMPTE2084) \
+ (fmt)->xfer_func = V4L2_XFER_FUNC_DEFAULT; \
+} while (0)
+
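+/*
+ * Example (illustrative sketch, not part of this patch): a node would
+ * typically apply this macro to the format it receives from userspace in
+ * its set_fmt adjustment helper, as vimc-debayer does in
+ * vimc_deb_adjust_sink_fmt(), with fmt being the struct v4l2_mbus_framefmt
+ * (or struct v4l2_pix_format) being adjusted:
+ *
+ *    vimc_colorimetry_clamp(fmt);
+ */
+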
+/**
+ * struct vimc_platform_data - platform data passed to the components
+ *
+ * @entity_name: The name of the entity to be created
+ *
+ * Board setup code often uses the device's platform_data field to pass
+ * additional information to the driver.
+ * When injecting a new platform_device into the component system, the core
+ * needs to provide the corresponding submodule with the name of the entity
+ * that should be used when registering the subdevice in the Media Controller
+ * system.
+ */
+struct vimc_platform_data {
+ char entity_name[32];
+};
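+
+/*
+ * Example (illustrative sketch, mirroring how vimc-core registers its
+ * subdevices; parent_dev is a placeholder): the core fills this struct and
+ * hands it over together with the new platform_device:
+ *
+ *    struct vimc_platform_data pdata;
+ *
+ *    strlcpy(pdata.entity_name, "Sensor A", sizeof(pdata.entity_name));
+ *    pdev = platform_device_register_data(parent_dev, "vimc-sensor",
+ *                                         PLATFORM_DEVID_AUTO,
+ *                                         &pdata, sizeof(pdata));
+ */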
+
+/**
+ * struct vimc_pix_map - maps media bus code with v4l2 pixel format
+ *
+ * @code: media bus format code defined by MEDIA_BUS_FMT_* macros
+ * @bpp: number of bytes each pixel occupies
+ * @pixelformat: pixel format defined by V4L2_PIX_FMT_* macros
+ *
+ * Struct which matches the MEDIA_BUS_FMT_* codes with the corresponding
+ * V4L2_PIX_FMT_* fourcc pixelformat and its bytes per pixel (bpp)
+ */
+struct vimc_pix_map {
+ unsigned int code;
+ unsigned int bpp;
+ u32 pixelformat;
+ bool bayer;
+};
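+
+/*
+ * Example (illustrative sketch): the bpp field is what lets a node derive
+ * buffer sizes from a struct v4l2_pix_format, as vimc-capture does when it
+ * sets up its default format:
+ *
+ *    vpix = vimc_pix_map_by_pixelformat(fmt->pixelformat);
+ *    fmt->bytesperline = fmt->width * vpix->bpp;
+ *    fmt->sizeimage = fmt->bytesperline * fmt->height;
+ */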
+
+/**
+ * struct vimc_ent_device - core struct that represents a node in the topology
+ *
+ * @ent: the pointer to struct media_entity for the node
+ * @pads: the list of pads of the node
+ * @process_frame: callback to send a frame to that node
+ * @vdev_get_format: callback that returns the current format of a pad, used
+ * only when is_media_entity_v4l2_video_device(ent) returns
+ * true
+ *
+ * Each node of the topology must create a vimc_ent_device struct. Depending on
+ * the node, it will be an instance of either a v4l2_subdev or a video_device
+ * struct, both of which contain a struct media_entity.
+ * Those structures should embed the vimc_ent_device struct through
+ * v4l2_set_subdevdata() and video_set_drvdata() respectively, allowing the
+ * vimc_ent_device struct to be retrieved from the corresponding struct
+ * media_entity.
+ */
+struct vimc_ent_device {
+ struct media_entity *ent;
+ struct media_pad *pads;
+ void (*process_frame)(struct vimc_ent_device *ved,
+ struct media_pad *sink, const void *frame);
+ void (*vdev_get_format)(struct vimc_ent_device *ved,
+ struct v4l2_pix_format *fmt);
+};
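+
+/*
+ * Example (illustrative sketch, names are hypothetical): nodes embed this
+ * struct in their own device structure and recover it with container_of(),
+ * the same way vimc-capture and vimc-debayer do:
+ *
+ *    struct vimc_foo_device {
+ *        struct vimc_ent_device ved;
+ *        struct v4l2_subdev sd;
+ *    };
+ *
+ *    struct vimc_foo_device *vfoo =
+ *        container_of(ved, struct vimc_foo_device, ved);
+ */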
+
+/**
+ * vimc_propagate_frame - propagate a frame through the topology
+ *
+ * @src: the source pad where the frame is being originated
+ * @frame: the frame to be propagated
+ *
+ * This function will call the process_frame callback from the vimc_ent_device
+ * struct of the nodes directly connected to the @src pad
+ */
+int vimc_propagate_frame(struct media_pad *src, const void *frame);
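+
+/*
+ * Example (illustrative sketch, names are hypothetical and pad 1 being the
+ * source pad is an assumption): a processing node that simply forwards each
+ * received frame through its source pad could implement its process_frame
+ * callback as:
+ *
+ *    static void foo_process_frame(struct vimc_ent_device *ved,
+ *                                  struct media_pad *sink,
+ *                                  const void *frame)
+ *    {
+ *        vimc_propagate_frame(&ved->pads[1], frame);
+ *    }
+ */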
+
+/**
+ * vimc_pads_init - initialize pads
+ *
+ * @num_pads: number of pads to initialize
+ * @pads_flag: flags to use in each pad
+ *
+ * Helper function to allocate and initialize pads
+ */
+struct media_pad *vimc_pads_init(u16 num_pads,
+ const unsigned long *pads_flag);
+
+/**
+ * vimc_pads_cleanup - free pads
+ *
+ * @pads: pointer to the pads
+ *
+ * Helper function to free the pads initialized with vimc_pads_init
+ */
+static inline void vimc_pads_cleanup(struct media_pad *pads)
+{
+ kfree(pads);
+}
+
+/**
+ * vimc_pipeline_s_stream - start stream through the pipeline
+ *
+ * @ent: the pointer to struct media_entity for the node
+ * @enable: 1 to start the stream and 0 to stop
+ *
+ * Helper function to call the s_stream of the subdevices connected
+ * to all the sink pads of the entity
+ */
+int vimc_pipeline_s_stream(struct media_entity *ent, int enable);
+
+/**
+ * vimc_pix_map_by_index - get vimc_pix_map struct by its index
+ *
+ * @i: index of the vimc_pix_map struct in vimc_pix_map_list
+ */
+const struct vimc_pix_map *vimc_pix_map_by_index(unsigned int i);
+
+/**
+ * vimc_pix_map_by_code - get vimc_pix_map struct by media bus code
+ *
+ * @code: media bus format code defined by MEDIA_BUS_FMT_* macros
+ */
+const struct vimc_pix_map *vimc_pix_map_by_code(u32 code);
+
+/**
+ * vimc_pix_map_by_pixelformat - get vimc_pix_map struct by v4l2 pixel format
+ *
+ * @pixelformat: pixel format defined by V4L2_PIX_FMT_* macros
+ */
+const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat);
+
+/**
+ * vimc_ent_sd_register - initialize and register a subdev node
+ *
+ * @ved: the vimc_ent_device struct to be initialized
+ * @sd: the v4l2_subdev struct to be initialized and registered
+ * @v4l2_dev: the v4l2 device to register the v4l2_subdev
+ * @name: name of the sub-device. Please note that the name must be
+ * unique.
+ * @function: media entity function defined by MEDIA_ENT_F_* macros
+ * @num_pads: number of pads to initialize
+ * @pads_flag: flags to use in each pad
+ * @sd_ops: pointer to &struct v4l2_subdev_ops.
+ *
+ * Helper function to initialize and register the struct vimc_ent_device and
+ * struct v4l2_subdev which represent a subdev node in the topology
+ */
+int vimc_ent_sd_register(struct vimc_ent_device *ved,
+ struct v4l2_subdev *sd,
+ struct v4l2_device *v4l2_dev,
+ const char *const name,
+ u32 function,
+ u16 num_pads,
+ const unsigned long *pads_flag,
+ const struct v4l2_subdev_ops *sd_ops);
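+
+/*
+ * Example (illustrative sketch, names and entity function are hypothetical):
+ * a subdev node with one sink and one source pad could register itself from
+ * its component bind callback roughly as follows:
+ *
+ *    ret = vimc_ent_sd_register(&vfoo->ved, &vfoo->sd, v4l2_dev,
+ *                               pdata->entity_name,
+ *                               MEDIA_ENT_F_ATV_DECODER, 2,
+ *                               (const unsigned long[2]) {MEDIA_PAD_FL_SINK,
+ *                                                         MEDIA_PAD_FL_SOURCE},
+ *                               &vfoo_ops);
+ */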
+
+/**
+ * vimc_ent_sd_unregister - cleanup and unregister a subdev node
+ *
+ * @ved: the vimc_ent_device struct to be cleaned up
+ * @sd: the v4l2_subdev struct to be unregistered
+ *
+ * Helper function to clean up and unregister the struct vimc_ent_device and
+ * struct v4l2_subdev which represent a subdev node in the topology
+ */
+void vimc_ent_sd_unregister(struct vimc_ent_device *ved,
+ struct v4l2_subdev *sd);
+
+/**
+ * vimc_link_validate - validates a media link
+ *
+ * @link: pointer to &struct media_link
+ *
+ * This function validates whether a media link is valid for streaming.
+ */
+int vimc_link_validate(struct media_link *link);
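+
+/*
+ * Example (illustrative): video-device entities hook this helper straight
+ * into their media entity operations, as vimc-capture does:
+ *
+ *    static const struct media_entity_operations foo_mops = {
+ *        .link_validate = vimc_link_validate,
+ *    };
+ */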
+
+#endif
diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
index bc107da8fbd5..51c0eee61ca6 100644
--- a/drivers/media/platform/vimc/vimc-core.c
+++ b/drivers/media/platform/vimc/vimc-core.c
@@ -15,15 +15,14 @@
*
*/
+#include <linux/component.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <media/media-device.h>
#include <media/v4l2-device.h>
-#include "vimc-capture.h"
-#include "vimc-core.h"
-#include "vimc-sensor.h"
+#include "vimc-common.h"
#define VIMC_PDEV_NAME "vimc"
#define VIMC_MDEV_MODEL_NAME "VIMC MDEV"
@@ -37,10 +36,10 @@
}
struct vimc_device {
- /*
- * The pipeline configuration
- * (filled before calling vimc_device_register)
- */
+ /* The platform device */
+ struct platform_device pdev;
+
+ /* The pipeline configuration */
const struct vimc_pipeline_config *pipe_cfg;
/* The Associated media_device parent */
@@ -49,43 +48,14 @@ struct vimc_device {
/* Internal v4l2 parent device*/
struct v4l2_device v4l2_dev;
- /* Internal topology */
- struct vimc_ent_device **ved;
-};
-
-/**
- * enum vimc_ent_node - Select the functionality of a node in the topology
- * @VIMC_ENT_NODE_SENSOR: A node of type SENSOR simulates a camera sensor
- * generating internal images in bayer format and
- * propagating those images through the pipeline
- * @VIMC_ENT_NODE_CAPTURE: A node of type CAPTURE is a v4l2 video_device
- * that exposes the received image from the
- * pipeline to the user space
- * @VIMC_ENT_NODE_INPUT: A node of type INPUT is a v4l2 video_device that
- * receives images from the user space and
- * propagates them through the pipeline
- * @VIMC_ENT_NODE_DEBAYER: A node type DEBAYER expects to receive a frame
- * in bayer format converts it to RGB
- * @VIMC_ENT_NODE_SCALER: A node of type SCALER scales the received image
- * by a given multiplier
- *
- * This enum is used in the entity configuration struct to allow the definition
- * of a custom topology specifying the role of each node on it.
- */
-enum vimc_ent_node {
- VIMC_ENT_NODE_SENSOR,
- VIMC_ENT_NODE_CAPTURE,
- VIMC_ENT_NODE_INPUT,
- VIMC_ENT_NODE_DEBAYER,
- VIMC_ENT_NODE_SCALER,
+ /* Subdevices */
+ struct platform_device **subdevs;
};
/* Structure which describes individual configuration for each entity */
struct vimc_ent_config {
const char *name;
- size_t pads_qty;
- const unsigned long *pads_flag;
- enum vimc_ent_node node;
+ const char *drv;
};
/* Structure which describes links between entities */
@@ -112,60 +82,40 @@ struct vimc_pipeline_config {
static const struct vimc_ent_config ent_config[] = {
{
.name = "Sensor A",
- .pads_qty = 1,
- .pads_flag = (const unsigned long[]){MEDIA_PAD_FL_SOURCE},
- .node = VIMC_ENT_NODE_SENSOR,
+ .drv = "vimc-sensor",
},
{
.name = "Sensor B",
- .pads_qty = 1,
- .pads_flag = (const unsigned long[]){MEDIA_PAD_FL_SOURCE},
- .node = VIMC_ENT_NODE_SENSOR,
+ .drv = "vimc-sensor",
},
{
.name = "Debayer A",
- .pads_qty = 2,
- .pads_flag = (const unsigned long[]){MEDIA_PAD_FL_SINK,
- MEDIA_PAD_FL_SOURCE},
- .node = VIMC_ENT_NODE_DEBAYER,
+ .drv = "vimc-debayer",
},
{
.name = "Debayer B",
- .pads_qty = 2,
- .pads_flag = (const unsigned long[]){MEDIA_PAD_FL_SINK,
- MEDIA_PAD_FL_SOURCE},
- .node = VIMC_ENT_NODE_DEBAYER,
+ .drv = "vimc-debayer",
},
{
.name = "Raw Capture 0",
- .pads_qty = 1,
- .pads_flag = (const unsigned long[]){MEDIA_PAD_FL_SINK},
- .node = VIMC_ENT_NODE_CAPTURE,
+ .drv = "vimc-capture",
},
{
.name = "Raw Capture 1",
- .pads_qty = 1,
- .pads_flag = (const unsigned long[]){MEDIA_PAD_FL_SINK},
- .node = VIMC_ENT_NODE_CAPTURE,
+ .drv = "vimc-capture",
},
{
.name = "RGB/YUV Input",
- .pads_qty = 1,
- .pads_flag = (const unsigned long[]){MEDIA_PAD_FL_SOURCE},
- .node = VIMC_ENT_NODE_INPUT,
+ /* TODO: change this to vimc-input when it is implemented */
+ .drv = "vimc-sensor",
},
{
.name = "Scaler",
- .pads_qty = 2,
- .pads_flag = (const unsigned long[]){MEDIA_PAD_FL_SINK,
- MEDIA_PAD_FL_SOURCE},
- .node = VIMC_ENT_NODE_SCALER,
+ .drv = "vimc-scaler",
},
{
.name = "RGB/YUV Capture",
- .pads_qty = 1,
- .pads_flag = (const unsigned long[]){MEDIA_PAD_FL_SINK},
- .node = VIMC_ENT_NODE_CAPTURE,
+ .drv = "vimc-capture",
},
};
@@ -197,314 +147,40 @@ static const struct vimc_pipeline_config pipe_cfg = {
/* -------------------------------------------------------------------------- */
-static const struct vimc_pix_map vimc_pix_map_list[] = {
- /* TODO: add all missing formats */
-
- /* RGB formats */
- {
- .code = MEDIA_BUS_FMT_BGR888_1X24,
- .pixelformat = V4L2_PIX_FMT_BGR24,
- .bpp = 3,
- },
- {
- .code = MEDIA_BUS_FMT_RGB888_1X24,
- .pixelformat = V4L2_PIX_FMT_RGB24,
- .bpp = 3,
- },
- {
- .code = MEDIA_BUS_FMT_ARGB8888_1X32,
- .pixelformat = V4L2_PIX_FMT_ARGB32,
- .bpp = 4,
- },
-
- /* Bayer formats */
- {
- .code = MEDIA_BUS_FMT_SBGGR8_1X8,
- .pixelformat = V4L2_PIX_FMT_SBGGR8,
- .bpp = 1,
- },
- {
- .code = MEDIA_BUS_FMT_SGBRG8_1X8,
- .pixelformat = V4L2_PIX_FMT_SGBRG8,
- .bpp = 1,
- },
- {
- .code = MEDIA_BUS_FMT_SGRBG8_1X8,
- .pixelformat = V4L2_PIX_FMT_SGRBG8,
- .bpp = 1,
- },
- {
- .code = MEDIA_BUS_FMT_SRGGB8_1X8,
- .pixelformat = V4L2_PIX_FMT_SRGGB8,
- .bpp = 1,
- },
- {
- .code = MEDIA_BUS_FMT_SBGGR10_1X10,
- .pixelformat = V4L2_PIX_FMT_SBGGR10,
- .bpp = 2,
- },
- {
- .code = MEDIA_BUS_FMT_SGBRG10_1X10,
- .pixelformat = V4L2_PIX_FMT_SGBRG10,
- .bpp = 2,
- },
- {
- .code = MEDIA_BUS_FMT_SGRBG10_1X10,
- .pixelformat = V4L2_PIX_FMT_SGRBG10,
- .bpp = 2,
- },
- {
- .code = MEDIA_BUS_FMT_SRGGB10_1X10,
- .pixelformat = V4L2_PIX_FMT_SRGGB10,
- .bpp = 2,
- },
-
- /* 10bit raw bayer a-law compressed to 8 bits */
- {
- .code = MEDIA_BUS_FMT_SBGGR10_ALAW8_1X8,
- .pixelformat = V4L2_PIX_FMT_SBGGR10ALAW8,
- .bpp = 1,
- },
- {
- .code = MEDIA_BUS_FMT_SGBRG10_ALAW8_1X8,
- .pixelformat = V4L2_PIX_FMT_SGBRG10ALAW8,
- .bpp = 1,
- },
- {
- .code = MEDIA_BUS_FMT_SGRBG10_ALAW8_1X8,
- .pixelformat = V4L2_PIX_FMT_SGRBG10ALAW8,
- .bpp = 1,
- },
- {
- .code = MEDIA_BUS_FMT_SRGGB10_ALAW8_1X8,
- .pixelformat = V4L2_PIX_FMT_SRGGB10ALAW8,
- .bpp = 1,
- },
-
- /* 10bit raw bayer DPCM compressed to 8 bits */
- {
- .code = MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
- .pixelformat = V4L2_PIX_FMT_SBGGR10DPCM8,
- .bpp = 1,
- },
- {
- .code = MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
- .pixelformat = V4L2_PIX_FMT_SGBRG10DPCM8,
- .bpp = 1,
- },
- {
- .code = MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
- .pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8,
- .bpp = 1,
- },
- {
- .code = MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
- .pixelformat = V4L2_PIX_FMT_SRGGB10DPCM8,
- .bpp = 1,
- },
- {
- .code = MEDIA_BUS_FMT_SBGGR12_1X12,
- .pixelformat = V4L2_PIX_FMT_SBGGR12,
- .bpp = 2,
- },
- {
- .code = MEDIA_BUS_FMT_SGBRG12_1X12,
- .pixelformat = V4L2_PIX_FMT_SGBRG12,
- .bpp = 2,
- },
- {
- .code = MEDIA_BUS_FMT_SGRBG12_1X12,
- .pixelformat = V4L2_PIX_FMT_SGRBG12,
- .bpp = 2,
- },
- {
- .code = MEDIA_BUS_FMT_SRGGB12_1X12,
- .pixelformat = V4L2_PIX_FMT_SRGGB12,
- .bpp = 2,
- },
-};
-
-const struct vimc_pix_map *vimc_pix_map_by_code(u32 code)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(vimc_pix_map_list); i++) {
- if (vimc_pix_map_list[i].code == code)
- return &vimc_pix_map_list[i];
- }
- return NULL;
-}
-
-const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(vimc_pix_map_list); i++) {
- if (vimc_pix_map_list[i].pixelformat == pixelformat)
- return &vimc_pix_map_list[i];
- }
- return NULL;
-}
-
-int vimc_propagate_frame(struct media_pad *src, const void *frame)
-{
- struct media_link *link;
-
- if (!(src->flags & MEDIA_PAD_FL_SOURCE))
- return -EINVAL;
-
- /* Send this frame to all sink pads that are direct linked */
- list_for_each_entry(link, &src->entity->links, list) {
- if (link->source == src &&
- (link->flags & MEDIA_LNK_FL_ENABLED)) {
- struct vimc_ent_device *ved = NULL;
- struct media_entity *entity = link->sink->entity;
-
- if (is_media_entity_v4l2_subdev(entity)) {
- struct v4l2_subdev *sd =
- container_of(entity, struct v4l2_subdev,
- entity);
- ved = v4l2_get_subdevdata(sd);
- } else if (is_media_entity_v4l2_video_device(entity)) {
- struct video_device *vdev =
- container_of(entity,
- struct video_device,
- entity);
- ved = video_get_drvdata(vdev);
- }
- if (ved && ved->process_frame)
- ved->process_frame(ved, link->sink, frame);
- }
- }
-
- return 0;
-}
-
-static void vimc_device_unregister(struct vimc_device *vimc)
-{
- unsigned int i;
-
- media_device_unregister(&vimc->mdev);
- /* Cleanup (only initialized) entities */
- for (i = 0; i < vimc->pipe_cfg->num_ents; i++) {
- if (vimc->ved[i] && vimc->ved[i]->destroy)
- vimc->ved[i]->destroy(vimc->ved[i]);
-
- vimc->ved[i] = NULL;
- }
- v4l2_device_unregister(&vimc->v4l2_dev);
- media_device_cleanup(&vimc->mdev);
-}
-
-/* Helper function to allocate and initialize pads */
-struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag)
+static int vimc_create_links(struct vimc_device *vimc)
{
- struct media_pad *pads;
unsigned int i;
-
- /* Allocate memory for the pads */
- pads = kcalloc(num_pads, sizeof(*pads), GFP_KERNEL);
- if (!pads)
- return ERR_PTR(-ENOMEM);
-
- /* Initialize the pads */
- for (i = 0; i < num_pads; i++) {
- pads[i].index = i;
- pads[i].flags = pads_flag[i];
- }
-
- return pads;
-}
-
-/*
- * TODO: remove this function when all the
- * entities specific code are implemented
- */
-static void vimc_raw_destroy(struct vimc_ent_device *ved)
-{
- media_device_unregister_entity(ved->ent);
-
- media_entity_cleanup(ved->ent);
-
- vimc_pads_cleanup(ved->pads);
-
- kfree(ved->ent);
-
- kfree(ved);
-}
-
-/*
- * TODO: remove this function when all the
- * entities specific code are implemented
- */
-static struct vimc_ent_device *vimc_raw_create(struct v4l2_device *v4l2_dev,
- const char *const name,
- u16 num_pads,
- const unsigned long *pads_flag)
-{
- struct vimc_ent_device *ved;
int ret;
- /* Allocate the main ved struct */
- ved = kzalloc(sizeof(*ved), GFP_KERNEL);
- if (!ved)
- return ERR_PTR(-ENOMEM);
-
- /* Allocate the media entity */
- ved->ent = kzalloc(sizeof(*ved->ent), GFP_KERNEL);
- if (!ved->ent) {
- ret = -ENOMEM;
- goto err_free_ved;
- }
-
- /* Allocate the pads */
- ved->pads = vimc_pads_init(num_pads, pads_flag);
- if (IS_ERR(ved->pads)) {
- ret = PTR_ERR(ved->pads);
- goto err_free_ent;
+ /* Initialize the links between entities */
+ for (i = 0; i < vimc->pipe_cfg->num_links; i++) {
+ const struct vimc_ent_link *link = &vimc->pipe_cfg->links[i];
+ /*
+ * TODO: Check another way of retrieving ved struct without
+ * relying on platform_get_drvdata
+ */
+ struct vimc_ent_device *ved_src =
+ platform_get_drvdata(vimc->subdevs[link->src_ent]);
+ struct vimc_ent_device *ved_sink =
+ platform_get_drvdata(vimc->subdevs[link->sink_ent]);
+
+ ret = media_create_pad_link(ved_src->ent, link->src_pad,
+ ved_sink->ent, link->sink_pad,
+ link->flags);
+ if (ret)
+ return ret;
}
- /* Initialize the media entity */
- ved->ent->name = name;
- ved->ent->function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
- ret = media_entity_pads_init(ved->ent, num_pads, ved->pads);
- if (ret)
- goto err_cleanup_pads;
-
- /* Register the media entity */
- ret = media_device_register_entity(v4l2_dev->mdev, ved->ent);
- if (ret)
- goto err_cleanup_entity;
-
- /* Fill out the destroy function and return */
- ved->destroy = vimc_raw_destroy;
- return ved;
-
-err_cleanup_entity:
- media_entity_cleanup(ved->ent);
-err_cleanup_pads:
- vimc_pads_cleanup(ved->pads);
-err_free_ent:
- kfree(ved->ent);
-err_free_ved:
- kfree(ved);
-
- return ERR_PTR(ret);
+ return 0;
}
-static int vimc_device_register(struct vimc_device *vimc)
+static int vimc_comp_bind(struct device *master)
{
- unsigned int i;
+ struct vimc_device *vimc = container_of(to_platform_device(master),
+ struct vimc_device, pdev);
int ret;
- /* Allocate memory for the vimc_ent_devices pointers */
- vimc->ved = devm_kcalloc(vimc->mdev.dev, vimc->pipe_cfg->num_ents,
- sizeof(*vimc->ved), GFP_KERNEL);
- if (!vimc->ved)
- return -ENOMEM;
-
- /* Link the media device within the v4l2_device */
- vimc->v4l2_dev.mdev = &vimc->mdev;
+ dev_dbg(master, "bind");
/* Register the v4l2 struct */
ret = v4l2_device_register(vimc->mdev.dev, &vimc->v4l2_dev);
@@ -514,66 +190,22 @@ static int vimc_device_register(struct vimc_device *vimc)
return ret;
}
- /* Initialize entities */
- for (i = 0; i < vimc->pipe_cfg->num_ents; i++) {
- struct vimc_ent_device *(*create_func)(struct v4l2_device *,
- const char *const,
- u16,
- const unsigned long *);
-
- /* Register the specific node */
- switch (vimc->pipe_cfg->ents[i].node) {
- case VIMC_ENT_NODE_SENSOR:
- create_func = vimc_sen_create;
- break;
-
- case VIMC_ENT_NODE_CAPTURE:
- create_func = vimc_cap_create;
- break;
-
- /* TODO: Instantiate the specific topology node */
- case VIMC_ENT_NODE_INPUT:
- case VIMC_ENT_NODE_DEBAYER:
- case VIMC_ENT_NODE_SCALER:
- default:
- /*
- * TODO: remove this when all the entities specific
- * code are implemented
- */
- create_func = vimc_raw_create;
- break;
- }
-
- vimc->ved[i] = create_func(&vimc->v4l2_dev,
- vimc->pipe_cfg->ents[i].name,
- vimc->pipe_cfg->ents[i].pads_qty,
- vimc->pipe_cfg->ents[i].pads_flag);
- if (IS_ERR(vimc->ved[i])) {
- ret = PTR_ERR(vimc->ved[i]);
- vimc->ved[i] = NULL;
- goto err;
- }
- }
-
- /* Initialize the links between entities */
- for (i = 0; i < vimc->pipe_cfg->num_links; i++) {
- const struct vimc_ent_link *link = &vimc->pipe_cfg->links[i];
+ /* Bind subdevices */
+ ret = component_bind_all(master, &vimc->v4l2_dev);
+ if (ret)
+ goto err_v4l2_unregister;
- ret = media_create_pad_link(vimc->ved[link->src_ent]->ent,
- link->src_pad,
- vimc->ved[link->sink_ent]->ent,
- link->sink_pad,
- link->flags);
- if (ret)
- goto err;
- }
+ /* Initialize links */
+ ret = vimc_create_links(vimc);
+ if (ret)
+ goto err_comp_unbind_all;
/* Register the media device */
ret = media_device_register(&vimc->mdev);
if (ret) {
dev_err(vimc->mdev.dev,
"media device register failed (err=%d)\n", ret);
- return ret;
+ goto err_comp_unbind_all;
}
/* Expose all subdev's nodes*/
@@ -582,32 +214,106 @@ static int vimc_device_register(struct vimc_device *vimc)
dev_err(vimc->mdev.dev,
"vimc subdev nodes registration failed (err=%d)\n",
ret);
- goto err;
+ goto err_mdev_unregister;
}
return 0;
-err:
- /* Destroy the so far created topology */
- vimc_device_unregister(vimc);
+err_mdev_unregister:
+ media_device_unregister(&vimc->mdev);
+err_comp_unbind_all:
+ component_unbind_all(master, NULL);
+err_v4l2_unregister:
+ v4l2_device_unregister(&vimc->v4l2_dev);
return ret;
}
+static void vimc_comp_unbind(struct device *master)
+{
+ struct vimc_device *vimc = container_of(to_platform_device(master),
+ struct vimc_device, pdev);
+
+ dev_dbg(master, "unbind");
+
+ media_device_unregister(&vimc->mdev);
+ component_unbind_all(master, NULL);
+ v4l2_device_unregister(&vimc->v4l2_dev);
+}
+
+static int vimc_comp_compare(struct device *comp, void *data)
+{
+ const struct platform_device *pdev = to_platform_device(comp);
+ const char *name = data;
+
+ return !strcmp(pdev->dev.platform_data, name);
+}
+
+static struct component_match *vimc_add_subdevs(struct vimc_device *vimc)
+{
+ struct component_match *match = NULL;
+ struct vimc_platform_data pdata;
+ int i;
+
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++) {
+ dev_dbg(&vimc->pdev.dev, "new pdev for %s\n",
+ vimc->pipe_cfg->ents[i].drv);
+
+ strlcpy(pdata.entity_name, vimc->pipe_cfg->ents[i].name,
+ sizeof(pdata.entity_name));
+
+ vimc->subdevs[i] = platform_device_register_data(&vimc->pdev.dev,
+ vimc->pipe_cfg->ents[i].drv,
+ PLATFORM_DEVID_AUTO,
+ &pdata,
+ sizeof(pdata));
+ if (!vimc->subdevs[i]) {
+ while (--i >= 0)
+ platform_device_unregister(vimc->subdevs[i]);
+
+ return ERR_PTR(-ENOMEM);
+ }
+
+ component_match_add(&vimc->pdev.dev, &match, vimc_comp_compare,
+ (void *)vimc->pipe_cfg->ents[i].name);
+ }
+
+ return match;
+}
+
+static void vimc_rm_subdevs(struct vimc_device *vimc)
+{
+ unsigned int i;
+
+ for (i = 0; i < vimc->pipe_cfg->num_ents; i++)
+ platform_device_unregister(vimc->subdevs[i]);
+}
+
+static const struct component_master_ops vimc_comp_ops = {
+ .bind = vimc_comp_bind,
+ .unbind = vimc_comp_unbind,
+};
+
static int vimc_probe(struct platform_device *pdev)
{
- struct vimc_device *vimc;
+ struct vimc_device *vimc = container_of(pdev, struct vimc_device, pdev);
+ struct component_match *match = NULL;
int ret;
- /* Prepare the vimc topology structure */
+ dev_dbg(&pdev->dev, "probe");
- /* Allocate memory for the vimc structure */
- vimc = kzalloc(sizeof(*vimc), GFP_KERNEL);
- if (!vimc)
+ /* Create platform_device for each entity in the topology*/
+ vimc->subdevs = devm_kcalloc(&vimc->pdev.dev, vimc->pipe_cfg->num_ents,
+ sizeof(*vimc->subdevs), GFP_KERNEL);
+ if (!vimc->subdevs)
return -ENOMEM;
- /* Set the pipeline configuration struct */
- vimc->pipe_cfg = &pipe_cfg;
+ match = vimc_add_subdevs(vimc);
+ if (IS_ERR(match))
+ return PTR_ERR(match);
+
+ /* Link the media device within the v4l2_device */
+ vimc->v4l2_dev.mdev = &vimc->mdev;
/* Initialize media device */
strlcpy(vimc->mdev.model, VIMC_MDEV_MODEL_NAME,
@@ -615,28 +321,27 @@ static int vimc_probe(struct platform_device *pdev)
vimc->mdev.dev = &pdev->dev;
media_device_init(&vimc->mdev);
- /* Create vimc topology */
- ret = vimc_device_register(vimc);
+ /* Add self to the component system */
+ ret = component_master_add_with_match(&pdev->dev, &vimc_comp_ops,
+ match);
if (ret) {
- dev_err(vimc->mdev.dev,
- "vimc device registration failed (err=%d)\n", ret);
+ media_device_cleanup(&vimc->mdev);
+ vimc_rm_subdevs(vimc);
kfree(vimc);
return ret;
}
- /* Link the topology object with the platform device object */
- platform_set_drvdata(pdev, vimc);
-
return 0;
}
static int vimc_remove(struct platform_device *pdev)
{
- struct vimc_device *vimc = platform_get_drvdata(pdev);
+ struct vimc_device *vimc = container_of(pdev, struct vimc_device, pdev);
+
+ dev_dbg(&pdev->dev, "remove");
- /* Destroy all the topology */
- vimc_device_unregister(vimc);
- kfree(vimc);
+ component_master_del(&pdev->dev, &vimc_comp_ops);
+ vimc_rm_subdevs(vimc);
return 0;
}
@@ -645,9 +350,12 @@ static void vimc_dev_release(struct device *dev)
{
}
-static struct platform_device vimc_pdev = {
- .name = VIMC_PDEV_NAME,
- .dev.release = vimc_dev_release,
+static struct vimc_device vimc_dev = {
+ .pipe_cfg = &pipe_cfg,
+ .pdev = {
+ .name = VIMC_PDEV_NAME,
+ .dev.release = vimc_dev_release,
+ }
};
static struct platform_driver vimc_pdrv = {
@@ -662,29 +370,29 @@ static int __init vimc_init(void)
{
int ret;
- ret = platform_device_register(&vimc_pdev);
+ ret = platform_device_register(&vimc_dev.pdev);
if (ret) {
- dev_err(&vimc_pdev.dev,
+ dev_err(&vimc_dev.pdev.dev,
"platform device registration failed (err=%d)\n", ret);
return ret;
}
ret = platform_driver_register(&vimc_pdrv);
if (ret) {
- dev_err(&vimc_pdev.dev,
+ dev_err(&vimc_dev.pdev.dev,
"platform driver registration failed (err=%d)\n", ret);
-
- platform_device_unregister(&vimc_pdev);
+ platform_driver_unregister(&vimc_pdrv);
+ return ret;
}
- return ret;
+ return 0;
}
static void __exit vimc_exit(void)
{
platform_driver_unregister(&vimc_pdrv);
- platform_device_unregister(&vimc_pdev);
+ platform_device_unregister(&vimc_dev.pdev);
}
module_init(vimc_init);
diff --git a/drivers/media/platform/vimc/vimc-core.h b/drivers/media/platform/vimc/vimc-core.h
deleted file mode 100644
index 4525d23211ca..000000000000
--- a/drivers/media/platform/vimc/vimc-core.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * vimc-core.h Virtual Media Controller Driver
- *
- * Copyright (C) 2015-2017 Helen Koike <[email protected]>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _VIMC_CORE_H_
-#define _VIMC_CORE_H_
-
-#include <linux/slab.h>
-#include <media/v4l2-device.h>
-
-/**
- * struct vimc_pix_map - maps media bus code with v4l2 pixel format
- *
- * @code: media bus format code defined by MEDIA_BUS_FMT_* macros
- * @bbp: number of bytes each pixel occupies
- * @pixelformat: pixel format devined by V4L2_PIX_FMT_* macros
- *
- * Struct which matches the MEDIA_BUS_FMT_* codes with the corresponding
- * V4L2_PIX_FMT_* fourcc pixelformat and its bytes per pixel (bpp)
- */
-struct vimc_pix_map {
- unsigned int code;
- unsigned int bpp;
- u32 pixelformat;
-};
-
-/**
- * struct vimc_ent_device - core struct that represents a node in the topology
- *
- * @ent: the pointer to struct media_entity for the node
- * @pads: the list of pads of the node
- * @destroy: callback to destroy the node
- * @process_frame: callback send a frame to that node
- *
- * Each node of the topology must create a vimc_ent_device struct. Depending on
- * the node it will be of an instance of v4l2_subdev or video_device struct
- * where both contains a struct media_entity.
- * Those structures should embedded the vimc_ent_device struct through
- * v4l2_set_subdevdata() and video_set_drvdata() respectivaly, allowing the
- * vimc_ent_device struct to be retrieved from the corresponding struct
- * media_entity
- */
-struct vimc_ent_device {
- struct media_entity *ent;
- struct media_pad *pads;
- void (*destroy)(struct vimc_ent_device *);
- void (*process_frame)(struct vimc_ent_device *ved,
- struct media_pad *sink, const void *frame);
-};
-
-/**
- * vimc_propagate_frame - propagate a frame through the topology
- *
- * @src: the source pad where the frame is being originated
- * @frame: the frame to be propagated
- *
- * This function will call the process_frame callback from the vimc_ent_device
- * struct of the nodes directly connected to the @src pad
- */
-int vimc_propagate_frame(struct media_pad *src, const void *frame);
-
-/**
- * vimc_pads_init - initialize pads
- *
- * @num_pads: number of pads to initialize
- * @pads_flags: flags to use in each pad
- *
- * Helper functions to allocate/initialize pads
- */
-struct media_pad *vimc_pads_init(u16 num_pads,
- const unsigned long *pads_flag);
-
-/**
- * vimc_pads_cleanup - free pads
- *
- * @pads: pointer to the pads
- *
- * Helper function to free the pads initialized with vimc_pads_init
- */
-static inline void vimc_pads_cleanup(struct media_pad *pads)
-{
- kfree(pads);
-}
-
-/**
- * vimc_pix_map_by_code - get vimc_pix_map struct by media bus code
- *
- * @code: media bus format code defined by MEDIA_BUS_FMT_* macros
- */
-const struct vimc_pix_map *vimc_pix_map_by_code(u32 code);
-
-/**
- * vimc_pix_map_by_pixelformat - get vimc_pix_map struct by v4l2 pixel format
- *
- * @pixelformat: pixel format devined by V4L2_PIX_FMT_* macros
- */
-const struct vimc_pix_map *vimc_pix_map_by_pixelformat(u32 pixelformat);
-
-#endif
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
new file mode 100644
index 000000000000..35b15bd4d61d
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-debayer.c
@@ -0,0 +1,601 @@
+/*
+ * vimc-debayer.c Virtual Media Controller Driver
+ *
+ * Copyright (C) 2015-2017 Helen Koike <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/vmalloc.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/v4l2-subdev.h>
+
+#include "vimc-common.h"
+
+#define VIMC_DEB_DRV_NAME "vimc-debayer"
+
+static unsigned int deb_mean_win_size = 3;
+module_param(deb_mean_win_size, uint, 0000);
+MODULE_PARM_DESC(deb_mean_win_size, " the window size to calculate the mean.\n"
+ "NOTE: the window size need to be an odd number, as the main pixel "
+ "stays in the center of the window, otherwise the next odd number "
+ "is considered");
+
+#define IS_SINK(pad) (!pad)
+#define IS_SRC(pad) (pad)
+
+enum vimc_deb_rgb_colors {
+ VIMC_DEB_RED = 0,
+ VIMC_DEB_GREEN = 1,
+ VIMC_DEB_BLUE = 2,
+};
+
+struct vimc_deb_pix_map {
+ u32 code;
+ enum vimc_deb_rgb_colors order[2][2];
+};
+
+struct vimc_deb_device {
+ struct vimc_ent_device ved;
+ struct v4l2_subdev sd;
+ struct device *dev;
+ /* The active format */
+ struct v4l2_mbus_framefmt sink_fmt;
+ u32 src_code;
+ void (*set_rgb_src)(struct vimc_deb_device *vdeb, unsigned int lin,
+ unsigned int col, unsigned int rgb[3]);
+ /* Values calculated when the stream starts */
+ u8 *src_frame;
+ const struct vimc_deb_pix_map *sink_pix_map;
+ unsigned int sink_bpp;
+};
+
+static const struct v4l2_mbus_framefmt sink_fmt_default = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_DEFAULT,
+};
+
+static const struct vimc_deb_pix_map vimc_deb_pix_map_list[] = {
+ {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .order = { { VIMC_DEB_BLUE, VIMC_DEB_GREEN },
+ { VIMC_DEB_GREEN, VIMC_DEB_RED } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .order = { { VIMC_DEB_GREEN, VIMC_DEB_BLUE },
+ { VIMC_DEB_RED, VIMC_DEB_GREEN } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .order = { { VIMC_DEB_GREEN, VIMC_DEB_RED },
+ { VIMC_DEB_BLUE, VIMC_DEB_GREEN } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .order = { { VIMC_DEB_RED, VIMC_DEB_GREEN },
+ { VIMC_DEB_GREEN, VIMC_DEB_BLUE } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .order = { { VIMC_DEB_BLUE, VIMC_DEB_GREEN },
+ { VIMC_DEB_GREEN, VIMC_DEB_RED } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .order = { { VIMC_DEB_GREEN, VIMC_DEB_BLUE },
+ { VIMC_DEB_RED, VIMC_DEB_GREEN } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .order = { { VIMC_DEB_GREEN, VIMC_DEB_RED },
+ { VIMC_DEB_BLUE, VIMC_DEB_GREEN } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .order = { { VIMC_DEB_RED, VIMC_DEB_GREEN },
+ { VIMC_DEB_GREEN, VIMC_DEB_BLUE } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .order = { { VIMC_DEB_BLUE, VIMC_DEB_GREEN },
+ { VIMC_DEB_GREEN, VIMC_DEB_RED } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .order = { { VIMC_DEB_GREEN, VIMC_DEB_BLUE },
+ { VIMC_DEB_RED, VIMC_DEB_GREEN } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .order = { { VIMC_DEB_GREEN, VIMC_DEB_RED },
+ { VIMC_DEB_BLUE, VIMC_DEB_GREEN } }
+ },
+ {
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .order = { { VIMC_DEB_RED, VIMC_DEB_GREEN },
+ { VIMC_DEB_GREEN, VIMC_DEB_BLUE } }
+ },
+};
+
+static const struct vimc_deb_pix_map *vimc_deb_pix_map_by_code(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vimc_deb_pix_map_list); i++)
+ if (vimc_deb_pix_map_list[i].code == code)
+ return &vimc_deb_pix_map_list[i];
+
+ return NULL;
+}
+
+static int vimc_deb_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf;
+ unsigned int i;
+
+ mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ *mf = sink_fmt_default;
+
+ for (i = 1; i < sd->entity.num_pads; i++) {
+ mf = v4l2_subdev_get_try_format(sd, cfg, i);
+ *mf = sink_fmt_default;
+ mf->code = vdeb->src_code;
+ }
+
+ return 0;
+}
+
+static int vimc_deb_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ /* We only support one format for source pads */
+ if (IS_SRC(code->pad)) {
+ struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+
+ if (code->index)
+ return -EINVAL;
+
+ code->code = vdeb->src_code;
+ } else {
+ if (code->index >= ARRAY_SIZE(vimc_deb_pix_map_list))
+ return -EINVAL;
+
+ code->code = vimc_deb_pix_map_list[code->index].code;
+ }
+
+ return 0;
+}
+
+static int vimc_deb_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+
+ if (fse->index)
+ return -EINVAL;
+
+ if (IS_SINK(fse->pad)) {
+ const struct vimc_deb_pix_map *vpix =
+ vimc_deb_pix_map_by_code(fse->code);
+
+ if (!vpix)
+ return -EINVAL;
+ } else if (fse->code != vdeb->src_code) {
+ return -EINVAL;
+ }
+
+ fse->min_width = VIMC_FRAME_MIN_WIDTH;
+ fse->max_width = VIMC_FRAME_MAX_WIDTH;
+ fse->min_height = VIMC_FRAME_MIN_HEIGHT;
+ fse->max_height = VIMC_FRAME_MAX_HEIGHT;
+
+ return 0;
+}
+
+static int vimc_deb_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+
+ /* Get the current sink format */
+ fmt->format = fmt->which == V4L2_SUBDEV_FORMAT_TRY ?
+ *v4l2_subdev_get_try_format(sd, cfg, 0) :
+ vdeb->sink_fmt;
+
+ /* Set the right code for the source pad */
+ if (IS_SRC(fmt->pad))
+ fmt->format.code = vdeb->src_code;
+
+ return 0;
+}
+
+static void vimc_deb_adjust_sink_fmt(struct v4l2_mbus_framefmt *fmt)
+{
+ const struct vimc_deb_pix_map *vpix;
+
+ /* Don't accept a code that is not on the debayer table */
+ vpix = vimc_deb_pix_map_by_code(fmt->code);
+ if (!vpix)
+ fmt->code = sink_fmt_default.code;
+
+ fmt->width = clamp_t(u32, fmt->width, VIMC_FRAME_MIN_WIDTH,
+ VIMC_FRAME_MAX_WIDTH) & ~1;
+ fmt->height = clamp_t(u32, fmt->height, VIMC_FRAME_MIN_HEIGHT,
+ VIMC_FRAME_MAX_HEIGHT) & ~1;
+
+ if (fmt->field == V4L2_FIELD_ANY)
+ fmt->field = sink_fmt_default.field;
+
+ vimc_colorimetry_clamp(fmt);
+}
+
+static int vimc_deb_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *sink_fmt;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ /* Do not change the format while stream is on */
+ if (vdeb->src_frame)
+ return -EBUSY;
+
+ sink_fmt = &vdeb->sink_fmt;
+ } else {
+ sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ }
+
+ /*
+ * Do not change the format of the source pad,
+ * it is propagated from the sink
+ */
+ if (IS_SRC(fmt->pad)) {
+ fmt->format = *sink_fmt;
+ /* TODO: Add support for other formats */
+ fmt->format.code = vdeb->src_code;
+ } else {
+ /* Set the new format in the sink pad */
+ vimc_deb_adjust_sink_fmt(&fmt->format);
+
+ dev_dbg(vdeb->dev, "%s: sink format update: "
+ "old:%dx%d (0x%x, %d, %d, %d, %d) "
+ "new:%dx%d (0x%x, %d, %d, %d, %d)\n", vdeb->sd.name,
+ /* old */
+ sink_fmt->width, sink_fmt->height, sink_fmt->code,
+ sink_fmt->colorspace, sink_fmt->quantization,
+ sink_fmt->xfer_func, sink_fmt->ycbcr_enc,
+ /* new */
+ fmt->format.width, fmt->format.height, fmt->format.code,
+ fmt->format.colorspace, fmt->format.quantization,
+ fmt->format.xfer_func, fmt->format.ycbcr_enc);
+
+ *sink_fmt = fmt->format;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops vimc_deb_pad_ops = {
+ .init_cfg = vimc_deb_init_cfg,
+ .enum_mbus_code = vimc_deb_enum_mbus_code,
+ .enum_frame_size = vimc_deb_enum_frame_size,
+ .get_fmt = vimc_deb_get_fmt,
+ .set_fmt = vimc_deb_set_fmt,
+};
+
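+/* Write one RGB888 pixel (3 bytes) at position (lin, col) in the source frame */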
+static void vimc_deb_set_rgb_mbus_fmt_rgb888_1x24(struct vimc_deb_device *vdeb,
+ unsigned int lin,
+ unsigned int col,
+ unsigned int rgb[3])
+{
+ unsigned int i, index;
+
+ index = VIMC_FRAME_INDEX(lin, col, vdeb->sink_fmt.width, 3);
+ for (i = 0; i < 3; i++)
+ vdeb->src_frame[index + i] = rgb[i];
+}
+
+static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (enable) {
+ const struct vimc_pix_map *vpix;
+ unsigned int frame_size;
+
+ if (vdeb->src_frame)
+ return 0;
+
+ /* Calculate the frame size of the source pad */
+ vpix = vimc_pix_map_by_code(vdeb->src_code);
+ frame_size = vdeb->sink_fmt.width * vdeb->sink_fmt.height *
+ vpix->bpp;
+
+ /* Save the bytes per pixel of the sink */
+ vpix = vimc_pix_map_by_code(vdeb->sink_fmt.code);
+ vdeb->sink_bpp = vpix->bpp;
+
+ /* Get the corresponding pixel map from the table */
+ vdeb->sink_pix_map =
+ vimc_deb_pix_map_by_code(vdeb->sink_fmt.code);
+
+ /*
+ * Allocate the frame buffer. Use vmalloc to be able to
+ * allocate a large amount of memory
+ */
+ vdeb->src_frame = vmalloc(frame_size);
+ if (!vdeb->src_frame)
+ return -ENOMEM;
+
+ /* Turn the stream on in the subdevices directly connected */
+ ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 1);
+ if (ret) {
+ vfree(vdeb->src_frame);
+ vdeb->src_frame = NULL;
+ return ret;
+ }
+ } else {
+ if (!vdeb->src_frame)
+ return 0;
+
+ /* Disable streaming from the pipe */
+ ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 0);
+ if (ret)
+ return ret;
+
+ vfree(vdeb->src_frame);
+ vdeb->src_frame = NULL;
+ }
+
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops vimc_deb_video_ops = {
+ .s_stream = vimc_deb_s_stream,
+};
+
+static const struct v4l2_subdev_ops vimc_deb_ops = {
+ .pad = &vimc_deb_pad_ops,
+ .video = &vimc_deb_video_ops,
+};
+
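+/* Assemble a pixel value from its n_bytes raw bytes, least significant byte first */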
+static unsigned int vimc_deb_get_val(const u8 *bytes,
+ const unsigned int n_bytes)
+{
+ unsigned int i;
+ unsigned int acc = 0;
+
+ for (i = 0; i < n_bytes; i++)
+ acc = acc + (bytes[i] << (8 * i));
+
+ return acc;
+}
+
+static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb,
+ const u8 *frame,
+ const unsigned int lin,
+ const unsigned int col,
+ unsigned int rgb[3])
+{
+ unsigned int i, seek, wlin, wcol;
+ unsigned int n_rgb[3] = {0, 0, 0};
+
+ for (i = 0; i < 3; i++)
+ rgb[i] = 0;
+
+ /*
+	 * Calculate how many pixels we need to subtract to get to the pixel in
+ * the top left corner of the mean window (considering the current
+ * pixel as the center)
+ */
+ seek = deb_mean_win_size / 2;
+
+ /* Sum the values of the colors in the mean window */
+
+ dev_dbg(vdeb->dev,
+ "deb: %s: --- Calc pixel %dx%d, window mean %d, seek %d ---\n",
+		vdeb->sd.name, lin, col, deb_mean_win_size, seek);
+
+ /*
+ * Iterate through all the lines in the mean window, start
+ * with zero if the pixel is outside the frame and don't pass
+ * the height when the pixel is in the bottom border of the
+ * frame
+ */
+ for (wlin = seek > lin ? 0 : lin - seek;
+ wlin < lin + seek + 1 && wlin < vdeb->sink_fmt.height;
+ wlin++) {
+
+ /*
+ * Iterate through all the columns in the mean window, start
+ * with zero if the pixel is outside the frame and don't pass
+ * the width when the pixel is in the right border of the
+ * frame
+ */
+ for (wcol = seek > col ? 0 : col - seek;
+ wcol < col + seek + 1 && wcol < vdeb->sink_fmt.width;
+ wcol++) {
+ enum vimc_deb_rgb_colors color;
+ unsigned int index;
+
+ /* Check which color this pixel is */
+ color = vdeb->sink_pix_map->order[wlin % 2][wcol % 2];
+
+ index = VIMC_FRAME_INDEX(wlin, wcol,
+ vdeb->sink_fmt.width,
+ vdeb->sink_bpp);
+
+ dev_dbg(vdeb->dev,
+ "deb: %s: RGB CALC: frame index %d, win pos %dx%d, color %d\n",
+ vdeb->sd.name, index, wlin, wcol, color);
+
+ /* Get its value */
+ rgb[color] = rgb[color] +
+ vimc_deb_get_val(&frame[index], vdeb->sink_bpp);
+
+ /* Save how many values we already added */
+ n_rgb[color]++;
+
+ dev_dbg(vdeb->dev, "deb: %s: RGB CALC: val %d, n %d\n",
+ vdeb->sd.name, rgb[color], n_rgb[color]);
+ }
+ }
+
+ /* Calculate the mean */
+ for (i = 0; i < 3; i++) {
+ dev_dbg(vdeb->dev,
+ "deb: %s: PRE CALC: %dx%d Color %d, val %d, n %d\n",
+ vdeb->sd.name, lin, col, i, rgb[i], n_rgb[i]);
+
+ if (n_rgb[i])
+ rgb[i] = rgb[i] / n_rgb[i];
+
+ dev_dbg(vdeb->dev,
+ "deb: %s: FINAL CALC: %dx%d Color %d, val %d\n",
+ vdeb->sd.name, lin, col, i, rgb[i]);
+ }
+}
+
+static void vimc_deb_process_frame(struct vimc_ent_device *ved,
+ struct media_pad *sink,
+ const void *sink_frame)
+{
+ struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device,
+ ved);
+ unsigned int rgb[3];
+ unsigned int i, j;
+
+ /* If the stream in this node is not active, just return */
+ if (!vdeb->src_frame)
+ return;
+
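+	/* Demosaic: compute the mean RGB value of each sink pixel and store it in the source frame */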
+ for (i = 0; i < vdeb->sink_fmt.height; i++)
+ for (j = 0; j < vdeb->sink_fmt.width; j++) {
+ vimc_deb_calc_rgb_sink(vdeb, sink_frame, i, j, rgb);
+ vdeb->set_rgb_src(vdeb, i, j, rgb);
+ }
+
+ /* Propagate the frame through all source pads */
+ for (i = 1; i < vdeb->sd.entity.num_pads; i++) {
+ struct media_pad *pad = &vdeb->sd.entity.pads[i];
+
+ vimc_propagate_frame(pad, vdeb->src_frame);
+ }
+}
+
+static void vimc_deb_comp_unbind(struct device *comp, struct device *master,
+ void *master_data)
+{
+ struct vimc_ent_device *ved = dev_get_drvdata(comp);
+ struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device,
+ ved);
+
+ vimc_ent_sd_unregister(ved, &vdeb->sd);
+ kfree(vdeb);
+}
+
+static int vimc_deb_comp_bind(struct device *comp, struct device *master,
+ void *master_data)
+{
+ struct v4l2_device *v4l2_dev = master_data;
+ struct vimc_platform_data *pdata = comp->platform_data;
+ struct vimc_deb_device *vdeb;
+ int ret;
+
+ /* Allocate the vdeb struct */
+ vdeb = kzalloc(sizeof(*vdeb), GFP_KERNEL);
+ if (!vdeb)
+ return -ENOMEM;
+
+ /* Initialize ved and sd */
+ ret = vimc_ent_sd_register(&vdeb->ved, &vdeb->sd, v4l2_dev,
+ pdata->entity_name,
+ MEDIA_ENT_F_ATV_DECODER, 2,
+ (const unsigned long[2]) {MEDIA_PAD_FL_SINK,
+ MEDIA_PAD_FL_SOURCE},
+ &vimc_deb_ops);
+ if (ret) {
+ kfree(vdeb);
+ return ret;
+ }
+
+ vdeb->ved.process_frame = vimc_deb_process_frame;
+ dev_set_drvdata(comp, &vdeb->ved);
+ vdeb->dev = comp;
+
+ /* Initialize the frame format */
+ vdeb->sink_fmt = sink_fmt_default;
+ /*
+ * TODO: Add support for more output formats, we only support
+ * RGB888 for now
+ * NOTE: the src format is always the same as the sink, except
+ * for the code
+ */
+ vdeb->src_code = MEDIA_BUS_FMT_RGB888_1X24;
+ vdeb->set_rgb_src = vimc_deb_set_rgb_mbus_fmt_rgb888_1x24;
+
+ return 0;
+}
+
+static const struct component_ops vimc_deb_comp_ops = {
+ .bind = vimc_deb_comp_bind,
+ .unbind = vimc_deb_comp_unbind,
+};
+
+static int vimc_deb_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vimc_deb_comp_ops);
+}
+
+static int vimc_deb_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vimc_deb_comp_ops);
+
+ return 0;
+}
+
+static struct platform_driver vimc_deb_pdrv = {
+ .probe = vimc_deb_probe,
+ .remove = vimc_deb_remove,
+ .driver = {
+ .name = VIMC_DEB_DRV_NAME,
+ },
+};
+
+static const struct platform_device_id vimc_deb_driver_ids[] = {
+ {
+ .name = VIMC_DEB_DRV_NAME,
+ },
+ { }
+};
+
+module_platform_driver(vimc_deb_pdrv);
+
+MODULE_DEVICE_TABLE(platform, vimc_deb_driver_ids);
+
+MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Debayer");
+MODULE_AUTHOR("Helen Mae Koike Fornazier <[email protected]>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
new file mode 100644
index 000000000000..fe77505d2679
--- /dev/null
+++ b/drivers/media/platform/vimc/vimc-scaler.c
@@ -0,0 +1,455 @@
+/*
+ * vimc-scaler.c Virtual Media Controller Driver
+ *
+ * Copyright (C) 2015-2017 Helen Koike <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/vmalloc.h>
+#include <linux/v4l2-mediabus.h>
+#include <media/v4l2-subdev.h>
+
+#include "vimc-common.h"
+
+#define VIMC_SCA_DRV_NAME "vimc-scaler"
+
+static unsigned int sca_mult = 3;
+module_param(sca_mult, uint, 0000);
+MODULE_PARM_DESC(sca_mult, "the image size multiplier");
+
+#define IS_SINK(pad) (!pad)
+#define IS_SRC(pad) (pad)
+#define MAX_ZOOM 8
+
+struct vimc_sca_device {
+ struct vimc_ent_device ved;
+ struct v4l2_subdev sd;
+ struct device *dev;
+ /* NOTE: the source fmt is the same as the sink
+	 * with the width and height multiplied by sca_mult
+ */
+ struct v4l2_mbus_framefmt sink_fmt;
+ /* Values calculated when the stream starts */
+ u8 *src_frame;
+ unsigned int src_line_size;
+ unsigned int bpp;
+};
+
+static const struct v4l2_mbus_framefmt sink_fmt_default = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_DEFAULT,
+};
+
+static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct v4l2_mbus_framefmt *mf;
+ unsigned int i;
+
+ mf = v4l2_subdev_get_try_format(sd, cfg, 0);
+ *mf = sink_fmt_default;
+
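+	/* Source pads default to the sink format scaled up by sca_mult */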
+ for (i = 1; i < sd->entity.num_pads; i++) {
+ mf = v4l2_subdev_get_try_format(sd, cfg, i);
+ *mf = sink_fmt_default;
+ mf->width = mf->width * sca_mult;
+ mf->height = mf->height * sca_mult;
+ }
+
+ return 0;
+}
+
+static int vimc_sca_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ const struct vimc_pix_map *vpix = vimc_pix_map_by_index(code->index);
+
+ /* We don't support bayer format */
+ if (!vpix || vpix->bayer)
+ return -EINVAL;
+
+ code->code = vpix->code;
+
+ return 0;
+}
+
+static int vimc_sca_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ const struct vimc_pix_map *vpix;
+
+ if (fse->index)
+ return -EINVAL;
+
+ /* Only accept code in the pix map table in non bayer format */
+ vpix = vimc_pix_map_by_code(fse->code);
+ if (!vpix || vpix->bayer)
+ return -EINVAL;
+
+ fse->min_width = VIMC_FRAME_MIN_WIDTH;
+ fse->min_height = VIMC_FRAME_MIN_HEIGHT;
+
+ if (IS_SINK(fse->pad)) {
+ fse->max_width = VIMC_FRAME_MAX_WIDTH;
+ fse->max_height = VIMC_FRAME_MAX_HEIGHT;
+ } else {
+ fse->max_width = VIMC_FRAME_MAX_WIDTH * MAX_ZOOM;
+ fse->max_height = VIMC_FRAME_MAX_HEIGHT * MAX_ZOOM;
+ }
+
+ return 0;
+}
+
+static int vimc_sca_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+
+ /* Get the current sink format */
+ format->format = (format->which == V4L2_SUBDEV_FORMAT_TRY) ?
+ *v4l2_subdev_get_try_format(sd, cfg, 0) :
+ vsca->sink_fmt;
+
+ /* Scale the frame size for the source pad */
+ if (IS_SRC(format->pad)) {
+ format->format.width = vsca->sink_fmt.width * sca_mult;
+ format->format.height = vsca->sink_fmt.height * sca_mult;
+ }
+
+ return 0;
+}
+
+static void vimc_sca_adjust_sink_fmt(struct v4l2_mbus_framefmt *fmt)
+{
+ const struct vimc_pix_map *vpix;
+
+ /* Only accept code in the pix map table in non bayer format */
+ vpix = vimc_pix_map_by_code(fmt->code);
+ if (!vpix || vpix->bayer)
+ fmt->code = sink_fmt_default.code;
+
+ fmt->width = clamp_t(u32, fmt->width, VIMC_FRAME_MIN_WIDTH,
+ VIMC_FRAME_MAX_WIDTH) & ~1;
+ fmt->height = clamp_t(u32, fmt->height, VIMC_FRAME_MIN_HEIGHT,
+ VIMC_FRAME_MAX_HEIGHT) & ~1;
+
+ if (fmt->field == V4L2_FIELD_ANY)
+ fmt->field = sink_fmt_default.field;
+
+ vimc_colorimetry_clamp(fmt);
+}
+
+static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *sink_fmt;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ /* Do not change the format while stream is on */
+ if (vsca->src_frame)
+ return -EBUSY;
+
+ sink_fmt = &vsca->sink_fmt;
+ } else {
+ sink_fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ }
+
+ /*
+ * Do not change the format of the source pad,
+ * it is propagated from the sink
+ */
+ if (IS_SRC(fmt->pad)) {
+ fmt->format = *sink_fmt;
+ fmt->format.width = sink_fmt->width * sca_mult;
+ fmt->format.height = sink_fmt->height * sca_mult;
+ } else {
+ /* Set the new format in the sink pad */
+ vimc_sca_adjust_sink_fmt(&fmt->format);
+
+ dev_dbg(vsca->dev, "%s: sink format update: "
+ "old:%dx%d (0x%x, %d, %d, %d, %d) "
+ "new:%dx%d (0x%x, %d, %d, %d, %d)\n", vsca->sd.name,
+ /* old */
+ sink_fmt->width, sink_fmt->height, sink_fmt->code,
+ sink_fmt->colorspace, sink_fmt->quantization,
+ sink_fmt->xfer_func, sink_fmt->ycbcr_enc,
+ /* new */
+ fmt->format.width, fmt->format.height, fmt->format.code,
+ fmt->format.colorspace, fmt->format.quantization,
+ fmt->format.xfer_func, fmt->format.ycbcr_enc);
+
+ *sink_fmt = fmt->format;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops vimc_sca_pad_ops = {
+ .init_cfg = vimc_sca_init_cfg,
+ .enum_mbus_code = vimc_sca_enum_mbus_code,
+ .enum_frame_size = vimc_sca_enum_frame_size,
+ .get_fmt = vimc_sca_get_fmt,
+ .set_fmt = vimc_sca_set_fmt,
+};
+
+static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
+ int ret;
+
+ if (enable) {
+ const struct vimc_pix_map *vpix;
+ unsigned int frame_size;
+
+ if (vsca->src_frame)
+ return 0;
+
+ /* Save the bytes per pixel of the sink */
+ vpix = vimc_pix_map_by_code(vsca->sink_fmt.code);
+ vsca->bpp = vpix->bpp;
+
+ /* Calculate the width in bytes of the src frame */
+ vsca->src_line_size = vsca->sink_fmt.width *
+ sca_mult * vsca->bpp;
+
+ /* Calculate the frame size of the source pad */
+ frame_size = vsca->src_line_size * vsca->sink_fmt.height *
+ sca_mult;
+
+ /* Allocate the frame buffer. Use vmalloc to be able to
+ * allocate a large amount of memory
+ */
+ vsca->src_frame = vmalloc(frame_size);
+ if (!vsca->src_frame)
+ return -ENOMEM;
+
+ /* Turn the stream on in the subdevices directly connected */
+ ret = vimc_pipeline_s_stream(&vsca->sd.entity, 1);
+ if (ret) {
+ vfree(vsca->src_frame);
+ vsca->src_frame = NULL;
+ return ret;
+ }
+ } else {
+ if (!vsca->src_frame)
+ return 0;
+
+ /* Disable streaming from the pipe */
+ ret = vimc_pipeline_s_stream(&vsca->sd.entity, 0);
+ if (ret)
+ return ret;
+
+ vfree(vsca->src_frame);
+ vsca->src_frame = NULL;
+ }
+
+ return 0;
+}
+
+static struct v4l2_subdev_video_ops vimc_sca_video_ops = {
+ .s_stream = vimc_sca_s_stream,
+};
+
+static const struct v4l2_subdev_ops vimc_sca_ops = {
+ .pad = &vimc_sca_pad_ops,
+ .video = &vimc_sca_video_ops,
+};
+
+static void vimc_sca_fill_pix(u8 *const ptr,
+ const u8 *const pixel,
+ const unsigned int bpp)
+{
+ unsigned int i;
+
+ /* copy the pixel to the pointer */
+ for (i = 0; i < bpp; i++)
+ ptr[i] = pixel[i];
+}
+
+static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
+ const unsigned int lin, const unsigned int col,
+ const u8 *const sink_frame)
+{
+ unsigned int i, j, index;
+ const u8 *pixel;
+
+ /* Point to the pixel value in position (lin, col) in the sink frame */
+ index = VIMC_FRAME_INDEX(lin, col,
+ vsca->sink_fmt.width,
+ vsca->bpp);
+ pixel = &sink_frame[index];
+
+ dev_dbg(vsca->dev,
+ "sca: %s: --- scale_pix sink pos %dx%d, index %d ---\n",
+ vsca->sd.name, lin, col, index);
+
+	/* Point to the place we are going to put the first pixel
+ * in the scaled src frame
+ */
+ index = VIMC_FRAME_INDEX(lin * sca_mult, col * sca_mult,
+ vsca->sink_fmt.width * sca_mult, vsca->bpp);
+
+ dev_dbg(vsca->dev, "sca: %s: scale_pix src pos %dx%d, index %d\n",
+ vsca->sd.name, lin * sca_mult, col * sca_mult, index);
+
+ /* Repeat this pixel mult times */
+ for (i = 0; i < sca_mult; i++) {
+		/* Iterate over the start of each pixel
+		 * repetition within the line
+ */
+ for (j = 0; j < sca_mult * vsca->bpp; j += vsca->bpp) {
+ dev_dbg(vsca->dev,
+ "sca: %s: sca: scale_pix src pos %d\n",
+ vsca->sd.name, index + j);
+
+ /* copy the pixel to the position index + j */
+ vimc_sca_fill_pix(&vsca->src_frame[index + j],
+ pixel, vsca->bpp);
+ }
+
+ /* move the index to the next line */
+ index += vsca->src_line_size;
+ }
+}
+
+static void vimc_sca_fill_src_frame(const struct vimc_sca_device *const vsca,
+ const u8 *const sink_frame)
+{
+ unsigned int i, j;
+
+ /* Scale each pixel from the original sink frame */
+ /* TODO: implement scale down, only scale up is supported for now */
+ for (i = 0; i < vsca->sink_fmt.height; i++)
+ for (j = 0; j < vsca->sink_fmt.width; j++)
+ vimc_sca_scale_pix(vsca, i, j, sink_frame);
+}
+
+static void vimc_sca_process_frame(struct vimc_ent_device *ved,
+ struct media_pad *sink,
+ const void *sink_frame)
+{
+ struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device,
+ ved);
+ unsigned int i;
+
+ /* If the stream in this node is not active, just return */
+ if (!vsca->src_frame)
+ return;
+
+ vimc_sca_fill_src_frame(vsca, sink_frame);
+
+ /* Propagate the frame through all source pads */
+ for (i = 1; i < vsca->sd.entity.num_pads; i++) {
+ struct media_pad *pad = &vsca->sd.entity.pads[i];
+
+ vimc_propagate_frame(pad, vsca->src_frame);
+ }
+}
+
+static void vimc_sca_comp_unbind(struct device *comp, struct device *master,
+ void *master_data)
+{
+ struct vimc_ent_device *ved = dev_get_drvdata(comp);
+ struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device,
+ ved);
+
+ vimc_ent_sd_unregister(ved, &vsca->sd);
+ kfree(vsca);
+}
+
+static int vimc_sca_comp_bind(struct device *comp, struct device *master,
+ void *master_data)
+{
+ struct v4l2_device *v4l2_dev = master_data;
+ struct vimc_platform_data *pdata = comp->platform_data;
+ struct vimc_sca_device *vsca;
+ int ret;
+
+ /* Allocate the vsca struct */
+ vsca = kzalloc(sizeof(*vsca), GFP_KERNEL);
+ if (!vsca)
+ return -ENOMEM;
+
+ /* Initialize ved and sd */
+ ret = vimc_ent_sd_register(&vsca->ved, &vsca->sd, v4l2_dev,
+ pdata->entity_name,
+ MEDIA_ENT_F_ATV_DECODER, 2,
+ (const unsigned long[2]) {MEDIA_PAD_FL_SINK,
+ MEDIA_PAD_FL_SOURCE},
+ &vimc_sca_ops);
+ if (ret) {
+ kfree(vsca);
+ return ret;
+ }
+
+ vsca->ved.process_frame = vimc_sca_process_frame;
+ dev_set_drvdata(comp, &vsca->ved);
+ vsca->dev = comp;
+
+ /* Initialize the frame format */
+ vsca->sink_fmt = sink_fmt_default;
+
+ return 0;
+}
+
+static const struct component_ops vimc_sca_comp_ops = {
+ .bind = vimc_sca_comp_bind,
+ .unbind = vimc_sca_comp_unbind,
+};
+
+static int vimc_sca_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vimc_sca_comp_ops);
+}
+
+static int vimc_sca_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vimc_sca_comp_ops);
+
+ return 0;
+}
+
+static struct platform_driver vimc_sca_pdrv = {
+ .probe = vimc_sca_probe,
+ .remove = vimc_sca_remove,
+ .driver = {
+ .name = VIMC_SCA_DRV_NAME,
+ },
+};
+
+static const struct platform_device_id vimc_sca_driver_ids[] = {
+ {
+ .name = VIMC_SCA_DRV_NAME,
+ },
+ { }
+};
+
+module_platform_driver(vimc_sca_pdrv);
+
+MODULE_DEVICE_TABLE(platform, vimc_sca_driver_ids);
+
+MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Scaler");
+MODULE_AUTHOR("Helen Mae Koike Fornazier <[email protected]>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
index 591f6a4f8bd3..ebdbbe8c05ed 100644
--- a/drivers/media/platform/vimc/vimc-sensor.c
+++ b/drivers/media/platform/vimc/vimc-sensor.c
@@ -15,36 +15,64 @@
*
*/
+#include <linux/component.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/v4l2-mediabus.h>
#include <linux/vmalloc.h>
#include <media/v4l2-subdev.h>
+#include <media/v4l2-tpg.h>
-#include "vimc-sensor.h"
+#include "vimc-common.h"
+
+#define VIMC_SEN_DRV_NAME "vimc-sensor"
struct vimc_sen_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
+ struct device *dev;
+ struct tpg_data tpg;
struct task_struct *kthread_sen;
u8 *frame;
/* The active format */
struct v4l2_mbus_framefmt mbus_format;
- int frame_size;
};
+static const struct v4l2_mbus_framefmt fmt_default = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_DEFAULT,
+};
+
+static int vimc_sen_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ unsigned int i;
+
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = v4l2_subdev_get_try_format(sd, cfg, i);
+ *mf = fmt_default;
+ }
+
+ return 0;
+}
+
static int vimc_sen_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
{
- struct vimc_sen_device *vsen =
- container_of(sd, struct vimc_sen_device, sd);
+ const struct vimc_pix_map *vpix = vimc_pix_map_by_index(code->index);
- /* TODO: Add support for other codes */
- if (code->index)
+ if (!vpix)
return -EINVAL;
- code->code = vsen->mbus_format.code;
+ code->code = vpix->code;
return 0;
}
@@ -53,51 +81,123 @@ static int vimc_sen_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
- struct vimc_sen_device *vsen =
- container_of(sd, struct vimc_sen_device, sd);
+ const struct vimc_pix_map *vpix;
- /* TODO: Add support to other formats */
if (fse->index)
return -EINVAL;
- /* TODO: Add support for other codes */
- if (fse->code != vsen->mbus_format.code)
+ /* Only accept code in the pix map table */
+ vpix = vimc_pix_map_by_code(fse->code);
+ if (!vpix)
return -EINVAL;
- fse->min_width = vsen->mbus_format.width;
- fse->max_width = vsen->mbus_format.width;
- fse->min_height = vsen->mbus_format.height;
- fse->max_height = vsen->mbus_format.height;
+ fse->min_width = VIMC_FRAME_MIN_WIDTH;
+ fse->max_width = VIMC_FRAME_MAX_WIDTH;
+ fse->min_height = VIMC_FRAME_MIN_HEIGHT;
+ fse->max_height = VIMC_FRAME_MAX_HEIGHT;
return 0;
}
static int vimc_sen_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *format)
+ struct v4l2_subdev_format *fmt)
{
struct vimc_sen_device *vsen =
container_of(sd, struct vimc_sen_device, sd);
- format->format = vsen->mbus_format;
+ fmt->format = fmt->which == V4L2_SUBDEV_FORMAT_TRY ?
+ *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) :
+ vsen->mbus_format;
+
+ return 0;
+}
+
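+/* Apply the current active mbus format to the test pattern generator */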
+static void vimc_sen_tpg_s_format(struct vimc_sen_device *vsen)
+{
+ const struct vimc_pix_map *vpix =
+ vimc_pix_map_by_code(vsen->mbus_format.code);
+
+ tpg_reset_source(&vsen->tpg, vsen->mbus_format.width,
+ vsen->mbus_format.height, vsen->mbus_format.field);
+ tpg_s_bytesperline(&vsen->tpg, 0, vsen->mbus_format.width * vpix->bpp);
+ tpg_s_buf_height(&vsen->tpg, vsen->mbus_format.height);
+ tpg_s_fourcc(&vsen->tpg, vpix->pixelformat);
+ /* TODO: add support for V4L2_FIELD_ALTERNATE */
+ tpg_s_field(&vsen->tpg, vsen->mbus_format.field, false);
+ tpg_s_colorspace(&vsen->tpg, vsen->mbus_format.colorspace);
+ tpg_s_ycbcr_enc(&vsen->tpg, vsen->mbus_format.ycbcr_enc);
+ tpg_s_quantization(&vsen->tpg, vsen->mbus_format.quantization);
+ tpg_s_xfer_func(&vsen->tpg, vsen->mbus_format.xfer_func);
+}
+
+static void vimc_sen_adjust_fmt(struct v4l2_mbus_framefmt *fmt)
+{
+ const struct vimc_pix_map *vpix;
+
+ /* Only accept code in the pix map table */
+ vpix = vimc_pix_map_by_code(fmt->code);
+ if (!vpix)
+ fmt->code = fmt_default.code;
+
+ fmt->width = clamp_t(u32, fmt->width, VIMC_FRAME_MIN_WIDTH,
+ VIMC_FRAME_MAX_WIDTH) & ~1;
+ fmt->height = clamp_t(u32, fmt->height, VIMC_FRAME_MIN_HEIGHT,
+ VIMC_FRAME_MAX_HEIGHT) & ~1;
+
+ /* TODO: add support for V4L2_FIELD_ALTERNATE */
+ if (fmt->field == V4L2_FIELD_ANY || fmt->field == V4L2_FIELD_ALTERNATE)
+ fmt->field = fmt_default.field;
+
+ vimc_colorimetry_clamp(fmt);
+}
+
+static int vimc_sen_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vimc_sen_device *vsen = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ /* Do not change the format while stream is on */
+ if (vsen->frame)
+ return -EBUSY;
+
+ mf = &vsen->mbus_format;
+ } else {
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ }
+
+ /* Set the new format */
+ vimc_sen_adjust_fmt(&fmt->format);
+
+ dev_dbg(vsen->dev, "%s: format update: "
+ "old:%dx%d (0x%x, %d, %d, %d, %d) "
+ "new:%dx%d (0x%x, %d, %d, %d, %d)\n", vsen->sd.name,
+ /* old */
+ mf->width, mf->height, mf->code,
+ mf->colorspace, mf->quantization,
+ mf->xfer_func, mf->ycbcr_enc,
+ /* new */
+ fmt->format.width, fmt->format.height, fmt->format.code,
+ fmt->format.colorspace, fmt->format.quantization,
+ fmt->format.xfer_func, fmt->format.ycbcr_enc);
+
+ *mf = fmt->format;
return 0;
}
static const struct v4l2_subdev_pad_ops vimc_sen_pad_ops = {
+ .init_cfg = vimc_sen_init_cfg,
.enum_mbus_code = vimc_sen_enum_mbus_code,
.enum_frame_size = vimc_sen_enum_frame_size,
.get_fmt = vimc_sen_get_fmt,
- /* TODO: Add support to other formats */
- .set_fmt = vimc_sen_get_fmt,
+ .set_fmt = vimc_sen_set_fmt,
};
-/* media operations */
-static const struct media_entity_operations vimc_sen_mops = {
- .link_validate = v4l2_subdev_link_validate,
-};
-
-static int vimc_thread_sen(void *data)
+static int vimc_sen_tpg_thread(void *data)
{
struct vimc_sen_device *vsen = data;
unsigned int i;
@@ -110,7 +210,7 @@ static int vimc_thread_sen(void *data)
if (kthread_should_stop())
break;
- memset(vsen->frame, 100, vsen->frame_size);
+ tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
/* Send the frame to all source pads */
for (i = 0; i < vsen->sd.entity.num_pads; i++)
@@ -132,50 +232,57 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
if (enable) {
const struct vimc_pix_map *vpix;
+ unsigned int frame_size;
if (vsen->kthread_sen)
- return -EINVAL;
+ /* tpg is already executing */
+ return 0;
/* Calculate the frame size */
vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
- vsen->frame_size = vsen->mbus_format.width * vpix->bpp *
- vsen->mbus_format.height;
+ frame_size = vsen->mbus_format.width * vpix->bpp *
+ vsen->mbus_format.height;
/*
* Allocate the frame buffer. Use vmalloc to be able to
* allocate a large amount of memory
*/
- vsen->frame = vmalloc(vsen->frame_size);
+ vsen->frame = vmalloc(frame_size);
if (!vsen->frame)
return -ENOMEM;
+ /* configure the test pattern generator */
+ vimc_sen_tpg_s_format(vsen);
+
/* Initialize the image generator thread */
- vsen->kthread_sen = kthread_run(vimc_thread_sen, vsen, "%s-sen",
- vsen->sd.v4l2_dev->name);
+ vsen->kthread_sen = kthread_run(vimc_sen_tpg_thread, vsen,
+ "%s-sen", vsen->sd.v4l2_dev->name);
if (IS_ERR(vsen->kthread_sen)) {
- dev_err(vsen->sd.v4l2_dev->dev,
- "%s: kernel_thread() failed\n", vsen->sd.name);
+ dev_err(vsen->dev, "%s: kernel_thread() failed\n",
+ vsen->sd.name);
vfree(vsen->frame);
vsen->frame = NULL;
return PTR_ERR(vsen->kthread_sen);
}
} else {
if (!vsen->kthread_sen)
- return -EINVAL;
+ return 0;
/* Stop image generator */
ret = kthread_stop(vsen->kthread_sen);
- vsen->kthread_sen = NULL;
+ if (ret)
+ return ret;
+ vsen->kthread_sen = NULL;
vfree(vsen->frame);
vsen->frame = NULL;
- return ret;
+ return 0;
}
return 0;
}
-struct v4l2_subdev_video_ops vimc_sen_video_ops = {
+static struct v4l2_subdev_video_ops vimc_sen_video_ops = {
.s_stream = vimc_sen_s_stream,
};
@@ -184,93 +291,99 @@ static const struct v4l2_subdev_ops vimc_sen_ops = {
.video = &vimc_sen_video_ops,
};
-static void vimc_sen_destroy(struct vimc_ent_device *ved)
+static void vimc_sen_comp_unbind(struct device *comp, struct device *master,
+ void *master_data)
{
+ struct vimc_ent_device *ved = dev_get_drvdata(comp);
struct vimc_sen_device *vsen =
container_of(ved, struct vimc_sen_device, ved);
- v4l2_device_unregister_subdev(&vsen->sd);
- media_entity_cleanup(ved->ent);
+ vimc_ent_sd_unregister(ved, &vsen->sd);
+ tpg_free(&vsen->tpg);
kfree(vsen);
}
-struct vimc_ent_device *vimc_sen_create(struct v4l2_device *v4l2_dev,
- const char *const name,
- u16 num_pads,
- const unsigned long *pads_flag)
+static int vimc_sen_comp_bind(struct device *comp, struct device *master,
+ void *master_data)
{
+ struct v4l2_device *v4l2_dev = master_data;
+ struct vimc_platform_data *pdata = comp->platform_data;
struct vimc_sen_device *vsen;
- unsigned int i;
int ret;
- /* NOTE: a sensor node may be created with more then one pad */
- if (!name || !num_pads || !pads_flag)
- return ERR_PTR(-EINVAL);
-
- /* check if all pads are sources */
- for (i = 0; i < num_pads; i++)
- if (!(pads_flag[i] & MEDIA_PAD_FL_SOURCE))
- return ERR_PTR(-EINVAL);
-
/* Allocate the vsen struct */
vsen = kzalloc(sizeof(*vsen), GFP_KERNEL);
if (!vsen)
- return ERR_PTR(-ENOMEM);
-
- /* Allocate the pads */
- vsen->ved.pads = vimc_pads_init(num_pads, pads_flag);
- if (IS_ERR(vsen->ved.pads)) {
- ret = PTR_ERR(vsen->ved.pads);
+ return -ENOMEM;
+
+ /* Initialize ved and sd */
+ ret = vimc_ent_sd_register(&vsen->ved, &vsen->sd, v4l2_dev,
+ pdata->entity_name,
+ MEDIA_ENT_F_ATV_DECODER, 1,
+ (const unsigned long[1]) {MEDIA_PAD_FL_SOURCE},
+ &vimc_sen_ops);
+ if (ret)
goto err_free_vsen;
- }
-
- /* Fill the vimc_ent_device struct */
- vsen->ved.destroy = vimc_sen_destroy;
- vsen->ved.ent = &vsen->sd.entity;
- /* Initialize the subdev */
- v4l2_subdev_init(&vsen->sd, &vimc_sen_ops);
- vsen->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
- vsen->sd.entity.ops = &vimc_sen_mops;
- vsen->sd.owner = THIS_MODULE;
- strlcpy(vsen->sd.name, name, sizeof(vsen->sd.name));
- v4l2_set_subdevdata(&vsen->sd, &vsen->ved);
+ dev_set_drvdata(comp, &vsen->ved);
+ vsen->dev = comp;
- /* Expose this subdev to user space */
- vsen->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ /* Initialize the frame format */
+ vsen->mbus_format = fmt_default;
- /* Initialize the media entity */
- ret = media_entity_pads_init(&vsen->sd.entity,
- num_pads, vsen->ved.pads);
+ /* Initialize the test pattern generator */
+ tpg_init(&vsen->tpg, vsen->mbus_format.width,
+ vsen->mbus_format.height);
+ ret = tpg_alloc(&vsen->tpg, VIMC_FRAME_MAX_WIDTH);
if (ret)
- goto err_clean_pads;
-
- /* Set the active frame format (this is hardcoded for now) */
- vsen->mbus_format.width = 640;
- vsen->mbus_format.height = 480;
- vsen->mbus_format.code = MEDIA_BUS_FMT_RGB888_1X24;
- vsen->mbus_format.field = V4L2_FIELD_NONE;
- vsen->mbus_format.colorspace = V4L2_COLORSPACE_SRGB;
- vsen->mbus_format.quantization = V4L2_QUANTIZATION_FULL_RANGE;
- vsen->mbus_format.xfer_func = V4L2_XFER_FUNC_SRGB;
-
- /* Register the subdev with the v4l2 and the media framework */
- ret = v4l2_device_register_subdev(v4l2_dev, &vsen->sd);
- if (ret) {
- dev_err(vsen->sd.v4l2_dev->dev,
- "%s: subdev register failed (err=%d)\n",
- vsen->sd.name, ret);
- goto err_clean_m_ent;
- }
+ goto err_unregister_ent_sd;
- return &vsen->ved;
+ return 0;
-err_clean_m_ent:
- media_entity_cleanup(&vsen->sd.entity);
-err_clean_pads:
- vimc_pads_cleanup(vsen->ved.pads);
+err_unregister_ent_sd:
+ vimc_ent_sd_unregister(&vsen->ved, &vsen->sd);
err_free_vsen:
kfree(vsen);
- return ERR_PTR(ret);
+ return ret;
}
+
+static const struct component_ops vimc_sen_comp_ops = {
+ .bind = vimc_sen_comp_bind,
+ .unbind = vimc_sen_comp_unbind,
+};
+
+static int vimc_sen_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vimc_sen_comp_ops);
+}
+
+static int vimc_sen_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vimc_sen_comp_ops);
+
+ return 0;
+}
+
+static struct platform_driver vimc_sen_pdrv = {
+ .probe = vimc_sen_probe,
+ .remove = vimc_sen_remove,
+ .driver = {
+ .name = VIMC_SEN_DRV_NAME,
+ },
+};
+
+static const struct platform_device_id vimc_sen_driver_ids[] = {
+ {
+ .name = VIMC_SEN_DRV_NAME,
+ },
+ { }
+};
+
+module_platform_driver(vimc_sen_pdrv);
+
+MODULE_DEVICE_TABLE(platform, vimc_sen_driver_ids);
+
+MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Sensor");
+MODULE_AUTHOR("Helen Mae Koike Fornazier <[email protected]>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vimc/vimc-sensor.h b/drivers/media/platform/vimc/vimc-sensor.h
deleted file mode 100644
index 505310e8aeb7..000000000000
--- a/drivers/media/platform/vimc/vimc-sensor.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * vimc-sensor.h Virtual Media Controller Driver
- *
- * Copyright (C) 2015-2017 Helen Koike <[email protected]>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _VIMC_SENSOR_H_
-#define _VIMC_SENSOR_H_
-
-#include "vimc-core.h"
-
-struct vimc_ent_device *vimc_sen_create(struct v4l2_device *v4l2_dev,
- const char *const name,
- u16 num_pads,
- const unsigned long *pads_flag);
-
-#endif
diff --git a/drivers/media/platform/vivid/vivid-cec.c b/drivers/media/platform/vivid/vivid-cec.c
index 653f4099f737..e15705758969 100644
--- a/drivers/media/platform/vivid/vivid-cec.c
+++ b/drivers/media/platform/vivid/vivid-cec.c
@@ -34,7 +34,7 @@ void vivid_cec_bus_free_work(struct vivid_dev *dev)
cancel_delayed_work_sync(&cw->work);
spin_lock(&dev->cec_slock);
list_del(&cw->list);
- cec_transmit_done(cw->adap, CEC_TX_STATUS_LOW_DRIVE, 0, 0, 1, 0);
+ cec_transmit_attempt_done(cw->adap, CEC_TX_STATUS_LOW_DRIVE);
kfree(cw);
}
spin_unlock(&dev->cec_slock);
@@ -84,7 +84,7 @@ static void vivid_cec_xfer_done_worker(struct work_struct *work)
dev->cec_xfer_start_jiffies = 0;
list_del(&cw->list);
spin_unlock(&dev->cec_slock);
- cec_transmit_done(cw->adap, cw->tx_status, 0, valid_dest ? 0 : 1, 0, 0);
+ cec_transmit_attempt_done(cw->adap, cw->tx_status);
/* Broadcast message */
if (adap != dev->cec_rx_adap)
@@ -105,7 +105,7 @@ static void vivid_cec_xfer_try_worker(struct work_struct *work)
if (dev->cec_xfer_time_jiffies) {
list_del(&cw->list);
spin_unlock(&dev->cec_slock);
- cec_transmit_done(cw->adap, CEC_TX_STATUS_ARB_LOST, 1, 0, 0, 0);
+ cec_transmit_attempt_done(cw->adap, CEC_TX_STATUS_ARB_LOST);
kfree(cw);
} else {
INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_done_worker);
diff --git a/drivers/media/platform/xilinx/Kconfig b/drivers/media/platform/xilinx/Kconfig
index 84bae795b70d..a5d21b7c6e0b 100644
--- a/drivers/media/platform/xilinx/Kconfig
+++ b/drivers/media/platform/xilinx/Kconfig
@@ -2,6 +2,7 @@ config VIDEO_XILINX
tristate "Xilinx Video IP (EXPERIMENTAL)"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF && HAS_DMA
select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
---help---
Driver for Xilinx Video IP Pipelines
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index feb3b2f1d874..ac4704388920 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -22,7 +22,7 @@
#include <media/v4l2-async.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
-#include <media/v4l2-of.h>
+#include <media/v4l2-fwnode.h>
#include "xilinx-dma.h"
#include "xilinx-vipp.h"
@@ -74,7 +74,7 @@ static int xvip_graph_build_one(struct xvip_composite_device *xdev,
struct media_pad *local_pad;
struct media_pad *remote_pad;
struct xvip_graph_entity *ent;
- struct v4l2_of_link link;
+ struct v4l2_fwnode_link link;
struct device_node *ep = NULL;
struct device_node *next;
int ret = 0;
@@ -92,7 +92,7 @@ static int xvip_graph_build_one(struct xvip_composite_device *xdev,
dev_dbg(xdev->dev, "processing endpoint %s\n", ep->full_name);
- ret = v4l2_of_parse_link(ep, &link);
+ ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
if (ret < 0) {
dev_err(xdev->dev, "failed to parse link for %s\n",
ep->full_name);
@@ -103,9 +103,10 @@ static int xvip_graph_build_one(struct xvip_composite_device *xdev,
* the link.
*/
if (link.local_port >= local->num_pads) {
- dev_err(xdev->dev, "invalid port number %u on %s\n",
- link.local_port, link.local_node->full_name);
- v4l2_of_put_link(&link);
+ dev_err(xdev->dev, "invalid port number %u for %s\n",
+ link.local_port,
+ to_of_node(link.local_node)->full_name);
+ v4l2_fwnode_put_link(&link);
ret = -EINVAL;
break;
}
@@ -114,25 +115,28 @@ static int xvip_graph_build_one(struct xvip_composite_device *xdev,
if (local_pad->flags & MEDIA_PAD_FL_SINK) {
dev_dbg(xdev->dev, "skipping sink port %s:%u\n",
- link.local_node->full_name, link.local_port);
- v4l2_of_put_link(&link);
+ to_of_node(link.local_node)->full_name,
+ link.local_port);
+ v4l2_fwnode_put_link(&link);
continue;
}
/* Skip DMA engines, they will be processed separately. */
- if (link.remote_node == xdev->dev->of_node) {
+ if (link.remote_node == of_fwnode_handle(xdev->dev->of_node)) {
dev_dbg(xdev->dev, "skipping DMA port %s:%u\n",
- link.local_node->full_name, link.local_port);
- v4l2_of_put_link(&link);
+ to_of_node(link.local_node)->full_name,
+ link.local_port);
+ v4l2_fwnode_put_link(&link);
continue;
}
/* Find the remote entity. */
- ent = xvip_graph_find_entity(xdev, link.remote_node);
+ ent = xvip_graph_find_entity(xdev,
+ to_of_node(link.remote_node));
if (ent == NULL) {
dev_err(xdev->dev, "no entity found for %s\n",
- link.remote_node->full_name);
- v4l2_of_put_link(&link);
+ to_of_node(link.remote_node)->full_name);
+ v4l2_fwnode_put_link(&link);
ret = -ENODEV;
break;
}
@@ -141,15 +145,16 @@ static int xvip_graph_build_one(struct xvip_composite_device *xdev,
if (link.remote_port >= remote->num_pads) {
dev_err(xdev->dev, "invalid port number %u on %s\n",
- link.remote_port, link.remote_node->full_name);
- v4l2_of_put_link(&link);
+ link.remote_port,
+ to_of_node(link.remote_node)->full_name);
+ v4l2_fwnode_put_link(&link);
ret = -EINVAL;
break;
}
remote_pad = &remote->pads[link.remote_port];
- v4l2_of_put_link(&link);
+ v4l2_fwnode_put_link(&link);
/* Create the media link. */
dev_dbg(xdev->dev, "creating %s:%u -> %s:%u link\n",
@@ -194,7 +199,7 @@ static int xvip_graph_build_dma(struct xvip_composite_device *xdev)
struct media_pad *source_pad;
struct media_pad *sink_pad;
struct xvip_graph_entity *ent;
- struct v4l2_of_link link;
+ struct v4l2_fwnode_link link;
struct device_node *ep = NULL;
struct device_node *next;
struct xvip_dma *dma;
@@ -213,7 +218,7 @@ static int xvip_graph_build_dma(struct xvip_composite_device *xdev)
dev_dbg(xdev->dev, "processing endpoint %s\n", ep->full_name);
- ret = v4l2_of_parse_link(ep, &link);
+ ret = v4l2_fwnode_parse_link(of_fwnode_handle(ep), &link);
if (ret < 0) {
dev_err(xdev->dev, "failed to parse link for %s\n",
ep->full_name);
@@ -225,7 +230,7 @@ static int xvip_graph_build_dma(struct xvip_composite_device *xdev)
if (dma == NULL) {
dev_err(xdev->dev, "no DMA engine found for port %u\n",
link.local_port);
- v4l2_of_put_link(&link);
+ v4l2_fwnode_put_link(&link);
ret = -EINVAL;
break;
}
@@ -234,19 +239,21 @@ static int xvip_graph_build_dma(struct xvip_composite_device *xdev)
dma->video.name);
/* Find the remote entity. */
- ent = xvip_graph_find_entity(xdev, link.remote_node);
+ ent = xvip_graph_find_entity(xdev,
+ to_of_node(link.remote_node));
if (ent == NULL) {
dev_err(xdev->dev, "no entity found for %s\n",
- link.remote_node->full_name);
- v4l2_of_put_link(&link);
+ to_of_node(link.remote_node)->full_name);
+ v4l2_fwnode_put_link(&link);
ret = -ENODEV;
break;
}
if (link.remote_port >= ent->entity->num_pads) {
dev_err(xdev->dev, "invalid port number %u on %s\n",
- link.remote_port, link.remote_node->full_name);
- v4l2_of_put_link(&link);
+ link.remote_port,
+ to_of_node(link.remote_node)->full_name);
+ v4l2_fwnode_put_link(&link);
ret = -EINVAL;
break;
}
@@ -263,7 +270,7 @@ static int xvip_graph_build_dma(struct xvip_composite_device *xdev)
sink_pad = &dma->pad;
}
- v4l2_of_put_link(&link);
+ v4l2_fwnode_put_link(&link);
/* Create the media link. */
dev_dbg(xdev->dev, "creating %s:%u -> %s:%u link\n",
@@ -383,8 +390,8 @@ static int xvip_graph_parse_one(struct xvip_composite_device *xdev,
}
entity->node = remote;
- entity->asd.match_type = V4L2_ASYNC_MATCH_OF;
- entity->asd.match.of.node = remote;
+ entity->asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ entity->asd.match.fwnode.fwnode = of_fwnode_handle(remote);
list_add_tail(&entity->list, &xdev->entities);
xdev->num_subdevs++;
}
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index e422f3d56f76..5e83b76495f7 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -169,11 +169,11 @@ config IR_HIX5HD2
tristate "Hisilicon hix5hd2 IR remote control"
depends on RC_CORE
help
- Say Y here if you want to use hisilicon hix5hd2 remote control.
- To compile this driver as a module, choose M here: the module will be
- called ir-hix5hd2.
+ Say Y here if you want to use hisilicon hix5hd2 remote control.
+ To compile this driver as a module, choose M here: the module will be
+ called ir-hix5hd2.
- If you're not sure, select N here
+ If you're not sure, select N here
config IR_IMON
tristate "SoundGraph iMON Receiver and Display"
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 9cf3e69de16a..a4c6ad4f67c1 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -904,9 +904,6 @@ static int ati_remote_probe(struct usb_interface *interface,
if (err)
goto exit_kill_urbs;
- /* use our delay for rc_dev */
- ati_remote->rdev->input_dev->rep[REP_DELAY] = repeat_delay;
-
/* Set up and register mouse input device */
if (mouse) {
input_dev = input_allocate_device();
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index ccf24fd7ec1b..8711a7ff55cc 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -113,6 +113,7 @@ static void process_ir_data(struct iguanair *ir, unsigned len)
break;
case CMD_TX_OVERFLOW:
ir->tx_overflow = true;
+ /* fall through */
case CMD_RECEIVER_OFF:
case CMD_RECEIVER_ON:
case CMD_SEND:
diff --git a/drivers/media/rc/img-ir/img-ir-hw.c b/drivers/media/rc/img-ir/img-ir-hw.c
index 431d33b36fb0..8d1439622533 100644
--- a/drivers/media/rc/img-ir/img-ir-hw.c
+++ b/drivers/media/rc/img-ir/img-ir-hw.c
@@ -702,10 +702,6 @@ static void img_ir_set_protocol(struct img_ir_priv *priv, u64 proto)
{
struct rc_dev *rdev = priv->hw.rdev;
- spin_lock_irq(&rdev->rc_map.lock);
- rdev->rc_map.rc_type = __ffs64(proto);
- spin_unlock_irq(&rdev->rc_map.lock);
-
mutex_lock(&rdev->lock);
rdev->enabled_protocols = proto;
rdev->allowed_wakeup_protocols = proto;
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 3489010601b5..bd76534a2749 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1722,7 +1722,7 @@ static void imon_incoming_scancode(struct imon_context *ictx,
if (kc == KEY_KEYBOARD && !ictx->release_code) {
ictx->last_keycode = kc;
if (!nomouse) {
- ictx->pad_mouse = ~(ictx->pad_mouse) & 0x1;
+ ictx->pad_mouse = !ictx->pad_mouse;
dev_dbg(dev, "toggling to %s mode\n",
ictx->pad_mouse ? "mouse" : "keyboard");
spin_unlock_irqrestore(&ictx->kc_lock, flags);
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index de85f1d7ce43..a30af91710fe 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -327,16 +327,6 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
return ret;
}
-static int ir_lirc_open(void *data)
-{
- return 0;
-}
-
-static void ir_lirc_close(void *data)
-{
- return;
-}
-
static const struct file_operations lirc_fops = {
.owner = THIS_MODULE,
.write = ir_lirc_transmit_ir,
@@ -354,7 +344,6 @@ static const struct file_operations lirc_fops = {
static int ir_lirc_register(struct rc_dev *dev)
{
struct lirc_driver *drv;
- struct lirc_buffer *rbuf;
int rc = -ENOMEM;
unsigned long features = 0;
@@ -362,19 +351,12 @@ static int ir_lirc_register(struct rc_dev *dev)
if (!drv)
return rc;
- rbuf = kzalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
- if (!rbuf)
- goto rbuf_alloc_failed;
-
- rc = lirc_buffer_init(rbuf, sizeof(int), LIRCBUF_SIZE);
- if (rc)
- goto rbuf_init_failed;
-
if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
features |= LIRC_CAN_REC_MODE2;
if (dev->rx_resolution)
features |= LIRC_CAN_GET_REC_RESOLUTION;
}
+
if (dev->tx_ir) {
features |= LIRC_CAN_SEND_PULSE;
if (dev->s_tx_mask)
@@ -403,10 +385,10 @@ static int ir_lirc_register(struct rc_dev *dev)
drv->minor = -1;
drv->features = features;
drv->data = &dev->raw->lirc;
- drv->rbuf = rbuf;
- drv->set_use_inc = &ir_lirc_open;
- drv->set_use_dec = &ir_lirc_close;
+ drv->rbuf = NULL;
drv->code_length = sizeof(struct ir_raw_event) * 8;
+ drv->chunk_size = sizeof(int);
+ drv->buffer_size = LIRCBUF_SIZE;
drv->fops = &lirc_fops;
drv->dev = &dev->dev;
drv->rdev = dev;
@@ -415,19 +397,15 @@ static int ir_lirc_register(struct rc_dev *dev)
drv->minor = lirc_register_driver(drv);
if (drv->minor < 0) {
rc = -ENODEV;
- goto lirc_register_failed;
+ goto out;
}
dev->raw->lirc.drv = drv;
dev->raw->lirc.dev = dev;
return 0;
-lirc_register_failed:
-rbuf_init_failed:
- kfree(rbuf);
-rbuf_alloc_failed:
+out:
kfree(drv);
-
return rc;
}
@@ -436,9 +414,8 @@ static int ir_lirc_unregister(struct rc_dev *dev)
struct lirc_codec *lirc = &dev->raw->lirc;
lirc_unregister_driver(lirc->drv->minor);
- lirc_buffer_free(lirc->drv->rbuf);
- kfree(lirc->drv->rbuf);
kfree(lirc->drv);
+ lirc->drv = NULL;
return 0;
}
diff --git a/drivers/media/rc/ir-spi.c b/drivers/media/rc/ir-spi.c
index c8863f36686a..7e383b3fedd5 100644
--- a/drivers/media/rc/ir-spi.c
+++ b/drivers/media/rc/ir-spi.c
@@ -57,10 +57,13 @@ static int ir_spi_tx(struct rc_dev *dev,
/* convert the pulse/space signal to raw binary signal */
for (i = 0; i < count; i++) {
+ unsigned int periods;
int j;
- u16 val = ((i + 1) % 2) ? idata->pulse : idata->space;
+ u16 val;
- if (len + buffer[i] >= IR_SPI_MAX_BUFSIZE)
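+		/* buffer[i] holds a duration in microseconds; convert it to carrier periods */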
+ periods = DIV_ROUND_CLOSEST(buffer[i] * idata->freq, 1000000);
+
+ if (len + periods >= IR_SPI_MAX_BUFSIZE)
return -EINVAL;
/*
@@ -69,13 +72,13 @@ static int ir_spi_tx(struct rc_dev *dev,
* contain a space duration.
*/
val = (i % 2) ? idata->space : idata->pulse;
- for (j = 0; j < buffer[i]; j++)
+ for (j = 0; j < periods; j++)
idata->tx_buf[len++] = val;
}
memset(&xfer, 0, sizeof(xfer));
- xfer.speed_hz = idata->freq;
+ xfer.speed_hz = idata->freq * 16;
xfer.len = len * sizeof(*idata->tx_buf);
xfer.tx_buf = idata->tx_buf;
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 8d60c9f00df9..db1e7b70c998 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -18,18 +18,10 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/sched/signal.h>
-#include <linux/errno.h>
#include <linux/ioctl.h>
-#include <linux/fs.h>
#include <linux/poll.h>
-#include <linux/completion.h>
#include <linux/mutex.h>
-#include <linux/wait.h>
-#include <linux/unistd.h>
-#include <linux/kthread.h>
-#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/cdev.h>
@@ -37,9 +29,6 @@
#include <media/lirc.h>
#include <media/lirc_dev.h>
-static bool debug;
-
-#define IRCTL_DEV_NAME "BaseRemoteCtl"
#define NOPLUG -1
#define LOGHEAD "lirc_dev (%s[%d]): "
@@ -52,13 +41,11 @@ struct irctl {
struct mutex irctl_lock;
struct lirc_buffer *buf;
+ bool buf_internal;
unsigned int chunk_size;
struct device dev;
struct cdev cdev;
-
- struct task_struct *task;
- long jiffies_to_wait;
};
static DEFINE_MUTEX(lirc_dev_lock);
@@ -68,22 +55,11 @@ static struct irctl *irctls[MAX_IRCTL_DEVICES];
/* Only used for sysfs but defined to void otherwise */
static struct class *lirc_class;
-/* helper function
- * initializes the irctl structure
- */
-static void lirc_irctl_init(struct irctl *ir)
-{
- mutex_init(&ir->irctl_lock);
- ir->d.minor = NOPLUG;
-}
-
static void lirc_release(struct device *ld)
{
struct irctl *ir = container_of(ld, struct irctl, dev);
- put_device(ir->dev.parent);
-
- if (ir->buf != ir->d.rbuf) {
+ if (ir->buf_internal) {
lirc_buffer_free(ir->buf);
kfree(ir->buf);
}
@@ -94,93 +70,6 @@ static void lirc_release(struct device *ld)
kfree(ir);
}
-/* helper function
- * reads key codes from driver and puts them into buffer
- * returns 0 on success
- */
-static int lirc_add_to_buf(struct irctl *ir)
-{
- int res;
- int got_data = -1;
-
- if (!ir->d.add_to_buf)
- return 0;
-
- /*
- * service the device as long as it is returning
- * data and we have space
- */
- do {
- got_data++;
- res = ir->d.add_to_buf(ir->d.data, ir->buf);
- } while (!res);
-
- if (res == -ENODEV)
- kthread_stop(ir->task);
-
- return got_data ? 0 : res;
-}
-
-/* main function of the polling thread
- */
-static int lirc_thread(void *irctl)
-{
- struct irctl *ir = irctl;
-
- do {
- if (ir->open) {
- if (ir->jiffies_to_wait) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(ir->jiffies_to_wait);
- }
- if (kthread_should_stop())
- break;
- if (!lirc_add_to_buf(ir))
- wake_up_interruptible(&ir->buf->wait_poll);
- } else {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- }
- } while (!kthread_should_stop());
-
- return 0;
-}
-
-
-static const struct file_operations lirc_dev_fops = {
- .owner = THIS_MODULE,
- .read = lirc_dev_fop_read,
- .write = lirc_dev_fop_write,
- .poll = lirc_dev_fop_poll,
- .unlocked_ioctl = lirc_dev_fop_ioctl,
- .open = lirc_dev_fop_open,
- .release = lirc_dev_fop_close,
- .llseek = noop_llseek,
-};
-
-static int lirc_cdev_add(struct irctl *ir)
-{
- struct lirc_driver *d = &ir->d;
- struct cdev *cdev;
- int retval;
-
- cdev = &ir->cdev;
-
- if (d->fops) {
- cdev_init(cdev, d->fops);
- cdev->owner = d->owner;
- } else {
- cdev_init(cdev, &lirc_dev_fops);
- cdev->owner = THIS_MODULE;
- }
- retval = kobject_set_name(&cdev->kobj, "lirc%d", d->minor);
- if (retval)
- return retval;
-
- cdev->kobj.parent = &ir->dev.kobj;
- return cdev_add(cdev, ir->dev.devt, 1);
-}
-
static int lirc_allocate_buffer(struct irctl *ir)
{
int err = 0;
@@ -189,8 +78,6 @@ static int lirc_allocate_buffer(struct irctl *ir)
unsigned int buffer_size;
struct lirc_driver *d = &ir->d;
- mutex_lock(&lirc_dev_lock);
-
bytes_in_key = BITS_TO_LONGS(d->code_length) +
(d->code_length % 8 ? 1 : 0);
buffer_size = d->buffer_size ? d->buffer_size : BUFLEN / bytes_in_key;
@@ -198,6 +85,7 @@ static int lirc_allocate_buffer(struct irctl *ir)
if (d->rbuf) {
ir->buf = d->rbuf;
+ ir->buf_internal = false;
} else {
ir->buf = kmalloc(sizeof(struct lirc_buffer), GFP_KERNEL);
if (!ir->buf) {
@@ -208,18 +96,20 @@ static int lirc_allocate_buffer(struct irctl *ir)
err = lirc_buffer_init(ir->buf, chunk_size, buffer_size);
if (err) {
kfree(ir->buf);
+ ir->buf = NULL;
goto out;
}
+
+ ir->buf_internal = true;
+ d->rbuf = ir->buf;
}
ir->chunk_size = ir->buf->chunk_size;
out:
- mutex_unlock(&lirc_dev_lock);
-
return err;
}
-static int lirc_allocate_driver(struct lirc_driver *d)
+int lirc_register_driver(struct lirc_driver *d)
{
struct irctl *ir;
int minor;
@@ -235,6 +125,11 @@ static int lirc_allocate_driver(struct lirc_driver *d)
return -EINVAL;
}
+ if (!d->fops) {
+ pr_err("fops pointer not filled in!\n");
+ return -EINVAL;
+ }
+
if (d->minor >= MAX_IRCTL_DEVICES) {
dev_err(d->dev, "minor must be between 0 and %d!\n",
MAX_IRCTL_DEVICES - 1);
@@ -247,18 +142,8 @@ static int lirc_allocate_driver(struct lirc_driver *d)
return -EBADRQC;
}
- if (d->sample_rate) {
- if (2 > d->sample_rate || HZ < d->sample_rate) {
- dev_err(d->dev, "invalid %d sample rate\n",
- d->sample_rate);
- return -EBADRQC;
- }
- if (!d->add_to_buf) {
- dev_err(d->dev, "add_to_buf not set\n");
- return -EBADRQC;
- }
- } else if (!d->rbuf && !(d->fops && d->fops->read &&
- d->fops->poll && d->fops->unlocked_ioctl)) {
+ if (!d->rbuf && !(d->fops && d->fops->read &&
+ d->fops->poll && d->fops->unlocked_ioctl)) {
dev_err(d->dev, "undefined read, poll, ioctl\n");
return -EBADRQC;
}
@@ -288,7 +173,8 @@ static int lirc_allocate_driver(struct lirc_driver *d)
err = -ENOMEM;
goto out_lock;
}
- lirc_irctl_init(ir);
+
+ mutex_init(&ir->irctl_lock);
irctls[minor] = ir;
d->minor = minor;
@@ -300,32 +186,29 @@ static int lirc_allocate_driver(struct lirc_driver *d)
ir->d = *d;
+ if (LIRC_CAN_REC(d->features)) {
+ err = lirc_allocate_buffer(irctls[minor]);
+ if (err) {
+ kfree(ir);
+ goto out_lock;
+ }
+ d->rbuf = ir->buf;
+ }
+
+ device_initialize(&ir->dev);
ir->dev.devt = MKDEV(MAJOR(lirc_base_dev), ir->d.minor);
ir->dev.class = lirc_class;
ir->dev.parent = d->dev;
ir->dev.release = lirc_release;
dev_set_name(&ir->dev, "lirc%d", ir->d.minor);
- device_initialize(&ir->dev);
- if (d->sample_rate) {
- ir->jiffies_to_wait = HZ / d->sample_rate;
+ cdev_init(&ir->cdev, d->fops);
+ ir->cdev.owner = ir->d.owner;
+ ir->cdev.kobj.parent = &ir->dev.kobj;
- /* try to fire up polling thread */
- ir->task = kthread_run(lirc_thread, (void *)ir, "lirc_dev");
- if (IS_ERR(ir->task)) {
- dev_err(d->dev, "cannot run thread for minor = %d\n",
- d->minor);
- err = -ECHILD;
- goto out_sysfs;
- }
- } else {
- /* it means - wait for external event in task queue */
- ir->jiffies_to_wait = 0;
- }
-
- err = lirc_cdev_add(ir);
+ err = cdev_add(&ir->cdev, ir->dev.devt, 1);
if (err)
- goto out_sysfs;
+ goto out_free_dev;
ir->attached = 1;
@@ -335,37 +218,20 @@ static int lirc_allocate_driver(struct lirc_driver *d)
mutex_unlock(&lirc_dev_lock);
- get_device(ir->dev.parent);
-
dev_info(ir->d.dev, "lirc_dev: driver %s registered at minor = %d\n",
ir->d.name, ir->d.minor);
+
return minor;
+
out_cdev:
cdev_del(&ir->cdev);
-out_sysfs:
+out_free_dev:
put_device(&ir->dev);
out_lock:
mutex_unlock(&lirc_dev_lock);
return err;
}
-
-int lirc_register_driver(struct lirc_driver *d)
-{
- int minor, err = 0;
-
- minor = lirc_allocate_driver(d);
- if (minor < 0)
- return minor;
-
- if (LIRC_CAN_REC(d->features)) {
- err = lirc_allocate_buffer(irctls[minor]);
- if (err)
- lirc_unregister_driver(minor);
- }
-
- return err ? err : minor;
-}
EXPORT_SYMBOL(lirc_register_driver);
int lirc_unregister_driver(int minor)
@@ -393,10 +259,6 @@ int lirc_unregister_driver(int minor)
return -ENOENT;
}
- /* end up polling thread */
- if (ir->task)
- kthread_stop(ir->task);
-
dev_dbg(ir->d.dev, "lirc_dev: driver %s unregistered from minor = %d\n",
ir->d.name, ir->d.minor);
@@ -407,12 +269,6 @@ int lirc_unregister_driver(int minor)
wake_up_interruptible(&ir->buf->wait_poll);
}
- mutex_lock(&ir->irctl_lock);
-
- if (ir->d.set_use_dec)
- ir->d.set_use_dec(ir->d.data);
-
- mutex_unlock(&ir->irctl_lock);
mutex_unlock(&lirc_dev_lock);
device_del(&ir->dev);
@@ -462,17 +318,10 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
goto error;
}
+ if (ir->buf)
+ lirc_buffer_clear(ir->buf);
+
ir->open++;
- if (ir->d.set_use_inc)
- retval = ir->d.set_use_inc(ir->d.data);
- if (retval) {
- ir->open--;
- } else {
- if (ir->buf)
- lirc_buffer_clear(ir->buf);
- if (ir->task)
- wake_up_process(ir->task);
- }
error:
nonseekable_open(inode, file);
@@ -497,8 +346,6 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
rc_close(ir->d.rdev);
ir->open--;
- if (ir->d.set_use_dec)
- ir->d.set_use_dec(ir->d.data);
if (!ret)
mutex_unlock(&lirc_dev_lock);
@@ -517,7 +364,7 @@ unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait)
}
if (!ir->attached)
- return POLLERR;
+ return POLLHUP | POLLERR;
if (ir->buf) {
poll_wait(file, &ir->buf->wait_poll, wait);
@@ -729,24 +576,6 @@ void *lirc_get_pdata(struct file *file)
EXPORT_SYMBOL(lirc_get_pdata);
-ssize_t lirc_dev_fop_write(struct file *file, const char __user *buffer,
- size_t length, loff_t *ppos)
-{
- struct irctl *ir = irctls[iminor(file_inode(file))];
-
- if (!ir) {
- pr_err("called with invalid irctl\n");
- return -ENODEV;
- }
-
- if (!ir->attached)
- return -ENODEV;
-
- return -EINVAL;
-}
-EXPORT_SYMBOL(lirc_dev_fop_write);
-
-
static int __init lirc_dev_init(void)
{
int retval;
@@ -758,7 +587,7 @@ static int __init lirc_dev_init(void)
}
retval = alloc_chrdev_region(&lirc_base_dev, 0, MAX_IRCTL_DEVICES,
- IRCTL_DEV_NAME);
+ "BaseRemoteCtl");
if (retval) {
class_destroy(lirc_class);
pr_err("alloc_chrdev_region failed\n");
@@ -784,6 +613,3 @@ module_exit(lirc_dev_exit);
MODULE_DESCRIPTION("LIRC base driver module");
MODULE_AUTHOR("Artur Lipowski");
MODULE_LICENSE("GPL");
-
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Enable debugging messages");
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 93b16fe3ab38..eb130694bbb8 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -36,18 +36,18 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <linux/pm_wakeup.h>
#include <media/rc-core.h>
-#define DRIVER_VERSION "1.92"
+#define DRIVER_VERSION "1.93"
#define DRIVER_AUTHOR "Jarod Wilson <[email protected]>"
#define DRIVER_DESC "Windows Media Center Ed. eHome Infrared Transceiver " \
"device driver"
#define DRIVER_NAME "mceusb"
-#define USB_BUFLEN 32 /* USB reception buffer length */
#define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */
#define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */
@@ -417,7 +417,9 @@ struct mceusb_dev {
/* usb */
struct usb_device *usbdev;
struct urb *urb_in;
+ unsigned int pipe_in;
struct usb_endpoint_descriptor *usb_ep_out;
+ unsigned int pipe_out;
/* buffers and dma */
unsigned char *buf_in;
@@ -454,6 +456,16 @@ struct mceusb_dev {
u8 num_rxports; /* number of receive sensors */
u8 txports_cabled; /* bitmask of transmitters with cable */
u8 rxports_active; /* bitmask of active receive sensors */
+
+ /*
+ * support for async error handler mceusb_deferred_kevent()
+ * where usb_clear_halt(), usb_reset_configuration(),
+ * usb_reset_device(), etc. must be done in process context
+ */
+ struct work_struct kevent;
+ unsigned long kevent_flags;
+# define EVENT_TX_HALT 0
+# define EVENT_RX_HALT 1
};
/* MCE Device Command Strings, generally a port and command pair */
@@ -527,7 +539,7 @@ static int mceusb_cmd_datasize(u8 cmd, u8 subcmd)
}
static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
- int offset, int len, bool out)
+ int buf_len, int offset, int len, bool out)
{
#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
char *inout;
@@ -544,7 +556,8 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
return;
dev_dbg(dev, "%cx data: %*ph (length=%d)",
- (out ? 't' : 'r'), min(len, USB_BUFLEN), buf, len);
+ (out ? 't' : 'r'),
+ min(len, buf_len - offset), buf + offset, len);
inout = out ? "Request" : "Got";
@@ -686,6 +699,21 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
#endif
}
+/*
+ * Schedule work that can't be done in interrupt handlers
+ * (mceusb_dev_recv() and mce_async_callback()) or in tasklets.
+ * Invokes mceusb_deferred_kevent() for recovering from
+ * error events specified by the kevent bit field.
+ */
+static void mceusb_defer_kevent(struct mceusb_dev *ir, int kevent)
+{
+ set_bit(kevent, &ir->kevent_flags);
+ if (!schedule_work(&ir->kevent))
+ dev_err(ir->dev, "kevent %d may have been dropped", kevent);
+ else
+ dev_dbg(ir->dev, "kevent %d scheduled", kevent);
+}
+
static void mce_async_callback(struct urb *urb)
{
struct mceusb_dev *ir;
@@ -701,7 +729,8 @@ static void mce_async_callback(struct urb *urb)
case 0:
len = urb->actual_length;
- mceusb_dev_printdata(ir, urb->transfer_buffer, 0, len, true);
+ mceusb_dev_printdata(ir, urb->transfer_buffer, len,
+ 0, len, true);
break;
case -ECONNRESET:
@@ -711,6 +740,11 @@ static void mce_async_callback(struct urb *urb)
break;
case -EPIPE:
+ dev_err(ir->dev, "Error: request urb status = %d (TX HALT)",
+ urb->status);
+ mceusb_defer_kevent(ir, EVENT_TX_HALT);
+ break;
+
default:
dev_err(ir->dev, "Error: request urb status = %d", urb->status);
break;
@@ -721,18 +755,18 @@ static void mce_async_callback(struct urb *urb)
usb_free_urb(urb);
}
-/* request incoming or send outgoing usb packet - used to initialize remote */
+/* request outgoing (send) usb packet - used to initialize remote */
static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
int size)
{
- int res, pipe;
+ int res;
struct urb *async_urb;
struct device *dev = ir->dev;
unsigned char *async_buf;
async_urb = usb_alloc_urb(0, GFP_KERNEL);
if (unlikely(!async_urb)) {
- dev_err(dev, "Error, couldn't allocate urb!\n");
+ dev_err(dev, "Error, couldn't allocate urb!");
return;
}
@@ -743,32 +777,26 @@ static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
}
/* outbound data */
- if (usb_endpoint_xfer_int(ir->usb_ep_out)) {
- pipe = usb_sndintpipe(ir->usbdev,
- ir->usb_ep_out->bEndpointAddress);
- usb_fill_int_urb(async_urb, ir->usbdev, pipe, async_buf,
- size, mce_async_callback, ir,
+ if (usb_endpoint_xfer_int(ir->usb_ep_out))
+ usb_fill_int_urb(async_urb, ir->usbdev, ir->pipe_out,
+ async_buf, size, mce_async_callback, ir,
ir->usb_ep_out->bInterval);
- } else {
- pipe = usb_sndbulkpipe(ir->usbdev,
- ir->usb_ep_out->bEndpointAddress);
- usb_fill_bulk_urb(async_urb, ir->usbdev, pipe,
- async_buf, size, mce_async_callback,
- ir);
- }
- memcpy(async_buf, data, size);
+ else
+ usb_fill_bulk_urb(async_urb, ir->usbdev, ir->pipe_out,
+ async_buf, size, mce_async_callback, ir);
- dev_dbg(dev, "receive request called (size=%#x)", size);
+ memcpy(async_buf, data, size);
- async_urb->transfer_buffer_length = size;
- async_urb->dev = ir->usbdev;
+ dev_dbg(dev, "send request called (size=%#x)", size);
res = usb_submit_urb(async_urb, GFP_ATOMIC);
if (res) {
- dev_err(dev, "receive request FAILED! (res=%d)", res);
+ dev_err(dev, "send request FAILED! (res=%d)", res);
+ kfree(async_buf);
+ usb_free_urb(async_urb);
return;
}
- dev_dbg(dev, "receive request complete (res=%d)", res);
+ dev_dbg(dev, "send request complete (res=%d)", res);
}
static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
@@ -974,7 +1002,7 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
switch (ir->parser_state) {
case SUBCMD:
ir->rem = mceusb_cmd_datasize(ir->cmd, ir->buf_in[i]);
- mceusb_dev_printdata(ir, ir->buf_in, i - 1,
+ mceusb_dev_printdata(ir, ir->buf_in, buf_len, i - 1,
ir->rem + 2, false);
mceusb_handle_command(ir, i);
ir->parser_state = CMD_DATA;
@@ -986,7 +1014,7 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
* US_TO_NS(MCE_TIME_UNIT);
- dev_dbg(ir->dev, "Storing %s with duration %d",
+ dev_dbg(ir->dev, "Storing %s with duration %u",
rawir.pulse ? "pulse" : "space",
rawir.duration);
@@ -1007,7 +1035,7 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
continue;
}
ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK);
- mceusb_dev_printdata(ir, ir->buf_in,
+ mceusb_dev_printdata(ir, ir->buf_in, buf_len,
i, ir->rem + 1, false);
if (ir->rem)
ir->parser_state = PARSE_IRDATA;
@@ -1052,6 +1080,11 @@ static void mceusb_dev_recv(struct urb *urb)
return;
case -EPIPE:
+ dev_err(ir->dev, "Error: urb status = %d (RX HALT)",
+ urb->status);
+ mceusb_defer_kevent(ir, EVENT_RX_HALT);
+ return;
+
default:
dev_err(ir->dev, "Error: urb status = %d", urb->status);
break;
@@ -1170,6 +1203,45 @@ static void mceusb_flash_led(struct mceusb_dev *ir)
mce_async_out(ir, FLASH_LED, sizeof(FLASH_LED));
}
+/*
+ * Workqueue function
+ * for resetting or recovering the device after occurrence of the error
+ * events specified in the ir->kevent bit field.
+ * The function runs (via schedule_work()) in non-interrupt context, because
+ * calls made here (such as usb_clear_halt()) require non-interrupt context.
+ */
+static void mceusb_deferred_kevent(struct work_struct *work)
+{
+ struct mceusb_dev *ir =
+ container_of(work, struct mceusb_dev, kevent);
+ int status;
+
+ if (test_bit(EVENT_RX_HALT, &ir->kevent_flags)) {
+ usb_unlink_urb(ir->urb_in);
+ status = usb_clear_halt(ir->usbdev, ir->pipe_in);
+ if (status < 0) {
+ dev_err(ir->dev, "rx clear halt error %d",
+ status);
+ }
+ clear_bit(EVENT_RX_HALT, &ir->kevent_flags);
+ if (status == 0) {
+ status = usb_submit_urb(ir->urb_in, GFP_KERNEL);
+ if (status < 0) {
+ dev_err(ir->dev,
+ "rx unhalt submit urb error %d",
+ status);
+ }
+ }
+ }
+
+ if (test_bit(EVENT_TX_HALT, &ir->kevent_flags)) {
+ status = usb_clear_halt(ir->usbdev, ir->pipe_out);
+ if (status < 0)
+ dev_err(ir->dev, "tx clear halt error %d", status);
+ clear_bit(EVENT_TX_HALT, &ir->kevent_flags);
+ }
+}
+
static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
{
struct usb_device *udev = ir->usbdev;
@@ -1303,6 +1375,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
if (!ir)
goto mem_alloc_fail;
+ ir->pipe_in = pipe;
ir->buf_in = usb_alloc_coherent(dev, maxp, GFP_ATOMIC, &ir->dma_in);
if (!ir->buf_in)
goto buf_in_alloc_fail;
@@ -1321,6 +1394,12 @@ static int mceusb_dev_probe(struct usb_interface *intf,
/* Saving usb interface data for use by the transmitter routine */
ir->usb_ep_out = ep_out;
+ if (usb_endpoint_xfer_int(ep_out))
+ ir->pipe_out = usb_sndintpipe(ir->usbdev,
+ ep_out->bEndpointAddress);
+ else
+ ir->pipe_out = usb_sndbulkpipe(ir->usbdev,
+ ep_out->bEndpointAddress);
if (dev->descriptor.iManufacturer
&& usb_string(dev, dev->descriptor.iManufacturer,
@@ -1332,21 +1411,32 @@ static int mceusb_dev_probe(struct usb_interface *intf,
snprintf(name + strlen(name), sizeof(name) - strlen(name),
" %s", buf);
+ /*
+ * Initialize async USB error handler before registering
+ * or activating any mceusb RX and TX functions
+ */
+ INIT_WORK(&ir->kevent, mceusb_deferred_kevent);
+
ir->rc = mceusb_init_rc_dev(ir);
if (!ir->rc)
goto rc_dev_fail;
/* wire up inbound data handler */
- usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
- mceusb_dev_recv, ir, ep_in->bInterval);
+ if (usb_endpoint_xfer_int(ep_in))
+ usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
+ mceusb_dev_recv, ir, ep_in->bInterval);
+ else
+ usb_fill_bulk_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
+ mceusb_dev_recv, ir);
+
ir->urb_in->transfer_dma = ir->dma_in;
ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
/* flush buffers on the device */
- dev_dbg(&intf->dev, "Flushing receive buffers\n");
+ dev_dbg(&intf->dev, "Flushing receive buffers");
res = usb_submit_urb(ir->urb_in, GFP_KERNEL);
if (res)
- dev_err(&intf->dev, "failed to flush buffers: %d\n", res);
+ dev_err(&intf->dev, "failed to flush buffers: %d", res);
/* figure out which firmware/emulator version this hardware has */
mceusb_get_emulator_version(ir);
@@ -1380,6 +1470,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
/* Error-handling path */
rc_dev_fail:
+ cancel_work_sync(&ir->kevent);
usb_put_dev(ir->usbdev);
usb_kill_urb(ir->urb_in);
usb_free_urb(ir->urb_in);
@@ -1405,6 +1496,7 @@ static void mceusb_dev_disconnect(struct usb_interface *intf)
return;
ir->usbdev = NULL;
+ cancel_work_sync(&ir->kevent);
rc_unregister_device(ir->rc);
usb_kill_urb(ir->urb_in);
usb_free_urb(ir->urb_in);
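
The mceusb changes above follow the standard USB recipe for endpoint stall recovery: the URB completion handlers run in atomic context, so on -EPIPE they only set a flag bit and schedule a work item, and the sleeping calls (usb_clear_halt(), urb resubmission) happen later in process context. A condensed sketch of that pattern, using made-up my_* names and with most error handling elided:

#include <linux/bitops.h>
#include <linux/usb.h>
#include <linux/workqueue.h>

#define MY_EVENT_RX_HALT	0

struct my_dev {
	struct usb_device *udev;
	struct urb *urb_in;
	unsigned int pipe_in;
	struct work_struct kevent;	/* INIT_WORK() done in probe */
	unsigned long kevent_flags;
};

/* URB completion: atomic context, so only record the error and defer */
static void my_rx_complete(struct urb *urb)
{
	struct my_dev *d = urb->context;

	if (urb->status == -EPIPE) {
		set_bit(MY_EVENT_RX_HALT, &d->kevent_flags);
		schedule_work(&d->kevent);
		return;
	}
	/* ... normal data handling, then resubmit the urb ... */
}

/* Work function: process context, allowed to sleep */
static void my_kevent(struct work_struct *work)
{
	struct my_dev *d = container_of(work, struct my_dev, kevent);

	if (test_and_clear_bit(MY_EVENT_RX_HALT, &d->kevent_flags)) {
		usb_unlink_urb(d->urb_in);
		if (usb_clear_halt(d->udev, d->pipe_in) == 0)
			usb_submit_urb(d->urb_in, GFP_KERNEL);
	}
}

As in the driver, the work item also has to be cancelled with cancel_work_sync() on disconnect and on the probe error path, so it cannot run against a half-torn-down device.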
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
index 5576dbd6b1a4..65566d569cb1 100644
--- a/drivers/media/rc/meson-ir.c
+++ b/drivers/media/rc/meson-ir.c
@@ -19,6 +19,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
+#include <linux/bitfield.h>
#include <media/rc-core.h>
@@ -36,7 +37,7 @@
/* only available on Meson 8b and newer */
#define IR_DEC_REG2 0x20
-#define REG0_RATE_MASK (BIT(11) - 1)
+#define REG0_RATE_MASK GENMASK(11, 0)
#define DECODE_MODE_NEC 0x0
#define DECODE_MODE_RAW 0x2
@@ -49,14 +50,13 @@
#define REG2_MODE_MASK GENMASK(3, 0)
#define REG2_MODE_SHIFT 0
-#define REG1_TIME_IV_SHIFT 16
-#define REG1_TIME_IV_MASK ((BIT(13) - 1) << REG1_TIME_IV_SHIFT)
+#define REG1_TIME_IV_MASK GENMASK(28, 16)
-#define REG1_IRQSEL_MASK (BIT(2) | BIT(3))
-#define REG1_IRQSEL_NEC_MODE (0 << 2)
-#define REG1_IRQSEL_RISE_FALL (1 << 2)
-#define REG1_IRQSEL_FALL (2 << 2)
-#define REG1_IRQSEL_RISE (3 << 2)
+#define REG1_IRQSEL_MASK GENMASK(3, 2)
+#define REG1_IRQSEL_NEC_MODE 0
+#define REG1_IRQSEL_RISE_FALL 1
+#define REG1_IRQSEL_FALL 2
+#define REG1_IRQSEL_RISE 3
#define REG1_RESET BIT(0)
#define REG1_ENABLE BIT(15)
@@ -68,7 +68,6 @@
struct meson_ir {
void __iomem *reg;
struct rc_dev *rc;
- int irq;
spinlock_t lock;
};
@@ -86,18 +85,19 @@ static void meson_ir_set_mask(struct meson_ir *ir, unsigned int reg,
static irqreturn_t meson_ir_irq(int irqno, void *dev_id)
{
struct meson_ir *ir = dev_id;
- u32 duration;
+ u32 duration, status;
DEFINE_IR_RAW_EVENT(rawir);
spin_lock(&ir->lock);
- duration = readl(ir->reg + IR_DEC_REG1);
- duration = (duration & REG1_TIME_IV_MASK) >> REG1_TIME_IV_SHIFT;
+ duration = readl_relaxed(ir->reg + IR_DEC_REG1);
+ duration = FIELD_GET(REG1_TIME_IV_MASK, duration);
rawir.duration = US_TO_NS(duration * MESON_TRATE);
- rawir.pulse = !!(readl(ir->reg + IR_DEC_STATUS) & STATUS_IR_DEC_IN);
+ status = readl_relaxed(ir->reg + IR_DEC_STATUS);
+ rawir.pulse = !!(status & STATUS_IR_DEC_IN);
- ir_raw_event_store_with_filter(ir->rc, &rawir);
+ ir_raw_event_store(ir->rc, &rawir);
ir_raw_event_handle(ir->rc);
spin_unlock(&ir->lock);
@@ -112,7 +112,7 @@ static int meson_ir_probe(struct platform_device *pdev)
struct resource *res;
const char *map_name;
struct meson_ir *ir;
- int ret;
+ int irq, ret;
ir = devm_kzalloc(dev, sizeof(struct meson_ir), GFP_KERNEL);
if (!ir)
@@ -125,13 +125,13 @@ static int meson_ir_probe(struct platform_device *pdev)
return PTR_ERR(ir->reg);
}
- ir->irq = platform_get_irq(pdev, 0);
- if (ir->irq < 0) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
dev_err(dev, "no irq resource\n");
- return ir->irq;
+ return irq;
}
- ir->rc = rc_allocate_device(RC_DRIVER_IR_RAW);
+ ir->rc = devm_rc_allocate_device(dev, RC_DRIVER_IR_RAW);
if (!ir->rc) {
dev_err(dev, "failed to allocate rc device\n");
return -ENOMEM;
@@ -143,7 +143,6 @@ static int meson_ir_probe(struct platform_device *pdev)
ir->rc->input_id.bustype = BUS_HOST;
map_name = of_get_property(node, "linux,rc-map-name", NULL);
ir->rc->map_name = map_name ? map_name : RC_MAP_EMPTY;
- ir->rc->dev.parent = dev;
ir->rc->allowed_protocols = RC_BIT_ALL_IR_DECODER;
ir->rc->rx_resolution = US_TO_NS(MESON_TRATE);
ir->rc->timeout = MS_TO_NS(200);
@@ -152,16 +151,16 @@ static int meson_ir_probe(struct platform_device *pdev)
spin_lock_init(&ir->lock);
platform_set_drvdata(pdev, ir);
- ret = rc_register_device(ir->rc);
+ ret = devm_rc_register_device(dev, ir->rc);
if (ret) {
dev_err(dev, "failed to register rc device\n");
- goto out_free;
+ return ret;
}
- ret = devm_request_irq(dev, ir->irq, meson_ir_irq, 0, "ir-meson", ir);
+ ret = devm_request_irq(dev, irq, meson_ir_irq, 0, NULL, ir);
if (ret) {
dev_err(dev, "failed to request irq\n");
- goto out_unreg;
+ return ret;
}
/* Reset the decoder */
@@ -171,29 +170,22 @@ static int meson_ir_probe(struct platform_device *pdev)
/* Set general operation mode (= raw/software decoding) */
if (of_device_is_compatible(node, "amlogic,meson6-ir"))
meson_ir_set_mask(ir, IR_DEC_REG1, REG1_MODE_MASK,
- DECODE_MODE_RAW << REG1_MODE_SHIFT);
+ FIELD_PREP(REG1_MODE_MASK, DECODE_MODE_RAW));
else
meson_ir_set_mask(ir, IR_DEC_REG2, REG2_MODE_MASK,
- DECODE_MODE_RAW << REG2_MODE_SHIFT);
+ FIELD_PREP(REG2_MODE_MASK, DECODE_MODE_RAW));
/* Set rate */
meson_ir_set_mask(ir, IR_DEC_REG0, REG0_RATE_MASK, MESON_TRATE - 1);
/* IRQ on rising and falling edges */
meson_ir_set_mask(ir, IR_DEC_REG1, REG1_IRQSEL_MASK,
- REG1_IRQSEL_RISE_FALL);
+ FIELD_PREP(REG1_IRQSEL_MASK, REG1_IRQSEL_RISE_FALL));
/* Enable the decoder */
meson_ir_set_mask(ir, IR_DEC_REG1, REG1_ENABLE, REG1_ENABLE);
dev_info(dev, "receiver initialized\n");
return 0;
-out_unreg:
- rc_unregister_device(ir->rc);
- ir->rc = NULL;
-out_free:
- rc_free_device(ir->rc);
-
- return ret;
}
static int meson_ir_remove(struct platform_device *pdev)
@@ -206,11 +198,35 @@ static int meson_ir_remove(struct platform_device *pdev)
meson_ir_set_mask(ir, IR_DEC_REG1, REG1_ENABLE, 0);
spin_unlock_irqrestore(&ir->lock, flags);
- rc_unregister_device(ir->rc);
-
return 0;
}
+static void meson_ir_shutdown(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct meson_ir *ir = platform_get_drvdata(pdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ir->lock, flags);
+
+ /*
+ * Set operation mode to NEC/hardware decoding to give
+ * bootloader a chance to power the system back on
+ */
+ if (of_device_is_compatible(node, "amlogic,meson6-ir"))
+ meson_ir_set_mask(ir, IR_DEC_REG1, REG1_MODE_MASK,
+ DECODE_MODE_NEC << REG1_MODE_SHIFT);
+ else
+ meson_ir_set_mask(ir, IR_DEC_REG2, REG2_MODE_MASK,
+ DECODE_MODE_NEC << REG2_MODE_SHIFT);
+
+ /* Set rate to default value */
+ meson_ir_set_mask(ir, IR_DEC_REG0, REG0_RATE_MASK, 0x13);
+
+ spin_unlock_irqrestore(&ir->lock, flags);
+}
+
static const struct of_device_id meson_ir_match[] = {
{ .compatible = "amlogic,meson6-ir" },
{ .compatible = "amlogic,meson8b-ir" },
@@ -222,6 +238,7 @@ MODULE_DEVICE_TABLE(of, meson_ir_match);
static struct platform_driver meson_ir_driver = {
.probe = meson_ir_probe,
.remove = meson_ir_remove,
+ .shutdown = meson_ir_shutdown,
.driver = {
.name = DRIVER_NAME,
.of_match_table = meson_ir_match,
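
The register accesses in the meson-ir hunks above rely on the <linux/bitfield.h> helpers: a field is described once with GENMASK() and then extracted or composed with FIELD_GET()/FIELD_PREP(), which derive the shift from the mask at compile time, so separate *_SHIFT macros become unnecessary. A tiny illustration with made-up EXAMPLE_* masks:

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_TIME_MASK	GENMASK(28, 16)	/* bits 28..16 */
#define EXAMPLE_MODE_MASK	GENMASK(3, 0)	/* bits 3..0 */

static u32 example_pack(u32 time, u32 mode)
{
	/* FIELD_PREP() shifts each value into place under its mask */
	return FIELD_PREP(EXAMPLE_TIME_MASK, time) |
	       FIELD_PREP(EXAMPLE_MODE_MASK, mode);
}

static u32 example_time(u32 reg)
{
	/* FIELD_GET() masks the register and shifts the field back down */
	return FIELD_GET(EXAMPLE_TIME_MASK, reg);
}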
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h
index 0455b273c2fc..b3e7cac2c3ee 100644
--- a/drivers/media/rc/rc-core-priv.h
+++ b/drivers/media/rc/rc-core-priv.h
@@ -263,7 +263,9 @@ int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
* Routines from rc-raw.c to be used internally and by decoders
*/
u64 ir_raw_get_allowed_protocols(void);
+int ir_raw_event_prepare(struct rc_dev *dev);
int ir_raw_event_register(struct rc_dev *dev);
+void ir_raw_event_free(struct rc_dev *dev);
void ir_raw_event_unregister(struct rc_dev *dev);
int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler);
void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler);
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index a2fc1a1d58b0..b6d256f03847 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -486,15 +486,18 @@ EXPORT_SYMBOL(ir_raw_encode_scancode);
/*
* Used to (un)register raw event clients
*/
-int ir_raw_event_register(struct rc_dev *dev)
+int ir_raw_event_prepare(struct rc_dev *dev)
{
- int rc;
- struct ir_raw_handler *handler;
- struct task_struct *thread;
+ static bool raw_init; /* 'false' default value, raw decoders loaded? */
if (!dev)
return -EINVAL;
+ if (!raw_init) {
+ request_module("ir-lirc-codec");
+ raw_init = true;
+ }
+
dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
if (!dev->raw)
return -ENOMEM;
@@ -503,6 +506,14 @@ int ir_raw_event_register(struct rc_dev *dev)
dev->change_protocol = change_protocol;
INIT_KFIFO(dev->raw->kfifo);
+ return 0;
+}
+
+int ir_raw_event_register(struct rc_dev *dev)
+{
+ struct ir_raw_handler *handler;
+ struct task_struct *thread;
+
/*
* raw transmitters do not need any event registration
* because the event is coming from userspace
@@ -511,10 +522,8 @@ int ir_raw_event_register(struct rc_dev *dev)
thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u",
dev->minor);
- if (IS_ERR(thread)) {
- rc = PTR_ERR(thread);
- goto out;
- }
+ if (IS_ERR(thread))
+ return PTR_ERR(thread);
dev->raw->thread = thread;
}
@@ -527,11 +536,15 @@ int ir_raw_event_register(struct rc_dev *dev)
mutex_unlock(&ir_raw_handler_lock);
return 0;
+}
+
+void ir_raw_event_free(struct rc_dev *dev)
+{
+ if (!dev)
+ return;
-out:
kfree(dev->raw);
dev->raw = NULL;
- return rc;
}
void ir_raw_event_unregister(struct rc_dev *dev)
@@ -550,8 +563,7 @@ void ir_raw_event_unregister(struct rc_dev *dev)
handler->raw_unregister(dev);
mutex_unlock(&ir_raw_handler_lock);
- kfree(dev->raw);
- dev->raw = NULL;
+ ir_raw_event_free(dev);
}
/*
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 6ec73357fa47..a9eba0013525 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -15,7 +15,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <media/rc-core.h>
-#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/input.h>
@@ -934,8 +933,8 @@ static bool lirc_is_present(void)
* It returns the protocol names of supported protocols.
* Enabled protocols are printed in brackets.
*
- * dev->lock is taken to guard against races between device
- * registration, store_protocols and show_protocols.
+ * dev->lock is taken to guard against races between
+ * store_protocols and show_protocols.
*/
static ssize_t show_protocols(struct device *device,
struct device_attribute *mattr, char *buf)
@@ -945,13 +944,6 @@ static ssize_t show_protocols(struct device *device,
char *tmp = buf;
int i;
- /* Device is being removed */
- if (!dev)
- return -EINVAL;
-
- if (!atomic_read(&dev->initialized))
- return -ERESTARTSYS;
-
mutex_lock(&dev->lock);
enabled = dev->enabled_protocols;
@@ -1106,8 +1098,8 @@ static void ir_raw_load_modules(u64 *protocols)
* See parse_protocol_change() for the valid commands.
* Returns @len on success or a negative error code.
*
- * dev->lock is taken to guard against races between device
- * registration, store_protocols and show_protocols.
+ * dev->lock is taken to guard against races between
+ * store_protocols and show_protocols.
*/
static ssize_t store_protocols(struct device *device,
struct device_attribute *mattr,
@@ -1119,13 +1111,6 @@ static ssize_t store_protocols(struct device *device,
u64 old_protocols, new_protocols;
ssize_t rc;
- /* Device is being removed */
- if (!dev)
- return -EINVAL;
-
- if (!atomic_read(&dev->initialized))
- return -ERESTARTSYS;
-
IR_dprintk(1, "Normal protocol change requested\n");
current_protocols = &dev->enabled_protocols;
filter = &dev->scancode_filter;
@@ -1200,7 +1185,7 @@ out:
* Bits of the filter value corresponding to set bits in the filter mask are
* compared against input scancodes and non-matching scancodes are discarded.
*
- * dev->lock is taken to guard against races between device registration,
+ * dev->lock is taken to guard against races between
* store_filter and show_filter.
*/
static ssize_t show_filter(struct device *device,
@@ -1212,13 +1197,6 @@ static ssize_t show_filter(struct device *device,
struct rc_scancode_filter *filter;
u32 val;
- /* Device is being removed */
- if (!dev)
- return -EINVAL;
-
- if (!atomic_read(&dev->initialized))
- return -ERESTARTSYS;
-
mutex_lock(&dev->lock);
if (fattr->type == RC_FILTER_NORMAL)
@@ -1251,7 +1229,7 @@ static ssize_t show_filter(struct device *device,
* Bits of the filter value corresponding to set bits in the filter mask are
* compared against input scancodes and non-matching scancodes are discarded.
*
- * dev->lock is taken to guard against races between device registration,
+ * dev->lock is taken to guard against races between
* store_filter and show_filter.
*/
static ssize_t store_filter(struct device *device,
@@ -1265,13 +1243,6 @@ static ssize_t store_filter(struct device *device,
unsigned long val;
int (*set_filter)(struct rc_dev *dev, struct rc_scancode_filter *filter);
- /* Device is being removed */
- if (!dev)
- return -EINVAL;
-
- if (!atomic_read(&dev->initialized))
- return -ERESTARTSYS;
-
ret = kstrtoul(buf, 0, &val);
if (ret < 0)
return ret;
@@ -1372,8 +1343,8 @@ static const char * const proto_variant_names[] = {
* It returns the protocol names of supported protocols.
* The enabled protocols are printed in brackets.
*
- * dev->lock is taken to guard against races between device
- * registration, store_protocols and show_protocols.
+ * dev->lock is taken to guard against races between
+ * store_wakeup_protocols and show_wakeup_protocols.
*/
static ssize_t show_wakeup_protocols(struct device *device,
struct device_attribute *mattr,
@@ -1385,13 +1356,6 @@ static ssize_t show_wakeup_protocols(struct device *device,
char *tmp = buf;
int i;
- /* Device is being removed */
- if (!dev)
- return -EINVAL;
-
- if (!atomic_read(&dev->initialized))
- return -ERESTARTSYS;
-
mutex_lock(&dev->lock);
allowed = dev->allowed_wakeup_protocols;
@@ -1431,8 +1395,8 @@ static ssize_t show_wakeup_protocols(struct device *device,
* It is triggered by writing to /sys/class/rc/rc?/wakeup_protocols.
* Returns @len on success or a negative error code.
*
- * dev->lock is taken to guard against races between device
- * registration, store_protocols and show_protocols.
+ * dev->lock is taken to guard against races between
+ * store_wakeup_protocols and show_wakeup_protocols.
*/
static ssize_t store_wakeup_protocols(struct device *device,
struct device_attribute *mattr,
@@ -1444,13 +1408,6 @@ static ssize_t store_wakeup_protocols(struct device *device,
u64 allowed;
int i;
- /* Device is being removed */
- if (!dev)
- return -EINVAL;
-
- if (!atomic_read(&dev->initialized))
- return -ERESTARTSYS;
-
mutex_lock(&dev->lock);
allowed = dev->allowed_wakeup_protocols;
@@ -1663,7 +1620,7 @@ struct rc_dev *devm_rc_allocate_device(struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_rc_allocate_device);
-static int rc_setup_rx_device(struct rc_dev *dev)
+static int rc_prepare_rx_device(struct rc_dev *dev)
{
int rc;
struct rc_map *rc_map;
@@ -1703,6 +1660,28 @@ static int rc_setup_rx_device(struct rc_dev *dev)
if (dev->close)
dev->input_dev->close = ir_close;
+ dev->input_dev->dev.parent = &dev->dev;
+ memcpy(&dev->input_dev->id, &dev->input_id, sizeof(dev->input_id));
+ dev->input_dev->phys = dev->input_phys;
+ dev->input_dev->name = dev->input_name;
+
+ return 0;
+
+out_table:
+ ir_free_table(&dev->rc_map);
+
+ return rc;
+}
+
+static int rc_setup_rx_device(struct rc_dev *dev)
+{
+ int rc;
+
+ /* rc_open will be called here */
+ rc = input_register_device(dev->input_dev);
+ if (rc)
+ return rc;
+
/*
* Default delay of 250ms is too short for some protocols, especially
* since the timeout is currently set to 250ms. Increase it to 500ms,
@@ -1718,38 +1697,24 @@ static int rc_setup_rx_device(struct rc_dev *dev)
*/
dev->input_dev->rep[REP_PERIOD] = 125;
- dev->input_dev->dev.parent = &dev->dev;
- memcpy(&dev->input_dev->id, &dev->input_id, sizeof(dev->input_id));
- dev->input_dev->phys = dev->input_phys;
- dev->input_dev->name = dev->input_name;
-
- /* rc_open will be called here */
- rc = input_register_device(dev->input_dev);
- if (rc)
- goto out_table;
-
return 0;
-
-out_table:
- ir_free_table(&dev->rc_map);
-
- return rc;
}
static void rc_free_rx_device(struct rc_dev *dev)
{
- if (!dev || dev->driver_type == RC_DRIVER_IR_RAW_TX)
+ if (!dev)
return;
- ir_free_table(&dev->rc_map);
+ if (dev->input_dev) {
+ input_unregister_device(dev->input_dev);
+ dev->input_dev = NULL;
+ }
- input_unregister_device(dev->input_dev);
- dev->input_dev = NULL;
+ ir_free_table(&dev->rc_map);
}
int rc_register_device(struct rc_dev *dev)
{
- static bool raw_init; /* 'false' default value, raw decoders loaded? */
const char *path;
int attr = 0;
int minor;
@@ -1765,7 +1730,6 @@ int rc_register_device(struct rc_dev *dev)
dev->minor = minor;
dev_set_name(&dev->dev, "rc%u", dev->minor);
dev_set_drvdata(&dev->dev, dev);
- atomic_set(&dev->initialized, 0);
dev->dev.groups = dev->sysfs_groups;
if (dev->driver_type != RC_DRIVER_IR_RAW_TX)
@@ -1776,34 +1740,40 @@ int rc_register_device(struct rc_dev *dev)
dev->sysfs_groups[attr++] = &rc_dev_wakeup_filter_attr_grp;
dev->sysfs_groups[attr++] = NULL;
+ if (dev->driver_type == RC_DRIVER_IR_RAW ||
+ dev->driver_type == RC_DRIVER_IR_RAW_TX) {
+ rc = ir_raw_event_prepare(dev);
+ if (rc < 0)
+ goto out_minor;
+ }
+
+ if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
+ rc = rc_prepare_rx_device(dev);
+ if (rc)
+ goto out_raw;
+ }
+
rc = device_add(&dev->dev);
if (rc)
- goto out_unlock;
+ goto out_rx_free;
path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
dev_info(&dev->dev, "%s as %s\n",
dev->input_name ?: "Unspecified device", path ?: "N/A");
kfree(path);
- if (dev->driver_type == RC_DRIVER_IR_RAW ||
- dev->driver_type == RC_DRIVER_IR_RAW_TX) {
- if (!raw_init) {
- request_module_nowait("ir-lirc-codec");
- raw_init = true;
- }
- rc = ir_raw_event_register(dev);
- if (rc < 0)
- goto out_dev;
- }
-
if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
rc = rc_setup_rx_device(dev);
if (rc)
- goto out_raw;
+ goto out_dev;
}
- /* Allow the RC sysfs nodes to be accessible */
- atomic_set(&dev->initialized, 1);
+ if (dev->driver_type == RC_DRIVER_IR_RAW ||
+ dev->driver_type == RC_DRIVER_IR_RAW_TX) {
+ rc = ir_raw_event_register(dev);
+ if (rc < 0)
+ goto out_rx;
+ }
IR_dprintk(1, "Registered rc%u (driver: %s)\n",
dev->minor,
@@ -1811,11 +1781,15 @@ int rc_register_device(struct rc_dev *dev)
return 0;
-out_raw:
- ir_raw_event_unregister(dev);
+out_rx:
+ rc_free_rx_device(dev);
out_dev:
device_del(&dev->dev);
-out_unlock:
+out_rx_free:
+ ir_free_table(&dev->rc_map);
+out_raw:
+ ir_raw_event_free(dev);
+out_minor:
ida_simple_remove(&rc_ida, minor);
return rc;
}
diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c
index 90a5f8fd5eea..20234ba0b318 100644
--- a/drivers/media/rc/sir_ir.c
+++ b/drivers/media/rc/sir_ir.c
@@ -53,16 +53,13 @@ static DEFINE_SPINLOCK(hardware_lock);
/* Communication with user-space */
static void add_read_queue(int flag, unsigned long val);
-static int init_chrdev(void);
/* Hardware */
static irqreturn_t sir_interrupt(int irq, void *dev_id);
static void send_space(unsigned long len);
static void send_pulse(unsigned long len);
-static int init_hardware(void);
+static void init_hardware(void);
static void drop_hardware(void);
/* Initialisation */
-static int init_port(void);
-static void drop_port(void);
static inline unsigned int sinp(int offset)
{
@@ -122,28 +119,6 @@ static void add_read_queue(int flag, unsigned long val)
ir_raw_event_store_with_filter(rcdev, &ev);
}
-static int init_chrdev(void)
-{
- rcdev = devm_rc_allocate_device(&sir_ir_dev->dev, RC_DRIVER_IR_RAW);
- if (!rcdev)
- return -ENOMEM;
-
- rcdev->input_name = "SIR IrDA port";
- rcdev->input_phys = KBUILD_MODNAME "/input0";
- rcdev->input_id.bustype = BUS_HOST;
- rcdev->input_id.vendor = 0x0001;
- rcdev->input_id.product = 0x0001;
- rcdev->input_id.version = 0x0100;
- rcdev->tx_ir = sir_tx_ir;
- rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
- rcdev->driver_name = KBUILD_MODNAME;
- rcdev->map_name = RC_MAP_RC6_MCE;
- rcdev->timeout = IR_DEFAULT_TIMEOUT;
- rcdev->dev.parent = &sir_ir_dev->dev;
-
- return devm_rc_register_device(&sir_ir_dev->dev, rcdev);
-}
-
/* SECTION: Hardware */
static void sir_timeout(unsigned long data)
{
@@ -288,7 +263,7 @@ static void send_pulse(unsigned long len)
}
}
-static int init_hardware(void)
+static void init_hardware(void)
{
unsigned long flags;
@@ -310,7 +285,6 @@ static int init_hardware(void)
/* turn on UART */
outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2, io + UART_MCR);
spin_unlock_irqrestore(&hardware_lock, flags);
- return 0;
}
static void drop_hardware(void)
@@ -326,61 +300,55 @@ static void drop_hardware(void)
}
/* SECTION: Initialisation */
-
-static int init_port(void)
+static int sir_ir_probe(struct platform_device *dev)
{
int retval;
+ rcdev = devm_rc_allocate_device(&sir_ir_dev->dev, RC_DRIVER_IR_RAW);
+ if (!rcdev)
+ return -ENOMEM;
+
+ rcdev->input_name = "SIR IrDA port";
+ rcdev->input_phys = KBUILD_MODNAME "/input0";
+ rcdev->input_id.bustype = BUS_HOST;
+ rcdev->input_id.vendor = 0x0001;
+ rcdev->input_id.product = 0x0001;
+ rcdev->input_id.version = 0x0100;
+ rcdev->tx_ir = sir_tx_ir;
+ rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
+ rcdev->driver_name = KBUILD_MODNAME;
+ rcdev->map_name = RC_MAP_RC6_MCE;
+ rcdev->timeout = IR_DEFAULT_TIMEOUT;
+ rcdev->dev.parent = &sir_ir_dev->dev;
+
setup_timer(&timerlist, sir_timeout, 0);
/* get I/O port access and IRQ line */
- if (!request_region(io, 8, KBUILD_MODNAME)) {
+ if (!devm_request_region(&sir_ir_dev->dev, io, 8, KBUILD_MODNAME)) {
pr_err("i/o port 0x%.4x already in use.\n", io);
return -EBUSY;
}
- retval = request_irq(irq, sir_interrupt, 0,
- KBUILD_MODNAME, NULL);
+ retval = devm_request_irq(&sir_ir_dev->dev, irq, sir_interrupt, 0,
+ KBUILD_MODNAME, NULL);
if (retval < 0) {
- release_region(io, 8);
pr_err("IRQ %d already in use.\n", irq);
return retval;
}
pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq);
- return 0;
-}
-
-static void drop_port(void)
-{
- free_irq(irq, NULL);
- del_timer_sync(&timerlist);
- release_region(io, 8);
-}
-
-static int init_sir_ir(void)
-{
- int retval;
-
- retval = init_port();
+ retval = devm_rc_register_device(&sir_ir_dev->dev, rcdev);
if (retval < 0)
return retval;
- init_hardware();
- return 0;
-}
-
-static int sir_ir_probe(struct platform_device *dev)
-{
- int retval;
- retval = init_chrdev();
- if (retval < 0)
- return retval;
+ init_hardware();
- return init_sir_ir();
+ return 0;
}
static int sir_ir_remove(struct platform_device *dev)
{
+ drop_hardware();
+ del_timer_sync(&timerlist);
return 0;
}
@@ -421,8 +389,6 @@ pdev_alloc_fail:
static void __exit sir_ir_exit(void)
{
- drop_hardware();
- drop_port();
platform_device_unregister(sir_ir_dev);
platform_driver_unregister(&sir_ir_driver);
}
@@ -434,10 +400,10 @@ MODULE_DESCRIPTION("Infrared receiver driver for SIR type serial ports");
MODULE_AUTHOR("Milan Pikula");
MODULE_LICENSE("GPL");
-module_param(io, int, 0444);
+module_param_hw(io, int, ioport, 0444);
MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)");
-module_param(irq, int, 0444);
+module_param_hw(irq, int, irq, 0444);
MODULE_PARM_DESC(irq, "Interrupt (4 or 3)");
module_param(threshold, int, 0444);
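
The sir_ir rework above folds all setup into probe() and leans on managed (devm_*) resources, so the separate drop_port()/cleanup paths disappear: whatever was requested through devm_* is released automatically when probe fails or the device is unbound. It also switches to module_param_hw(), which marks io/irq as hardware parameters. A stripped-down sketch of that shape, with illustrative demo_* names:

#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>

static int io = 0x3f8;
module_param_hw(io, int, ioport, 0444);

static int irq = 4;
module_param_hw(irq, int, irq, 0444);

static irqreturn_t demo_isr(int irqno, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	/* devm_* resources need no matching release in remove() */
	if (!devm_request_region(&pdev->dev, io, 8, KBUILD_MODNAME))
		return -EBUSY;

	return devm_request_irq(&pdev->dev, irq, demo_isr, 0,
				KBUILD_MODNAME, NULL);
}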
diff --git a/drivers/media/tuners/tda18271-fe.c b/drivers/media/tuners/tda18271-fe.c
index b4e5fa2ff5e5..147155553648 100644
--- a/drivers/media/tuners/tda18271-fe.c
+++ b/drivers/media/tuners/tda18271-fe.c
@@ -960,7 +960,7 @@ static int tda18271_set_params(struct dvb_frontend *fe)
break;
case SYS_DVBC_ANNEX_B:
bw = 6000000;
- /* falltrough */
+ /* fall through */
case SYS_DVBC_ANNEX_A:
case SYS_DVBC_ANNEX_C:
if (bw <= 6000000) {
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index e823aafce276..0e7e4fdf9e50 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -565,38 +565,16 @@ static int xc_get_totalgain(struct xc5000_priv *priv, u16 *totalgain)
return xc5000_readreg(priv, XREG_TOTALGAIN, totalgain);
}
-static u16 wait_for_lock(struct xc5000_priv *priv)
-{
- u16 lock_state = 0;
- int watch_dog_count = 40;
-
- while ((lock_state == 0) && (watch_dog_count > 0)) {
- xc_get_lock_status(priv, &lock_state);
- if (lock_state != 1) {
- msleep(5);
- watch_dog_count--;
- }
- }
- return lock_state;
-}
-
#define XC_TUNE_ANALOG 0
#define XC_TUNE_DIGITAL 1
static int xc_tune_channel(struct xc5000_priv *priv, u32 freq_hz, int mode)
{
- int found = 0;
-
dprintk(1, "%s(%u)\n", __func__, freq_hz);
if (xc_set_rf_frequency(priv, freq_hz) != 0)
- return 0;
-
- if (mode == XC_TUNE_ANALOG) {
- if (wait_for_lock(priv) == 1)
- found = 1;
- }
+ return -EREMOTEIO;
- return found;
+ return 0;
}
static int xc_set_xtal(struct dvb_frontend *fe)
@@ -788,6 +766,7 @@ static int xc5000_set_digital_params(struct dvb_frontend *fe)
if (!bw)
bw = 6000000;
/* fall to OFDM handling */
+ /* fall through */
case SYS_DMBTH:
case SYS_DVBT:
case SYS_DVBT2:
diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c
index 7e0c9b795e52..34dc7e062471 100644
--- a/drivers/media/usb/au0828/au0828-dvb.c
+++ b/drivers/media/usb/au0828/au0828-dvb.c
@@ -105,6 +105,15 @@ static struct tda18271_config hauppauge_woodbury_tunerconfig = {
static void au0828_restart_dvb_streaming(struct work_struct *work);
+static void au0828_bulk_timeout(unsigned long data)
+{
+ struct au0828_dev *dev = (struct au0828_dev *) data;
+
+ dprintk(1, "%s called\n", __func__);
+ dev->bulk_timeout_running = 0;
+ schedule_work(&dev->restart_streaming);
+}
+
/*-------------------------------------------------------------------*/
static void urb_completion(struct urb *purb)
{
@@ -138,6 +147,13 @@ static void urb_completion(struct urb *purb)
ptr[0], purb->actual_length);
schedule_work(&dev->restart_streaming);
return;
+ } else if (dev->bulk_timeout_running == 1) {
+ /* The URB handler has fired, so cancel the timer that
+ * would otherwise restart the endpoint
+ */
+ dprintk(1, "%s cancelling bulk timeout\n", __func__);
+ dev->bulk_timeout_running = 0;
+ del_timer(&dev->bulk_timeout);
}
/* Feed the transport payload into the kernel demux */
@@ -160,6 +176,11 @@ static int stop_urb_transfer(struct au0828_dev *dev)
if (!dev->urb_streaming)
return 0;
+ if (dev->bulk_timeout_running == 1) {
+ dev->bulk_timeout_running = 0;
+ del_timer(&dev->bulk_timeout);
+ }
+
dev->urb_streaming = false;
for (i = 0; i < URB_COUNT; i++) {
if (dev->urbs[i]) {
@@ -232,6 +253,11 @@ static int start_urb_transfer(struct au0828_dev *dev)
}
dev->urb_streaming = true;
+
+ /* If we don't get valid data within 1 second, restart the stream */
+ mod_timer(&dev->bulk_timeout, jiffies + (HZ));
+ dev->bulk_timeout_running = 1;
+
return 0;
}
@@ -622,6 +648,10 @@ int au0828_dvb_register(struct au0828_dev *dev)
return ret;
}
+ dev->bulk_timeout.function = au0828_bulk_timeout;
+ dev->bulk_timeout.data = (unsigned long) dev;
+ init_timer(&dev->bulk_timeout);
+
return 0;
}
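
The au0828 hunks above add a simple watchdog: a timer is armed when bulk streaming starts, deleted again as soon as the URB handler sees data, and if it ever fires it schedules the existing restart_streaming work. A reduced sketch of that pattern, using the pre-4.15 timer API that the driver itself uses (the demo_* names are illustrative):

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct demo_stream {
	struct timer_list watchdog;
	int watchdog_running;
	struct work_struct restart;	/* INIT_WORK() done elsewhere */
};

/* runs only if no data arrived within a second of (re)arming */
static void demo_watchdog_fn(unsigned long data)
{
	struct demo_stream *s = (struct demo_stream *)data;

	s->watchdog_running = 0;
	schedule_work(&s->restart);
}

static void demo_stream_start(struct demo_stream *s)
{
	/* old-style setup; kernels >= 4.15 would use timer_setup() */
	s->watchdog.function = demo_watchdog_fn;
	s->watchdog.data = (unsigned long)s;
	init_timer(&s->watchdog);

	mod_timer(&s->watchdog, jiffies + HZ);
	s->watchdog_running = 1;
}

static void demo_stream_got_data(struct demo_stream *s)
{
	if (s->watchdog_running) {
		s->watchdog_running = 0;
		del_timer(&s->watchdog);
	}
}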
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index 88e59748ebc2..05e445fe0b77 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -195,6 +195,8 @@ struct au0828_dev {
/* Digital */
struct au0828_dvb dvb;
struct work_struct restart_streaming;
+ struct timer_list bulk_timeout;
+ int bulk_timeout_running;
#ifdef CONFIG_VIDEO_AU0828_V4L2
/* Analog */
diff --git a/drivers/media/usb/cpia2/cpia2_core.c b/drivers/media/usb/cpia2/cpia2_core.c
index b1d13444ff30..0efba0da0a45 100644
--- a/drivers/media/usb/cpia2/cpia2_core.c
+++ b/drivers/media/usb/cpia2/cpia2_core.c
@@ -173,7 +173,8 @@ int cpia2_do_command(struct camera_data *cam,
cmd.start = CPIA2_VP_DEVICEH;
break;
case CPIA2_CMD_SET_VP_BRIGHTNESS:
- cmd.buffer.block_data[0] = param; /* Then fall through */
+ cmd.buffer.block_data[0] = param;
+ /* fall through */
case CPIA2_CMD_GET_VP_BRIGHTNESS:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
@@ -183,14 +184,16 @@ int cpia2_do_command(struct camera_data *cam,
cmd.start = CPIA2_VP5_EXPOSURE_TARGET;
break;
case CPIA2_CMD_SET_CONTRAST:
- cmd.buffer.block_data[0] = param; /* Then fall through */
+ cmd.buffer.block_data[0] = param;
+ /* fall through */
case CPIA2_CMD_GET_CONTRAST:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
cmd.start = CPIA2_VP_YRANGE;
break;
case CPIA2_CMD_SET_VP_SATURATION:
- cmd.buffer.block_data[0] = param; /* Then fall through */
+ cmd.buffer.block_data[0] = param;
+ /* fall through */
case CPIA2_CMD_GET_VP_SATURATION:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
@@ -200,28 +203,32 @@ int cpia2_do_command(struct camera_data *cam,
cmd.start = CPIA2_VP5_MCUVSATURATION;
break;
case CPIA2_CMD_SET_VP_GPIO_DATA:
- cmd.buffer.block_data[0] = param; /* Then fall through */
+ cmd.buffer.block_data[0] = param;
+ /* fall through */
case CPIA2_CMD_GET_VP_GPIO_DATA:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
cmd.start = CPIA2_VP_GPIO_DATA;
break;
case CPIA2_CMD_SET_VP_GPIO_DIRECTION:
- cmd.buffer.block_data[0] = param; /* Then fall through */
+ cmd.buffer.block_data[0] = param;
+ /* fall through */
case CPIA2_CMD_GET_VP_GPIO_DIRECTION:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
cmd.start = CPIA2_VP_GPIO_DIRECTION;
break;
case CPIA2_CMD_SET_VC_MP_GPIO_DATA:
- cmd.buffer.block_data[0] = param; /* Then fall through */
+ cmd.buffer.block_data[0] = param;
+ /* fall through */
case CPIA2_CMD_GET_VC_MP_GPIO_DATA:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
cmd.reg_count = 1;
cmd.start = CPIA2_VC_MP_DATA;
break;
case CPIA2_CMD_SET_VC_MP_GPIO_DIRECTION:
- cmd.buffer.block_data[0] = param; /* Then fall through */
+ cmd.buffer.block_data[0] = param;
+ /* fall through */
case CPIA2_CMD_GET_VC_MP_GPIO_DIRECTION:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
cmd.reg_count = 1;
@@ -235,7 +242,8 @@ int cpia2_do_command(struct camera_data *cam,
cmd.buffer.block_data[0] = param;
break;
case CPIA2_CMD_SET_FLICKER_MODES:
- cmd.buffer.block_data[0] = param; /* Then fall through */
+ cmd.buffer.block_data[0] = param;
+ /* fall through */
case CPIA2_CMD_GET_FLICKER_MODES:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
@@ -280,8 +288,9 @@ int cpia2_do_command(struct camera_data *cam,
cmd.start = CPIA2_SYSTEM_SYSTEM_CONTROL;
cmd.buffer.block_data[0] = CPIA2_SYSTEM_CONTROL_CLEAR_ERR;
break;
- case CPIA2_CMD_SET_USER_MODE: /* Then fall through */
+ case CPIA2_CMD_SET_USER_MODE:
cmd.buffer.block_data[0] = param;
+ /* fall through */
case CPIA2_CMD_GET_USER_MODE:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
@@ -300,14 +309,16 @@ int cpia2_do_command(struct camera_data *cam,
cmd.buffer.block_data[0] = param;
break;
case CPIA2_CMD_SET_WAKEUP:
- cmd.buffer.block_data[0] = param; /* Then fall through */
+ cmd.buffer.block_data[0] = param;
+ /* fall through */
case CPIA2_CMD_GET_WAKEUP:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
cmd.reg_count = 1;
cmd.start = CPIA2_VC_WAKEUP;
break;
case CPIA2_CMD_SET_PW_CONTROL:
- cmd.buffer.block_data[0] = param; /* Then fall through */
+ cmd.buffer.block_data[0] = param;
+ /* fall through */
case CPIA2_CMD_GET_PW_CONTROL:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
cmd.reg_count = 1;
@@ -319,7 +330,8 @@ int cpia2_do_command(struct camera_data *cam,
cmd.start = CPIA2_VP_SYSTEMSTATE;
break;
case CPIA2_CMD_SET_SYSTEM_CTRL:
- cmd.buffer.block_data[0] = param; /* Then fall through */
+ cmd.buffer.block_data[0] = param;
+ /* fall through */
case CPIA2_CMD_GET_SYSTEM_CTRL:
cmd.req_mode =
CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_SYSTEM;
@@ -327,21 +339,24 @@ int cpia2_do_command(struct camera_data *cam,
cmd.start = CPIA2_SYSTEM_SYSTEM_CONTROL;
break;
case CPIA2_CMD_SET_VP_SYSTEM_CTRL:
- cmd.buffer.block_data[0] = param; /* Then fall through */
+ cmd.buffer.block_data[0] = param;
+ /* fall through */
case CPIA2_CMD_GET_VP_SYSTEM_CTRL:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
cmd.start = CPIA2_VP_SYSTEMCTRL;
break;
case CPIA2_CMD_SET_VP_EXP_MODES:
- cmd.buffer.block_data[0] = param; /* Then fall through */
+ cmd.buffer.block_data[0] = param;
+ /* fall through */
case CPIA2_CMD_GET_VP_EXP_MODES:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
cmd.start = CPIA2_VP_EXPOSURE_MODES;
break;
case CPIA2_CMD_SET_DEVICE_CONFIG:
- cmd.buffer.block_data[0] = param; /* Then fall through */
+ cmd.buffer.block_data[0] = param;
+ /* fall through */
case CPIA2_CMD_GET_DEVICE_CONFIG:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
@@ -361,7 +376,8 @@ int cpia2_do_command(struct camera_data *cam,
cmd.start = CPIA2_SENSOR_CR1;
break;
case CPIA2_CMD_SET_VC_CONTROL:
- cmd.buffer.block_data[0] = param; /* Then fall through */
+ cmd.buffer.block_data[0] = param;
+ /* fall through */
case CPIA2_CMD_GET_VC_CONTROL:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VC;
cmd.reg_count = 1;
@@ -395,7 +411,8 @@ int cpia2_do_command(struct camera_data *cam,
case CPIA2_CMD_SET_USER_EFFECTS: /* Note: Be careful with this as
this register can also affect
flicker modes */
- cmd.buffer.block_data[0] = param; /* Then fall through */
+ cmd.buffer.block_data[0] = param;
+ /* fall through */
case CPIA2_CMD_GET_USER_EFFECTS:
cmd.req_mode = CAMERAACCESS_TYPE_BLOCK | CAMERAACCESS_VP;
cmd.reg_count = 1;
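
The cpia2 hunks above (like the similar one-liners in tda18271, xc5000, cx231xx and af9015 elsewhere in this series) only reposition fall-through annotations: the comment moves onto its own line directly before the next case label, which is the placement that GCC's -Wimplicit-fallthrough heuristics and most static checkers recognize as an intentional fall-through. A trivial sketch of the expected shape (demo_dispatch and the enum are made up):

enum { CMD_SET_VALUE, CMD_GET_VALUE };

static int demo_dispatch(int cmd, int param, int value)
{
	switch (cmd) {
	case CMD_SET_VALUE:
		value = param;
		/* fall through */
	case CMD_GET_VALUE:
		return value;
	default:
		return -1;
	}
}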
diff --git a/drivers/media/usb/cx231xx/Kconfig b/drivers/media/usb/cx231xx/Kconfig
index 58de80bff4c7..6276d9b2198b 100644
--- a/drivers/media/usb/cx231xx/Kconfig
+++ b/drivers/media/usb/cx231xx/Kconfig
@@ -52,6 +52,8 @@ config VIDEO_CX231XX_DVB
select DVB_SI2165 if MEDIA_SUBDRV_AUTOSELECT
select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_MN88473 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_R820T if MEDIA_SUBDRV_AUTOSELECT
---help---
This adds support for DVB cards based on the
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index a1007d005290..e0daa9b6c2a0 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -868,6 +868,33 @@ struct cx231xx_board cx231xx_boards[] = {
.amux = CX231XX_AMUX_LINE_IN,
} },
},
+ [CX231XX_BOARD_ASTROMETA_T2HYBRID] = {
+ .name = "Astrometa T2hybrid",
+ .tuner_type = TUNER_ABSENT,
+ .has_dvb = 1,
+ .output_mode = OUT_MODE_VIP11,
+ .agc_analog_digital_select_gpio = 0x01,
+ .ctl_pin_status_mask = 0xffffffc4,
+ .demod_addr = 0x18, /* 0x30 >> 1 */
+ .demod_i2c_master = I2C_1_MUX_1,
+ .gpio_pin_status_mask = 0xa,
+ .norm = V4L2_STD_NTSC,
+ .tuner_addr = 0x3a, /* 0x74 >> 1 */
+ .tuner_i2c_master = I2C_1_MUX_3,
+ .tuner_scl_gpio = 0x1a,
+ .tuner_sda_gpio = 0x1b,
+ .tuner_sif_gpio = 0x05,
+ .input = {{
+ .type = CX231XX_VMUX_TELEVISION,
+ .vmux = CX231XX_VIN_1_1,
+ .amux = CX231XX_AMUX_VIDEO,
+ }, {
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ },
+ },
+ },
};
const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards);
@@ -937,6 +964,8 @@ struct usb_device_id cx231xx_id_table[] = {
.driver_info = CX231XX_BOARD_TERRATEC_GRABBY},
{USB_DEVICE(0x1b80, 0xd3b2),
.driver_info = CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD},
+ {USB_DEVICE(0x15f4, 0x0135),
+ .driver_info = CX231XX_BOARD_ASTROMETA_T2HYBRID},
{},
};
@@ -1013,6 +1042,11 @@ void cx231xx_pre_card_setup(struct cx231xx *dev)
dev_info(dev->dev, "Identified as %s (card=%d)\n",
dev->board.name, dev->model);
+ if (CX231XX_BOARD_ASTROMETA_T2HYBRID == dev->model) {
+ /* turn on demodulator chip */
+ cx231xx_set_gpio_value(dev, 0x03, 0x01);
+ }
+
/* set the direction for GPIO pins */
if (dev->board.tuner_gpio) {
cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1);
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index 46427fd3b220..ee3eeeb600f8 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -37,6 +37,8 @@
#include "mb86a20s.h"
#include "si2157.h"
#include "lgdt3306a.h"
+#include "r820t.h"
+#include "mn88473.h"
MODULE_DESCRIPTION("driver for cx231xx based DVB cards");
MODULE_AUTHOR("Srinivasa Deevi <[email protected]>");
@@ -164,6 +166,13 @@ static struct lgdt3306a_config hauppauge_955q_lgdt3306a_config = {
.xtalMHz = 25,
};
+static struct r820t_config astrometa_t2hybrid_r820t_config = {
+ .i2c_addr = 0x3a, /* 0x74 >> 1 */
+ .xtal = 16000000,
+ .rafael_chip = CHIP_R828D,
+ .max_i2c_msg_len = 2,
+};
+
static inline void print_err_status(struct cx231xx *dev, int packet, int status)
{
char *errmsg = "Unknown";
@@ -1019,6 +1028,46 @@ static int dvb_init(struct cx231xx *dev)
dev->dvb->i2c_client_tuner = client;
break;
}
+ case CX231XX_BOARD_ASTROMETA_T2HYBRID:
+ {
+ struct i2c_client *client;
+ struct i2c_board_info info = {};
+ struct mn88473_config mn88473_config = {};
+
+ /* attach demodulator chip */
+ mn88473_config.i2c_wr_max = 16;
+ mn88473_config.xtal = 25000000;
+ mn88473_config.fe = &dev->dvb->frontend;
+
+ strlcpy(info.type, "mn88473", sizeof(info.type));
+ info.addr = dev->board.demod_addr;
+ info.platform_data = &mn88473_config;
+
+ request_module(info.type);
+ client = i2c_new_device(demod_i2c, &info);
+
+ if (client == NULL || client->dev.driver == NULL) {
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ dvb->i2c_client_demod = client;
+
+ /* define general-purpose callback pointer */
+ dvb->frontend->callback = cx231xx_tuner_callback;
+
+ /* attach tuner chip */
+ dvb_attach(r820t_attach, dev->dvb->frontend,
+ tuner_i2c,
+ &astrometa_t2hybrid_r820t_config);
+ break;
+ }
default:
dev_err(dev->dev,
"%s/2: The frontend of your DVB/ATSC card isn't supported yet\n",
diff --git a/drivers/media/usb/cx231xx/cx231xx-input.c b/drivers/media/usb/cx231xx/cx231xx-input.c
index 6e80f3c573f3..eecf074b0a48 100644
--- a/drivers/media/usb/cx231xx/cx231xx-input.c
+++ b/drivers/media/usb/cx231xx/cx231xx-input.c
@@ -30,7 +30,7 @@ static int get_key_isdbt(struct IR_i2c *ir, enum rc_type *protocol,
int rc;
u8 cmd, scancode;
- dev_dbg(&ir->rc->input_dev->dev, "%s\n", __func__);
+ dev_dbg(&ir->rc->dev, "%s\n", __func__);
/* poll IR chip */
rc = i2c_master_recv(ir->c, &cmd, 1);
@@ -48,8 +48,7 @@ static int get_key_isdbt(struct IR_i2c *ir, enum rc_type *protocol,
scancode = bitrev8(cmd);
- dev_dbg(&ir->rc->input_dev->dev, "cmd %02x, scan = %02x\n",
- cmd, scancode);
+ dev_dbg(&ir->rc->dev, "cmd %02x, scan = %02x\n", cmd, scancode);
*protocol = RC_TYPE_OTHER;
*pscancode = scancode;
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index 6414188ffdfa..f67f86876625 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -1134,7 +1134,7 @@ void cx231xx_v4l2_create_entities(struct cx231xx *dev)
/* The DVB core will handle it */
if (dev->tuner_type == TUNER_ABSENT)
continue;
- /* fall though */
+ /* fall through */
default: /* just to shut up a gcc warning */
ent->function = MEDIA_ENT_F_CONN_RF;
break;
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index d9792ea4bbc6..986c64ba5b56 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -79,6 +79,7 @@
#define CX231XX_BOARD_HAUPPAUGE_955Q 21
#define CX231XX_BOARD_TERRATEC_GRABBY 22
#define CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD 23
+#define CX231XX_BOARD_ASTROMETA_T2HYBRID 24
/* Limits minimum and default number of buffers */
#define CX231XX_MIN_BUF 4
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index caa1e6101f58..23bbbf367b51 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -36,7 +36,7 @@ static int af9015_ctrl_msg(struct dvb_usb_device *d, struct req_t *req)
state->buf[0] = req->cmd;
state->buf[1] = state->seq++;
- state->buf[2] = req->i2c_addr;
+ state->buf[2] = req->i2c_addr << 1;
state->buf[3] = req->addr >> 8;
state->buf[4] = req->addr & 0xff;
state->buf[5] = req->mbox;
@@ -52,6 +52,7 @@ static int af9015_ctrl_msg(struct dvb_usb_device *d, struct req_t *req)
case READ_I2C:
write = 0;
state->buf[2] |= 0x01; /* set I2C direction */
+ /* fall through */
case WRITE_I2C:
state->buf[0] = READ_WRITE_I2C;
break;
@@ -205,9 +206,9 @@ static int af9015_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
struct af9015_state *state = d_to_priv(d);
- int ret = 0, i = 0;
+ int ret;
u16 addr;
- u8 uninitialized_var(mbox), addr_len;
+ u8 mbox, addr_len;
struct req_t req;
/*
@@ -232,84 +233,89 @@ Due to that the only way to select correct tuner is use demodulator I2C-gate.
| addr 0x3a | | addr 0xc6 |
|____________| |____________|
*/
- if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
- return -EAGAIN;
- while (i < num) {
- if (msg[i].addr == state->af9013_config[0].i2c_addr ||
- msg[i].addr == state->af9013_config[1].i2c_addr) {
- addr = msg[i].buf[0] << 8;
- addr += msg[i].buf[1];
- mbox = msg[i].buf[2];
- addr_len = 3;
- } else {
- addr = msg[i].buf[0];
- addr_len = 1;
- /* mbox is don't care in that case */
- }
+ if (msg[0].len == 0 || msg[0].flags & I2C_M_RD) {
+ addr = 0x0000;
+ mbox = 0;
+ addr_len = 0;
+ } else if (msg[0].len == 1) {
+ addr = msg[0].buf[0];
+ mbox = 0;
+ addr_len = 1;
+ } else if (msg[0].len == 2) {
+ addr = msg[0].buf[0] << 8 | msg[0].buf[1] << 0;
+ mbox = 0;
+ addr_len = 2;
+ } else {
+ addr = msg[0].buf[0] << 8 | msg[0].buf[1] << 0;
+ mbox = msg[0].buf[2];
+ addr_len = 3;
+ }
- if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
- if (msg[i].len > 3 || msg[i+1].len > 61) {
- ret = -EOPNOTSUPP;
- goto error;
- }
- if (msg[i].addr == state->af9013_config[0].i2c_addr)
- req.cmd = READ_MEMORY;
- else
- req.cmd = READ_I2C;
- req.i2c_addr = msg[i].addr;
- req.addr = addr;
- req.mbox = mbox;
- req.addr_len = addr_len;
- req.data_len = msg[i+1].len;
- req.data = &msg[i+1].buf[0];
- ret = af9015_ctrl_msg(d, &req);
- i += 2;
- } else if (msg[i].flags & I2C_M_RD) {
- if (msg[i].len > 61) {
- ret = -EOPNOTSUPP;
- goto error;
- }
- if (msg[i].addr == state->af9013_config[0].i2c_addr) {
- ret = -EINVAL;
- goto error;
- }
+ if (num == 1 && !(msg[0].flags & I2C_M_RD)) {
+ /* i2c write */
+ if (msg[0].len > 21) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+ if (msg[0].addr == state->af9013_config[0].i2c_addr)
+ req.cmd = WRITE_MEMORY;
+ else
+ req.cmd = WRITE_I2C;
+ req.i2c_addr = msg[0].addr;
+ req.addr = addr;
+ req.mbox = mbox;
+ req.addr_len = addr_len;
+ req.data_len = msg[0].len-addr_len;
+ req.data = &msg[0].buf[addr_len];
+ ret = af9015_ctrl_msg(d, &req);
+ } else if (num == 2 && !(msg[0].flags & I2C_M_RD) &&
+ (msg[1].flags & I2C_M_RD)) {
+ /* i2c write + read */
+ if (msg[0].len > 3 || msg[1].len > 61) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+ if (msg[0].addr == state->af9013_config[0].i2c_addr)
+ req.cmd = READ_MEMORY;
+ else
req.cmd = READ_I2C;
- req.i2c_addr = msg[i].addr;
- req.addr = addr;
- req.mbox = mbox;
- req.addr_len = addr_len;
- req.data_len = msg[i].len;
- req.data = &msg[i].buf[0];
- ret = af9015_ctrl_msg(d, &req);
- i += 1;
- } else {
- if (msg[i].len > 21) {
- ret = -EOPNOTSUPP;
- goto error;
- }
- if (msg[i].addr == state->af9013_config[0].i2c_addr)
- req.cmd = WRITE_MEMORY;
- else
- req.cmd = WRITE_I2C;
- req.i2c_addr = msg[i].addr;
- req.addr = addr;
- req.mbox = mbox;
- req.addr_len = addr_len;
- req.data_len = msg[i].len-addr_len;
- req.data = &msg[i].buf[addr_len];
- ret = af9015_ctrl_msg(d, &req);
- i += 1;
+ req.i2c_addr = msg[0].addr;
+ req.addr = addr;
+ req.mbox = mbox;
+ req.addr_len = addr_len;
+ req.data_len = msg[1].len;
+ req.data = &msg[1].buf[0];
+ ret = af9015_ctrl_msg(d, &req);
+ } else if (num == 1 && (msg[0].flags & I2C_M_RD)) {
+ /* i2c read */
+ if (msg[0].len > 61) {
+ ret = -EOPNOTSUPP;
+ goto err;
}
- if (ret)
- goto error;
-
+ if (msg[0].addr == state->af9013_config[0].i2c_addr) {
+ ret = -EINVAL;
+ goto err;
+ }
+ req.cmd = READ_I2C;
+ req.i2c_addr = msg[0].addr;
+ req.addr = addr;
+ req.mbox = mbox;
+ req.addr_len = addr_len;
+ req.data_len = msg[0].len;
+ req.data = &msg[0].buf[0];
+ ret = af9015_ctrl_msg(d, &req);
+ } else {
+ ret = -EOPNOTSUPP;
+ dev_dbg(&d->udev->dev, "%s: unknown msg, num %u\n",
+ __func__, num);
}
- ret = i;
-
-error:
- mutex_unlock(&d->i2c_mutex);
+ if (ret)
+ goto err;
+ return num;
+err:
+ dev_dbg(&d->udev->dev, "%s: failed %d\n", __func__, ret);
return ret;
}
@@ -471,6 +477,8 @@ static int af9015_read_config(struct dvb_usb_device *d)
if (d->udev->speed == USB_SPEED_FULL)
state->dual_mode = 0;
+ state->af9013_config[0].i2c_addr = AF9015_I2C_DEMOD;
+
if (state->dual_mode) {
/* read 2nd demodulator I2C address */
req.addr = AF9015_EEPROM_DEMOD2_I2C;
@@ -478,7 +486,7 @@ static int af9015_read_config(struct dvb_usb_device *d)
if (ret)
goto error;
- state->af9013_config[1].i2c_addr = val;
+ state->af9013_config[1].i2c_addr = val >> 1;
}
for (i = 0; i < state->dual_mode + 1; i++) {
@@ -733,9 +741,6 @@ static int af9015_copy_firmware(struct dvb_usb_device *d)
fw_params[2] = state->firmware_checksum >> 8;
fw_params[3] = state->firmware_checksum & 0xff;
- /* wait 2nd demodulator ready */
- msleep(100);
-
ret = af9015_read_reg_i2c(d, state->af9013_config[1].i2c_addr,
0x98be, &val);
if (ret)
@@ -823,6 +828,9 @@ static int af9015_af9013_frontend_attach(struct dvb_usb_adapter *adap)
/* copy firmware to 2nd demodulator */
if (state->dual_mode) {
+ /* Wait 2nd demodulator ready */
+ msleep(100);
+
ret = af9015_copy_firmware(adap_to_d(adap));
if (ret) {
dev_err(&adap_to_d(adap)->udev->dev,
@@ -870,12 +878,12 @@ static int af9015_af9013_frontend_attach(struct dvb_usb_adapter *adap)
}
static struct mt2060_config af9015_mt2060_config = {
- .i2c_address = 0xc0,
+ .i2c_address = 0x60,
.clock_out = 0,
};
static struct qt1010_config af9015_qt1010_config = {
- .i2c_address = 0xc4,
+ .i2c_address = 0x62,
};
static struct tda18271_config af9015_tda18271_config = {
@@ -884,7 +892,7 @@ static struct tda18271_config af9015_tda18271_config = {
};
static struct mxl5005s_config af9015_mxl5003_config = {
- .i2c_address = 0xc6,
+ .i2c_address = 0x63,
.if_freq = IF_FREQ_4570000HZ,
.xtal_freq = CRYSTAL_FREQ_16000000HZ,
.agc_mode = MXL_SINGLE_AGC,
@@ -901,7 +909,7 @@ static struct mxl5005s_config af9015_mxl5003_config = {
};
static struct mxl5005s_config af9015_mxl5005_config = {
- .i2c_address = 0xc6,
+ .i2c_address = 0x63,
.if_freq = IF_FREQ_4570000HZ,
.xtal_freq = CRYSTAL_FREQ_16000000HZ,
.agc_mode = MXL_SINGLE_AGC,
@@ -918,12 +926,12 @@ static struct mxl5005s_config af9015_mxl5005_config = {
};
static struct mc44s803_config af9015_mc44s803_config = {
- .i2c_address = 0xc0,
+ .i2c_address = 0x60,
.dig_out = 1,
};
static struct tda18218_config af9015_tda18218_config = {
- .i2c_address = 0xc0,
+ .i2c_address = 0x60,
.i2c_wr_max = 21, /* max wr bytes AF9015 I2C adap can handle at once */
};
@@ -954,7 +962,7 @@ static int af9015_tuner_attach(struct dvb_usb_adapter *adap)
&af9015_qt1010_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_TDA18271:
- ret = dvb_attach(tda18271_attach, adap->fe[0], 0xc0,
+ ret = dvb_attach(tda18271_attach, adap->fe[0], 0x60,
&adap_to_d(adap)->i2c_adap,
&af9015_tda18271_config) == NULL ? -ENODEV : 0;
break;
@@ -975,7 +983,7 @@ static int af9015_tuner_attach(struct dvb_usb_adapter *adap)
&af9015_mxl5005_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_ENV77H11D5:
- ret = dvb_attach(dvb_pll_attach, adap->fe[0], 0xc0,
+ ret = dvb_attach(dvb_pll_attach, adap->fe[0], 0x60,
&adap_to_d(adap)->i2c_adap,
DVB_PLL_TDA665X) == NULL ? -ENODEV : 0;
break;
@@ -987,7 +995,7 @@ static int af9015_tuner_attach(struct dvb_usb_adapter *adap)
case AF9013_TUNER_MXL5007T:
ret = dvb_attach(mxl5007t_attach, adap->fe[0],
&adap_to_d(adap)->i2c_adap,
- 0xc0, &af9015_mxl5007t_config) == NULL ? -ENODEV : 0;
+ 0x60, &af9015_mxl5007t_config) == NULL ? -ENODEV : 0;
break;
case AF9013_TUNER_UNKNOWN:
default:
@@ -1124,10 +1132,21 @@ static int af9015_init_endpoint(struct dvb_usb_device *d)
}
/* enable / disable mp2if2 */
- if (state->dual_mode)
+ if (state->dual_mode) {
ret = af9015_set_reg_bit(d, 0xd50b, 0);
- else
+ if (ret)
+ goto error;
+ ret = af9015_set_reg_bit(d, 0xd520, 4);
+ if (ret)
+ goto error;
+ } else {
ret = af9015_clear_reg_bit(d, 0xd50b, 0);
+ if (ret)
+ goto error;
+ ret = af9015_clear_reg_bit(d, 0xd520, 4);
+ if (ret)
+ goto error;
+ }
error:
if (ret)
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.h b/drivers/media/usb/dvb-usb-v2/af9015.h
index 2dd9231a8ece..3a9d9815ab7a 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.h
+++ b/drivers/media/usb/dvb-usb-v2/af9015.h
@@ -47,8 +47,8 @@
#define TS_USB20_MAX_PACKET_SIZE 512
#define TS_USB11_MAX_PACKET_SIZE 64
-#define AF9015_I2C_EEPROM 0xa0
-#define AF9015_I2C_DEMOD 0x38
+#define AF9015_I2C_EEPROM 0x50
+#define AF9015_I2C_DEMOD 0x1c
#define AF9015_USB_TIMEOUT 2000
/* EEPROM locations */
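
The tuner, EEPROM and demodulator address changes in this patch (and the "val >> 1" conversion above) turn 8-bit bus write addresses into the 7-bit I2C addresses expected by the I2C core: every new constant is the old value shifted right by one. A minimal compile-time sketch, purely illustrative and not part of the patch, makes the arithmetic explicit:

/* Hypothetical C11 sanity check, not from the patch: each new 7-bit address
 * equals the old 8-bit write address shifted right by one bit. */
#include <assert.h>

static_assert((0xc0 >> 1) == 0x60, "mt2060 / mc44s803 / tda18218 / dvb_pll / mxl5007t");
static_assert((0xc4 >> 1) == 0x62, "qt1010");
static_assert((0xc6 >> 1) == 0x63, "mxl5003s / mxl5005s");
static_assert((0xa0 >> 1) == 0x50, "AF9015_I2C_EEPROM");
static_assert((0x38 >> 1) == 0x1c, "AF9015_I2C_DEMOD");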
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 924adfdb660d..594360a63c18 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -1065,6 +1065,7 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
}
break;
}
+ /* fall through */
case 0x22f0:
st->i2c_gate = 5;
adap->fe[0] = dvb_attach(m88rs2000_attach,
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
index ffb49c28b15a..0eb33e043079 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
@@ -316,7 +316,7 @@ fail:
static int mxl111sf_i2c_send_data(struct mxl111sf_state *state,
u8 index, u8 *wdata)
{
- int ret = mxl111sf_ctrl_msg(state->d, wdata[0],
+ int ret = mxl111sf_ctrl_msg(state, wdata[0],
&wdata[1], 25, NULL, 0);
mxl_fail(ret);
@@ -326,7 +326,7 @@ static int mxl111sf_i2c_send_data(struct mxl111sf_state *state,
static int mxl111sf_i2c_get_data(struct mxl111sf_state *state,
u8 index, u8 *wdata, u8 *rdata)
{
- int ret = mxl111sf_ctrl_msg(state->d, wdata[0],
+ int ret = mxl111sf_ctrl_msg(state, wdata[0],
&wdata[1], 25, rdata, 24);
mxl_fail(ret);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index abf69d8fa469..b0d5904a4ea6 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -24,9 +24,6 @@
#include "lgdt3305.h"
#include "lg2160.h"
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE 64
-
int dvb_usb_mxl111sf_debug;
module_param_named(debug, dvb_usb_mxl111sf_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level (1=info, 2=xfer, 4=i2c, 8=reg, 16=adv (or-able)).");
@@ -55,27 +52,34 @@ MODULE_PARM_DESC(rfswitch, "force rf switch position (0=auto, 1=ext, 2=int).");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-int mxl111sf_ctrl_msg(struct dvb_usb_device *d,
+int mxl111sf_ctrl_msg(struct mxl111sf_state *state,
u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
+ struct dvb_usb_device *d = state->d;
int wo = (rbuf == NULL || rlen == 0); /* write-only */
int ret;
- u8 sndbuf[MAX_XFER_SIZE];
- if (1 + wlen > sizeof(sndbuf)) {
+ if (1 + wlen > MXL_MAX_XFER_SIZE) {
pr_warn("%s: len=%d is too big!\n", __func__, wlen);
return -EOPNOTSUPP;
}
pr_debug("%s(wlen = %d, rlen = %d)\n", __func__, wlen, rlen);
- memset(sndbuf, 0, 1+wlen);
+ mutex_lock(&state->msg_lock);
+ memset(state->sndbuf, 0, 1+wlen);
+ memset(state->rcvbuf, 0, rlen);
+
+ state->sndbuf[0] = cmd;
+ memcpy(&state->sndbuf[1], wbuf, wlen);
- sndbuf[0] = cmd;
- memcpy(&sndbuf[1], wbuf, wlen);
+ ret = (wo) ? dvb_usbv2_generic_write(d, state->sndbuf, 1+wlen) :
+ dvb_usbv2_generic_rw(d, state->sndbuf, 1+wlen, state->rcvbuf,
+ rlen);
+
+ memcpy(rbuf, state->rcvbuf, rlen);
+ mutex_unlock(&state->msg_lock);
- ret = (wo) ? dvb_usbv2_generic_write(d, sndbuf, 1+wlen) :
- dvb_usbv2_generic_rw(d, sndbuf, 1+wlen, rbuf, rlen);
mxl_fail(ret);
return ret;
@@ -91,7 +95,7 @@ int mxl111sf_read_reg(struct mxl111sf_state *state, u8 addr, u8 *data)
u8 buf[2];
int ret;
- ret = mxl111sf_ctrl_msg(state->d, MXL_CMD_REG_READ, &addr, 1, buf, 2);
+ ret = mxl111sf_ctrl_msg(state, MXL_CMD_REG_READ, &addr, 1, buf, 2);
if (mxl_fail(ret)) {
mxl_debug("error reading reg: 0x%02x", addr);
goto fail;
@@ -117,7 +121,7 @@ int mxl111sf_write_reg(struct mxl111sf_state *state, u8 addr, u8 data)
pr_debug("W: (0x%02x, 0x%02x)\n", addr, data);
- ret = mxl111sf_ctrl_msg(state->d, MXL_CMD_REG_WRITE, buf, 2, NULL, 0);
+ ret = mxl111sf_ctrl_msg(state, MXL_CMD_REG_WRITE, buf, 2, NULL, 0);
if (mxl_fail(ret))
pr_err("error writing reg: 0x%02x, val: 0x%02x", addr, data);
return ret;
@@ -926,6 +930,8 @@ static int mxl111sf_init(struct dvb_usb_device *d)
.len = sizeof(eeprom), .buf = eeprom },
};
+ mutex_init(&state->msg_lock);
+
ret = get_chip_info(state);
if (mxl_fail(ret))
pr_err("failed to get chip info during probe");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.h b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
index 846260e0eec0..3e6f5880bd1e 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
@@ -19,6 +19,9 @@
#include <media/tveeprom.h>
#include <media/media-entity.h>
+/* Max transfer size done by I2C transfer functions */
+#define MXL_MAX_XFER_SIZE 64
+
#define MXL_EP1_REG_READ 1
#define MXL_EP2_REG_WRITE 2
#define MXL_EP3_INTERRUPT 3
@@ -86,6 +89,9 @@ struct mxl111sf_state {
struct mutex fe_lock;
u8 num_frontends;
struct mxl111sf_adap_state adap_state[3];
+ u8 sndbuf[MXL_MAX_XFER_SIZE];
+ u8 rcvbuf[MXL_MAX_XFER_SIZE];
+ struct mutex msg_lock;
#ifdef CONFIG_MEDIA_CONTROLLER_DVB
struct media_entity tuner;
struct media_pad tuner_pads[2];
@@ -108,7 +114,7 @@ int mxl111sf_ctrl_program_regs(struct mxl111sf_state *state,
/* needed for hardware i2c functions in mxl111sf-i2c.c:
* mxl111sf_i2c_send_data / mxl111sf_i2c_get_data */
-int mxl111sf_ctrl_msg(struct dvb_usb_device *d,
+int mxl111sf_ctrl_msg(struct mxl111sf_state *state,
u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen);
#define mxl_printk(kern, fmt, arg...) \
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 85ab3fa48f9a..6a57fc6d3472 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -1659,6 +1659,7 @@ static int dib8096_set_param_override(struct dvb_frontend *fe)
switch (band) {
default:
deb_info("Warning : Rf frequency (%iHz) is not in the supported range, using VHF switch ", fe->dtv_property_cache.frequency);
+ /* fall through */
case BAND_VHF:
state->dib8000_ops.set_gpio(fe, 3, 0, 1);
break;
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-remote.c b/drivers/media/usb/dvb-usb/dvb-usb-remote.c
index 059ded59208e..f05f1fc80729 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-remote.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-remote.c
@@ -131,6 +131,11 @@ static void legacy_dvb_usb_read_remote_control(struct work_struct *work)
case REMOTE_KEY_PRESSED:
deb_rc("key pressed\n");
d->last_event = event;
+ input_event(d->input_dev, EV_KEY, event, 1);
+ input_sync(d->input_dev);
+ input_event(d->input_dev, EV_KEY, d->last_event, 0);
+ input_sync(d->input_dev);
+ break;
case REMOTE_KEY_REPEAT:
deb_rc("key repeated\n");
input_event(d->input_dev, EV_KEY, event, 1);
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 6e654e5026dd..57b187240110 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -1840,11 +1840,12 @@ static int dw2102_load_firmware(struct usb_device *dev,
switch (le16_to_cpu(dev->descriptor.idProduct)) {
case USB_PID_TEVII_S650:
dw2104_properties.rc.core.rc_codes = RC_MAP_TEVII_NEC;
+ /* fall through */
case USB_PID_DW2104:
reset = 1;
dw210x_op_rw(dev, 0xc4, 0x0000, 0, &reset, 1,
DW210X_WRITE_MSG);
- /* break omitted intentionally */
+ /* fall through */
case USB_PID_DW3101:
reset = 0;
dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0,
@@ -1877,6 +1878,7 @@ static int dw2102_load_firmware(struct usb_device *dev,
break;
}
}
+ /* fall through */
case 0x2101:
dw210x_op_rw(dev, 0xbc, 0x0030, 0, &reset16[0], 2,
DW210X_READ_MSG);
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index a12b599a1fa2..146341aeb782 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -2855,7 +2855,7 @@ static int em28xx_hint_board(struct em28xx *dev)
"Your board has no unique USB ID.\n"
"A hint were successfully done, based on eeprom hash.\n"
"This method is not 100%% failproof.\n"
- "If the board were missdetected, please email this log to:\n"
+ "If the board were misdetected, please email this log to:\n"
"\tV4L Mailing List <[email protected]>\n"
"Board detected as %s\n",
em28xx_boards[dev->model].name);
@@ -2885,7 +2885,7 @@ static int em28xx_hint_board(struct em28xx *dev)
"Your board has no unique USB ID.\n"
"A hint were successfully done, based on i2c devicelist hash.\n"
"This method is not 100%% failproof.\n"
- "If the board were missdetected, please email this log to:\n"
+ "If the board were misdetected, please email this log to:\n"
"\tV4L Mailing List <[email protected]>\n"
"Board detected as %s\n",
em28xx_boards[dev->model].name);
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index 19ccff41c7eb..1d0d8cc06103 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -91,22 +91,16 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
if (len > URB_MAX_CTRL_SIZE)
return -EINVAL;
- em28xx_regdbg("(pipe 0x%08x): IN: %02x %02x %02x %02x %02x %02x %02x %02x ",
- pipe, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- req, 0, 0,
- reg & 0xff, reg >> 8,
- len & 0xff, len >> 8);
-
mutex_lock(&dev->ctrl_urb_lock);
ret = usb_control_msg(udev, pipe, req,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, reg, dev->urb_buf, len, HZ);
if (ret < 0) {
- em28xx_regdbg("(pipe 0x%08x): IN: %02x %02x %02x %02x %02x %02x %02x %02x failed\n",
+ em28xx_regdbg("(pipe 0x%08x): IN: %02x %02x %02x %02x %02x %02x %02x %02x failed with error %i\n",
pipe, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
req, 0, 0,
reg & 0xff, reg >> 8,
- len & 0xff, len >> 8);
+ len & 0xff, len >> 8, ret);
mutex_unlock(&dev->ctrl_urb_lock);
return usb_translate_errors(ret);
}
@@ -116,7 +110,7 @@ int em28xx_read_reg_req_len(struct em28xx *dev, u8 req, u16 reg,
mutex_unlock(&dev->ctrl_urb_lock);
- em28xx_regdbg("(pipe 0x%08x): IN: %02x %02x %02x %02x %02x %02x %02x %02x failed <<< %*ph\n",
+ em28xx_regdbg("(pipe 0x%08x): IN: %02x %02x %02x %02x %02x %02x %02x %02x <<< %*ph\n",
pipe, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
req, 0, 0,
reg & 0xff, reg >> 8,
@@ -164,13 +158,6 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
if ((len < 1) || (len > URB_MAX_CTRL_SIZE))
return -EINVAL;
- em28xx_regdbg("(pipe 0x%08x): OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>> %*ph\n",
- pipe,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- req, 0, 0,
- reg & 0xff, reg >> 8,
- len & 0xff, len >> 8, len, buf);
-
mutex_lock(&dev->ctrl_urb_lock);
memcpy(dev->urb_buf, buf, len);
ret = usb_control_msg(udev, pipe, req,
@@ -178,8 +165,22 @@ int em28xx_write_regs_req(struct em28xx *dev, u8 req, u16 reg, char *buf,
0x0000, reg, dev->urb_buf, len, HZ);
mutex_unlock(&dev->ctrl_urb_lock);
- if (ret < 0)
+ if (ret < 0) {
+ em28xx_regdbg("(pipe 0x%08x): OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>> %*ph failed with error %i\n",
+ pipe,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ req, 0, 0,
+ reg & 0xff, reg >> 8,
+ len & 0xff, len >> 8, len, buf, ret);
return usb_translate_errors(ret);
+ }
+
+ em28xx_regdbg("(pipe 0x%08x): OUT: %02x %02x %02x %02x %02x %02x %02x %02x >>> %*ph\n",
+ pipe,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ req, 0, 0,
+ reg & 0xff, reg >> 8,
+ len & 0xff, len >> 8, len, buf);
if (dev->wait_after_write)
msleep(dev->wait_after_write);
diff --git a/drivers/media/usb/gspca/m5602/m5602_s5k83a.c b/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
index be5e25d1a2e8..6ad8d4849680 100644
--- a/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
+++ b/drivers/media/usb/gspca/m5602/m5602_s5k83a.c
@@ -345,6 +345,11 @@ int s5k83a_start(struct sd *sd)
to assume that there is no better way of accomplishing this */
sd->rotation_thread = kthread_create(rotation_thread_function,
sd, "rotation thread");
+ if (IS_ERR(sd->rotation_thread)) {
+ err = PTR_ERR(sd->rotation_thread);
+ sd->rotation_thread = NULL;
+ return err;
+ }
wake_up_process(sd->rotation_thread);
/* Preinit the sensor */
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index f4c41f043cda..cdb79c5f0c38 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -3526,7 +3526,8 @@ static void ov511_mode_init_regs(struct sd *sd)
sd->clockdiv = 0;
break;
}
- /* Fall through for 640x480 case */
+ /* For 640x480 case */
+ /* fall through */
default:
/* case 20: */
/* case 15: */
diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
index 1dfc2de1fe77..c843070f24c1 100644
--- a/drivers/media/usb/pulse8-cec/pulse8-cec.c
+++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
@@ -148,18 +148,15 @@ static void pulse8_irq_work_handler(struct work_struct *work)
cec_received_msg(pulse8->adap, &pulse8->rx_msg);
break;
case MSGCODE_TRANSMIT_SUCCEEDED:
- cec_transmit_done(pulse8->adap, CEC_TX_STATUS_OK,
- 0, 0, 0, 0);
+ cec_transmit_attempt_done(pulse8->adap, CEC_TX_STATUS_OK);
break;
case MSGCODE_TRANSMIT_FAILED_ACK:
- cec_transmit_done(pulse8->adap, CEC_TX_STATUS_NACK,
- 0, 1, 0, 0);
+ cec_transmit_attempt_done(pulse8->adap, CEC_TX_STATUS_NACK);
break;
case MSGCODE_TRANSMIT_FAILED_LINE:
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
- cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ERROR,
- 0, 0, 0, 1);
+ cec_transmit_attempt_done(pulse8->adap, CEC_TX_STATUS_ERROR);
break;
}
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
index f727b54a53c6..20a52b785fff 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
@@ -488,7 +488,7 @@ static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap,
if ((ret > 0) || !(msgs[idx].flags & I2C_M_RD)) {
if (cnt > 8) cnt = 8;
printk(KERN_CONT " [");
- for (offs = 0; offs < (cnt>8?8:cnt); offs++) {
+ for (offs = 0; offs < cnt; offs++) {
if (offs) printk(KERN_CONT " ");
printk(KERN_CONT "%02x",msgs[idx].buf[offs]);
}
diff --git a/drivers/media/usb/pwc/pwc-v4l.c b/drivers/media/usb/pwc/pwc-v4l.c
index 92f04db6bbae..043b2b97cee6 100644
--- a/drivers/media/usb/pwc/pwc-v4l.c
+++ b/drivers/media/usb/pwc/pwc-v4l.c
@@ -568,7 +568,8 @@ static int pwc_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
pdev->gain_valid = true;
if (!DEVICE_USE_CODEC3(pdev->type))
break;
- /* Fall through for CODEC3 where autogain also controls expo */
+ /* For CODEC3 where autogain also controls expo */
+ /* fall through */
case V4L2_CID_EXPOSURE_AUTO:
if (pdev->exposure_valid && time_before(jiffies,
pdev->last_exposure_update + HZ / 4)) {
diff --git a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
index 4126552c9055..f203699e9c1b 100644
--- a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
+++ b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
@@ -98,16 +98,13 @@ static void rain_process_msg(struct rain *rain)
switch (stat) {
case 1:
- cec_transmit_done(rain->adap, CEC_TX_STATUS_OK,
- 0, 0, 0, 0);
+ cec_transmit_attempt_done(rain->adap, CEC_TX_STATUS_OK);
break;
case 2:
- cec_transmit_done(rain->adap, CEC_TX_STATUS_NACK,
- 0, 1, 0, 0);
+ cec_transmit_attempt_done(rain->adap, CEC_TX_STATUS_NACK);
break;
default:
- cec_transmit_done(rain->adap, CEC_TX_STATUS_LOW_DRIVE,
- 0, 0, 0, 1);
+ cec_transmit_attempt_done(rain->adap, CEC_TX_STATUS_LOW_DRIVE);
break;
}
}
@@ -123,11 +120,12 @@ static void rain_irq_work_handler(struct work_struct *work)
char data;
spin_lock_irqsave(&rain->buf_lock, flags);
- exit_loop = rain->buf_len == 0;
if (rain->buf_len) {
data = rain->buf[rain->buf_rd_idx];
rain->buf_len--;
rain->buf_rd_idx = (rain->buf_rd_idx + 1) & 0xff;
+ } else {
+ exit_loop = true;
}
spin_unlock_irqrestore(&rain->buf_lock, flags);
@@ -296,7 +294,7 @@ static int rain_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
cec_msg_destination(msg), msg->msg[1]);
for (i = 2; i < msg->len; i++) {
snprintf(hex, sizeof(hex), "%02x", msg->msg[i]);
- strncat(cmd, hex, sizeof(cmd));
+ strlcat(cmd, hex, sizeof(cmd));
}
}
mutex_lock(&rain->write_lock);
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index a9d4484f7626..6a88b1dbb3a0 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -1803,6 +1803,8 @@ static int save_frame(struct s2255_dev *dev, struct s2255_pipeinfo *pipe_info)
default:
pr_info("s2255 unknown resp\n");
}
+ pdata++;
+ break;
default:
pdata++;
break;
diff --git a/drivers/media/usb/tm6000/tm6000-input.c b/drivers/media/usb/tm6000/tm6000-input.c
index 39c15bb2b20c..1a033f57fcc1 100644
--- a/drivers/media/usb/tm6000/tm6000-input.c
+++ b/drivers/media/usb/tm6000/tm6000-input.c
@@ -63,7 +63,6 @@ struct tm6000_IR {
u8 wait:1;
u8 pwled:2;
u8 submit_urb:1;
- u16 key_addr;
struct urb *int_urb;
/* IR device properties */
@@ -321,9 +320,6 @@ static int tm6000_ir_change_protocol(struct rc_dev *rc, u64 *rc_type)
dprintk(2, "%s\n",__func__);
- if ((rc->rc_map.scan) && (*rc_type == RC_BIT_NEC))
- ir->key_addr = ((rc->rc_map.scan[0].scancode >> 8) & 0xffff);
-
ir->rc_type = *rc_type;
tm6000_ir_config(ir);
diff --git a/drivers/media/usb/usbvision/usbvision-i2c.c b/drivers/media/usb/usbvision/usbvision-i2c.c
index 5a3f788ad033..fdf6b6e285da 100644
--- a/drivers/media/usb/usbvision/usbvision-i2c.c
+++ b/drivers/media/usb/usbvision/usbvision-i2c.c
@@ -311,10 +311,13 @@ usbvision_i2c_read_max4(struct usb_usbvision *usbvision, unsigned char addr,
switch (len) {
case 4:
buf[3] = usbvision_read_reg(usbvision, USBVISION_SER_DAT4);
+ /* fall through */
case 3:
buf[2] = usbvision_read_reg(usbvision, USBVISION_SER_DAT3);
+ /* fall through */
case 2:
buf[1] = usbvision_read_reg(usbvision, USBVISION_SER_DAT2);
+ /* fall through */
case 1:
buf[0] = usbvision_read_reg(usbvision, USBVISION_SER_DAT1);
break;
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index f9c3325aa4d4..756322c4ac05 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1427,8 +1427,8 @@ static int usbvision_probe(struct usb_interface *intf,
int model, i, ret;
PDEBUG(DBG_PROBE, "VID=%#04x, PID=%#04x, ifnum=%u",
- dev->descriptor.idVendor,
- dev->descriptor.idProduct, ifnum);
+ le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct), ifnum);
model = devid->driver_info;
if (model < 0 || model >= usbvision_device_data_size) {
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 46d6be0bb316..70842c5af05b 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -2013,6 +2013,7 @@ static int uvc_probe(struct usb_interface *intf,
{
struct usb_device *udev = interface_to_usbdev(intf);
struct uvc_device *dev;
+ int function;
int ret;
if (id->idVendor && id->idProduct)
@@ -2044,9 +2045,27 @@ static int uvc_probe(struct usb_interface *intf,
strlcpy(dev->name, udev->product, sizeof dev->name);
else
snprintf(dev->name, sizeof dev->name,
- "UVC Camera (%04x:%04x)",
- le16_to_cpu(udev->descriptor.idVendor),
- le16_to_cpu(udev->descriptor.idProduct));
+ "UVC Camera (%04x:%04x)",
+ le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct));
+
+ /*
+ * Add iFunction or iInterface to names when available as additional
+ * distinguishers between interfaces. iFunction is prioritized over
+ * iInterface which matches Windows behavior at the point of writing.
+ */
+ if (intf->intf_assoc && intf->intf_assoc->iFunction != 0)
+ function = intf->intf_assoc->iFunction;
+ else
+ function = intf->cur_altsetting->desc.iInterface;
+ if (function != 0) {
+ size_t len;
+
+ strlcat(dev->name, ": ", sizeof(dev->name));
+ len = strlen(dev->name);
+ usb_string(udev, function, dev->name + len,
+ sizeof(dev->name) - len);
+ }
/* Parse the Video Class control descriptor. */
if (uvc_parse_control(dev) < 0) {
@@ -2441,6 +2460,15 @@ static struct usb_device_id uvc_ids[] = {
.bInterfaceProtocol = 0,
.driver_info = UVC_QUIRK_PROBE_MINMAX
| UVC_QUIRK_BUILTIN_ISIGHT },
+ /* Apple Built-In iSight via iBridge */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x05ac,
+ .idProduct = 0x8600,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_PROBE_DEF },
/* Foxlink ("HP Webcam" on HP Mini 5103) */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 47d93a938dde..fb86d6af398d 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1323,11 +1323,11 @@ static void uvc_video_complete(struct urb *urb)
default:
uvc_printk(KERN_WARNING, "Non-zero status (%d) in video "
"completion handler.\n", urb->status);
-
+ /* fall through */
case -ENOENT: /* usb_kill_urb() called. */
if (stream->frozen)
return;
-
+ /* fall through */
case -ECONNRESET: /* usb_unlink_urb() called. */
case -ESHUTDOWN: /* The endpoint is being disabled. */
uvc_queue_cancel(queue, urb->status == -ESHUTDOWN);
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 6b1b78ff1417..a35c33686abf 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -55,6 +55,9 @@ config V4L2_FLASH_LED_CLASS
When in doubt, say N.
+config V4L2_FWNODE
+ tristate
+
# Used by drivers that need Videobuf modules
config VIDEOBUF_GEN
tristate
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index 795a5352761d..098ad5fd5231 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -10,9 +10,7 @@ videodev-objs := v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \
ifeq ($(CONFIG_COMPAT),y)
videodev-objs += v4l2-compat-ioctl32.o
endif
-ifeq ($(CONFIG_OF),y)
- videodev-objs += v4l2-of.o
-endif
+obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o
ifeq ($(CONFIG_TRACEPOINTS),y)
videodev-objs += vb2-trace.o v4l2-trace.o
endif
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index 96cc733f35ef..851f128eba22 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -12,8 +12,10 @@
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -40,10 +42,14 @@ static bool match_devname(struct v4l2_subdev *sd,
return !strcmp(asd->match.device_name.name, dev_name(sd->dev));
}
-static bool match_of(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
+static bool match_fwnode(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
{
- return !of_node_cmp(of_node_full_name(sd->of_node),
- of_node_full_name(asd->match.of.node));
+ if (!is_of_node(sd->fwnode) || !is_of_node(asd->match.fwnode.fwnode))
+ return sd->fwnode == asd->match.fwnode.fwnode;
+
+ return !of_node_cmp(of_node_full_name(to_of_node(sd->fwnode)),
+ of_node_full_name(
+ to_of_node(asd->match.fwnode.fwnode)));
}
static bool match_custom(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
@@ -77,8 +83,8 @@ static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *
case V4L2_ASYNC_MATCH_I2C:
match = match_i2c;
break;
- case V4L2_ASYNC_MATCH_OF:
- match = match_of;
+ case V4L2_ASYNC_MATCH_FWNODE:
+ match = match_fwnode;
break;
default:
/* Cannot happen, unless someone breaks us */
@@ -143,7 +149,8 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
struct v4l2_async_subdev *asd;
int i;
- if (!notifier->num_subdevs || notifier->num_subdevs > V4L2_MAX_SUBDEVS)
+ if (!v4l2_dev || !notifier->num_subdevs ||
+ notifier->num_subdevs > V4L2_MAX_SUBDEVS)
return -EINVAL;
notifier->v4l2_dev = v4l2_dev;
@@ -157,7 +164,7 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
case V4L2_ASYNC_MATCH_CUSTOM:
case V4L2_ASYNC_MATCH_DEVNAME:
case V4L2_ASYNC_MATCH_I2C:
- case V4L2_ASYNC_MATCH_OF:
+ case V4L2_ASYNC_MATCH_FWNODE:
break;
default:
dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL,
@@ -204,7 +211,7 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
if (!notifier->v4l2_dev)
return;
- dev = kmalloc_array(n_subdev, sizeof(*dev), GFP_KERNEL);
+ dev = kvmalloc_array(n_subdev, sizeof(*dev), GFP_KERNEL);
if (!dev) {
dev_err(notifier->v4l2_dev->dev,
"Failed to allocate device cache!\n");
@@ -260,7 +267,7 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
}
put_device(d);
}
- kfree(dev);
+ kvfree(dev);
notifier->v4l2_dev = NULL;
@@ -280,8 +287,8 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd)
* (struct v4l2_subdev.dev), and async sub-device does not
* exist independently of the device at any point of time.
*/
- if (!sd->of_node && sd->dev)
- sd->of_node = sd->dev->of_node;
+ if (!sd->fwnode && sd->dev)
+ sd->fwnode = dev_fwnode(sd->dev);
mutex_lock(&list_lock);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index ec42872d11cf..dd1db678718c 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -19,6 +19,7 @@
*/
#include <linux/ctype.h>
+#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <media/v4l2-ioctl.h>
@@ -886,6 +887,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_PIXEL_RATE: return "Pixel Rate";
case V4L2_CID_TEST_PATTERN: return "Test Pattern";
case V4L2_CID_DEINTERLACING_MODE: return "Deinterlacing Mode";
+ case V4L2_CID_DIGITAL_GAIN: return "Digital Gain";
/* DV controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -1739,14 +1741,15 @@ int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
unsigned nr_of_controls_hint,
struct lock_class_key *key, const char *name)
{
+ mutex_init(&hdl->_lock);
hdl->lock = &hdl->_lock;
- mutex_init(hdl->lock);
lockdep_set_class_and_name(hdl->lock, key, name);
INIT_LIST_HEAD(&hdl->ctrls);
INIT_LIST_HEAD(&hdl->ctrl_refs);
hdl->nr_of_buckets = 1 + nr_of_controls_hint / 8;
- hdl->buckets = kcalloc(hdl->nr_of_buckets, sizeof(hdl->buckets[0]),
- GFP_KERNEL);
+ hdl->buckets = kvmalloc_array(hdl->nr_of_buckets,
+ sizeof(hdl->buckets[0]),
+ GFP_KERNEL | __GFP_ZERO);
hdl->error = hdl->buckets ? 0 : -ENOMEM;
return hdl->error;
}
@@ -1773,13 +1776,14 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
list_del(&ctrl->node);
list_for_each_entry_safe(sev, next_sev, &ctrl->ev_subs, node)
list_del(&sev->node);
- kfree(ctrl);
+ kvfree(ctrl);
}
- kfree(hdl->buckets);
+ kvfree(hdl->buckets);
hdl->buckets = NULL;
hdl->cached = NULL;
hdl->error = 0;
mutex_unlock(hdl->lock);
+ mutex_destroy(&hdl->_lock);
}
EXPORT_SYMBOL(v4l2_ctrl_handler_free);
@@ -2022,7 +2026,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
is_array)
sz_extra += 2 * tot_ctrl_size;
- ctrl = kzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL);
+ ctrl = kvzalloc(sizeof(*ctrl) + sz_extra, GFP_KERNEL);
if (ctrl == NULL) {
handler_set_err(hdl, -ENOMEM);
return NULL;
@@ -2071,7 +2075,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
}
if (handler_new_ref(hdl, ctrl)) {
- kfree(ctrl);
+ kvfree(ctrl);
return NULL;
}
mutex_lock(hdl->lock);
@@ -2444,14 +2448,16 @@ int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd)
EXPORT_SYMBOL(v4l2_ctrl_subdev_log_status);
/* Call s_ctrl for all controls owned by the handler */
-int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
+int __v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
{
struct v4l2_ctrl *ctrl;
int ret = 0;
if (hdl == NULL)
return 0;
- mutex_lock(hdl->lock);
+
+ lockdep_assert_held(hdl->lock);
+
list_for_each_entry(ctrl, &hdl->ctrls, node)
ctrl->done = false;
@@ -2476,7 +2482,22 @@ int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
if (ret)
break;
}
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__v4l2_ctrl_handler_setup);
+
+int v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl)
+{
+ int ret;
+
+ if (hdl == NULL)
+ return 0;
+
+ mutex_lock(hdl->lock);
+ ret = __v4l2_ctrl_handler_setup(hdl);
mutex_unlock(hdl->lock);
+
return ret;
}
EXPORT_SYMBOL(v4l2_ctrl_handler_setup);
@@ -2824,8 +2845,8 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs
return class_check(hdl, cs->which);
if (cs->count > ARRAY_SIZE(helper)) {
- helpers = kmalloc_array(cs->count, sizeof(helper[0]),
- GFP_KERNEL);
+ helpers = kvmalloc_array(cs->count, sizeof(helper[0]),
+ GFP_KERNEL);
if (helpers == NULL)
return -ENOMEM;
}
@@ -2877,7 +2898,7 @@ int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs
}
if (cs->count > ARRAY_SIZE(helper))
- kfree(helpers);
+ kvfree(helpers);
return ret;
}
EXPORT_SYMBOL(v4l2_g_ext_ctrls);
@@ -3079,8 +3100,8 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
return class_check(hdl, cs->which);
if (cs->count > ARRAY_SIZE(helper)) {
- helpers = kmalloc_array(cs->count, sizeof(helper[0]),
- GFP_KERNEL);
+ helpers = kvmalloc_array(cs->count, sizeof(helper[0]),
+ GFP_KERNEL);
if (!helpers)
return -ENOMEM;
}
@@ -3157,7 +3178,7 @@ static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl,
}
if (cs->count > ARRAY_SIZE(helper))
- kfree(helpers);
+ kvfree(helpers);
return ret;
}
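
The __v4l2_ctrl_handler_setup()/v4l2_ctrl_handler_setup() split above follows the usual locked/unlocked helper pattern: the plain function takes hdl->lock itself, while the double-underscore variant asserts via lockdep that the caller already holds it. A minimal sketch of how a caller might pick between the two; foo_dev and both functions are hypothetical and not part of the patch:

#include <linux/mutex.h>
#include <media/v4l2-ctrls.h>

struct foo_dev {				/* hypothetical driver state */
	struct v4l2_ctrl_handler ctrl_handler;
};

/* Unlocked context: let the helper take and release hdl->lock itself. */
static int foo_restore_controls(struct foo_dev *dev)
{
	return v4l2_ctrl_handler_setup(&dev->ctrl_handler);
}

/* Caller already serialises on the handler lock, e.g. around other control
 * updates; use the unlocked variant inside the critical section. */
static int foo_restore_controls_locked(struct foo_dev *dev)
{
	int ret;

	mutex_lock(dev->ctrl_handler.lock);
	ret = __v4l2_ctrl_handler_setup(&dev->ctrl_handler);
	mutex_unlock(dev->ctrl_handler.lock);
	return ret;
}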
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index a75df6cb141f..968c2eb08b5a 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -21,6 +21,7 @@
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
+#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -214,7 +215,8 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
if (elems < 1)
elems = 1;
- sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
+ sev = kvzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems,
+ GFP_KERNEL);
if (!sev)
return -ENOMEM;
for (i = 0; i < elems; i++)
@@ -232,7 +234,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (found_ev) {
- kfree(sev);
+ kvfree(sev);
return 0; /* Already listening */
}
@@ -304,7 +306,7 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
if (sev && sev->ops && sev->ops->del)
sev->ops->del(sev);
- kfree(sev);
+ kvfree(sev);
return 0;
}
diff --git a/drivers/media/v4l2-core/v4l2-flash-led-class.c b/drivers/media/v4l2-core/v4l2-flash-led-class.c
index 794e563f24f8..7b8288108e8a 100644
--- a/drivers/media/v4l2-core/v4l2-flash-led-class.c
+++ b/drivers/media/v4l2-core/v4l2-flash-led-class.c
@@ -12,7 +12,7 @@
#include <linux/led-class-flash.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <media/v4l2-flash-led-class.h>
@@ -612,7 +612,7 @@ static const struct v4l2_subdev_internal_ops v4l2_flash_subdev_internal_ops = {
static const struct v4l2_subdev_ops v4l2_flash_subdev_ops;
struct v4l2_flash *v4l2_flash_init(
- struct device *dev, struct device_node *of_node,
+ struct device *dev, struct fwnode_handle *fwn,
struct led_classdev_flash *fled_cdev,
struct led_classdev_flash *iled_cdev,
const struct v4l2_flash_ops *ops,
@@ -638,7 +638,7 @@ struct v4l2_flash *v4l2_flash_init(
v4l2_flash->iled_cdev = iled_cdev;
v4l2_flash->ops = ops;
sd->dev = dev;
- sd->of_node = of_node ? of_node : led_cdev->dev->of_node;
+ sd->fwnode = fwn ? fwn : dev_fwnode(led_cdev->dev);
v4l2_subdev_init(sd, &v4l2_flash_subdev_ops);
sd->internal_ops = &v4l2_flash_subdev_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
@@ -654,7 +654,7 @@ struct v4l2_flash *v4l2_flash_init(
if (ret < 0)
goto err_init_controls;
- of_node_get(sd->of_node);
+ fwnode_handle_get(sd->fwnode);
ret = v4l2_async_register_subdev(sd);
if (ret < 0)
@@ -663,7 +663,7 @@ struct v4l2_flash *v4l2_flash_init(
return v4l2_flash;
err_async_register_sd:
- of_node_put(sd->of_node);
+ fwnode_handle_put(sd->fwnode);
v4l2_ctrl_handler_free(sd->ctrl_handler);
err_init_controls:
media_entity_cleanup(&sd->entity);
@@ -683,7 +683,7 @@ void v4l2_flash_release(struct v4l2_flash *v4l2_flash)
v4l2_async_unregister_subdev(sd);
- of_node_put(sd->of_node);
+ fwnode_handle_put(sd->fwnode);
v4l2_ctrl_handler_free(sd->ctrl_handler);
media_entity_cleanup(&sd->entity);
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
new file mode 100644
index 000000000000..153c53ca3925
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -0,0 +1,345 @@
+/*
+ * V4L2 fwnode binding parsing library
+ *
+ * The origins of the V4L2 fwnode library are in V4L2 OF library that
+ * formerly was located in v4l2-of.c.
+ *
+ * Copyright (c) 2016 Intel Corporation.
+ * Author: Sakari Ailus <[email protected]>
+ *
+ * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <[email protected]>
+ *
+ * Copyright (C) 2012 Renesas Electronics Corp.
+ * Author: Guennadi Liakhovetski <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <media/v4l2-fwnode.h>
+
+static int v4l2_fwnode_endpoint_parse_csi_bus(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep)
+{
+ struct v4l2_fwnode_bus_mipi_csi2 *bus = &vep->bus.mipi_csi2;
+ bool have_clk_lane = false;
+ unsigned int flags = 0, lanes_used = 0;
+ unsigned int i;
+ u32 v;
+ int rval;
+
+ rval = fwnode_property_read_u32_array(fwnode, "data-lanes", NULL, 0);
+ if (rval > 0) {
+ u32 array[ARRAY_SIZE(bus->data_lanes)];
+
+ bus->num_data_lanes =
+ min_t(int, ARRAY_SIZE(bus->data_lanes), rval);
+
+ fwnode_property_read_u32_array(fwnode, "data-lanes", array,
+ bus->num_data_lanes);
+
+ for (i = 0; i < bus->num_data_lanes; i++) {
+ if (lanes_used & BIT(array[i]))
+ pr_warn("duplicated lane %u in data-lanes\n",
+ array[i]);
+ lanes_used |= BIT(array[i]);
+
+ bus->data_lanes[i] = array[i];
+ }
+ }
+
+ rval = fwnode_property_read_u32_array(fwnode, "lane-polarities", NULL,
+ 0);
+ if (rval > 0) {
+ u32 array[ARRAY_SIZE(bus->lane_polarities)];
+
+ if (rval < 1 + bus->num_data_lanes /* clock + data */) {
+ pr_warn("too few lane-polarities entries (need %u, got %u)\n",
+ 1 + bus->num_data_lanes, rval);
+ return -EINVAL;
+ }
+
+ fwnode_property_read_u32_array(fwnode, "lane-polarities", array,
+ 1 + bus->num_data_lanes);
+
+ for (i = 0; i < 1 + bus->num_data_lanes; i++)
+ bus->lane_polarities[i] = array[i];
+ }
+
+ if (!fwnode_property_read_u32(fwnode, "clock-lanes", &v)) {
+ if (lanes_used & BIT(v))
+ pr_warn("duplicated lane %u in clock-lanes\n", v);
+ lanes_used |= BIT(v);
+
+ bus->clock_lane = v;
+ have_clk_lane = true;
+ }
+
+ if (fwnode_property_present(fwnode, "clock-noncontinuous"))
+ flags |= V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
+ else if (have_clk_lane || bus->num_data_lanes > 0)
+ flags |= V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
+
+ bus->flags = flags;
+ vep->bus_type = V4L2_MBUS_CSI2;
+
+ return 0;
+}
+
+static void v4l2_fwnode_endpoint_parse_parallel_bus(
+ struct fwnode_handle *fwnode, struct v4l2_fwnode_endpoint *vep)
+{
+ struct v4l2_fwnode_bus_parallel *bus = &vep->bus.parallel;
+ unsigned int flags = 0;
+ u32 v;
+
+ if (!fwnode_property_read_u32(fwnode, "hsync-active", &v))
+ flags |= v ? V4L2_MBUS_HSYNC_ACTIVE_HIGH :
+ V4L2_MBUS_HSYNC_ACTIVE_LOW;
+
+ if (!fwnode_property_read_u32(fwnode, "vsync-active", &v))
+ flags |= v ? V4L2_MBUS_VSYNC_ACTIVE_HIGH :
+ V4L2_MBUS_VSYNC_ACTIVE_LOW;
+
+ if (!fwnode_property_read_u32(fwnode, "field-even-active", &v))
+ flags |= v ? V4L2_MBUS_FIELD_EVEN_HIGH :
+ V4L2_MBUS_FIELD_EVEN_LOW;
+ if (flags)
+ vep->bus_type = V4L2_MBUS_PARALLEL;
+ else
+ vep->bus_type = V4L2_MBUS_BT656;
+
+ if (!fwnode_property_read_u32(fwnode, "pclk-sample", &v))
+ flags |= v ? V4L2_MBUS_PCLK_SAMPLE_RISING :
+ V4L2_MBUS_PCLK_SAMPLE_FALLING;
+
+ if (!fwnode_property_read_u32(fwnode, "data-active", &v))
+ flags |= v ? V4L2_MBUS_DATA_ACTIVE_HIGH :
+ V4L2_MBUS_DATA_ACTIVE_LOW;
+
+ if (fwnode_property_present(fwnode, "slave-mode"))
+ flags |= V4L2_MBUS_SLAVE;
+ else
+ flags |= V4L2_MBUS_MASTER;
+
+ if (!fwnode_property_read_u32(fwnode, "bus-width", &v))
+ bus->bus_width = v;
+
+ if (!fwnode_property_read_u32(fwnode, "data-shift", &v))
+ bus->data_shift = v;
+
+ if (!fwnode_property_read_u32(fwnode, "sync-on-green-active", &v))
+ flags |= v ? V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH :
+ V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW;
+
+ bus->flags = flags;
+
+}
+
+/**
+ * v4l2_fwnode_endpoint_parse() - parse all fwnode node properties
+ * @fwnode: pointer to the endpoint's fwnode handle
+ * @vep: pointer to the V4L2 fwnode data structure
+ *
+ * All properties are optional. If none are found, we don't set any flags. This
+ * means the port has a static configuration and no properties have to be
+ * specified explicitly. If any properties that identify the bus as parallel
+ * are found and slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if
+ * we recognise the bus as serial CSI-2 and clock-noncontinuous isn't set, we
+ * set the V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag. The caller should hold a
+ * reference to @fwnode.
+ *
+ * NOTE: This function does not parse properties whose size is variable without
+ * a low fixed limit. Please use v4l2_fwnode_endpoint_alloc_parse() in new
+ * drivers instead.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep)
+{
+ int rval;
+
+ fwnode_graph_parse_endpoint(fwnode, &vep->base);
+
+ /* Zero fields from bus_type to the end */
+ memset(&vep->bus_type, 0, sizeof(*vep) -
+ offsetof(typeof(*vep), bus_type));
+
+ rval = v4l2_fwnode_endpoint_parse_csi_bus(fwnode, vep);
+ if (rval)
+ return rval;
+ /*
+ * Parse the parallel video bus properties only if none
+ * of the MIPI CSI-2 specific properties were found.
+ */
+ if (vep->bus.mipi_csi2.flags == 0)
+ v4l2_fwnode_endpoint_parse_parallel_bus(fwnode, vep);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_parse);
+
+/*
+ * v4l2_fwnode_endpoint_free() - free a V4L2 fwnode endpoint acquired by
+ * v4l2_fwnode_endpoint_alloc_parse()
+ * @vep - the V4L2 fwnode endpoint whose resources are to be released
+ *
+ * It is safe to call this function with NULL argument or on a V4L2 fwnode the
+ * parsing of which failed.
+ */
+void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep)
+{
+ if (IS_ERR_OR_NULL(vep))
+ return;
+
+ kfree(vep->link_frequencies);
+ kfree(vep);
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_free);
+
+/**
+ * v4l2_fwnode_endpoint_alloc_parse() - parse all fwnode node properties
+ * @fwnode: pointer to the endpoint's fwnode handle
+ *
+ * All properties are optional. If none are found, we don't set any flags. This
+ * means the port has a static configuration and no properties have to be
+ * specified explicitly. If any properties that identify the bus as parallel
+ * are found and slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if
+ * we recognise the bus as serial CSI-2 and clock-noncontinuous isn't set, we
+ * set the V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag. The caller should hold a
+ * reference to @fwnode.
+ *
+ * v4l2_fwnode_endpoint_alloc_parse() has two important differences from
+ * v4l2_fwnode_endpoint_parse():
+ *
+ * 1. It also parses variable size data.
+ *
+ * 2. The memory it has allocated to store the variable size data must be freed
+ * using v4l2_fwnode_endpoint_free() when no longer needed.
+ *
+ * Return: Pointer to v4l2_fwnode_endpoint if successful, or an error pointer
+ * on failure.
+ */
+struct v4l2_fwnode_endpoint *v4l2_fwnode_endpoint_alloc_parse(
+ struct fwnode_handle *fwnode)
+{
+ struct v4l2_fwnode_endpoint *vep;
+ int rval;
+
+ vep = kzalloc(sizeof(*vep), GFP_KERNEL);
+ if (!vep)
+ return ERR_PTR(-ENOMEM);
+
+ rval = v4l2_fwnode_endpoint_parse(fwnode, vep);
+ if (rval < 0)
+ goto out_err;
+
+ rval = fwnode_property_read_u64_array(fwnode, "link-frequencies",
+ NULL, 0);
+ if (rval < 0)
+ goto out_err;
+
+ vep->link_frequencies =
+ kmalloc_array(rval, sizeof(*vep->link_frequencies), GFP_KERNEL);
+ if (!vep->link_frequencies) {
+ rval = -ENOMEM;
+ goto out_err;
+ }
+
+ vep->nr_of_link_frequencies = rval;
+
+ rval = fwnode_property_read_u64_array(fwnode, "link-frequencies",
+ vep->link_frequencies,
+ vep->nr_of_link_frequencies);
+ if (rval < 0)
+ goto out_err;
+
+ return vep;
+
+out_err:
+ v4l2_fwnode_endpoint_free(vep);
+ return ERR_PTR(rval);
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_endpoint_alloc_parse);
+
+/**
+ * v4l2_fwnode_parse_link() - parse a link between two endpoints
+ * @__fwnode: pointer to the endpoint's fwnode at the local end of the link
+ * @link: pointer to the V4L2 fwnode link data structure
+ *
+ * Fill the link structure with the local and remote nodes and port numbers.
+ * The local_node and remote_node fields are set to point to the local and
+ * remote port's parent nodes respectively (the port parent node being the
+ * parent node of the port node if that node isn't a 'ports' node, or the
+ * grand-parent node of the port node otherwise).
+ *
+ * A reference is taken to both the local and remote nodes; the caller must use
+ * v4l2_fwnode_put_link() to drop the references when done with the link.
+ *
+ * Return: 0 on success, or -ENOLINK if the remote endpoint fwnode can't be
+ * found.
+ */
+int v4l2_fwnode_parse_link(struct fwnode_handle *__fwnode,
+ struct v4l2_fwnode_link *link)
+{
+ const char *port_prop = is_of_node(__fwnode) ? "reg" : "port";
+ struct fwnode_handle *fwnode;
+
+ memset(link, 0, sizeof(*link));
+
+ fwnode = fwnode_get_parent(__fwnode);
+ fwnode_property_read_u32(fwnode, port_prop, &link->local_port);
+ fwnode = fwnode_get_next_parent(fwnode);
+ if (is_of_node(fwnode) &&
+ of_node_cmp(to_of_node(fwnode)->name, "ports") == 0)
+ fwnode = fwnode_get_next_parent(fwnode);
+ link->local_node = fwnode;
+
+ fwnode = fwnode_graph_get_remote_endpoint(__fwnode);
+ if (!fwnode) {
+ fwnode_handle_put(link->local_node);
+ return -ENOLINK;
+ }
+
+ fwnode = fwnode_get_parent(fwnode);
+ fwnode_property_read_u32(fwnode, port_prop, &link->remote_port);
+ fwnode = fwnode_get_next_parent(fwnode);
+ if (is_of_node(fwnode) &&
+ of_node_cmp(to_of_node(fwnode)->name, "ports") == 0)
+ fwnode = fwnode_get_next_parent(fwnode);
+ link->remote_node = fwnode;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_parse_link);
+
+/**
+ * v4l2_fwnode_put_link() - drop references to nodes in a link
+ * @link: pointer to the V4L2 fwnode link data structure
+ *
+ * Drop references to the local and remote nodes in the link. This function
+ * must be called on every link parsed with v4l2_fwnode_parse_link().
+ */
+void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link)
+{
+ fwnode_handle_put(link->local_node);
+ fwnode_handle_put(link->remote_node);
+}
+EXPORT_SYMBOL_GPL(v4l2_fwnode_put_link);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sakari Ailus <[email protected]>");
+MODULE_AUTHOR("Sylwester Nawrocki <[email protected]>");
+MODULE_AUTHOR("Guennadi Liakhovetski <[email protected]>");
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index e5a2187381db..cab63bb49c97 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -12,6 +12,7 @@
* Mauro Carvalho Chehab <[email protected]> (version 2)
*/
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -1229,6 +1230,9 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_SDR_FMT_CS8: descr = "Complex S8"; break;
case V4L2_SDR_FMT_CS14LE: descr = "Complex S14LE"; break;
case V4L2_SDR_FMT_RU12LE: descr = "Real U12LE"; break;
+ case V4L2_SDR_FMT_PCU16BE: descr = "Planar Complex U16BE"; break;
+ case V4L2_SDR_FMT_PCU18BE: descr = "Planar Complex U18BE"; break;
+ case V4L2_SDR_FMT_PCU20BE: descr = "Planar Complex U20BE"; break;
case V4L2_TCH_FMT_DELTA_TD16: descr = "16-bit signed deltas"; break;
case V4L2_TCH_FMT_DELTA_TD08: descr = "8-bit signed deltas"; break;
case V4L2_TCH_FMT_TU16: descr = "16-bit unsigned touch data"; break;
@@ -2141,6 +2145,47 @@ static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
-EINVAL;
}
+/*
+ * The selection API specified originally that the _MPLANE buffer types
+ * shouldn't be used. The reasons for this are lost in the mists of time
+ * (or just really crappy memories). Regardless, this is really annoying
+ * for userspace. So to keep things simple we map _MPLANE buffer types
+ * to their 'regular' counterparts before calling the driver. And we
+ * restore it afterwards. This way applications can use either buffer
+ * type and drivers don't need to check for both.
+ */
+static int v4l_g_selection(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_selection *p = arg;
+ u32 old_type = p->type;
+ int ret;
+
+ if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ ret = ops->vidioc_g_selection(file, fh, p);
+ p->type = old_type;
+ return ret;
+}
+
+static int v4l_s_selection(const struct v4l2_ioctl_ops *ops,
+ struct file *file, void *fh, void *arg)
+{
+ struct v4l2_selection *p = arg;
+ u32 old_type = p->type;
+ int ret;
+
+ if (p->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ p->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ else if (p->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ p->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ ret = ops->vidioc_s_selection(file, fh, p);
+ p->type = old_type;
+ return ret;
+}
+
static int v4l_g_crop(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
@@ -2160,7 +2205,7 @@ static int v4l_g_crop(const struct v4l2_ioctl_ops *ops,
else
s.target = V4L2_SEL_TGT_CROP_ACTIVE;
- ret = ops->vidioc_g_selection(file, fh, &s);
+ ret = v4l_g_selection(ops, file, fh, &s);
/* copying results to old structure on success */
if (!ret)
@@ -2187,7 +2232,7 @@ static int v4l_s_crop(const struct v4l2_ioctl_ops *ops,
else
s.target = V4L2_SEL_TGT_CROP_ACTIVE;
- return ops->vidioc_s_selection(file, fh, &s);
+ return v4l_s_selection(ops, file, fh, &s);
}
static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
@@ -2229,7 +2274,7 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
else
s.target = V4L2_SEL_TGT_CROP_BOUNDS;
- ret = ops->vidioc_g_selection(file, fh, &s);
+ ret = v4l_g_selection(ops, file, fh, &s);
if (ret)
return ret;
p->bounds = s.r;
@@ -2240,7 +2285,7 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
else
s.target = V4L2_SEL_TGT_CROP_DEFAULT;
- ret = ops->vidioc_g_selection(file, fh, &s);
+ ret = v4l_g_selection(ops, file, fh, &s);
if (ret)
return ret;
p->defrect = s.r;
@@ -2472,20 +2517,22 @@ struct v4l2_ioctl_info {
};
/* This control needs a priority check */
-#define INFO_FL_PRIO (1 << 0)
+#define INFO_FL_PRIO (1 << 0)
/* This control can be valid if the filehandle passes a control handler. */
-#define INFO_FL_CTRL (1 << 1)
+#define INFO_FL_CTRL (1 << 1)
/* This is a standard ioctl, no need for special code */
-#define INFO_FL_STD (1 << 2)
+#define INFO_FL_STD (1 << 2)
/* This ioctl has its own function */
-#define INFO_FL_FUNC (1 << 3)
+#define INFO_FL_FUNC (1 << 3)
/* Queuing ioctl */
-#define INFO_FL_QUEUE (1 << 4)
+#define INFO_FL_QUEUE (1 << 4)
+/* Always copy back result, even on error */
+#define INFO_FL_ALWAYS_COPY (1 << 5)
/* Zero struct from after the field to the end */
#define INFO_FL_CLEAR(v4l2_struct, field) \
((offsetof(struct v4l2_struct, field) + \
sizeof(((struct v4l2_struct *)0)->field)) << 16)
-#define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16)
+#define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16)
#define IOCTL_INFO_STD(_ioctl, _vidioc, _debug, _flags) \
[_IOC_NR(_ioctl)] = { \
@@ -2536,8 +2583,8 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO_FNC(VIDIOC_QUERYMENU, v4l_querymenu, v4l_print_querymenu, INFO_FL_CTRL | INFO_FL_CLEAR(v4l2_querymenu, index)),
IOCTL_INFO_STD(VIDIOC_G_INPUT, vidioc_g_input, v4l_print_u32, 0),
IOCTL_INFO_FNC(VIDIOC_S_INPUT, v4l_s_input, v4l_print_u32, INFO_FL_PRIO),
- IOCTL_INFO_STD(VIDIOC_G_EDID, vidioc_g_edid, v4l_print_edid, 0),
- IOCTL_INFO_STD(VIDIOC_S_EDID, vidioc_s_edid, v4l_print_edid, INFO_FL_PRIO),
+ IOCTL_INFO_STD(VIDIOC_G_EDID, vidioc_g_edid, v4l_print_edid, INFO_FL_ALWAYS_COPY),
+ IOCTL_INFO_STD(VIDIOC_S_EDID, vidioc_s_edid, v4l_print_edid, INFO_FL_PRIO | INFO_FL_ALWAYS_COPY),
IOCTL_INFO_STD(VIDIOC_G_OUTPUT, vidioc_g_output, v4l_print_u32, 0),
IOCTL_INFO_FNC(VIDIOC_S_OUTPUT, v4l_s_output, v4l_print_u32, INFO_FL_PRIO),
IOCTL_INFO_FNC(VIDIOC_ENUMOUTPUT, v4l_enumoutput, v4l_print_enumoutput, INFO_FL_CLEAR(v4l2_output, index)),
@@ -2550,8 +2597,8 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO_FNC(VIDIOC_CROPCAP, v4l_cropcap, v4l_print_cropcap, INFO_FL_CLEAR(v4l2_cropcap, type)),
IOCTL_INFO_FNC(VIDIOC_G_CROP, v4l_g_crop, v4l_print_crop, INFO_FL_CLEAR(v4l2_crop, type)),
IOCTL_INFO_FNC(VIDIOC_S_CROP, v4l_s_crop, v4l_print_crop, INFO_FL_PRIO),
- IOCTL_INFO_STD(VIDIOC_G_SELECTION, vidioc_g_selection, v4l_print_selection, INFO_FL_CLEAR(v4l2_selection, r)),
- IOCTL_INFO_STD(VIDIOC_S_SELECTION, vidioc_s_selection, v4l_print_selection, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_selection, r)),
+ IOCTL_INFO_FNC(VIDIOC_G_SELECTION, v4l_g_selection, v4l_print_selection, INFO_FL_CLEAR(v4l2_selection, r)),
+ IOCTL_INFO_FNC(VIDIOC_S_SELECTION, v4l_s_selection, v4l_print_selection, INFO_FL_PRIO | INFO_FL_CLEAR(v4l2_selection, r)),
IOCTL_INFO_STD(VIDIOC_G_JPEGCOMP, vidioc_g_jpegcomp, v4l_print_jpegcompression, 0),
IOCTL_INFO_STD(VIDIOC_S_JPEGCOMP, vidioc_s_jpegcomp, v4l_print_jpegcompression, INFO_FL_PRIO),
IOCTL_INFO_FNC(VIDIOC_QUERYSTD, v4l_querystd, v4l_print_std, 0),
@@ -2583,7 +2630,7 @@ static struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO_FNC(VIDIOC_CREATE_BUFS, v4l_create_bufs, v4l_print_create_buffers, INFO_FL_PRIO | INFO_FL_QUEUE),
IOCTL_INFO_FNC(VIDIOC_PREPARE_BUF, v4l_prepare_buf, v4l_print_buffer, INFO_FL_QUEUE),
IOCTL_INFO_STD(VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings, v4l_print_enum_dv_timings, INFO_FL_CLEAR(v4l2_enum_dv_timings, pad)),
- IOCTL_INFO_STD(VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings, v4l_print_dv_timings, 0),
+ IOCTL_INFO_STD(VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings, v4l_print_dv_timings, INFO_FL_ALWAYS_COPY),
IOCTL_INFO_STD(VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap, v4l_print_dv_timings_cap, INFO_FL_CLEAR(v4l2_dv_timings_cap, type)),
IOCTL_INFO_FNC(VIDIOC_ENUM_FREQ_BANDS, v4l_enum_freq_bands, v4l_print_freq_band, 0),
IOCTL_INFO_FNC(VIDIOC_DBG_G_CHIP_INFO, v4l_dbg_g_chip_info, v4l_print_dbg_chip_info, INFO_FL_CLEAR(v4l2_dbg_chip_info, match)),
@@ -2801,6 +2848,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
void *parg = (void *)arg;
long err = -EINVAL;
bool has_array_args;
+ bool always_copy = false;
size_t array_size = 0;
void __user *user_ptr = NULL;
void **kernel_ptr = NULL;
@@ -2811,7 +2859,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
parg = sbuf;
} else {
/* too big to allocate from stack */
- mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
+ mbuf = kvmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
if (NULL == mbuf)
return -ENOMEM;
parg = mbuf;
@@ -2830,8 +2878,10 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
*/
if (v4l2_is_known_ioctl(cmd)) {
u32 flags = v4l2_ioctls[_IOC_NR(cmd)].flags;
+
if (flags & INFO_FL_CLEAR_MASK)
n = (flags & INFO_FL_CLEAR_MASK) >> 16;
+ always_copy = flags & INFO_FL_ALWAYS_COPY;
}
if (copy_from_user(parg, (void __user *)arg, n))
@@ -2858,7 +2908,7 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
* array) fits into sbuf (so that mbuf will still remain
* unused up to here).
*/
- mbuf = kmalloc(array_size, GFP_KERNEL);
+ mbuf = kvmalloc(array_size, GFP_KERNEL);
err = -ENOMEM;
if (NULL == mbuf)
goto out_array_args;
@@ -2885,9 +2935,11 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
err = -EFAULT;
goto out_array_args;
}
- /* VIDIOC_QUERY_DV_TIMINGS can return an error, but still have valid
- results that must be returned. */
- if (err < 0 && cmd != VIDIOC_QUERY_DV_TIMINGS)
+ /*
+ * Some ioctls can return an error, but still have valid
+ * results that must be returned.
+ */
+ if (err < 0 && !always_copy)
goto out;
out_array_args:
@@ -2901,7 +2953,7 @@ out_array_args:
}
out:
- kfree(mbuf);
+ kvfree(mbuf);
return err;
}
EXPORT_SYMBOL(video_usercopy);
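
Because v4l_g_selection() and v4l_s_selection() above translate the _MPLANE buffer types before invoking the driver (and restore them afterwards), a driver's selection callbacks only ever see the single-planar types. A hypothetical capture-only callback, invented here for illustration and not part of the patch, can therefore check one buffer type only:

#include <linux/fs.h>
#include <linux/videodev2.h>
#include <media/v4l2-ioctl.h>

static int foo_g_selection(struct file *file, void *fh,
			   struct v4l2_selection *sel)
{
	/* The core has already mapped V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE
	 * to V4L2_BUF_TYPE_VIDEO_CAPTURE, so only the latter is accepted. */
	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		sel->r.left = 0;
		sel->r.top = 0;
		sel->r.width = 640;	/* made-up sensor active area */
		sel->r.height = 480;
		return 0;
	default:
		return -EINVAL;
	}
}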
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 6bc27e7b2a33..f62e68aa04c4 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -126,6 +126,43 @@ void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
+void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ struct v4l2_m2m_buffer *b;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+ b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
+ list_del(&b->list);
+ q_ctx->num_rdy--;
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);
+
+struct vb2_v4l2_buffer *
+v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
+{
+ struct v4l2_m2m_buffer *b, *tmp;
+ struct vb2_v4l2_buffer *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
+ list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
+ if (b->vb.vb2_buf.index == idx) {
+ list_del(&b->list);
+ q_ctx->num_rdy--;
+ ret = &b->vb;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);
+
/*
* Scheduling handlers
*/
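v4l2_m2m_buf_remove_by_idx(), added above, removes a specific buffer from a ready queue under the queue's spinlock. A hypothetical caller sketch, not from this patch (the function name and the choice of error state are placeholders):

#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

static void example_drop_capture_buf(struct v4l2_m2m_ctx *m2m_ctx,
				     unsigned int index)
{
	struct vb2_v4l2_buffer *vbuf;

	/* Pull the buffer with the given index off the CAPTURE ready queue. */
	vbuf = v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, index);
	if (vbuf)
		/* Hand it back to vb2 marked as errored. */
		vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_ERROR);
}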
diff --git a/drivers/media/v4l2-core/v4l2-of.c b/drivers/media/v4l2-core/v4l2-of.c
deleted file mode 100644
index 4f59f442dd0a..000000000000
--- a/drivers/media/v4l2-core/v4l2-of.c
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * V4L2 OF binding parsing library
- *
- * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
- * Author: Sylwester Nawrocki <[email protected]>
- *
- * Copyright (C) 2012 Renesas Electronics Corp.
- * Author: Guennadi Liakhovetski <[email protected]>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include <media/v4l2-of.h>
-
-static int v4l2_of_parse_csi_bus(const struct device_node *node,
- struct v4l2_of_endpoint *endpoint)
-{
- struct v4l2_of_bus_mipi_csi2 *bus = &endpoint->bus.mipi_csi2;
- struct property *prop;
- bool have_clk_lane = false;
- unsigned int flags = 0, lanes_used = 0;
- u32 v;
-
- prop = of_find_property(node, "data-lanes", NULL);
- if (prop) {
- const __be32 *lane = NULL;
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(bus->data_lanes); i++) {
- lane = of_prop_next_u32(prop, lane, &v);
- if (!lane)
- break;
-
- if (lanes_used & BIT(v))
- pr_warn("%s: duplicated lane %u in data-lanes\n",
- node->full_name, v);
- lanes_used |= BIT(v);
-
- bus->data_lanes[i] = v;
- }
- bus->num_data_lanes = i;
- }
-
- prop = of_find_property(node, "lane-polarities", NULL);
- if (prop) {
- const __be32 *polarity = NULL;
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(bus->lane_polarities); i++) {
- polarity = of_prop_next_u32(prop, polarity, &v);
- if (!polarity)
- break;
- bus->lane_polarities[i] = v;
- }
-
- if (i < 1 + bus->num_data_lanes /* clock + data */) {
- pr_warn("%s: too few lane-polarities entries (need %u, got %u)\n",
- node->full_name, 1 + bus->num_data_lanes, i);
- return -EINVAL;
- }
- }
-
- if (!of_property_read_u32(node, "clock-lanes", &v)) {
- if (lanes_used & BIT(v))
- pr_warn("%s: duplicated lane %u in clock-lanes\n",
- node->full_name, v);
- lanes_used |= BIT(v);
-
- bus->clock_lane = v;
- have_clk_lane = true;
- }
-
- if (of_get_property(node, "clock-noncontinuous", &v))
- flags |= V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
- else if (have_clk_lane || bus->num_data_lanes > 0)
- flags |= V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;
-
- bus->flags = flags;
- endpoint->bus_type = V4L2_MBUS_CSI2;
-
- return 0;
-}
-
-static void v4l2_of_parse_parallel_bus(const struct device_node *node,
- struct v4l2_of_endpoint *endpoint)
-{
- struct v4l2_of_bus_parallel *bus = &endpoint->bus.parallel;
- unsigned int flags = 0;
- u32 v;
-
- if (!of_property_read_u32(node, "hsync-active", &v))
- flags |= v ? V4L2_MBUS_HSYNC_ACTIVE_HIGH :
- V4L2_MBUS_HSYNC_ACTIVE_LOW;
-
- if (!of_property_read_u32(node, "vsync-active", &v))
- flags |= v ? V4L2_MBUS_VSYNC_ACTIVE_HIGH :
- V4L2_MBUS_VSYNC_ACTIVE_LOW;
-
- if (!of_property_read_u32(node, "field-even-active", &v))
- flags |= v ? V4L2_MBUS_FIELD_EVEN_HIGH :
- V4L2_MBUS_FIELD_EVEN_LOW;
- if (flags)
- endpoint->bus_type = V4L2_MBUS_PARALLEL;
- else
- endpoint->bus_type = V4L2_MBUS_BT656;
-
- if (!of_property_read_u32(node, "pclk-sample", &v))
- flags |= v ? V4L2_MBUS_PCLK_SAMPLE_RISING :
- V4L2_MBUS_PCLK_SAMPLE_FALLING;
-
- if (!of_property_read_u32(node, "data-active", &v))
- flags |= v ? V4L2_MBUS_DATA_ACTIVE_HIGH :
- V4L2_MBUS_DATA_ACTIVE_LOW;
-
- if (of_get_property(node, "slave-mode", &v))
- flags |= V4L2_MBUS_SLAVE;
- else
- flags |= V4L2_MBUS_MASTER;
-
- if (!of_property_read_u32(node, "bus-width", &v))
- bus->bus_width = v;
-
- if (!of_property_read_u32(node, "data-shift", &v))
- bus->data_shift = v;
-
- if (!of_property_read_u32(node, "sync-on-green-active", &v))
- flags |= v ? V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH :
- V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW;
-
- bus->flags = flags;
-
-}
-
-/**
- * v4l2_of_parse_endpoint() - parse all endpoint node properties
- * @node: pointer to endpoint device_node
- * @endpoint: pointer to the V4L2 OF endpoint data structure
- *
- * All properties are optional. If none are found, we don't set any flags.
- * This means the port has a static configuration and no properties have
- * to be specified explicitly.
- * If any properties that identify the bus as parallel are found and
- * slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if we recognise
- * the bus as serial CSI-2 and clock-noncontinuous isn't set, we set the
- * V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag.
- * The caller should hold a reference to @node.
- *
- * NOTE: This function does not parse properties the size of which is
- * variable without a low fixed limit. Please use
- * v4l2_of_alloc_parse_endpoint() in new drivers instead.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int v4l2_of_parse_endpoint(const struct device_node *node,
- struct v4l2_of_endpoint *endpoint)
-{
- int rval;
-
- of_graph_parse_endpoint(node, &endpoint->base);
- /* Zero fields from bus_type to until the end */
- memset(&endpoint->bus_type, 0, sizeof(*endpoint) -
- offsetof(typeof(*endpoint), bus_type));
-
- rval = v4l2_of_parse_csi_bus(node, endpoint);
- if (rval)
- return rval;
- /*
- * Parse the parallel video bus properties only if none
- * of the MIPI CSI-2 specific properties were found.
- */
- if (endpoint->bus.mipi_csi2.flags == 0)
- v4l2_of_parse_parallel_bus(node, endpoint);
-
- return 0;
-}
-EXPORT_SYMBOL(v4l2_of_parse_endpoint);
-
-/*
- * v4l2_of_free_endpoint() - free the endpoint acquired by
- * v4l2_of_alloc_parse_endpoint()
- * @endpoint - the endpoint the resources of which are to be released
- *
- * It is safe to call this function with NULL argument or on an
- * endpoint the parsing of which failed.
- */
-void v4l2_of_free_endpoint(struct v4l2_of_endpoint *endpoint)
-{
- if (IS_ERR_OR_NULL(endpoint))
- return;
-
- kfree(endpoint->link_frequencies);
- kfree(endpoint);
-}
-EXPORT_SYMBOL(v4l2_of_free_endpoint);
-
-/**
- * v4l2_of_alloc_parse_endpoint() - parse all endpoint node properties
- * @node: pointer to endpoint device_node
- *
- * All properties are optional. If none are found, we don't set any flags.
- * This means the port has a static configuration and no properties have
- * to be specified explicitly.
- * If any properties that identify the bus as parallel are found and
- * slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if we recognise
- * the bus as serial CSI-2 and clock-noncontinuous isn't set, we set the
- * V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag.
- * The caller should hold a reference to @node.
- *
- * v4l2_of_alloc_parse_endpoint() has two important differences to
- * v4l2_of_parse_endpoint():
- *
- * 1. It also parses variable size data and
- *
- * 2. The memory it has allocated to store the variable size data must
- * be freed using v4l2_of_free_endpoint() when no longer needed.
- *
- * Return: Pointer to v4l2_of_endpoint if successful, on error a
- * negative error code.
- */
-struct v4l2_of_endpoint *v4l2_of_alloc_parse_endpoint(
- const struct device_node *node)
-{
- struct v4l2_of_endpoint *endpoint;
- int len;
- int rval;
-
- endpoint = kzalloc(sizeof(*endpoint), GFP_KERNEL);
- if (!endpoint)
- return ERR_PTR(-ENOMEM);
-
- rval = v4l2_of_parse_endpoint(node, endpoint);
- if (rval < 0)
- goto out_err;
-
- if (of_get_property(node, "link-frequencies", &len)) {
- endpoint->link_frequencies = kmalloc(len, GFP_KERNEL);
- if (!endpoint->link_frequencies) {
- rval = -ENOMEM;
- goto out_err;
- }
-
- endpoint->nr_of_link_frequencies =
- len / sizeof(*endpoint->link_frequencies);
-
- rval = of_property_read_u64_array(
- node, "link-frequencies", endpoint->link_frequencies,
- endpoint->nr_of_link_frequencies);
- if (rval < 0)
- goto out_err;
- }
-
- return endpoint;
-
-out_err:
- v4l2_of_free_endpoint(endpoint);
- return ERR_PTR(rval);
-}
-EXPORT_SYMBOL(v4l2_of_alloc_parse_endpoint);
-
-/**
- * v4l2_of_parse_link() - parse a link between two endpoints
- * @node: pointer to the endpoint at the local end of the link
- * @link: pointer to the V4L2 OF link data structure
- *
- * Fill the link structure with the local and remote nodes and port numbers.
- * The local_node and remote_node fields are set to point to the local and
- * remote port's parent nodes respectively (the port parent node being the
- * parent node of the port node if that node isn't a 'ports' node, or the
- * grand-parent node of the port node otherwise).
- *
- * A reference is taken to both the local and remote nodes, the caller must use
- * v4l2_of_put_link() to drop the references when done with the link.
- *
- * Return: 0 on success, or -ENOLINK if the remote endpoint can't be found.
- */
-int v4l2_of_parse_link(const struct device_node *node,
- struct v4l2_of_link *link)
-{
- struct device_node *np;
-
- memset(link, 0, sizeof(*link));
-
- np = of_get_parent(node);
- of_property_read_u32(np, "reg", &link->local_port);
- np = of_get_next_parent(np);
- if (of_node_cmp(np->name, "ports") == 0)
- np = of_get_next_parent(np);
- link->local_node = np;
-
- np = of_parse_phandle(node, "remote-endpoint", 0);
- if (!np) {
- of_node_put(link->local_node);
- return -ENOLINK;
- }
-
- np = of_get_parent(np);
- of_property_read_u32(np, "reg", &link->remote_port);
- np = of_get_next_parent(np);
- if (of_node_cmp(np->name, "ports") == 0)
- np = of_get_next_parent(np);
- link->remote_node = np;
-
- return 0;
-}
-EXPORT_SYMBOL(v4l2_of_parse_link);
-
-/**
- * v4l2_of_put_link() - drop references to nodes in a link
- * @link: pointer to the V4L2 OF link data structure
- *
- * Drop references to the local and remote nodes in the link. This function must
- * be called on every link parsed with v4l2_of_parse_link().
- */
-void v4l2_of_put_link(struct v4l2_of_link *link)
-{
- of_node_put(link->local_node);
- of_node_put(link->remote_node);
-}
-EXPORT_SYMBOL(v4l2_of_put_link);
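The kernel-doc for v4l2_of_parse_link() above spells out a reference contract: both node references taken on success must be dropped with v4l2_of_put_link(). A sketch of that pairing for the helpers this patch removes (the caller name is hypothetical):

#include <media/v4l2-of.h>

static int example_inspect_remote(const struct device_node *ep)
{
	struct v4l2_of_link link;
	int ret;

	ret = v4l2_of_parse_link(ep, &link);
	if (ret)
		return ret;		/* -ENOLINK: no remote endpoint */

	/* ... use link.local_port, link.remote_node, etc. here ... */

	v4l2_of_put_link(&link);	/* drops both node references */
	return 0;
}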
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index da78497ae5ed..43fefa73e0a3 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -17,6 +17,7 @@
*/
#include <linux/ioctl.h>
+#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/videodev2.h>
@@ -577,13 +578,14 @@ v4l2_subdev_alloc_pad_config(struct v4l2_subdev *sd)
if (!sd->entity.num_pads)
return NULL;
- cfg = kcalloc(sd->entity.num_pads, sizeof(*cfg), GFP_KERNEL);
+ cfg = kvmalloc_array(sd->entity.num_pads, sizeof(*cfg),
+ GFP_KERNEL | __GFP_ZERO);
if (!cfg)
return NULL;
ret = v4l2_subdev_call(sd, pad, init_cfg, cfg);
if (ret < 0 && ret != -ENOIOCTLCMD) {
- kfree(cfg);
+ kvfree(cfg);
return NULL;
}
@@ -593,7 +595,7 @@ EXPORT_SYMBOL_GPL(v4l2_subdev_alloc_pad_config);
void v4l2_subdev_free_pad_config(struct v4l2_subdev_pad_config *cfg)
{
- kfree(cfg);
+ kvfree(cfg);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_free_pad_config);
#endif /* CONFIG_MEDIA_CONTROLLER */
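The change above switches the pad-config array to kvmalloc_array()/kvfree(), which may fall back to vmalloc() when a large allocation cannot be satisfied contiguously. A minimal sketch of the pairing, not tied to this file (names are placeholders):

#include <linux/mm.h>

static int *example_alloc_table(size_t nmemb)
{
	/* Zeroed allocation that may be backed by either kmalloc or vmalloc. */
	return kvmalloc_array(nmemb, sizeof(int), GFP_KERNEL | __GFP_ZERO);
}

static void example_free_table(int *table)
{
	/* kvfree() handles both backing allocators; plain kfree() would not. */
	kvfree(table);
}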
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index c0175ea7e7ad..14f83cecfa92 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -210,7 +210,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
mem_priv = call_ptr_memop(vb, alloc,
q->alloc_devs[plane] ? : q->dev,
q->dma_attrs, size, dma_dir, q->gfp_flags);
- if (IS_ERR(mem_priv)) {
+ if (IS_ERR_OR_NULL(mem_priv)) {
if (mem_priv)
ret = PTR_ERR(mem_priv);
goto free;
@@ -956,9 +956,9 @@ void vb2_discard_done(struct vb2_queue *q)
EXPORT_SYMBOL_GPL(vb2_discard_done);
/**
- * __qbuf_mmap() - handle qbuf of an MMAP buffer
+ * __prepare_mmap() - prepare an MMAP buffer
*/
-static int __qbuf_mmap(struct vb2_buffer *vb, const void *pb)
+static int __prepare_mmap(struct vb2_buffer *vb, const void *pb)
{
int ret = 0;
@@ -969,9 +969,9 @@ static int __qbuf_mmap(struct vb2_buffer *vb, const void *pb)
}
/**
- * __qbuf_userptr() - handle qbuf of a USERPTR buffer
+ * __prepare_userptr() - prepare a USERPTR buffer
*/
-static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb)
+static int __prepare_userptr(struct vb2_buffer *vb, const void *pb)
{
struct vb2_plane planes[VB2_MAX_PLANES];
struct vb2_queue *q = vb->vb2_queue;
@@ -1087,9 +1087,9 @@ err:
}
/**
- * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
+ * __prepare_dmabuf() - prepare a DMABUF buffer
*/
-static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb)
+static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb)
{
struct vb2_plane planes[VB2_MAX_PLANES];
struct vb2_queue *q = vb->vb2_queue;
@@ -1227,23 +1227,19 @@ err:
static void __enqueue_in_driver(struct vb2_buffer *vb)
{
struct vb2_queue *q = vb->vb2_queue;
- unsigned int plane;
vb->state = VB2_BUF_STATE_ACTIVE;
atomic_inc(&q->owned_by_drv_count);
trace_vb2_buf_queue(q, vb);
- /* sync buffers */
- for (plane = 0; plane < vb->num_planes; ++plane)
- call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
-
call_void_vb_qop(vb, buf_queue, vb);
}
static int __buf_prepare(struct vb2_buffer *vb, const void *pb)
{
struct vb2_queue *q = vb->vb2_queue;
+ unsigned int plane;
int ret;
if (q->error) {
@@ -1255,24 +1251,32 @@ static int __buf_prepare(struct vb2_buffer *vb, const void *pb)
switch (q->memory) {
case VB2_MEMORY_MMAP:
- ret = __qbuf_mmap(vb, pb);
+ ret = __prepare_mmap(vb, pb);
break;
case VB2_MEMORY_USERPTR:
- ret = __qbuf_userptr(vb, pb);
+ ret = __prepare_userptr(vb, pb);
break;
case VB2_MEMORY_DMABUF:
- ret = __qbuf_dmabuf(vb, pb);
+ ret = __prepare_dmabuf(vb, pb);
break;
default:
WARN(1, "Invalid queue type\n");
ret = -EINVAL;
}
- if (ret)
+ if (ret) {
dprintk(1, "buffer preparation failed: %d\n", ret);
- vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;
+ vb->state = VB2_BUF_STATE_DEQUEUED;
+ return ret;
+ }
- return ret;
+ /* sync buffers */
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
+
+ vb->state = VB2_BUF_STATE_PREPARED;
+
+ return 0;
}
int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 8e8798a74760..5defa1f22ca2 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -120,8 +120,8 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
buf->num_pages = size >> PAGE_SHIFT;
buf->dma_sgt = &buf->sg_table;
- buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
- GFP_KERNEL);
+ buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
+ GFP_KERNEL | __GFP_ZERO);
if (!buf->pages)
goto fail_pages_array_alloc;
@@ -165,7 +165,7 @@ fail_table_alloc:
while (num_pages--)
__free_page(buf->pages[num_pages]);
fail_pages_alloc:
- kfree(buf->pages);
+ kvfree(buf->pages);
fail_pages_array_alloc:
kfree(buf);
return ERR_PTR(-ENOMEM);
@@ -187,7 +187,7 @@ static void vb2_dma_sg_put(void *buf_priv)
sg_free_table(buf->dma_sgt);
while (--i >= 0)
__free_page(buf->pages[i]);
- kfree(buf->pages);
+ kvfree(buf->pages);
put_device(buf->dev);
kfree(buf);
}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 3eb5c93595f6..94ad2c1c3d90 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -160,6 +160,11 @@ config MFD_AXP20X_I2C
components like regulators or the PEK (Power Enable Key) under the
corresponding menus.
+	  Note that on x86 this provides an ACPI OpRegion, so it must be 'y'
+	  (built in) and not a module, as the OpRegion must be available as
+	  soon as possible. For the same reason the I2C bus driver options
+	  I2C_DESIGNWARE_PLATFORM and I2C_DESIGNWARE_BAYTRAIL must be 'y' too.
+
config MFD_AXP20X_RSB
tristate "X-Powers AXP series PMICs with RSB"
select MFD_AXP20X
@@ -448,17 +453,22 @@ config LPC_SCH
config INTEL_SOC_PMIC
bool "Support for Crystal Cove PMIC"
- depends on GPIOLIB
- depends on I2C=y
+ depends on HAS_IOMEM && I2C=y && GPIOLIB && COMMON_CLK
+ depends on X86 || COMPILE_TEST
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
+ select I2C_DESIGNWARE_PLATFORM if ACPI
help
Select this option to enable support for Crystal Cove PMIC
on some Intel SoC systems. The PMIC provides ADC, GPIO,
thermal, charger and related power management functions
on these systems.
+	  This option is a bool as it provides an ACPI OpRegion, which must be
+	  available before any devices using it are probed. This option also
+	  causes the designware-i2c driver to be built in for the same reason.
+
config INTEL_SOC_PMIC_BXTWC
tristate "Support for Intel Broxton Whiskey Cove PMIC"
depends on INTEL_PMC_IPC
@@ -470,6 +480,22 @@ config INTEL_SOC_PMIC_BXTWC
thermal, charger and related power management functions
on these systems.
+config INTEL_SOC_PMIC_CHTWC
+ tristate "Support for Intel Cherry Trail Whiskey Cove PMIC"
+ depends on ACPI && HAS_IOMEM && I2C=y && COMMON_CLK
+ depends on X86 || COMPILE_TEST
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ select I2C_DESIGNWARE_PLATFORM
+ help
+ Select this option to enable support for the Intel Cherry Trail
+ Whiskey Cove PMIC found on some Intel Cherry Trail systems.
+
+	  This option is a bool as it provides an ACPI OpRegion, which must be
+	  available before any devices using it are probed. This option also
+	  causes the designware-i2c driver to be built in for the same reason.
+
config MFD_INTEL_LPSS
tristate
select COMMON_CLK
@@ -1325,6 +1351,20 @@ config MFD_TI_LP873X
This driver can also be built as a module. If so, the module
will be called lp873x.
+config MFD_TI_LP87565
+ tristate "TI LP87565 Power Management IC"
+ depends on I2C && OF
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+	  If you say yes here then you get support for the LP87565 series of
+	  Power Management Integrated Circuits (PMICs).
+	  These include voltage regulators, thermal protection and configurable
+	  General Purpose Outputs (GPOs) used in portable devices.
+
+ This driver can also be built as a module. If so, the module
+ will be called lp87565.
+
config MFD_TPS65218
tristate "TI TPS65218 Power Management chips"
depends on I2C
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index c16bf1ea0ea9..080793b3fd0e 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o
obj-$(CONFIG_MFD_TI_LP873X) += lp873x.o
+obj-$(CONFIG_MFD_TI_LP87565) += lp87565.o
obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o
obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
@@ -214,6 +215,7 @@ obj-$(CONFIG_MFD_SKY81452) += sky81452.o
intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o
obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
obj-$(CONFIG_INTEL_SOC_PMIC_BXTWC) += intel_soc_pmic_bxtwc.o
+obj-$(CONFIG_INTEL_SOC_PMIC_CHTWC) += intel_soc_pmic_chtwc.o
obj-$(CONFIG_MFD_MT6397) += mt6397-core.o
obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o
diff --git a/drivers/mfd/atmel-flexcom.c b/drivers/mfd/atmel-flexcom.c
index e8e67be6b493..064bde9cff5a 100644
--- a/drivers/mfd/atmel-flexcom.c
+++ b/drivers/mfd/atmel-flexcom.c
@@ -80,7 +80,7 @@ static int atmel_flexcom_probe(struct platform_device *pdev)
clk_disable_unprepare(clk);
- return of_platform_populate(np, NULL, NULL, &pdev->dev);
+ return devm_of_platform_populate(&pdev->dev);
}
static const struct of_device_id atmel_flexcom_of_match[] = {
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 1dc6235778eb..917b6ddc4f15 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -848,7 +848,8 @@ static struct mfd_cell axp803_cells[] = {
.name = "axp20x-pek",
.num_resources = ARRAY_SIZE(axp803_pek_resources),
.resources = axp803_pek_resources,
- }
+ },
+ { .name = "axp20x-regulator" },
};
static struct mfd_cell axp806_cells[] = {
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index d4a407e466b5..dc6ce9091694 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -147,7 +147,7 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
}
if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
- err = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ err = devm_of_platform_populate(dev);
if (err) {
mfd_remove_devices(dev);
dev_err(dev, "Failed to register sub-devices\n");
@@ -183,6 +183,9 @@ int cros_ec_remove(struct cros_ec_device *ec_dev)
cros_ec_acpi_remove_gpe_handler();
+ if (ec_dev->irq)
+ free_irq(ec_dev->irq, ec_dev);
+
return 0;
}
EXPORT_SYMBOL(cros_ec_remove);
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index 7f5e8be0a9ea..fbe0f245ce8e 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -429,9 +429,6 @@ static const struct regmap_range da9061_aa_readable_ranges[] = {
.range_min = DA9062AA_VLDO1_B,
.range_max = DA9062AA_VLDO4_B,
}, {
- .range_min = DA9062AA_BBAT_CONT,
- .range_max = DA9062AA_BBAT_CONT,
- }, {
.range_min = DA9062AA_INTERFACE,
.range_max = DA9062AA_CONFIG_E,
}, {
@@ -514,9 +511,6 @@ static const struct regmap_range da9061_aa_writeable_ranges[] = {
.range_min = DA9062AA_VLDO1_B,
.range_max = DA9062AA_VLDO4_B,
}, {
- .range_min = DA9062AA_BBAT_CONT,
- .range_max = DA9062AA_BBAT_CONT,
- }, {
.range_min = DA9062AA_GP_ID_0,
.range_max = DA9062AA_GP_ID_19,
},
@@ -651,9 +645,6 @@ static const struct regmap_range da9062_aa_readable_ranges[] = {
.range_min = DA9062AA_VLDO1_B,
.range_max = DA9062AA_VLDO4_B,
}, {
- .range_min = DA9062AA_BBAT_CONT,
- .range_max = DA9062AA_BBAT_CONT,
- }, {
.range_min = DA9062AA_INTERFACE,
.range_max = DA9062AA_CONFIG_E,
}, {
@@ -730,9 +721,6 @@ static const struct regmap_range da9062_aa_writeable_ranges[] = {
.range_min = DA9062AA_VLDO1_B,
.range_max = DA9062AA_VLDO4_B,
}, {
- .range_min = DA9062AA_BBAT_CONT,
- .range_max = DA9062AA_BBAT_CONT,
- }, {
.range_min = DA9062AA_GP_ID_0,
.range_max = DA9062AA_GP_ID_19,
},
diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c
index 0bf3aebdac45..ca829f85672f 100644
--- a/drivers/mfd/exynos-lpass.c
+++ b/drivers/mfd/exynos-lpass.c
@@ -138,7 +138,7 @@ static int exynos_lpass_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
exynos_lpass_enable(lpass);
- return of_platform_populate(dev->of_node, NULL, NULL, dev);
+ return devm_of_platform_populate(dev);
}
static int exynos_lpass_remove(struct platform_device *pdev)
diff --git a/drivers/mfd/fsl-imx25-tsadc.c b/drivers/mfd/fsl-imx25-tsadc.c
index ac430a396a89..b3767c3141e5 100644
--- a/drivers/mfd/fsl-imx25-tsadc.c
+++ b/drivers/mfd/fsl-imx25-tsadc.c
@@ -59,7 +59,7 @@ static int mx25_tsadc_domain_map(struct irq_domain *d, unsigned int irq,
return 0;
}
-static struct irq_domain_ops mx25_tsadc_domain_ops = {
+static const struct irq_domain_ops mx25_tsadc_domain_ops = {
.map = mx25_tsadc_domain_map,
.xlate = irq_domain_xlate_onecell,
};
@@ -129,7 +129,6 @@ static void mx25_tsadc_setup_clk(struct platform_device *pdev,
static int mx25_tsadc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
struct mx25_tsadc *tsadc;
struct resource *res;
int ret;
@@ -178,9 +177,7 @@ static int mx25_tsadc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tsadc);
- of_platform_populate(np, NULL, NULL, dev);
-
- return 0;
+ return devm_of_platform_populate(dev);
}
static const struct of_device_id mx25_tsadc_ids[] = {
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index 16ffeaeb1385..ad388bb056cd 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -201,6 +201,19 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x9d64), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9d65), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0x9d66), (kernel_ulong_t)&spt_uart_info },
+ /* CNL-LP */
+ { PCI_VDEVICE(INTEL, 0x9da8), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x9da9), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x9daa), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x9dab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x9dfb), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x9dc5), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x9dc6), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x9dc7), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x9de8), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x9de9), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&spt_i2c_info },
/* SPT-H */
{ PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info },
@@ -219,6 +232,17 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&spt_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&spt_uart_info },
+ /* CNL-H */
+ { PCI_VDEVICE(INTEL, 0xa328), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xa329), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xa32a), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa32b), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa37b), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa347), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xa368), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa369), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&spt_i2c_info },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
diff --git a/drivers/mfd/intel_quark_i2c_gpio.c b/drivers/mfd/intel_quark_i2c_gpio.c
index 7946d6e38b87..90e35dec8648 100644
--- a/drivers/mfd/intel_quark_i2c_gpio.c
+++ b/drivers/mfd/intel_quark_i2c_gpio.c
@@ -58,19 +58,34 @@ struct intel_quark_mfd {
struct clk_lookup *i2c_clk_lookup;
};
-struct i2c_mode_info {
- const char *name;
- unsigned int i2c_scl_freq;
-};
-
-static const struct i2c_mode_info platform_i2c_mode_info[] = {
+static const struct dmi_system_id dmi_platform_info[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
+ },
+ .driver_data = (void *)100000,
+ },
{
- .name = "Galileo",
- .i2c_scl_freq = 100000,
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
+ },
+ .driver_data = (void *)400000,
},
{
- .name = "GalileoGen2",
- .i2c_scl_freq = 400000,
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
+ DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
+ "6ES7647-0AA00-0YA2"),
+ },
+ .driver_data = (void *)400000,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
+ DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
+ "6ES7647-0AA00-1YA2"),
+ },
+ .driver_data = (void *)400000,
},
{}
};
@@ -160,8 +175,7 @@ static void intel_quark_unregister_i2c_clk(struct device *dev)
static int intel_quark_i2c_setup(struct pci_dev *pdev, struct mfd_cell *cell)
{
- const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
- const struct i2c_mode_info *info;
+ const struct dmi_system_id *dmi_id;
struct dw_i2c_platform_data *pdata;
struct resource *res = (struct resource *)cell->resources;
struct device *dev = &pdev->dev;
@@ -181,14 +195,9 @@ static int intel_quark_i2c_setup(struct pci_dev *pdev, struct mfd_cell *cell)
/* Normal mode by default */
pdata->i2c_scl_freq = 100000;
- if (board_name) {
- for (info = platform_i2c_mode_info; info->name; info++) {
- if (!strcmp(board_name, info->name)) {
- pdata->i2c_scl_freq = info->i2c_scl_freq;
- break;
- }
- }
- }
+ dmi_id = dmi_first_match(dmi_platform_info);
+ if (dmi_id)
+ pdata->i2c_scl_freq = (uintptr_t)dmi_id->driver_data;
cell->platform_data = pdata;
cell->pdata_size = sizeof(*pdata);
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index 8c3cbf63c6ad..15bc052704a6 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -82,20 +82,26 @@ enum bxtwc_irqs {
BXTWC_PWRBTN_IRQ,
};
-enum bxtwc_irqs_level2 {
- /* Level 2 */
- BXTWC_THRM0_IRQ = 0,
- BXTWC_THRM1_IRQ,
- BXTWC_THRM2_IRQ,
- BXTWC_BCU_IRQ,
- BXTWC_ADC_IRQ,
- BXTWC_USBC_IRQ,
+enum bxtwc_irqs_bcu {
+ BXTWC_BCU_IRQ = 0,
+};
+
+enum bxtwc_irqs_adc {
+ BXTWC_ADC_IRQ = 0,
+};
+
+enum bxtwc_irqs_chgr {
+ BXTWC_USBC_IRQ = 0,
BXTWC_CHGR0_IRQ,
BXTWC_CHGR1_IRQ,
- BXTWC_GPIO0_IRQ,
- BXTWC_GPIO1_IRQ,
- BXTWC_CRIT_IRQ,
- BXTWC_TMU_IRQ,
+};
+
+enum bxtwc_irqs_tmu {
+ BXTWC_TMU_IRQ = 0,
+};
+
+enum bxtwc_irqs_crit {
+ BXTWC_CRIT_IRQ = 0,
};
static const struct regmap_irq bxtwc_regmap_irqs[] = {
@@ -110,24 +116,28 @@ static const struct regmap_irq bxtwc_regmap_irqs[] = {
REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 1, 0x03),
};
-static const struct regmap_irq bxtwc_regmap_irqs_level2[] = {
- REGMAP_IRQ_REG(BXTWC_THRM0_IRQ, 0, 0xff),
- REGMAP_IRQ_REG(BXTWC_THRM1_IRQ, 1, 0xbf),
- REGMAP_IRQ_REG(BXTWC_THRM2_IRQ, 2, 0xff),
- REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 3, 0x1f),
- REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 4, 0xff),
- REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 5, BIT(5)),
- REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x1f),
- REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 6, 0x1f),
- REGMAP_IRQ_REG(BXTWC_GPIO0_IRQ, 7, 0xff),
- REGMAP_IRQ_REG(BXTWC_GPIO1_IRQ, 8, 0x3f),
- REGMAP_IRQ_REG(BXTWC_CRIT_IRQ, 9, 0x03),
+static const struct regmap_irq bxtwc_regmap_irqs_bcu[] = {
+ REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 0, 0x1f),
+};
+
+static const struct regmap_irq bxtwc_regmap_irqs_adc[] = {
+ REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 0, 0xff),
+};
+
+static const struct regmap_irq bxtwc_regmap_irqs_chgr[] = {
+ REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, BIT(5)),
+ REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 0, 0x1f),
+ REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 1, 0x1f),
};
static const struct regmap_irq bxtwc_regmap_irqs_tmu[] = {
REGMAP_IRQ_REG(BXTWC_TMU_IRQ, 0, 0x06),
};
+static const struct regmap_irq bxtwc_regmap_irqs_crit[] = {
+ REGMAP_IRQ_REG(BXTWC_CRIT_IRQ, 0, 0x03),
+};
+
static struct regmap_irq_chip bxtwc_regmap_irq_chip = {
.name = "bxtwc_irq_chip",
.status_base = BXTWC_IRQLVL1,
@@ -137,15 +147,6 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip = {
.num_regs = 2,
};
-static struct regmap_irq_chip bxtwc_regmap_irq_chip_level2 = {
- .name = "bxtwc_irq_chip_level2",
- .status_base = BXTWC_THRM0IRQ,
- .mask_base = BXTWC_MTHRM0IRQ,
- .irqs = bxtwc_regmap_irqs_level2,
- .num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs_level2),
- .num_regs = 10,
-};
-
static struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = {
.name = "bxtwc_irq_chip_tmu",
.status_base = BXTWC_TMUIRQ,
@@ -155,9 +156,44 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = {
.num_regs = 1,
};
+static struct regmap_irq_chip bxtwc_regmap_irq_chip_bcu = {
+ .name = "bxtwc_irq_chip_bcu",
+ .status_base = BXTWC_BCUIRQ,
+ .mask_base = BXTWC_MBCUIRQ,
+ .irqs = bxtwc_regmap_irqs_bcu,
+ .num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs_bcu),
+ .num_regs = 1,
+};
+
+static struct regmap_irq_chip bxtwc_regmap_irq_chip_adc = {
+ .name = "bxtwc_irq_chip_adc",
+ .status_base = BXTWC_ADCIRQ,
+ .mask_base = BXTWC_MADCIRQ,
+ .irqs = bxtwc_regmap_irqs_adc,
+ .num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs_adc),
+ .num_regs = 1,
+};
+
+static struct regmap_irq_chip bxtwc_regmap_irq_chip_chgr = {
+ .name = "bxtwc_irq_chip_chgr",
+ .status_base = BXTWC_CHGR0IRQ,
+ .mask_base = BXTWC_MCHGR0IRQ,
+ .irqs = bxtwc_regmap_irqs_chgr,
+ .num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs_chgr),
+ .num_regs = 2,
+};
+
+static struct regmap_irq_chip bxtwc_regmap_irq_chip_crit = {
+ .name = "bxtwc_irq_chip_crit",
+ .status_base = BXTWC_CRITIRQ,
+ .mask_base = BXTWC_MCRITIRQ,
+ .irqs = bxtwc_regmap_irqs_crit,
+ .num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs_crit),
+ .num_regs = 1,
+};
+
static struct resource gpio_resources[] = {
- DEFINE_RES_IRQ_NAMED(BXTWC_GPIO0_IRQ, "GPIO0"),
- DEFINE_RES_IRQ_NAMED(BXTWC_GPIO1_IRQ, "GPIO1"),
+ DEFINE_RES_IRQ_NAMED(BXTWC_GPIO_LVL1_IRQ, "GPIO"),
};
static struct resource adc_resources[] = {
@@ -174,9 +210,7 @@ static struct resource charger_resources[] = {
};
static struct resource thermal_resources[] = {
- DEFINE_RES_IRQ(BXTWC_THRM0_IRQ),
- DEFINE_RES_IRQ(BXTWC_THRM1_IRQ),
- DEFINE_RES_IRQ(BXTWC_THRM2_IRQ),
+ DEFINE_RES_IRQ(BXTWC_THRM_LVL1_IRQ),
};
static struct resource bcu_resources[] = {
@@ -367,6 +401,26 @@ static const struct regmap_config bxtwc_regmap_config = {
.reg_read = regmap_ipc_byte_reg_read,
};
+static int bxtwc_add_chained_irq_chip(struct intel_soc_pmic *pmic,
+ struct regmap_irq_chip_data *pdata,
+ int pirq, int irq_flags,
+ const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
+{
+ int irq;
+
+ irq = regmap_irq_get_virq(pdata, pirq);
+ if (irq < 0) {
+ dev_err(pmic->dev,
+ "Failed to get parent vIRQ(%d) for chip %s, ret:%d\n",
+ pirq, chip->name, irq);
+ return irq;
+ }
+
+ return devm_regmap_add_irq_chip(pmic->dev, pmic->regmap, irq, irq_flags,
+ 0, chip, data);
+}
+
static int bxtwc_probe(struct platform_device *pdev)
{
int ret;
@@ -409,45 +463,88 @@ static int bxtwc_probe(struct platform_device *pdev)
return ret;
}
- ret = regmap_add_irq_chip(pmic->regmap, pmic->irq,
- IRQF_ONESHOT | IRQF_SHARED,
- 0, &bxtwc_regmap_irq_chip,
- &pmic->irq_chip_data);
+ ret = devm_regmap_add_irq_chip(&pdev->dev, pmic->regmap, pmic->irq,
+ IRQF_ONESHOT | IRQF_SHARED,
+ 0, &bxtwc_regmap_irq_chip,
+ &pmic->irq_chip_data);
if (ret) {
dev_err(&pdev->dev, "Failed to add IRQ chip\n");
return ret;
}
- ret = regmap_add_irq_chip(pmic->regmap, pmic->irq,
- IRQF_ONESHOT | IRQF_SHARED,
- 0, &bxtwc_regmap_irq_chip_level2,
- &pmic->irq_chip_data_level2);
+ ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
+ BXTWC_TMU_LVL1_IRQ,
+ IRQF_ONESHOT,
+ &bxtwc_regmap_irq_chip_tmu,
+ &pmic->irq_chip_data_tmu);
if (ret) {
- dev_err(&pdev->dev, "Failed to add secondary IRQ chip\n");
- goto err_irq_chip_level2;
+ dev_err(&pdev->dev, "Failed to add TMU IRQ chip\n");
+ return ret;
}
- ret = regmap_add_irq_chip(pmic->regmap, pmic->irq,
- IRQF_ONESHOT | IRQF_SHARED,
- 0, &bxtwc_regmap_irq_chip_tmu,
- &pmic->irq_chip_data_tmu);
+ /* Add chained IRQ handler for BCU IRQs */
+ ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
+ BXTWC_BCU_LVL1_IRQ,
+ IRQF_ONESHOT,
+ &bxtwc_regmap_irq_chip_bcu,
+ &pmic->irq_chip_data_bcu);
+
if (ret) {
- dev_err(&pdev->dev, "Failed to add TMU IRQ chip\n");
- goto err_irq_chip_tmu;
+ dev_err(&pdev->dev, "Failed to add BUC IRQ chip\n");
+ return ret;
+ }
+
+ /* Add chained IRQ handler for ADC IRQs */
+ ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
+ BXTWC_ADC_LVL1_IRQ,
+ IRQF_ONESHOT,
+ &bxtwc_regmap_irq_chip_adc,
+ &pmic->irq_chip_data_adc);
+
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add ADC IRQ chip\n");
+ return ret;
+ }
+
+ /* Add chained IRQ handler for CHGR IRQs */
+ ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
+ BXTWC_CHGR_LVL1_IRQ,
+ IRQF_ONESHOT,
+ &bxtwc_regmap_irq_chip_chgr,
+ &pmic->irq_chip_data_chgr);
+
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add CHGR IRQ chip\n");
+ return ret;
+ }
+
+ /* Add chained IRQ handler for CRIT IRQs */
+ ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
+ BXTWC_CRIT_LVL1_IRQ,
+ IRQF_ONESHOT,
+ &bxtwc_regmap_irq_chip_crit,
+ &pmic->irq_chip_data_crit);
+
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add CRIT IRQ chip\n");
+ return ret;
}
- ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, bxt_wc_dev,
- ARRAY_SIZE(bxt_wc_dev), NULL, 0,
- NULL);
+ ret = devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, bxt_wc_dev,
+ ARRAY_SIZE(bxt_wc_dev), NULL, 0, NULL);
if (ret) {
dev_err(&pdev->dev, "Failed to add devices\n");
- goto err_mfd;
+ return ret;
}
ret = sysfs_create_group(&pdev->dev.kobj, &bxtwc_group);
if (ret) {
dev_err(&pdev->dev, "Failed to create sysfs group %d\n", ret);
- goto err_sysfs;
+ return ret;
}
/*
@@ -461,28 +558,11 @@ static int bxtwc_probe(struct platform_device *pdev)
BXTWC_MIRQLVL1_MCHGR, 0);
return 0;
-
-err_sysfs:
- mfd_remove_devices(&pdev->dev);
-err_mfd:
- regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data_tmu);
-err_irq_chip_tmu:
- regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data_level2);
-err_irq_chip_level2:
- regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
-
- return ret;
}
static int bxtwc_remove(struct platform_device *pdev)
{
- struct intel_soc_pmic *pmic = dev_get_drvdata(&pdev->dev);
-
sysfs_remove_group(&pdev->dev.kobj, &bxtwc_group);
- mfd_remove_devices(&pdev->dev);
- regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
- regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data_level2);
- regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data_tmu);
return 0;
}
diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c
new file mode 100644
index 000000000000..b35da01d5bcf
--- /dev/null
+++ b/drivers/mfd/intel_soc_pmic_chtwc.c
@@ -0,0 +1,230 @@
+/*
+ * MFD core driver for Intel Cherrytrail Whiskey Cove PMIC
+ *
+ * Copyright (C) 2017 Hans de Goede <[email protected]>
+ *
+ * Based on various non-upstream patches to support the CHT Whiskey Cove PMIC:
+ * Copyright (C) 2013-2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/regmap.h>
+
+/* PMIC device registers */
+#define REG_OFFSET_MASK GENMASK(7, 0)
+#define REG_ADDR_MASK GENMASK(15, 8)
+#define REG_ADDR_SHIFT 8
+
+#define CHT_WC_IRQLVL1 0x6e02
+#define CHT_WC_IRQLVL1_MASK 0x6e0e
+
+/* The Whiskey Cove PMIC shares the same ACPI ID between different platforms */
+#define CHT_WC_HRV 3
+
+/* Level 1 IRQs (level 2 IRQs are handled in the child device drivers) */
+enum {
+ CHT_WC_PWRSRC_IRQ = 0,
+ CHT_WC_THRM_IRQ,
+ CHT_WC_BCU_IRQ,
+ CHT_WC_ADC_IRQ,
+ CHT_WC_EXT_CHGR_IRQ,
+ CHT_WC_GPIO_IRQ,
+ /* There is no irq 6 */
+ CHT_WC_CRIT_IRQ = 7,
+};
+
+static struct resource cht_wc_pwrsrc_resources[] = {
+ DEFINE_RES_IRQ(CHT_WC_PWRSRC_IRQ),
+};
+
+static struct resource cht_wc_ext_charger_resources[] = {
+ DEFINE_RES_IRQ(CHT_WC_EXT_CHGR_IRQ),
+};
+
+static struct mfd_cell cht_wc_dev[] = {
+ {
+ .name = "cht_wcove_pwrsrc",
+ .num_resources = ARRAY_SIZE(cht_wc_pwrsrc_resources),
+ .resources = cht_wc_pwrsrc_resources,
+ }, {
+ .name = "cht_wcove_ext_chgr",
+ .num_resources = ARRAY_SIZE(cht_wc_ext_charger_resources),
+ .resources = cht_wc_ext_charger_resources,
+ },
+ { .name = "cht_wcove_region", },
+};
+
+/*
+ * The CHT Whiskey Cove spans multiple I2C addresses, each with an 8-bit
+ * register address space, so we use 16-bit register addresses in which
+ * the high 8 bits hold the I2C client address.
+ */
+static int cht_wc_byte_reg_read(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct i2c_client *client = context;
+ int ret, orig_addr = client->addr;
+
+ if (!(reg & REG_ADDR_MASK)) {
+ dev_err(&client->dev, "Error I2C address not specified\n");
+ return -EINVAL;
+ }
+
+ client->addr = (reg & REG_ADDR_MASK) >> REG_ADDR_SHIFT;
+ ret = i2c_smbus_read_byte_data(client, reg & REG_OFFSET_MASK);
+ client->addr = orig_addr;
+
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+}
+
+static int cht_wc_byte_reg_write(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct i2c_client *client = context;
+ int ret, orig_addr = client->addr;
+
+ if (!(reg & REG_ADDR_MASK)) {
+ dev_err(&client->dev, "Error I2C address not specified\n");
+ return -EINVAL;
+ }
+
+ client->addr = (reg & REG_ADDR_MASK) >> REG_ADDR_SHIFT;
+ ret = i2c_smbus_write_byte_data(client, reg & REG_OFFSET_MASK, val);
+ client->addr = orig_addr;
+
+ return ret;
+}
+
+static const struct regmap_config cht_wc_regmap_cfg = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .reg_write = cht_wc_byte_reg_write,
+ .reg_read = cht_wc_byte_reg_read,
+};
+
+static const struct regmap_irq cht_wc_regmap_irqs[] = {
+ REGMAP_IRQ_REG(CHT_WC_PWRSRC_IRQ, 0, BIT(CHT_WC_PWRSRC_IRQ)),
+ REGMAP_IRQ_REG(CHT_WC_THRM_IRQ, 0, BIT(CHT_WC_THRM_IRQ)),
+ REGMAP_IRQ_REG(CHT_WC_BCU_IRQ, 0, BIT(CHT_WC_BCU_IRQ)),
+ REGMAP_IRQ_REG(CHT_WC_ADC_IRQ, 0, BIT(CHT_WC_ADC_IRQ)),
+ REGMAP_IRQ_REG(CHT_WC_EXT_CHGR_IRQ, 0, BIT(CHT_WC_EXT_CHGR_IRQ)),
+ REGMAP_IRQ_REG(CHT_WC_GPIO_IRQ, 0, BIT(CHT_WC_GPIO_IRQ)),
+ REGMAP_IRQ_REG(CHT_WC_CRIT_IRQ, 0, BIT(CHT_WC_CRIT_IRQ)),
+};
+
+static const struct regmap_irq_chip cht_wc_regmap_irq_chip = {
+ .name = "cht_wc_irq_chip",
+ .status_base = CHT_WC_IRQLVL1,
+ .mask_base = CHT_WC_IRQLVL1_MASK,
+ .irqs = cht_wc_regmap_irqs,
+ .num_irqs = ARRAY_SIZE(cht_wc_regmap_irqs),
+ .num_regs = 1,
+};
+
+static int cht_wc_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct intel_soc_pmic *pmic;
+ acpi_status status;
+ unsigned long long hrv;
+ int ret;
+
+ status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_HRV", NULL, &hrv);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "Failed to get PMIC hardware revision\n");
+ return -ENODEV;
+ }
+ if (hrv != CHT_WC_HRV) {
+ dev_err(dev, "Invalid PMIC hardware revision: %llu\n", hrv);
+ return -ENODEV;
+ }
+ if (client->irq < 0) {
+ dev_err(dev, "Invalid IRQ\n");
+ return -EINVAL;
+ }
+
+ pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
+ if (!pmic)
+ return -ENOMEM;
+
+ pmic->irq = client->irq;
+ pmic->dev = dev;
+ i2c_set_clientdata(client, pmic);
+
+ pmic->regmap = devm_regmap_init(dev, NULL, client, &cht_wc_regmap_cfg);
+ if (IS_ERR(pmic->regmap))
+ return PTR_ERR(pmic->regmap);
+
+ ret = devm_regmap_add_irq_chip(dev, pmic->regmap, pmic->irq,
+ IRQF_ONESHOT | IRQF_SHARED, 0,
+ &cht_wc_regmap_irq_chip,
+ &pmic->irq_chip_data);
+ if (ret)
+ return ret;
+
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
+ cht_wc_dev, ARRAY_SIZE(cht_wc_dev), NULL, 0,
+ regmap_irq_get_domain(pmic->irq_chip_data));
+}
+
+static void cht_wc_shutdown(struct i2c_client *client)
+{
+ struct intel_soc_pmic *pmic = i2c_get_clientdata(client);
+
+ disable_irq(pmic->irq);
+}
+
+static int __maybe_unused cht_wc_suspend(struct device *dev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+ disable_irq(pmic->irq);
+
+ return 0;
+}
+
+static int __maybe_unused cht_wc_resume(struct device *dev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+ enable_irq(pmic->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cht_wc_pm_ops, cht_wc_suspend, cht_wc_resume);
+
+static const struct i2c_device_id cht_wc_i2c_id[] = {
+ { }
+};
+
+static const struct acpi_device_id cht_wc_acpi_ids[] = {
+ { "INT34D3", },
+ { }
+};
+
+static struct i2c_driver cht_wc_driver = {
+ .driver = {
+ .name = "CHT Whiskey Cove PMIC",
+ .pm = &cht_wc_pm_ops,
+ .acpi_match_table = cht_wc_acpi_ids,
+ },
+ .probe_new = cht_wc_probe,
+ .shutdown = cht_wc_shutdown,
+ .id_table = cht_wc_i2c_id,
+};
+builtin_i2c_driver(cht_wc_driver);
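The regmap above encodes the target I2C client address in the high byte of a 16-bit register number. A hypothetical consumer sketch, not part of the patch (the helper name is made up):

#include <linux/types.h>
#include <linux/regmap.h>

static int example_read_cht_wc_reg(struct regmap *regmap, u8 i2c_addr,
				   u8 offset, unsigned int *val)
{
	/* High byte selects the I2C address, low byte the register offset. */
	unsigned int reg = (i2c_addr << 8) | offset;

	return regmap_read(regmap, reg, val);
}

Reading CHT_WC_IRQLVL1 (0x6e02) this way would pass i2c_addr 0x6e and offset 0x02.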
diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c
index 124aad2b1d02..cd762d08f116 100644
--- a/drivers/mfd/ipaq-micro.c
+++ b/drivers/mfd/ipaq-micro.c
@@ -53,8 +53,6 @@ static void ipaq_micro_trigger_tx(struct ipaq_micro *micro)
tx->buf[bp++] = checksum;
tx->len = bp;
tx->index = 0;
- print_hex_dump_debug("data: ", DUMP_PREFIX_OFFSET, 16, 1,
- tx->buf, tx->len, true);
/* Enable interrupt */
val = readl(micro->base + UTCR3);
@@ -281,9 +279,6 @@ static void __init ipaq_micro_eeprom_dump(struct ipaq_micro *micro)
dev_info(micro->dev, "RAM size: %u KiB\n", ipaq_micro_to_u16(dump+92));
dev_info(micro->dev, "screen: %u x %u\n",
ipaq_micro_to_u16(dump+94), ipaq_micro_to_u16(dump+96));
- print_hex_dump_debug("eeprom: ", DUMP_PREFIX_OFFSET, 16, 1,
- dump, 256, true);
-
}
static void micro_tx_chars(struct ipaq_micro *micro)
diff --git a/drivers/mfd/lp87565.c b/drivers/mfd/lp87565.c
new file mode 100644
index 000000000000..340ad0c63744
--- /dev/null
+++ b/drivers/mfd/lp87565.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Author: Keerthy <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/lp87565.h>
+
+static const struct regmap_config lp87565_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = LP87565_REG_MAX,
+};
+
+static const struct mfd_cell lp87565_cells[] = {
+ { .name = "lp87565-q1-regulator", },
+ { .name = "lp87565-q1-gpio", },
+};
+
+static const struct of_device_id of_lp87565_match_table[] = {
+ { .compatible = "ti,lp87565", },
+ {
+ .compatible = "ti,lp87565-q1",
+ .data = (void *)LP87565_DEVICE_TYPE_LP87565_Q1,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_lp87565_match_table);
+
+static int lp87565_probe(struct i2c_client *client,
+ const struct i2c_device_id *ids)
+{
+ struct lp87565 *lp87565;
+ const struct of_device_id *of_id;
+ int ret;
+ unsigned int otpid;
+
+ lp87565 = devm_kzalloc(&client->dev, sizeof(*lp87565), GFP_KERNEL);
+ if (!lp87565)
+ return -ENOMEM;
+
+ lp87565->dev = &client->dev;
+
+ lp87565->regmap = devm_regmap_init_i2c(client, &lp87565_regmap_config);
+ if (IS_ERR(lp87565->regmap)) {
+ ret = PTR_ERR(lp87565->regmap);
+ dev_err(lp87565->dev,
+ "Failed to initialize register map: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(lp87565->regmap, LP87565_REG_OTP_REV, &otpid);
+ if (ret) {
+ dev_err(lp87565->dev, "Failed to read OTP ID\n");
+ return ret;
+ }
+
+ lp87565->rev = otpid & LP87565_OTP_REV_OTP_ID;
+
+ of_id = of_match_device(of_lp87565_match_table, &client->dev);
+ if (of_id)
+ lp87565->dev_type = (enum lp87565_device_type)of_id->data;
+
+ i2c_set_clientdata(client, lp87565);
+
+ ret = mfd_add_devices(lp87565->dev, PLATFORM_DEVID_AUTO, lp87565_cells,
+ ARRAY_SIZE(lp87565_cells), NULL, 0, NULL);
+
+ return ret;
+}
+
+static const struct i2c_device_id lp87565_id_table[] = {
+ { "lp87565-q1", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, lp87565_id_table);
+
+static struct i2c_driver lp87565_driver = {
+ .driver = {
+ .name = "lp87565",
+ .of_match_table = of_lp87565_match_table,
+ },
+ .probe = lp87565_probe,
+ .id_table = lp87565_id_table,
+};
+module_i2c_driver(lp87565_driver);
+
+MODULE_AUTHOR("J Keerthy <[email protected]>");
+MODULE_DESCRIPTION("lp87565 chip family Multi-Function Device driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/motorola-cpcap.c b/drivers/mfd/motorola-cpcap.c
index 3cab58ab0b84..d2cc1eabac05 100644
--- a/drivers/mfd/motorola-cpcap.c
+++ b/drivers/mfd/motorola-cpcap.c
@@ -260,17 +260,7 @@ static int cpcap_probe(struct spi_device *spi)
if (ret)
return ret;
- return of_platform_populate(spi->dev.of_node, NULL, NULL,
- &cpcap->spi->dev);
-}
-
-static int cpcap_remove(struct spi_device *pdev)
-{
- struct cpcap_ddata *cpcap = spi_get_drvdata(pdev);
-
- of_platform_depopulate(&cpcap->spi->dev);
-
- return 0;
+ return devm_of_platform_populate(&cpcap->spi->dev);
}
static struct spi_driver cpcap_driver = {
@@ -279,7 +269,6 @@ static struct spi_driver cpcap_driver = {
.of_match_table = cpcap_of_match,
},
.probe = cpcap_probe,
- .remove = cpcap_remove,
};
module_spi_driver(cpcap_driver);
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 9103affedcbc..3922a93f9f92 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -676,7 +676,7 @@ no_irq:
* otherwise continue and add devices using mfd helpers.
*/
if (node) {
- ret = of_platform_populate(node, NULL, NULL, &i2c->dev);
+ ret = devm_of_platform_populate(&i2c->dev);
if (ret < 0) {
goto err_irq;
} else if (pdata->pm_off && !pm_power_off) {
diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c
index 8653e8b9bb4f..2022bdfa7ab4 100644
--- a/drivers/mfd/qcom-spmi-pmic.c
+++ b/drivers/mfd/qcom-spmi-pmic.c
@@ -120,7 +120,6 @@ static const struct regmap_config spmi_regmap_config = {
static int pmic_spmi_probe(struct spmi_device *sdev)
{
- struct device_node *root = sdev->dev.of_node;
struct regmap *regmap;
regmap = devm_regmap_init_spmi_ext(sdev, &spmi_regmap_config);
@@ -131,19 +130,13 @@ static int pmic_spmi_probe(struct spmi_device *sdev)
if (sdev->usid % 2 == 0)
pmic_spmi_show_revid(regmap, &sdev->dev);
- return of_platform_populate(root, NULL, NULL, &sdev->dev);
-}
-
-static void pmic_spmi_remove(struct spmi_device *sdev)
-{
- of_platform_depopulate(&sdev->dev);
+ return devm_of_platform_populate(&sdev->dev);
}
MODULE_DEVICE_TABLE(of, pmic_spmi_id_table);
static struct spmi_driver pmic_spmi_driver = {
.probe = pmic_spmi_probe,
- .remove = pmic_spmi_remove,
.driver = {
.name = "pmic-spmi",
.of_match_table = pmic_spmi_id_table,
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index 8131d1975745..f4037d42a60f 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -155,6 +155,8 @@ static int rn5t618_i2c_remove(struct i2c_client *i2c)
pm_power_off = NULL;
}
+ unregister_restart_handler(&rn5t618_restart_handler);
+
return 0;
}
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 850590aac008..a0ac89dfdf0f 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -30,6 +30,7 @@
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/rtsx_pci.h>
+#include <linux/mmc/card.h>
#include <asm/unaligned.h>
#include "rtsx_pcr.h"
@@ -452,8 +453,12 @@ int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
}
spin_lock_irqsave(&pcr->lock, flags);
- if (pcr->trans_result == TRANS_RESULT_FAIL)
- err = -EINVAL;
+ if (pcr->trans_result == TRANS_RESULT_FAIL) {
+ err = -EILSEQ;
+ if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
+ pcr->dma_error_count++;
+ }
else if (pcr->trans_result == TRANS_NO_DEVICE)
err = -ENODEV;
spin_unlock_irqrestore(&pcr->lock, flags);
@@ -659,6 +664,13 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
if (err < 0)
return err;
+ /* Reduce card clock by 20MHz each time a DMA transfer error occurs */
+ if (card_clock == UHS_SDR104_MAX_DTR &&
+ pcr->dma_error_count &&
+ PCI_PID(pcr) == RTS5227_DEVICE_ID)
+ card_clock = UHS_SDR104_MAX_DTR -
+ (pcr->dma_error_count * 20000000);
+
card_clock /= 1000000;
pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
@@ -894,6 +906,7 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
pcr->card_removed |= SD_EXIST;
pcr->card_inserted &= ~SD_EXIST;
}
+ pcr->dma_error_count = 0;
}
if (int_reg & MS_INT) {
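A worked example of the throttling added above, assuming the standard definition UHS_SDR104_MAX_DTR == 208000000 from <linux/mmc/card.h>: after two DMA transfer errors on an RTS5227, a requested SDR104 clock becomes 208 MHz - 2 * 20 MHz = 168 MHz. dma_error_count is capped at RTS_MAX_TIMES_FREQ_REDUCTION and, per the hunks above, cleared again when the SD card-detect interrupt is handled.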
diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c
index 1f40baf1234e..93a8297de52a 100644
--- a/drivers/mfd/smsc-ece1099.c
+++ b/drivers/mfd/smsc-ece1099.c
@@ -69,8 +69,7 @@ static int smsc_i2c_probe(struct i2c_client *i2c,
#ifdef CONFIG_OF
if (i2c->dev.of_node)
- ret = of_platform_populate(i2c->dev.of_node,
- NULL, NULL, &i2c->dev);
+ ret = devm_of_platform_populate(&i2c->dev);
#endif
return ret;
diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
index 2182f00db101..a6675a449409 100644
--- a/drivers/mfd/stm32-timers.c
+++ b/drivers/mfd/stm32-timers.c
@@ -58,14 +58,7 @@ static int stm32_timers_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
- return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
-}
-
-static int stm32_timers_remove(struct platform_device *pdev)
-{
- of_platform_depopulate(&pdev->dev);
-
- return 0;
+ return devm_of_platform_populate(&pdev->dev);
}
static const struct of_device_id stm32_timers_of_match[] = {
@@ -76,7 +69,6 @@ MODULE_DEVICE_TABLE(of, stm32_timers_of_match);
static struct platform_driver stm32_timers_driver = {
.probe = stm32_timers_probe,
- .remove = stm32_timers_remove,
.driver = {
.name = "stm32-timers",
.of_match_table = stm32_timers_of_match,
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index d16e71bd9482..0c9f0390e891 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -797,7 +797,9 @@ static int tc6393xb_resume(struct platform_device *dev)
int ret;
int i;
- clk_prepare_enable(tc6393xb->clk);
+ ret = clk_prepare_enable(tc6393xb->clk);
+ if (ret)
+ return ret;
ret = tcpd->resume(dev);
if (ret)
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index c9339f85359b..cd4a6d7d6750 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -32,13 +32,13 @@
#include <linux/i2c.h>
#include <linux/i2c-ocores.h>
#include <linux/i2c-xiic.h>
-#include <linux/i2c/tsc2007.h>
#include <linux/spi/spi.h>
#include <linux/spi/xilinx_spi.h>
#include <linux/spi/max7301.h>
#include <linux/spi/mc33880.h>
+#include <linux/platform_data/tsc2007.h>
#include <linux/platform_data/media/timb_radio.h>
#include <linux/platform_data/media/timb_video.h>
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index b46c0cfc27d9..378c02d43bf7 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -638,8 +638,10 @@ int twl4030_sih_setup(struct device *dev, int module, int irq_base)
}
}
- if (status < 0)
+ if (status < 0) {
+ dev_err(dev, "module to setup SIH for not found\n");
return status;
+ }
agent = kzalloc(sizeof(*agent), GFP_KERNEL);
if (!agent)
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 13a4c1190dca..e70d35ef5c6d 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -1766,7 +1766,7 @@ int wm831x_device_init(struct wm831x *wm831x, int irq)
}
wm831x->locked = 1;
- if (pdata && pdata->pre_init) {
+ if (pdata->pre_init) {
ret = pdata->pre_init(wm831x);
if (ret != 0) {
dev_err(wm831x->dev, "pre_init() failed: %d\n", ret);
@@ -1774,19 +1774,17 @@ int wm831x_device_init(struct wm831x *wm831x, int irq)
}
}
- if (pdata) {
- for (i = 0; i < ARRAY_SIZE(pdata->gpio_defaults); i++) {
- if (!pdata->gpio_defaults[i])
- continue;
+ for (i = 0; i < ARRAY_SIZE(pdata->gpio_defaults); i++) {
+ if (!pdata->gpio_defaults[i])
+ continue;
- wm831x_reg_write(wm831x,
- WM831X_GPIO1_CONTROL + i,
- pdata->gpio_defaults[i] & 0xffff);
- }
+ wm831x_reg_write(wm831x,
+ WM831X_GPIO1_CONTROL + i,
+ pdata->gpio_defaults[i] & 0xffff);
}
/* Multiply by 10 as we have many subdevices of the same type */
- if (pdata && pdata->wm831x_num)
+ if (pdata->wm831x_num)
wm831x_num = pdata->wm831x_num * 10;
else
wm831x_num = -1;
@@ -1809,7 +1807,7 @@ int wm831x_device_init(struct wm831x *wm831x, int irq)
ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8311_devs, ARRAY_SIZE(wm8311_devs),
NULL, 0, NULL);
- if (!pdata || !pdata->disable_touch)
+ if (!pdata->disable_touch)
mfd_add_devices(wm831x->dev, wm831x_num,
touch_devs, ARRAY_SIZE(touch_devs),
NULL, 0, NULL);
@@ -1819,7 +1817,7 @@ int wm831x_device_init(struct wm831x *wm831x, int irq)
ret = mfd_add_devices(wm831x->dev, wm831x_num,
wm8312_devs, ARRAY_SIZE(wm8312_devs),
NULL, 0, NULL);
- if (!pdata || !pdata->disable_touch)
+ if (!pdata->disable_touch)
mfd_add_devices(wm831x->dev, wm831x_num,
touch_devs, ARRAY_SIZE(touch_devs),
NULL, 0, NULL);
@@ -1865,7 +1863,7 @@ int wm831x_device_init(struct wm831x *wm831x, int irq)
dev_info(wm831x->dev, "32.768kHz clock disabled, no RTC\n");
}
- if (pdata && pdata->backlight) {
+ if (pdata->backlight) {
/* Treat errors as non-critical */
ret = mfd_add_devices(wm831x->dev, wm831x_num, backlight_devs,
ARRAY_SIZE(backlight_devs), NULL,
@@ -1877,7 +1875,7 @@ int wm831x_device_init(struct wm831x *wm831x, int irq)
wm831x_otp_init(wm831x);
- if (pdata && pdata->post_init) {
+ if (pdata->post_init) {
ret = pdata->post_init(wm831x);
if (ret != 0) {
dev_err(wm831x->dev, "post_init() failed: %d\n", ret);
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
index 781af060f32d..22f7054d1b28 100644
--- a/drivers/mfd/wm831x-i2c.c
+++ b/drivers/mfd/wm831x-i2c.c
@@ -37,6 +37,10 @@ static int wm831x_i2c_probe(struct i2c_client *i2c,
if (i2c->dev.of_node) {
of_id = of_match_device(wm831x_of_match, &i2c->dev);
+ if (!of_id) {
+ dev_err(&i2c->dev, "Failed to match device\n");
+ return -ENODEV;
+ }
type = (enum wm831x_parent)of_id->data;
} else {
type = (enum wm831x_parent)id->driver_data;
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
index c332e2885b26..018ce652ae57 100644
--- a/drivers/mfd/wm831x-spi.c
+++ b/drivers/mfd/wm831x-spi.c
@@ -34,6 +34,10 @@ static int wm831x_spi_probe(struct spi_device *spi)
if (spi->dev.of_node) {
of_id = of_match_device(wm831x_of_match, &spi->dev);
+ if (!of_id) {
+ dev_err(&spi->dev, "Failed to match device\n");
+ return -ENODEV;
+ }
type = (enum wm831x_parent)of_id->data;
} else {
type = (enum wm831x_parent)id->driver_data;
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
index b75cf830d08a..93397cb05b15 100644
--- a/drivers/misc/cxl/Kconfig
+++ b/drivers/misc/cxl/Kconfig
@@ -11,11 +11,16 @@ config CXL_AFU_DRIVER_OPS
bool
default n
+config CXL_LIB
+ bool
+ default n
+
config CXL
tristate "Support for IBM Coherent Accelerators (CXL)"
depends on PPC_POWERNV && PCI_MSI && EEH
select CXL_BASE
select CXL_AFU_DRIVER_OPS
+ select CXL_LIB
default m
help
Select this option to enable driver support for IBM Coherent
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index c14fd6b65b5a..0b5fd749d96d 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -3,7 +3,7 @@ ccflags-$(CONFIG_PPC_WERROR) += -Werror
cxl-y += main.o file.o irq.o fault.o native.o
cxl-y += context.o sysfs.o pci.o trace.o
-cxl-y += vphb.o phb.o api.o
+cxl-y += vphb.o phb.o api.o cxllib.o
cxl-$(CONFIG_PPC_PSERIES) += flash.o guest.o of.o hcalls.o
cxl-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_CXL) += cxl.o
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index a03f8e7535e5..b1afeccbb97f 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -1010,6 +1010,7 @@ static inline void cxl_debugfs_add_afu_regs_psl8(struct cxl_afu *afu, struct den
void cxl_handle_fault(struct work_struct *work);
void cxl_prefault(struct cxl_context *ctx, u64 wed);
+int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar);
struct cxl *get_cxl_adapter(int num);
int cxl_alloc_sst(struct cxl_context *ctx);
@@ -1061,6 +1062,11 @@ int cxl_afu_slbia(struct cxl_afu *afu);
int cxl_data_cache_flush(struct cxl *adapter);
int cxl_afu_disable(struct cxl_afu *afu);
int cxl_psl_purge(struct cxl_afu *afu);
+int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
+ u32 *phb_index, u64 *capp_unit_id);
+int cxl_slot_is_switched(struct pci_dev *dev);
+int cxl_get_xsl9_dsnctl(u64 capp_unit_id, u64 *reg);
+u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9);
void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx);
void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx);
diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c
new file mode 100644
index 000000000000..5dba23ca2e5f
--- /dev/null
+++ b/drivers/misc/cxl/cxllib.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/hugetlb.h>
+#include <linux/sched/mm.h>
+#include <asm/pnv-pci.h>
+#include <misc/cxllib.h>
+
+#include "cxl.h"
+
+#define CXL_INVALID_DRA ~0ull
+#define CXL_DUMMY_READ_SIZE 128
+#define CXL_DUMMY_READ_ALIGN 8
+#define CXL_CAPI_WINDOW_START 0x2000000000000ull
+#define CXL_CAPI_WINDOW_LOG_SIZE 48
+#define CXL_XSL_CONFIG_CURRENT_VERSION CXL_XSL_CONFIG_VERSION1
+
+
+bool cxllib_slot_is_supported(struct pci_dev *dev, unsigned long flags)
+{
+ int rc;
+ u32 phb_index;
+ u64 chip_id, capp_unit_id;
+
+ /* No flags currently supported */
+ if (flags)
+ return false;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return false;
+
+ if (!cxl_is_power9())
+ return false;
+
+ if (cxl_slot_is_switched(dev))
+ return false;
+
+ /* on p9, some pci slots are not connected to a CAPP unit */
+ rc = cxl_calc_capp_routing(dev, &chip_id, &phb_index, &capp_unit_id);
+ if (rc)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(cxllib_slot_is_supported);
+
+static DEFINE_MUTEX(dra_mutex);
+static u64 dummy_read_addr = CXL_INVALID_DRA;
+
+static int allocate_dummy_read_buf(void)
+{
+ u64 buf, vaddr;
+ size_t buf_size;
+
+ /*
+ * The dummy read buffer is 128 bytes long, aligned on a
+ * 256-byte boundary, and we need its physical address.
+ */
+ buf_size = CXL_DUMMY_READ_SIZE + (1ull << CXL_DUMMY_READ_ALIGN);
+ buf = (u64) kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ vaddr = (buf + (1ull << CXL_DUMMY_READ_ALIGN) - 1) &
+ (~0ull << CXL_DUMMY_READ_ALIGN);
+
+ WARN((vaddr + CXL_DUMMY_READ_SIZE) > (buf + buf_size),
+ "Dummy read buffer alignment issue");
+ dummy_read_addr = virt_to_phys((void *) vaddr);
+ return 0;
+}
+
+int cxllib_get_xsl_config(struct pci_dev *dev, struct cxllib_xsl_config *cfg)
+{
+ int rc;
+ u32 phb_index;
+ u64 chip_id, capp_unit_id;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return -EINVAL;
+
+ mutex_lock(&dra_mutex);
+ if (dummy_read_addr == CXL_INVALID_DRA) {
+ rc = allocate_dummy_read_buf();
+ if (rc) {
+ mutex_unlock(&dra_mutex);
+ return rc;
+ }
+ }
+ mutex_unlock(&dra_mutex);
+
+ rc = cxl_calc_capp_routing(dev, &chip_id, &phb_index, &capp_unit_id);
+ if (rc)
+ return rc;
+
+ rc = cxl_get_xsl9_dsnctl(capp_unit_id, &cfg->dsnctl);
+ if (rc)
+ return rc;
+ if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+ /* workaround for DD1 - nbwind = capiind */
+ cfg->dsnctl |= ((u64)0x02 << (63-47));
+ }
+
+ cfg->version = CXL_XSL_CONFIG_CURRENT_VERSION;
+ cfg->log_bar_size = CXL_CAPI_WINDOW_LOG_SIZE;
+ cfg->bar_addr = CXL_CAPI_WINDOW_START;
+ cfg->dra = dummy_read_addr;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxllib_get_xsl_config);
+
+int cxllib_switch_phb_mode(struct pci_dev *dev, enum cxllib_mode mode,
+ unsigned long flags)
+{
+ int rc = 0;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return -EINVAL;
+
+ switch (mode) {
+ case CXL_MODE_PCI:
+ /*
+ * We currently don't support going back to PCI mode
+ * However, we'll turn the invalidations off, so that
+ * the firmware doesn't have to ack them and can do
+ * things like reset, etc. with no worries.
+ * So always return EPERM (can't go back to PCI) or
+ * EBUSY if we couldn't even turn off snooping.
+ */
+ rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_OFF);
+ if (rc)
+ rc = -EBUSY;
+ else
+ rc = -EPERM;
+ break;
+ case CXL_MODE_CXL:
+ /* DMA only supported on TVT1 for the time being */
+ if (flags != CXL_MODE_DMA_TVT1)
+ return -EINVAL;
+ rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_DMA_TVT1);
+ if (rc)
+ return rc;
+ rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON);
+ break;
+ default:
+ rc = -EINVAL;
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(cxllib_switch_phb_mode);
+
+/*
+ * When switching the PHB to capi mode, the TVT#1 entry for
+ * the Partitionable Endpoint is set in bypass mode, like
+ * in PCI mode.
+ * Configure the device dma to use TVT#1, which is done
+ * by calling dma_set_mask() with a mask large enough.
+ */
+int cxllib_set_device_dma(struct pci_dev *dev, unsigned long flags)
+{
+ int rc;
+
+ if (flags)
+ return -EINVAL;
+
+ rc = dma_set_mask(&dev->dev, DMA_BIT_MASK(64));
+ return rc;
+}
+EXPORT_SYMBOL_GPL(cxllib_set_device_dma);
+
+int cxllib_get_PE_attributes(struct task_struct *task,
+ unsigned long translation_mode,
+ struct cxllib_pe_attributes *attr)
+{
+ struct mm_struct *mm = NULL;
+
+ if (translation_mode != CXL_TRANSLATED_MODE &&
+ translation_mode != CXL_REAL_MODE)
+ return -EINVAL;
+
+ attr->sr = cxl_calculate_sr(false,
+ task == NULL,
+ translation_mode == CXL_REAL_MODE,
+ true);
+ attr->lpid = mfspr(SPRN_LPID);
+ if (task) {
+ mm = get_task_mm(task);
+ if (mm == NULL)
+ return -EINVAL;
+ /*
+ * Caller is keeping a reference on mm_users for as long
+ * as XSL uses the memory context
+ */
+ attr->pid = mm->context.id;
+ mmput(mm);
+ } else {
+ attr->pid = 0;
+ }
+ attr->tid = 0;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxllib_get_PE_attributes);
+
+int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
+{
+ int rc;
+ u64 dar;
+ struct vm_area_struct *vma = NULL;
+ unsigned long page_size;
+
+ if (mm == NULL)
+ return -EFAULT;
+
+ down_read(&mm->mmap_sem);
+
+ for (dar = addr; dar < addr + size; dar += page_size) {
+ if (!vma || dar < vma->vm_start || dar >= vma->vm_end) {
+ vma = find_vma(mm, dar);
+ if (!vma) {
+ pr_err("Can't find vma for addr %016llx\n", dar);
+ rc = -EFAULT;
+ goto out;
+ }
+ /* get the size of the pages allocated */
+ page_size = vma_kernel_pagesize(vma);
+ }
+
+ rc = cxl_handle_mm_fault(mm, flags, dar);
+ if (rc) {
+ pr_err("cxl_handle_mm_fault failed %d\n", rc);
+ rc = -EFAULT;
+ goto out;
+ }
+ }
+ rc = 0;
+out:
+ up_read(&mm->mmap_sem);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(cxllib_handle_fault);
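
The cxllib entry points added above are meant to be called directly by a CAPI-aware PCI device driver rather than through the regular cxl user API. Below is a minimal sketch of how such a driver might chain them at probe time; it assumes the mode and flag names from the misc/cxllib.h header introduced by the same series, and the foo_* function plus the device-programming step are placeholders, not part of this patch.

    #include <linux/pci.h>
    #include <misc/cxllib.h>

    static int foo_enable_capi(struct pci_dev *pdev)
    {
            struct cxllib_xsl_config cfg;
            int rc;

            /* No flags are defined yet, so pass 0 */
            if (!cxllib_slot_is_supported(pdev, 0))
                    return -ENODEV;

            /* Fetch the XSL parameters the device must be programmed with */
            rc = cxllib_get_xsl_config(pdev, &cfg);
            if (rc)
                    return rc;
            /* ... write cfg.dsnctl, cfg.bar_addr, cfg.dra into the device ... */

            /* Switch the PHB to CAPI mode; DMA is only supported on TVT#1 */
            rc = cxllib_switch_phb_mode(pdev, CXL_MODE_CXL, CXL_MODE_DMA_TVT1);
            if (rc)
                    return rc;

            /* TVT#1 is in bypass mode, so request a full 64-bit DMA mask */
            return cxllib_set_device_dma(pdev, 0);
    }
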
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index c79e39bad7a4..6eed7d03e2b5 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -132,18 +132,15 @@ static int cxl_handle_segment_miss(struct cxl_context *ctx,
return IRQ_HANDLED;
}
-static void cxl_handle_page_fault(struct cxl_context *ctx,
- struct mm_struct *mm, u64 dsisr, u64 dar)
+int cxl_handle_mm_fault(struct mm_struct *mm, u64 dsisr, u64 dar)
{
unsigned flt = 0;
int result;
unsigned long access, flags, inv_flags = 0;
- trace_cxl_pte_miss(ctx, dsisr, dar);
-
if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
pr_devel("copro_handle_mm_fault failed: %#x\n", result);
- return cxl_ack_ae(ctx);
+ return result;
}
if (!radix_enabled()) {
@@ -155,9 +152,8 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
if (dsisr & CXL_PSL_DSISR_An_S)
access |= _PAGE_WRITE;
- access |= _PAGE_PRIVILEGED;
- if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID))
- access &= ~_PAGE_PRIVILEGED;
+ if (!mm && (REGION_ID(dar) != USER_REGION_ID))
+ access |= _PAGE_PRIVILEGED;
if (dsisr & DSISR_NOHPTE)
inv_flags |= HPTE_NOHPTE_UPDATE;
@@ -166,8 +162,21 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
hash_page_mm(mm, dar, access, 0x300, inv_flags);
local_irq_restore(flags);
}
- pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
- cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
+ return 0;
+}
+
+static void cxl_handle_page_fault(struct cxl_context *ctx,
+ struct mm_struct *mm,
+ u64 dsisr, u64 dar)
+{
+ trace_cxl_pte_miss(ctx, dsisr, dar);
+
+ if (cxl_handle_mm_fault(mm, dsisr, dar)) {
+ cxl_ack_ae(ctx);
+ } else {
+ pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
+ cxl_ops->ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
+ }
}
/*
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
index 7c61c70ba3f6..3aa216bf0939 100644
--- a/drivers/misc/cxl/flash.c
+++ b/drivers/misc/cxl/flash.c
@@ -401,8 +401,10 @@ static int device_open(struct inode *inode, struct file *file)
if (down_interruptible(&sem) != 0)
return -EPERM;
- if (!(adapter = get_cxl_adapter(adapter_num)))
- return -ENODEV;
+ if (!(adapter = get_cxl_adapter(adapter_num))) {
+ rc = -ENODEV;
+ goto err_unlock;
+ }
file->private_data = adapter;
continue_token = 0;
@@ -446,6 +448,8 @@ err1:
free_page((unsigned long) le);
err:
put_device(&adapter->dev);
+err_unlock:
+ up(&sem);
return rc;
}
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 2b2f8894149d..4a82c313cf71 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -586,17 +586,17 @@ err:
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif
-static u64 calculate_sr(struct cxl_context *ctx)
+u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9)
{
u64 sr = 0;
set_endian(sr);
- if (ctx->master)
+ if (master)
sr |= CXL_PSL_SR_An_MP;
if (mfspr(SPRN_LPCR) & LPCR_TC)
sr |= CXL_PSL_SR_An_TC;
- if (ctx->kernel) {
- if (!ctx->real_mode)
+ if (kernel) {
+ if (!real_mode)
sr |= CXL_PSL_SR_An_R;
sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
} else {
@@ -608,7 +608,7 @@ static u64 calculate_sr(struct cxl_context *ctx)
if (!test_tsk_thread_flag(current, TIF_32BIT))
sr |= CXL_PSL_SR_An_SF;
}
- if (cxl_is_power9()) {
+ if (p9) {
if (radix_enabled())
sr |= CXL_PSL_SR_An_XLAT_ror;
else
@@ -617,6 +617,12 @@ static u64 calculate_sr(struct cxl_context *ctx)
return sr;
}
+static u64 calculate_sr(struct cxl_context *ctx)
+{
+ return cxl_calculate_sr(ctx->master, ctx->kernel, ctx->real_mode,
+ cxl_is_power9());
+}
+
static void update_ivtes_directed(struct cxl_context *ctx)
{
bool need_update = (ctx->status == STARTED);
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 1eb9859809bf..d18b3d9292fd 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -375,7 +375,7 @@ static u64 get_capp_unit_id(struct device_node *np, u32 phb_index)
return 0;
}
-static int calc_capp_routing(struct pci_dev *dev, u64 *chipid,
+int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
u32 *phb_index, u64 *capp_unit_id)
{
int rc;
@@ -408,17 +408,9 @@ static int calc_capp_routing(struct pci_dev *dev, u64 *chipid,
return 0;
}
-static int init_implementation_adapter_regs_psl9(struct cxl *adapter, struct pci_dev *dev)
+int cxl_get_xsl9_dsnctl(u64 capp_unit_id, u64 *reg)
{
- u64 xsl_dsnctl, psl_fircntl;
- u64 chipid;
- u32 phb_index;
- u64 capp_unit_id;
- int rc;
-
- rc = calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
- if (rc)
- return rc;
+ u64 xsl_dsnctl;
/*
* CAPI Identifier bits [0:7]
@@ -454,6 +446,27 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter, struct pci
xsl_dsnctl |= ((u64)0x04 << (63-55));
}
+ *reg = xsl_dsnctl;
+ return 0;
+}
+
+static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
+ struct pci_dev *dev)
+{
+ u64 xsl_dsnctl, psl_fircntl;
+ u64 chipid;
+ u32 phb_index;
+ u64 capp_unit_id;
+ int rc;
+
+ rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
+ if (rc)
+ return rc;
+
+ rc = cxl_get_xsl9_dsnctl(capp_unit_id, &xsl_dsnctl);
+ if (rc)
+ return rc;
+
cxl_p1_write(adapter, CXL_XSL9_DSNCTL, xsl_dsnctl);
/* Set fir_cntl to recommended value for production env */
@@ -505,7 +518,7 @@ static int init_implementation_adapter_regs_psl8(struct cxl *adapter, struct pci
u64 capp_unit_id;
int rc;
- rc = calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
+ rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
if (rc)
return rc;
@@ -538,7 +551,7 @@ static int init_implementation_adapter_regs_xsl(struct cxl *adapter, struct pci_
u64 capp_unit_id;
int rc;
- rc = calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
+ rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
if (rc)
return rc;
@@ -1897,7 +1910,7 @@ static void cxl_pci_remove_adapter(struct cxl *adapter)
#define CXL_MAX_PCIEX_PARENT 2
-static int cxl_slot_is_switched(struct pci_dev *dev)
+int cxl_slot_is_switched(struct pci_dev *dev)
{
struct device_node *np;
int depth = 0;
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index d3fe3ea902d4..eb29113e0bac 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -375,6 +375,7 @@ int enclosure_add_device(struct enclosure_device *edev, int component,
struct device *dev)
{
struct enclosure_component *cdev;
+ int err;
if (!edev || component >= edev->components)
return -EINVAL;
@@ -384,12 +385,17 @@ int enclosure_add_device(struct enclosure_device *edev, int component,
if (cdev->dev == dev)
return -EEXIST;
- if (cdev->dev)
+ if (cdev->dev) {
enclosure_remove_links(cdev);
-
- put_device(cdev->dev);
+ put_device(cdev->dev);
+ }
cdev->dev = get_device(dev);
- return enclosure_add_links(cdev);
+ err = enclosure_add_links(cdev);
+ if (err) {
+ put_device(cdev->dev);
+ cdev->dev = NULL;
+ }
+ return err;
}
EXPORT_SYMBOL_GPL(enclosure_add_device);
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 57e254aac48d..7db8c7a8d38d 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -20,6 +20,7 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
@@ -27,7 +28,6 @@
#include <linux/bitops.h>
#include <linux/gpio.h>
-#include <asm/mach-jz4740/gpio.h>
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
@@ -901,15 +901,6 @@ static const struct mmc_host_ops jz4740_mmc_ops = {
.enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
};
-static const struct jz_gpio_bulk_request jz4740_mmc_pins[] = {
- JZ_GPIO_BULK_PIN(MSC_CMD),
- JZ_GPIO_BULK_PIN(MSC_CLK),
- JZ_GPIO_BULK_PIN(MSC_DATA0),
- JZ_GPIO_BULK_PIN(MSC_DATA1),
- JZ_GPIO_BULK_PIN(MSC_DATA2),
- JZ_GPIO_BULK_PIN(MSC_DATA3),
-};
-
static int jz4740_mmc_request_gpio(struct device *dev, int gpio,
const char *name, bool output, int value)
{
@@ -973,15 +964,6 @@ static void jz4740_mmc_free_gpios(struct platform_device *pdev)
gpio_free(pdata->gpio_power);
}
-static inline size_t jz4740_mmc_num_pins(struct jz4740_mmc_host *host)
-{
- size_t num_pins = ARRAY_SIZE(jz4740_mmc_pins);
- if (host->pdata && host->pdata->data_1bit)
- num_pins -= 3;
-
- return num_pins;
-}
-
static int jz4740_mmc_probe(struct platform_device* pdev)
{
int ret;
@@ -1022,15 +1004,9 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
goto err_free_host;
}
- ret = jz_gpio_bulk_request(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
- if (ret) {
- dev_err(&pdev->dev, "Failed to request mmc pins: %d\n", ret);
- goto err_free_host;
- }
-
ret = jz4740_mmc_request_gpios(mmc, pdev);
if (ret)
- goto err_gpio_bulk_free;
+ goto err_release_dma;
mmc->ops = &jz4740_mmc_ops;
mmc->f_min = JZ_MMC_CLK_RATE / 128;
@@ -1086,10 +1062,9 @@ err_free_irq:
free_irq(host->irq, host);
err_free_gpios:
jz4740_mmc_free_gpios(pdev);
-err_gpio_bulk_free:
+err_release_dma:
if (host->use_dma)
jz4740_mmc_release_dma_channels(host);
- jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
err_free_host:
mmc_free_host(mmc);
@@ -1109,7 +1084,6 @@ static int jz4740_mmc_remove(struct platform_device *pdev)
free_irq(host->irq, host);
jz4740_mmc_free_gpios(pdev);
- jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
if (host->use_dma)
jz4740_mmc_release_dma_channels(host);
@@ -1123,20 +1097,12 @@ static int jz4740_mmc_remove(struct platform_device *pdev)
static int jz4740_mmc_suspend(struct device *dev)
{
- struct jz4740_mmc_host *host = dev_get_drvdata(dev);
-
- jz_gpio_bulk_suspend(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
-
- return 0;
+ return pinctrl_pm_select_sleep_state(dev);
}
static int jz4740_mmc_resume(struct device *dev)
{
- struct jz4740_mmc_host *host = dev_get_drvdata(dev);
-
- jz_gpio_bulk_resume(jz4740_mmc_pins, jz4740_mmc_num_pins(host));
-
- return 0;
+ return pinctrl_pm_select_default_state(dev);
}
static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 5551c36adbdf..0d06a1f07d82 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -25,7 +25,6 @@
#include <linux/gpio.h>
-#include <asm/mach-jz4740/gpio.h>
#include <asm/mach-jz4740/jz4740_nand.h>
#define JZ_REG_NAND_CTRL 0x50
@@ -310,34 +309,20 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
uint8_t *nand_dev_id)
{
int ret;
- int gpio;
- char gpio_name[9];
char res_name[6];
uint32_t ctrl;
struct nand_chip *chip = &nand->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
- /* Request GPIO port. */
- gpio = JZ_GPIO_MEM_CS0 + bank - 1;
- sprintf(gpio_name, "NAND CS%d", bank);
- ret = gpio_request(gpio, gpio_name);
- if (ret) {
- dev_warn(&pdev->dev,
- "Failed to request %s gpio %d: %d\n",
- gpio_name, gpio, ret);
- goto notfound_gpio;
- }
-
/* Request I/O resource. */
sprintf(res_name, "bank%d", bank);
ret = jz_nand_ioremap_resource(pdev, res_name,
&nand->bank_mem[bank - 1],
&nand->bank_base[bank - 1]);
if (ret)
- goto notfound_resource;
+ return ret;
/* Enable chip in bank. */
- jz_gpio_set_function(gpio, JZ_GPIO_FUNC_MEM_CS0);
ctrl = readl(nand->base + JZ_REG_NAND_CTRL);
ctrl |= JZ_NAND_CTRL_ENABLE_CHIP(bank - 1);
writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
@@ -377,12 +362,8 @@ notfound_id:
dev_info(&pdev->dev, "No chip found on bank %i\n", bank);
ctrl &= ~(JZ_NAND_CTRL_ENABLE_CHIP(bank - 1));
writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
- jz_gpio_set_function(gpio, JZ_GPIO_FUNC_NONE);
jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
nand->bank_base[bank - 1]);
-notfound_resource:
- gpio_free(gpio);
-notfound_gpio:
return ret;
}
@@ -503,7 +484,6 @@ err_nand_release:
err_unclaim_banks:
while (chipnr--) {
unsigned char bank = nand->banks[chipnr];
- gpio_free(JZ_GPIO_MEM_CS0 + bank - 1);
jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
nand->bank_base[bank - 1]);
}
@@ -530,7 +510,6 @@ static int jz_nand_remove(struct platform_device *pdev)
if (bank != 0) {
jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
nand->bank_base[bank - 1]);
- gpio_free(JZ_GPIO_MEM_CS0 + bank - 1);
}
}
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 2d956cb59d06..01cab9548785 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -225,8 +225,10 @@ static int com20020pci_probe(struct pci_dev *pdev,
card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev),
GFP_KERNEL);
- if (!card)
- return -ENOMEM;
+ if (!card) {
+ ret = -ENOMEM;
+ goto out_port;
+ }
card->index = i;
card->pci_priv = priv;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2865f31c6076..14ff622190a5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1820,7 +1820,7 @@ err_undo_flags:
*/
static int __bond_release_one(struct net_device *bond_dev,
struct net_device *slave_dev,
- bool all)
+ bool all, bool unregister)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *oldcurrent;
@@ -1965,7 +1965,10 @@ static int __bond_release_one(struct net_device *bond_dev,
dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
}
- dev_set_mtu(slave_dev, slave->original_mtu);
+ if (unregister)
+ __dev_set_mtu(slave_dev, slave->original_mtu);
+ else
+ dev_set_mtu(slave_dev, slave->original_mtu);
slave_dev->priv_flags &= ~IFF_BONDING;
@@ -1977,7 +1980,7 @@ static int __bond_release_one(struct net_device *bond_dev,
/* A wrapper used because of ndo_del_link */
int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
{
- return __bond_release_one(bond_dev, slave_dev, false);
+ return __bond_release_one(bond_dev, slave_dev, false, false);
}
/* First release a slave and then destroy the bond if no more slaves are left.
@@ -1989,7 +1992,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
struct bonding *bond = netdev_priv(bond_dev);
int ret;
- ret = bond_release(bond_dev, slave_dev);
+ ret = __bond_release_one(bond_dev, slave_dev, false, true);
if (ret == 0 && !bond_has_slaves(bond)) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
netdev_info(bond_dev, "Destroying bond %s\n",
@@ -3060,7 +3063,7 @@ static int bond_slave_netdev_event(unsigned long event,
if (bond_dev->type != ARPHRD_ETHER)
bond_release_and_destroy(bond_dev, slave_dev);
else
- bond_release(bond_dev, slave_dev);
+ __bond_release_one(bond_dev, slave_dev, false, true);
break;
case NETDEV_UP:
case NETDEV_CHANGE:
@@ -4252,7 +4255,7 @@ static void bond_uninit(struct net_device *bond_dev)
/* Release the bonded slaves */
bond_for_each_slave(bond, slave, iter)
- __bond_release_one(bond_dev, slave->dev, true);
+ __bond_release_one(bond_dev, slave->dev, true, true);
netdev_info(bond_dev, "Released all slaves\n");
arr = rtnl_dereference(bond->slave_arr);
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index e69ebdd65658..26d25749c3e4 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3314,10 +3314,11 @@ static const struct macb_config sama5d2_config = {
static const struct macb_config sama5d3_config = {
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
- | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+ | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
+ .jumbo_max_len = 10240,
};
static const struct macb_config sama5d4_config = {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index c6700b91a2df..fe166e0f6781 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -300,9 +300,9 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
mtu);
}
-int hns_nic_net_xmit_hw(struct net_device *ndev,
- struct sk_buff *skb,
- struct hns_nic_ring_data *ring_data)
+netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_ring *ring = ring_data->ring;
@@ -361,6 +361,10 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
netdev_tx_sent_queue(dev_queue, skb->len);
+ netif_trans_update(ndev);
+ ndev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_packets++;
+
wmb(); /* commit all data before submit */
assert(skb->queue_mapping < priv->ae_handle->q_num);
hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
@@ -1469,17 +1473,11 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- int ret;
assert(skb->queue_mapping < ndev->ae_handle->q_num);
- ret = hns_nic_net_xmit_hw(ndev, skb,
- &tx_ring_data(priv, skb->queue_mapping));
- if (ret == NETDEV_TX_OK) {
- netif_trans_update(ndev);
- ndev->stats.tx_bytes += skb->len;
- ndev->stats.tx_packets++;
- }
- return (netdev_tx_t)ret;
+
+ return hns_nic_net_xmit_hw(ndev, skb,
+ &tx_ring_data(priv, skb->queue_mapping));
}
static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index 1b83232082b2..9cb4c7884201 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -92,8 +92,8 @@ void hns_ethtool_set_ops(struct net_device *ndev);
void hns_nic_net_reset(struct net_device *ndev);
void hns_nic_net_reinit(struct net_device *netdev);
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h);
-int hns_nic_net_xmit_hw(struct net_device *ndev,
- struct sk_buff *skb,
- struct hns_nic_ring_data *ring_data);
+netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data);
#endif /**__HNS_ENET_H */
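
With hns_nic_net_xmit_hw() now returning netdev_tx_t and updating the transmit statistics itself, hns_nic_net_xmit() reduces to a plain pass-through. For reference, a minimal sketch of the shape such an ndo_start_xmit handler ends up with; the foo names and the ring-full test are placeholders, not HNS code.

    static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
    {
            if (foo_tx_ring_full(ndev)) {
                    /* Stop the queue; the core requeues the same skb later */
                    netif_stop_queue(ndev);
                    return NETDEV_TX_BUSY;
            }

            /* ... map buffers and post descriptors to the hardware ... */

            netif_trans_update(ndev);
            ndev->stats.tx_bytes += skb->len;
            ndev->stats.tx_packets++;
            return NETDEV_TX_OK;
    }
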
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index e5221d95afe1..017e08452d8c 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -261,7 +261,7 @@ static int hns_mdio_write(struct mii_bus *bus,
/* config the data needed writing */
cmd_reg_cfg = devad;
- op = MDIO_C45_WRITE_ADDR;
+ op = MDIO_C45_WRITE_DATA;
}
MDIO_SET_REG_FIELD(mdio_dev, MDIO_WDATA_REG, MDIO_WDATA_DATA_M,
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 3e0a695537e2..d17c2b03f580 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -469,56 +469,6 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
}
}
-static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
-{
- int i;
- struct device *dev = &adapter->vdev->dev;
-
- if (adapter->buffer_list_addr != NULL) {
- if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
- dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
- DMA_BIDIRECTIONAL);
- adapter->buffer_list_dma = DMA_ERROR_CODE;
- }
- free_page((unsigned long)adapter->buffer_list_addr);
- adapter->buffer_list_addr = NULL;
- }
-
- if (adapter->filter_list_addr != NULL) {
- if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
- dma_unmap_single(dev, adapter->filter_list_dma, 4096,
- DMA_BIDIRECTIONAL);
- adapter->filter_list_dma = DMA_ERROR_CODE;
- }
- free_page((unsigned long)adapter->filter_list_addr);
- adapter->filter_list_addr = NULL;
- }
-
- if (adapter->rx_queue.queue_addr != NULL) {
- dma_free_coherent(dev, adapter->rx_queue.queue_len,
- adapter->rx_queue.queue_addr,
- adapter->rx_queue.queue_dma);
- adapter->rx_queue.queue_addr = NULL;
- }
-
- for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
- if (adapter->rx_buff_pool[i].active)
- ibmveth_free_buffer_pool(adapter,
- &adapter->rx_buff_pool[i]);
-
- if (adapter->bounce_buffer != NULL) {
- if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
- dma_unmap_single(&adapter->vdev->dev,
- adapter->bounce_buffer_dma,
- adapter->netdev->mtu + IBMVETH_BUFF_OH,
- DMA_BIDIRECTIONAL);
- adapter->bounce_buffer_dma = DMA_ERROR_CODE;
- }
- kfree(adapter->bounce_buffer);
- adapter->bounce_buffer = NULL;
- }
-}
-
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
@@ -575,14 +525,17 @@ static int ibmveth_open(struct net_device *netdev)
for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
rxq_entries += adapter->rx_buff_pool[i].size;
+ rc = -ENOMEM;
adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
- adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+ if (!adapter->buffer_list_addr) {
+ netdev_err(netdev, "unable to allocate list pages\n");
+ goto out;
+ }
- if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
- netdev_err(netdev, "unable to allocate filter or buffer list "
- "pages\n");
- rc = -ENOMEM;
- goto err_out;
+ adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+ if (!adapter->filter_list_addr) {
+ netdev_err(netdev, "unable to allocate filter pages\n");
+ goto out_free_buffer_list;
}
dev = &adapter->vdev->dev;
@@ -592,22 +545,21 @@ static int ibmveth_open(struct net_device *netdev)
adapter->rx_queue.queue_addr =
dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
&adapter->rx_queue.queue_dma, GFP_KERNEL);
- if (!adapter->rx_queue.queue_addr) {
- rc = -ENOMEM;
- goto err_out;
- }
+ if (!adapter->rx_queue.queue_addr)
+ goto out_free_filter_list;
adapter->buffer_list_dma = dma_map_single(dev,
adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, adapter->buffer_list_dma)) {
+ netdev_err(netdev, "unable to map buffer list pages\n");
+ goto out_free_queue_mem;
+ }
+
adapter->filter_list_dma = dma_map_single(dev,
adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-
- if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
- (dma_mapping_error(dev, adapter->filter_list_dma))) {
- netdev_err(netdev, "unable to map filter or buffer list "
- "pages\n");
- rc = -ENOMEM;
- goto err_out;
+ if (dma_mapping_error(dev, adapter->filter_list_dma)) {
+ netdev_err(netdev, "unable to map filter list pages\n");
+ goto out_unmap_buffer_list;
}
adapter->rx_queue.index = 0;
@@ -638,7 +590,7 @@ static int ibmveth_open(struct net_device *netdev)
rxq_desc.desc,
mac_address);
rc = -ENONET;
- goto err_out;
+ goto out_unmap_filter_list;
}
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
@@ -648,7 +600,7 @@ static int ibmveth_open(struct net_device *netdev)
netdev_err(netdev, "unable to alloc pool\n");
adapter->rx_buff_pool[i].active = 0;
rc = -ENOMEM;
- goto err_out;
+ goto out_free_buffer_pools;
}
}
@@ -662,22 +614,21 @@ static int ibmveth_open(struct net_device *netdev)
lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
- goto err_out;
+ goto out_free_buffer_pools;
}
+ rc = -ENOMEM;
adapter->bounce_buffer =
kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
- if (!adapter->bounce_buffer) {
- rc = -ENOMEM;
- goto err_out_free_irq;
- }
+ if (!adapter->bounce_buffer)
+ goto out_free_irq;
+
adapter->bounce_buffer_dma =
dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
netdev_err(netdev, "unable to map bounce buffer\n");
- rc = -ENOMEM;
- goto err_out_free_irq;
+ goto out_free_bounce_buffer;
}
netdev_dbg(netdev, "initial replenish cycle\n");
@@ -689,10 +640,31 @@ static int ibmveth_open(struct net_device *netdev)
return 0;
-err_out_free_irq:
+out_free_bounce_buffer:
+ kfree(adapter->bounce_buffer);
+out_free_irq:
free_irq(netdev->irq, netdev);
-err_out:
- ibmveth_cleanup(adapter);
+out_free_buffer_pools:
+ while (--i >= 0) {
+ if (adapter->rx_buff_pool[i].active)
+ ibmveth_free_buffer_pool(adapter,
+ &adapter->rx_buff_pool[i]);
+ }
+out_unmap_filter_list:
+ dma_unmap_single(dev, adapter->filter_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+out_unmap_buffer_list:
+ dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+out_free_queue_mem:
+ dma_free_coherent(dev, adapter->rx_queue.queue_len,
+ adapter->rx_queue.queue_addr,
+ adapter->rx_queue.queue_dma);
+out_free_filter_list:
+ free_page((unsigned long)adapter->filter_list_addr);
+out_free_buffer_list:
+ free_page((unsigned long)adapter->buffer_list_addr);
+out:
napi_disable(&adapter->napi);
return rc;
}
@@ -700,7 +672,9 @@ err_out:
static int ibmveth_close(struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->vdev->dev;
long lpar_rc;
+ int i;
netdev_dbg(netdev, "close starting\n");
@@ -724,7 +698,27 @@ static int ibmveth_close(struct net_device *netdev)
ibmveth_update_rx_no_buffer(adapter);
- ibmveth_cleanup(adapter);
+ dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+ free_page((unsigned long)adapter->buffer_list_addr);
+
+ dma_unmap_single(dev, adapter->filter_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+ free_page((unsigned long)adapter->filter_list_addr);
+
+ dma_free_coherent(dev, adapter->rx_queue.queue_len,
+ adapter->rx_queue.queue_addr,
+ adapter->rx_queue.queue_dma);
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ if (adapter->rx_buff_pool[i].active)
+ ibmveth_free_buffer_pool(adapter,
+ &adapter->rx_buff_pool[i]);
+
+ dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma,
+ adapter->netdev->mtu + IBMVETH_BUFF_OH,
+ DMA_BIDIRECTIONAL);
+ kfree(adapter->bounce_buffer);
netdev_dbg(netdev, "close complete\n");
@@ -1719,11 +1713,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
-
- adapter->buffer_list_dma = DMA_ERROR_CODE;
- adapter->filter_list_dma = DMA_ERROR_CODE;
- adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
-
netdev_dbg(netdev, "registering netdev...\n");
ibmveth_set_features(netdev, netdev->features);
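
The reworked ibmveth_open() above replaces the catch-all ibmveth_cleanup() with per-step unwind labels and, instead of relying on DMA_ERROR_CODE sentinels, checks every streaming mapping with dma_mapping_error() right after dma_map_single(). A minimal sketch of that check, with a placeholder helper name; the 4096-byte length mirrors the list pages used here:

    #include <linux/dma-mapping.h>

    static int foo_map_list(struct device *dev, void *page, dma_addr_t *handle)
    {
            *handle = dma_map_single(dev, page, 4096, DMA_BIDIRECTIONAL);
            if (dma_mapping_error(dev, *handle))
                    return -ENOMEM; /* caller unwinds earlier steps in reverse */
            return 0;
    }
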
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 3e26d27ad213..63784576ae8b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2348,30 +2348,19 @@ static void fm10k_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
}
-/**
- * fm10k_io_reset_notify - called when PCI function is reset
- * @pdev: Pointer to PCI device
- *
- * This callback is called when the PCI function is reset such as from
- * /sys/class/net/<enpX>/device/reset or similar. When prepare is true, it
- * means we should prepare for a function reset. If prepare is false, it means
- * the function reset just occurred.
- */
-static void fm10k_io_reset_notify(struct pci_dev *pdev, bool prepare)
+static void fm10k_io_reset_prepare(struct pci_dev *pdev)
{
- struct fm10k_intfc *interface = pci_get_drvdata(pdev);
- int err = 0;
-
- if (prepare) {
- /* warn incase we have any active VF devices */
- if (pci_num_vf(pdev))
- dev_warn(&pdev->dev,
- "PCIe FLR may cause issues for any active VF devices\n");
+ /* warn in case we have any active VF devices */
+ if (pci_num_vf(pdev))
+ dev_warn(&pdev->dev,
+ "PCIe FLR may cause issues for any active VF devices\n");
+ fm10k_prepare_suspend(pci_get_drvdata(pdev));
+}
- fm10k_prepare_suspend(interface);
- } else {
- err = fm10k_handle_resume(interface);
- }
+static void fm10k_io_reset_done(struct pci_dev *pdev)
+{
+ struct fm10k_intfc *interface = pci_get_drvdata(pdev);
+ int err = fm10k_handle_resume(interface);
if (err) {
dev_warn(&pdev->dev,
@@ -2384,7 +2373,8 @@ static const struct pci_error_handlers fm10k_err_handler = {
.error_detected = fm10k_io_error_detected,
.slot_reset = fm10k_io_slot_reset,
.resume = fm10k_io_resume,
- .reset_notify = fm10k_io_reset_notify,
+ .reset_prepare = fm10k_io_reset_prepare,
+ .reset_done = fm10k_io_reset_done,
};
static struct pci_driver fm10k_driver = {
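
fm10k here (and mwifiex further down) is converted from the single reset_notify(pdev, prepare) callback to the split reset_prepare()/reset_done() hooks of struct pci_error_handlers. A minimal sketch of the resulting shape for a hypothetical foo driver:

    static void foo_reset_prepare(struct pci_dev *pdev)
    {
            /* Quiesce the device; the function reset happens after this returns */
    }

    static void foo_reset_done(struct pci_dev *pdev)
    {
            /* PCI config state has been restored; bring the device back up */
    }

    static const struct pci_error_handlers foo_err_handler = {
            .reset_prepare  = foo_reset_prepare,
            .reset_done     = foo_reset_done,
    };
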
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 41a5c5d2ac89..b3d0c2e6347a 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2401,15 +2401,10 @@ static int mtk_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct device_node *mac_np;
- const struct of_device_id *match;
- struct mtk_soc_data *soc;
struct mtk_eth *eth;
int err;
int i;
- match = of_match_device(of_mtk_match, &pdev->dev);
- soc = (struct mtk_soc_data *)match->data;
-
eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
if (!eth)
return -ENOMEM;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index fc10f27e0a0c..6a65c8b33807 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -356,6 +356,7 @@ err_free_app_priv:
static void nfp_flower_clean(struct nfp_app *app)
{
+ nfp_flower_metadata_cleanup(app);
vfree(app->priv);
app->priv = NULL;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 2e69bcdc5b07..99a26a9efec1 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2229,6 +2229,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
kfree(names);
kfree(callbacks);
kfree(vqs);
+ kfree(ctx);
return 0;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index f4d0054981c6..8a1eaf3c302a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -922,15 +922,10 @@ static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
static void vrf_dev_uninit(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
- struct net_device *port_dev;
- struct list_head *iter;
vrf_rtable_release(dev, vrf);
vrf_rt6_release(dev, vrf);
- netdev_for_each_lower_dev(dev, port_dev, iter)
- vrf_del_slave(dev, port_dev);
-
free_percpu(dev->dstats);
dev->dstats = NULL;
}
@@ -1386,6 +1381,12 @@ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
+ struct net_device *port_dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(dev, port_dev, iter)
+ vrf_del_slave(dev, port_dev);
+
unregister_netdevice_queue(dev, head);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index b53ecf1eddda..21f2201405d1 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -346,11 +346,13 @@ static const struct pci_device_id mwifiex_ids[] = {
MODULE_DEVICE_TABLE(pci, mwifiex_ids);
-static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare)
+/*
+ * Cleanup all software without cleaning anything related to PCIe and HW.
+ */
+static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev)
{
struct pcie_service_card *card = pci_get_drvdata(pdev);
struct mwifiex_adapter *adapter = card->adapter;
- int ret;
if (!adapter) {
dev_err(&pdev->dev, "%s: adapter structure is not valid\n",
@@ -359,35 +361,46 @@ static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare)
}
mwifiex_dbg(adapter, INFO,
- "%s: vendor=0x%4.04x device=0x%4.04x rev=%d %s\n",
- __func__, pdev->vendor, pdev->device,
- pdev->revision,
- prepare ? "Pre-FLR" : "Post-FLR");
-
- if (prepare) {
- /* Kernel would be performing FLR after this notification.
- * Cleanup all software without cleaning anything related to
- * PCIe and HW.
- */
- mwifiex_shutdown_sw(adapter);
- clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
- clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
- } else {
- /* Kernel stores and restores PCIe function context before and
- * after performing FLR respectively. Reconfigure the software
- * and firmware including firmware redownload
- */
- ret = mwifiex_reinit_sw(adapter);
- if (ret) {
- dev_err(&pdev->dev, "reinit failed: %d\n", ret);
- return;
- }
- }
+ "%s: vendor=0x%4.04x device=0x%4.04x rev=%d Pre-FLR\n",
+ __func__, pdev->vendor, pdev->device, pdev->revision);
+
+ mwifiex_shutdown_sw(adapter);
+ clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
+ clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
}
-static const struct pci_error_handlers mwifiex_pcie_err_handler[] = {
- { .reset_notify = mwifiex_pcie_reset_notify, },
+/*
+ * Kernel stores and restores PCIe function context before and after performing
+ * FLR respectively. Reconfigure the software and firmware including firmware
+ * redownload.
+ */
+static void mwifiex_pcie_reset_done(struct pci_dev *pdev)
+{
+ struct pcie_service_card *card = pci_get_drvdata(pdev);
+ struct mwifiex_adapter *adapter = card->adapter;
+ int ret;
+
+ if (!adapter) {
+ dev_err(&pdev->dev, "%s: adapter structure is not valid\n",
+ __func__);
+ return;
+ }
+
+ mwifiex_dbg(adapter, INFO,
+ "%s: vendor=0x%4.04x device=0x%4.04x rev=%d Post-FLR\n",
+ __func__, pdev->vendor, pdev->device, pdev->revision);
+
+ ret = mwifiex_reinit_sw(adapter);
+ if (ret)
+ dev_err(&pdev->dev, "reinit failed: %d\n", ret);
+ else
+ mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
+}
+
+static const struct pci_error_handlers mwifiex_pcie_err_handler = {
+ .reset_prepare = mwifiex_pcie_reset_prepare,
+ .reset_done = mwifiex_pcie_reset_done,
};
#ifdef CONFIG_PM_SLEEP
@@ -408,7 +421,7 @@ static struct pci_driver __refdata mwifiex_pcie = {
},
#endif
.shutdown = mwifiex_pcie_shutdown,
- .err_handler = mwifiex_pcie_err_handler,
+ .err_handler = &mwifiex_pcie_err_handler,
};
/*
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index b6ba0618ea46..64216dea5278 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -37,8 +37,8 @@ static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
struct nd_btt *nd_btt = arena->nd_btt;
struct nd_namespace_common *ndns = nd_btt->ndns;
- /* arena offsets are 4K from the base of the device */
- offset += SZ_4K;
+ /* arena offsets may be shifted from the base of the device */
+ offset += arena->nd_btt->initial_offset;
return nvdimm_read_bytes(ndns, offset, buf, n, flags);
}
@@ -48,8 +48,8 @@ static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
struct nd_btt *nd_btt = arena->nd_btt;
struct nd_namespace_common *ndns = nd_btt->ndns;
- /* arena offsets are 4K from the base of the device */
- offset += SZ_4K;
+ /* arena offsets may be shifted from the base of the device */
+ offset += arena->nd_btt->initial_offset;
return nvdimm_write_bytes(ndns, offset, buf, n, flags);
}
@@ -323,7 +323,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
old_ent = btt_log_get_old(log);
if (old_ent < 0 || old_ent > 1) {
- dev_info(to_dev(arena),
+ dev_err(to_dev(arena),
"log corruption (%d): lane %d seq [%d, %d]\n",
old_ent, lane, log[0].seq, log[1].seq);
/* TODO set error state? */
@@ -576,8 +576,8 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
arena->internal_lbasize = roundup(arena->external_lbasize,
INT_LBASIZE_ALIGNMENT);
arena->nfree = BTT_DEFAULT_NFREE;
- arena->version_major = 1;
- arena->version_minor = 1;
+ arena->version_major = btt->nd_btt->version_major;
+ arena->version_minor = btt->nd_btt->version_minor;
if (available % BTT_PG_SIZE)
available -= (available % BTT_PG_SIZE);
@@ -684,7 +684,7 @@ static int discover_arenas(struct btt *btt)
dev_info(to_dev(arena), "No existing arenas\n");
goto out;
} else {
- dev_info(to_dev(arena),
+ dev_err(to_dev(arena),
"Found corrupted metadata!\n");
ret = -ENODEV;
goto out;
@@ -1227,7 +1227,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
op_is_write(bio_op(bio)), iter.bi_sector);
if (err) {
- dev_info(&btt->nd_btt->dev,
+ dev_err(&btt->nd_btt->dev,
"io error in %s sector %lld, len %d,\n",
(op_is_write(bio_op(bio))) ? "WRITE" :
"READ",
@@ -1248,10 +1248,13 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, bool is_write)
{
struct btt *btt = bdev->bd_disk->private_data;
+ int rc;
- btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
- page_endio(page, is_write, 0);
- return 0;
+ rc = btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
+ if (rc == 0)
+ page_endio(page, is_write, 0);
+
+ return rc;
}
@@ -1369,7 +1372,7 @@ static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
}
if (btt->init_state != INIT_READY && nd_region->ro) {
- dev_info(dev, "%s is read-only, unable to init btt metadata\n",
+ dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
dev_name(&nd_region->dev));
return NULL;
} else if (btt->init_state != INIT_READY) {
@@ -1424,6 +1427,7 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
struct nd_region *nd_region;
+ struct btt_sb *btt_sb;
struct btt *btt;
size_t rawsize;
@@ -1432,10 +1436,21 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
return -ENODEV;
}
- rawsize = nvdimm_namespace_capacity(ndns) - SZ_4K;
+ btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
+
+ /*
+ * If this returns < 0, that is ok as it just means there wasn't
+ * an existing BTT, and we're creating a new one. We still need to
+ * call this as we need the version dependent fields in nd_btt to be
+ * set correctly based on the holder class
+ */
+ nd_btt_version(nd_btt, ndns, btt_sb);
+
+ rawsize = nvdimm_namespace_capacity(ndns) - nd_btt->initial_offset;
if (rawsize < ARENA_MIN_SIZE) {
dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
- dev_name(&ndns->dev), ARENA_MIN_SIZE + SZ_4K);
+ dev_name(&ndns->dev),
+ ARENA_MIN_SIZE + nd_btt->initial_offset);
return -ENXIO;
}
nd_region = to_nd_region(nd_btt->dev.parent);
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
index b2f8651e5395..888e862907a0 100644
--- a/drivers/nvdimm/btt.h
+++ b/drivers/nvdimm/btt.h
@@ -184,5 +184,7 @@ struct btt {
};
bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super);
+int nd_btt_version(struct nd_btt *nd_btt, struct nd_namespace_common *ndns,
+ struct btt_sb *btt_sb);
#endif
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 4c989bb9a8a0..3e359d282f8e 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -260,20 +260,55 @@ bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super)
}
EXPORT_SYMBOL(nd_btt_arena_is_valid);
+int nd_btt_version(struct nd_btt *nd_btt, struct nd_namespace_common *ndns,
+ struct btt_sb *btt_sb)
+{
+ if (ndns->claim_class == NVDIMM_CCLASS_BTT2) {
+ /* Probe/setup for BTT v2.0 */
+ nd_btt->initial_offset = 0;
+ nd_btt->version_major = 2;
+ nd_btt->version_minor = 0;
+ if (nvdimm_read_bytes(ndns, 0, btt_sb, sizeof(*btt_sb), 0))
+ return -ENXIO;
+ if (!nd_btt_arena_is_valid(nd_btt, btt_sb))
+ return -ENODEV;
+ if ((le16_to_cpu(btt_sb->version_major) != 2) ||
+ (le16_to_cpu(btt_sb->version_minor) != 0))
+ return -ENODEV;
+ } else {
+ /*
+ * Probe/setup for BTT v1.1 (NVDIMM_CCLASS_NONE or
+ * NVDIMM_CCLASS_BTT)
+ */
+ nd_btt->initial_offset = SZ_4K;
+ nd_btt->version_major = 1;
+ nd_btt->version_minor = 1;
+ if (nvdimm_read_bytes(ndns, SZ_4K, btt_sb, sizeof(*btt_sb), 0))
+ return -ENXIO;
+ if (!nd_btt_arena_is_valid(nd_btt, btt_sb))
+ return -ENODEV;
+ if ((le16_to_cpu(btt_sb->version_major) != 1) ||
+ (le16_to_cpu(btt_sb->version_minor) != 1))
+ return -ENODEV;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(nd_btt_version);
+
static int __nd_btt_probe(struct nd_btt *nd_btt,
struct nd_namespace_common *ndns, struct btt_sb *btt_sb)
{
+ int rc;
+
if (!btt_sb || !ndns || !nd_btt)
return -ENODEV;
- if (nvdimm_read_bytes(ndns, SZ_4K, btt_sb, sizeof(*btt_sb), 0))
- return -ENXIO;
-
if (nvdimm_namespace_capacity(ndns) < SZ_16M)
return -ENXIO;
- if (!nd_btt_arena_is_valid(nd_btt, btt_sb))
- return -ENODEV;
+ rc = nd_btt_version(nd_btt, ndns, btt_sb);
+ if (rc < 0)
+ return rc;
nd_btt->lbasize = le32_to_cpu(btt_sb->external_lbasize);
nd_btt->uuid = kmemdup(btt_sb->uuid, 16, GFP_KERNEL);
@@ -295,6 +330,15 @@ int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns)
if (ndns->force_raw)
return -ENODEV;
+ switch (ndns->claim_class) {
+ case NVDIMM_CCLASS_NONE:
+ case NVDIMM_CCLASS_BTT:
+ case NVDIMM_CCLASS_BTT2:
+ break;
+ default:
+ return -ENODEV;
+ }
+
nvdimm_bus_lock(&ndns->dev);
btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns);
nvdimm_bus_unlock(&ndns->dev);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index e9361bffe5ee..937fafa1886a 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -38,13 +38,13 @@ static int to_nd_device_type(struct device *dev)
{
if (is_nvdimm(dev))
return ND_DEVICE_DIMM;
- else if (is_nd_pmem(dev))
+ else if (is_memory(dev))
return ND_DEVICE_REGION_PMEM;
else if (is_nd_blk(dev))
return ND_DEVICE_REGION_BLK;
else if (is_nd_dax(dev))
return ND_DEVICE_DAX_PMEM;
- else if (is_nd_pmem(dev->parent) || is_nd_blk(dev->parent))
+ else if (is_nd_region(dev->parent))
return nd_region_to_nstype(to_nd_region(dev->parent));
return 0;
@@ -56,7 +56,7 @@ static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
* Ensure that region devices always have their numa node set as
* early as possible.
*/
- if (is_nd_pmem(dev) || is_nd_blk(dev))
+ if (is_nd_region(dev))
set_dev_node(dev, to_nd_region(dev)->numa_node);
return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
to_nd_device_type(dev));
@@ -65,7 +65,7 @@ static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
static struct module *to_bus_provider(struct device *dev)
{
/* pin bus providers while regions are enabled */
- if (is_nd_pmem(dev) || is_nd_blk(dev)) {
+ if (is_nd_region(dev)) {
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
return nvdimm_bus->nd_desc->module;
@@ -198,6 +198,9 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
sector = (ctx->phys - nd_region->ndr_start) / 512;
badblocks_clear(&nd_region->bb, sector, ctx->cleared / 512);
+ if (nd_region->bb_state)
+ sysfs_notify_dirent(nd_region->bb_state);
+
return 0;
}
@@ -907,6 +910,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
static char in_env[ND_CMD_MAX_ENVELOPE];
const struct nd_cmd_desc *desc = NULL;
unsigned int cmd = _IOC_NR(ioctl_cmd);
+ unsigned int func = cmd;
void __user *p = (void __user *) arg;
struct device *dev = &nvdimm_bus->dev;
struct nd_cmd_pkg pkg;
@@ -972,6 +976,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
if (cmd == ND_CMD_CALL) {
+ func = pkg.nd_command;
dev_dbg(dev, "%s:%s, idx: %llu, in: %zu, out: %zu, len %zu\n",
__func__, dimm_name, pkg.nd_command,
in_len, out_len, buf_len);
@@ -1020,7 +1025,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
nvdimm_bus_lock(&nvdimm_bus->dev);
- rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, cmd, buf);
+ rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
if (rc)
goto out_unlock;
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 7ceb5fa4f2a1..47770460f3d3 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -12,8 +12,8 @@
*/
#include <linux/device.h>
#include <linux/sizes.h>
-#include <linux/pmem.h>
#include "nd-core.h"
+#include "pmem.h"
#include "pfn.h"
#include "btt.h"
#include "nd.h"
@@ -184,6 +184,35 @@ ssize_t nd_namespace_store(struct device *dev,
}
ndns = to_ndns(found);
+
+ switch (ndns->claim_class) {
+ case NVDIMM_CCLASS_NONE:
+ break;
+ case NVDIMM_CCLASS_BTT:
+ case NVDIMM_CCLASS_BTT2:
+ if (!is_nd_btt(dev)) {
+ len = -EBUSY;
+ goto out_attach;
+ }
+ break;
+ case NVDIMM_CCLASS_PFN:
+ if (!is_nd_pfn(dev)) {
+ len = -EBUSY;
+ goto out_attach;
+ }
+ break;
+ case NVDIMM_CCLASS_DAX:
+ if (!is_nd_dax(dev)) {
+ len = -EBUSY;
+ goto out_attach;
+ }
+ break;
+ default:
+ len = -EBUSY;
+ goto out_attach;
+ break;
+ }
+
if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
dev_dbg(dev, "%s too small to host\n", name);
len = -ENXIO;
@@ -260,8 +289,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
* work around this collision.
*/
if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
- && !(flags & NVDIMM_IO_ATOMIC)
- && !ndns->claim) {
+ && !(flags & NVDIMM_IO_ATOMIC)) {
long cleared;
cleared = nvdimm_clear_poison(&ndns->dev,
@@ -272,12 +300,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
cleared /= 512;
badblocks_clear(&nsio->bb, sector, cleared);
}
- invalidate_pmem(nsio->addr + offset, size);
+ arch_invalidate_pmem(nsio->addr + offset, size);
} else
rc = -EIO;
}
- memcpy_to_pmem(nsio->addr + offset, buf, size);
+ memcpy_flushcache(nsio->addr + offset, buf, size);
nvdimm_flush(to_nd_region(ndns->dev.parent));
return rc;
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 2dee908e4bae..7cd99b1f8596 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -504,7 +504,7 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
struct nvdimm_bus *nvdimm_bus;
struct list_head *poison_list;
- if (!is_nd_pmem(&nd_region->dev)) {
+ if (!is_memory(&nd_region->dev)) {
dev_WARN_ONCE(&nd_region->dev, 1,
"%s only valid for pmem regions\n", __func__);
return;
@@ -699,6 +699,9 @@ static __init int libnvdimm_init(void)
rc = nd_region_init();
if (rc)
goto err_region;
+
+ nd_label_init();
+
return 0;
err_region:
nvdimm_exit();
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
index c1b6556aea6e..1bf2bd318371 100644
--- a/drivers/nvdimm/dax_devs.c
+++ b/drivers/nvdimm/dax_devs.c
@@ -89,7 +89,7 @@ struct device *nd_dax_create(struct nd_region *nd_region)
struct device *dev = NULL;
struct nd_dax *nd_dax;
- if (!is_nd_pmem(&nd_region->dev))
+ if (!is_memory(&nd_region->dev))
return NULL;
nd_dax = nd_dax_alloc(nd_region);
@@ -111,6 +111,14 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
if (ndns->force_raw)
return -ENODEV;
+ switch (ndns->claim_class) {
+ case NVDIMM_CCLASS_NONE:
+ case NVDIMM_CCLASS_DAX:
+ break;
+ default:
+ return -ENODEV;
+ }
+
nvdimm_bus_lock(&ndns->dev);
nd_dax = nd_dax_alloc(nd_region);
nd_pfn = &nd_dax->nd_pfn;
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 9852a3355509..f0d1b7e5de01 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -20,6 +20,7 @@
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
+#include "pmem.h"
#include "nd.h"
static DEFINE_IDA(dimm_ida);
@@ -235,6 +236,13 @@ struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);
+unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
+{
+ /* pmem mapping properties are private to libnvdimm */
+ return ARCH_MEMREMAP_PMEM;
+}
+EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);
+
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -411,7 +419,7 @@ int alias_dpa_busy(struct device *dev, void *data)
struct resource *res;
int i;
- if (!is_nd_pmem(dev))
+ if (!is_memory(dev))
return 0;
nd_region = to_nd_region(dev);
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index dd615345699f..87796f840777 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -12,6 +12,7 @@
*/
#include <linux/device.h>
#include <linux/ndctl.h>
+#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
@@ -19,6 +20,11 @@
#include "label.h"
#include "nd.h"
+static guid_t nvdimm_btt_guid;
+static guid_t nvdimm_btt2_guid;
+static guid_t nvdimm_pfn_guid;
+static guid_t nvdimm_dax_guid;
+
static u32 best_seq(u32 a, u32 b)
{
a &= NSINDEX_SEQ_MASK;
@@ -34,6 +40,11 @@ static u32 best_seq(u32 a, u32 b)
return a;
}
+unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
+{
+ return ndd->nslabel_size;
+}
+
size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
u32 index_span;
@@ -49,7 +60,7 @@ size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
* starts to waste space at larger config_sizes, but it's
* unlikely we'll ever see anything but 128K.
*/
- index_span = ndd->nsarea.config_size / 129;
+ index_span = ndd->nsarea.config_size / (sizeof_namespace_label(ndd) + 1);
index_span /= NSINDEX_ALIGN * 2;
ndd->nsindex_size = index_span * NSINDEX_ALIGN;
@@ -58,10 +69,10 @@ size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
- return ndd->nsarea.config_size / 129;
+ return ndd->nsarea.config_size / (sizeof_namespace_label(ndd) + 1);
}
-int nd_label_validate(struct nvdimm_drvdata *ndd)
+static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
/*
* On media label format consists of two index blocks followed
@@ -104,6 +115,7 @@ int nd_label_validate(struct nvdimm_drvdata *ndd)
u32 nslot;
u8 sig[NSINDEX_SIG_LEN];
u64 sum_save, sum, size;
+ unsigned int version, labelsize;
memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
@@ -111,6 +123,21 @@ int nd_label_validate(struct nvdimm_drvdata *ndd)
__func__, i);
continue;
}
+
+ /* label sizes larger than 128 arrived with v1.2 */
+ version = __le16_to_cpu(nsindex[i]->major) * 100
+ + __le16_to_cpu(nsindex[i]->minor);
+ if (version >= 102)
+ labelsize = 1 << (7 + nsindex[i]->labelsize);
+ else
+ labelsize = 128;
+
+ if (labelsize != sizeof_namespace_label(ndd)) {
+ dev_dbg(dev, "%s: nsindex%d labelsize %d invalid\n",
+ __func__, i, nsindex[i]->labelsize);
+ continue;
+ }
+
sum_save = __le64_to_cpu(nsindex[i]->checksum);
nsindex[i]->checksum = __cpu_to_le64(0);
sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
@@ -153,7 +180,7 @@ int nd_label_validate(struct nvdimm_drvdata *ndd)
}
nslot = __le32_to_cpu(nsindex[i]->nslot);
- if (nslot * sizeof(struct nd_namespace_label)
+ if (nslot * sizeof_namespace_label(ndd)
+ 2 * sizeof_namespace_index(ndd)
> ndd->nsarea.config_size) {
dev_dbg(dev, "%s: nsindex%d nslot: %u invalid, config_size: %#x\n",
@@ -189,6 +216,29 @@ int nd_label_validate(struct nvdimm_drvdata *ndd)
return -1;
}
+int nd_label_validate(struct nvdimm_drvdata *ndd)
+{
+ /*
+ * In order to probe for and validate namespace index blocks we
+ * need to know the size of the labels, and we can't trust the
+ * size of the labels until we validate the index blocks.
+ * Resolve this dependency loop by probing for known label
+ * sizes, but default to v1.2 256-byte namespace labels if
+ * discovery fails.
+ */
+ int label_size[] = { 128, 256 };
+ int i, rc;
+
+ for (i = 0; i < ARRAY_SIZE(label_size); i++) {
+ ndd->nslabel_size = label_size[i];
+ rc = __nd_label_validate(ndd);
+ if (rc >= 0)
+ return rc;
+ }
+
+ return -1;
+}
+
void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst,
struct nd_namespace_index *src)
{
@@ -210,7 +260,22 @@ static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
static int to_slot(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label)
{
- return nd_label - nd_label_base(ndd);
+ unsigned long label, base;
+
+ label = (unsigned long) nd_label;
+ base = (unsigned long) nd_label_base(ndd);
+
+ return (label - base) / sizeof_namespace_label(ndd);
+}
+
+static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
+{
+ unsigned long label, base;
+
+ base = (unsigned long) nd_label_base(ndd);
+ label = base + sizeof_namespace_label(ndd) * slot;
+
+ return (struct nd_namespace_label *) label;
}
#define for_each_clear_bit_le(bit, addr, size) \
@@ -268,7 +333,8 @@ static bool preamble_next(struct nvdimm_drvdata *ndd,
free, nslot);
}
-static bool slot_valid(struct nd_namespace_label *nd_label, u32 slot)
+static bool slot_valid(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_label *nd_label, u32 slot)
{
/* check that we are written where we expect to be written */
if (slot != __le32_to_cpu(nd_label->slot))
@@ -279,6 +345,21 @@ static bool slot_valid(struct nd_namespace_label *nd_label, u32 slot)
| __le64_to_cpu(nd_label->rawsize)) % SZ_4K)
return false;
+ /* check checksum */
+ if (namespace_label_has(ndd, checksum)) {
+ u64 sum, sum_save;
+
+ sum_save = __le64_to_cpu(nd_label->checksum);
+ nd_label->checksum = __cpu_to_le64(0);
+ sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
+ nd_label->checksum = __cpu_to_le64(sum_save);
+ if (sum != sum_save) {
+ dev_dbg(ndd->dev, "%s fail checksum. slot: %d expect: %#llx\n",
+ __func__, slot, sum);
+ return false;
+ }
+ }
+
return true;
}
@@ -299,9 +380,9 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
struct resource *res;
u32 flags;
- nd_label = nd_label_base(ndd) + slot;
+ nd_label = to_label(ndd, slot);
- if (!slot_valid(nd_label, slot))
+ if (!slot_valid(ndd, nd_label, slot))
continue;
memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
@@ -331,9 +412,9 @@ int nd_label_active_count(struct nvdimm_drvdata *ndd)
for_each_clear_bit_le(slot, free, nslot) {
struct nd_namespace_label *nd_label;
- nd_label = nd_label_base(ndd) + slot;
+ nd_label = to_label(ndd, slot);
- if (!slot_valid(nd_label, slot)) {
+ if (!slot_valid(ndd, nd_label, slot)) {
u32 label_slot = __le32_to_cpu(nd_label->slot);
u64 size = __le64_to_cpu(nd_label->rawsize);
u64 dpa = __le64_to_cpu(nd_label->dpa);
@@ -360,12 +441,12 @@ struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
for_each_clear_bit_le(slot, free, nslot) {
struct nd_namespace_label *nd_label;
- nd_label = nd_label_base(ndd) + slot;
- if (!slot_valid(nd_label, slot))
+ nd_label = to_label(ndd, slot);
+ if (!slot_valid(ndd, nd_label, slot))
continue;
if (n-- == 0)
- return nd_label_base(ndd) + slot;
+ return to_label(ndd, slot);
}
return NULL;
@@ -437,7 +518,8 @@ static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
nslot = __le32_to_cpu(nsindex->nslot);
memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
- nsindex->flags = __cpu_to_le32(0);
+ memset(&nsindex->flags, 0, 3);
+ nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
nsindex->seq = __cpu_to_le32(seq);
offset = (unsigned long) nsindex
- (unsigned long) to_namespace_index(ndd, 0);
@@ -452,7 +534,10 @@ static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
nsindex->labeloff = __cpu_to_le64(offset);
nsindex->nslot = __cpu_to_le32(nslot);
nsindex->major = __cpu_to_le16(1);
- nsindex->minor = __cpu_to_le16(1);
+ if (sizeof_namespace_label(ndd) < 256)
+ nsindex->minor = __cpu_to_le16(1);
+ else
+ nsindex->minor = __cpu_to_le16(2);
nsindex->checksum = __cpu_to_le64(0);
if (flags & ND_NSINDEX_INIT) {
unsigned long *free = (unsigned long *) nsindex->free;
@@ -490,11 +575,49 @@ static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
- (unsigned long) to_namespace_index(ndd, 0);
}
+enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
+{
+ if (guid_equal(guid, &nvdimm_btt_guid))
+ return NVDIMM_CCLASS_BTT;
+ else if (guid_equal(guid, &nvdimm_btt2_guid))
+ return NVDIMM_CCLASS_BTT2;
+ else if (guid_equal(guid, &nvdimm_pfn_guid))
+ return NVDIMM_CCLASS_PFN;
+ else if (guid_equal(guid, &nvdimm_dax_guid))
+ return NVDIMM_CCLASS_DAX;
+ else if (guid_equal(guid, &guid_null))
+ return NVDIMM_CCLASS_NONE;
+
+ return NVDIMM_CCLASS_UNKNOWN;
+}
+
+static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
+ guid_t *target)
+{
+ if (claim_class == NVDIMM_CCLASS_BTT)
+ return &nvdimm_btt_guid;
+ else if (claim_class == NVDIMM_CCLASS_BTT2)
+ return &nvdimm_btt2_guid;
+ else if (claim_class == NVDIMM_CCLASS_PFN)
+ return &nvdimm_pfn_guid;
+ else if (claim_class == NVDIMM_CCLASS_DAX)
+ return &nvdimm_dax_guid;
+ else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
+ /*
+ * If we're modifying a namespace for which we don't
+ * know the claim_class, don't touch the existing guid.
+ */
+ return target;
+ } else
+ return &guid_null;
+}
+
static int __pmem_label_update(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
int pos)
{
- u64 cookie = nd_region_interleave_set_cookie(nd_region);
+ struct nd_namespace_common *ndns = &nspm->nsio.common;
+ struct nd_interleave_set *nd_set = nd_region->nd_set;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_label_ent *label_ent, *victim = NULL;
struct nd_namespace_label *nd_label;
@@ -504,11 +627,13 @@ static int __pmem_label_update(struct nd_region *nd_region,
unsigned long *free;
u32 nslot, slot;
size_t offset;
+ u64 cookie;
int rc;
if (!preamble_next(ndd, &nsindex, &free, &nslot))
return -ENXIO;
+ cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
nd_label_gen_id(&label_id, nspm->uuid, 0);
for_each_dpa_resource(ndd, res)
if (strcmp(res->name, label_id.id) == 0)
@@ -525,8 +650,8 @@ static int __pmem_label_update(struct nd_region *nd_region,
return -ENXIO;
dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot);
- nd_label = nd_label_base(ndd) + slot;
- memset(nd_label, 0, sizeof(struct nd_namespace_label));
+ nd_label = to_label(ndd, slot);
+ memset(nd_label, 0, sizeof_namespace_label(ndd));
memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
if (nspm->alt_name)
memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
@@ -535,14 +660,28 @@ static int __pmem_label_update(struct nd_region *nd_region,
nd_label->position = __cpu_to_le16(pos);
nd_label->isetcookie = __cpu_to_le64(cookie);
nd_label->rawsize = __cpu_to_le64(resource_size(res));
+ nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
nd_label->dpa = __cpu_to_le64(res->start);
nd_label->slot = __cpu_to_le32(slot);
+ if (namespace_label_has(ndd, type_guid))
+ guid_copy(&nd_label->type_guid, &nd_set->type_guid);
+ if (namespace_label_has(ndd, abstraction_guid))
+ guid_copy(&nd_label->abstraction_guid,
+ to_abstraction_guid(ndns->claim_class,
+ &nd_label->abstraction_guid));
+ if (namespace_label_has(ndd, checksum)) {
+ u64 sum;
+
+ nd_label->checksum = __cpu_to_le64(0);
+ sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
+ nd_label->checksum = __cpu_to_le64(sum);
+ }
nd_dbg_dpa(nd_region, ndd, res, "%s\n", __func__);
/* update label */
offset = nd_label_offset(ndd, nd_label);
rc = nvdimm_set_config_data(ndd, offset, nd_label,
- sizeof(struct nd_namespace_label));
+ sizeof_namespace_label(ndd));
if (rc < 0)
return rc;
@@ -624,6 +763,8 @@ static int __blk_label_update(struct nd_region *nd_region,
int num_labels)
{
int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
+ struct nd_interleave_set *nd_set = nd_region->nd_set;
+ struct nd_namespace_common *ndns = &nsblk->common;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_namespace_label *nd_label;
struct nd_label_ent *label_ent, *e;
@@ -632,6 +773,7 @@ static int __blk_label_update(struct nd_region *nd_region,
struct resource *res, **old_res_list;
struct nd_label_id label_id;
u8 uuid[NSLABEL_UUID_LEN];
+ int min_dpa_idx = 0;
LIST_HEAD(list);
u32 nslot, slot;
@@ -668,7 +810,7 @@ static int __blk_label_update(struct nd_region *nd_region,
/* mark unused labels for garbage collection */
for_each_clear_bit_le(slot, free, nslot) {
- nd_label = nd_label_base(ndd) + slot;
+ nd_label = to_label(ndd, slot);
memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
continue;
@@ -703,6 +845,18 @@ static int __blk_label_update(struct nd_region *nd_region,
}
}
+ /*
+ * Find the resource associated with the first label in the set
+ * per the v1.2 namespace specification.
+ */
+ for (i = 0; i < nsblk->num_resources; i++) {
+ struct resource *min = nsblk->res[min_dpa_idx];
+
+ res = nsblk->res[i];
+ if (res->start < min->start)
+ min_dpa_idx = i;
+ }
+
for (i = 0; i < nsblk->num_resources; i++) {
size_t offset;
@@ -714,25 +868,58 @@ static int __blk_label_update(struct nd_region *nd_region,
goto abort;
dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot);
- nd_label = nd_label_base(ndd) + slot;
- memset(nd_label, 0, sizeof(struct nd_namespace_label));
+ nd_label = to_label(ndd, slot);
+ memset(nd_label, 0, sizeof_namespace_label(ndd));
memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
if (nsblk->alt_name)
memcpy(nd_label->name, nsblk->alt_name,
NSLABEL_NAME_LEN);
nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);
- nd_label->nlabel = __cpu_to_le16(0); /* N/A */
- nd_label->position = __cpu_to_le16(0); /* N/A */
- nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
+
+ /*
+ * Use the presence of the type_guid as a flag to
+ * determine isetcookie usage and nlabel + position
+ * policy for blk-aperture namespaces.
+ */
+ if (namespace_label_has(ndd, type_guid)) {
+ if (i == min_dpa_idx) {
+ nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
+ nd_label->position = __cpu_to_le16(0);
+ } else {
+ nd_label->nlabel = __cpu_to_le16(0xffff);
+ nd_label->position = __cpu_to_le16(0xffff);
+ }
+ nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
+ } else {
+ nd_label->nlabel = __cpu_to_le16(0); /* N/A */
+ nd_label->position = __cpu_to_le16(0); /* N/A */
+ nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
+ }
+
nd_label->dpa = __cpu_to_le64(res->start);
nd_label->rawsize = __cpu_to_le64(resource_size(res));
nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
nd_label->slot = __cpu_to_le32(slot);
+ if (namespace_label_has(ndd, type_guid))
+ guid_copy(&nd_label->type_guid, &nd_set->type_guid);
+ if (namespace_label_has(ndd, abstraction_guid))
+ guid_copy(&nd_label->abstraction_guid,
+ to_abstraction_guid(ndns->claim_class,
+ &nd_label->abstraction_guid));
+
+ if (namespace_label_has(ndd, checksum)) {
+ u64 sum;
+
+ nd_label->checksum = __cpu_to_le64(0);
+ sum = nd_fletcher64(nd_label,
+ sizeof_namespace_label(ndd), 1);
+ nd_label->checksum = __cpu_to_le64(sum);
+ }
/* update label */
offset = nd_label_offset(ndd, nd_label);
rc = nvdimm_set_config_data(ndd, offset, nd_label,
- sizeof(struct nd_namespace_label));
+ sizeof_namespace_label(ndd));
if (rc < 0)
goto abort;
}
@@ -790,7 +977,7 @@ static int __blk_label_update(struct nd_region *nd_region,
goto out;
}
for_each_clear_bit_le(slot, free, nslot) {
- nd_label = nd_label_base(ndd) + slot;
+ nd_label = to_label(ndd, slot);
memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
continue;
@@ -973,3 +1160,13 @@ int nd_blk_namespace_label_update(struct nd_region *nd_region,
return __blk_label_update(nd_region, nd_mapping, nsblk, count);
}
+
+int __init nd_label_init(void)
+{
+ WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
+ WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
+ WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
+ WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));
+
+ return 0;
+}
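
The label.c changes above replace every sizeof(struct nd_namespace_label) with sizeof_namespace_label(), and nd_label_validate() now probes the two known sizes before trusting the index blocks. A stand-alone sketch of that slot arithmetic is shown below; the base address and the 128/256 table are illustrative stand-ins for ndd state.

    /* User-space model of variable label-size slot addressing: slots are
     * spaced by sizeof_namespace_label() rather than a fixed struct size. */
    #include <stdio.h>
    #include <stdint.h>

    static unsigned nslabel_size;   /* discovered by the index-block probe */

    static unsigned sizeof_namespace_label(void)
    {
            return nslabel_size;
    }

    static uintptr_t to_label(uintptr_t base, int slot)
    {
            return base + (uintptr_t)sizeof_namespace_label() * slot;
    }

    static int to_slot(uintptr_t base, uintptr_t label)
    {
            return (label - base) / sizeof_namespace_label();
    }

    int main(void)
    {
            uintptr_t base = 0x1000;
            int label_size[] = { 128, 256 };

            /* mirror of nd_label_validate(): try each known size in turn */
            for (unsigned i = 0; i < sizeof(label_size) / sizeof(label_size[0]); i++) {
                    nslabel_size = label_size[i];
                    printf("size %u: slot 3 at %#lx, round-trips to %d\n",
                           nslabel_size,
                           (unsigned long)to_label(base, 3),
                           to_slot(base, to_label(base, 3)));
            }
            return 0;
    }
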
diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h
index a59ef6eef2a3..1ebf4d3d01ba 100644
--- a/drivers/nvdimm/label.h
+++ b/drivers/nvdimm/label.h
@@ -15,6 +15,7 @@
#include <linux/ndctl.h>
#include <linux/sizes.h>
+#include <linux/uuid.h>
#include <linux/io.h>
enum {
@@ -60,7 +61,8 @@ static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
*/
struct nd_namespace_index {
u8 sig[NSINDEX_SIG_LEN];
- __le32 flags;
+ u8 flags[3];
+ u8 labelsize;
__le32 seq;
__le64 myoff;
__le64 mysize;
@@ -98,9 +100,23 @@ struct nd_namespace_label {
__le64 dpa;
__le64 rawsize;
__le32 slot;
- __le32 unused;
+ /*
+ * Accessing fields past this point should be gated by a
+ * namespace_label_has() check.
+ */
+ u8 align;
+ u8 reserved[3];
+ guid_t type_guid;
+ guid_t abstraction_guid;
+ u8 reserved2[88];
+ __le64 checksum;
};
+#define NVDIMM_BTT_GUID "8aed63a2-29a2-4c66-8b12-f05d15d3922a"
+#define NVDIMM_BTT2_GUID "18633bfc-1735-4217-8ac9-17239282d3f8"
+#define NVDIMM_PFN_GUID "266400ba-fb9f-4677-bcb0-968f11d0d225"
+#define NVDIMM_DAX_GUID "97a86d9c-3cdd-4eda-986f-5068b4f80088"
+
/**
* struct nd_label_id - identifier string for dpa allocation
* @id: "{blk|pmem}-<namespace uuid>"
@@ -131,6 +147,7 @@ struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n);
u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd);
bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot);
u32 nd_label_nfree(struct nvdimm_drvdata *ndd);
+enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid);
struct nd_region;
struct nd_namespace_pmem;
struct nd_namespace_blk;
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 2f9dfbd2dbec..5f1c6756e57c 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -14,10 +14,10 @@
#include <linux/device.h>
#include <linux/sort.h>
#include <linux/slab.h>
-#include <linux/pmem.h>
#include <linux/list.h>
#include <linux/nd.h>
#include "nd-core.h"
+#include "pmem.h"
#include "nd.h"
static void namespace_io_release(struct device *dev)
@@ -112,7 +112,7 @@ static int is_uuid_busy(struct device *dev, void *data)
static int is_namespace_uuid_busy(struct device *dev, void *data)
{
- if (is_nd_pmem(dev) || is_nd_blk(dev))
+ if (is_nd_region(dev))
return device_for_each_child(dev, data, is_uuid_busy);
return 0;
}
@@ -155,14 +155,33 @@ bool pmem_should_map_pages(struct device *dev)
IORES_DESC_NONE) == REGION_MIXED)
return false;
-#ifdef ARCH_MEMREMAP_PMEM
return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
-#else
- return false;
-#endif
}
EXPORT_SYMBOL(pmem_should_map_pages);
+unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
+{
+ if (is_namespace_pmem(&ndns->dev)) {
+ struct nd_namespace_pmem *nspm;
+
+ nspm = to_nd_namespace_pmem(&ndns->dev);
+ if (nspm->lbasize == 0 || nspm->lbasize == 512)
+ /* default */;
+ else if (nspm->lbasize == 4096)
+ return 4096;
+ else
+ dev_WARN(&ndns->dev, "unsupported sector size: %ld\n",
+ nspm->lbasize);
+ }
+
+ /*
+ * There is no namespace label (is_namespace_io()), or the label
+ * indicates the default sector size.
+ */
+ return 512;
+}
+EXPORT_SYMBOL(pmem_sector_size);
+
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
char *name)
{
@@ -787,7 +806,7 @@ static int __reserve_free_pmem(struct device *dev, void *data)
struct nd_label_id label_id;
int i;
- if (!is_nd_pmem(dev))
+ if (!is_memory(dev))
return 0;
nd_region = to_nd_region(dev);
@@ -1283,28 +1302,49 @@ static ssize_t resource_show(struct device *dev,
}
static DEVICE_ATTR_RO(resource);
-static const unsigned long ns_lbasize_supported[] = { 512, 520, 528,
+static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
4096, 4104, 4160, 4224, 0 };
+static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };
+
static ssize_t sector_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
+ if (is_namespace_blk(dev)) {
+ struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
- if (!is_namespace_blk(dev))
- return -ENXIO;
+ return nd_sector_size_show(nsblk->lbasize,
+ blk_lbasize_supported, buf);
+ }
+
+ if (is_namespace_pmem(dev)) {
+ struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
- return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf);
+ return nd_sector_size_show(nspm->lbasize,
+ pmem_lbasize_supported, buf);
+ }
+ return -ENXIO;
}
static ssize_t sector_size_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
- struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
struct nd_region *nd_region = to_nd_region(dev->parent);
+ const unsigned long *supported;
+ unsigned long *lbasize;
ssize_t rc = 0;
- if (!is_namespace_blk(dev))
+ if (is_namespace_blk(dev)) {
+ struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
+
+ lbasize = &nsblk->lbasize;
+ supported = blk_lbasize_supported;
+ } else if (is_namespace_pmem(dev)) {
+ struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
+
+ lbasize = &nspm->lbasize;
+ supported = pmem_lbasize_supported;
+ } else
return -ENXIO;
device_lock(dev);
@@ -1312,8 +1352,7 @@ static ssize_t sector_size_store(struct device *dev,
if (to_ndns(dev)->claim)
rc = -EBUSY;
if (rc >= 0)
- rc = nd_sector_size_store(dev, buf, &nsblk->lbasize,
- ns_lbasize_supported);
+ rc = nd_sector_size_store(dev, buf, lbasize, supported);
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
@@ -1368,6 +1407,58 @@ static ssize_t dpa_extents_show(struct device *dev,
}
static DEVICE_ATTR_RO(dpa_extents);
+static int btt_claim_class(struct device *dev)
+{
+ struct nd_region *nd_region = to_nd_region(dev->parent);
+ int i, loop_bitmask = 0;
+
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ struct nd_namespace_index *nsindex;
+
+ nsindex = to_namespace_index(ndd, ndd->ns_current);
+ if (nsindex == NULL)
+ loop_bitmask |= 1;
+ else {
+ /* check whether existing labels are v1.1 or v1.2 */
+ if (__le16_to_cpu(nsindex->major) == 1
+ && __le16_to_cpu(nsindex->minor) == 1)
+ loop_bitmask |= 2;
+ else
+ loop_bitmask |= 4;
+ }
+ }
+ /*
+ * If nsindex is NULL, bit 0 of loop_bitmask is set; if an index
+ * block is found, a v1.1 label on any mapping sets bit 1 and a
+ * v1.2 label sets bit 2.
+ *
+ * At the end of the loop only one of the three bits should be set.
+ * If multiple bits are set, the mappings disagree about their
+ * labels, and that must be cleaned up first.
+ *
+ * If all the label index blocks agree, a NULL nsindex implies the
+ * labels have not been initialized yet; when they are, they will
+ * use the v1.2 format, so we can assume BTT2.0.
+ *
+ * If v1.1 labels are found, we enforce BTT1.1; if v1.2 labels are
+ * found, we enforce BTT2.0.
+ *
+ * If the loop was never entered, default to BTT1.1 (legacy namespaces).
+ */
+ switch (loop_bitmask) {
+ case 0:
+ case 2:
+ return NVDIMM_CCLASS_BTT;
+ case 1:
+ case 4:
+ return NVDIMM_CCLASS_BTT2;
+ default:
+ return -ENXIO;
+ }
+}
+
static ssize_t holder_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1382,6 +1473,74 @@ static ssize_t holder_show(struct device *dev,
}
static DEVICE_ATTR_RO(holder);
+static ssize_t __holder_class_store(struct device *dev, const char *buf)
+{
+ struct nd_namespace_common *ndns = to_ndns(dev);
+
+ if (dev->driver || ndns->claim)
+ return -EBUSY;
+
+ if (strcmp(buf, "btt") == 0 || strcmp(buf, "btt\n") == 0)
+ ndns->claim_class = btt_claim_class(dev);
+ else if (strcmp(buf, "pfn") == 0 || strcmp(buf, "pfn\n") == 0)
+ ndns->claim_class = NVDIMM_CCLASS_PFN;
+ else if (strcmp(buf, "dax") == 0 || strcmp(buf, "dax\n") == 0)
+ ndns->claim_class = NVDIMM_CCLASS_DAX;
+ else if (strcmp(buf, "") == 0 || strcmp(buf, "\n") == 0)
+ ndns->claim_class = NVDIMM_CCLASS_NONE;
+ else
+ return -EINVAL;
+
+ /* btt_claim_class() could've returned an error */
+ if (ndns->claim_class < 0)
+ return ndns->claim_class;
+
+ return 0;
+}
+
+static ssize_t holder_class_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct nd_region *nd_region = to_nd_region(dev->parent);
+ ssize_t rc;
+
+ device_lock(dev);
+ nvdimm_bus_lock(dev);
+ wait_nvdimm_bus_probe_idle(dev);
+ rc = __holder_class_store(dev, buf);
+ if (rc >= 0)
+ rc = nd_namespace_label_update(nd_region, dev);
+ dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
+ nvdimm_bus_unlock(dev);
+ device_unlock(dev);
+
+ return rc < 0 ? rc : len;
+}
+
+static ssize_t holder_class_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_namespace_common *ndns = to_ndns(dev);
+ ssize_t rc;
+
+ device_lock(dev);
+ if (ndns->claim_class == NVDIMM_CCLASS_NONE)
+ rc = sprintf(buf, "\n");
+ else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
+ (ndns->claim_class == NVDIMM_CCLASS_BTT2))
+ rc = sprintf(buf, "btt\n");
+ else if (ndns->claim_class == NVDIMM_CCLASS_PFN)
+ rc = sprintf(buf, "pfn\n");
+ else if (ndns->claim_class == NVDIMM_CCLASS_DAX)
+ rc = sprintf(buf, "dax\n");
+ else
+ rc = sprintf(buf, "<unknown>\n");
+ device_unlock(dev);
+
+ return rc;
+}
+static DEVICE_ATTR_RW(holder_class);
+
static ssize_t mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1440,6 +1599,7 @@ static struct attribute *nd_namespace_attributes[] = {
&dev_attr_force_raw.attr,
&dev_attr_sector_size.attr,
&dev_attr_dpa_extents.attr,
+ &dev_attr_holder_class.attr,
NULL,
};
@@ -1458,14 +1618,12 @@ static umode_t namespace_visible(struct kobject *kobj,
if (a == &dev_attr_size.attr)
return 0644;
- if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
- return 0;
-
return a->mode;
}
if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
|| a == &dev_attr_holder.attr
+ || a == &dev_attr_holder_class.attr
|| a == &dev_attr_force_raw.attr
|| a == &dev_attr_mode.attr)
return a->mode;
@@ -1599,6 +1757,8 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+ struct nd_interleave_set *nd_set = nd_region->nd_set;
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_label_ent *label_ent;
bool found_uuid = false;
@@ -1619,8 +1779,17 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
continue;
+ if (namespace_label_has(ndd, type_guid)
+ && !guid_equal(&nd_set->type_guid,
+ &nd_label->type_guid)) {
+ dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
+ nd_set->type_guid.b,
+ nd_label->type_guid.b);
+ continue;
+ }
+
if (found_uuid) {
- dev_dbg(to_ndd(nd_mapping)->dev,
+ dev_dbg(ndd->dev,
"%s duplicate entry for uuid\n",
__func__);
return false;
@@ -1698,10 +1867,11 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
* @nd_label: target pmem namespace label to evaluate
*/
struct device *create_namespace_pmem(struct nd_region *nd_region,
+ struct nd_namespace_index *nsindex,
struct nd_namespace_label *nd_label)
{
+ u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
- u64 cookie = nd_region_interleave_set_cookie(nd_region);
struct nd_label_ent *label_ent;
struct nd_namespace_pmem *nspm;
struct nd_mapping *nd_mapping;
@@ -1775,6 +1945,7 @@ struct device *create_namespace_pmem(struct nd_region *nd_region,
/* Calculate total size and populate namespace properties from label0 */
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_namespace_label *label0;
+ struct nvdimm_drvdata *ndd;
nd_mapping = &nd_region->mapping[i];
label_ent = list_first_entry_or_null(&nd_mapping->labels,
@@ -1794,6 +1965,12 @@ struct device *create_namespace_pmem(struct nd_region *nd_region,
NSLABEL_NAME_LEN, GFP_KERNEL);
nspm->uuid = kmemdup((void __force *) label0->uuid,
NSLABEL_UUID_LEN, GFP_KERNEL);
+ nspm->lbasize = __le64_to_cpu(label0->lbasize);
+ ndd = to_ndd(nd_mapping);
+ if (namespace_label_has(ndd, abstraction_guid))
+ nspm->nsio.common.claim_class
+ = to_nvdimm_cclass(&label0->abstraction_guid);
+
}
if (!nspm->alt_name || !nspm->uuid) {
@@ -1876,7 +2053,7 @@ static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
struct resource *res;
struct device *dev;
- if (!is_nd_pmem(&nd_region->dev))
+ if (!is_memory(&nd_region->dev))
return NULL;
nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
@@ -2005,12 +2182,29 @@ struct device *create_namespace_blk(struct nd_region *nd_region,
{
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+ struct nd_interleave_set *nd_set = nd_region->nd_set;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_namespace_blk *nsblk;
char name[NSLABEL_NAME_LEN];
struct device *dev = NULL;
struct resource *res;
+ if (namespace_label_has(ndd, type_guid)) {
+ if (!guid_equal(&nd_set->type_guid, &nd_label->type_guid)) {
+ dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
+ nd_set->type_guid.b,
+ nd_label->type_guid.b);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ if (nd_label->isetcookie != __cpu_to_le64(nd_set->cookie2)) {
+ dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n",
+ nd_set->cookie2,
+ __le64_to_cpu(nd_label->isetcookie));
+ return ERR_PTR(-EAGAIN);
+ }
+ }
+
nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
if (!nsblk)
return ERR_PTR(-ENOMEM);
@@ -2021,6 +2215,9 @@ struct device *create_namespace_blk(struct nd_region *nd_region,
nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
GFP_KERNEL);
+ if (namespace_label_has(ndd, abstraction_guid))
+ nsblk->common.claim_class
+ = to_nvdimm_cclass(&nd_label->abstraction_guid);
if (!nsblk->uuid)
goto blk_err;
memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
@@ -2102,27 +2299,30 @@ static struct device **scan_labels(struct nd_region *nd_region)
kfree(devs);
devs = __devs;
- if (is_nd_blk(&nd_region->dev)) {
+ if (is_nd_blk(&nd_region->dev))
dev = create_namespace_blk(nd_region, nd_label, count);
- if (IS_ERR(dev))
+ else {
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+ struct nd_namespace_index *nsindex;
+
+ nsindex = to_namespace_index(ndd, ndd->ns_current);
+ dev = create_namespace_pmem(nd_region, nsindex, nd_label);
+ }
+
+ if (IS_ERR(dev)) {
+ switch (PTR_ERR(dev)) {
+ case -EAGAIN:
+ /* skip invalid labels */
+ continue;
+ case -ENODEV:
+ /* fallthrough to seed creation */
+ break;
+ default:
goto err;
+ }
+ } else
devs[count++] = dev;
- } else {
- dev = create_namespace_pmem(nd_region, nd_label);
- if (IS_ERR(dev)) {
- switch (PTR_ERR(dev)) {
- case -EAGAIN:
- /* skip invalid labels */
- continue;
- case -ENODEV:
- /* fallthrough to seed creation */
- break;
- default:
- goto err;
- }
- } else
- devs[count++] = dev;
- }
+
}
dev_dbg(&nd_region->dev, "%s: discovered %d %s namespace%s\n",
@@ -2156,7 +2356,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
}
dev->parent = &nd_region->dev;
devs[count++] = dev;
- } else if (is_nd_pmem(&nd_region->dev)) {
+ } else if (is_memory(&nd_region->dev)) {
/* clean unselected labels */
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct list_head *l, *e;
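
btt_claim_class() above records what it saw across the mappings in loop_bitmask and then maps that value to a claim class. A small user-space model of just that decision table follows; the CCLASS_* and errno constants are stand-ins for the kernel's NVDIMM_CCLASS_* values and -ENXIO.

    #include <stdio.h>

    enum { CCLASS_BTT = 1, CCLASS_BTT2 = 2, ERR_NXIO = -6 };

    static int claim_class_from_bitmask(int loop_bitmask)
    {
            switch (loop_bitmask) {
            case 0:         /* loop never entered: legacy default */
            case 2:         /* all mappings carry v1.1 labels */
                    return CCLASS_BTT;
            case 1:         /* labels not initialized yet: will be v1.2 */
            case 4:         /* all mappings carry v1.2 labels */
                    return CCLASS_BTT2;
            default:        /* mappings disagree: refuse until cleaned up */
                    return ERR_NXIO;
            }
    }

    int main(void)
    {
            for (int mask = 0; mask <= 7; mask++)
                    printf("bitmask %d -> %d\n", mask, claim_class_from_bitmask(mask));
            return 0;
    }
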
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 4c4bd209e725..86bc19ae30da 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -64,7 +64,16 @@ struct blk_alloc_info {
bool is_nvdimm(struct device *dev);
bool is_nd_pmem(struct device *dev);
+bool is_nd_volatile(struct device *dev);
bool is_nd_blk(struct device *dev);
+static inline bool is_nd_region(struct device *dev)
+{
+ return is_nd_pmem(dev) || is_nd_blk(dev) || is_nd_volatile(dev);
+}
+static inline bool is_memory(struct device *dev)
+{
+ return is_nd_pmem(dev) || is_nd_volatile(dev);
+}
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev);
int __init nvdimm_bus_init(void);
void nvdimm_bus_exit(void);
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 03852d738eec..e1b5715bd91f 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -42,7 +42,7 @@ struct nd_poison {
struct nvdimm_drvdata {
struct device *dev;
- int nsindex_size;
+ int nsindex_size, nslabel_size;
struct nd_cmd_get_config_size nsarea;
void *data;
int ns_current, ns_next;
@@ -96,6 +96,12 @@ static inline struct nd_namespace_index *to_next_namespace_index(
return to_namespace_index(ndd, ndd->ns_next);
}
+unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd);
+
+#define namespace_label_has(ndd, field) \
+ (offsetof(struct nd_namespace_label, field) \
+ < sizeof_namespace_label(ndd))
+
#define nd_dbg_dpa(r, d, res, fmt, arg...) \
dev_dbg((r) ? &(r)->dev : (d)->dev, "%s: %.13s: %#llx @ %#llx " fmt, \
(r) ? dev_name((d)->dev) : "", res ? res->name : "null", \
@@ -155,6 +161,7 @@ struct nd_region {
u64 ndr_start;
int id, num_lanes, ro, numa_node;
void *provider_data;
+ struct kernfs_node *bb_state;
struct badblocks bb;
struct nd_interleave_set *nd_set;
struct nd_percpu_lane __percpu *lane;
@@ -188,6 +195,9 @@ struct nd_btt {
u64 size;
u8 *uuid;
int id;
+ int initial_offset;
+ u16 version_major;
+ u16 version_minor;
};
enum nd_pfn_mode {
@@ -229,6 +239,7 @@ ssize_t nd_sector_size_store(struct device *dev, const char *buf,
unsigned long *current_lbasize, const unsigned long *supported);
int __init nvdimm_init(void);
int __init nd_region_init(void);
+int __init nd_label_init(void);
void nvdimm_exit(void);
void nd_region_exit(void);
struct nvdimm;
@@ -330,7 +341,8 @@ static inline struct device *nd_dax_create(struct nd_region *nd_region)
struct nd_region *to_nd_region(struct device *dev);
int nd_region_to_nstype(struct nd_region *nd_region);
int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
-u64 nd_region_interleave_set_cookie(struct nd_region *nd_region);
+u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
+ struct nd_namespace_index *nsindex);
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
void nvdimm_bus_lock(struct device *dev);
void nvdimm_bus_unlock(struct device *dev);
@@ -349,6 +361,7 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
char *name);
+unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
void nvdimm_badblocks_populate(struct nd_region *nd_region,
struct badblocks *bb, const struct resource *res);
#if IS_ENABLED(CONFIG_ND_CLAIM)
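
The namespace_label_has() macro added to nd.h above turns "does this label revision carry field X" into an offsetof() comparison against the label size discovered on media. Here is a self-contained illustration; the struct is a trimmed stand-in whose field names follow struct nd_namespace_label, while the authoritative on-media layout remains the definition in label.h.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct fake_label {
            uint8_t  uuid[16];
            char     name[64];
            uint32_t flags;
            uint16_t nlabel;
            uint16_t position;
            uint64_t isetcookie;
            uint64_t lbasize;
            uint64_t dpa;
            uint64_t rawsize;
            uint32_t slot;
            uint8_t  align;
            uint8_t  reserved[3];
            uint8_t  type_guid[16];        /* v1.2 only */
            uint8_t  abstraction_guid[16]; /* v1.2 only */
            uint8_t  reserved2[88];
            uint64_t checksum;             /* v1.2 only */
    };

    /* a field "exists" only if its offset fits inside the label size */
    #define label_has(size, field) \
            (offsetof(struct fake_label, field) < (size))

    int main(void)
    {
            printf("128-byte label has type_guid: %d\n", label_has(128, type_guid));
            printf("256-byte label has type_guid: %d\n", label_has(256, type_guid));
            printf("256-byte label has checksum:  %d\n", label_has(256, checksum));
            return 0;
    }
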
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index a6c403600d19..5fcb6f5b22a2 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -331,7 +331,7 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
struct nd_pfn *nd_pfn;
struct device *dev;
- if (!is_nd_pmem(&nd_region->dev))
+ if (!is_memory(&nd_region->dev))
return NULL;
nd_pfn = nd_pfn_alloc(nd_region);
@@ -354,7 +354,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
if (!pfn_sb || !ndns)
return -ENODEV;
- if (!is_nd_pmem(nd_pfn->dev.parent))
+ if (!is_memory(nd_pfn->dev.parent))
return -ENODEV;
if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0))
@@ -471,6 +471,14 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
if (ndns->force_raw)
return -ENODEV;
+ switch (ndns->claim_class) {
+ case NVDIMM_CCLASS_NONE:
+ case NVDIMM_CCLASS_PFN:
+ break;
+ default:
+ return -ENODEV;
+ }
+
nvdimm_bus_lock(&ndns->dev);
nd_pfn = nd_pfn_alloc(nd_region);
pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 6b577afb1d44..f7099adaabc0 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -28,7 +28,7 @@
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
-#include <linux/pmem.h>
+#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include "pmem.h"
@@ -68,9 +68,11 @@ static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
(unsigned long long) sector, cleared,
cleared > 1 ? "s" : "");
badblocks_clear(&pmem->bb, sector, cleared);
+ if (pmem->bb_state)
+ sysfs_notify_dirent(pmem->bb_state);
}
- invalidate_pmem(pmem->virt_addr + offset, len);
+ arch_invalidate_pmem(pmem->virt_addr + offset, len);
return rc;
}
@@ -80,7 +82,7 @@ static void write_pmem(void *pmem_addr, struct page *page,
{
void *mem = kmap_atomic(page);
- memcpy_to_pmem(pmem_addr, mem + off, len);
+ memcpy_flushcache(pmem_addr, mem + off, len);
kunmap_atomic(mem);
}
@@ -235,8 +237,27 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev,
return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}
+static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+{
+ return copy_from_iter_flushcache(addr, bytes, i);
+}
+
+static void pmem_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t size)
+{
+ arch_wb_cache_pmem(addr, size);
+}
+
static const struct dax_operations pmem_dax_ops = {
.direct_access = pmem_dax_direct_access,
+ .copy_from_iter = pmem_copy_from_iter,
+ .flush = pmem_dax_flush,
+};
+
+static const struct attribute_group *pmem_attribute_groups[] = {
+ &dax_attribute_group,
+ NULL,
};
static void pmem_release_queue(void *q)
@@ -265,14 +286,15 @@ static int pmem_attach_disk(struct device *dev,
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
struct nd_region *nd_region = to_nd_region(dev->parent);
struct vmem_altmap __altmap, *altmap = NULL;
+ int nid = dev_to_node(dev), fua, wbc;
struct resource *res = &nsio->res;
struct nd_pfn *nd_pfn = NULL;
struct dax_device *dax_dev;
- int nid = dev_to_node(dev);
struct nd_pfn_sb *pfn_sb;
struct pmem_device *pmem;
struct resource pfn_res;
struct request_queue *q;
+ struct device *gendev;
struct gendisk *disk;
void *addr;
@@ -294,8 +316,12 @@ static int pmem_attach_disk(struct device *dev,
dev_set_drvdata(dev, pmem);
pmem->phys_addr = res->start;
pmem->size = resource_size(res);
- if (nvdimm_has_flush(nd_region) < 0)
+ fua = nvdimm_has_flush(nd_region);
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
dev_warn(dev, "unable to guarantee persistence of writes\n");
+ fua = 0;
+ }
+ wbc = nvdimm_has_cache(nd_region);
if (!devm_request_mem_region(dev, res->start, resource_size(res),
dev_name(&ndns->dev))) {
@@ -339,9 +365,10 @@ static int pmem_attach_disk(struct device *dev,
return PTR_ERR(addr);
pmem->virt_addr = addr;
- blk_queue_write_cache(q, true, true);
+ blk_queue_write_cache(q, wbc, fua);
blk_queue_make_request(q, pmem_make_request);
blk_queue_physical_block_size(q, PAGE_SIZE);
+ blk_queue_logical_block_size(q, pmem_sector_size(ndns));
blk_queue_max_hw_sectors(q, UINT_MAX);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
@@ -368,14 +395,23 @@ static int pmem_attach_disk(struct device *dev,
put_disk(disk);
return -ENOMEM;
}
+ dax_write_cache(dax_dev, wbc);
pmem->dax_dev = dax_dev;
+ gendev = disk_to_dev(disk);
+ gendev->groups = pmem_attribute_groups;
+
device_add_disk(dev, disk);
if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
return -ENOMEM;
revalidate_disk(disk);
+ pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
+ "badblocks");
+ if (!pmem->bb_state)
+ dev_warn(dev, "'badblocks' notification disabled\n");
+
return 0;
}
@@ -407,8 +443,18 @@ static int nd_pmem_probe(struct device *dev)
static int nd_pmem_remove(struct device *dev)
{
+ struct pmem_device *pmem = dev_get_drvdata(dev);
+
if (is_nd_btt(dev))
nvdimm_namespace_detach_btt(to_nd_btt(dev));
+ else {
+ /*
+ * Note, this assumes device_lock() context to not race
+ * nd_pmem_notify()
+ */
+ sysfs_put(pmem->bb_state);
+ pmem->bb_state = NULL;
+ }
nvdimm_flush(to_nd_region(dev->parent));
return 0;
@@ -427,6 +473,7 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
struct nd_namespace_io *nsio;
struct resource res;
struct badblocks *bb;
+ struct kernfs_node *bb_state;
if (event != NVDIMM_REVALIDATE_POISON)
return;
@@ -438,11 +485,13 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
nd_region = to_nd_region(ndns->dev.parent);
nsio = to_nd_namespace_io(&ndns->dev);
bb = &nsio->bb;
+ bb_state = NULL;
} else {
struct pmem_device *pmem = dev_get_drvdata(dev);
nd_region = to_region(pmem);
bb = &pmem->bb;
+ bb_state = pmem->bb_state;
if (is_nd_pfn(dev)) {
struct nd_pfn *nd_pfn = to_nd_pfn(dev);
@@ -462,6 +511,8 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
res.start = nsio->res.start + offset;
res.end = nsio->res.end - end_trunc;
nvdimm_badblocks_populate(nd_region, bb, &res);
+ if (bb_state)
+ sysfs_notify_dirent(bb_state);
}
MODULE_ALIAS("pmem");
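
pmem_attach_disk() above now derives the queue's write-cache flags from the region: fua comes from nvdimm_has_flush(), disabled when the arch lacks the flushcache primitives or the lookup returns an error, and wbc comes from nvdimm_has_cache(). A toy user-space model of that decision is sketched below; the helper names and values are stand-ins for the kernel interfaces.

    #include <stdio.h>
    #include <stdbool.h>

    static void pick_cache_flags(int has_flush, bool arch_has_flushcache,
                                 bool has_cache, int *fua, int *wbc)
    {
            *fua = has_flush;
            if (!arch_has_flushcache || *fua < 0) {
                    /* cannot guarantee persistence: fall back to no FUA */
                    *fua = 0;
            }
            *wbc = has_cache;
    }

    int main(void)
    {
            int fua, wbc;

            pick_cache_flags(1, true, true, &fua, &wbc);
            printf("fua=%d wbc=%d\n", fua, wbc);   /* 1 1 */
            pick_cache_flags(-6, true, true, &fua, &wbc);
            printf("fua=%d wbc=%d\n", fua, wbc);   /* 0 1 */
            return 0;
    }
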
diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h
index 7f4dbd72a90a..5434321cad67 100644
--- a/drivers/nvdimm/pmem.h
+++ b/drivers/nvdimm/pmem.h
@@ -5,6 +5,20 @@
#include <linux/pfn_t.h>
#include <linux/fs.h>
+#ifdef CONFIG_ARCH_HAS_PMEM_API
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
+void arch_wb_cache_pmem(void *addr, size_t size);
+void arch_invalidate_pmem(void *addr, size_t size);
+#else
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
+static inline void arch_wb_cache_pmem(void *addr, size_t size)
+{
+}
+static inline void arch_invalidate_pmem(void *addr, size_t size)
+{
+}
+#endif
+
/* this definition is in its own header for tools/testing/nvdimm to consume */
struct pmem_device {
/* One contiguous memory region per device */
@@ -17,6 +31,7 @@ struct pmem_device {
size_t size;
/* trim size when namespace capacity has been section aligned */
u32 pfn_pad;
+ struct kernfs_node *bb_state;
struct badblocks bb;
struct dax_device *dax_dev;
struct gendisk *disk;
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 869a886c292e..034f0a07d627 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -58,10 +58,14 @@ static int nd_region_probe(struct device *dev)
if (devm_init_badblocks(dev, &nd_region->bb))
return -ENODEV;
+ nd_region->bb_state = sysfs_get_dirent(nd_region->dev.kobj.sd,
+ "badblocks");
+ if (!nd_region->bb_state)
+ dev_warn(&nd_region->dev,
+ "'badblocks' notification disabled\n");
ndr_res.start = nd_region->ndr_start;
ndr_res.end = nd_region->ndr_start + nd_region->ndr_size - 1;
- nvdimm_badblocks_populate(nd_region,
- &nd_region->bb, &ndr_res);
+ nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
}
nd_region->btt_seed = nd_btt_create(nd_region);
@@ -105,6 +109,13 @@ static int nd_region_remove(struct device *dev)
dev_set_drvdata(dev, NULL);
nvdimm_bus_unlock(dev);
+ /*
+ * Note, this assumes device_lock() context to not race
+ * nd_region_notify()
+ */
+ sysfs_put(nd_region->bb_state);
+ nd_region->bb_state = NULL;
+
return 0;
}
@@ -126,6 +137,8 @@ static void nd_region_notify(struct device *dev, enum nvdimm_event event)
nd_region->ndr_size - 1;
nvdimm_badblocks_populate(nd_region,
&nd_region->bb, &res);
+ if (nd_region->bb_state)
+ sysfs_notify_dirent(nd_region->bb_state);
}
}
device_for_each_child(dev, &event, child_notify);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index b550edf2571f..5954cfbea3fc 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -15,7 +15,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
-#include <linux/pmem.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
@@ -169,6 +168,11 @@ bool is_nd_blk(struct device *dev)
return dev ? dev->type == &nd_blk_device_type : false;
}
+bool is_nd_volatile(struct device *dev)
+{
+ return dev ? dev->type == &nd_volatile_device_type : false;
+}
+
struct nd_region *to_nd_region(struct device *dev)
{
struct nd_region *nd_region = container_of(dev, struct nd_region, dev);
@@ -215,7 +219,7 @@ EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);
*/
int nd_region_to_nstype(struct nd_region *nd_region)
{
- if (is_nd_pmem(&nd_region->dev)) {
+ if (is_memory(&nd_region->dev)) {
u16 i, alias;
for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
@@ -243,7 +247,7 @@ static ssize_t size_show(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev);
unsigned long long size = 0;
- if (is_nd_pmem(dev)) {
+ if (is_memory(dev)) {
size = nd_region->ndr_size;
} else if (nd_region->ndr_mappings == 1) {
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
@@ -307,13 +311,41 @@ static ssize_t set_cookie_show(struct device *dev,
{
struct nd_region *nd_region = to_nd_region(dev);
struct nd_interleave_set *nd_set = nd_region->nd_set;
+ ssize_t rc = 0;
- if (is_nd_pmem(dev) && nd_set)
+ if (is_memory(dev) && nd_set)
/* pass, should be precluded by region_visible */;
else
return -ENXIO;
- return sprintf(buf, "%#llx\n", nd_set->cookie);
+ /*
+ * The cookie to show depends on which specification of the
+ * labels we are using. If there are no labels then default to
+ * the v1.1 namespace label cookie definition. To read all this
+ * data we need to wait for probing to settle.
+ */
+ device_lock(dev);
+ nvdimm_bus_lock(dev);
+ wait_nvdimm_bus_probe_idle(dev);
+ if (nd_region->ndr_mappings) {
+ struct nd_mapping *nd_mapping = &nd_region->mapping[0];
+ struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
+
+ if (ndd) {
+ struct nd_namespace_index *nsindex;
+
+ nsindex = to_namespace_index(ndd, ndd->ns_current);
+ rc = sprintf(buf, "%#llx\n",
+ nd_region_interleave_set_cookie(nd_region,
+ nsindex));
+ }
+ }
+ nvdimm_bus_unlock(dev);
+ device_unlock(dev);
+
+ if (rc)
+ return rc;
+ return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);
@@ -335,7 +367,7 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
if (!ndd)
return 0;
- if (is_nd_pmem(&nd_region->dev)) {
+ if (is_memory(&nd_region->dev)) {
available += nd_pmem_available_dpa(nd_region,
nd_mapping, &overlap);
if (overlap > blk_max_overlap) {
@@ -521,10 +553,10 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
struct nd_interleave_set *nd_set = nd_region->nd_set;
int type = nd_region_to_nstype(nd_region);
- if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
+ if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
return 0;
- if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
+ if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
return 0;
if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
@@ -552,7 +584,7 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
|| type == ND_DEVICE_NAMESPACE_BLK)
&& a == &dev_attr_available_size.attr)
return a->mode;
- else if (is_nd_pmem(dev) && nd_set)
+ else if (is_memory(dev) && nd_set)
return a->mode;
return 0;
@@ -564,13 +596,18 @@ struct attribute_group nd_region_attribute_group = {
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);
-u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
+u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
+ struct nd_namespace_index *nsindex)
{
struct nd_interleave_set *nd_set = nd_region->nd_set;
- if (nd_set)
- return nd_set->cookie;
- return 0;
+ if (!nd_set)
+ return 0;
+
+ if (nsindex && __le16_to_cpu(nsindex->major) == 1
+ && __le16_to_cpu(nsindex->minor) == 1)
+ return nd_set->cookie1;
+ return nd_set->cookie2;
}
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
@@ -604,7 +641,7 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
{
struct nd_region *nd_region;
- if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
+ if (!probe && is_nd_region(dev)) {
int i;
nd_region = to_nd_region(dev);
@@ -622,12 +659,8 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
if (ndd)
atomic_dec(&nvdimm->busy);
}
-
- if (is_nd_pmem(dev))
- return;
}
- if (dev->parent && (is_nd_blk(dev->parent) || is_nd_pmem(dev->parent))
- && probe) {
+ if (dev->parent && is_nd_region(dev->parent) && probe) {
nd_region = to_nd_region(dev->parent);
nvdimm_bus_lock(dev);
if (nd_region->ns_seed == dev)
@@ -800,7 +833,7 @@ int nd_blk_region_init(struct nd_region *nd_region)
return 0;
if (nd_region->ndr_mappings < 1) {
- dev_err(dev, "invalid BLK region\n");
+ dev_dbg(dev, "invalid BLK region\n");
return -ENXIO;
}
@@ -1015,8 +1048,8 @@ void nvdimm_flush(struct nd_region *nd_region)
* The first wmb() is needed to 'sfence' all previous writes
* such that they are architecturally visible for the platform
* buffer flush. Note that we've already arranged for pmem
- * writes to avoid the cache via arch_memcpy_to_pmem(). The
- * final wmb() ensures ordering for the NVDIMM flush write.
+ * writes to avoid the cache via memcpy_flushcache(). The final
+ * wmb() ensures ordering for the NVDIMM flush write.
*/
wmb();
for (i = 0; i < nd_region->ndr_mappings; i++)
@@ -1038,8 +1071,9 @@ int nvdimm_has_flush(struct nd_region *nd_region)
{
int i;
- /* no nvdimm == flushing capability unknown */
- if (nd_region->ndr_mappings == 0)
+ /* no nvdimm or pmem api == flushing capability unknown */
+ if (nd_region->ndr_mappings == 0
+ || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
return -ENXIO;
for (i = 0; i < nd_region->ndr_mappings; i++) {
@@ -1059,6 +1093,12 @@ int nvdimm_has_flush(struct nd_region *nd_region)
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
+int nvdimm_has_cache(struct nd_region *nd_region)
+{
+ return is_nd_pmem(&nd_region->dev);
+}
+EXPORT_SYMBOL_GPL(nvdimm_has_cache);
+
void __exit nd_region_devs_exit(void)
{
ida_destroy(&region_ida);
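
nd_region_interleave_set_cookie() above now chooses between the two interleave-set cookies based on the namespace-index version found on the first mapping. A minimal model of that choice is shown below; the cookie values are arbitrary examples.

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    static uint64_t pick_cookie(bool have_index, int major, int minor,
                                uint64_t cookie1, uint64_t cookie2)
    {
            /* v1.1 index blocks keep the old cookie; anything newer, or an
             * uninitialized index, uses cookie2 */
            if (have_index && major == 1 && minor == 1)
                    return cookie1;
            return cookie2;
    }

    int main(void)
    {
            printf("%#llx\n", (unsigned long long)pick_cookie(true, 1, 1, 0x11, 0x22));  /* 0x11 */
            printf("%#llx\n", (unsigned long long)pick_cookie(true, 1, 2, 0x11, 0x22));  /* 0x22 */
            printf("%#llx\n", (unsigned long long)pick_cookie(false, 0, 0, 0x11, 0x22)); /* 0x22 */
            return 0;
    }
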
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5b1ac79fb607..b7a84c523475 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2303,14 +2303,16 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return result;
}
-static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
+static void nvme_reset_prepare(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
+ nvme_dev_disable(dev, false);
+}
- if (prepare)
- nvme_dev_disable(dev, false);
- else
- nvme_reset_ctrl(&dev->ctrl);
+static void nvme_reset_done(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+ nvme_reset_ctrl(&dev->ctrl);
}
static void nvme_shutdown(struct pci_dev *pdev)
@@ -2434,7 +2436,8 @@ static const struct pci_error_handlers nvme_err_handler = {
.error_detected = nvme_error_detected,
.slot_reset = nvme_slot_reset,
.resume = nvme_error_resume,
- .reset_notify = nvme_reset_notify,
+ .reset_prepare = nvme_reset_prepare,
+ .reset_done = nvme_reset_done,
};
static const struct pci_device_id nvme_id_table[] = {
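
The nvme hunk above replaces the single reset_notify(bool prepare) hook with separate reset_prepare() and reset_done() callbacks invoked before and after the reset. The sketch below shows the general split-callback shape in user space; the struct is a stand-in, not the real struct pci_error_handlers.

    #include <stdio.h>

    struct err_handlers {
            void (*reset_prepare)(void *dev);
            void (*reset_done)(void *dev);
    };

    static void my_prepare(void *dev) { printf("disable %p\n", dev); }
    static void my_done(void *dev)    { printf("re-enable %p\n", dev); }

    static void do_reset(const struct err_handlers *eh, void *dev)
    {
            if (eh->reset_prepare)
                    eh->reset_prepare(dev);
            /* ... the bus performs the actual reset here ... */
            if (eh->reset_done)
                    eh->reset_done(dev);
    }

    int main(void)
    {
            struct err_handlers eh = { .reset_prepare = my_prepare,
                                       .reset_done = my_done };
            int dummy;

            do_reset(&eh, &dummy);
            return 0;
    }
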
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index d7efd9d458aa..97dc01c81438 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -1,4 +1,4 @@
-obj-y = base.o device.o platform.o
+obj-y = base.o device.o platform.o property.o
obj-$(CONFIG_OF_DYNAMIC) += dynamic.o
obj-$(CONFIG_OF_FLATTREE) += fdt.o
obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 72914cdfce2a..580bbf6ca2b1 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -710,7 +710,7 @@ static int __of_address_to_resource(struct device_node *dev,
*
* Note that if your address is a PIO address, the conversion will fail if
* the physical address can't be internally converted to an IO token with
- * pci_address_to_pio(), that is because it's either called to early or it
+ * pci_address_to_pio(), that is because it's either called too early or it
* can't be matched to any host bridge IO space
*/
int of_address_to_resource(struct device_node *dev, int index,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 28d5f53bc631..686628d1dfa6 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -155,7 +155,7 @@ int __of_add_property_sysfs(struct device_node *np, struct property *pp)
sysfs_bin_attr_init(&pp->attr);
pp->attr.attr.name = safe_name(&np->kobj, pp->name);
- pp->attr.attr.mode = secure ? S_IRUSR : S_IRUGO;
+ pp->attr.attr.mode = secure ? 0400 : 0444;
pp->attr.size = secure ? 0 : pp->length;
pp->attr.read = of_node_property_read;
@@ -773,16 +773,31 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent,
return NULL;
__for_each_child_of_node(parent, child) {
- const char *name = strrchr(child->full_name, '/');
- if (WARN(!name, "malformed device_node %s\n", child->full_name))
- continue;
- name++;
+ const char *name = kbasename(child->full_name);
if (strncmp(path, name, len) == 0 && (strlen(name) == len))
return child;
}
return NULL;
}
+struct device_node *__of_find_node_by_full_path(struct device_node *node,
+ const char *path)
+{
+ const char *separator = strchr(path, ':');
+
+ while (node && *path == '/') {
+ struct device_node *tmp = node;
+
+ path++; /* Increment past '/' delimiter */
+ node = __of_find_node_by_path(node, path);
+ of_node_put(tmp);
+ path = strchrnul(path, '/');
+ if (separator && separator < path)
+ break;
+ }
+ return node;
+}
+
/**
* of_find_node_opts_by_path - Find a node matching a full OF path
* @path: Either the full path to match, or if the path does not
@@ -842,16 +857,7 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt
raw_spin_lock_irqsave(&devtree_lock, flags);
if (!np)
np = of_node_get(of_root);
- while (np && *path == '/') {
- struct device_node *tmp = np;
-
- path++; /* Increment past '/' delimiter */
- np = __of_find_node_by_path(np, path);
- of_node_put(tmp);
- path = strchrnul(path, '/');
- if (separator && separator < path)
- break;
- }
+ np = __of_find_node_by_full_path(np, path);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return np;
}
@@ -1113,458 +1119,6 @@ struct device_node *of_find_node_by_phandle(phandle handle)
}
EXPORT_SYMBOL(of_find_node_by_phandle);
-/**
- * of_property_count_elems_of_size - Count the number of elements in a property
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @elem_size: size of the individual element
- *
- * Search for a property in a device node and count the number of elements of
- * size elem_size in it. Returns number of elements on sucess, -EINVAL if the
- * property does not exist or its length does not match a multiple of elem_size
- * and -ENODATA if the property does not have a value.
- */
-int of_property_count_elems_of_size(const struct device_node *np,
- const char *propname, int elem_size)
-{
- struct property *prop = of_find_property(np, propname, NULL);
-
- if (!prop)
- return -EINVAL;
- if (!prop->value)
- return -ENODATA;
-
- if (prop->length % elem_size != 0) {
- pr_err("size of %s in node %s is not a multiple of %d\n",
- propname, np->full_name, elem_size);
- return -EINVAL;
- }
-
- return prop->length / elem_size;
-}
-EXPORT_SYMBOL_GPL(of_property_count_elems_of_size);
-
-/**
- * of_find_property_value_of_size
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @min: minimum allowed length of property value
- * @max: maximum allowed length of property value (0 means unlimited)
- * @len: if !=NULL, actual length is written to here
- *
- * Search for a property in a device node and valid the requested size.
- * Returns the property value on success, -EINVAL if the property does not
- * exist, -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data is too small or too large.
- *
- */
-static void *of_find_property_value_of_size(const struct device_node *np,
- const char *propname, u32 min, u32 max, size_t *len)
-{
- struct property *prop = of_find_property(np, propname, NULL);
-
- if (!prop)
- return ERR_PTR(-EINVAL);
- if (!prop->value)
- return ERR_PTR(-ENODATA);
- if (prop->length < min)
- return ERR_PTR(-EOVERFLOW);
- if (max && prop->length > max)
- return ERR_PTR(-EOVERFLOW);
-
- if (len)
- *len = prop->length;
-
- return prop->value;
-}
-
-/**
- * of_property_read_u32_index - Find and read a u32 from a multi-value property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @index: index of the u32 in the list of values
- * @out_value: pointer to return value, modified only if no error.
- *
- * Search for a property in a device node and read nth 32-bit value from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * The out_value is modified only if a valid u32 value can be decoded.
- */
-int of_property_read_u32_index(const struct device_node *np,
- const char *propname,
- u32 index, u32 *out_value)
-{
- const u32 *val = of_find_property_value_of_size(np, propname,
- ((index + 1) * sizeof(*out_value)),
- 0,
- NULL);
-
- if (IS_ERR(val))
- return PTR_ERR(val);
-
- *out_value = be32_to_cpup(((__be32 *)val) + index);
- return 0;
-}
-EXPORT_SYMBOL_GPL(of_property_read_u32_index);
-
-/**
- * of_property_read_u64_index - Find and read a u64 from a multi-value property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @index: index of the u64 in the list of values
- * @out_value: pointer to return value, modified only if no error.
- *
- * Search for a property in a device node and read nth 64-bit value from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * The out_value is modified only if a valid u64 value can be decoded.
- */
-int of_property_read_u64_index(const struct device_node *np,
- const char *propname,
- u32 index, u64 *out_value)
-{
- const u64 *val = of_find_property_value_of_size(np, propname,
- ((index + 1) * sizeof(*out_value)),
- 0, NULL);
-
- if (IS_ERR(val))
- return PTR_ERR(val);
-
- *out_value = be64_to_cpup(((__be64 *)val) + index);
- return 0;
-}
-EXPORT_SYMBOL_GPL(of_property_read_u64_index);
-
-/**
- * of_property_read_variable_u8_array - Find and read an array of u8 from a
- * property, with bounds on the minimum and maximum array size.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz_min: minimum number of array elements to read
- * @sz_max: maximum number of array elements to read, if zero there is no
- * upper limit on the number of elements in the dts entry but only
- * sz_min will be read.
- *
- * Search for a property in a device node and read 8-bit value(s) from
- * it. Returns number of elements read on success, -EINVAL if the property
- * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
- * if the property data is smaller than sz_min or longer than sz_max.
- *
- * dts entry of array should be like:
- * property = /bits/ 8 <0x50 0x60 0x70>;
- *
- * The out_values is modified only if a valid u8 value can be decoded.
- */
-int of_property_read_variable_u8_array(const struct device_node *np,
- const char *propname, u8 *out_values,
- size_t sz_min, size_t sz_max)
-{
- size_t sz, count;
- const u8 *val = of_find_property_value_of_size(np, propname,
- (sz_min * sizeof(*out_values)),
- (sz_max * sizeof(*out_values)),
- &sz);
-
- if (IS_ERR(val))
- return PTR_ERR(val);
-
- if (!sz_max)
- sz = sz_min;
- else
- sz /= sizeof(*out_values);
-
- count = sz;
- while (count--)
- *out_values++ = *val++;
-
- return sz;
-}
-EXPORT_SYMBOL_GPL(of_property_read_variable_u8_array);
-
-/**
- * of_property_read_variable_u16_array - Find and read an array of u16 from a
- * property, with bounds on the minimum and maximum array size.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz_min: minimum number of array elements to read
- * @sz_max: maximum number of array elements to read, if zero there is no
- * upper limit on the number of elements in the dts entry but only
- * sz_min will be read.
- *
- * Search for a property in a device node and read 16-bit value(s) from
- * it. Returns number of elements read on success, -EINVAL if the property
- * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
- * if the property data is smaller than sz_min or longer than sz_max.
- *
- * dts entry of array should be like:
- * property = /bits/ 16 <0x5000 0x6000 0x7000>;
- *
- * The out_values is modified only if a valid u16 value can be decoded.
- */
-int of_property_read_variable_u16_array(const struct device_node *np,
- const char *propname, u16 *out_values,
- size_t sz_min, size_t sz_max)
-{
- size_t sz, count;
- const __be16 *val = of_find_property_value_of_size(np, propname,
- (sz_min * sizeof(*out_values)),
- (sz_max * sizeof(*out_values)),
- &sz);
-
- if (IS_ERR(val))
- return PTR_ERR(val);
-
- if (!sz_max)
- sz = sz_min;
- else
- sz /= sizeof(*out_values);
-
- count = sz;
- while (count--)
- *out_values++ = be16_to_cpup(val++);
-
- return sz;
-}
-EXPORT_SYMBOL_GPL(of_property_read_variable_u16_array);
-
-/**
- * of_property_read_variable_u32_array - Find and read an array of 32 bit
- * integers from a property, with bounds on the minimum and maximum array size.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz_min: minimum number of array elements to read
- * @sz_max: maximum number of array elements to read, if zero there is no
- * upper limit on the number of elements in the dts entry but only
- * sz_min will be read.
- *
- * Search for a property in a device node and read 32-bit value(s) from
- * it. Returns number of elements read on success, -EINVAL if the property
- * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
- * if the property data is smaller than sz_min or longer than sz_max.
- *
- * The out_values is modified only if a valid u32 value can be decoded.
- */
-int of_property_read_variable_u32_array(const struct device_node *np,
- const char *propname, u32 *out_values,
- size_t sz_min, size_t sz_max)
-{
- size_t sz, count;
- const __be32 *val = of_find_property_value_of_size(np, propname,
- (sz_min * sizeof(*out_values)),
- (sz_max * sizeof(*out_values)),
- &sz);
-
- if (IS_ERR(val))
- return PTR_ERR(val);
-
- if (!sz_max)
- sz = sz_min;
- else
- sz /= sizeof(*out_values);
-
- count = sz;
- while (count--)
- *out_values++ = be32_to_cpup(val++);
-
- return sz;
-}
-EXPORT_SYMBOL_GPL(of_property_read_variable_u32_array);
-
-/**
- * of_property_read_u64 - Find and read a 64 bit integer from a property
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_value: pointer to return value, modified only if return value is 0.
- *
- * Search for a property in a device node and read a 64-bit value from
- * it. Returns 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * The out_value is modified only if a valid u64 value can be decoded.
- */
-int of_property_read_u64(const struct device_node *np, const char *propname,
- u64 *out_value)
-{
- const __be32 *val = of_find_property_value_of_size(np, propname,
- sizeof(*out_value),
- 0,
- NULL);
-
- if (IS_ERR(val))
- return PTR_ERR(val);
-
- *out_value = of_read_number(val, 2);
- return 0;
-}
-EXPORT_SYMBOL_GPL(of_property_read_u64);
-
-/**
- * of_property_read_variable_u64_array - Find and read an array of 64 bit
- * integers from a property, with bounds on the minimum and maximum array size.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz_min: minimum number of array elements to read
- * @sz_max: maximum number of array elements to read, if zero there is no
- * upper limit on the number of elements in the dts entry but only
- * sz_min will be read.
- *
- * Search for a property in a device node and read 64-bit value(s) from
- * it. Returns number of elements read on success, -EINVAL if the property
- * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
- * if the property data is smaller than sz_min or longer than sz_max.
- *
- * The out_values is modified only if a valid u64 value can be decoded.
- */
-int of_property_read_variable_u64_array(const struct device_node *np,
- const char *propname, u64 *out_values,
- size_t sz_min, size_t sz_max)
-{
- size_t sz, count;
- const __be32 *val = of_find_property_value_of_size(np, propname,
- (sz_min * sizeof(*out_values)),
- (sz_max * sizeof(*out_values)),
- &sz);
-
- if (IS_ERR(val))
- return PTR_ERR(val);
-
- if (!sz_max)
- sz = sz_min;
- else
- sz /= sizeof(*out_values);
-
- count = sz;
- while (count--) {
- *out_values++ = of_read_number(val, 2);
- val += 2;
- }
-
- return sz;
-}
-EXPORT_SYMBOL_GPL(of_property_read_variable_u64_array);
-
-/**
- * of_property_read_string - Find and read a string from a property
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_string: pointer to null terminated return string, modified only if
- * return value is 0.
- *
- * Search for a property in a device tree node and retrieve a null
- * terminated string value (pointer to data, not a copy). Returns 0 on
- * success, -EINVAL if the property does not exist, -ENODATA if property
- * does not have a value, and -EILSEQ if the string is not null-terminated
- * within the length of the property data.
- *
- * The out_string pointer is modified only if a valid string can be decoded.
- */
-int of_property_read_string(const struct device_node *np, const char *propname,
- const char **out_string)
-{
- const struct property *prop = of_find_property(np, propname, NULL);
- if (!prop)
- return -EINVAL;
- if (!prop->value)
- return -ENODATA;
- if (strnlen(prop->value, prop->length) >= prop->length)
- return -EILSEQ;
- *out_string = prop->value;
- return 0;
-}
-EXPORT_SYMBOL_GPL(of_property_read_string);
-
-/**
- * of_property_match_string() - Find string in a list and return index
- * @np: pointer to node containing string list property
- * @propname: string list property name
- * @string: pointer to string to search for in string list
- *
- * This function searches a string list property and returns the index
- * of a specific string value.
- */
-int of_property_match_string(const struct device_node *np, const char *propname,
- const char *string)
-{
- const struct property *prop = of_find_property(np, propname, NULL);
- size_t l;
- int i;
- const char *p, *end;
-
- if (!prop)
- return -EINVAL;
- if (!prop->value)
- return -ENODATA;
-
- p = prop->value;
- end = p + prop->length;
-
- for (i = 0; p < end; i++, p += l) {
- l = strnlen(p, end - p) + 1;
- if (p + l > end)
- return -EILSEQ;
- pr_debug("comparing %s with %s\n", string, p);
- if (strcmp(string, p) == 0)
- return i; /* Found it; return index */
- }
- return -ENODATA;
-}
-EXPORT_SYMBOL_GPL(of_property_match_string);
-
-/**
- * of_property_read_string_helper() - Utility helper for parsing string properties
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_strs: output array of string pointers.
- * @sz: number of array elements to read.
- * @skip: Number of strings to skip over at beginning of list.
- *
- * Don't call this function directly. It is a utility helper for the
- * of_property_read_string*() family of functions.
- */
-int of_property_read_string_helper(const struct device_node *np,
- const char *propname, const char **out_strs,
- size_t sz, int skip)
-{
- const struct property *prop = of_find_property(np, propname, NULL);
- int l = 0, i = 0;
- const char *p, *end;
-
- if (!prop)
- return -EINVAL;
- if (!prop->value)
- return -ENODATA;
- p = prop->value;
- end = p + prop->length;
-
- for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) {
- l = strnlen(p, end - p) + 1;
- if (p + l > end)
- return -EILSEQ;
- if (out_strs && i >= skip)
- *out_strs++ = p;
- }
- i -= skip;
- return i <= 0 ? -ENODATA : i;
-}
-EXPORT_SYMBOL_GPL(of_property_read_string_helper);
-
void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
{
int i;
@@ -1601,6 +1155,7 @@ int of_phandle_iterator_init(struct of_phandle_iterator *it,
return 0;
}
+EXPORT_SYMBOL_GPL(of_phandle_iterator_init);
int of_phandle_iterator_next(struct of_phandle_iterator *it)
{
@@ -1670,6 +1225,7 @@ err:
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(of_phandle_iterator_next);
int of_phandle_iterator_args(struct of_phandle_iterator *it,
uint32_t *args,
@@ -2211,47 +1767,6 @@ int of_alias_get_highest_id(const char *stem)
}
EXPORT_SYMBOL_GPL(of_alias_get_highest_id);
-const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
- u32 *pu)
-{
- const void *curv = cur;
-
- if (!prop)
- return NULL;
-
- if (!cur) {
- curv = prop->value;
- goto out_val;
- }
-
- curv += sizeof(*cur);
- if (curv >= prop->value + prop->length)
- return NULL;
-
-out_val:
- *pu = be32_to_cpup(curv);
- return curv;
-}
-EXPORT_SYMBOL_GPL(of_prop_next_u32);
-
-const char *of_prop_next_string(struct property *prop, const char *cur)
-{
- const void *curv = cur;
-
- if (!prop)
- return NULL;
-
- if (!cur)
- return prop->value;
-
- curv += strlen(cur) + 1;
- if (curv >= prop->value + prop->length)
- return NULL;
-
- return curv;
-}
-EXPORT_SYMBOL_GPL(of_prop_next_string);
-
/**
* of_console_check() - Test and setup console for DT setup
* @dn - Pointer to device node
@@ -2325,243 +1840,3 @@ int of_find_last_cache_level(unsigned int cpu)
return cache_level;
}
-
-/**
- * of_graph_parse_endpoint() - parse common endpoint node properties
- * @node: pointer to endpoint device_node
- * @endpoint: pointer to the OF endpoint data structure
- *
- * The caller should hold a reference to @node.
- */
-int of_graph_parse_endpoint(const struct device_node *node,
- struct of_endpoint *endpoint)
-{
- struct device_node *port_node = of_get_parent(node);
-
- WARN_ONCE(!port_node, "%s(): endpoint %s has no parent node\n",
- __func__, node->full_name);
-
- memset(endpoint, 0, sizeof(*endpoint));
-
- endpoint->local_node = node;
- /*
- * It doesn't matter whether the two calls below succeed.
- * If they don't then the default value 0 is used.
- */
- of_property_read_u32(port_node, "reg", &endpoint->port);
- of_property_read_u32(node, "reg", &endpoint->id);
-
- of_node_put(port_node);
-
- return 0;
-}
-EXPORT_SYMBOL(of_graph_parse_endpoint);
-
-/**
- * of_graph_get_port_by_id() - get the port matching a given id
- * @parent: pointer to the parent device node
- * @id: id of the port
- *
- * Return: A 'port' node pointer with refcount incremented. The caller
- * has to use of_node_put() on it when done.
- */
-struct device_node *of_graph_get_port_by_id(struct device_node *parent, u32 id)
-{
- struct device_node *node, *port;
-
- node = of_get_child_by_name(parent, "ports");
- if (node)
- parent = node;
-
- for_each_child_of_node(parent, port) {
- u32 port_id = 0;
-
- if (of_node_cmp(port->name, "port") != 0)
- continue;
- of_property_read_u32(port, "reg", &port_id);
- if (id == port_id)
- break;
- }
-
- of_node_put(node);
-
- return port;
-}
-EXPORT_SYMBOL(of_graph_get_port_by_id);
-
-/**
- * of_graph_get_next_endpoint() - get next endpoint node
- * @parent: pointer to the parent device node
- * @prev: previous endpoint node, or NULL to get first
- *
- * Return: An 'endpoint' node pointer with refcount incremented. Refcount
- * of the passed @prev node is decremented.
- */
-struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
- struct device_node *prev)
-{
- struct device_node *endpoint;
- struct device_node *port;
-
- if (!parent)
- return NULL;
-
- /*
- * Start by locating the port node. If no previous endpoint is specified
- * search for the first port node, otherwise get the previous endpoint
- * parent port node.
- */
- if (!prev) {
- struct device_node *node;
-
- node = of_get_child_by_name(parent, "ports");
- if (node)
- parent = node;
-
- port = of_get_child_by_name(parent, "port");
- of_node_put(node);
-
- if (!port) {
- pr_err("graph: no port node found in %s\n",
- parent->full_name);
- return NULL;
- }
- } else {
- port = of_get_parent(prev);
- if (WARN_ONCE(!port, "%s(): endpoint %s has no parent node\n",
- __func__, prev->full_name))
- return NULL;
- }
-
- while (1) {
- /*
- * Now that we have a port node, get the next endpoint by
- * getting the next child. If the previous endpoint is NULL this
- * will return the first child.
- */
- endpoint = of_get_next_child(port, prev);
- if (endpoint) {
- of_node_put(port);
- return endpoint;
- }
-
- /* No more endpoints under this port, try the next one. */
- prev = NULL;
-
- do {
- port = of_get_next_child(parent, port);
- if (!port)
- return NULL;
- } while (of_node_cmp(port->name, "port"));
- }
-}
-EXPORT_SYMBOL(of_graph_get_next_endpoint);
-
-/**
- * of_graph_get_endpoint_by_regs() - get endpoint node of specific identifiers
- * @parent: pointer to the parent device node
- * @port_reg: identifier (value of reg property) of the parent port node
- * @reg: identifier (value of reg property) of the endpoint node
- *
- * Return: An 'endpoint' node pointer which is identified by reg and at the same
- * is the child of a port node identified by port_reg. reg and port_reg are
- * ignored when they are -1.
- */
-struct device_node *of_graph_get_endpoint_by_regs(
- const struct device_node *parent, int port_reg, int reg)
-{
- struct of_endpoint endpoint;
- struct device_node *node = NULL;
-
- for_each_endpoint_of_node(parent, node) {
- of_graph_parse_endpoint(node, &endpoint);
- if (((port_reg == -1) || (endpoint.port == port_reg)) &&
- ((reg == -1) || (endpoint.id == reg)))
- return node;
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(of_graph_get_endpoint_by_regs);
-
-/**
- * of_graph_get_remote_port_parent() - get remote port's parent node
- * @node: pointer to a local endpoint device_node
- *
- * Return: Remote device node associated with remote endpoint node linked
- * to @node. Use of_node_put() on it when done.
- */
-struct device_node *of_graph_get_remote_port_parent(
- const struct device_node *node)
-{
- struct device_node *np;
- unsigned int depth;
-
- /* Get remote endpoint node. */
- np = of_parse_phandle(node, "remote-endpoint", 0);
-
- /* Walk 3 levels up only if there is 'ports' node. */
- for (depth = 3; depth && np; depth--) {
- np = of_get_next_parent(np);
- if (depth == 2 && of_node_cmp(np->name, "ports"))
- break;
- }
- return np;
-}
-EXPORT_SYMBOL(of_graph_get_remote_port_parent);
-
-/**
- * of_graph_get_remote_port() - get remote port node
- * @node: pointer to a local endpoint device_node
- *
- * Return: Remote port node associated with remote endpoint node linked
- * to @node. Use of_node_put() on it when done.
- */
-struct device_node *of_graph_get_remote_port(const struct device_node *node)
-{
- struct device_node *np;
-
- /* Get remote endpoint node. */
- np = of_parse_phandle(node, "remote-endpoint", 0);
- if (!np)
- return NULL;
- return of_get_next_parent(np);
-}
-EXPORT_SYMBOL(of_graph_get_remote_port);
-
-/**
- * of_graph_get_remote_node() - get remote parent device_node for given port/endpoint
- * @node: pointer to parent device_node containing graph port/endpoint
- * @port: identifier (value of reg property) of the parent port node
- * @endpoint: identifier (value of reg property) of the endpoint node
- *
- * Return: Remote device node associated with remote endpoint node linked
- * to @node. Use of_node_put() on it when done.
- */
-struct device_node *of_graph_get_remote_node(const struct device_node *node,
- u32 port, u32 endpoint)
-{
- struct device_node *endpoint_node, *remote;
-
- endpoint_node = of_graph_get_endpoint_by_regs(node, port, endpoint);
- if (!endpoint_node) {
- pr_debug("no valid endpoint (%d, %d) for node %s\n",
- port, endpoint, node->full_name);
- return NULL;
- }
-
- remote = of_graph_get_remote_port_parent(endpoint_node);
- of_node_put(endpoint_node);
- if (!remote) {
- pr_debug("no valid remote node\n");
- return NULL;
- }
-
- if (!of_device_is_available(remote)) {
- pr_debug("not available for remote node\n");
- return NULL;
- }
-
- return remote;
-}
-EXPORT_SYMBOL(of_graph_get_remote_node);
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index 888fdbc09992..0542cf8b6e3d 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -216,7 +216,7 @@ int of_property_notify(int action, struct device_node *np,
return of_reconfig_notify(action, &pr);
}
-void __of_attach_node(struct device_node *np)
+static void __of_attach_node(struct device_node *np)
{
const __be32 *phandle;
int sz;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 43bd69dceabf..ce30c9a588a4 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -91,7 +91,7 @@ void of_fdt_limit_memory(int limit)
* On match, returns a non-zero value with smaller values returned for more
* specific compatible values.
*/
-int of_fdt_is_compatible(const void *blob,
+static int of_fdt_is_compatible(const void *blob,
unsigned long node, const char *compat)
{
const char *cp;
@@ -1053,7 +1053,7 @@ u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
}
/**
- * early_init_dt_scan_memory - Look for an parse memory nodes
+ * early_init_dt_scan_memory - Look for and parse memory nodes
*/
int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
int depth, void *data)
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index d11437cb1187..6ce72aa65425 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -369,7 +369,10 @@ EXPORT_SYMBOL_GPL(of_irq_parse_one);
*/
int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
{
- int irq = irq_of_parse_and_map(dev, index);
+ int irq = of_irq_get(dev, index);
+
+ if (irq < 0)
+ return irq;
/* Only dereference the resource if both the
* resource and the irq are valid. */
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
index c175d9cd0bb5..3a05568f65df 100644
--- a/drivers/of/of_pci_irq.c
+++ b/drivers/of/of_pci_irq.c
@@ -113,7 +113,8 @@ EXPORT_SYMBOL_GPL(of_irq_parse_pci);
* @pin: PCI irq pin number; passed when used as map_irq callback. Unused
*
* @slot and @pin are unused, but included in the function so that this
- * function can be used directly as the map_irq callback to pci_fixup_irqs().
+ * function can be used directly as the map_irq callback to
+ * pci_assign_irq() and as the struct pci_host_bridge.map_irq pointer.
*/
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
{
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 4ebb0149d118..3ae12ffbf547 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -77,6 +77,9 @@ extern void *__unflatten_device_tree(const void *blob,
struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags);
__printf(2, 3) struct device_node *__of_node_dup(const struct device_node *np, const char *fmt, ...);
+struct device_node *__of_find_node_by_full_path(struct device_node *node,
+ const char *path);
+
extern const void *__of_get_property(const struct device_node *np,
const char *name, int *lenp);
extern int __of_add_property(struct device_node *np, struct property *prop);
@@ -90,7 +93,6 @@ extern int __of_update_property(struct device_node *np,
extern void __of_update_property_sysfs(struct device_node *np,
struct property *newprop, struct property *oldprop);
-extern void __of_attach_node(struct device_node *np);
extern int __of_attach_node_sysfs(struct device_node *np);
extern void __of_detach_node(struct device_node *np);
extern void __of_detach_node_sysfs(struct device_node *np);
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 7827786718d8..c0e4ee1cd1ba 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -132,6 +132,10 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov,
/* NOTE: Multiple mods of created nodes not supported */
tchild = of_get_child_by_name(target, cname);
if (tchild != NULL) {
+ /* new overlay phandle value conflicts with existing value */
+ if (child->phandle)
+ return -EINVAL;
+
/* apply overlay recursively */
ret = of_overlay_apply_one(ov, tchild, child);
of_node_put(tchild);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 703a42118ffc..b19524623498 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -99,7 +99,7 @@ static void of_device_make_bus_id(struct device *dev)
/* format arguments only used if dev_name() resolves to NULL */
dev_set_name(dev, dev_name(dev) ? "%s:%s" : "%s",
- strrchr(node->full_name, '/') + 1, dev_name(dev));
+ kbasename(node->full_name), dev_name(dev));
node = node->parent;
}
}
diff --git a/drivers/of/property.c b/drivers/of/property.c
new file mode 100644
index 000000000000..07c7c36c5ca8
--- /dev/null
+++ b/drivers/of/property.c
@@ -0,0 +1,806 @@
+/*
+ * drivers/of/property.c - Procedures for accessing and interpreting
+ * Devicetree properties and graphs.
+ *
+ * Initially created by copying procedures from drivers/of/base.c. This
+ * file contains the OF property as well as the OF graph interface
+ * functions.
+ *
+ * Paul Mackerras August 1996.
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
+ * {engebret|bergner}@us.ibm.com
+ *
+ * Adapted for sparc and sparc64 by David S. Miller [email protected]
+ *
+ * Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and
+ * Grant Likely.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "OF: " fmt
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/string.h>
+
+#include "of_private.h"
+
+/**
+ * of_property_count_elems_of_size - Count the number of elements in a property
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @elem_size: size of the individual element
+ *
+ * Search for a property in a device node and count the number of elements of
+ * size elem_size in it. Returns the number of elements on success, -EINVAL
+ * if the property does not exist or its length is not a multiple of
+ * elem_size, and -ENODATA if the property does not have a value.
+ */
+int of_property_count_elems_of_size(const struct device_node *np,
+ const char *propname, int elem_size)
+{
+ struct property *prop = of_find_property(np, propname, NULL);
+
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+
+ if (prop->length % elem_size != 0) {
+ pr_err("size of %s in node %s is not a multiple of %d\n",
+ propname, np->full_name, elem_size);
+ return -EINVAL;
+ }
+
+ return prop->length / elem_size;
+}
+EXPORT_SYMBOL_GPL(of_property_count_elems_of_size);
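
A minimal usage sketch for the helper above (illustrative only; the node
pointer is assumed to come from the caller and the "vendor,table" property
name is invented):

#include <linux/of.h>
#include <linux/printk.h>

static int count_table_cells(struct device_node *np)
{
	int n;

	/* Count how many u32 cells the property holds. */
	n = of_property_count_elems_of_size(np, "vendor,table", sizeof(u32));
	if (n < 0)
		return n;	/* -EINVAL or -ENODATA as documented above */

	pr_info("vendor,table has %d cells\n", n);
	return n;
}
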
+
+/**
+ * of_find_property_value_of_size
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @min: minimum allowed length of property value
+ * @max: maximum allowed length of property value (0 means unlimited)
+ * @len: if !=NULL, actual length is written to here
+ *
+ * Search for a property in a device node and validate the requested size.
+ * Returns the property value on success, -EINVAL if the property does not
+ * exist, -ENODATA if the property does not have a value, and -EOVERFLOW if the
+ * property data is too small or too large.
+ *
+ */
+static void *of_find_property_value_of_size(const struct device_node *np,
+ const char *propname, u32 min, u32 max, size_t *len)
+{
+ struct property *prop = of_find_property(np, propname, NULL);
+
+ if (!prop)
+ return ERR_PTR(-EINVAL);
+ if (!prop->value)
+ return ERR_PTR(-ENODATA);
+ if (prop->length < min)
+ return ERR_PTR(-EOVERFLOW);
+ if (max && prop->length > max)
+ return ERR_PTR(-EOVERFLOW);
+
+ if (len)
+ *len = prop->length;
+
+ return prop->value;
+}
+
+/**
+ * of_property_read_u32_index - Find and read a u32 from a multi-value property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @index: index of the u32 in the list of values
+ * @out_value: pointer to return value, modified only if no error.
+ *
+ * Search for a property in a device node and read the nth 32-bit value from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if the property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_value is modified only if a valid u32 value can be decoded.
+ */
+int of_property_read_u32_index(const struct device_node *np,
+ const char *propname,
+ u32 index, u32 *out_value)
+{
+ const u32 *val = of_find_property_value_of_size(np, propname,
+ ((index + 1) * sizeof(*out_value)),
+ 0,
+ NULL);
+
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ *out_value = be32_to_cpup(((__be32 *)val) + index);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_property_read_u32_index);
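
For illustration, a sketch of reading a single cell by index with the
function above ("vendor,thresholds" is a made-up property name):

#include <linux/of.h>

static int read_second_threshold(struct device_node *np, u32 *out)
{
	/* Fetch element 1, i.e. the second u32 cell of the property. */
	return of_property_read_u32_index(np, "vendor,thresholds", 1, out);
}
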
+
+/**
+ * of_property_read_u64_index - Find and read a u64 from a multi-value property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @index: index of the u64 in the list of values
+ * @out_value: pointer to return value, modified only if no error.
+ *
+ * Search for a property in a device node and read the nth 64-bit value from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if the property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_value is modified only if a valid u64 value can be decoded.
+ */
+int of_property_read_u64_index(const struct device_node *np,
+ const char *propname,
+ u32 index, u64 *out_value)
+{
+ const u64 *val = of_find_property_value_of_size(np, propname,
+ ((index + 1) * sizeof(*out_value)),
+ 0, NULL);
+
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ *out_value = be64_to_cpup(((__be64 *)val) + index);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_property_read_u64_index);
+
+/**
+ * of_property_read_variable_u8_array - Find and read an array of u8 from a
+ * property, with bounds on the minimum and maximum array size.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz_min: minimum number of array elements to read
+ * @sz_max: maximum number of array elements to read; if zero, there is no
+ * upper limit on the number of elements in the dts entry, but only
+ * sz_min elements will be read.
+ *
+ * Search for a property in a device node and read 8-bit value(s) from
+ * it. Returns the number of elements read on success, -EINVAL if the
+ * property does not exist, -ENODATA if the property does not have a value,
+ * and -EOVERFLOW if the property data is smaller than sz_min or longer than
+ * sz_max.
+ *
+ * The dts entry for the array should look like:
+ * property = /bits/ 8 <0x50 0x60 0x70>;
+ *
+ * The out_values is modified only if a valid u8 value can be decoded.
+ */
+int of_property_read_variable_u8_array(const struct device_node *np,
+ const char *propname, u8 *out_values,
+ size_t sz_min, size_t sz_max)
+{
+ size_t sz, count;
+ const u8 *val = of_find_property_value_of_size(np, propname,
+ (sz_min * sizeof(*out_values)),
+ (sz_max * sizeof(*out_values)),
+ &sz);
+
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ if (!sz_max)
+ sz = sz_min;
+ else
+ sz /= sizeof(*out_values);
+
+ count = sz;
+ while (count--)
+ *out_values++ = *val++;
+
+ return sz;
+}
+EXPORT_SYMBOL_GPL(of_property_read_variable_u8_array);
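
A short caller sketch matching the /bits/ 8 dts example above (illustrative;
"vendor,seed" is an invented property and the bounds are arbitrary):

#include <linux/of.h>

static int read_seed(struct device_node *np, u8 *buf)
{
	int n;

	/* Accept between one and three bytes; n is the count actually read. */
	n = of_property_read_variable_u8_array(np, "vendor,seed", buf, 1, 3);
	return n < 0 ? n : 0;
}
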
+
+/**
+ * of_property_read_variable_u16_array - Find and read an array of u16 from a
+ * property, with bounds on the minimum and maximum array size.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz_min: minimum number of array elements to read
+ * @sz_max: maximum number of array elements to read; if zero, there is no
+ * upper limit on the number of elements in the dts entry, but only
+ * sz_min elements will be read.
+ *
+ * Search for a property in a device node and read 16-bit value(s) from
+ * it. Returns the number of elements read on success, -EINVAL if the
+ * property does not exist, -ENODATA if the property does not have a value,
+ * and -EOVERFLOW if the property data is smaller than sz_min or longer than
+ * sz_max.
+ *
+ * The dts entry for the array should look like:
+ * property = /bits/ 16 <0x5000 0x6000 0x7000>;
+ *
+ * The out_values is modified only if a valid u16 value can be decoded.
+ */
+int of_property_read_variable_u16_array(const struct device_node *np,
+ const char *propname, u16 *out_values,
+ size_t sz_min, size_t sz_max)
+{
+ size_t sz, count;
+ const __be16 *val = of_find_property_value_of_size(np, propname,
+ (sz_min * sizeof(*out_values)),
+ (sz_max * sizeof(*out_values)),
+ &sz);
+
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ if (!sz_max)
+ sz = sz_min;
+ else
+ sz /= sizeof(*out_values);
+
+ count = sz;
+ while (count--)
+ *out_values++ = be16_to_cpup(val++);
+
+ return sz;
+}
+EXPORT_SYMBOL_GPL(of_property_read_variable_u16_array);
+
+/**
+ * of_property_read_variable_u32_array - Find and read an array of 32 bit
+ * integers from a property, with bounds on the minimum and maximum array size.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz_min: minimum number of array elements to read
+ * @sz_max: maximum number of array elements to read; if zero, there is no
+ * upper limit on the number of elements in the dts entry, but only
+ * sz_min elements will be read.
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it. Returns the number of elements read on success, -EINVAL if the
+ * property does not exist, -ENODATA if the property does not have a value,
+ * and -EOVERFLOW if the property data is smaller than sz_min or longer than
+ * sz_max.
+ *
+ * The out_values is modified only if a valid u32 value can be decoded.
+ */
+int of_property_read_variable_u32_array(const struct device_node *np,
+ const char *propname, u32 *out_values,
+ size_t sz_min, size_t sz_max)
+{
+ size_t sz, count;
+ const __be32 *val = of_find_property_value_of_size(np, propname,
+ (sz_min * sizeof(*out_values)),
+ (sz_max * sizeof(*out_values)),
+ &sz);
+
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ if (!sz_max)
+ sz = sz_min;
+ else
+ sz /= sizeof(*out_values);
+
+ count = sz;
+ while (count--)
+ *out_values++ = be32_to_cpup(val++);
+
+ return sz;
+}
+EXPORT_SYMBOL_GPL(of_property_read_variable_u32_array);
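
For context, a hedged sketch of how a fixed-size read can be layered on the
variable-size helper above (the in-tree of_property_read_u32_array() wrapper
in <linux/of.h> takes roughly this shape; "vendor,quirks" is invented):

#include <linux/of.h>

static int read_exactly_four(struct device_node *np, u32 *vals)
{
	int ret;

	/* sz_min == sz_max == 4: exactly four cells or an error. */
	ret = of_property_read_variable_u32_array(np, "vendor,quirks",
						  vals, 4, 4);
	return ret < 0 ? ret : 0;	/* collapse the positive count to 0 */
}
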
+
+/**
+ * of_property_read_u64 - Find and read a 64 bit integer from a property
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_value: pointer to return value, modified only if return value is 0.
+ *
+ * Search for a property in a device node and read a 64-bit value from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_value is modified only if a valid u64 value can be decoded.
+ */
+int of_property_read_u64(const struct device_node *np, const char *propname,
+ u64 *out_value)
+{
+ const __be32 *val = of_find_property_value_of_size(np, propname,
+ sizeof(*out_value),
+ 0,
+ NULL);
+
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ *out_value = of_read_number(val, 2);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_property_read_u64);
+
+/**
+ * of_property_read_variable_u64_array - Find and read an array of 64 bit
+ * integers from a property, with bounds on the minimum and maximum array size.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz_min: minimum number of array elements to read
+ * @sz_max: maximum number of array elements to read; if zero, there is no
+ * upper limit on the number of elements in the dts entry, but only
+ * sz_min elements will be read.
+ *
+ * Search for a property in a device node and read 64-bit value(s) from
+ * it. Returns the number of elements read on success, -EINVAL if the
+ * property does not exist, -ENODATA if the property does not have a value,
+ * and -EOVERFLOW if the property data is smaller than sz_min or longer than
+ * sz_max.
+ *
+ * The out_values is modified only if a valid u64 value can be decoded.
+ */
+int of_property_read_variable_u64_array(const struct device_node *np,
+ const char *propname, u64 *out_values,
+ size_t sz_min, size_t sz_max)
+{
+ size_t sz, count;
+ const __be32 *val = of_find_property_value_of_size(np, propname,
+ (sz_min * sizeof(*out_values)),
+ (sz_max * sizeof(*out_values)),
+ &sz);
+
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ if (!sz_max)
+ sz = sz_min;
+ else
+ sz /= sizeof(*out_values);
+
+ count = sz;
+ while (count--) {
+ *out_values++ = of_read_number(val, 2);
+ val += 2;
+ }
+
+ return sz;
+}
+EXPORT_SYMBOL_GPL(of_property_read_variable_u64_array);
+
+/**
+ * of_property_read_string - Find and read a string from a property
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_string: pointer to null terminated return string, modified only if
+ * return value is 0.
+ *
+ * Search for a property in a device tree node and retrieve a null
+ * terminated string value (pointer to data, not a copy). Returns 0 on
+ * success, -EINVAL if the property does not exist, -ENODATA if property
+ * does not have a value, and -EILSEQ if the string is not null-terminated
+ * within the length of the property data.
+ *
+ * The out_string pointer is modified only if a valid string can be decoded.
+ */
+int of_property_read_string(const struct device_node *np, const char *propname,
+ const char **out_string)
+{
+ const struct property *prop = of_find_property(np, propname, NULL);
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+ if (strnlen(prop->value, prop->length) >= prop->length)
+ return -EILSEQ;
+ *out_string = prop->value;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_property_read_string);
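
A small usage sketch for of_property_read_string() (illustrative; it assumes
the node carries an optional "label" string property):

#include <linux/of.h>

static const char *node_label(struct device_node *np)
{
	const char *label;

	/* The returned pointer aliases the property data; do not kfree() it. */
	if (of_property_read_string(np, "label", &label))
		return "unlabelled";
	return label;
}
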
+
+/**
+ * of_property_match_string() - Find string in a list and return index
+ * @np: pointer to node containing string list property
+ * @propname: string list property name
+ * @string: pointer to string to search for in string list
+ *
+ * This function searches a string list property and returns the index
+ * of a specific string value.
+ */
+int of_property_match_string(const struct device_node *np, const char *propname,
+ const char *string)
+{
+ const struct property *prop = of_find_property(np, propname, NULL);
+ size_t l;
+ int i;
+ const char *p, *end;
+
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+
+ p = prop->value;
+ end = p + prop->length;
+
+ for (i = 0; p < end; i++, p += l) {
+ l = strnlen(p, end - p) + 1;
+ if (p + l > end)
+ return -EILSEQ;
+ pr_debug("comparing %s with %s\n", string, p);
+ if (strcmp(string, p) == 0)
+ return i; /* Found it; return index */
+ }
+ return -ENODATA;
+}
+EXPORT_SYMBOL_GPL(of_property_match_string);
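
An illustrative caller for of_property_match_string(), assuming the node has
a standard "clock-names" string list:

#include <linux/of.h>

static int bus_clock_index(struct device_node *np)
{
	/* Index of "bus" within clock-names, or a negative errno. */
	return of_property_match_string(np, "clock-names", "bus");
}
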
+
+/**
+ * of_property_read_string_helper() - Utility helper for parsing string properties
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_strs: output array of string pointers.
+ * @sz: number of array elements to read.
+ * @skip: Number of strings to skip over at beginning of list.
+ *
+ * Don't call this function directly. It is a utility helper for the
+ * of_property_read_string*() family of functions.
+ */
+int of_property_read_string_helper(const struct device_node *np,
+ const char *propname, const char **out_strs,
+ size_t sz, int skip)
+{
+ const struct property *prop = of_find_property(np, propname, NULL);
+ int l = 0, i = 0;
+ const char *p, *end;
+
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+ p = prop->value;
+ end = p + prop->length;
+
+ for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) {
+ l = strnlen(p, end - p) + 1;
+ if (p + l > end)
+ return -EILSEQ;
+ if (out_strs && i >= skip)
+ *out_strs++ = p;
+ }
+ i -= skip;
+ return i <= 0 ? -ENODATA : i;
+}
+EXPORT_SYMBOL_GPL(of_property_read_string_helper);
+
+const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
+ u32 *pu)
+{
+ const void *curv = cur;
+
+ if (!prop)
+ return NULL;
+
+ if (!cur) {
+ curv = prop->value;
+ goto out_val;
+ }
+
+ curv += sizeof(*cur);
+ if (curv >= prop->value + prop->length)
+ return NULL;
+
+out_val:
+ *pu = be32_to_cpup(curv);
+ return curv;
+}
+EXPORT_SYMBOL_GPL(of_prop_next_u32);
+
+const char *of_prop_next_string(struct property *prop, const char *cur)
+{
+ const void *curv = cur;
+
+ if (!prop)
+ return NULL;
+
+ if (!cur)
+ return prop->value;
+
+ curv += strlen(cur) + 1;
+ if (curv >= prop->value + prop->length)
+ return NULL;
+
+ return curv;
+}
+EXPORT_SYMBOL_GPL(of_prop_next_string);
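
The two iterators above are normally consumed through the
of_property_for_each_u32() and of_property_for_each_string() helpers in
<linux/of.h>; a short sketch ("vendor,values" is an invented property):

#include <linux/of.h>
#include <linux/printk.h>

static void dump_u32_values(struct device_node *np)
{
	struct property *prop;
	const __be32 *p;
	u32 v;

	of_property_for_each_u32(np, "vendor,values", prop, p, v)
		pr_info("value: %u\n", v);
}
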
+
+/**
+ * of_graph_parse_endpoint() - parse common endpoint node properties
+ * @node: pointer to endpoint device_node
+ * @endpoint: pointer to the OF endpoint data structure
+ *
+ * The caller should hold a reference to @node.
+ */
+int of_graph_parse_endpoint(const struct device_node *node,
+ struct of_endpoint *endpoint)
+{
+ struct device_node *port_node = of_get_parent(node);
+
+ WARN_ONCE(!port_node, "%s(): endpoint %s has no parent node\n",
+ __func__, node->full_name);
+
+ memset(endpoint, 0, sizeof(*endpoint));
+
+ endpoint->local_node = node;
+ /*
+ * It doesn't matter whether the two calls below succeed.
+ * If they don't then the default value 0 is used.
+ */
+ of_property_read_u32(port_node, "reg", &endpoint->port);
+ of_property_read_u32(node, "reg", &endpoint->id);
+
+ of_node_put(port_node);
+
+ return 0;
+}
+EXPORT_SYMBOL(of_graph_parse_endpoint);
+
+/**
+ * of_graph_get_port_by_id() - get the port matching a given id
+ * @parent: pointer to the parent device node
+ * @id: id of the port
+ *
+ * Return: A 'port' node pointer with refcount incremented. The caller
+ * has to use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_port_by_id(struct device_node *parent, u32 id)
+{
+ struct device_node *node, *port;
+
+ node = of_get_child_by_name(parent, "ports");
+ if (node)
+ parent = node;
+
+ for_each_child_of_node(parent, port) {
+ u32 port_id = 0;
+
+ if (of_node_cmp(port->name, "port") != 0)
+ continue;
+ of_property_read_u32(port, "reg", &port_id);
+ if (id == port_id)
+ break;
+ }
+
+ of_node_put(node);
+
+ return port;
+}
+EXPORT_SYMBOL(of_graph_get_port_by_id);
+
+/**
+ * of_graph_get_next_endpoint() - get next endpoint node
+ * @parent: pointer to the parent device node
+ * @prev: previous endpoint node, or NULL to get first
+ *
+ * Return: An 'endpoint' node pointer with refcount incremented. Refcount
+ * of the passed @prev node is decremented.
+ */
+struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
+ struct device_node *prev)
+{
+ struct device_node *endpoint;
+ struct device_node *port;
+
+ if (!parent)
+ return NULL;
+
+ /*
+ * Start by locating the port node. If no previous endpoint is specified
+ * search for the first port node, otherwise get the previous endpoint
+ * parent port node.
+ */
+ if (!prev) {
+ struct device_node *node;
+
+ node = of_get_child_by_name(parent, "ports");
+ if (node)
+ parent = node;
+
+ port = of_get_child_by_name(parent, "port");
+ of_node_put(node);
+
+ if (!port) {
+ pr_err("graph: no port node found in %s\n",
+ parent->full_name);
+ return NULL;
+ }
+ } else {
+ port = of_get_parent(prev);
+ if (WARN_ONCE(!port, "%s(): endpoint %s has no parent node\n",
+ __func__, prev->full_name))
+ return NULL;
+ }
+
+ while (1) {
+ /*
+ * Now that we have a port node, get the next endpoint by
+ * getting the next child. If the previous endpoint is NULL this
+ * will return the first child.
+ */
+ endpoint = of_get_next_child(port, prev);
+ if (endpoint) {
+ of_node_put(port);
+ return endpoint;
+ }
+
+ /* No more endpoints under this port, try the next one. */
+ prev = NULL;
+
+ do {
+ port = of_get_next_child(parent, port);
+ if (!port)
+ return NULL;
+ } while (of_node_cmp(port->name, "port"));
+ }
+}
+EXPORT_SYMBOL(of_graph_get_next_endpoint);
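
An illustrative walker built on the iterator above, via the
for_each_endpoint_of_node() helper from <linux/of_graph.h> (the printed
fields follow struct of_endpoint):

#include <linux/of_graph.h>
#include <linux/printk.h>

static void walk_endpoints(struct device_node *dev_node)
{
	struct device_node *ep;
	struct of_endpoint endpoint;

	for_each_endpoint_of_node(dev_node, ep) {
		of_graph_parse_endpoint(ep, &endpoint);
		pr_info("port %u, endpoint %u\n", endpoint.port, endpoint.id);
	}
}
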
+
+/**
+ * of_graph_get_endpoint_by_regs() - get endpoint node of specific identifiers
+ * @parent: pointer to the parent device node
+ * @port_reg: identifier (value of reg property) of the parent port node
+ * @reg: identifier (value of reg property) of the endpoint node
+ *
+ * Return: An 'endpoint' node pointer which is identified by reg and at the
+ * same time is the child of a port node identified by port_reg. reg and
+ * port_reg are ignored when they are -1.
+ */
+struct device_node *of_graph_get_endpoint_by_regs(
+ const struct device_node *parent, int port_reg, int reg)
+{
+ struct of_endpoint endpoint;
+ struct device_node *node = NULL;
+
+ for_each_endpoint_of_node(parent, node) {
+ of_graph_parse_endpoint(node, &endpoint);
+ if (((port_reg == -1) || (endpoint.port == port_reg)) &&
+ ((reg == -1) || (endpoint.id == reg)))
+ return node;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(of_graph_get_endpoint_by_regs);
+
+/**
+ * of_graph_get_remote_endpoint() - get remote endpoint node
+ * @node: pointer to a local endpoint device_node
+ *
+ * Return: Remote endpoint node linked to @node through the 'remote-endpoint'
+ * property. Use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_remote_endpoint(const struct device_node *node)
+{
+ /* Get remote endpoint node. */
+ return of_parse_phandle(node, "remote-endpoint", 0);
+}
+EXPORT_SYMBOL(of_graph_get_remote_endpoint);
+
+/**
+ * of_graph_get_port_parent() - get port's parent node
+ * @node: pointer to a local endpoint device_node
+ *
+ * Return: Device node that contains the port to which the endpoint @node
+ * belongs. Use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_port_parent(struct device_node *node)
+{
+ unsigned int depth;
+
+ /* Walk 3 levels up only if there is 'ports' node. */
+ for (depth = 3; depth && node; depth--) {
+ node = of_get_next_parent(node);
+ if (depth == 2 && of_node_cmp(node->name, "ports"))
+ break;
+ }
+ return node;
+}
+EXPORT_SYMBOL(of_graph_get_port_parent);
+
+/**
+ * of_graph_get_remote_port_parent() - get remote port's parent node
+ * @node: pointer to a local endpoint device_node
+ *
+ * Return: Remote device node associated with remote endpoint node linked
+ * to @node. Use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_remote_port_parent(
+ const struct device_node *node)
+{
+ struct device_node *np;
+
+ /* Get remote endpoint node. */
+ np = of_graph_get_remote_endpoint(node);
+
+ return of_graph_get_port_parent(np);
+}
+EXPORT_SYMBOL(of_graph_get_remote_port_parent);
+
+/**
+ * of_graph_get_remote_port() - get remote port node
+ * @node: pointer to a local endpoint device_node
+ *
+ * Return: Remote port node associated with remote endpoint node linked
+ * to @node. Use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_remote_port(const struct device_node *node)
+{
+ struct device_node *np;
+
+ /* Get remote endpoint node. */
+ np = of_graph_get_remote_endpoint(node);
+ if (!np)
+ return NULL;
+ return of_get_next_parent(np);
+}
+EXPORT_SYMBOL(of_graph_get_remote_port);
+
+int of_graph_get_endpoint_count(const struct device_node *np)
+{
+ struct device_node *endpoint;
+ int num = 0;
+
+ for_each_endpoint_of_node(np, endpoint)
+ num++;
+
+ return num;
+}
+EXPORT_SYMBOL(of_graph_get_endpoint_count);
+
+/**
+ * of_graph_get_remote_node() - get remote parent device_node for given port/endpoint
+ * @node: pointer to parent device_node containing graph port/endpoint
+ * @port: identifier (value of reg property) of the parent port node
+ * @endpoint: identifier (value of reg property) of the endpoint node
+ *
+ * Return: Remote device node associated with remote endpoint node linked
+ * to @node. Use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_remote_node(const struct device_node *node,
+ u32 port, u32 endpoint)
+{
+ struct device_node *endpoint_node, *remote;
+
+ endpoint_node = of_graph_get_endpoint_by_regs(node, port, endpoint);
+ if (!endpoint_node) {
+ pr_debug("no valid endpoint (%d, %d) for node %s\n",
+ port, endpoint, node->full_name);
+ return NULL;
+ }
+
+ remote = of_graph_get_remote_port_parent(endpoint_node);
+ of_node_put(endpoint_node);
+ if (!remote) {
+ pr_debug("no valid remote node\n");
+ return NULL;
+ }
+
+ if (!of_device_is_available(remote)) {
+ pr_debug("not available for remote node\n");
+ return NULL;
+ }
+
+ return remote;
+}
+EXPORT_SYMBOL(of_graph_get_remote_node);
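
A sketch of looking up a connected device with of_graph_get_remote_node();
the port/endpoint numbers are illustrative:

#include <linux/of_graph.h>

static struct device_node *connected_panel(struct device_node *dev_node)
{
	/* Whatever is wired to port 0, endpoint 0 of this device. */
	return of_graph_get_remote_node(dev_node, 0, 0);
}
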
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index 771f4844c781..99309cb7d372 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -20,35 +20,11 @@
#include <linux/errno.h>
#include <linux/slab.h>
+#include "of_private.h"
+
/* illegal phandle value (set when unresolved) */
#define OF_PHANDLE_ILLEGAL 0xdeadbeef
-/**
- * Find a node with the give full name by recursively following any of
- * the child node links.
- */
-static struct device_node *find_node_by_full_name(struct device_node *node,
- const char *full_name)
-{
- struct device_node *child, *found;
-
- if (!node)
- return NULL;
-
- if (!of_node_cmp(node->full_name, full_name))
- return of_node_get(node);
-
- for_each_child_of_node(node, child) {
- found = find_node_by_full_name(child, full_name);
- if (found != NULL) {
- of_node_put(child);
- return found;
- }
- }
-
- return NULL;
-}
-
static phandle live_tree_max_phandle(void)
{
struct device_node *node;
@@ -138,7 +114,7 @@ static int update_usages_of_a_phandle_reference(struct device_node *overlay,
if (err)
goto err_fail;
- refnode = find_node_by_full_name(overlay, node_path);
+ refnode = __of_find_node_by_full_path(of_node_get(overlay), node_path);
if (!refnode)
continue;
@@ -165,8 +141,8 @@ err_fail:
static int node_name_cmp(const struct device_node *dn1,
const struct device_node *dn2)
{
- const char *n1 = strrchr(dn1->full_name, '/') ? : "/";
- const char *n2 = strrchr(dn2->full_name, '/') ? : "/";
+ const char *n1 = kbasename(dn1->full_name);
+ const char *n2 = kbasename(dn2->full_name);
return of_node_cmp(n1, n2);
}
diff --git a/drivers/of/unittest-data/tests-platform.dtsi b/drivers/of/unittest-data/tests-platform.dtsi
index eb20eeb2b062..a0c93822aee3 100644
--- a/drivers/of/unittest-data/tests-platform.dtsi
+++ b/drivers/of/unittest-data/tests-platform.dtsi
@@ -26,7 +26,9 @@
#size-cells = <0>;
dev@100 {
- compatible = "test-sub-device";
+ compatible = "test-sub-device",
+ "test-compat2",
+ "test-compat3";
reg = <0x100>;
};
};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 987a1530282a..0107fc680335 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -239,6 +239,63 @@ static void __init of_unittest_check_tree_linkage(void)
pr_debug("allnodes list size (%i); sibling lists size (%i)\n", allnode_count, child_count);
}
+static void __init of_unittest_printf_one(struct device_node *np, const char *fmt,
+ const char *expected)
+{
+ unsigned char buf[strlen(expected)+10];
+ int size, i;
+
+ /* Baseline; check conversion with a large size limit */
+ memset(buf, 0xff, sizeof(buf));
+ size = snprintf(buf, sizeof(buf) - 2, fmt, np);
+
+ /* use strcmp() instead of strncmp() here to be absolutely sure strings match */
+ unittest((strcmp(buf, expected) == 0) && (buf[size+1] == 0xff),
+ "sprintf failed; fmt='%s' expected='%s' rslt='%s'\n",
+ fmt, expected, buf);
+
+ /* Make sure length limits work */
+ size++;
+ for (i = 0; i < 2; i++, size--) {
+ /* Clear the buffer, and make sure it works correctly still */
+ memset(buf, 0xff, sizeof(buf));
+ snprintf(buf, size+1, fmt, np);
+ unittest(strncmp(buf, expected, size) == 0 && (buf[size+1] == 0xff),
+ "snprintf failed; size=%i fmt='%s' expected='%s' rslt='%s'\n",
+ size, fmt, expected, buf);
+ }
+}
+
+static void __init of_unittest_printf(void)
+{
+ struct device_node *np;
+ const char *full_name = "/testcase-data/platform-tests/test-device@1/dev@100";
+ char phandle_str[16] = "";
+
+ np = of_find_node_by_path(full_name);
+ if (!np) {
+ unittest(np, "testcase data missing\n");
+ return;
+ }
+
+ num_to_str(phandle_str, sizeof(phandle_str), np->phandle);
+
+ of_unittest_printf_one(np, "%pOF", full_name);
+ of_unittest_printf_one(np, "%pOFf", full_name);
+ of_unittest_printf_one(np, "%pOFp", phandle_str);
+ of_unittest_printf_one(np, "%pOFP", "dev@100");
+ of_unittest_printf_one(np, "ABC %pOFP ABC", "ABC dev@100 ABC");
+ of_unittest_printf_one(np, "%10pOFP", " dev@100");
+ of_unittest_printf_one(np, "%-10pOFP", "dev@100 ");
+ of_unittest_printf_one(of_root, "%pOFP", "/");
+ of_unittest_printf_one(np, "%pOFF", "----");
+ of_unittest_printf_one(np, "%pOFPF", "dev@100:----");
+ of_unittest_printf_one(np, "%pOFPFPc", "dev@100:----:dev@100:test-sub-device");
+ of_unittest_printf_one(np, "%pOFc", "test-sub-device");
+ of_unittest_printf_one(np, "%pOFC",
+ "\"test-sub-device\",\"test-compat2\",\"test-compat3\"");
+}
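
Outside the unit test, the same specifiers can be used in ordinary kernel
logging; a hedged one-liner, assuming the %pOF family exercised above is
available in vsprintf:

#include <linux/of.h>
#include <linux/printk.h>

static void log_node(struct device_node *np)
{
	/* Full path, node name with unit address, first compatible value. */
	pr_info("probing %pOF (%pOFP, %pOFc)\n", np, np, np);
}
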
+
struct node_hash {
struct hlist_node node;
struct device_node *np;
@@ -2269,6 +2326,7 @@ static int __init of_unittest(void)
of_unittest_find_node_by_name();
of_unittest_dynamic();
of_unittest_parse_phandle_with_args();
+ of_unittest_printf();
of_unittest_property_string();
of_unittest_property_copy();
of_unittest_changeset();
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 462c1f5f5546..66a21acad952 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -4,7 +4,8 @@
obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \
pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
- irq.o vpd.o setup-bus.o vc.o mmap.o
+ irq.o vpd.o setup-bus.o vc.o mmap.o setup-irq.o
+
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
@@ -29,20 +30,6 @@ obj-$(CONFIG_PCI_ATS) += ats.o
obj-$(CONFIG_PCI_IOV) += iov.o
#
-# Some architectures use the generic PCI setup functions
-#
-obj-$(CONFIG_ALPHA) += setup-irq.o
-obj-$(CONFIG_ARC) += setup-irq.o
-obj-$(CONFIG_ARM) += setup-irq.o
-obj-$(CONFIG_ARM64) += setup-irq.o
-obj-$(CONFIG_UNICORE32) += setup-irq.o
-obj-$(CONFIG_SUPERH) += setup-irq.o
-obj-$(CONFIG_MIPS) += setup-irq.o
-obj-$(CONFIG_TILE) += setup-irq.o
-obj-$(CONFIG_SPARC_LEON) += setup-irq.o
-obj-$(CONFIG_M68K) += setup-irq.o
-
-#
# ACPI Related PCI FW Functions
# ACPI _DSM provided firmware instance and string name
#
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index eeb9fb2b47aa..ad8ddbbbf245 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -153,23 +153,27 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
u32 max_requests;
int pos;
+ if (WARN_ON(pdev->pri_enabled))
+ return -EBUSY;
+
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
- if ((control & PCI_PRI_CTRL_ENABLE) ||
- !(status & PCI_PRI_STATUS_STOPPED))
+ if (!(status & PCI_PRI_STATUS_STOPPED))
return -EBUSY;
pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests);
reqs = min(max_requests, reqs);
+ pdev->pri_reqs_alloc = reqs;
pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
- control |= PCI_PRI_CTRL_ENABLE;
+ control = PCI_PRI_CTRL_ENABLE;
pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+ pdev->pri_enabled = 1;
+
return 0;
}
EXPORT_SYMBOL_GPL(pci_enable_pri);
@@ -185,6 +189,9 @@ void pci_disable_pri(struct pci_dev *pdev)
u16 control;
int pos;
+ if (WARN_ON(!pdev->pri_enabled))
+ return;
+
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return;
@@ -192,10 +199,34 @@ void pci_disable_pri(struct pci_dev *pdev)
pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
control &= ~PCI_PRI_CTRL_ENABLE;
pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+
+ pdev->pri_enabled = 0;
}
EXPORT_SYMBOL_GPL(pci_disable_pri);
/**
+ * pci_restore_pri_state - Restore PRI
+ * @pdev: PCI device structure
+ */
+void pci_restore_pri_state(struct pci_dev *pdev)
+{
+ u16 control = PCI_PRI_CTRL_ENABLE;
+ u32 reqs = pdev->pri_reqs_alloc;
+ int pos;
+
+ if (!pdev->pri_enabled)
+ return;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+ if (!pos)
+ return;
+
+ pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+}
+EXPORT_SYMBOL_GPL(pci_restore_pri_state);
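
A hedged sketch of how a caller (typically an IOMMU driver) might pair the
PRI enable and restore helpers; the request count is an arbitrary example
and the declarations are assumed to live in <linux/pci-ats.h> alongside the
existing PRI interfaces:

#include <linux/pci.h>
#include <linux/pci-ats.h>

static int enable_pri_example(struct pci_dev *pdev)
{
	/* Allocate up to 32 outstanding page requests. */
	return pci_enable_pri(pdev, 32);
}

static void resume_pri_example(struct pci_dev *pdev)
{
	/* Re-applies the enable bit and the count saved in pri_reqs_alloc. */
	pci_restore_pri_state(pdev);
}
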
+
+/**
* pci_reset_pri - Resets device's PRI state
* @pdev: PCI device structure
*
@@ -207,16 +238,14 @@ int pci_reset_pri(struct pci_dev *pdev)
u16 control;
int pos;
+ if (WARN_ON(pdev->pri_enabled))
+ return -EBUSY;
+
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
- if (control & PCI_PRI_CTRL_ENABLE)
- return -EBUSY;
-
- control |= PCI_PRI_CTRL_RESET;
-
+ control = PCI_PRI_CTRL_RESET;
pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
return 0;
@@ -239,16 +268,14 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
u16 control, supported;
int pos;
+ if (WARN_ON(pdev->pasid_enabled))
+ return -EBUSY;
+
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CTRL, &control);
pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
-
- if (control & PCI_PASID_CTRL_ENABLE)
- return -EINVAL;
-
supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
/* User wants to enable anything unsupported? */
@@ -256,9 +283,12 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
return -EINVAL;
control = PCI_PASID_CTRL_ENABLE | features;
+ pdev->pasid_features = features;
pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+ pdev->pasid_enabled = 1;
+
return 0;
}
EXPORT_SYMBOL_GPL(pci_enable_pasid);
@@ -266,22 +296,47 @@ EXPORT_SYMBOL_GPL(pci_enable_pasid);
/**
* pci_disable_pasid - Disable the PASID capability
* @pdev: PCI device structure
- *
*/
void pci_disable_pasid(struct pci_dev *pdev)
{
u16 control = 0;
int pos;
+ if (WARN_ON(!pdev->pasid_enabled))
+ return;
+
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
if (!pos)
return;
pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+
+ pdev->pasid_enabled = 0;
}
EXPORT_SYMBOL_GPL(pci_disable_pasid);
/**
+ * pci_restore_pasid_state - Restore PASID capabilities
+ * @pdev: PCI device structure
+ */
+void pci_restore_pasid_state(struct pci_dev *pdev)
+{
+ u16 control;
+ int pos;
+
+ if (!pdev->pasid_enabled)
+ return;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+ if (!pos)
+ return;
+
+ control = PCI_PASID_CTRL_ENABLE | pdev->pasid_features;
+ pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+}
+EXPORT_SYMBOL_GPL(pci_restore_pasid_state);
+
+/**
* pci_pasid_features - Check which PASID features are supported
* @pdev: PCI device structure
*
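The ats.c changes above make PRI/PASID state restorable: pci_enable_pri() and pci_enable_pasid() now record what was enabled (pri_reqs_alloc, pasid_features and the *_enabled flags), the WARN_ON() checks turn double enables/disables into loud warnings, and the new pci_restore_pri_state()/pci_restore_pasid_state() helpers re-program both capabilities from that record after the config space has been reset. A minimal sketch of a caller is below; the function name is hypothetical and error handling is trimmed to the essentials.

#include <linux/pci.h>
#include <linux/pci-ats.h>

/* Hypothetical IOMMU-driver-style helper: enable PASID, then PRI. */
static int example_enable_ats_features(struct pci_dev *pdev, u32 pri_reqs)
{
	int ret;

	ret = pci_enable_pasid(pdev, 0);
	if (ret)
		return ret;

	ret = pci_enable_pri(pdev, pri_reqs);
	if (ret) {
		pci_disable_pasid(pdev);
		return ret;
	}

	/*
	 * From here on, pci_restore_pri_state(pdev) and
	 * pci_restore_pasid_state(pdev) can re-enable both capabilities
	 * with the recorded settings after a reset or resume, instead of
	 * the state being silently lost.
	 */
	return 0;
}
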
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig
index b7e15526d676..d275aadc47ee 100644
--- a/drivers/pci/dwc/Kconfig
+++ b/drivers/pci/dwc/Kconfig
@@ -16,6 +16,7 @@ config PCIE_DW_EP
config PCI_DRA7XX
bool "TI DRA7xx PCIe controller"
+ depends on SOC_DRA7XX || COMPILE_TEST
depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT
depends on OF && HAS_IOMEM && TI_PIPE3
help
@@ -158,4 +159,14 @@ config PCIE_ARTPEC6
Say Y here to enable PCIe controller support on Axis ARTPEC-6
SoCs. This PCIe controller uses the DesignWare core.
+config PCIE_KIRIN
+ depends on OF && ARM64
+ bool "HiSilicon Kirin series SoCs PCIe controllers"
+ depends on PCI
+ select PCIEPORTBUS
+ select PCIE_DW_HOST
+ help
+ Say Y here if you want PCIe controller support
+ on HiSilicon Kirin series SoCs.
+
endmenu
diff --git a/drivers/pci/dwc/Makefile b/drivers/pci/dwc/Makefile
index f31a8596442a..c61be9738cce 100644
--- a/drivers/pci/dwc/Makefile
+++ b/drivers/pci/dwc/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
+obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o
# The following drivers are for devices that use the generic ACPI
# pci_root.c driver but don't support standard ECAM config access.
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c
index 8decf46cf525..f2fc5f47064e 100644
--- a/drivers/pci/dwc/pci-dra7xx.c
+++ b/drivers/pci/dwc/pci-dra7xx.c
@@ -174,7 +174,7 @@ static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
{
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
- ~LEG_EP_INTERRUPTS & ~MSI);
+ LEG_EP_INTERRUPTS | MSI);
dra7xx_pcie_writel(dra7xx,
PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
@@ -184,7 +184,7 @@ static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
{
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
- ~INTERRUPTS);
+ INTERRUPTS);
dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
INTERRUPTS);
}
@@ -208,7 +208,7 @@ static void dra7xx_pcie_host_init(struct pcie_port *pp)
dra7xx_pcie_enable_interrupts(dra7xx);
}
-static struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
+static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
.host_init = dra7xx_pcie_host_init,
};
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c
index 546082ad5a3f..c78c06552590 100644
--- a/drivers/pci/dwc/pci-exynos.c
+++ b/drivers/pci/dwc/pci-exynos.c
@@ -590,7 +590,7 @@ static void exynos_pcie_host_init(struct pcie_port *pp)
exynos_pcie_enable_interrupts(ep);
}
-static struct dw_pcie_host_ops exynos_pcie_host_ops = {
+static const struct dw_pcie_host_ops exynos_pcie_host_ops = {
.rd_own_conf = exynos_pcie_rd_own_conf,
.wr_own_conf = exynos_pcie_wr_own_conf,
.host_init = exynos_pcie_host_init,
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c
index 19a289b8cc94..bf5c3616e344 100644
--- a/drivers/pci/dwc/pci-imx6.c
+++ b/drivers/pci/dwc/pci-imx6.c
@@ -24,6 +24,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
@@ -59,6 +60,7 @@ struct imx6_pcie {
u32 tx_swing_full;
u32 tx_swing_low;
int link_gen;
+ struct regulator *vpcie;
};
/* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */
@@ -284,6 +286,8 @@ static int imx6q_pcie_abort_handler(unsigned long addr,
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
+ struct device *dev = imx6_pcie->pci->dev;
+
switch (imx6_pcie->variant) {
case IMX7D:
reset_control_assert(imx6_pcie->pciephy_reset);
@@ -310,6 +314,14 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
break;
}
+
+ if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
+ int ret = regulator_disable(imx6_pcie->vpcie);
+
+ if (ret)
+ dev_err(dev, "failed to disable vpcie regulator: %d\n",
+ ret);
+ }
}
static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
@@ -376,10 +388,19 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
struct device *dev = pci->dev;
int ret;
+ if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
+ ret = regulator_enable(imx6_pcie->vpcie);
+ if (ret) {
+ dev_err(dev, "failed to enable vpcie regulator: %d\n",
+ ret);
+ return;
+ }
+ }
+
ret = clk_prepare_enable(imx6_pcie->pcie_phy);
if (ret) {
dev_err(dev, "unable to enable pcie_phy clock\n");
- return;
+ goto err_pcie_phy;
}
ret = clk_prepare_enable(imx6_pcie->pcie_bus);
@@ -439,6 +460,13 @@ err_pcie:
clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
clk_disable_unprepare(imx6_pcie->pcie_phy);
+err_pcie_phy:
+ if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
+ ret = regulator_disable(imx6_pcie->vpcie);
+ if (ret)
+ dev_err(dev, "failed to disable vpcie regulator: %d\n",
+ ret);
+ }
}
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
@@ -629,7 +657,7 @@ static int imx6_pcie_link_up(struct dw_pcie *pci)
PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
}
-static struct dw_pcie_host_ops imx6_pcie_host_ops = {
+static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
.host_init = imx6_pcie_host_init,
};
@@ -802,6 +830,13 @@ static int imx6_pcie_probe(struct platform_device *pdev)
if (ret)
imx6_pcie->link_gen = 1;
+ imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
+ if (IS_ERR(imx6_pcie->vpcie)) {
+ if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ imx6_pcie->vpcie = NULL;
+ }
+
platform_set_drvdata(pdev, imx6_pcie);
ret = imx6_add_pcie_port(imx6_pcie, pdev);
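The i.MX6 hunk treats "vpcie" as an optional supply: only -EPROBE_DEFER is propagated, any other devm_regulator_get_optional() error is taken to mean the board has no switchable PCIe supply, and regulator_is_enabled() is checked before disabling so the enable count stays balanced across the assert/deassert paths. A condensed sketch of that pattern (helper name hypothetical):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/regulator/consumer.h>

/* Optional-supply lookup: defer if the regulator isn't ready yet,
 * otherwise fall back to "no regulator" and carry on. */
static struct regulator *example_get_vpcie(struct device *dev)
{
	struct regulator *vpcie = devm_regulator_get_optional(dev, "vpcie");

	if (IS_ERR(vpcie)) {
		if (PTR_ERR(vpcie) == -EPROBE_DEFER)
			return ERR_PTR(-EPROBE_DEFER);
		return NULL;	/* board has no switchable PCIe supply */
	}
	return vpcie;
}
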
diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c
index fcc9723bad6e..4783cec1f78d 100644
--- a/drivers/pci/dwc/pci-keystone.c
+++ b/drivers/pci/dwc/pci-keystone.c
@@ -291,7 +291,7 @@ static void __init ks_pcie_host_init(struct pcie_port *pp)
"Asynchronous external abort");
}
-static struct dw_pcie_host_ops keystone_pcie_host_ops = {
+static const struct dw_pcie_host_ops keystone_pcie_host_ops = {
.rd_other_conf = ks_dw_pcie_rd_other_conf,
.wr_other_conf = ks_dw_pcie_wr_other_conf,
.host_init = ks_pcie_host_init,
diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c
index 27d638c4e134..fd861289ad8b 100644
--- a/drivers/pci/dwc/pci-layerscape.c
+++ b/drivers/pci/dwc/pci-layerscape.c
@@ -39,7 +39,7 @@ struct ls_pcie_drvdata {
u32 lut_offset;
u32 ltssm_shift;
u32 lut_dbg;
- struct dw_pcie_host_ops *ops;
+ const struct dw_pcie_host_ops *ops;
const struct dw_pcie_ops *dw_pcie_ops;
};
@@ -185,12 +185,12 @@ static int ls_pcie_msi_host_init(struct pcie_port *pp,
return 0;
}
-static struct dw_pcie_host_ops ls1021_pcie_host_ops = {
+static const struct dw_pcie_host_ops ls1021_pcie_host_ops = {
.host_init = ls1021_pcie_host_init,
.msi_host_init = ls_pcie_msi_host_init,
};
-static struct dw_pcie_host_ops ls_pcie_host_ops = {
+static const struct dw_pcie_host_ops ls_pcie_host_ops = {
.host_init = ls_pcie_host_init,
.msi_host_init = ls_pcie_msi_host_init,
};
diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c
index 495b023042b3..ea8f34af6a85 100644
--- a/drivers/pci/dwc/pcie-armada8k.c
+++ b/drivers/pci/dwc/pcie-armada8k.c
@@ -160,7 +160,7 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static struct dw_pcie_host_ops armada8k_pcie_host_ops = {
+static const struct dw_pcie_host_ops armada8k_pcie_host_ops = {
.host_init = armada8k_pcie_host_init,
};
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c
index 82a04acc42fd..01c6f7823672 100644
--- a/drivers/pci/dwc/pcie-artpec6.c
+++ b/drivers/pci/dwc/pcie-artpec6.c
@@ -184,7 +184,7 @@ static void artpec6_pcie_host_init(struct pcie_port *pp)
artpec6_pcie_enable_interrupts(artpec6_pcie);
}
-static struct dw_pcie_host_ops artpec6_pcie_host_ops = {
+static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
.host_init = artpec6_pcie_host_init,
};
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c
index 28ed32ba4f1b..d29c020da082 100644
--- a/drivers/pci/dwc/pcie-designware-host.c
+++ b/drivers/pci/dwc/pcie-designware-host.c
@@ -280,9 +280,9 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
struct pci_bus *bus, *child;
+ struct pci_host_bridge *bridge;
struct resource *cfg_res;
int i, ret;
- LIST_HEAD(res);
struct resource_entry *win, *tmp;
cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
@@ -295,16 +295,21 @@ int dw_pcie_host_init(struct pcie_port *pp)
dev_err(dev, "missing *config* reg space\n");
}
- ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
+ bridge = pci_alloc_host_bridge(0);
+ if (!bridge)
+ return -ENOMEM;
+
+ ret = of_pci_get_host_bridge_resources(np, 0, 0xff,
+ &bridge->windows, &pp->io_base);
if (ret)
return ret;
- ret = devm_request_pci_bus_resources(dev, &res);
+ ret = devm_request_pci_bus_resources(dev, &bridge->windows);
if (ret)
goto error;
/* Get the I/O and memory ranges from DT */
- resource_list_for_each_entry_safe(win, tmp, &res) {
+ resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
ret = pci_remap_iospace(win->res, pp->io_base);
@@ -400,27 +405,27 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp->ops->host_init(pp);
pp->root_bus_nr = pp->busn->start;
+
+ bridge->dev.parent = dev;
+ bridge->sysdata = pp;
+ bridge->busnr = pp->root_bus_nr;
+ bridge->ops = &dw_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- bus = pci_scan_root_bus_msi(dev, pp->root_bus_nr,
- &dw_pcie_ops, pp, &res,
- &dw_pcie_msi_chip);
+ bridge->msi = &dw_pcie_msi_chip;
dw_pcie_msi_chip.dev = dev;
- } else
- bus = pci_scan_root_bus(dev, pp->root_bus_nr, &dw_pcie_ops,
- pp, &res);
- if (!bus) {
- ret = -ENOMEM;
- goto error;
}
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret)
+ goto error;
+
+ bus = bridge->bus;
+
if (pp->ops->scan_bus)
pp->ops->scan_bus(pp);
-#ifdef CONFIG_ARM
- /* support old dtbs that incorrectly describe IRQs */
- pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
-#endif
-
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
@@ -431,7 +436,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
return 0;
error:
- pci_free_resource_list(&res);
+ pci_free_host_bridge(bridge);
return ret;
}
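The dw_pcie_host_init() conversion above is one instance of the generic move from pci_scan_root_bus()/pci_scan_root_bus_msi() to a filled-in struct pci_host_bridge: the resource list lives in bridge->windows, the IRQ mapping that used to go through pci_fixup_irqs() is expressed via bridge->map_irq/swizzle_irq, and pci_scan_root_bus_bridge() performs the scan. A condensed, driver-agnostic sketch (function name hypothetical, error handling minimal):

#include <linux/pci.h>
#include <linux/of_pci.h>

static int example_scan_host(struct device *dev, struct pci_ops *ops,
			     void *sysdata, int busnr)
{
	struct pci_host_bridge *bridge;
	int ret;

	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return -ENOMEM;

	/* ... populate bridge->windows, e.g. from DT ranges ... */

	bridge->dev.parent = dev;
	bridge->sysdata = sysdata;
	bridge->busnr = busnr;
	bridge->ops = ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret) {
		pci_free_host_bridge(bridge);
		return ret;
	}

	pci_bus_size_bridges(bridge->bus);
	pci_bus_assign_resources(bridge->bus);
	pci_bus_add_devices(bridge->bus);
	return 0;
}
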
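The error path also changes shape: pci_free_host_bridge() releases both the bridge and its window list, which is why the old pci_free_resource_list(&res) call disappears.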
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c
index 32091b32f6e1..091b4e7ad059 100644
--- a/drivers/pci/dwc/pcie-designware-plat.c
+++ b/drivers/pci/dwc/pcie-designware-plat.c
@@ -46,7 +46,7 @@ static void dw_plat_pcie_host_init(struct pcie_port *pp)
dw_pcie_msi_init(pp);
}
-static struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
+static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = {
.host_init = dw_plat_pcie_host_init,
};
@@ -67,7 +67,8 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
ret = devm_request_irq(dev, pp->msi_irq,
dw_plat_pcie_msi_irq_handler,
- IRQF_SHARED, "dw-plat-pcie-msi", pp);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "dw-plat-pcie-msi", pp);
if (ret) {
dev_err(dev, "failed to request MSI IRQ\n");
return ret;
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h
index c6a840575796..b4d2a89f8e58 100644
--- a/drivers/pci/dwc/pcie-designware.h
+++ b/drivers/pci/dwc/pcie-designware.h
@@ -162,7 +162,7 @@ struct pcie_port {
struct resource *mem;
struct resource *busn;
int irq;
- struct dw_pcie_host_ops *ops;
+ const struct dw_pcie_host_ops *ops;
int msi_irq;
struct irq_domain *irq_domain;
unsigned long msi_data;
diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c
new file mode 100644
index 000000000000..33fddb9f6739
--- /dev/null
+++ b/drivers/pci/dwc/pcie-kirin.c
@@ -0,0 +1,517 @@
+/*
+ * PCIe host controller driver for Kirin Phone SoCs
+ *
+ * Copyright (C) 2017 HiSilicon Electronics Co., Ltd.
+ * http://www.huawei.com
+ *
+ * Author: Xiaowei Song <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/compiler.h>
+#include <linux/compiler.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+#include "pcie-designware.h"
+
+#define to_kirin_pcie(x) dev_get_drvdata((x)->dev)
+
+#define REF_CLK_FREQ 100000000
+
+/* PCIe ELBI registers */
+#define SOC_PCIECTRL_CTRL0_ADDR 0x000
+#define SOC_PCIECTRL_CTRL1_ADDR 0x004
+#define SOC_PCIEPHY_CTRL2_ADDR 0x008
+#define SOC_PCIEPHY_CTRL3_ADDR 0x00c
+#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)
+
+/* info located in APB */
+#define PCIE_APP_LTSSM_ENABLE 0x01c
+#define PCIE_APB_PHY_CTRL0 0x0
+#define PCIE_APB_PHY_CTRL1 0x4
+#define PCIE_APB_PHY_STATUS0 0x400
+#define PCIE_LINKUP_ENABLE (0x8020)
+#define PCIE_LTSSM_ENABLE_BIT (0x1 << 11)
+#define PIPE_CLK_STABLE (0x1 << 19)
+#define PHY_REF_PAD_BIT (0x1 << 8)
+#define PHY_PWR_DOWN_BIT (0x1 << 22)
+#define PHY_RST_ACK_BIT (0x1 << 16)
+
+/* info located in sysctrl */
+#define SCTRL_PCIE_CMOS_OFFSET 0x60
+#define SCTRL_PCIE_CMOS_BIT 0x10
+#define SCTRL_PCIE_ISO_OFFSET 0x44
+#define SCTRL_PCIE_ISO_BIT 0x30
+#define SCTRL_PCIE_HPCLK_OFFSET 0x190
+#define SCTRL_PCIE_HPCLK_BIT 0x184000
+#define SCTRL_PCIE_OE_OFFSET 0x14a
+#define PCIE_DEBOUNCE_PARAM 0xF0F400
+#define PCIE_OE_BYPASS (0x3 << 28)
+
+/* peri_crg ctrl */
+#define CRGCTRL_PCIE_ASSERT_OFFSET 0x88
+#define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000
+
+/* Time for delay */
+#define REF_2_PERST_MIN 20000
+#define REF_2_PERST_MAX 25000
+#define PERST_2_ACCESS_MIN 10000
+#define PERST_2_ACCESS_MAX 12000
+#define LINK_WAIT_MIN 900
+#define LINK_WAIT_MAX 1000
+#define PIPE_CLK_WAIT_MIN 550
+#define PIPE_CLK_WAIT_MAX 600
+#define TIME_CMOS_MIN 100
+#define TIME_CMOS_MAX 105
+#define TIME_PHY_PD_MIN 10
+#define TIME_PHY_PD_MAX 11
+
+struct kirin_pcie {
+ struct dw_pcie *pci;
+ void __iomem *apb_base;
+ void __iomem *phy_base;
+ struct regmap *crgctrl;
+ struct regmap *sysctrl;
+ struct clk *apb_sys_clk;
+ struct clk *apb_phy_clk;
+ struct clk *phy_ref_clk;
+ struct clk *pcie_aclk;
+ struct clk *pcie_aux_clk;
+ int gpio_id_reset;
+};
+
+/* Registers in PCIeCTRL */
+static inline void kirin_apb_ctrl_writel(struct kirin_pcie *kirin_pcie,
+ u32 val, u32 reg)
+{
+ writel(val, kirin_pcie->apb_base + reg);
+}
+
+static inline u32 kirin_apb_ctrl_readl(struct kirin_pcie *kirin_pcie, u32 reg)
+{
+ return readl(kirin_pcie->apb_base + reg);
+}
+
+/* Registers in PCIePHY */
+static inline void kirin_apb_phy_writel(struct kirin_pcie *kirin_pcie,
+ u32 val, u32 reg)
+{
+ writel(val, kirin_pcie->phy_base + reg);
+}
+
+static inline u32 kirin_apb_phy_readl(struct kirin_pcie *kirin_pcie, u32 reg)
+{
+ return readl(kirin_pcie->phy_base + reg);
+}
+
+static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ kirin_pcie->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref");
+ if (IS_ERR(kirin_pcie->phy_ref_clk))
+ return PTR_ERR(kirin_pcie->phy_ref_clk);
+
+ kirin_pcie->pcie_aux_clk = devm_clk_get(dev, "pcie_aux");
+ if (IS_ERR(kirin_pcie->pcie_aux_clk))
+ return PTR_ERR(kirin_pcie->pcie_aux_clk);
+
+ kirin_pcie->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy");
+ if (IS_ERR(kirin_pcie->apb_phy_clk))
+ return PTR_ERR(kirin_pcie->apb_phy_clk);
+
+ kirin_pcie->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys");
+ if (IS_ERR(kirin_pcie->apb_sys_clk))
+ return PTR_ERR(kirin_pcie->apb_sys_clk);
+
+ kirin_pcie->pcie_aclk = devm_clk_get(dev, "pcie_aclk");
+ if (IS_ERR(kirin_pcie->pcie_aclk))
+ return PTR_ERR(kirin_pcie->pcie_aclk);
+
+ return 0;
+}
+
+static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *apb;
+ struct resource *phy;
+ struct resource *dbi;
+
+ apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb");
+ kirin_pcie->apb_base = devm_ioremap_resource(dev, apb);
+ if (IS_ERR(kirin_pcie->apb_base))
+ return PTR_ERR(kirin_pcie->apb_base);
+
+ phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
+ kirin_pcie->phy_base = devm_ioremap_resource(dev, phy);
+ if (IS_ERR(kirin_pcie->phy_base))
+ return PTR_ERR(kirin_pcie->phy_base);
+
+ dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi);
+ if (IS_ERR(kirin_pcie->pci->dbi_base))
+ return PTR_ERR(kirin_pcie->pci->dbi_base);
+
+ kirin_pcie->crgctrl =
+ syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl");
+ if (IS_ERR(kirin_pcie->crgctrl))
+ return PTR_ERR(kirin_pcie->crgctrl);
+
+ kirin_pcie->sysctrl =
+ syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl");
+ if (IS_ERR(kirin_pcie->sysctrl))
+ return PTR_ERR(kirin_pcie->sysctrl);
+
+ return 0;
+}
+
+static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie)
+{
+ struct device *dev = kirin_pcie->pci->dev;
+ u32 reg_val;
+
+ reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
+ reg_val &= ~PHY_REF_PAD_BIT;
+ kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);
+
+ reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL0);
+ reg_val &= ~PHY_PWR_DOWN_BIT;
+ kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL0);
+ usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX);
+
+ reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
+ reg_val &= ~PHY_RST_ACK_BIT;
+ kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);
+
+ usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
+ reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
+ if (reg_val & PIPE_CLK_STABLE) {
+ dev_err(dev, "PIPE clk is not stable\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void kirin_pcie_oe_enable(struct kirin_pcie *kirin_pcie)
+{
+ u32 val;
+
+ regmap_read(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, &val);
+ val |= PCIE_DEBOUNCE_PARAM;
+ val &= ~PCIE_OE_BYPASS;
+ regmap_write(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, val);
+}
+
+static int kirin_pcie_clk_ctrl(struct kirin_pcie *kirin_pcie, bool enable)
+{
+ int ret = 0;
+
+ if (!enable)
+ goto close_clk;
+
+ ret = clk_set_rate(kirin_pcie->phy_ref_clk, REF_CLK_FREQ);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(kirin_pcie->phy_ref_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(kirin_pcie->apb_sys_clk);
+ if (ret)
+ goto apb_sys_fail;
+
+ ret = clk_prepare_enable(kirin_pcie->apb_phy_clk);
+ if (ret)
+ goto apb_phy_fail;
+
+ ret = clk_prepare_enable(kirin_pcie->pcie_aclk);
+ if (ret)
+ goto aclk_fail;
+
+ ret = clk_prepare_enable(kirin_pcie->pcie_aux_clk);
+ if (ret)
+ goto aux_clk_fail;
+
+ return 0;
+
+close_clk:
+ clk_disable_unprepare(kirin_pcie->pcie_aux_clk);
+aux_clk_fail:
+ clk_disable_unprepare(kirin_pcie->pcie_aclk);
+aclk_fail:
+ clk_disable_unprepare(kirin_pcie->apb_phy_clk);
+apb_phy_fail:
+ clk_disable_unprepare(kirin_pcie->apb_sys_clk);
+apb_sys_fail:
+ clk_disable_unprepare(kirin_pcie->phy_ref_clk);
+
+ return ret;
+}
+
+static int kirin_pcie_power_on(struct kirin_pcie *kirin_pcie)
+{
+ int ret;
+
+ /* Power supply for Host */
+ regmap_write(kirin_pcie->sysctrl,
+ SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT);
+ usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX);
+ kirin_pcie_oe_enable(kirin_pcie);
+
+ ret = kirin_pcie_clk_ctrl(kirin_pcie, true);
+ if (ret)
+ return ret;
+
+ /* ISO disable, PCIeCtrl, PHY assert and clk gate clear */
+ regmap_write(kirin_pcie->sysctrl,
+ SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT);
+ regmap_write(kirin_pcie->crgctrl,
+ CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT);
+ regmap_write(kirin_pcie->sysctrl,
+ SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT);
+
+ ret = kirin_pcie_phy_init(kirin_pcie);
+ if (ret)
+ goto close_clk;
+
+ /* perst assert Endpoint */
+ if (!gpio_request(kirin_pcie->gpio_id_reset, "pcie_perst")) {
+ usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX);
+ ret = gpio_direction_output(kirin_pcie->gpio_id_reset, 1);
+ if (ret)
+ goto close_clk;
+ usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX);
+
+ return 0;
+ }
+
+close_clk:
+ kirin_pcie_clk_ctrl(kirin_pcie, false);
+ return ret;
+}
+
+static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie,
+ bool on)
+{
+ u32 val;
+
+ val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL0_ADDR);
+ if (on)
+ val = val | PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
+
+ kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL0_ADDR);
+}
+
+static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie,
+ bool on)
+{
+ u32 val;
+
+ val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL1_ADDR);
+ if (on)
+ val = val | PCIE_ELBI_SLV_DBI_ENABLE;
+ else
+ val = val & ~PCIE_ELBI_SLV_DBI_ENABLE;
+
+ kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR);
+}
+
+static int kirin_pcie_rd_own_conf(struct pcie_port *pp,
+ int where, int size, u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+ int ret;
+
+ kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
+ ret = dw_pcie_read(pci->dbi_base + where, size, val);
+ kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);
+
+ return ret;
+}
+
+static int kirin_pcie_wr_own_conf(struct pcie_port *pp,
+ int where, int size, u32 val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+ int ret;
+
+ kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
+ ret = dw_pcie_write(pci->dbi_base + where, size, val);
+ kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
+
+ return ret;
+}
+
+static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size)
+{
+ struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+ u32 ret;
+
+ kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true);
+ dw_pcie_read(base + reg, size, &ret);
+ kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false);
+
+ return ret;
+}
+
+static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,
+ u32 reg, size_t size, u32 val)
+{
+ struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+
+ kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true);
+ dw_pcie_write(base + reg, size, val);
+ kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false);
+}
+
+static int kirin_pcie_link_up(struct dw_pcie *pci)
+{
+ struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+ u32 val = kirin_apb_ctrl_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
+
+ if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE)
+ return 1;
+
+ return 0;
+}
+
+static int kirin_pcie_establish_link(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci);
+ struct device *dev = kirin_pcie->pci->dev;
+ int count = 0;
+
+ if (kirin_pcie_link_up(pci))
+ return 0;
+
+ dw_pcie_setup_rc(pp);
+
+ /* assert LTSSM enable */
+ kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT,
+ PCIE_APP_LTSSM_ENABLE);
+
+ /* check if the link is up or not */
+ while (!kirin_pcie_link_up(pci)) {
+ usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX);
+ count++;
+ if (count == 1000) {
+ dev_err(dev, "Link Fail\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void kirin_pcie_host_init(struct pcie_port *pp)
+{
+ kirin_pcie_establish_link(pp);
+}
+
+static struct dw_pcie_ops kirin_dw_pcie_ops = {
+ .read_dbi = kirin_pcie_read_dbi,
+ .write_dbi = kirin_pcie_write_dbi,
+ .link_up = kirin_pcie_link_up,
+};
+
+static struct dw_pcie_host_ops kirin_pcie_host_ops = {
+ .rd_own_conf = kirin_pcie_rd_own_conf,
+ .wr_own_conf = kirin_pcie_wr_own_conf,
+ .host_init = kirin_pcie_host_init,
+};
+
+static int __init kirin_add_pcie_port(struct dw_pcie *pci,
+ struct platform_device *pdev)
+{
+ pci->pp.ops = &kirin_pcie_host_ops;
+
+ return dw_pcie_host_init(&pci->pp);
+}
+
+static int kirin_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct kirin_pcie *kirin_pcie;
+ struct dw_pcie *pci;
+ int ret;
+
+ if (!dev->of_node) {
+ dev_err(dev, "NULL node\n");
+ return -EINVAL;
+ }
+
+ kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
+ if (!kirin_pcie)
+ return -ENOMEM;
+
+ pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci)
+ return -ENOMEM;
+
+ pci->dev = dev;
+ pci->ops = &kirin_dw_pcie_ops;
+ kirin_pcie->pci = pci;
+
+ ret = kirin_pcie_get_clk(kirin_pcie, pdev);
+ if (ret)
+ return ret;
+
+ ret = kirin_pcie_get_resource(kirin_pcie, pdev);
+ if (ret)
+ return ret;
+
+ kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
+ "reset-gpio", 0);
+ if (kirin_pcie->gpio_id_reset < 0)
+ return -ENODEV;
+
+ ret = kirin_pcie_power_on(kirin_pcie);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, kirin_pcie);
+
+ return kirin_add_pcie_port(pci, pdev);
+}
+
+static const struct of_device_id kirin_pcie_match[] = {
+ { .compatible = "hisilicon,kirin960-pcie" },
+ {},
+};
+
+struct platform_driver kirin_pcie_driver = {
+ .probe = kirin_pcie_probe,
+ .driver = {
+ .name = "kirin-pcie",
+ .of_match_table = kirin_pcie_match,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(kirin_pcie_driver);
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c
index 5bf23d432fdb..68c5f2ab5bc8 100644
--- a/drivers/pci/dwc/pcie-qcom.c
+++ b/drivers/pci/dwc/pcie-qcom.c
@@ -51,6 +51,12 @@
#define PCIE20_ELBI_SYS_CTRL 0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4
+#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
+#define CFG_BRIDGE_SB_INIT BIT(0)
+
#define PCIE20_CAP 0x70
#define PERST_DELAY_US 1000
@@ -86,10 +92,29 @@ struct qcom_pcie_resources_v2 {
struct clk *pipe_clk;
};
+struct qcom_pcie_resources_v3 {
+ struct clk *aux_clk;
+ struct clk *master_clk;
+ struct clk *slave_clk;
+ struct reset_control *axi_m_reset;
+ struct reset_control *axi_s_reset;
+ struct reset_control *pipe_reset;
+ struct reset_control *axi_m_vmid_reset;
+ struct reset_control *axi_s_xpu_reset;
+ struct reset_control *parf_reset;
+ struct reset_control *phy_reset;
+ struct reset_control *axi_m_sticky_reset;
+ struct reset_control *pipe_sticky_reset;
+ struct reset_control *pwr_reset;
+ struct reset_control *ahb_reset;
+ struct reset_control *phy_ahb_reset;
+};
+
union qcom_pcie_resources {
struct qcom_pcie_resources_v0 v0;
struct qcom_pcie_resources_v1 v1;
struct qcom_pcie_resources_v2 v2;
+ struct qcom_pcie_resources_v3 v3;
};
struct qcom_pcie;
@@ -133,26 +158,6 @@ static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
return dw_handle_msi_irq(pp);
}
-static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
-{
- u32 val;
-
- /* enable link training */
- val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
- val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
- writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
-}
-
-static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
-{
- u32 val;
-
- /* enable link training */
- val = readl(pcie->parf + PCIE20_PARF_LTSSM);
- val |= BIT(8);
- writel(val, pcie->parf + PCIE20_PARF_LTSSM);
-}
-
static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
struct dw_pcie *pci = pcie->pci;
@@ -167,6 +172,16 @@ static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
return dw_pcie_wait_for_link(pci);
}
+static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* enable link training */
+ val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+ val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
+ writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+}
+
static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
@@ -217,36 +232,6 @@ static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
return PTR_ERR_OR_ZERO(res->phy_reset);
}
-static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
-{
- struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
- struct dw_pcie *pci = pcie->pci;
- struct device *dev = pci->dev;
-
- res->vdda = devm_regulator_get(dev, "vdda");
- if (IS_ERR(res->vdda))
- return PTR_ERR(res->vdda);
-
- res->iface = devm_clk_get(dev, "iface");
- if (IS_ERR(res->iface))
- return PTR_ERR(res->iface);
-
- res->aux = devm_clk_get(dev, "aux");
- if (IS_ERR(res->aux))
- return PTR_ERR(res->aux);
-
- res->master_bus = devm_clk_get(dev, "master_bus");
- if (IS_ERR(res->master_bus))
- return PTR_ERR(res->master_bus);
-
- res->slave_bus = devm_clk_get(dev, "slave_bus");
- if (IS_ERR(res->slave_bus))
- return PTR_ERR(res->slave_bus);
-
- res->core = devm_reset_control_get(dev, "core");
- return PTR_ERR_OR_ZERO(res->core);
-}
-
static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
@@ -357,6 +342,13 @@ static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
/* wait for clock acquisition */
usleep_range(1000, 1500);
+
+ /* Set the max TLP size to 2K, instead of using the default of 4K */
+ writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
+ pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
+ writel(CFG_BRIDGE_SB_INIT,
+ pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
+
return 0;
err_deassert_ahb:
@@ -375,6 +367,36 @@ err_refclk:
return ret;
}
+static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+
+ res->vdda = devm_regulator_get(dev, "vdda");
+ if (IS_ERR(res->vdda))
+ return PTR_ERR(res->vdda);
+
+ res->iface = devm_clk_get(dev, "iface");
+ if (IS_ERR(res->iface))
+ return PTR_ERR(res->iface);
+
+ res->aux = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux))
+ return PTR_ERR(res->aux);
+
+ res->master_bus = devm_clk_get(dev, "master_bus");
+ if (IS_ERR(res->master_bus))
+ return PTR_ERR(res->master_bus);
+
+ res->slave_bus = devm_clk_get(dev, "slave_bus");
+ if (IS_ERR(res->slave_bus))
+ return PTR_ERR(res->slave_bus);
+
+ res->core = devm_reset_control_get(dev, "core");
+ return PTR_ERR_OR_ZERO(res->core);
+}
+
static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
@@ -455,6 +477,16 @@ err_res:
return ret;
}
+static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
+{
+ u32 val;
+
+ /* enable link training */
+ val = readl(pcie->parf + PCIE20_PARF_LTSSM);
+ val |= BIT(8);
+ writel(val, pcie->parf + PCIE20_PARF_LTSSM);
+}
+
static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
@@ -481,6 +513,17 @@ static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
return PTR_ERR_OR_ZERO(res->pipe_clk);
}
+static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
+
+ clk_disable_unprepare(res->pipe_clk);
+ clk_disable_unprepare(res->slave_clk);
+ clk_disable_unprepare(res->master_clk);
+ clk_disable_unprepare(res->cfg_clk);
+ clk_disable_unprepare(res->aux_clk);
+}
+
static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
@@ -562,22 +605,290 @@ static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
return 0;
}
-static int qcom_pcie_link_up(struct dw_pcie *pci)
+static int qcom_pcie_get_resources_v3(struct qcom_pcie *pcie)
{
- u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
+ struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
- return !!(val & PCI_EXP_LNKSTA_DLLLA);
+ res->aux_clk = devm_clk_get(dev, "aux");
+ if (IS_ERR(res->aux_clk))
+ return PTR_ERR(res->aux_clk);
+
+ res->master_clk = devm_clk_get(dev, "master_bus");
+ if (IS_ERR(res->master_clk))
+ return PTR_ERR(res->master_clk);
+
+ res->slave_clk = devm_clk_get(dev, "slave_bus");
+ if (IS_ERR(res->slave_clk))
+ return PTR_ERR(res->slave_clk);
+
+ res->axi_m_reset = devm_reset_control_get(dev, "axi_m");
+ if (IS_ERR(res->axi_m_reset))
+ return PTR_ERR(res->axi_m_reset);
+
+ res->axi_s_reset = devm_reset_control_get(dev, "axi_s");
+ if (IS_ERR(res->axi_s_reset))
+ return PTR_ERR(res->axi_s_reset);
+
+ res->pipe_reset = devm_reset_control_get(dev, "pipe");
+ if (IS_ERR(res->pipe_reset))
+ return PTR_ERR(res->pipe_reset);
+
+ res->axi_m_vmid_reset = devm_reset_control_get(dev, "axi_m_vmid");
+ if (IS_ERR(res->axi_m_vmid_reset))
+ return PTR_ERR(res->axi_m_vmid_reset);
+
+ res->axi_s_xpu_reset = devm_reset_control_get(dev, "axi_s_xpu");
+ if (IS_ERR(res->axi_s_xpu_reset))
+ return PTR_ERR(res->axi_s_xpu_reset);
+
+ res->parf_reset = devm_reset_control_get(dev, "parf");
+ if (IS_ERR(res->parf_reset))
+ return PTR_ERR(res->parf_reset);
+
+ res->phy_reset = devm_reset_control_get(dev, "phy");
+ if (IS_ERR(res->phy_reset))
+ return PTR_ERR(res->phy_reset);
+
+ res->axi_m_sticky_reset = devm_reset_control_get(dev, "axi_m_sticky");
+ if (IS_ERR(res->axi_m_sticky_reset))
+ return PTR_ERR(res->axi_m_sticky_reset);
+
+ res->pipe_sticky_reset = devm_reset_control_get(dev, "pipe_sticky");
+ if (IS_ERR(res->pipe_sticky_reset))
+ return PTR_ERR(res->pipe_sticky_reset);
+
+ res->pwr_reset = devm_reset_control_get(dev, "pwr");
+ if (IS_ERR(res->pwr_reset))
+ return PTR_ERR(res->pwr_reset);
+
+ res->ahb_reset = devm_reset_control_get(dev, "ahb");
+ if (IS_ERR(res->ahb_reset))
+ return PTR_ERR(res->ahb_reset);
+
+ res->phy_ahb_reset = devm_reset_control_get(dev, "phy_ahb");
+ if (IS_ERR(res->phy_ahb_reset))
+ return PTR_ERR(res->phy_ahb_reset);
+
+ return 0;
}
-static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
+static void qcom_pcie_deinit_v3(struct qcom_pcie *pcie)
{
- struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
-
- clk_disable_unprepare(res->pipe_clk);
+ struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
+
+ reset_control_assert(res->axi_m_reset);
+ reset_control_assert(res->axi_s_reset);
+ reset_control_assert(res->pipe_reset);
+ reset_control_assert(res->pipe_sticky_reset);
+ reset_control_assert(res->phy_reset);
+ reset_control_assert(res->phy_ahb_reset);
+ reset_control_assert(res->axi_m_sticky_reset);
+ reset_control_assert(res->pwr_reset);
+ reset_control_assert(res->ahb_reset);
+ clk_disable_unprepare(res->aux_clk);
+ clk_disable_unprepare(res->master_clk);
clk_disable_unprepare(res->slave_clk);
+}
+
+static int qcom_pcie_init_v3(struct qcom_pcie *pcie)
+{
+ struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
+ struct dw_pcie *pci = pcie->pci;
+ struct device *dev = pci->dev;
+ u32 val;
+ int ret;
+
+ ret = reset_control_assert(res->axi_m_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert axi master reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->axi_s_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert axi slave reset\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_assert(res->pipe_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert pipe reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->pipe_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert pipe sticky reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->phy_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert phy reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->phy_ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert phy ahb reset\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_assert(res->axi_m_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert axi master sticky reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->pwr_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert power reset\n");
+ return ret;
+ }
+
+ ret = reset_control_assert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot assert ahb reset\n");
+ return ret;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_deassert(res->phy_ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert phy ahb reset\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(res->phy_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert phy reset\n");
+ goto err_rst_phy;
+ }
+
+ ret = reset_control_deassert(res->pipe_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert pipe reset\n");
+ goto err_rst_pipe;
+ }
+
+ ret = reset_control_deassert(res->pipe_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert pipe sticky reset\n");
+ goto err_rst_pipe_sticky;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = reset_control_deassert(res->axi_m_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi master reset\n");
+ goto err_rst_axi_m;
+ }
+
+ ret = reset_control_deassert(res->axi_m_sticky_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi master sticky reset\n");
+ goto err_rst_axi_m_sticky;
+ }
+
+ ret = reset_control_deassert(res->axi_s_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert axi slave reset\n");
+ goto err_rst_axi_s;
+ }
+
+ ret = reset_control_deassert(res->pwr_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert power reset\n");
+ goto err_rst_pwr;
+ }
+
+ ret = reset_control_deassert(res->ahb_reset);
+ if (ret) {
+ dev_err(dev, "cannot deassert ahb reset\n");
+ goto err_rst_ahb;
+ }
+
+ usleep_range(10000, 12000);
+
+ ret = clk_prepare_enable(res->aux_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable iface clock\n");
+ goto err_clk_aux;
+ }
+
+ ret = clk_prepare_enable(res->master_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable core clock\n");
+ goto err_clk_axi_m;
+ }
+
+ ret = clk_prepare_enable(res->slave_clk);
+ if (ret) {
+ dev_err(dev, "cannot prepare/enable phy clock\n");
+ goto err_clk_axi_s;
+ }
+
+ /* enable PCIe clocks and resets */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+ val &= !BIT(0);
+ writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+ /* change DBI base address */
+ writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+ /* MAC PHY_POWERDOWN MUX DISABLE */
+ val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
+ val &= ~BIT(29);
+ writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+ val |= BIT(4);
+ writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
+
+ val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+ val |= BIT(31);
+ writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
+
+ return 0;
+
+err_clk_axi_s:
clk_disable_unprepare(res->master_clk);
- clk_disable_unprepare(res->cfg_clk);
+err_clk_axi_m:
clk_disable_unprepare(res->aux_clk);
+err_clk_aux:
+ reset_control_assert(res->ahb_reset);
+err_rst_ahb:
+ reset_control_assert(res->pwr_reset);
+err_rst_pwr:
+ reset_control_assert(res->axi_s_reset);
+err_rst_axi_s:
+ reset_control_assert(res->axi_m_sticky_reset);
+err_rst_axi_m_sticky:
+ reset_control_assert(res->axi_m_reset);
+err_rst_axi_m:
+ reset_control_assert(res->pipe_sticky_reset);
+err_rst_pipe_sticky:
+ reset_control_assert(res->pipe_reset);
+err_rst_pipe:
+ reset_control_assert(res->phy_reset);
+err_rst_phy:
+ reset_control_assert(res->phy_ahb_reset);
+ return ret;
+}
+
+static int qcom_pcie_link_up(struct dw_pcie *pci)
+{
+ u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);
+
+ return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
static void qcom_pcie_host_init(struct pcie_port *pp)
@@ -634,7 +945,7 @@ static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
return dw_pcie_read(pci->dbi_base + where, size, val);
}
-static struct dw_pcie_host_ops qcom_pcie_dw_ops = {
+static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
.host_init = qcom_pcie_host_init,
.rd_own_conf = qcom_pcie_rd_own_conf,
};
@@ -665,6 +976,13 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
};
+static const struct qcom_pcie_ops ops_v3 = {
+ .get_resources = qcom_pcie_get_resources_v3,
+ .init = qcom_pcie_init_v3,
+ .deinit = qcom_pcie_deinit_v3,
+ .ltssm_enable = qcom_pcie_v2_ltssm_enable,
+};
+
static int qcom_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -727,7 +1045,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, pp->msi_irq,
qcom_pcie_msi_irq_handler,
- IRQF_SHARED, "qcom-pcie-msi", pp);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "qcom-pcie-msi", pp);
if (ret) {
dev_err(dev, "cannot request msi irq\n");
return ret;
@@ -754,6 +1073,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
{ .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
+ { .compatible = "qcom,pcie-ipq4019", .data = &ops_v3 },
{ }
};
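For the new ops_v3 table: the probe path is outside this hunk, so take the sequence below as an assumption about how the per-SoC callbacks are exercised rather than the driver's literal code. The IPQ4019 variant reuses qcom_pcie_v2_ltssm_enable() because link training is started through the same PARF LTSSM bit.

/* Hypothetical condensed bring-up order for "qcom,pcie-ipq4019",
 * mirroring ops_v3; error unwinding omitted. */
static int example_qcom_v3_bringup(struct qcom_pcie *pcie)
{
	int ret;

	ret = qcom_pcie_get_resources_v3(pcie);	/* clocks + the many resets */
	if (ret)
		return ret;

	ret = qcom_pcie_init_v3(pcie);		/* reset sequencing, clocks, PARF setup */
	if (ret)
		return ret;

	qcom_pcie_v2_ltssm_enable(pcie);	/* shared PARF LTSSM enable bit */

	return dw_pcie_wait_for_link(pcie->pci);
}
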
diff --git a/drivers/pci/dwc/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c
index 8ff36b3dbbdf..80897291e0fb 100644
--- a/drivers/pci/dwc/pcie-spear13xx.c
+++ b/drivers/pci/dwc/pcie-spear13xx.c
@@ -186,7 +186,7 @@ static void spear13xx_pcie_host_init(struct pcie_port *pp)
spear13xx_pcie_enable_interrupts(spear13xx_pcie);
}
-static struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
+static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
.host_init = spear13xx_pcie_host_init,
};
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 7f47cd5e10a5..89d61c2cbfaa 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -180,6 +180,31 @@ config PCIE_ROCKCHIP
There is 1 internal PCIe port available to support GEN2 with
4 slots.
+config PCIE_MEDIATEK
+ bool "MediaTek PCIe controller"
+ depends on ARM && (ARCH_MEDIATEK || COMPILE_TEST)
+ depends on OF
+ depends on PCI
+ select PCIEPORTBUS
+ help
+ Say Y here if you want to enable PCIe controller support on
+ MT7623 series SoCs. There is a single root complex with 3 root
+ ports available. Each port supports one Gen2 x1 lane.
+
+config PCIE_TANGO_SMP8759
+ bool "Tango SMP8759 PCIe controller (DANGEROUS)"
+ depends on ARCH_TANGO && PCI_MSI && OF
+ depends on BROKEN
+ select PCI_HOST_COMMON
+ help
+ Say Y here to enable PCIe controller support for Sigma Designs
+ Tango SMP8759-based systems.
+
+ Note: The SMP8759 controller multiplexes PCI config and MMIO
+ accesses, and Linux doesn't provide a way to serialize them.
+ This can lead to data corruption if drivers perform concurrent
+ config and MMIO accesses.
+
config VMD
depends on PCI_MSI && X86_64 && SRCU
tristate "Intel Volume Management Device Driver"
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index cab879578003..12382785e02a 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -18,6 +18,8 @@ obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
+obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
+obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
obj-$(CONFIG_VMD) += vmd.o
# The following drivers are for devices that use the generic ACPI
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 37d0bcd31f8a..5fb9b620ac78 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -886,12 +886,14 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct advk_pcie *pcie;
struct resource *res;
struct pci_bus *bus, *child;
+ struct pci_host_bridge *bridge;
int ret, irq;
- pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL);
- if (!pcie)
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
+ if (!bridge)
return -ENOMEM;
+ pcie = pci_host_bridge_priv(bridge);
pcie->pdev = pdev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -929,14 +931,21 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}
- bus = pci_scan_root_bus(dev, 0, &advk_pcie_ops,
- pcie, &pcie->resources);
- if (!bus) {
+ list_splice_init(&pcie->resources, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = pcie;
+ bridge->busnr = 0;
+ bridge->ops = &advk_pcie_ops;
+
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret < 0) {
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
- return -ENOMEM;
+ return ret;
}
+ bus = bridge->bus;
+
pci_bus_assign_resources(bus);
list_for_each_entry(child, &bus->children, node)
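The Aardvark conversion also shows the devm_ variant of the host-bridge allocation: driver state is embedded in the bridge via devm_pci_alloc_host_bridge()/pci_host_bridge_priv(), so there is no separate allocation to free on the error paths. A minimal sketch of that pattern (struct and function names hypothetical):

#include <linux/pci.h>
#include <linux/platform_device.h>

struct example_pcie {
	void __iomem *base;
	/* ... */
};

static int example_probe(struct platform_device *pdev)
{
	struct pci_host_bridge *bridge;
	struct example_pcie *pcie;

	bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);	/* private area follows the bridge */

	/* fill pcie and bridge->{windows,ops,sysdata,...}, then call
	 * pci_scan_root_bus_bridge(bridge) as in the sketch further up */
	return 0;
}
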
diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c
index d26501c4145a..5162dffc102b 100644
--- a/drivers/pci/host/pci-ftpci100.c
+++ b/drivers/pci/host/pci-ftpci100.c
@@ -25,6 +25,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/bitops.h>
#include <linux/irq.h>
+#include <linux/clk.h>
/*
* Special configuration registers directly in the first few words
@@ -37,6 +38,7 @@
#define PCI_CONFIG 0x28 /* PCI configuration command register */
#define PCI_DATA 0x2C
+#define FARADAY_PCI_STATUS_CMD 0x04 /* Status and command */
#define FARADAY_PCI_PMC 0x40 /* Power management control */
#define FARADAY_PCI_PMCSR 0x44 /* Power management status */
#define FARADAY_PCI_CTRL1 0x48 /* Control register 1 */
@@ -45,6 +47,8 @@
#define FARADAY_PCI_MEM2_BASE_SIZE 0x54 /* Memory base and size #2 */
#define FARADAY_PCI_MEM3_BASE_SIZE 0x58 /* Memory base and size #3 */
+#define PCI_STATUS_66MHZ_CAPABLE BIT(21)
+
/* Bits 31..28 gives INTD..INTA status */
#define PCI_CTRL2_INTSTS_SHIFT 28
#define PCI_CTRL2_INTMASK_CMDERR BIT(27)
@@ -117,6 +121,7 @@ struct faraday_pci {
void __iomem *base;
struct irq_domain *irqdomain;
struct pci_bus *bus;
+ struct clk *bus_clk;
};
static int faraday_res_to_memcfg(resource_size_t mem_base,
@@ -178,12 +183,11 @@ static int faraday_res_to_memcfg(resource_size_t mem_base,
return 0;
}
-static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn,
- int config, int size, u32 *value)
+static int faraday_raw_pci_read_config(struct faraday_pci *p, int bus_number,
+ unsigned int fn, int config, int size,
+ u32 *value)
{
- struct faraday_pci *p = bus->sysdata;
-
- writel(PCI_CONF_BUS(bus->number) |
+ writel(PCI_CONF_BUS(bus_number) |
PCI_CONF_DEVICE(PCI_SLOT(fn)) |
PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
PCI_CONF_WHERE(config) |
@@ -197,24 +201,28 @@ static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn,
else if (size == 2)
*value = (*value >> (8 * (config & 3))) & 0xFFFF;
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn,
+ int config, int size, u32 *value)
+{
+ struct faraday_pci *p = bus->sysdata;
+
dev_dbg(&bus->dev,
"[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value);
- return PCIBIOS_SUCCESSFUL;
+ return faraday_raw_pci_read_config(p, bus->number, fn, config, size, value);
}
-static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn,
- int config, int size, u32 value)
+static int faraday_raw_pci_write_config(struct faraday_pci *p, int bus_number,
+ unsigned int fn, int config, int size,
+ u32 value)
{
- struct faraday_pci *p = bus->sysdata;
int ret = PCIBIOS_SUCCESSFUL;
- dev_dbg(&bus->dev,
- "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
- PCI_SLOT(fn), PCI_FUNC(fn), config, size, value);
-
- writel(PCI_CONF_BUS(bus->number) |
+ writel(PCI_CONF_BUS(bus_number) |
PCI_CONF_DEVICE(PCI_SLOT(fn)) |
PCI_CONF_FUNCTION(PCI_FUNC(fn)) |
PCI_CONF_WHERE(config) |
@@ -238,6 +246,19 @@ static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn,
return ret;
}
+static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn,
+ int config, int size, u32 value)
+{
+ struct faraday_pci *p = bus->sysdata;
+
+ dev_dbg(&bus->dev,
+ "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n",
+ PCI_SLOT(fn), PCI_FUNC(fn), config, size, value);
+
+ return faraday_raw_pci_write_config(p, bus->number, fn, config, size,
+ value);
+}
+
static struct pci_ops faraday_pci_ops = {
.read = faraday_pci_read_config,
.write = faraday_pci_write_config,
@@ -248,10 +269,10 @@ static void faraday_pci_ack_irq(struct irq_data *d)
struct faraday_pci *p = irq_data_get_irq_chip_data(d);
unsigned int reg;
- faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+ faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT);
reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTSTS_SHIFT);
- faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg);
+ faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg);
}
static void faraday_pci_mask_irq(struct irq_data *d)
@@ -259,10 +280,10 @@ static void faraday_pci_mask_irq(struct irq_data *d)
struct faraday_pci *p = irq_data_get_irq_chip_data(d);
unsigned int reg;
- faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+ faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
reg &= ~((0xF << PCI_CTRL2_INTSTS_SHIFT)
| BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT));
- faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg);
+ faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg);
}
static void faraday_pci_unmask_irq(struct irq_data *d)
@@ -270,10 +291,10 @@ static void faraday_pci_unmask_irq(struct irq_data *d)
struct faraday_pci *p = irq_data_get_irq_chip_data(d);
unsigned int reg;
- faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+ faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT);
reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT);
- faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg);
+ faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg);
}
static void faraday_pci_irq_handler(struct irq_desc *desc)
@@ -282,7 +303,7 @@ static void faraday_pci_irq_handler(struct irq_desc *desc)
struct irq_chip *irqchip = irq_desc_get_chip(desc);
unsigned int irq_stat, reg, i;
- faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, &reg);
+ faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, &reg);
irq_stat = reg >> PCI_CTRL2_INTSTS_SHIFT;
chained_irq_enter(irqchip, desc);
@@ -403,8 +424,8 @@ static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p,
dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n",
i + 1, range.pci_addr, end, val);
if (i <= 2) {
- faraday_pci_write_config(p->bus, 0, confreg[i],
- 4, val);
+ faraday_raw_pci_write_config(p, 0, 0, confreg[i],
+ 4, val);
} else {
dev_err(dev, "ignore extraneous dma-range %d\n", i);
break;
@@ -428,11 +449,14 @@ static int faraday_pci_probe(struct platform_device *pdev)
struct resource *mem;
struct resource *io;
struct pci_host_bridge *host;
+ struct clk *clk;
+ unsigned char max_bus_speed = PCI_SPEED_33MHz;
+ unsigned char cur_bus_speed = PCI_SPEED_33MHz;
int ret;
u32 val;
LIST_HEAD(res);
- host = pci_alloc_host_bridge(sizeof(*p));
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*p));
if (!host)
return -ENOMEM;
@@ -440,10 +464,30 @@ static int faraday_pci_probe(struct platform_device *pdev)
host->ops = &faraday_pci_ops;
host->busnr = 0;
host->msi = NULL;
+ host->map_irq = of_irq_parse_and_map_pci;
+ host->swizzle_irq = pci_common_swizzle;
p = pci_host_bridge_priv(host);
host->sysdata = p;
p->dev = dev;
+ /* Retrieve and enable optional clocks */
+ clk = devm_clk_get(dev, "PCLK");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ dev_err(dev, "could not prepare PCLK\n");
+ return ret;
+ }
+ p->bus_clk = devm_clk_get(dev, "PCICLK");
+ if (IS_ERR(p->bus_clk))
+ return PTR_ERR(p->bus_clk);
+ ret = clk_prepare_enable(p->bus_clk);
+ if (ret) {
+ dev_err(dev, "could not prepare PCICLK\n");
+ return ret;
+ }
+
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
p->base = devm_ioremap_resource(dev, regs);
if (IS_ERR(p->base))
@@ -496,17 +540,8 @@ static int faraday_pci_probe(struct platform_device *pdev)
val |= PCI_COMMAND_MEMORY;
val |= PCI_COMMAND_MASTER;
writel(val, p->base + PCI_CTRL);
-
- list_splice_init(&res, &host->windows);
- ret = pci_register_host_bridge(host);
- if (ret) {
- dev_err(dev, "failed to register host: %d\n", ret);
- return ret;
- }
- p->bus = host->bus;
-
/* Mask and clear all interrupts */
- faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000);
+ faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000);
if (variant->cascaded_irq) {
ret = faraday_pci_setup_cascaded_irq(p);
if (ret) {
@@ -515,12 +550,48 @@ static int faraday_pci_probe(struct platform_device *pdev)
}
}
+ /* Check bus clock if we can gear up to 66 MHz */
+ if (!IS_ERR(p->bus_clk)) {
+ unsigned long rate;
+ u32 val;
+
+ faraday_raw_pci_read_config(p, 0, 0,
+ FARADAY_PCI_STATUS_CMD, 4, &val);
+ rate = clk_get_rate(p->bus_clk);
+
+ if ((rate == 33000000) && (val & PCI_STATUS_66MHZ_CAPABLE)) {
+ dev_info(dev, "33MHz bus is 66MHz capable\n");
+ max_bus_speed = PCI_SPEED_66MHz;
+ ret = clk_set_rate(p->bus_clk, 66000000);
+ if (ret)
+ dev_err(dev, "failed to set bus clock\n");
+ } else {
+ dev_info(dev, "33MHz only bus\n");
+ max_bus_speed = PCI_SPEED_33MHz;
+ }
+
+ /* Bumping the clock may fail so read back the rate */
+ rate = clk_get_rate(p->bus_clk);
+ if (rate == 33000000)
+ cur_bus_speed = PCI_SPEED_33MHz;
+ if (rate == 66000000)
+ cur_bus_speed = PCI_SPEED_66MHz;
+ }
+
ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node);
if (ret)
return ret;
- pci_scan_child_bus(p->bus);
- pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+ list_splice_init(&res, &host->windows);
+ ret = pci_scan_root_bus_bridge(host);
+ if (ret) {
+ dev_err(dev, "failed to scan host: %d\n", ret);
+ return ret;
+ }
+ p->bus = host->bus;
+ p->bus->max_bus_speed = max_bus_speed;
+ p->bus->cur_bus_speed = cur_bus_speed;
+
pci_bus_assign_resources(p->bus);
pci_bus_add_devices(p->bus);
pci_free_resource_list(&res);
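One detail worth spelling out in the Faraday hunk: PCI_STATUS_66MHZ_CAPABLE is BIT(21) because FARADAY_PCI_STATUS_CMD (offset 0x04) reads the standard command and status registers as a single 32-bit word, putting the status register in the upper halfword; the standard 66 MHz-capable flag is bit 5 of the status register, hence 16 + 5 = 21. Expressed against the generic register definitions:

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/pci_regs.h>
#include <linux/types.h>

/* Status lives in the upper halfword of the combined status/command
 * dword, so the standard PCI_STATUS_66MHZ flag (bit 5) moves up by 16. */
#define EXAMPLE_STATUS_66MHZ_IN_DWORD	((u32)PCI_STATUS_66MHZ << 16)

static inline void example_check_66mhz_bit(void)
{
	BUILD_BUG_ON(EXAMPLE_STATUS_66MHZ_IN_DWORD != BIT(21));
}
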
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
index e9a53bae1c25..44a47d4f0b8f 100644
--- a/drivers/pci/host/pci-host-common.c
+++ b/drivers/pci/host/pci-host-common.c
@@ -117,8 +117,14 @@ int pci_host_common_probe(struct platform_device *pdev,
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct pci_bus *bus, *child;
+ struct pci_host_bridge *bridge;
struct pci_config_window *cfg;
struct list_head resources;
+ int ret;
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return -ENOMEM;
type = of_get_property(np, "device_type", NULL);
if (!type || strcmp(type, "pci")) {
@@ -138,16 +144,21 @@ int pci_host_common_probe(struct platform_device *pdev,
if (!pci_has_flag(PCI_PROBE_ONLY))
pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
- bus = pci_scan_root_bus(dev, cfg->busr.start, &ops->pci_ops, cfg,
- &resources);
- if (!bus) {
- dev_err(dev, "Scanning rootbus failed");
- return -ENODEV;
+ list_splice_init(&resources, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = cfg;
+ bridge->busnr = cfg->busr.start;
+ bridge->ops = &ops->pci_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret < 0) {
+ dev_err(dev, "Scanning root bridge failed");
+ return ret;
}
-#ifdef CONFIG_ARM
- pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
-#endif
+ bus = bridge->bus;
/*
* We insert PCI resources into the iomem_resource and
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 84936383e269..415dcc69a502 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -64,22 +64,39 @@
* major version.
*/
-#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (major)))
+#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff)
-enum {
- PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),
- PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1
+enum pci_protocol_version_t {
+ PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), /* Win10 */
+ PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2), /* RS1 */
};
#define CPU_AFFINITY_ALL -1ULL
+
+/*
+ * Supported protocol versions in the order of probing - highest go
+ * first.
+ */
+static enum pci_protocol_version_t pci_protocol_versions[] = {
+ PCI_PROTOCOL_VERSION_1_2,
+ PCI_PROTOCOL_VERSION_1_1,
+};
+
+/*
+ * Protocol version negotiated by hv_pci_protocol_negotiation().
+ */
+static enum pci_protocol_version_t pci_protocol_version;
+
#define PCI_CONFIG_MMIO_LENGTH 0x2000
#define CFG_PAGE_OFFSET 0x1000
#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
#define MAX_SUPPORTED_MSI_MESSAGES 0x400
+#define STATUS_REVISION_MISMATCH 0xC0000059
+
/*
* Message Types
*/
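The first Hyper-V hunk fixes PCI_MAKE_VERSION(), which previously ORed the major number in twice and therefore dropped the minor number, making 1.1 and 1.2 indistinguishable. A small standalone program showing the corrected encoding and the decode macros (illustrative only):

#include <stdint.h>
#include <stdio.h>

#define PCI_MAKE_VERSION(major, minor) ((uint32_t)(((major) << 16) | (minor)))
#define PCI_MAJOR_VERSION(version)     ((uint32_t)(version) >> 16)
#define PCI_MINOR_VERSION(version)     ((uint32_t)(version) & 0xff)

int main(void)
{
	uint32_t v11 = PCI_MAKE_VERSION(1, 1);	/* 0x00010001, Win10 */
	uint32_t v12 = PCI_MAKE_VERSION(1, 2);	/* 0x00010002, RS1   */

	printf("1.1 -> %#x (major %u, minor %u)\n", (unsigned)v11,
	       (unsigned)PCI_MAJOR_VERSION(v11), (unsigned)PCI_MINOR_VERSION(v11));
	printf("1.2 -> %#x (major %u, minor %u)\n", (unsigned)v12,
	       (unsigned)PCI_MAJOR_VERSION(v12), (unsigned)PCI_MINOR_VERSION(v12));
	/* the pre-fix macro ORed in "major" twice, so both versions
	 * encoded to 0x00010001 and could not be told apart */
	return 0;
}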
@@ -109,6 +126,9 @@ enum pci_message_type {
PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13,
PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14,
PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15,
+ PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16,
+ PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17,
+ PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */
PCI_MESSAGE_MAXIMUM
};
@@ -179,6 +199,30 @@ struct hv_msi_desc {
} __packed;
/**
+ * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
+ * @vector: IDT entry
+ * @delivery_mode: As defined in Intel's Programmer's
+ * Reference Manual, Volume 3, Chapter 8.
+ * @vector_count: Number of contiguous entries in the
+ * Interrupt Descriptor Table that are
+ * occupied by this Message-Signaled
+ * Interrupt. For "MSI", as first defined
+ * in PCI 2.2, this can be between 1 and
+ * 32. For "MSI-X," as first defined in PCI
+ * 3.0, this must be 1, as each MSI-X table
+ * entry would have its own descriptor.
+ * @processor_count: number of bits enabled in array.
+ * @processor_array: All the target virtual processors.
+ */
+struct hv_msi_desc2 {
+ u8 vector;
+ u8 delivery_mode;
+ u16 vector_count;
+ u16 processor_count;
+ u16 processor_array[32];
+} __packed;
+
+/**
* struct tran_int_desc
* @reserved: unused, padding
* @vector_count: same as in hv_msi_desc
@@ -245,7 +289,7 @@ struct pci_packet {
struct pci_version_request {
struct pci_message message_type;
- enum pci_message_type protocol_version;
+ u32 protocol_version;
} __packed;
/*
@@ -294,6 +338,14 @@ struct pci_resources_assigned {
u32 reserved[4];
} __packed;
+struct pci_resources_assigned2 {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+ u8 memory_range[0x14][6]; /* not used here */
+ u32 msi_descriptor_count;
+ u8 reserved[70];
+} __packed;
+
struct pci_create_interrupt {
struct pci_message message_type;
union win_slot_encoding wslot;
@@ -306,6 +358,12 @@ struct pci_create_int_response {
struct tran_int_desc int_desc;
} __packed;
+struct pci_create_interrupt2 {
+ struct pci_message message_type;
+ union win_slot_encoding wslot;
+ struct hv_msi_desc2 int_desc;
+} __packed;
+
struct pci_delete_interrupt {
struct pci_message message_type;
union win_slot_encoding wslot;
@@ -331,17 +389,42 @@ static int pci_ring_size = (4 * PAGE_SIZE);
#define HV_PARTITION_ID_SELF ((u64)-1)
#define HVCALL_RETARGET_INTERRUPT 0x7e
-struct retarget_msi_interrupt {
- u64 partition_id; /* use "self" */
- u64 device_id;
+struct hv_interrupt_entry {
u32 source; /* 1 for MSI(-X) */
u32 reserved1;
u32 address;
u32 data;
- u64 reserved2;
+};
+
+#define HV_VP_SET_BANK_COUNT_MAX 5 /* current implementation limit */
+
+struct hv_vp_set {
+ u64 format; /* 0 (HvGenericSetSparse4k) */
+ u64 valid_banks;
+ u64 masks[HV_VP_SET_BANK_COUNT_MAX];
+};
+
+/*
+ * flags for hv_device_interrupt_target.flags
+ */
+#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1
+#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2
+
+struct hv_device_interrupt_target {
u32 vector;
u32 flags;
- u64 vp_mask;
+ union {
+ u64 vp_mask;
+ struct hv_vp_set vp_set;
+ };
+};
+
+struct retarget_msi_interrupt {
+ u64 partition_id; /* use "self" */
+ u64 device_id;
+ struct hv_interrupt_entry int_entry;
+ u64 reserved2;
+ struct hv_device_interrupt_target int_target;
} __packed;
/*
@@ -382,7 +465,10 @@ struct hv_pcibus_device {
struct msi_domain_info msi_info;
struct msi_controller msi_chip;
struct irq_domain *irq_domain;
+
+ /* hypercall arg, must not cross page boundary */
struct retarget_msi_interrupt retarget_msi_interrupt_params;
+
spinlock_t retarget_msi_interrupt_lock;
};
@@ -476,6 +562,52 @@ static void put_pcichild(struct hv_pci_dev *hv_pcidev,
static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
+
+/*
+ * Temporary CPU to vCPU mapping to address transitioning
+ * vmbus_cpu_number_to_vp_number() being migrated to
+ * hv_cpu_number_to_vp_number() in a separate patch. Once that patch
+ * has been picked up in the main line, remove this code here and use
+ * the official code.
+ */
+static struct hv_tmpcpumap
+{
+ bool initialized;
+ u32 vp_index[NR_CPUS];
+} hv_tmpcpumap;
+
+static void hv_tmpcpumap_init_cpu(void *_unused)
+{
+ int cpu = smp_processor_id();
+ u64 vp_index;
+
+ hv_get_vp_index(vp_index);
+
+ hv_tmpcpumap.vp_index[cpu] = vp_index;
+}
+
+static void hv_tmpcpumap_init(void)
+{
+ if (hv_tmpcpumap.initialized)
+ return;
+
+ memset(hv_tmpcpumap.vp_index, -1, sizeof(hv_tmpcpumap.vp_index));
+ on_each_cpu(hv_tmpcpumap_init_cpu, NULL, true);
+ hv_tmpcpumap.initialized = true;
+}
+
+/**
+ * hv_tmp_cpu_nr_to_vp_nr() - Convert Linux CPU nr to Hyper-V vCPU nr
+ *
+ * Remove once vmbus_cpu_number_to_vp_number() has been converted to
+ * hv_cpu_number_to_vp_number() and replace callers appropriately.
+ */
+static u32 hv_tmp_cpu_nr_to_vp_nr(int cpu)
+{
+ return hv_tmpcpumap.vp_index[cpu];
+}
+
+
/**
* devfn_to_wslot() - Convert from Linux PCI slot to Windows
* @devfn: The Linux representation of PCI slot
@@ -786,8 +918,11 @@ static void hv_irq_unmask(struct irq_data *data)
struct cpumask *dest;
struct pci_bus *pbus;
struct pci_dev *pdev;
- int cpu;
unsigned long flags;
+ u32 var_size = 0;
+ int cpu_vmbus;
+ int cpu;
+ u64 res;
dest = irq_data_get_affinity_mask(data);
pdev = msi_desc_to_pci_dev(msi_desc);
@@ -799,23 +934,74 @@ static void hv_irq_unmask(struct irq_data *data)
params = &hbus->retarget_msi_interrupt_params;
memset(params, 0, sizeof(*params));
params->partition_id = HV_PARTITION_ID_SELF;
- params->source = 1; /* MSI(-X) */
- params->address = msi_desc->msg.address_lo;
- params->data = msi_desc->msg.data;
+ params->int_entry.source = 1; /* MSI(-X) */
+ params->int_entry.address = msi_desc->msg.address_lo;
+ params->int_entry.data = msi_desc->msg.data;
params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
(hbus->hdev->dev_instance.b[4] << 16) |
(hbus->hdev->dev_instance.b[7] << 8) |
(hbus->hdev->dev_instance.b[6] & 0xf8) |
PCI_FUNC(pdev->devfn);
- params->vector = cfg->vector;
+ params->int_target.vector = cfg->vector;
+
+ /*
+ * Honoring apic->irq_delivery_mode set to dest_Fixed by
+ * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
+ * spurious interrupt storm. Not doing so does not seem to have a
+ * negative effect (yet?).
+ */
+
+ if (pci_protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
+ /*
+ * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
+ * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
+ * with >64 VP support.
+ * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
+ * is not sufficient for this hypercall.
+ */
+ params->int_target.flags |=
+ HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
+ params->int_target.vp_set.valid_banks =
+ (1ull << HV_VP_SET_BANK_COUNT_MAX) - 1;
+
+ /*
+ * var-sized hypercall, var-size starts after vp_mask (thus
+ * vp_set.format does not count, but vp_set.valid_banks does).
+ */
+ var_size = 1 + HV_VP_SET_BANK_COUNT_MAX;
- for_each_cpu_and(cpu, dest, cpu_online_mask)
- params->vp_mask |= (1ULL << vmbus_cpu_number_to_vp_number(cpu));
+ for_each_cpu_and(cpu, dest, cpu_online_mask) {
+ cpu_vmbus = hv_tmp_cpu_nr_to_vp_nr(cpu);
- hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, params, NULL);
+ if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) {
+ dev_err(&hbus->hdev->device,
+ "too high CPU %d", cpu_vmbus);
+ res = 1;
+ goto exit_unlock;
+ }
+ params->int_target.vp_set.masks[cpu_vmbus / 64] |=
+ (1ULL << (cpu_vmbus & 63));
+ }
+ } else {
+ for_each_cpu_and(cpu, dest, cpu_online_mask) {
+ params->int_target.vp_mask |=
+ (1ULL << hv_tmp_cpu_nr_to_vp_nr(cpu));
+ }
+ }
+
+ res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
+ params, NULL);
+
+exit_unlock:
spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
+ if (res) {
+ dev_err(&hbus->hdev->device,
+ "%s() failed: %#llx", __func__, res);
+ return;
+ }
+
pci_msi_unmask_irq(data);
}
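With protocol 1.2 the retarget hypercall switches from a flat 64-bit vp_mask to a sparse VP_SET: each target vCPU lands in bank vp / 64, bit vp % 64, and anything at or above HV_VP_SET_BANK_COUNT_MAX * 64 = 320 must be rejected, exactly as hv_irq_unmask() now does before issuing the variable-sized hypercall. A standalone sketch of that bookkeeping (illustrative only; vp_set_add() is an invented name for the inline bit manipulation):

#include <stdint.h>
#include <stdio.h>

#define HV_VP_SET_BANK_COUNT_MAX 5

/* Place one vCPU number into a sparse VP_SET: bank = vp / 64, bit = vp % 64;
 * vCPUs beyond 5 * 64 = 320 cannot be represented with this bank count. */
static int vp_set_add(uint64_t masks[HV_VP_SET_BANK_COUNT_MAX], int vp)
{
	if (vp >= HV_VP_SET_BANK_COUNT_MAX * 64)
		return -1;
	masks[vp / 64] |= 1ULL << (vp & 63);
	return 0;
}

int main(void)
{
	uint64_t masks[HV_VP_SET_BANK_COUNT_MAX] = { 0 };

	vp_set_add(masks, 3);	/* bank 0, bit 3 */
	vp_set_add(masks, 70);	/* bank 1, bit 6 */
	printf("bank0=%#llx bank1=%#llx\n",
	       (unsigned long long)masks[0], (unsigned long long)masks[1]);
	return 0;
}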
@@ -836,6 +1022,53 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp,
complete(&comp_pkt->comp_pkt.host_event);
}
+static u32 hv_compose_msi_req_v1(
+ struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
+ u32 slot, u8 vector)
+{
+ int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
+ int_pkt->wslot.slot = slot;
+ int_pkt->int_desc.vector = vector;
+ int_pkt->int_desc.vector_count = 1;
+ int_pkt->int_desc.delivery_mode =
+ (apic->irq_delivery_mode == dest_LowestPrio) ?
+ dest_LowestPrio : dest_Fixed;
+
+ /*
+ * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
+ * hv_irq_unmask().
+ */
+ int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
+
+ return sizeof(*int_pkt);
+}
+
+static u32 hv_compose_msi_req_v2(
+ struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
+ u32 slot, u8 vector)
+{
+ int cpu;
+
+ int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
+ int_pkt->wslot.slot = slot;
+ int_pkt->int_desc.vector = vector;
+ int_pkt->int_desc.vector_count = 1;
+ int_pkt->int_desc.delivery_mode =
+ (apic->irq_delivery_mode == dest_LowestPrio) ?
+ dest_LowestPrio : dest_Fixed;
+
+ /*
+ * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
+ * by subsequent retarget in hv_irq_unmask().
+ */
+ cpu = cpumask_first_and(affinity, cpu_online_mask);
+ int_pkt->int_desc.processor_array[0] =
+ hv_tmp_cpu_nr_to_vp_nr(cpu);
+ int_pkt->int_desc.processor_count = 1;
+
+ return sizeof(*int_pkt);
+}
+
/**
* hv_compose_msi_msg() - Supplies a valid MSI address/data
* @data: Everything about this MSI
@@ -854,15 +1087,17 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct hv_pci_dev *hpdev;
struct pci_bus *pbus;
struct pci_dev *pdev;
- struct pci_create_interrupt *int_pkt;
struct compose_comp_ctxt comp;
struct tran_int_desc *int_desc;
- struct cpumask *affinity;
struct {
- struct pci_packet pkt;
- u8 buffer[sizeof(struct pci_create_interrupt)];
- } ctxt;
- int cpu;
+ struct pci_packet pci_pkt;
+ union {
+ struct pci_create_interrupt v1;
+ struct pci_create_interrupt2 v2;
+ } int_pkts;
+ } __packed ctxt;
+
+ u32 size;
int ret;
pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));
@@ -885,36 +1120,44 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
memset(&ctxt, 0, sizeof(ctxt));
init_completion(&comp.comp_pkt.host_event);
- ctxt.pkt.completion_func = hv_pci_compose_compl;
- ctxt.pkt.compl_ctxt = &comp;
- int_pkt = (struct pci_create_interrupt *)&ctxt.pkt.message;
- int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
- int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
- int_pkt->int_desc.vector = cfg->vector;
- int_pkt->int_desc.vector_count = 1;
- int_pkt->int_desc.delivery_mode =
- (apic->irq_delivery_mode == dest_LowestPrio) ? 1 : 0;
+ ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
+ ctxt.pci_pkt.compl_ctxt = &comp;
+
+ switch (pci_protocol_version) {
+ case PCI_PROTOCOL_VERSION_1_1:
+ size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
+ irq_data_get_affinity_mask(data),
+ hpdev->desc.win_slot.slot,
+ cfg->vector);
+ break;
- /*
- * This bit doesn't have to work on machines with more than 64
- * processors because Hyper-V only supports 64 in a guest.
- */
- affinity = irq_data_get_affinity_mask(data);
- if (cpumask_weight(affinity) >= 32) {
- int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
- } else {
- for_each_cpu_and(cpu, affinity, cpu_online_mask) {
- int_pkt->int_desc.cpu_mask |=
- (1ULL << vmbus_cpu_number_to_vp_number(cpu));
- }
+ case PCI_PROTOCOL_VERSION_1_2:
+ size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
+ irq_data_get_affinity_mask(data),
+ hpdev->desc.win_slot.slot,
+ cfg->vector);
+ break;
+
+ default:
+ /* As we only negotiate protocol versions known to this driver,
+ * this path should never hit. However, this is not a hot
+ * path so we print a message to aid future updates.
+ */
+ dev_err(&hbus->hdev->device,
+ "Unexpected vPCI protocol, update driver.");
+ goto free_int_desc;
}
- ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt,
- sizeof(*int_pkt), (unsigned long)&ctxt.pkt,
+ ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
+ size, (unsigned long)&ctxt.pci_pkt,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret)
+ if (ret) {
+ dev_err(&hbus->hdev->device,
+ "Sending request for interrupt failed: 0x%x",
+ comp.comp_pkt.completion_status);
goto free_int_desc;
+ }
wait_for_completion(&comp.comp_pkt.host_event);
@@ -1513,12 +1756,12 @@ static void pci_devices_present_work(struct work_struct *work)
put_pcichild(hpdev, hv_pcidev_ref_initial);
}
- switch(hbus->state) {
+ switch (hbus->state) {
case hv_pcibus_installed:
/*
- * Tell the core to rescan bus
- * because there may have been changes.
- */
+ * Tell the core to rescan bus
+ * because there may have been changes.
+ */
pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
pci_unlock_rescan_remove();
@@ -1800,6 +2043,7 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
struct hv_pci_compl comp_pkt;
struct pci_packet *pkt;
int ret;
+ int i;
/*
* Initiate the handshake with the host and negotiate
@@ -1816,26 +2060,44 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
pkt->compl_ctxt = &comp_pkt;
version_req = (struct pci_version_request *)&pkt->message;
version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
- version_req->protocol_version = PCI_PROTOCOL_VERSION_CURRENT;
- ret = vmbus_sendpacket(hdev->channel, version_req,
- sizeof(struct pci_version_request),
- (unsigned long)pkt, VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret)
- goto exit;
+ for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) {
+ version_req->protocol_version = pci_protocol_versions[i];
+ ret = vmbus_sendpacket(hdev->channel, version_req,
+ sizeof(struct pci_version_request),
+ (unsigned long)pkt, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret) {
+ dev_err(&hdev->device,
+ "PCI Pass-through VSP failed sending version reqquest: %#x",
+ ret);
+ goto exit;
+ }
- wait_for_completion(&comp_pkt.host_event);
+ wait_for_completion(&comp_pkt.host_event);
- if (comp_pkt.completion_status < 0) {
- dev_err(&hdev->device,
- "PCI Pass-through VSP failed version request %x\n",
- comp_pkt.completion_status);
- ret = -EPROTO;
- goto exit;
+ if (comp_pkt.completion_status >= 0) {
+ pci_protocol_version = pci_protocol_versions[i];
+ dev_info(&hdev->device,
+ "PCI VMBus probing: Using version %#x\n",
+ pci_protocol_version);
+ goto exit;
+ }
+
+ if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
+ dev_err(&hdev->device,
+ "PCI Pass-through VSP failed version request: %#x",
+ comp_pkt.completion_status);
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ reinit_completion(&comp_pkt.host_event);
}
- ret = 0;
+ dev_err(&hdev->device,
+ "PCI pass-through VSP failed to find supported version");
+ ret = -EPROTO;
exit:
kfree(pkt);
@@ -2094,13 +2356,18 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
{
struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
struct pci_resources_assigned *res_assigned;
+ struct pci_resources_assigned2 *res_assigned2;
struct hv_pci_compl comp_pkt;
struct hv_pci_dev *hpdev;
struct pci_packet *pkt;
+ size_t size_res;
u32 wslot;
int ret;
- pkt = kmalloc(sizeof(*pkt) + sizeof(*res_assigned), GFP_KERNEL);
+ size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2)
+ ? sizeof(*res_assigned) : sizeof(*res_assigned2);
+
+ pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
if (!pkt)
return -ENOMEM;
@@ -2111,22 +2378,30 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
if (!hpdev)
continue;
- memset(pkt, 0, sizeof(*pkt) + sizeof(*res_assigned));
+ memset(pkt, 0, sizeof(*pkt) + size_res);
init_completion(&comp_pkt.host_event);
pkt->completion_func = hv_pci_generic_compl;
pkt->compl_ctxt = &comp_pkt;
- res_assigned = (struct pci_resources_assigned *)&pkt->message;
- res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED;
- res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
+ if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) {
+ res_assigned =
+ (struct pci_resources_assigned *)&pkt->message;
+ res_assigned->message_type.type =
+ PCI_RESOURCES_ASSIGNED;
+ res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
+ } else {
+ res_assigned2 =
+ (struct pci_resources_assigned2 *)&pkt->message;
+ res_assigned2->message_type.type =
+ PCI_RESOURCES_ASSIGNED2;
+ res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
+ }
put_pcichild(hpdev, hv_pcidev_ref_by_slot);
- ret = vmbus_sendpacket(
- hdev->channel, &pkt->message,
- sizeof(*res_assigned),
- (unsigned long)pkt,
- VM_PKT_DATA_INBAND,
- VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ ret = vmbus_sendpacket(hdev->channel, &pkt->message,
+ size_res, (unsigned long)pkt,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret)
break;
@@ -2204,11 +2479,19 @@ static int hv_pci_probe(struct hv_device *hdev,
struct hv_pcibus_device *hbus;
int ret;
- hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
+ /*
+ * hv_pcibus_device contains the hypercall arguments for retargeting in
+ * hv_irq_unmask(). Those must not cross a page boundary.
+ */
+ BUILD_BUG_ON(sizeof(*hbus) > PAGE_SIZE);
+
+ hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL);
if (!hbus)
return -ENOMEM;
hbus->state = hv_pcibus_init;
+ hv_tmpcpumap_init();
+
/*
* The PCI bus "domain" is what is called "segment" in ACPI and
* other specs. Pull it from the instance ID, to get something
@@ -2308,7 +2591,7 @@ free_config:
close:
vmbus_close(hdev->channel);
free_bus:
- kfree(hbus);
+ free_page((unsigned long)hbus);
return ret;
}
@@ -2386,7 +2669,7 @@ static int hv_pci_remove(struct hv_device *hdev)
irq_domain_free_fwnode(hbus->sysdata.fwnode);
put_hvpcibus(hbus);
wait_for_completion(&hbus->remove_event);
- kfree(hbus);
+ free_page((unsigned long)hbus);
return 0;
}
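Version negotiation now walks pci_protocol_versions[] from newest to oldest: a non-negative completion status selects that version, STATUS_REVISION_MISMATCH means try the next entry, and any other status is a hard error. A standalone sketch of that control flow (illustrative only; host_reply() is a stand-in for the VSP's completion status):

#include <stdint.h>
#include <stdio.h>

#define STATUS_REVISION_MISMATCH 0xC0000059u

static const uint32_t versions[] = { 0x00010002, 0x00010001 }; /* 1.2, 1.1 */

/* Stand-in for the host's answer; this pretend host only speaks 1.1. */
static uint32_t host_reply(uint32_t version)
{
	return (version == 0x00010001) ? 0 : STATUS_REVISION_MISMATCH;
}

static int negotiate(uint32_t *out)
{
	unsigned int i;

	for (i = 0; i < sizeof(versions) / sizeof(versions[0]); i++) {
		uint32_t status = host_reply(versions[i]);

		/* the driver treats the completion status as signed */
		if ((int32_t)status >= 0) {
			*out = versions[i];
			return 0;
		}
		if (status != STATUS_REVISION_MISMATCH)
			return -1;	/* hard failure */
		/* mismatch: fall through and try the next, older version */
	}
	return -1;			/* nothing in common */
}

int main(void)
{
	uint32_t v;

	if (!negotiate(&v))
		printf("negotiated %#x\n", (unsigned)v);	/* 0x10001 here */
	return 0;
}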
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 85348590848b..6f879685fedd 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -429,7 +429,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
return 0;
}
-static struct of_device_id rcar_pci_of_match[] = {
+static const struct of_device_id rcar_pci_of_match[] = {
{ .compatible = "renesas,pci-r8a7790", },
{ .compatible = "renesas,pci-r8a7791", },
{ .compatible = "renesas,pci-r8a7794", },
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 2618f875a600..b3722b7709df 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -233,8 +233,8 @@ struct tegra_msi {
struct msi_controller chip;
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
- unsigned long pages;
struct mutex lock;
+ u64 phys;
int irq;
};
@@ -1448,9 +1448,8 @@ static int tegra_msi_setup_irq(struct msi_controller *chip,
irq_set_msi_desc(irq, desc);
- msg.address_lo = virt_to_phys((void *)msi->pages);
- /* 32 bit address only */
- msg.address_hi = 0;
+ msg.address_lo = lower_32_bits(msi->phys);
+ msg.address_hi = upper_32_bits(msi->phys);
msg.data = hwirq;
pci_write_msi_msg(irq, &msg);
@@ -1499,7 +1498,6 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
const struct tegra_pcie_soc *soc = pcie->soc;
struct tegra_msi *msi = &pcie->msi;
struct device *dev = pcie->dev;
- unsigned long base;
int err;
u32 reg;
@@ -1531,12 +1529,25 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
goto err;
}
- /* setup AFI/FPCI range */
- msi->pages = __get_free_pages(GFP_KERNEL, 0);
- base = virt_to_phys((void *)msi->pages);
+ /*
+ * The PCI host bridge on Tegra contains some logic that intercepts
+ * MSI writes, which means that the MSI target address doesn't have
+ * to point to actual physical memory. Rather than allocating one 4
+ * KiB page of system memory that's never used, we can simply pick
+ * an arbitrary address within an area reserved for system memory
+ * in the FPCI address map.
+ *
+ * However, in order to avoid confusion, we pick an address that
+ * doesn't map to physical memory. The FPCI address map reserves a
+ * 1012 GiB region for system memory and memory-mapped I/O. Since
+ * none of the Tegra SoCs that contain this PCI host bridge can
+ * address more than 16 GiB of system memory, the last 4 KiB of
+ * these 1012 GiB is a good candidate.
+ */
+ msi->phys = 0xfcfffff000;
- afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
- afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
+ afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
+ afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
/* this register is in 4K increments */
afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
@@ -1585,8 +1596,6 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
- free_pages(msi->pages, 0);
-
if (msi->irq > 0)
free_irq(msi->irq, pcie);
@@ -2238,7 +2247,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
struct pci_bus *child;
int err;
- host = pci_alloc_host_bridge(sizeof(*pcie));
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
if (!host)
return -ENOMEM;
@@ -2284,16 +2293,15 @@ static int tegra_pcie_probe(struct platform_device *pdev)
host->busnr = pcie->busn.start;
host->dev.parent = &pdev->dev;
host->ops = &tegra_pcie_ops;
+ host->map_irq = tegra_pcie_map_irq;
+ host->swizzle_irq = pci_common_swizzle;
- err = pci_register_host_bridge(host);
+ err = pci_scan_root_bus_bridge(host);
if (err < 0) {
dev_err(dev, "failed to register host: %d\n", err);
goto disable_msi;
}
- pci_scan_child_bus(host->bus);
-
- pci_fixup_irqs(pci_common_swizzle, tegra_pcie_map_irq);
pci_bus_size_bridges(host->bus);
pci_bus_assign_resources(host->bus);
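The Tegra change stops allocating a page just to obtain an MSI target address and instead points the intercepting logic at a fixed FPCI address, so both halves of the 64-bit address now matter when composing the MSI message. A standalone illustration of the split (illustrative only; the helper macros mirror the kernel's lower_32_bits()/upper_32_bits()):

#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)((n) >> 32))

int main(void)
{
	/* last 4 KiB of the FPCI "system memory" aperture -- never backed
	 * by real RAM on these SoCs, which is exactly the point */
	uint64_t msi_phys = 0xfcfffff000ULL;

	printf("address_lo=%#x address_hi=%#x\n",
	       (unsigned)lower_32_bits(msi_phys),
	       (unsigned)upper_32_bits(msi_phys));
	/* prints address_lo=0xfffff000 address_hi=0xfc; the old code forced
	 * address_hi to 0 because the page came from low system memory */
	return 0;
}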
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
index 9281eee2d000..d417acab0ecf 100644
--- a/drivers/pci/host/pci-versatile.c
+++ b/drivers/pci/host/pci-versatile.c
@@ -120,30 +120,35 @@ out_release_res:
static int versatile_pci_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct resource *res;
int ret, i, myslot = -1;
u32 val;
void __iomem *local_pci_cfg_base;
struct pci_bus *bus, *child;
+ struct pci_host_bridge *bridge;
LIST_HEAD(pci_res);
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return -ENOMEM;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- versatile_pci_base = devm_ioremap_resource(&pdev->dev, res);
+ versatile_pci_base = devm_ioremap_resource(dev, res);
if (IS_ERR(versatile_pci_base))
return PTR_ERR(versatile_pci_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- versatile_cfg_base[0] = devm_ioremap_resource(&pdev->dev, res);
+ versatile_cfg_base[0] = devm_ioremap_resource(dev, res);
if (IS_ERR(versatile_cfg_base[0]))
return PTR_ERR(versatile_cfg_base[0]);
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- versatile_cfg_base[1] = devm_pci_remap_cfg_resource(&pdev->dev,
- res);
+ versatile_cfg_base[1] = devm_pci_remap_cfg_resource(dev, res);
if (IS_ERR(versatile_cfg_base[1]))
return PTR_ERR(versatile_cfg_base[1]);
- ret = versatile_pci_parse_request_of_pci_ranges(&pdev->dev, &pci_res);
+ ret = versatile_pci_parse_request_of_pci_ranges(dev, &pci_res);
if (ret)
return ret;
@@ -159,7 +164,7 @@ static int versatile_pci_probe(struct platform_device *pdev)
}
}
if (myslot == -1) {
- dev_err(&pdev->dev, "Cannot find PCI core!\n");
+ dev_err(dev, "Cannot find PCI core!\n");
return -EIO;
}
/*
@@ -167,7 +172,7 @@ static int versatile_pci_probe(struct platform_device *pdev)
*/
pci_slot_ignore |= (1 << myslot);
- dev_info(&pdev->dev, "PCI core found (slot %d)\n", myslot);
+ dev_info(dev, "PCI core found (slot %d)\n", myslot);
writel(myslot, PCI_SELFID);
local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11);
@@ -199,11 +204,20 @@ static int versatile_pci_probe(struct platform_device *pdev)
pci_add_flags(PCI_ENABLE_PROC_DOMAINS);
pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC);
- bus = pci_scan_root_bus(&pdev->dev, 0, &pci_versatile_ops, NULL, &pci_res);
- if (!bus)
- return -ENOMEM;
+ list_splice_init(&pci_res, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = NULL;
+ bridge->busnr = 0;
+ bridge->ops = &pci_versatile_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret < 0)
+ return ret;
+
+ bus = bridge->bus;
- pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
pci_assign_unassigned_bus_resources(bus);
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 8cae013e7188..bd897479a215 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -636,13 +636,16 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
struct xgene_pcie_port *port;
resource_size_t iobase = 0;
struct pci_bus *bus, *child;
+ struct pci_host_bridge *bridge;
int ret;
LIST_HEAD(res);
- port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
- if (!port)
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+ if (!bridge)
return -ENOMEM;
+ port = pci_host_bridge_priv(bridge);
+
port->node = of_node_get(dn);
port->dev = dev;
@@ -670,11 +673,19 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
if (ret)
goto error;
- bus = pci_create_root_bus(dev, 0, &xgene_pcie_ops, port, &res);
- if (!bus) {
- ret = -ENOMEM;
+ list_splice_init(&res, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = port;
+ bridge->busnr = 0;
+ bridge->ops = &xgene_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret < 0)
goto error;
- }
+
+ bus = bridge->bus;
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index 75ec5cea26f6..4ea4f8f5dc77 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -579,12 +579,14 @@ static int altera_pcie_probe(struct platform_device *pdev)
struct altera_pcie *pcie;
struct pci_bus *bus;
struct pci_bus *child;
+ struct pci_host_bridge *bridge;
int ret;
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
return -ENOMEM;
+ pcie = pci_host_bridge_priv(bridge);
pcie->pdev = pdev;
ret = altera_pcie_parse_dt(pcie);
@@ -613,12 +615,20 @@ static int altera_pcie_probe(struct platform_device *pdev)
cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
altera_pcie_host_init(pcie);
- bus = pci_scan_root_bus(dev, pcie->root_bus_nr, &altera_pcie_ops,
- pcie, &pcie->resources);
- if (!bus)
- return -ENOMEM;
+ list_splice_init(&pcie->resources, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = pcie;
+ bridge->busnr = pcie->root_bus_nr;
+ bridge->ops = &altera_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret < 0)
+ return ret;
+
+ bus = bridge->bus;
- pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
pci_assign_unassigned_bus_resources(bus);
/* Configure PCI Express setting. */
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c
index 384c27e664fe..f03d5e3612e9 100644
--- a/drivers/pci/host/pcie-iproc-bcma.c
+++ b/drivers/pci/host/pcie-iproc-bcma.c
@@ -45,12 +45,15 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
struct device *dev = &bdev->dev;
struct iproc_pcie *pcie;
LIST_HEAD(resources);
+ struct pci_host_bridge *bridge;
int ret;
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
return -ENOMEM;
+ pcie = pci_host_bridge_priv(bridge);
+
pcie->dev = dev;
pcie->type = IPROC_PCIE_PAXB_BCMA;
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index 90d2bdd94e41..22531190bc40 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -52,12 +52,15 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
struct resource reg;
resource_size_t iobase = 0;
LIST_HEAD(resources);
+ struct pci_host_bridge *bridge;
int ret;
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
return -ENOMEM;
+ pcie = pci_host_bridge_priv(bridge);
+
pcie->dev = dev;
pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev);
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 0f39bd2a04cb..c57486348856 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -452,14 +452,13 @@ static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus,
* Note access to the configuration registers are protected at the higher layer
* by 'pci_lock' in drivers/pci/access.c
*/
-static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
+static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie,
+ int busno,
unsigned int devfn,
int where)
{
- struct iproc_pcie *pcie = iproc_data(bus);
unsigned slot = PCI_SLOT(devfn);
unsigned fn = PCI_FUNC(devfn);
- unsigned busno = bus->number;
u32 val;
u16 offset;
@@ -499,6 +498,58 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
return (pcie->base + offset);
}
+static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ return iproc_pcie_map_cfg_bus(iproc_data(bus), bus->number, devfn,
+ where);
+}
+
+static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie,
+ unsigned int devfn, int where,
+ int size, u32 *val)
+{
+ void __iomem *addr;
+
+ addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
+ if (!addr) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ *val = readl(addr);
+
+ if (size <= 2)
+ *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie,
+ unsigned int devfn, int where,
+ int size, u32 val)
+{
+ void __iomem *addr;
+ u32 mask, tmp;
+
+ addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (size == 4) {
+ writel(val, addr);
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
+ tmp = readl(addr) & mask;
+ tmp |= val << ((where & 0x3) * 8);
+ writel(tmp, addr);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
@@ -524,7 +575,7 @@ static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn,
}
static struct pci_ops iproc_pcie_ops = {
- .map_bus = iproc_pcie_map_cfg_bus,
+ .map_bus = iproc_pcie_bus_map_cfg_bus,
.read = iproc_pcie_config_read32,
.write = iproc_pcie_config_write32,
};
@@ -556,12 +607,11 @@ static void iproc_pcie_reset(struct iproc_pcie *pcie)
msleep(100);
}
-static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
+static int iproc_pcie_check_link(struct iproc_pcie *pcie)
{
struct device *dev = pcie->dev;
- u8 hdr_type;
- u32 link_ctrl, class, val;
- u16 pos = PCI_EXP_CAP, link_status;
+ u32 hdr_type, link_ctrl, link_status, class, val;
+ u16 pos = PCI_EXP_CAP;
bool link_is_active = false;
/*
@@ -578,7 +628,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
}
/* make sure we are not in EP mode */
- pci_bus_read_config_byte(bus, 0, PCI_HEADER_TYPE, &hdr_type);
+ iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type);
if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type);
return -EFAULT;
@@ -588,13 +638,16 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
#define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c
#define PCI_CLASS_BRIDGE_MASK 0xffff00
#define PCI_CLASS_BRIDGE_SHIFT 8
- pci_bus_read_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, &class);
+ iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
+ 4, &class);
class &= ~PCI_CLASS_BRIDGE_MASK;
class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT);
- pci_bus_write_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, class);
+ iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
+ 4, class);
/* check link status to see if link is active */
- pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status);
+ iproc_pci_raw_config_read32(pcie, 0, pos + PCI_EXP_LNKSTA,
+ 2, &link_status);
if (link_status & PCI_EXP_LNKSTA_NLW)
link_is_active = true;
@@ -603,20 +656,21 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
#define PCI_TARGET_LINK_SPEED_MASK 0xf
#define PCI_TARGET_LINK_SPEED_GEN2 0x2
#define PCI_TARGET_LINK_SPEED_GEN1 0x1
- pci_bus_read_config_dword(bus, 0,
- pos + PCI_EXP_LNKCTL2,
+ iproc_pci_raw_config_read32(pcie, 0,
+ pos + PCI_EXP_LNKCTL2, 4,
&link_ctrl);
if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
PCI_TARGET_LINK_SPEED_GEN2) {
link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
- pci_bus_write_config_dword(bus, 0,
- pos + PCI_EXP_LNKCTL2,
- link_ctrl);
+ iproc_pci_raw_config_write32(pcie, 0,
+ pos + PCI_EXP_LNKCTL2,
+ 4, link_ctrl);
msleep(100);
- pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA,
- &link_status);
+ iproc_pci_raw_config_read32(pcie, 0,
+ pos + PCI_EXP_LNKSTA,
+ 2, &link_status);
if (link_status & PCI_EXP_LNKSTA_NLW)
link_is_active = true;
}
@@ -1205,7 +1259,8 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
struct device *dev;
int ret;
void *sysdata;
- struct pci_bus *bus, *child;
+ struct pci_bus *child;
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
dev = pcie->dev;
@@ -1252,18 +1307,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
sysdata = pcie;
#endif
- bus = pci_create_root_bus(dev, 0, &iproc_pcie_ops, sysdata, res);
- if (!bus) {
- dev_err(dev, "unable to create PCI root bus\n");
- ret = -ENOMEM;
- goto err_power_off_phy;
- }
- pcie->root_bus = bus;
-
- ret = iproc_pcie_check_link(pcie, bus);
+ ret = iproc_pcie_check_link(pcie);
if (ret) {
dev_err(dev, "no PCIe EP device detected\n");
- goto err_rm_root_bus;
+ goto err_power_off_phy;
}
iproc_pcie_enable(pcie);
@@ -1272,23 +1319,31 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
if (iproc_pcie_msi_enable(pcie))
dev_info(dev, "not using iProc MSI\n");
- pci_scan_child_bus(bus);
- pci_assign_unassigned_bus_resources(bus);
+ list_splice_init(res, &host->windows);
+ host->busnr = 0;
+ host->dev.parent = dev;
+ host->ops = &iproc_pcie_ops;
+ host->sysdata = sysdata;
+ host->map_irq = pcie->map_irq;
+ host->swizzle_irq = pci_common_swizzle;
- if (pcie->map_irq)
- pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
+ ret = pci_scan_root_bus_bridge(host);
+ if (ret < 0) {
+ dev_err(dev, "failed to scan host: %d\n", ret);
+ goto err_power_off_phy;
+ }
- list_for_each_entry(child, &bus->children, node)
+ pci_assign_unassigned_bus_resources(host->bus);
+
+ pcie->root_bus = host->bus;
+
+ list_for_each_entry(child, &host->bus->children, node)
pcie_bus_configure_settings(child);
- pci_bus_add_devices(bus);
+ pci_bus_add_devices(host->bus);
return 0;
-err_rm_root_bus:
- pci_stop_root_bus(bus);
- pci_remove_root_bus(bus);
-
err_power_off_phy:
phy_power_off(pcie->phy);
err_exit_phy:
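The new iproc raw accessors always touch the aligned 32-bit register and do the sub-word handling themselves: reads shift and mask the requested bytes out, writes read-modify-write them back in. A standalone rendering of that arithmetic (illustrative only; cfg_extract() and cfg_merge() are invented names for the masking steps):

#include <stdint.h>
#include <stdio.h>

/* Extract a 1- or 2-byte field from the aligned 32-bit register word,
 * mirroring iproc_pci_raw_config_read32(). */
static uint32_t cfg_extract(uint32_t word, int where, int size)
{
	if (size <= 2)
		word = (word >> (8 * (where & 3))) & ((1u << (size * 8)) - 1);
	return word;
}

/* Merge a 1- or 2-byte write into the existing word (read-modify-write),
 * mirroring iproc_pci_raw_config_write32(). */
static uint32_t cfg_merge(uint32_t word, int where, int size, uint32_t val)
{
	uint32_t mask = ~(((1u << (size * 8)) - 1) << ((where & 0x3) * 8));

	return (word & mask) | (val << ((where & 0x3) * 8));
}

int main(void)
{
	uint32_t reg = 0xaabbccdd;

	printf("byte at offset 2: %#x\n", (unsigned)cfg_extract(reg, 2, 1));
	printf("after 16-bit write at offset 2: %#x\n",
	       (unsigned)cfg_merge(reg, 2, 2, 0x1234));	/* 0x1234ccdd */
	return 0;
}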
diff --git a/drivers/pci/host/pcie-mediatek.c b/drivers/pci/host/pcie-mediatek.c
new file mode 100644
index 000000000000..5a9d8589ea0b
--- /dev/null
+++ b/drivers/pci/host/pcie-mediatek.c
@@ -0,0 +1,554 @@
+/*
+ * MediaTek PCIe host controller driver.
+ *
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Ryder Lee <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+/* PCIe shared registers */
+#define PCIE_SYS_CFG 0x00
+#define PCIE_INT_ENABLE 0x0c
+#define PCIE_CFG_ADDR 0x20
+#define PCIE_CFG_DATA 0x24
+
+/* PCIe per port registers */
+#define PCIE_BAR0_SETUP 0x10
+#define PCIE_CLASS 0x34
+#define PCIE_LINK_STATUS 0x50
+
+#define PCIE_PORT_INT_EN(x) BIT(20 + (x))
+#define PCIE_PORT_PERST(x) BIT(1 + (x))
+#define PCIE_PORT_LINKUP BIT(0)
+#define PCIE_BAR_MAP_MAX GENMASK(31, 16)
+
+#define PCIE_BAR_ENABLE BIT(0)
+#define PCIE_REVISION_ID BIT(0)
+#define PCIE_CLASS_CODE (0x60400 << 8)
+#define PCIE_CONF_REG(regn) (((regn) & GENMASK(7, 2)) | \
+ ((((regn) >> 8) & GENMASK(3, 0)) << 24))
+#define PCIE_CONF_FUN(fun) (((fun) << 8) & GENMASK(10, 8))
+#define PCIE_CONF_DEV(dev) (((dev) << 11) & GENMASK(15, 11))
+#define PCIE_CONF_BUS(bus) (((bus) << 16) & GENMASK(23, 16))
+#define PCIE_CONF_ADDR(regn, fun, dev, bus) \
+ (PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
+ PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))
+
+/* MediaTek specific configuration registers */
+#define PCIE_FTS_NUM 0x70c
+#define PCIE_FTS_NUM_MASK GENMASK(15, 8)
+#define PCIE_FTS_NUM_L0(x) (((x) & 0xff) << 8)
+
+#define PCIE_FC_CREDIT 0x73c
+#define PCIE_FC_CREDIT_MASK (GENMASK(31, 31) | GENMASK(28, 16))
+#define PCIE_FC_CREDIT_VAL(x) ((x) << 16)
+
+/**
+ * struct mtk_pcie_port - PCIe port information
+ * @base: IO mapped register base
+ * @list: port list
+ * @pcie: pointer to PCIe host info
+ * @reset: pointer to port reset control
+ * @sys_ck: pointer to bus clock
+ * @phy: pointer to phy control block
+ * @lane: lane count
+ * @index: port index
+ */
+struct mtk_pcie_port {
+ void __iomem *base;
+ struct list_head list;
+ struct mtk_pcie *pcie;
+ struct reset_control *reset;
+ struct clk *sys_ck;
+ struct phy *phy;
+ u32 lane;
+ u32 index;
+};
+
+/**
+ * struct mtk_pcie - PCIe host information
+ * @dev: pointer to PCIe device
+ * @base: IO mapped register base
+ * @free_ck: free-run reference clock
+ * @io: IO resource
+ * @pio: PIO resource
+ * @mem: non-prefetchable memory resource
+ * @busn: bus range
+ * @offset: IO / Memory offset
+ * @ports: pointer to PCIe port information
+ */
+struct mtk_pcie {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *free_ck;
+
+ struct resource io;
+ struct resource pio;
+ struct resource mem;
+ struct resource busn;
+ struct {
+ resource_size_t mem;
+ resource_size_t io;
+ } offset;
+ struct list_head ports;
+};
+
+static inline bool mtk_pcie_link_up(struct mtk_pcie_port *port)
+{
+ return !!(readl(port->base + PCIE_LINK_STATUS) & PCIE_PORT_LINKUP);
+}
+
+static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+
+ clk_disable_unprepare(pcie->free_ck);
+
+ if (dev->pm_domain) {
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ }
+}
+
+static void mtk_pcie_port_free(struct mtk_pcie_port *port)
+{
+ struct mtk_pcie *pcie = port->pcie;
+ struct device *dev = pcie->dev;
+
+ devm_iounmap(dev, port->base);
+ list_del(&port->list);
+ devm_kfree(dev, port);
+}
+
+static void mtk_pcie_put_resources(struct mtk_pcie *pcie)
+{
+ struct mtk_pcie_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ phy_power_off(port->phy);
+ clk_disable_unprepare(port->sys_ck);
+ mtk_pcie_port_free(port);
+ }
+
+ mtk_pcie_subsys_powerdown(pcie);
+}
+
+static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct pci_host_bridge *host = pci_find_host_bridge(bus);
+ struct mtk_pcie *pcie = pci_host_bridge_priv(host);
+
+ writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn),
+ bus->number), pcie->base + PCIE_CFG_ADDR);
+
+ return pcie->base + PCIE_CFG_DATA + (where & 3);
+}
+
+static struct pci_ops mtk_pcie_ops = {
+ .map_bus = mtk_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static void mtk_pcie_configure_rc(struct mtk_pcie_port *port)
+{
+ struct mtk_pcie *pcie = port->pcie;
+ u32 func = PCI_FUNC(port->index << 3);
+ u32 slot = PCI_SLOT(port->index << 3);
+ u32 val;
+
+ /* enable interrupt */
+ val = readl(pcie->base + PCIE_INT_ENABLE);
+ val |= PCIE_PORT_INT_EN(port->index);
+ writel(val, pcie->base + PCIE_INT_ENABLE);
+
+ /* map to the whole DDR region; this must be set before any config access */
+ writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
+ port->base + PCIE_BAR0_SETUP);
+
+ /* configure class code and revision ID */
+ writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS);
+
+ /* configure FC credit */
+ writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
+ pcie->base + PCIE_CFG_ADDR);
+ val = readl(pcie->base + PCIE_CFG_DATA);
+ val &= ~PCIE_FC_CREDIT_MASK;
+ val |= PCIE_FC_CREDIT_VAL(0x806c);
+ writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0),
+ pcie->base + PCIE_CFG_ADDR);
+ writel(val, pcie->base + PCIE_CFG_DATA);
+
+ /* configure RC FTS number to 250 when it leaves L0s */
+ writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
+ pcie->base + PCIE_CFG_ADDR);
+ val = readl(pcie->base + PCIE_CFG_DATA);
+ val &= ~PCIE_FTS_NUM_MASK;
+ val |= PCIE_FTS_NUM_L0(0x50);
+ writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0),
+ pcie->base + PCIE_CFG_ADDR);
+ writel(val, pcie->base + PCIE_CFG_DATA);
+}
+
+static void mtk_pcie_assert_ports(struct mtk_pcie_port *port)
+{
+ struct mtk_pcie *pcie = port->pcie;
+ u32 val;
+
+ /* assert port PERST_N */
+ val = readl(pcie->base + PCIE_SYS_CFG);
+ val |= PCIE_PORT_PERST(port->index);
+ writel(val, pcie->base + PCIE_SYS_CFG);
+
+ /* de-assert port PERST_N */
+ val = readl(pcie->base + PCIE_SYS_CFG);
+ val &= ~PCIE_PORT_PERST(port->index);
+ writel(val, pcie->base + PCIE_SYS_CFG);
+
+ /* PCIe v2.0 needs at least a 100ms delay to train from Gen1 to Gen2 */
+ msleep(100);
+}
+
+static void mtk_pcie_enable_ports(struct mtk_pcie_port *port)
+{
+ struct device *dev = port->pcie->dev;
+ int err;
+
+ err = clk_prepare_enable(port->sys_ck);
+ if (err) {
+ dev_err(dev, "failed to enable port%d clock\n", port->index);
+ goto err_sys_clk;
+ }
+
+ reset_control_assert(port->reset);
+ reset_control_deassert(port->reset);
+
+ err = phy_power_on(port->phy);
+ if (err) {
+ dev_err(dev, "failed to power on port%d phy\n", port->index);
+ goto err_phy_on;
+ }
+
+ mtk_pcie_assert_ports(port);
+
+ /* if link up, then setup root port configuration space */
+ if (mtk_pcie_link_up(port)) {
+ mtk_pcie_configure_rc(port);
+ return;
+ }
+
+ dev_info(dev, "Port%d link down\n", port->index);
+
+ phy_power_off(port->phy);
+err_phy_on:
+ clk_disable_unprepare(port->sys_ck);
+err_sys_clk:
+ mtk_pcie_port_free(port);
+}
+
+static int mtk_pcie_parse_ports(struct mtk_pcie *pcie,
+ struct device_node *node,
+ int index)
+{
+ struct mtk_pcie_port *port;
+ struct resource *regs;
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ char name[10];
+ int err;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ err = of_property_read_u32(node, "num-lanes", &port->lane);
+ if (err) {
+ dev_err(dev, "missing num-lanes property\n");
+ return err;
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, index + 1);
+ port->base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(port->base)) {
+ dev_err(dev, "failed to map port%d base\n", index);
+ return PTR_ERR(port->base);
+ }
+
+ snprintf(name, sizeof(name), "sys_ck%d", index);
+ port->sys_ck = devm_clk_get(dev, name);
+ if (IS_ERR(port->sys_ck)) {
+ dev_err(dev, "failed to get port%d clock\n", index);
+ return PTR_ERR(port->sys_ck);
+ }
+
+ snprintf(name, sizeof(name), "pcie-rst%d", index);
+ port->reset = devm_reset_control_get_optional(dev, name);
+ if (PTR_ERR(port->reset) == -EPROBE_DEFER)
+ return PTR_ERR(port->reset);
+
+ /* some platforms may use default PHY setting */
+ snprintf(name, sizeof(name), "pcie-phy%d", index);
+ port->phy = devm_phy_optional_get(dev, name);
+ if (IS_ERR(port->phy))
+ return PTR_ERR(port->phy);
+
+ port->index = index;
+ port->pcie = pcie;
+
+ INIT_LIST_HEAD(&port->list);
+ list_add_tail(&port->list, &pcie->ports);
+
+ return 0;
+}
+
+static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *regs;
+ int err;
+
+ /* get shared registers */
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pcie->base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(pcie->base)) {
+ dev_err(dev, "failed to map shared register\n");
+ return PTR_ERR(pcie->base);
+ }
+
+ pcie->free_ck = devm_clk_get(dev, "free_ck");
+ if (IS_ERR(pcie->free_ck)) {
+ if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ pcie->free_ck = NULL;
+ }
+
+ if (dev->pm_domain) {
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+ }
+
+ /* enable top level clock */
+ err = clk_prepare_enable(pcie->free_ck);
+ if (err) {
+ dev_err(dev, "failed to enable free_ck\n");
+ goto err_free_ck;
+ }
+
+ return 0;
+
+err_free_ck:
+ if (dev->pm_domain) {
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ }
+
+ return err;
+}
+
+static int mtk_pcie_setup(struct mtk_pcie *pcie)
+{
+ struct device *dev = pcie->dev;
+ struct device_node *node = dev->of_node, *child;
+ struct of_pci_range_parser parser;
+ struct of_pci_range range;
+ struct resource res;
+ struct mtk_pcie_port *port, *tmp;
+ int err;
+
+ if (of_pci_range_parser_init(&parser, node)) {
+ dev_err(dev, "missing \"ranges\" property\n");
+ return -EINVAL;
+ }
+
+ for_each_of_pci_range(&parser, &range) {
+ err = of_pci_range_to_resource(&range, node, &res);
+ if (err < 0)
+ return err;
+
+ switch (res.flags & IORESOURCE_TYPE_BITS) {
+ case IORESOURCE_IO:
+ pcie->offset.io = res.start - range.pci_addr;
+
+ memcpy(&pcie->pio, &res, sizeof(res));
+ pcie->pio.name = node->full_name;
+
+ pcie->io.start = range.cpu_addr;
+ pcie->io.end = range.cpu_addr + range.size - 1;
+ pcie->io.flags = IORESOURCE_MEM;
+ pcie->io.name = "I/O";
+
+ memcpy(&res, &pcie->io, sizeof(res));
+ break;
+
+ case IORESOURCE_MEM:
+ pcie->offset.mem = res.start - range.pci_addr;
+
+ memcpy(&pcie->mem, &res, sizeof(res));
+ pcie->mem.name = "non-prefetchable";
+ break;
+ }
+ }
+
+ err = of_pci_parse_bus_range(node, &pcie->busn);
+ if (err < 0) {
+ dev_err(dev, "failed to parse bus ranges property: %d\n", err);
+ pcie->busn.name = node->name;
+ pcie->busn.start = 0;
+ pcie->busn.end = 0xff;
+ pcie->busn.flags = IORESOURCE_BUS;
+ }
+
+ for_each_available_child_of_node(node, child) {
+ int index;
+
+ err = of_pci_get_devfn(child);
+ if (err < 0) {
+ dev_err(dev, "failed to parse devfn: %d\n", err);
+ return err;
+ }
+
+ index = PCI_SLOT(err);
+
+ err = mtk_pcie_parse_ports(pcie, child, index);
+ if (err)
+ return err;
+ }
+
+ err = mtk_pcie_subsys_powerup(pcie);
+ if (err)
+ return err;
+
+ /* enable each port, and then check link status */
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+ mtk_pcie_enable_ports(port);
+
+ /* power down PCIe subsys if slots are all empty (link down) */
+ if (list_empty(&pcie->ports))
+ mtk_pcie_subsys_powerdown(pcie);
+
+ return 0;
+}
+
+static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
+{
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct list_head *windows = &host->windows;
+ struct device *dev = pcie->dev;
+ int err;
+
+ pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
+ pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
+ pci_add_resource(windows, &pcie->busn);
+
+ err = devm_request_pci_bus_resources(dev, windows);
+ if (err < 0)
+ return err;
+
+ pci_remap_iospace(&pcie->pio, pcie->io.start);
+
+ return 0;
+}
+
+static int mtk_pcie_register_host(struct pci_host_bridge *host)
+{
+ struct mtk_pcie *pcie = pci_host_bridge_priv(host);
+ struct pci_bus *child;
+ int err;
+
+ host->busnr = pcie->busn.start;
+ host->dev.parent = pcie->dev;
+ host->ops = &mtk_pcie_ops;
+ host->map_irq = of_irq_parse_and_map_pci;
+ host->swizzle_irq = pci_common_swizzle;
+
+ err = pci_scan_root_bus_bridge(host);
+ if (err < 0)
+ return err;
+
+ pci_bus_size_bridges(host->bus);
+ pci_bus_assign_resources(host->bus);
+
+ list_for_each_entry(child, &host->bus->children, node)
+ pcie_bus_configure_settings(child);
+
+ pci_bus_add_devices(host->bus);
+
+ return 0;
+}
+
+static int mtk_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_pcie *pcie;
+ struct pci_host_bridge *host;
+ int err;
+
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!host)
+ return -ENOMEM;
+
+ pcie = pci_host_bridge_priv(host);
+
+ pcie->dev = dev;
+ platform_set_drvdata(pdev, pcie);
+ INIT_LIST_HEAD(&pcie->ports);
+
+ err = mtk_pcie_setup(pcie);
+ if (err)
+ return err;
+
+ err = mtk_pcie_request_resources(pcie);
+ if (err)
+ goto put_resources;
+
+ err = mtk_pcie_register_host(host);
+ if (err)
+ goto put_resources;
+
+ return 0;
+
+put_resources:
+ if (!list_empty(&pcie->ports))
+ mtk_pcie_put_resources(pcie);
+
+ return err;
+}
+
+static const struct of_device_id mtk_pcie_ids[] = {
+ { .compatible = "mediatek,mt7623-pcie"},
+ { .compatible = "mediatek,mt2701-pcie"},
+ {},
+};
+
+static struct platform_driver mtk_pcie_driver = {
+ .probe = mtk_pcie_probe,
+ .driver = {
+ .name = "mtk-pcie",
+ .of_match_table = mtk_pcie_ids,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(mtk_pcie_driver);
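mtk_pcie_map_bus() in the new driver works through an indirect window: the bus/device/function/register tuple is packed into PCIE_CFG_ADDR, and the generic accessors then read or write PCIE_CFG_DATA at the byte offset (where & 3). A standalone rendering of the address packing (illustrative only; GENMASK is defined locally to match the kernel's):

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

/* Same field packing as the driver: register bits [7:2] in the low byte,
 * register bits [11:8] in [27:24], bus/device/function in
 * [23:16]/[15:11]/[10:8]. */
#define PCIE_CONF_REG(regn)	(((regn) & GENMASK(7, 2)) | \
				 ((((regn) >> 8) & GENMASK(3, 0)) << 24))
#define PCIE_CONF_FUN(fun)	(((fun) << 8) & GENMASK(10, 8))
#define PCIE_CONF_DEV(dev)	(((dev) << 11) & GENMASK(15, 11))
#define PCIE_CONF_BUS(bus)	(((bus) << 16) & GENMASK(23, 16))
#define PCIE_CONF_ADDR(regn, fun, dev, bus) \
	(PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \
	 PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus))

int main(void)
{
	/* bus 1, device 0, function 0, offset 0x00 (vendor ID) */
	printf("CFG_ADDR = %#x\n", (unsigned)PCIE_CONF_ADDR(0x00, 0, 0, 1));
	/* extended offset 0x100 exercises the high register bits */
	printf("CFG_ADDR = %#x\n", (unsigned)PCIE_CONF_ADDR(0x100, 0, 0, 1));
	return 0;
}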
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index cb07c45c1858..246d485b24c6 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -450,29 +450,33 @@ done:
static int rcar_pcie_enable(struct rcar_pcie *pcie)
{
struct device *dev = pcie->dev;
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
struct pci_bus *bus, *child;
- LIST_HEAD(res);
+ int ret;
/* Try setting 5 GT/s link speed */
rcar_pcie_force_speedup(pcie);
- rcar_pcie_setup(&res, pcie);
+ rcar_pcie_setup(&bridge->windows, pcie);
pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
+ bridge->dev.parent = dev;
+ bridge->sysdata = pcie;
+ bridge->busnr = pcie->root_bus_nr;
+ bridge->ops = &rcar_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
if (IS_ENABLED(CONFIG_PCI_MSI))
- bus = pci_scan_root_bus_msi(dev, pcie->root_bus_nr,
- &rcar_pcie_ops, pcie, &res, &pcie->msi.chip);
- else
- bus = pci_scan_root_bus(dev, pcie->root_bus_nr,
- &rcar_pcie_ops, pcie, &res);
+ bridge->msi = &pcie->msi.chip;
- if (!bus) {
- dev_err(dev, "Scanning rootbus failed");
- return -ENODEV;
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret < 0) {
+ kfree(bridge);
+ return ret;
}
- pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+ bus = bridge->bus;
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
@@ -1127,11 +1131,14 @@ static int rcar_pcie_probe(struct platform_device *pdev)
unsigned int data;
int err;
int (*hw_init_fn)(struct rcar_pcie *);
+ struct pci_host_bridge *bridge;
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
+ bridge = pci_alloc_host_bridge(sizeof(*pcie));
+ if (!bridge)
return -ENOMEM;
+ pcie = pci_host_bridge_priv(bridge);
+
pcie->dev = dev;
INIT_LIST_HEAD(&pcie->resources);
@@ -1141,12 +1148,12 @@ static int rcar_pcie_probe(struct platform_device *pdev)
err = rcar_pcie_get_resources(pcie);
if (err < 0) {
dev_err(dev, "failed to request resources: %d\n", err);
- return err;
+ goto err_free_bridge;
}
err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node);
if (err)
- return err;
+ goto err_free_bridge;
pm_runtime_enable(dev);
err = pm_runtime_get_sync(dev);
@@ -1183,6 +1190,9 @@ static int rcar_pcie_probe(struct platform_device *pdev)
return 0;
+err_free_bridge:
+ pci_free_host_bridge(bridge);
+
err_pm_put:
pm_runtime_put(dev);
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c
index 0e020b6e0943..5acf8694fb23 100644
--- a/drivers/pci/host/pcie-rockchip.c
+++ b/drivers/pci/host/pcie-rockchip.c
@@ -139,6 +139,7 @@
PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \
PCIE_CORE_INT_MMVC)
+#define PCIE_RC_CONFIG_NORMAL_BASE 0x800000
#define PCIE_RC_CONFIG_BASE 0xa00000
#define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08)
#define PCIE_RC_CONFIG_SCC_SHIFT 16
@@ -146,6 +147,9 @@
#define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18
#define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff
#define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26
+#define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8)
+#define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5)
+#define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5)
#define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc)
#define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10)
#define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0)
@@ -175,6 +179,8 @@
#define IB_ROOT_PORT_REG_SIZE_SHIFT 3
#define AXI_WRAPPER_IO_WRITE 0x6
#define AXI_WRAPPER_MEM_WRITE 0x2
+#define AXI_WRAPPER_TYPE0_CFG 0xa
+#define AXI_WRAPPER_TYPE1_CFG 0xb
#define AXI_WRAPPER_NOR_MSG 0xc
#define MAX_AXI_IB_ROOTPORT_REGION_NUM 3
@@ -198,6 +204,7 @@
#define RC_REGION_0_ADDR_TRANS_H 0x00000000
#define RC_REGION_0_ADDR_TRANS_L 0x00000000
#define RC_REGION_0_PASS_BITS (25 - 1)
+#define RC_REGION_0_TYPE_MASK GENMASK(3, 0)
#define MAX_AXI_WRAPPER_REGION_NUM 33
struct rockchip_pcie {
@@ -295,7 +302,9 @@ static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip,
static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip,
int where, int size, u32 *val)
{
- void __iomem *addr = rockchip->apb_base + PCIE_RC_CONFIG_BASE + where;
+ void __iomem *addr;
+
+ addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where;
if (!IS_ALIGNED((uintptr_t)addr, size)) {
*val = 0;
@@ -319,11 +328,13 @@ static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
int where, int size, u32 val)
{
u32 mask, tmp, offset;
+ void __iomem *addr;
offset = where & ~0x3;
+ addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset;
if (size == 4) {
- writel(val, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset);
+ writel(val, addr);
return PCIBIOS_SUCCESSFUL;
}
@@ -334,13 +345,33 @@ static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip,
* corrupt RW1C bits in adjacent registers. But the hardware
* doesn't support smaller writes.
*/
- tmp = readl(rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset) & mask;
+ tmp = readl(addr) & mask;
tmp |= val << ((where & 0x3) * 8);
- writel(tmp, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset);
+ writel(tmp, addr);
return PCIBIOS_SUCCESSFUL;
}
+static void rockchip_pcie_cfg_configuration_accesses(
+ struct rockchip_pcie *rockchip, u32 type)
+{
+ u32 ob_desc_0;
+
+ /* Configuration Accesses for region 0 */
+ rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
+
+ rockchip_pcie_write(rockchip,
+ (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
+ PCIE_CORE_OB_REGION_ADDR0);
+ rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
+ PCIE_CORE_OB_REGION_ADDR1);
+ ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0);
+ ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK);
+ ob_desc_0 |= (type | (0x1 << 23));
+ rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0);
+ rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
+}
+
static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
struct pci_bus *bus, u32 devfn,
int where, int size, u32 *val)
@@ -355,6 +386,13 @@ static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip,
return PCIBIOS_BAD_REGISTER_NUMBER;
}
+ if (bus->parent->number == rockchip->root_bus_nr)
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE0_CFG);
+ else
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE1_CFG);
+
if (size == 4) {
*val = readl(rockchip->reg_base + busdev);
} else if (size == 2) {
@@ -379,6 +417,13 @@ static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip,
if (!IS_ALIGNED(busdev, size))
return PCIBIOS_BAD_REGISTER_NUMBER;
+ if (bus->parent->number == rockchip->root_bus_nr)
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE0_CFG);
+ else
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE1_CFG);
+
if (size == 4)
writel(val, rockchip->reg_base + busdev);
else if (size == 2)
@@ -664,15 +709,10 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip)
rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
}
- rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF);
-
- rockchip_pcie_write(rockchip,
- (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS),
- PCIE_CORE_OB_REGION_ADDR0);
- rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H,
- PCIE_CORE_OB_REGION_ADDR1);
- rockchip_pcie_write(rockchip, 0x0080000a, PCIE_CORE_OB_REGION_DESC0);
- rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1);
+ status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
+ status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
+ status |= PCIE_RC_CONFIG_DCSR_MPS_256;
+ rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);
return 0;
}
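The new DCSR write programs a 256-byte Max Payload Size into the root port's own Device Control register; the MPS field sits in bits 7:5, matching PCI_EXP_DEVCTL_PAYLOAD. As a minimal sketch only, the same setting expressed through the generic capability accessors, assuming a hypothetical root_port pci_dev that would only exist after enumeration (which is why the driver pokes its memory-mapped config window directly here):

static void foo_set_mps_256(struct pci_dev *root_port)
{
	/* Clear the Max_Payload_Size field and set the 256-byte
	 * encoding (001b in bits 7:5 of Device Control). */
	pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_PAYLOAD, 0x1 << 5);
}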
@@ -1156,13 +1196,16 @@ static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
return 0;
}
-static int rockchip_cfg_atu(struct rockchip_pcie *rockchip)
+static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
{
struct device *dev = rockchip->dev;
int offset;
int err;
int reg_no;
+ rockchip_pcie_cfg_configuration_accesses(rockchip,
+ AXI_WRAPPER_TYPE0_CFG);
+
for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) {
err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
AXI_WRAPPER_MEM_WRITE,
@@ -1251,6 +1294,9 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
clk_disable_unprepare(rockchip->aclk_perf_pcie);
clk_disable_unprepare(rockchip->aclk_pcie);
+ if (!IS_ERR(rockchip->vpcie0v9))
+ regulator_disable(rockchip->vpcie0v9);
+
return ret;
}
@@ -1259,24 +1305,54 @@ static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
int err;
- clk_prepare_enable(rockchip->clk_pcie_pm);
- clk_prepare_enable(rockchip->hclk_pcie);
- clk_prepare_enable(rockchip->aclk_perf_pcie);
- clk_prepare_enable(rockchip->aclk_pcie);
+ if (!IS_ERR(rockchip->vpcie0v9)) {
+ err = regulator_enable(rockchip->vpcie0v9);
+ if (err) {
+ dev_err(dev, "failed to enable vpcie0v9 regulator\n");
+ return err;
+ }
+ }
+
+ err = clk_prepare_enable(rockchip->clk_pcie_pm);
+ if (err)
+ goto err_pcie_pm;
+
+ err = clk_prepare_enable(rockchip->hclk_pcie);
+ if (err)
+ goto err_hclk_pcie;
+
+ err = clk_prepare_enable(rockchip->aclk_perf_pcie);
+ if (err)
+ goto err_aclk_perf_pcie;
+
+ err = clk_prepare_enable(rockchip->aclk_pcie);
+ if (err)
+ goto err_aclk_pcie;
err = rockchip_pcie_init_port(rockchip);
if (err)
- return err;
+ goto err_pcie_resume;
- err = rockchip_cfg_atu(rockchip);
+ err = rockchip_pcie_cfg_atu(rockchip);
if (err)
- return err;
+ goto err_pcie_resume;
/* Need this to enter L1 again */
rockchip_pcie_update_txcredit_mui(rockchip);
rockchip_pcie_enable_interrupts(rockchip);
return 0;
+
+err_pcie_resume:
+ clk_disable_unprepare(rockchip->aclk_pcie);
+err_aclk_pcie:
+ clk_disable_unprepare(rockchip->aclk_perf_pcie);
+err_aclk_perf_pcie:
+ clk_disable_unprepare(rockchip->hclk_pcie);
+err_hclk_pcie:
+ clk_disable_unprepare(rockchip->clk_pcie_pm);
+err_pcie_pm:
+ return err;
}
static int rockchip_pcie_probe(struct platform_device *pdev)
@@ -1284,6 +1360,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
struct rockchip_pcie *rockchip;
struct device *dev = &pdev->dev;
struct pci_bus *bus, *child;
+ struct pci_host_bridge *bridge;
struct resource_entry *win;
resource_size_t io_base;
struct resource *mem;
@@ -1295,10 +1372,12 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
if (!dev->of_node)
return -ENODEV;
- rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL);
- if (!rockchip)
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
+ if (!bridge)
return -ENOMEM;
+ rockchip = pci_host_bridge_priv(bridge);
+
platform_set_drvdata(pdev, rockchip);
rockchip->dev = dev;
@@ -1385,22 +1464,30 @@ static int rockchip_pcie_probe(struct platform_device *pdev)
}
}
- err = rockchip_cfg_atu(rockchip);
+ err = rockchip_pcie_cfg_atu(rockchip);
if (err)
goto err_free_res;
- rockchip->msg_region = devm_ioremap(rockchip->dev,
- rockchip->msg_bus_addr, SZ_1M);
+ rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
if (!rockchip->msg_region) {
err = -ENOMEM;
goto err_free_res;
}
- bus = pci_scan_root_bus(&pdev->dev, 0, &rockchip_pcie_ops, rockchip, &res);
- if (!bus) {
- err = -ENOMEM;
+ list_splice_init(&res, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = rockchip;
+ bridge->busnr = 0;
+ bridge->ops = &rockchip_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
+
+ err = pci_scan_root_bus_bridge(bridge);
+ if (err < 0)
goto err_free_res;
- }
+
+ bus = bridge->bus;
+
rockchip->root_bus = bus;
pci_bus_size_bridges(bus);
diff --git a/drivers/pci/host/pcie-tango.c b/drivers/pci/host/pcie-tango.c
new file mode 100644
index 000000000000..6bbb81f06a53
--- /dev/null
+++ b/drivers/pci/host/pcie-tango.c
@@ -0,0 +1,141 @@
+#include <linux/pci-ecam.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+
+#define SMP8759_MUX 0x48
+#define SMP8759_TEST_OUT 0x74
+
+struct tango_pcie {
+ void __iomem *base;
+};
+
+static int smp8759_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct tango_pcie *pcie = dev_get_drvdata(cfg->parent);
+ int ret;
+
+ /* Reads in configuration space outside devfn 0 return garbage */
+ if (devfn != 0)
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+
+ /*
+ * PCI config and MMIO accesses are muxed. Linux doesn't have a
+ * mutual exclusion mechanism for config vs. MMIO accesses, so
+ * concurrent accesses may cause corruption.
+ */
+ writel_relaxed(1, pcie->base + SMP8759_MUX);
+ ret = pci_generic_config_read(bus, devfn, where, size, val);
+ writel_relaxed(0, pcie->base + SMP8759_MUX);
+
+ return ret;
+}
+
+static int smp8759_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ struct tango_pcie *pcie = dev_get_drvdata(cfg->parent);
+ int ret;
+
+ writel_relaxed(1, pcie->base + SMP8759_MUX);
+ ret = pci_generic_config_write(bus, devfn, where, size, val);
+ writel_relaxed(0, pcie->base + SMP8759_MUX);
+
+ return ret;
+}
+
+static struct pci_ecam_ops smp8759_ecam_ops = {
+ .bus_shift = 20,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = smp8759_config_read,
+ .write = smp8759_config_write,
+ }
+};
+
+static int tango_pcie_link_up(struct tango_pcie *pcie)
+{
+ void __iomem *test_out = pcie->base + SMP8759_TEST_OUT;
+ int i;
+
+ writel_relaxed(16, test_out);
+ for (i = 0; i < 10; ++i) {
+ u32 ltssm_state = readl_relaxed(test_out) >> 8;
+ if ((ltssm_state & 0x1f) == 0xf) /* L0 */
+ return 1;
+ usleep_range(3000, 4000);
+ }
+
+ return 0;
+}
+
+static int tango_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tango_pcie *pcie;
+ struct resource *res;
+ int ret;
+
+ dev_warn(dev, "simultaneous PCI config and MMIO accesses may cause data corruption\n");
+ add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ pcie->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
+
+ platform_set_drvdata(pdev, pcie);
+
+ if (!tango_pcie_link_up(pcie))
+ return -ENODEV;
+
+ return pci_host_common_probe(pdev, &smp8759_ecam_ops);
+}
+
+static const struct of_device_id tango_pcie_ids[] = {
+ { .compatible = "sigma,smp8759-pcie" },
+ { },
+};
+
+static struct platform_driver tango_pcie_driver = {
+ .probe = tango_pcie_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = tango_pcie_ids,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver(tango_pcie_driver);
+
+/*
+ * The root complex advertises the wrong device class.
+ * Header Type 1 is for PCI-to-PCI bridges.
+ */
+static void tango_fixup_class(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_class);
+
+/*
+ * The root complex exposes a "fake" BAR, which is used to filter
+ * bus-to-system accesses. Only accesses within the range defined by this
+ * BAR are forwarded to the host, others are ignored.
+ *
+ * By default, the DMA framework expects an identity mapping, and DRAM0 is
+ * mapped at 0x80000000.
+ */
+static void tango_fixup_bar(struct pci_dev *dev)
+{
+ dev->non_compliant_bars = true;
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0x80000000);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_bar);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_bar);
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 4b16b26ae909..eec641a34fc5 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -172,6 +172,7 @@ struct nwl_pcie {
u8 root_busno;
struct nwl_msi msi;
struct irq_domain *legacy_irq_domain;
+ raw_spinlock_t leg_mask_lock;
};
static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off)
@@ -383,11 +384,52 @@ static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static void nwl_mask_leg_irq(struct irq_data *data)
+{
+ struct irq_desc *desc = irq_to_desc(data->irq);
+ struct nwl_pcie *pcie;
+ unsigned long flags;
+ u32 mask;
+ u32 val;
+
+ pcie = irq_desc_get_chip_data(desc);
+ mask = 1 << (data->hwirq - 1);
+ raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
+ val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
+ nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK);
+ raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
+}
+
+static void nwl_unmask_leg_irq(struct irq_data *data)
+{
+ struct irq_desc *desc = irq_to_desc(data->irq);
+ struct nwl_pcie *pcie;
+ unsigned long flags;
+ u32 mask;
+ u32 val;
+
+ pcie = irq_desc_get_chip_data(desc);
+ mask = 1 << (data->hwirq - 1);
+ raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
+ val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
+ nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK);
+ raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags);
+}
+
+static struct irq_chip nwl_leg_irq_chip = {
+ .name = "nwl_pcie:legacy",
+ .irq_enable = nwl_unmask_leg_irq,
+ .irq_disable = nwl_mask_leg_irq,
+ .irq_mask = nwl_mask_leg_irq,
+ .irq_unmask = nwl_unmask_leg_irq,
+};
+
static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
- irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq);
irq_set_chip_data(irq, domain->host_data);
+ irq_set_status_flags(irq, IRQ_LEVEL);
return 0;
}
@@ -526,11 +568,12 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
return -ENOMEM;
}
+ raw_spin_lock_init(&pcie->leg_mask_lock);
nwl_pcie_init_msi_irq_domain(pcie);
return 0;
}
-static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus)
+static int nwl_pcie_enable_msi(struct nwl_pcie *pcie)
{
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
@@ -791,13 +834,16 @@ static int nwl_pcie_probe(struct platform_device *pdev)
struct nwl_pcie *pcie;
struct pci_bus *bus;
struct pci_bus *child;
+ struct pci_host_bridge *bridge;
int err;
resource_size_t iobase = 0;
LIST_HEAD(res);
- pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
- if (!pcie)
- return -ENOMEM;
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
+ if (!bridge)
+ return -ENODEV;
+
+ pcie = pci_host_bridge_priv(bridge);
pcie->dev = dev;
pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT;
@@ -830,21 +876,28 @@ static int nwl_pcie_probe(struct platform_device *pdev)
goto error;
}
- bus = pci_create_root_bus(dev, pcie->root_busno,
- &nwl_pcie_ops, pcie, &res);
- if (!bus) {
- err = -ENOMEM;
- goto error;
- }
+ list_splice_init(&res, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = pcie;
+ bridge->busnr = pcie->root_busno;
+ bridge->ops = &nwl_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- err = nwl_pcie_enable_msi(pcie, bus);
+ err = nwl_pcie_enable_msi(pcie);
if (err < 0) {
dev_err(dev, "failed to enable MSI support: %d\n", err);
goto error;
}
}
- pci_scan_child_bus(bus);
+
+ err = pci_scan_root_bus_bridge(bridge);
+ if (err)
+ goto error;
+
+ bus = bridge->bus;
+
pci_assign_unassigned_bus_resources(bus);
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index 2fe2df51f9f8..f63fa5e0278c 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -633,6 +633,7 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct xilinx_pcie_port *port;
struct pci_bus *bus, *child;
+ struct pci_host_bridge *bridge;
int err;
resource_size_t iobase = 0;
LIST_HEAD(res);
@@ -640,9 +641,11 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
if (!dev->of_node)
return -ENODEV;
- port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
- if (!port)
- return -ENOMEM;
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+ if (!bridge)
+ return -ENODEV;
+
+ port = pci_host_bridge_priv(bridge);
port->dev = dev;
@@ -671,21 +674,26 @@ static int xilinx_pcie_probe(struct platform_device *pdev)
if (err)
goto error;
- bus = pci_create_root_bus(dev, 0, &xilinx_pcie_ops, port, &res);
- if (!bus) {
- err = -ENOMEM;
- goto error;
- }
+
+ list_splice_init(&res, &bridge->windows);
+ bridge->dev.parent = dev;
+ bridge->sysdata = port;
+ bridge->busnr = 0;
+ bridge->ops = &xilinx_pcie_ops;
+ bridge->map_irq = of_irq_parse_and_map_pci;
+ bridge->swizzle_irq = pci_common_swizzle;
#ifdef CONFIG_PCI_MSI
xilinx_pcie_msi_chip.dev = dev;
- bus->msi = &xilinx_pcie_msi_chip;
+ bridge->msi = &xilinx_pcie_msi_chip;
#endif
- pci_scan_child_bus(bus);
+ err = pci_scan_root_bus_bridge(bridge);
+ if (err < 0)
+ goto error;
+
+ bus = bridge->bus;
+
pci_assign_unassigned_bus_resources(bus);
-#ifndef CONFIG_MICROBLAZE
- pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
-#endif
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
@@ -696,7 +704,7 @@ error:
return err;
}
-static struct of_device_id xilinx_pcie_of_match[] = {
+static const struct of_device_id xilinx_pcie_of_match[] = {
{ .compatible = "xlnx,axi-pcie-host-1.00.a", },
{}
};
diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c
index 31203d69616d..6088c3083194 100644
--- a/drivers/pci/host/vmd.c
+++ b/drivers/pci/host/vmd.c
@@ -539,7 +539,10 @@ static void vmd_detach_resources(struct vmd_dev *vmd)
}
/*
- * VMD domains start at 0x1000 to not clash with ACPI _SEG domains.
+ * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
+ * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower
+ * 16 bits are the PCI Segment Group (domain) number. Other bits are
+ * currently reserved.
*/
static int vmd_find_free_domain(void)
{
@@ -710,7 +713,8 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
- vmd_irq, 0, "vmd", &vmd->irqs[i]);
+ vmd_irq, IRQF_NO_THREAD,
+ "vmd", &vmd->irqs[i]);
if (err)
return err;
}
@@ -739,10 +743,10 @@ static void vmd_remove(struct pci_dev *dev)
struct vmd_dev *vmd = pci_get_drvdata(dev);
vmd_detach_resources(vmd);
- vmd_cleanup_srcu(vmd);
sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
pci_remove_root_bus(vmd->bus);
+ vmd_cleanup_srcu(vmd);
vmd_teardown_dma_ops(vmd);
irq_domain_remove(vmd->irq_domain);
}
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index d9dc7363ac77..120485d6f352 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -461,8 +461,6 @@ found:
else
iov->dev = dev;
- mutex_init(&iov->lock);
-
dev->sriov = iov;
dev->is_physfn = 1;
rc = compute_max_vf_buses(dev);
@@ -491,8 +489,6 @@ static void sriov_release(struct pci_dev *dev)
if (dev != dev->sriov->dev)
pci_dev_put(dev->sriov->dev);
- mutex_destroy(&dev->sriov->lock);
-
kfree(dev->sriov);
dev->sriov = NULL;
}
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index fbad5dca3219..253d92409bb3 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1058,7 +1058,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
for (;;) {
if (affd) {
- nvec = irq_calc_affinity_vectors(nvec, affd);
+ nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
if (nvec < minvec)
return -ENOSPC;
}
@@ -1097,7 +1097,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
for (;;) {
if (affd) {
- nvec = irq_calc_affinity_vectors(nvec, affd);
+ nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
if (nvec < minvec)
return -ENOSPC;
}
@@ -1165,16 +1165,6 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
if (flags & PCI_IRQ_AFFINITY) {
if (!affd)
affd = &msi_default_affd;
-
- if (affd->pre_vectors + affd->post_vectors > min_vecs)
- return -EINVAL;
-
- /*
- * If there aren't any vectors left after applying the pre/post
- * vectors don't bother with assigning affinity.
- */
- if (affd->pre_vectors + affd->post_vectors == min_vecs)
- affd = NULL;
} else {
if (WARN_ON(affd))
affd = NULL;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index df4aead394f2..607f677f48d2 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -415,6 +415,8 @@ static int pci_device_probe(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
struct pci_driver *drv = to_pci_driver(dev->driver);
+ pci_assign_irq(pci_dev);
+
error = pcibios_alloc_irq(pci_dev);
if (error < 0)
return error;
@@ -967,6 +969,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
return pci_legacy_resume_early(dev);
pci_update_current_state(pci_dev, PCI_D0);
+ pci_restore_state(pci_dev);
if (drv && drv->pm && drv->pm->thaw_noirq)
error = drv->pm->thaw_noirq(dev);
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 2d8db3ead6e8..a7a41d9c29df 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -43,9 +43,11 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf,
{
const struct dmi_device *dmi;
struct dmi_dev_onboard *donboard;
+ int domain_nr;
int bus;
int devfn;
+ domain_nr = pci_domain_nr(pdev->bus);
bus = pdev->bus->number;
devfn = pdev->devfn;
@@ -53,8 +55,9 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf,
while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD,
NULL, dmi)) != NULL) {
donboard = dmi->device_data;
- if (donboard && donboard->bus == bus &&
- donboard->devfn == devfn) {
+ if (donboard && donboard->segment == domain_nr &&
+ donboard->bus == bus &&
+ donboard->devfn == devfn) {
if (buf) {
if (attribute == SMBIOS_ATTR_INSTANCE_SHOW)
return scnprintf(buf, PAGE_SIZE,
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 31e99613a12e..2f3780b50723 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -154,6 +154,129 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(resource);
+static ssize_t max_link_speed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ u32 linkcap;
+ int err;
+ const char *speed;
+
+ err = pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &linkcap);
+ if (err)
+ return -EINVAL;
+
+ switch (linkcap & PCI_EXP_LNKCAP_SLS) {
+ case PCI_EXP_LNKCAP_SLS_8_0GB:
+ speed = "8 GT/s";
+ break;
+ case PCI_EXP_LNKCAP_SLS_5_0GB:
+ speed = "5 GT/s";
+ break;
+ case PCI_EXP_LNKCAP_SLS_2_5GB:
+ speed = "2.5 GT/s";
+ break;
+ default:
+ speed = "Unknown speed";
+ }
+
+ return sprintf(buf, "%s\n", speed);
+}
+static DEVICE_ATTR_RO(max_link_speed);
+
+static ssize_t max_link_width_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ u32 linkcap;
+ int err;
+
+ err = pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &linkcap);
+ if (err)
+ return -EINVAL;
+
+ return sprintf(buf, "%u\n", (linkcap & PCI_EXP_LNKCAP_MLW) >> 4);
+}
+static DEVICE_ATTR_RO(max_link_width);
+
+static ssize_t current_link_speed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ u16 linkstat;
+ int err;
+ const char *speed;
+
+ err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
+ if (err)
+ return -EINVAL;
+
+ switch (linkstat & PCI_EXP_LNKSTA_CLS) {
+ case PCI_EXP_LNKSTA_CLS_8_0GB:
+ speed = "8 GT/s";
+ break;
+ case PCI_EXP_LNKSTA_CLS_5_0GB:
+ speed = "5 GT/s";
+ break;
+ case PCI_EXP_LNKSTA_CLS_2_5GB:
+ speed = "2.5 GT/s";
+ break;
+ default:
+ speed = "Unknown speed";
+ }
+
+ return sprintf(buf, "%s\n", speed);
+}
+static DEVICE_ATTR_RO(current_link_speed);
+
+static ssize_t current_link_width_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ u16 linkstat;
+ int err;
+
+ err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
+ if (err)
+ return -EINVAL;
+
+ return sprintf(buf, "%u\n",
+ (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);
+}
+static DEVICE_ATTR_RO(current_link_width);
+
+static ssize_t secondary_bus_number_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ u8 sec_bus;
+ int err;
+
+ err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
+ if (err)
+ return -EINVAL;
+
+ return sprintf(buf, "%u\n", sec_bus);
+}
+static DEVICE_ATTR_RO(secondary_bus_number);
+
+static ssize_t subordinate_bus_number_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ u8 sub_bus;
+ int err;
+
+ err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
+ if (err)
+ return -EINVAL;
+
+ return sprintf(buf, "%u\n", sub_bus);
+}
+static DEVICE_ATTR_RO(subordinate_bus_number);
+
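The attributes above expose PCIe link parameters and bridge bus numbers under each device's sysfs directory. A minimal userspace sketch that reads one of them; the device address is a placeholder:

#include <stdio.h>

int main(void)
{
	char buf[32];
	/* Placeholder BDF; substitute any PCIe device present on the system. */
	FILE *f = fopen("/sys/bus/pci/devices/0000:01:00.0/current_link_speed", "r");

	if (!f) {
		perror("current_link_speed");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("current link speed: %s", buf);
	fclose(f);
	return 0;
}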
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -472,7 +595,6 @@ static ssize_t sriov_numvfs_store(struct device *dev,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct pci_sriov *iov = pdev->sriov;
int ret;
u16 num_vfs;
@@ -483,7 +605,7 @@ static ssize_t sriov_numvfs_store(struct device *dev,
if (num_vfs > pci_sriov_get_totalvfs(pdev))
return -ERANGE;
- mutex_lock(&iov->dev->sriov->lock);
+ device_lock(&pdev->dev);
if (num_vfs == pdev->sriov->num_VFs)
goto exit;
@@ -518,7 +640,7 @@ static ssize_t sriov_numvfs_store(struct device *dev,
num_vfs, ret);
exit:
- mutex_unlock(&iov->dev->sriov->lock);
+ device_unlock(&pdev->dev);
if (ret < 0)
return ret;
@@ -629,12 +751,17 @@ static struct attribute *pci_dev_attrs[] = {
NULL,
};
-static const struct attribute_group pci_dev_group = {
- .attrs = pci_dev_attrs,
+static struct attribute *pci_bridge_attrs[] = {
+ &dev_attr_subordinate_bus_number.attr,
+ &dev_attr_secondary_bus_number.attr,
+ NULL,
};
-const struct attribute_group *pci_dev_groups[] = {
- &pci_dev_group,
+static struct attribute *pcie_dev_attrs[] = {
+ &dev_attr_current_link_speed.attr,
+ &dev_attr_current_link_width.attr,
+ &dev_attr_max_link_width.attr,
+ &dev_attr_max_link_speed.attr,
NULL,
};
@@ -1557,6 +1684,57 @@ static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
return a->mode;
}
+static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (pci_is_bridge(pdev))
+ return a->mode;
+
+ return 0;
+}
+
+static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (pci_is_pcie(pdev))
+ return a->mode;
+
+ return 0;
+}
+
+static const struct attribute_group pci_dev_group = {
+ .attrs = pci_dev_attrs,
+};
+
+const struct attribute_group *pci_dev_groups[] = {
+ &pci_dev_group,
+ NULL,
+};
+
+static const struct attribute_group pci_bridge_group = {
+ .attrs = pci_bridge_attrs,
+};
+
+const struct attribute_group *pci_bridge_groups[] = {
+ &pci_bridge_group,
+ NULL,
+};
+
+static const struct attribute_group pcie_dev_group = {
+ .attrs = pcie_dev_attrs,
+};
+
+const struct attribute_group *pcie_dev_groups[] = {
+ &pcie_dev_group,
+ NULL,
+};
+
static struct attribute_group pci_dev_hp_attr_group = {
.attrs = pci_dev_hp_attrs,
.is_visible = pci_dev_hp_attrs_are_visible,
@@ -1592,12 +1770,24 @@ static struct attribute_group pci_dev_attr_group = {
.is_visible = pci_dev_attrs_are_visible,
};
+static struct attribute_group pci_bridge_attr_group = {
+ .attrs = pci_bridge_attrs,
+ .is_visible = pci_bridge_attrs_are_visible,
+};
+
+static struct attribute_group pcie_dev_attr_group = {
+ .attrs = pcie_dev_attrs,
+ .is_visible = pcie_dev_attrs_are_visible,
+};
+
static const struct attribute_group *pci_dev_attr_groups[] = {
&pci_dev_attr_group,
&pci_dev_hp_attr_group,
#ifdef CONFIG_PCI_IOV
&sriov_dev_attr_group,
#endif
+ &pci_bridge_attr_group,
+ &pcie_dev_attr_group,
NULL,
};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0b5302a9fdae..d88edf5c563b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -28,6 +28,7 @@
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
+#include <linux/pci-ats.h>
#include <asm/setup.h>
#include <asm/dma.h>
#include <linux/aer.h>
@@ -455,7 +456,7 @@ struct resource *pci_find_parent_resource(const struct pci_dev *dev,
pci_bus_for_each_resource(bus, r, i) {
if (!r)
continue;
- if (res->start && resource_contains(r, res)) {
+ if (resource_contains(r, res)) {
/*
* If the window is prefetchable but the BAR is
@@ -1166,6 +1167,8 @@ void pci_restore_state(struct pci_dev *dev)
/* PCI Express register must be restored first */
pci_restore_pcie_state(dev);
+ pci_restore_pasid_state(dev);
+ pci_restore_pri_state(dev);
pci_restore_ats_state(dev);
pci_restore_vc_state(dev);
@@ -1966,12 +1969,13 @@ EXPORT_SYMBOL(pci_wake_from_d3);
/**
* pci_target_state - find an appropriate low power state for a given PCI dev
* @dev: PCI device
+ * @wakeup: Whether or not wakeup functionality will be enabled for the device.
*
* Use underlying platform code to find a supported low power state for @dev.
* If the platform can't manage @dev, return the deepest state from which it
* can generate wake events, based on any available PME info.
*/
-static pci_power_t pci_target_state(struct pci_dev *dev)
+static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
{
pci_power_t target_state = PCI_D3hot;
@@ -2008,7 +2012,7 @@ static pci_power_t pci_target_state(struct pci_dev *dev)
if (dev->current_state == PCI_D3cold)
target_state = PCI_D3cold;
- if (device_may_wakeup(&dev->dev)) {
+ if (wakeup) {
/*
* Find the deepest state from which the device can generate
* wake-up events, make it the target state and enable device
@@ -2034,13 +2038,14 @@ static pci_power_t pci_target_state(struct pci_dev *dev)
*/
int pci_prepare_to_sleep(struct pci_dev *dev)
{
- pci_power_t target_state = pci_target_state(dev);
+ bool wakeup = device_may_wakeup(&dev->dev);
+ pci_power_t target_state = pci_target_state(dev, wakeup);
int error;
if (target_state == PCI_POWER_ERROR)
return -EIO;
- pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
+ pci_enable_wake(dev, target_state, wakeup);
error = pci_set_power_state(dev, target_state);
@@ -2073,9 +2078,10 @@ EXPORT_SYMBOL(pci_back_from_sleep);
*/
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
- pci_power_t target_state = pci_target_state(dev);
+ pci_power_t target_state;
int error;
+ target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
if (target_state == PCI_POWER_ERROR)
return -EIO;
@@ -2111,8 +2117,8 @@ bool pci_dev_run_wake(struct pci_dev *dev)
if (!dev->pme_support)
return false;
- /* PME-capable in principle, but not from the intended sleep state */
- if (!pci_pme_capable(dev, pci_target_state(dev)))
+ /* PME-capable in principle, but not from the target power state */
+ if (!pci_pme_capable(dev, pci_target_state(dev, false)))
return false;
while (bus->parent) {
@@ -2147,9 +2153,10 @@ EXPORT_SYMBOL_GPL(pci_dev_run_wake);
bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
{
struct device *dev = &pci_dev->dev;
+ bool wakeup = device_may_wakeup(dev);
if (!pm_runtime_suspended(dev)
- || pci_target_state(pci_dev) != pci_dev->current_state
+ || pci_target_state(pci_dev, wakeup) != pci_dev->current_state
|| platform_pci_need_resume(pci_dev)
|| (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME))
return false;
@@ -2167,7 +2174,7 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
spin_lock_irq(&dev->power.lock);
if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
- !device_may_wakeup(dev))
+ !wakeup)
__pci_pme_active(pci_dev, false);
spin_unlock_irq(&dev->power.lock);
@@ -3715,46 +3722,6 @@ void pci_intx(struct pci_dev *pdev, int enable)
}
EXPORT_SYMBOL_GPL(pci_intx);
-/**
- * pci_intx_mask_supported - probe for INTx masking support
- * @dev: the PCI device to operate on
- *
- * Check if the device dev support INTx masking via the config space
- * command word.
- */
-bool pci_intx_mask_supported(struct pci_dev *dev)
-{
- bool mask_supported = false;
- u16 orig, new;
-
- if (dev->broken_intx_masking)
- return false;
-
- pci_cfg_access_lock(dev);
-
- pci_read_config_word(dev, PCI_COMMAND, &orig);
- pci_write_config_word(dev, PCI_COMMAND,
- orig ^ PCI_COMMAND_INTX_DISABLE);
- pci_read_config_word(dev, PCI_COMMAND, &new);
-
- /*
- * There's no way to protect against hardware bugs or detect them
- * reliably, but as long as we know what the value should be, let's
- * go ahead and check it.
- */
- if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
- dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n",
- orig, new);
- } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
- mask_supported = true;
- pci_write_config_word(dev, PCI_COMMAND, orig);
- }
-
- pci_cfg_access_unlock(dev);
- return mask_supported;
-}
-EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
-
static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
{
struct pci_bus *bus = dev->bus;
@@ -3805,7 +3772,7 @@ done:
* @dev: the PCI device to operate on
*
* Check if the device dev has its INTx line asserted, mask it and
- * return true in that case. False is returned if not interrupt was
+ * return true in that case. False is returned if no interrupt was
* pending.
*/
bool pci_check_and_mask_intx(struct pci_dev *dev)
@@ -4075,40 +4042,6 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
}
-static int __pci_dev_reset(struct pci_dev *dev, int probe)
-{
- int rc;
-
- might_sleep();
-
- rc = pci_dev_specific_reset(dev, probe);
- if (rc != -ENOTTY)
- goto done;
-
- if (pcie_has_flr(dev)) {
- if (!probe)
- pcie_flr(dev);
- rc = 0;
- goto done;
- }
-
- rc = pci_af_flr(dev, probe);
- if (rc != -ENOTTY)
- goto done;
-
- rc = pci_pm_reset(dev, probe);
- if (rc != -ENOTTY)
- goto done;
-
- rc = pci_dev_reset_slot_function(dev, probe);
- if (rc != -ENOTTY)
- goto done;
-
- rc = pci_parent_bus_reset(dev, probe);
-done:
- return rc;
-}
-
static void pci_dev_lock(struct pci_dev *dev)
{
pci_cfg_access_lock(dev);
@@ -4134,26 +4067,18 @@ static void pci_dev_unlock(struct pci_dev *dev)
pci_cfg_access_unlock(dev);
}
-/**
- * pci_reset_notify - notify device driver of reset
- * @dev: device to be notified of reset
- * @prepare: 'true' if device is about to be reset; 'false' if reset attempt
- * completed
- *
- * Must be called prior to device access being disabled and after device
- * access is restored.
- */
-static void pci_reset_notify(struct pci_dev *dev, bool prepare)
+static void pci_dev_save_and_disable(struct pci_dev *dev)
{
const struct pci_error_handlers *err_handler =
dev->driver ? dev->driver->err_handler : NULL;
- if (err_handler && err_handler->reset_notify)
- err_handler->reset_notify(dev, prepare);
-}
-static void pci_dev_save_and_disable(struct pci_dev *dev)
-{
- pci_reset_notify(dev, true);
+ /*
+ * dev->driver->err_handler->reset_prepare() is protected against
+ * races with ->remove() by the device lock, which must be held by
+ * the caller.
+ */
+ if (err_handler && err_handler->reset_prepare)
+ err_handler->reset_prepare(dev);
/*
* Wake-up device prior to save. PM registers default to D0 after
@@ -4175,23 +4100,18 @@ static void pci_dev_save_and_disable(struct pci_dev *dev)
static void pci_dev_restore(struct pci_dev *dev)
{
- pci_restore_state(dev);
- pci_reset_notify(dev, false);
-}
-
-static int pci_dev_reset(struct pci_dev *dev, int probe)
-{
- int rc;
-
- if (!probe)
- pci_dev_lock(dev);
-
- rc = __pci_dev_reset(dev, probe);
+ const struct pci_error_handlers *err_handler =
+ dev->driver ? dev->driver->err_handler : NULL;
- if (!probe)
- pci_dev_unlock(dev);
+ pci_restore_state(dev);
- return rc;
+ /*
+ * dev->driver->err_handler->reset_done() is protected against
+ * races with ->remove() by the device lock, which must be held by
+ * the caller.
+ */
+ if (err_handler && err_handler->reset_done)
+ err_handler->reset_done(dev);
}
/**
@@ -4213,7 +4133,13 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
*/
int __pci_reset_function(struct pci_dev *dev)
{
- return pci_dev_reset(dev, 0);
+ int ret;
+
+ pci_dev_lock(dev);
+ ret = __pci_reset_function_locked(dev);
+ pci_dev_unlock(dev);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(__pci_reset_function);
@@ -4238,7 +4164,27 @@ EXPORT_SYMBOL_GPL(__pci_reset_function);
*/
int __pci_reset_function_locked(struct pci_dev *dev)
{
- return __pci_dev_reset(dev, 0);
+ int rc;
+
+ might_sleep();
+
+ rc = pci_dev_specific_reset(dev, 0);
+ if (rc != -ENOTTY)
+ return rc;
+ if (pcie_has_flr(dev)) {
+ pcie_flr(dev);
+ return 0;
+ }
+ rc = pci_af_flr(dev, 0);
+ if (rc != -ENOTTY)
+ return rc;
+ rc = pci_pm_reset(dev, 0);
+ if (rc != -ENOTTY)
+ return rc;
+ rc = pci_dev_reset_slot_function(dev, 0);
+ if (rc != -ENOTTY)
+ return rc;
+ return pci_parent_bus_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
@@ -4255,7 +4201,26 @@ EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
*/
int pci_probe_reset_function(struct pci_dev *dev)
{
- return pci_dev_reset(dev, 1);
+ int rc;
+
+ might_sleep();
+
+ rc = pci_dev_specific_reset(dev, 1);
+ if (rc != -ENOTTY)
+ return rc;
+ if (pcie_has_flr(dev))
+ return 0;
+ rc = pci_af_flr(dev, 1);
+ if (rc != -ENOTTY)
+ return rc;
+ rc = pci_pm_reset(dev, 1);
+ if (rc != -ENOTTY)
+ return rc;
+ rc = pci_dev_reset_slot_function(dev, 1);
+ if (rc != -ENOTTY)
+ return rc;
+
+ return pci_parent_bus_reset(dev, 1);
}
/**
@@ -4278,15 +4243,17 @@ int pci_reset_function(struct pci_dev *dev)
{
int rc;
- rc = pci_dev_reset(dev, 1);
+ rc = pci_probe_reset_function(dev);
if (rc)
return rc;
+ pci_dev_lock(dev);
pci_dev_save_and_disable(dev);
- rc = pci_dev_reset(dev, 0);
+ rc = __pci_reset_function_locked(dev);
pci_dev_restore(dev);
+ pci_dev_unlock(dev);
return rc;
}
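pci_reset_function() now acquires the device lock itself, so it must not be called with that lock already held; code running under the device lock (for example from ->probe() or ->remove(), which the driver core invokes with the lock held) should use __pci_reset_function_locked() instead. A minimal sketch with a hypothetical foo_recover() helper:

static int foo_recover(struct pci_dev *pdev, bool have_device_lock)
{
	/* pci_reset_function() takes the device lock internally;
	 * calling it with the lock already held would deadlock. */
	if (have_device_lock)
		return __pci_reset_function_locked(pdev);

	return pci_reset_function(pdev);
}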
@@ -4302,20 +4269,18 @@ int pci_try_reset_function(struct pci_dev *dev)
{
int rc;
- rc = pci_dev_reset(dev, 1);
+ rc = pci_probe_reset_function(dev);
if (rc)
return rc;
- pci_dev_save_and_disable(dev);
+ if (!pci_dev_trylock(dev))
+ return -EAGAIN;
- if (pci_dev_trylock(dev)) {
- rc = __pci_dev_reset(dev, 0);
- pci_dev_unlock(dev);
- } else
- rc = -EAGAIN;
+ pci_dev_save_and_disable(dev);
+ rc = __pci_reset_function_locked(dev);
+ pci_dev_unlock(dev);
pci_dev_restore(dev);
-
return rc;
}
EXPORT_SYMBOL_GPL(pci_try_reset_function);
@@ -4465,7 +4430,9 @@ static void pci_bus_save_and_disable(struct pci_bus *bus)
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_dev_lock(dev);
pci_dev_save_and_disable(dev);
+ pci_dev_unlock(dev);
if (dev->subordinate)
pci_bus_save_and_disable(dev->subordinate);
}
@@ -4480,7 +4447,9 @@ static void pci_bus_restore(struct pci_bus *bus)
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_dev_lock(dev);
pci_dev_restore(dev);
+ pci_dev_unlock(dev);
if (dev->subordinate)
pci_bus_restore(dev->subordinate);
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 240b2c0fed4b..03e3d0285aea 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -267,7 +267,6 @@ struct pci_sriov {
u16 driver_max_VFs; /* max num VFs driver supports */
struct pci_dev *dev; /* lowest numbered PF */
struct pci_dev *self; /* this PF */
- struct mutex lock; /* lock for setting sriov_numvfs in sysfs */
resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */
bool drivers_autoprobe; /* auto probing of VFs by driver */
};
diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c
index 77d2ca99d2ec..c39f32e42b4d 100644
--- a/drivers/pci/pcie/pcie-dpc.c
+++ b/drivers/pci/pcie/pcie-dpc.c
@@ -92,7 +92,7 @@ static irqreturn_t dpc_irq(int irq, void *context)
pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status);
pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_SOURCE_ID,
&source);
- if (!status)
+ if (!status || status == (u16)(~0))
return IRQ_NONE;
dev_info(&dpc->dev->device, "DPC containment event, status:%#06x source:%#06x\n",
@@ -144,7 +144,7 @@ static int dpc_probe(struct pcie_device *dev)
dpc->rp = (cap & PCI_EXP_DPC_CAP_RP_EXT);
- ctl |= PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN;
+ ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN;
pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
dev_info(&dev->device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 587aef36030d..4334fd5d7de9 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -13,10 +13,11 @@
#define PCIE_PORT_DEVICE_MAXSERVICES 5
/*
- * According to the PCI Express Base Specification 2.0, the indices of
- * the MSI-X table entries used by port services must not exceed 31
+ * The PCIe Capability Interrupt Message Number (PCIe r3.1, sec 7.8.2) must
+ * be one of the first 32 MSI-X entries. Per PCI r3.0, sec 6.8.3.1, MSI
+ * supports a maximum of 32 vectors per function.
*/
-#define PCIE_PORT_MAX_MSIX_ENTRIES 32
+#define PCIE_PORT_MAX_MSI_ENTRIES 32
#define get_descriptor_id(type, service) (((type - 4) << 8) | service)
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index cea504f6f478..313a21df1692 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -44,14 +44,15 @@ static void release_pcie_device(struct device *dev)
}
/**
- * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port
+ * pcie_port_enable_irq_vec - try to set up MSI-X or MSI as interrupt mode
+ * for given port
* @dev: PCI Express port to handle
* @irqs: Array of interrupt vectors to populate
* @mask: Bitmask of port capabilities returned by get_port_device_capability()
*
* Return value: 0 on success, error code on failure
*/
-static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
+static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask)
{
int nr_entries, entry, nvec = 0;
@@ -61,8 +62,8 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
* equal to the number of entries this port actually uses, we'll happily
* go through without any tricks.
*/
- nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSIX_ENTRIES,
- PCI_IRQ_MSIX);
+ nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES,
+ PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nr_entries < 0)
return nr_entries;
@@ -70,14 +71,19 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
u16 reg16;
/*
- * The code below follows the PCI Express Base Specification 2.0
- * stating in Section 6.1.6 that "PME and Hot-Plug Event
- * interrupts (when both are implemented) always share the same
- * MSI or MSI-X vector, as indicated by the Interrupt Message
- * Number field in the PCI Express Capabilities register", where
- * according to Section 7.8.2 of the specification "For MSI-X,
- * the value in this field indicates which MSI-X Table entry is
- * used to generate the interrupt message."
+ * Per PCIe r3.1, sec 6.1.6, "PME and Hot-Plug Event
+ * interrupts (when both are implemented) always share the
+ * same MSI or MSI-X vector, as indicated by the Interrupt
+ * Message Number field in the PCI Express Capabilities
+ * register".
+ *
+ * Per sec 7.8.2, "For MSI, the [Interrupt Message Number]
+ * indicates the offset between the base Message Data and
+ * the interrupt message that is generated."
+ *
+ * "For MSI-X, the [Interrupt Message Number] indicates
+ * which MSI-X Table entry is used to generate the
+ * interrupt message."
*/
pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
@@ -94,13 +100,17 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
u32 reg32, pos;
/*
- * The code below follows Section 7.10.10 of the PCI Express
- * Base Specification 2.0 stating that bits 31-27 of the Root
- * Error Status Register contain a value indicating which of the
- * MSI/MSI-X vectors assigned to the port is going to be used
- * for AER, where "For MSI-X, the value in this register
- * indicates which MSI-X Table entry is used to generate the
- * interrupt message."
+ * Per PCIe r3.1, sec 7.10.10, the Advanced Error Interrupt
+ * Message Number in the Root Error Status register
+ * indicates which MSI/MSI-X vector is used for AER.
+ *
+ * "For MSI, the [Advanced Error Interrupt Message Number]
+ * indicates the offset between the base Message Data and
+ * the interrupt message that is generated."
+ *
+ * "For MSI-X, the [Advanced Error Interrupt Message
+ * Number] indicates which MSI-X Table entry is used to
+ * generate the interrupt message."
*/
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
@@ -113,6 +123,33 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
nvec = max(nvec, entry + 1);
}
+ if (mask & PCIE_PORT_SERVICE_DPC) {
+ u16 reg16, pos;
+
+ /*
+ * Per PCIe r4.0 (v0.9), sec 7.9.15.2, the DPC Interrupt
+ * Message Number in the DPC Capability register indicates
+ * which MSI/MSI-X vector is used for DPC.
+ *
+ * "For MSI, the [DPC Interrupt Message Number] indicates
+ * the offset between the base Message Data and the
+ * interrupt message that is generated."
+ *
+ * "For MSI-X, the [DPC Interrupt Message Number] indicates
+ * which MSI-X Table entry is used to generate the
+ * interrupt message."
+ */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC);
+ pci_read_config_word(dev, pos + PCI_EXP_DPC_CAP, &reg16);
+ entry = reg16 & 0x1f;
+ if (entry >= nr_entries)
+ goto out_free_irqs;
+
+ irqs[PCIE_PORT_SERVICE_DPC_SHIFT] = pci_irq_vector(dev, entry);
+
+ nvec = max(nvec, entry + 1);
+ }
+
/*
* If nvec is equal to the allocated number of entries, we can just use
* what we have. Otherwise, the port has some extra entries not for the
@@ -124,7 +161,7 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
/* Now allocate the MSI-X vectors for real */
nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
- PCI_IRQ_MSIX);
+ PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nr_entries < 0)
return nr_entries;
}
@@ -146,26 +183,29 @@ out_free_irqs:
*/
static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
{
- unsigned flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
int ret, i;
for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
irqs[i] = -1;
/*
- * If MSI cannot be used for PCIe PME or hotplug, we have to use
- * INTx or other interrupts, e.g. system shared interrupt.
+ * If we support PME or hotplug, but we can't use MSI/MSI-X for
+ * them, we have to fall back to INTx or other interrupts, e.g., a
+ * system shared interrupt.
*/
- if (((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) ||
- ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())) {
- flags &= ~PCI_IRQ_MSI;
- } else {
- /* Try to use MSI-X if supported */
- if (!pcie_port_enable_msix(dev, irqs, mask))
- return 0;
- }
+ if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi())
+ goto legacy_irq;
+
+ if ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())
+ goto legacy_irq;
+
+ /* Try to use MSI-X or MSI if supported */
+ if (pcie_port_enable_irq_vec(dev, irqs, mask) == 0)
+ return 0;
- ret = pci_alloc_irq_vectors(dev, 1, 1, flags);
+legacy_irq:
+ /* fall back to legacy IRQ */
+ ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY);
if (ret < 0)
return -ENODEV;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 19c8950c6c38..c31310db0404 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -510,16 +510,18 @@ static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
return b;
}
-static void pci_release_host_bridge_dev(struct device *dev)
+static void devm_pci_release_host_bridge_dev(struct device *dev)
{
struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
if (bridge->release_fn)
bridge->release_fn(bridge);
+}
- pci_free_resource_list(&bridge->windows);
-
- kfree(bridge);
+static void pci_release_host_bridge_dev(struct device *dev)
+{
+ devm_pci_release_host_bridge_dev(dev);
+ pci_free_host_bridge(to_pci_host_bridge(dev));
}
struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
@@ -531,11 +533,36 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
return NULL;
INIT_LIST_HEAD(&bridge->windows);
+ bridge->dev.release = pci_release_host_bridge_dev;
return bridge;
}
EXPORT_SYMBOL(pci_alloc_host_bridge);
+struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
+ size_t priv)
+{
+ struct pci_host_bridge *bridge;
+
+ bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL);
+ if (!bridge)
+ return NULL;
+
+ INIT_LIST_HEAD(&bridge->windows);
+ bridge->dev.release = devm_pci_release_host_bridge_dev;
+
+ return bridge;
+}
+EXPORT_SYMBOL(devm_pci_alloc_host_bridge);
+
+void pci_free_host_bridge(struct pci_host_bridge *bridge)
+{
+ pci_free_resource_list(&bridge->windows);
+
+ kfree(bridge);
+}
+EXPORT_SYMBOL(pci_free_host_bridge);
+
static const unsigned char pcix_bus_speed[] = {
PCI_SPEED_UNKNOWN, /* 0 */
PCI_SPEED_66MHz_PCIX, /* 1 */
@@ -719,7 +746,7 @@ static void pci_set_bus_msi_domain(struct pci_bus *bus)
dev_set_msi_domain(&bus->dev, d);
}
-int pci_register_host_bridge(struct pci_host_bridge *bridge)
+static int pci_register_host_bridge(struct pci_host_bridge *bridge)
{
struct device *parent = bridge->dev.parent;
struct resource_entry *window, *n;
@@ -834,7 +861,6 @@ free:
kfree(bus);
return err;
}
-EXPORT_SYMBOL(pci_register_host_bridge);
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
struct pci_dev *bridge, int busnr)
@@ -1330,6 +1356,34 @@ static void pci_msi_setup_pci_dev(struct pci_dev *dev)
}
/**
+ * pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability
+ * @dev: PCI device
+ *
+ * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev. Check this
+ * at enumeration-time to avoid modifying PCI_COMMAND at run-time.
+ */
+static int pci_intx_mask_broken(struct pci_dev *dev)
+{
+ u16 orig, toggle, new;
+
+ pci_read_config_word(dev, PCI_COMMAND, &orig);
+ toggle = orig ^ PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(dev, PCI_COMMAND, toggle);
+ pci_read_config_word(dev, PCI_COMMAND, &new);
+
+ pci_write_config_word(dev, PCI_COMMAND, orig);
+
+ /*
+ * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
+ * r2.3, so strictly speaking, a device is not *broken* if it's not
+ * writable. But we'll live with the misnomer for now.
+ */
+ if (new != toggle)
+ return 1;
+ return 0;
+}
+
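With INTx-mask writability now probed once at enumeration time, code that used to call the removed pci_intx_mask_supported() can simply consult the flag that pci_setup_device() sets below. A minimal sketch with a hypothetical driver helper:

static bool foo_can_mask_intx(struct pci_dev *pdev)
{
	/* Filled in by pci_intx_mask_broken() during enumeration, so no
	 * PCI_COMMAND toggling is needed at run time. */
	return !pdev->broken_intx_masking;
}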
+/**
* pci_setup_device - fill in class and map information of a device
* @dev: the device structure to fill
*
@@ -1399,6 +1453,8 @@ int pci_setup_device(struct pci_dev *dev)
}
}
+ dev->broken_intx_masking = pci_intx_mask_broken(dev);
+
switch (dev->hdr_type) { /* header type */
case PCI_HEADER_TYPE_NORMAL: /* standard header */
if (class == PCI_CLASS_BRIDGE_PCI)
@@ -1674,6 +1730,11 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
/* Initialize Advanced Error Capabilities and Control Register */
pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
+ /* Don't enable ECRC generation or checking if unsupported */
+ if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
+ reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
+ if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
+ reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
/*
@@ -2298,9 +2359,8 @@ void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}
-static struct pci_bus *pci_create_root_bus_msi(struct device *parent,
- int bus, struct pci_ops *ops, void *sysdata,
- struct list_head *resources, struct msi_controller *msi)
+struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
int error;
struct pci_host_bridge *bridge;
@@ -2310,13 +2370,11 @@ static struct pci_bus *pci_create_root_bus_msi(struct device *parent,
return NULL;
bridge->dev.parent = parent;
- bridge->dev.release = pci_release_host_bridge_dev;
list_splice_init(resources, &bridge->windows);
bridge->sysdata = sysdata;
bridge->busnr = bus;
bridge->ops = ops;
- bridge->msi = msi;
error = pci_register_host_bridge(bridge);
if (error < 0)
@@ -2328,13 +2386,6 @@ err_out:
kfree(bridge);
return NULL;
}
-
-struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
- struct pci_ops *ops, void *sysdata, struct list_head *resources)
-{
- return pci_create_root_bus_msi(parent, bus, ops, sysdata, resources,
- NULL);
-}
EXPORT_SYMBOL_GPL(pci_create_root_bus);
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
@@ -2400,24 +2451,28 @@ void pci_bus_release_busn_res(struct pci_bus *b)
res, ret ? "can not be" : "is");
}
-struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
- struct pci_ops *ops, void *sysdata,
- struct list_head *resources, struct msi_controller *msi)
+int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge)
{
struct resource_entry *window;
bool found = false;
struct pci_bus *b;
- int max;
+ int max, bus, ret;
- resource_list_for_each_entry(window, resources)
+ if (!bridge)
+ return -EINVAL;
+
+ resource_list_for_each_entry(window, &bridge->windows)
if (window->res->flags & IORESOURCE_BUS) {
found = true;
break;
}
- b = pci_create_root_bus_msi(parent, bus, ops, sysdata, resources, msi);
- if (!b)
- return NULL;
+ ret = pci_register_host_bridge(bridge);
+ if (ret < 0)
+ return ret;
+
+ b = bridge->bus;
+ bus = bridge->busnr;
if (!found) {
dev_info(&b->dev,
@@ -2431,14 +2486,41 @@ struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
if (!found)
pci_bus_update_busn_res_end(b, max);
- return b;
+ return 0;
}
+EXPORT_SYMBOL(pci_scan_root_bus_bridge);
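Together with devm_pci_alloc_host_bridge() above, this is the registration pattern the rockchip, xilinx-nwl and xilinx conversions earlier in this diff now follow. A minimal probe sketch; the foo_* names and the private struct are placeholders, and resource/register setup is elided:

#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

struct foo_pcie {
	void __iomem *base;			/* placeholder private state */
};

static struct pci_ops foo_pcie_ops;		/* placeholder config accessors */

static int foo_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	struct foo_pcie *foo;
	int err;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*foo));
	if (!bridge)
		return -ENOMEM;
	foo = pci_host_bridge_priv(bridge);

	/* ... map registers and add host bridge windows to &bridge->windows ... */

	bridge->dev.parent = dev;
	bridge->sysdata = foo;
	bridge->busnr = 0;
	bridge->ops = &foo_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	err = pci_scan_root_bus_bridge(bridge);
	if (err < 0)
		return err;

	pci_bus_add_devices(bridge->bus);
	return 0;
}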
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
- return pci_scan_root_bus_msi(parent, bus, ops, sysdata, resources,
- NULL);
+ struct resource_entry *window;
+ bool found = false;
+ struct pci_bus *b;
+ int max;
+
+ resource_list_for_each_entry(window, resources)
+ if (window->res->flags & IORESOURCE_BUS) {
+ found = true;
+ break;
+ }
+
+ b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
+ if (!b)
+ return NULL;
+
+ if (!found) {
+ dev_info(&b->dev,
+ "No busn resource found for root bus, will use [bus %02x-ff]\n",
+ bus);
+ pci_bus_insert_busn_res(b, bus, 255);
+ }
+
+ max = pci_scan_child_bus(b);
+
+ if (!found)
+ pci_bus_update_busn_res_end(b, max);
+
+ return b;
}
EXPORT_SYMBOL(pci_scan_root_bus);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 085fb787aa9e..6967c6b4cf6b 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -304,7 +304,7 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
{
int i;
- for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
struct resource *r = &dev->resource[i];
if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
@@ -1684,6 +1684,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
+static void quirk_radeon_pm(struct pci_dev *dev)
+{
+ if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
+ dev->subsystem_device == 0x00e2) {
+ if (dev->d3_delay < 20) {
+ dev->d3_delay = 20;
+ dev_info(&dev->dev, "extending delay after power-on from D3 to %d msec\n",
+ dev->d3_delay);
+ }
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
+
#ifdef CONFIG_X86_IO_APIC
static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
{
@@ -3236,6 +3249,10 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588,
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589,
quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b,
+ quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0,
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1,
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
index 95c225be49d1..81eda3d93a5d 100644
--- a/drivers/pci/setup-irq.c
+++ b/drivers/pci/setup-irq.c
@@ -15,6 +15,7 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/cache.h>
+#include "pci.h"
void __weak pcibios_update_irq(struct pci_dev *dev, int irq)
{
@@ -22,12 +23,17 @@ void __weak pcibios_update_irq(struct pci_dev *dev, int irq)
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
-static void pdev_fixup_irq(struct pci_dev *dev,
- u8 (*swizzle)(struct pci_dev *, u8 *),
- int (*map_irq)(const struct pci_dev *, u8, u8))
+void pci_assign_irq(struct pci_dev *dev)
{
- u8 pin, slot;
+ u8 pin;
+ u8 slot = -1;
int irq = 0;
+ struct pci_host_bridge *hbrg = pci_find_host_bridge(dev->bus);
+
+ if (!(hbrg->map_irq)) {
+ dev_dbg(&dev->dev, "runtime IRQ mapping not provided by arch\n");
+ return;
+ }
/* If this device is not on the primary bus, we need to figure out
which interrupt pin it will come in on. We know which slot it
@@ -40,17 +46,22 @@ static void pdev_fixup_irq(struct pci_dev *dev,
if (pin > 4)
pin = 1;
- if (pin != 0) {
+ if (pin) {
/* Follow the chain of bridges, swizzling as we go. */
- slot = (*swizzle)(dev, &pin);
+ if (hbrg->swizzle_irq)
+ slot = (*(hbrg->swizzle_irq))(dev, &pin);
- irq = (*map_irq)(dev, slot, pin);
+ /*
+ * If a swizzling function is not used, map_irq must
+ * ignore slot.
+ */
+ irq = (*(hbrg->map_irq))(dev, slot, pin);
if (irq == -1)
irq = 0;
}
dev->irq = irq;
- dev_dbg(&dev->dev, "fixup irq: got %d\n", dev->irq);
+ dev_dbg(&dev->dev, "assign IRQ: got %d\n", dev->irq);
/* Always tell the device, so the driver knows what is
the real IRQ to use; the device does not use it. */
@@ -60,9 +71,23 @@ static void pdev_fixup_irq(struct pci_dev *dev,
void pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *),
int (*map_irq)(const struct pci_dev *, u8, u8))
{
+ /*
+ * Implement pci_fixup_irqs() through pci_assign_irq().
+ * This code should be removed eventually; it is a wrapper
+ * around the pci_assign_irq() interface that keeps the current
+ * pci_fixup_irqs() behaviour unchanged for architecture code
+ * still relying on the old interface.
+ */
struct pci_dev *dev = NULL;
+ struct pci_host_bridge *hbrg = NULL;
- for_each_pci_dev(dev)
- pdev_fixup_irq(dev, swizzle, map_irq);
+ for_each_pci_dev(dev) {
+ hbrg = pci_find_host_bridge(dev->bus);
+ hbrg->swizzle_irq = swizzle;
+ hbrg->map_irq = map_irq;
+ pci_assign_irq(dev);
+ hbrg->swizzle_irq = NULL;
+ hbrg->map_irq = NULL;
+ }
}
EXPORT_SYMBOL_GPL(pci_fixup_irqs);
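
For context, a minimal sketch (not part of this patch) of how a host controller driver is expected to use the new pci_host_bridge hooks instead of the legacy pci_fixup_irqs() pass; the probe context and the "bridge" pointer are assumptions, while pci_common_swizzle() and of_irq_parse_and_map_pci() are existing helpers whose signatures match the hooks:

	/*
	 * Sketch: host bridge setup, assuming "bridge" was obtained from
	 * pci_alloc_host_bridge() or devm_pci_alloc_host_bridge().
	 */
	bridge->swizzle_irq = pci_common_swizzle;	/* standard bridge swizzling */
	bridge->map_irq = of_irq_parse_and_map_pci;	/* DT interrupt-map lookup */
	/*
	 * pci_assign_irq(dev) can then resolve dev->irq per device through
	 * these hooks, without a global pci_fixup_irqs() pass.
	 */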
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index f6a63406c76e..af81b2dec42e 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -120,6 +120,13 @@ struct sw_event_regs {
u32 reserved16[4];
} __packed;
+enum {
+ SWITCHTEC_CFG0_RUNNING = 0x04,
+ SWITCHTEC_CFG1_RUNNING = 0x05,
+ SWITCHTEC_IMG0_RUNNING = 0x03,
+ SWITCHTEC_IMG1_RUNNING = 0x07,
+};
+
struct sys_info_regs {
u32 device_id;
u32 device_version;
@@ -129,7 +136,9 @@ struct sys_info_regs {
u32 table_format_version;
u32 partition_id;
u32 cfg_file_fmt_version;
- u32 reserved2[58];
+ u16 cfg_running;
+ u16 img_running;
+ u32 reserved2[57];
char vendor_id[8];
char product_id[16];
char product_revision[4];
@@ -807,6 +816,7 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
{
struct switchtec_ioctl_flash_part_info info = {0};
struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
+ struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
u32 active_addr = -1;
if (copy_from_user(&info, uinfo, sizeof(info)))
@@ -816,18 +826,26 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
case SWITCHTEC_IOCTL_PART_CFG0:
active_addr = ioread32(&fi->active_cfg);
set_fw_info_part(&info, &fi->cfg0);
+ if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING)
+ info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_CFG1:
active_addr = ioread32(&fi->active_cfg);
set_fw_info_part(&info, &fi->cfg1);
+ if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING)
+ info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_IMG0:
active_addr = ioread32(&fi->active_img);
set_fw_info_part(&info, &fi->img0);
+ if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING)
+ info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_IMG1:
active_addr = ioread32(&fi->active_img);
set_fw_info_part(&info, &fi->img1);
+ if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING)
+ info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
break;
case SWITCHTEC_IOCTL_PART_NVLOG:
set_fw_info_part(&info, &fi->nvlog);
@@ -861,7 +879,7 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
}
if (info.address == active_addr)
- info.active = 1;
+ info.active |= SWITCHTEC_IOCTL_PART_ACTIVE;
if (copy_to_user(uinfo, &info, sizeof(info)))
return -EFAULT;
@@ -1540,6 +1558,24 @@ static const struct pci_device_id switchtec_pci_tbl[] = {
SWITCHTEC_PCI_DEVICE(0x8544), //PSX 64xG3
SWITCHTEC_PCI_DEVICE(0x8545), //PSX 80xG3
SWITCHTEC_PCI_DEVICE(0x8546), //PSX 96xG3
+ SWITCHTEC_PCI_DEVICE(0x8551), //PAX 24XG3
+ SWITCHTEC_PCI_DEVICE(0x8552), //PAX 32XG3
+ SWITCHTEC_PCI_DEVICE(0x8553), //PAX 48XG3
+ SWITCHTEC_PCI_DEVICE(0x8554), //PAX 64XG3
+ SWITCHTEC_PCI_DEVICE(0x8555), //PAX 80XG3
+ SWITCHTEC_PCI_DEVICE(0x8556), //PAX 96XG3
+ SWITCHTEC_PCI_DEVICE(0x8561), //PFXL 24XG3
+ SWITCHTEC_PCI_DEVICE(0x8562), //PFXL 32XG3
+ SWITCHTEC_PCI_DEVICE(0x8563), //PFXL 48XG3
+ SWITCHTEC_PCI_DEVICE(0x8564), //PFXL 64XG3
+ SWITCHTEC_PCI_DEVICE(0x8565), //PFXL 80XG3
+ SWITCHTEC_PCI_DEVICE(0x8566), //PFXL 96XG3
+ SWITCHTEC_PCI_DEVICE(0x8571), //PFXI 24XG3
+ SWITCHTEC_PCI_DEVICE(0x8572), //PFXI 32XG3
+ SWITCHTEC_PCI_DEVICE(0x8573), //PFXI 48XG3
+ SWITCHTEC_PCI_DEVICE(0x8574), //PFXI 64XG3
+ SWITCHTEC_PCI_DEVICE(0x8575), //PFXI 80XG3
+ SWITCHTEC_PCI_DEVICE(0x8576), //PFXI 96XG3
{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
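
A user-space sketch of how the extended "active" field is consumed after this change: it is now a flag mask rather than a plain 0/1, so callers test SWITCHTEC_IOCTL_PART_ACTIVE and SWITCHTEC_IOCTL_PART_RUNNING individually. The device node path is an assumption; the ioctl, structure, and flags come from the switchtec uapi header.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/switchtec_ioctl.h>

int main(void)
{
	struct switchtec_ioctl_flash_part_info info = {
		.flash_partition = SWITCHTEC_IOCTL_PART_IMG0,
	};
	int fd = open("/dev/switchtec0", O_RDWR);	/* assumed device node */

	if (fd < 0 || ioctl(fd, SWITCHTEC_IOCTL_FLASH_PART_INFO, &info))
		return 1;

	/* "active" is a bitmask: a partition can be active, running, both, or neither */
	printf("IMG0: active=%d running=%d\n",
	       !!(info.active & SWITCHTEC_IOCTL_PART_ACTIVE),
	       !!(info.active & SWITCHTEC_IOCTL_PART_RUNNING));
	close(fd);
	return 0;
}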
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 37af5e3029d5..e14b46c7b37f 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -146,6 +146,20 @@ config PINCTRL_FALCON
depends on SOC_FALCON
depends on PINCTRL_LANTIQ
+config PINCTRL_MCP23S08
+ tristate "Microchip MCP23xxx I/O expander"
+ depends on SPI_MASTER || I2C
+ depends on I2C || I2C=n
+ select GPIOLIB_IRQCHIP
+ select REGMAP_I2C if I2C
+ select REGMAP_SPI if SPI_MASTER
+ select GENERIC_PINCONF
+ help
+ SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
+ I/O expanders.
+ This provides a GPIO interface supporting inputs and outputs.
+ The I2C versions of the chips can be used as interrupt controllers.
+
config PINCTRL_MESON
bool
depends on OF
@@ -174,6 +188,17 @@ config PINCTRL_ROCKCHIP
select GENERIC_IRQ_CHIP
select MFD_SYSCON
+config PINCTRL_RZA1
+ bool "Renesas RZ/A1 gpio and pinctrl driver"
+ depends on OF
+ depends on ARCH_R7S72100 || COMPILE_TEST
+ select GPIOLIB
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+ help
+ This selects the pinctrl driver for Renesas RZ/A1 platforms.
+
config PINCTRL_SINGLE
tristate "One-register-per-pin type device tree based pinctrl driver"
depends on OF
@@ -296,6 +321,16 @@ config PINCTRL_ZYNQ
help
This selects the pinctrl driver for Xilinx Zynq.
+config PINCTRL_INGENIC
+ bool "Pinctrl driver for the Ingenic JZ47xx SoCs"
+ default y
+ depends on OF
+ depends on MACH_INGENIC || COMPILE_TEST
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select REGMAP_MMIO
+
source "drivers/pinctrl/aspeed/Kconfig"
source "drivers/pinctrl/bcm/Kconfig"
source "drivers/pinctrl/berlin/Kconfig"
@@ -315,6 +350,7 @@ source "drivers/pinctrl/ti/Kconfig"
source "drivers/pinctrl/uniphier/Kconfig"
source "drivers/pinctrl/vt8500/Kconfig"
source "drivers/pinctrl/mediatek/Kconfig"
+source "drivers/pinctrl/zte/Kconfig"
config PINCTRL_XWAY
bool
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 0e9b2226a7c2..2bc641d62400 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -19,12 +19,14 @@ obj-$(CONFIG_PINCTRL_DA850_PUPD) += pinctrl-da850-pupd.o
obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
obj-$(CONFIG_PINCTRL_MAX77620) += pinctrl-max77620.o
+obj-$(CONFIG_PINCTRL_MCP23S08) += pinctrl-mcp23s08.o
obj-$(CONFIG_PINCTRL_MESON) += meson/
obj-$(CONFIG_PINCTRL_OXNAS) += pinctrl-oxnas.o
obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o
obj-$(CONFIG_PINCTRL_PIC32) += pinctrl-pic32.o
obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
+obj-$(CONFIG_PINCTRL_RZA1) += pinctrl-rza1.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
obj-$(CONFIG_PINCTRL_SIRF) += sirf/
obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o
@@ -39,6 +41,7 @@ obj-$(CONFIG_PINCTRL_LPC18XX) += pinctrl-lpc18xx.o
obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
+obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
obj-$(CONFIG_ARCH_ASPEED) += aspeed/
obj-y += bcm/
@@ -58,3 +61,4 @@ obj-y += ti/
obj-$(CONFIG_PINCTRL_UNIPHIER) += uniphier/
obj-$(CONFIG_ARCH_VT8500) += vt8500/
obj-$(CONFIG_PINCTRL_MTK) += mediatek/
+obj-$(CONFIG_PINCTRL_ZX) += zte/
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index 810a81786f62..a7cceffcedfa 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Broadcom Corporation
+ * Copyright (C) 2013-2017 Broadcom
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -10,9 +10,10 @@
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -1444,10 +1445,4 @@ static struct platform_driver bcm281xx_pinctrl_driver = {
.of_match_table = bcm281xx_pinctrl_of_match,
},
};
-
-module_platform_driver_probe(bcm281xx_pinctrl_driver, bcm281xx_pinctrl_probe);
-
-MODULE_AUTHOR("Broadcom Corporation <[email protected]>");
-MODULE_AUTHOR("Sherman Yin <[email protected]>");
-MODULE_DESCRIPTION("Broadcom BCM281xx pinctrl driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver_probe(bcm281xx_pinctrl_driver, bcm281xx_pinctrl_probe);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 85d009112864..230883168e99 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -27,7 +27,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/of_irq.h>
@@ -1048,6 +1048,10 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
for (i = 0; i < BCM2835_NUM_IRQS; i++) {
pc->irq[i] = irq_of_parse_and_map(np, i);
pc->irq_group[i] = i;
+
+ if (pc->irq[i] == 0)
+ continue;
+
/*
* Use the same handler for all groups: this is necessary
* since we use one gpiochip to cover all lines - the
@@ -1075,31 +1079,17 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static int bcm2835_pinctrl_remove(struct platform_device *pdev)
-{
- struct bcm2835_pinctrl *pc = platform_get_drvdata(pdev);
-
- gpiochip_remove(&pc->gpio_chip);
-
- return 0;
-}
-
static const struct of_device_id bcm2835_pinctrl_match[] = {
{ .compatible = "brcm,bcm2835-gpio" },
{}
};
-MODULE_DEVICE_TABLE(of, bcm2835_pinctrl_match);
static struct platform_driver bcm2835_pinctrl_driver = {
.probe = bcm2835_pinctrl_probe,
- .remove = bcm2835_pinctrl_remove,
.driver = {
.name = MODULE_NAME,
.of_match_table = bcm2835_pinctrl_match,
+ .suppress_bind_attrs = true,
},
};
-module_platform_driver(bcm2835_pinctrl_driver);
-
-MODULE_AUTHOR("Chris Boot, Simon Arlott, Stephen Warren");
-MODULE_DESCRIPTION("BCM2835 Pin control driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(bcm2835_pinctrl_driver);
diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
index d31c95701a92..44df35942a43 100644
--- a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
@@ -1,4 +1,5 @@
-/* Copyright (C) 2014-2015 Broadcom Corporation
+/*
+ * Copyright (C) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -8,6 +9,10 @@
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
+ */
+
+/*
+ * Broadcom Cygnus IOMUX driver
*
* This file contains the Cygnus IOMUX driver that supports group based PINMUX
* configuration. Although PINMUX configuration is mainly group based, the
@@ -17,7 +22,6 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
@@ -1016,7 +1020,3 @@ static int __init cygnus_pinmux_init(void)
return platform_driver_register(&cygnus_pinmux_driver);
}
arch_initcall(cygnus_pinmux_init);
-
-MODULE_AUTHOR("Ray Jui <[email protected]>");
-MODULE_DESCRIPTION("Broadcom Cygnus IOMUX driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index af5e904d4a1e..85a8c97d9dfe 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014-2015 Broadcom Corporation
+ * Copyright (C) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -9,7 +9,9 @@
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ */
+
+/*
* This file contains the Broadcom Iproc GPIO driver that supports 3
* GPIO controllers on Iproc including the ASIU GPIO controller, the
* chipCommonG GPIO controller, and the always-on GPIO controller. Basic
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index 22442438275a..1cfe45fd391f 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Broadcom Corporation
+ * Copyright (C) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -9,7 +9,9 @@
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ */
+
+/*
* This file contains the Broadcom Northstar Plus (NSP) GPIO driver that
* supports the chipCommonA GPIO controller. Basic PINCONF such as bias,
* pull up/down, slew and drive strength are also supported in this driver.
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index bd459a93b0e7..c5e2c5705058 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -170,7 +170,7 @@ const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned pin)
const struct pin_desc *desc;
desc = pin_desc_get(pctldev, pin);
- if (desc == NULL) {
+ if (!desc) {
dev_err(pctldev->dev, "failed to get pin(%d) name\n",
pin);
return NULL;
@@ -214,7 +214,7 @@ static void pinctrl_free_pindescs(struct pinctrl_dev *pctldev,
pindesc = radix_tree_lookup(&pctldev->pin_desc_tree,
pins[i].number);
- if (pindesc != NULL) {
+ if (pindesc) {
radix_tree_delete(&pctldev->pin_desc_tree,
pins[i].number);
if (pindesc->dynamic_name)
@@ -230,7 +230,7 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
struct pin_desc *pindesc;
pindesc = pin_desc_get(pctldev, pin->number);
- if (pindesc != NULL) {
+ if (pindesc) {
dev_err(pctldev->dev, "pin %d already registered\n",
pin->number);
return -EINVAL;
@@ -248,7 +248,7 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
pindesc->name = pin->name;
} else {
pindesc->name = kasprintf(GFP_KERNEL, "PIN%u", pin->number);
- if (pindesc->name == NULL) {
+ if (!pindesc->name) {
kfree(pindesc);
return -ENOMEM;
}
@@ -402,7 +402,7 @@ static int pinctrl_get_device_gpio_range(unsigned gpio,
struct pinctrl_gpio_range *range;
range = pinctrl_match_gpio_range(pctldev, gpio);
- if (range != NULL) {
+ if (range) {
*outdev = pctldev;
*outrange = range;
mutex_unlock(&pinctrldev_list_mutex);
@@ -933,7 +933,7 @@ static int add_setting(struct pinctrl *p, struct pinctrl_dev *pctldev,
else
setting->pctldev =
get_pinctrl_dev_from_devname(map->ctrl_dev_name);
- if (setting->pctldev == NULL) {
+ if (!setting->pctldev) {
kfree(setting);
/* Do not defer probing of hogs (circular loop) */
if (!strcmp(map->ctrl_dev_name, map->dev_name))
@@ -1024,6 +1024,16 @@ static struct pinctrl *create_pinctrl(struct device *dev,
/* Map must be for this device */
if (strcmp(map->dev_name, devname))
continue;
+ /*
+ * If pctldev is not NULL, we are claiming a hog for it; that
+ * means the setting is served by pctldev itself.
+ *
+ * Thus we must skip maps that are for this device but are
+ * served by another device.
+ */
+ if (pctldev &&
+ strcmp(dev_name(pctldev->dev), map->ctrl_dev_name))
+ continue;
ret = add_setting(p, pctldev, map);
/*
@@ -1080,7 +1090,7 @@ struct pinctrl *pinctrl_get(struct device *dev)
* return another pointer to it.
*/
p = find_pinctrl(dev);
- if (p != NULL) {
+ if (p) {
dev_dbg(dev, "obtain a copy of previously claimed pinctrl\n");
kref_get(&p->users);
return p;
@@ -1551,7 +1561,7 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
pin = pctldev->desc->pins[i].number;
desc = pin_desc_get(pctldev, pin);
/* Pin space may be sparse */
- if (desc == NULL)
+ if (!desc)
continue;
seq_printf(s, "pin %d (%s) ", pin, desc->name);
@@ -1718,7 +1728,7 @@ static int pinctrl_maps_show(struct seq_file *s, void *what)
break;
}
- seq_printf(s, "\n");
+ seq_putc(s, '\n');
}
mutex_unlock(&pinctrl_maps_mutex);
@@ -2131,7 +2141,7 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
{
struct pinctrl_gpio_range *range, *n;
- if (pctldev == NULL)
+ if (!pctldev)
return;
mutex_lock(&pctldev->mutex);
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index cae05e76c111..0b266b2aecd4 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -2,7 +2,7 @@ config PINCTRL_IMX
bool
select GENERIC_PINCTRL_GROUPS
select GENERIC_PINMUX_FUNCTIONS
- select PINCONF
+ select GENERIC_PINCONF
select REGMAP
config PINCTRL_IMX1_CORE
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 74bd90dfd7b1..72aca758f4c6 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -27,6 +27,7 @@
#include <linux/regmap.h>
#include "../core.h"
+#include "../pinconf.h"
#include "../pinmux.h"
#include "pinctrl-imx.h"
@@ -196,14 +197,16 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
if (info->flags & SHARE_MUX_CONF_REG) {
u32 reg;
reg = readl(ipctl->base + pin_reg->mux_reg);
- reg &= ~(0x7 << 20);
- reg |= (pin->mux_mode << 20);
+ reg &= ~info->mux_mask;
+ reg |= (pin->mux_mode << info->mux_shift);
writel(reg, ipctl->base + pin_reg->mux_reg);
+ dev_dbg(ipctl->dev, "write: offset 0x%x val 0x%x\n",
+ pin_reg->mux_reg, reg);
} else {
writel(pin->mux_mode, ipctl->base + pin_reg->mux_reg);
+ dev_dbg(ipctl->dev, "write: offset 0x%x val 0x%x\n",
+ pin_reg->mux_reg, pin->mux_mode);
}
- dev_dbg(ipctl->dev, "write: offset 0x%x val 0x%x\n",
- pin_reg->mux_reg, pin->mux_mode);
/*
* If the select input value begins with 0xff, it's a quirky
@@ -287,7 +290,7 @@ static int imx_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
mux_pin:
reg = readl(ipctl->base + pin_reg->mux_reg);
- reg &= ~(0x7 << 20);
+ reg &= ~info->mux_mask;
reg |= imx_pin->config;
writel(reg, ipctl->base + pin_reg->mux_reg);
@@ -359,6 +362,62 @@ static const struct pinmux_ops imx_pmx_ops = {
.gpio_set_direction = imx_pmx_gpio_set_direction,
};
+/* decode generic config into raw register values */
+static u32 imx_pinconf_decode_generic_config(struct imx_pinctrl *ipctl,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct imx_pinctrl_soc_info *info = ipctl->info;
+ struct imx_cfg_params_decode *decode;
+ enum pin_config_param param;
+ u32 raw_config = 0;
+ u32 param_val;
+ int i, j;
+
+ WARN_ON(num_configs > info->num_decodes);
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ param_val = pinconf_to_config_argument(configs[i]);
+ decode = info->decodes;
+ for (j = 0; j < info->num_decodes; j++) {
+ if (param == decode->param) {
+ if (decode->invert)
+ param_val = !param_val;
+ raw_config |= (param_val << decode->shift)
+ & decode->mask;
+ break;
+ }
+ decode++;
+ }
+ }
+
+ if (info->fixup)
+ info->fixup(configs, num_configs, &raw_config);
+
+ return raw_config;
+}
+
+static u32 imx_pinconf_parse_generic_config(struct device_node *np,
+ struct imx_pinctrl *ipctl)
+{
+ struct imx_pinctrl_soc_info *info = ipctl->info;
+ struct pinctrl_dev *pctl = ipctl->pctl;
+ unsigned int num_configs;
+ unsigned long *configs;
+ int ret;
+
+ if (!info->generic_pinconf)
+ return 0;
+
+ ret = pinconf_generic_parse_dt_config(np, pctl, &configs,
+ &num_configs);
+ if (ret)
+ return 0;
+
+ return imx_pinconf_decode_generic_config(ipctl, configs, num_configs);
+}
+
static int imx_pinconf_get(struct pinctrl_dev *pctldev,
unsigned pin_id, unsigned long *config)
{
@@ -375,7 +434,7 @@ static int imx_pinconf_get(struct pinctrl_dev *pctldev,
*config = readl(ipctl->base + pin_reg->conf_reg);
if (info->flags & SHARE_MUX_CONF_REG)
- *config &= 0xffff;
+ *config &= ~info->mux_mask;
return 0;
}
@@ -402,14 +461,16 @@ static int imx_pinconf_set(struct pinctrl_dev *pctldev,
if (info->flags & SHARE_MUX_CONF_REG) {
u32 reg;
reg = readl(ipctl->base + pin_reg->conf_reg);
- reg &= ~0xffff;
+ reg &= info->mux_mask;
reg |= configs[i];
writel(reg, ipctl->base + pin_reg->conf_reg);
+ dev_dbg(ipctl->dev, "write: offset 0x%x val 0x%x\n",
+ pin_reg->conf_reg, reg);
} else {
writel(configs[i], ipctl->base + pin_reg->conf_reg);
+ dev_dbg(ipctl->dev, "write: offset 0x%x val 0x%lx\n",
+ pin_reg->conf_reg, configs[i]);
}
- dev_dbg(ipctl->dev, "write: offset 0x%x val 0x%lx\n",
- pin_reg->conf_reg, configs[i]);
} /* for each config */
return 0;
@@ -475,9 +536,10 @@ static const struct pinconf_ops imx_pinconf_ops = {
static int imx_pinctrl_parse_groups(struct device_node *np,
struct group_desc *grp,
- struct imx_pinctrl_soc_info *info,
+ struct imx_pinctrl *ipctl,
u32 index)
{
+ struct imx_pinctrl_soc_info *info = ipctl->info;
int size, pin_size;
const __be32 *list;
int i;
@@ -489,25 +551,44 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
pin_size = SHARE_FSL_PIN_SIZE;
else
pin_size = FSL_PIN_SIZE;
+
+ if (info->generic_pinconf)
+ pin_size -= 4;
+
/* Initialise group */
grp->name = np->name;
/*
* the binding format is fsl,pins = <PIN_FUNC_ID CONFIG ...>,
* do sanity check and calculate pins number
+ *
+ * First try legacy 'fsl,pins' property, then fall back to the
+ * generic 'pins'.
+ *
+ * Note: in the generic 'pins' case there is no CONFIG part in
+ * the binding format.
*/
list = of_get_property(np, "fsl,pins", &size);
if (!list) {
- dev_err(info->dev, "no fsl,pins property in node %s\n", np->full_name);
- return -EINVAL;
+ list = of_get_property(np, "pins", &size);
+ if (!list) {
+ dev_err(info->dev,
+ "no fsl,pins and pins property in node %s\n",
+ np->full_name);
+ return -EINVAL;
+ }
}
/* we do not check return since it's safe node passed down */
if (!size || size % pin_size) {
- dev_err(info->dev, "Invalid fsl,pins property in node %s\n", np->full_name);
+ dev_err(info->dev, "Invalid fsl,pins or pins property in node %s\n",
+ np->full_name);
return -EINVAL;
}
+ /* first try to parse the generic pin config */
+ config = imx_pinconf_parse_generic_config(np, ipctl);
+
grp->num_pins = size / pin_size;
grp->data = devm_kzalloc(info->dev, grp->num_pins *
sizeof(struct imx_pin), GFP_KERNEL);
@@ -544,11 +625,18 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
pin->mux_mode = be32_to_cpu(*list++);
pin->input_val = be32_to_cpu(*list++);
- /* SION bit is in mux register */
- config = be32_to_cpu(*list++);
- if (config & IMX_PAD_SION)
- pin->mux_mode |= IOMUXC_CONFIG_SION;
- pin->config = config & ~IMX_PAD_SION;
+ if (info->generic_pinconf) {
+ /* generic pin config decoded */
+ pin->config = config;
+ } else {
+ /* legacy pin config read from devicetree */
+ config = be32_to_cpu(*list++);
+
+ /* SION bit is in mux register */
+ if (config & IMX_PAD_SION)
+ pin->mux_mode |= IOMUXC_CONFIG_SION;
+ pin->config = config & ~IMX_PAD_SION;
+ }
dev_dbg(info->dev, "%s: 0x%x 0x%08lx", info->pins[pin_id].name,
pin->mux_mode, pin->config);
@@ -581,9 +669,10 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
dev_err(info->dev, "no groups defined in %s\n", np->full_name);
return -EINVAL;
}
- func->group_names = devm_kzalloc(info->dev,
- func->num_group_names *
+ func->group_names = devm_kcalloc(info->dev, func->num_group_names,
sizeof(char *), GFP_KERNEL);
+ if (!func->group_names)
+ return -ENOMEM;
for_each_child_of_node(np, child) {
func->group_names[i] = child->name;
@@ -598,7 +687,7 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
info->group_index++, grp);
mutex_unlock(&info->mutex);
- imx_pinctrl_parse_groups(child, grp, info, i++);
+ imx_pinctrl_parse_groups(child, grp, ipctl, i++);
}
return 0;
@@ -769,6 +858,10 @@ int imx_pinctrl_probe(struct platform_device *pdev,
imx_pinctrl_desc->confops = &imx_pinconf_ops;
imx_pinctrl_desc->owner = THIS_MODULE;
+ /* for generic pinconf */
+ imx_pinctrl_desc->custom_params = info->custom_params;
+ imx_pinctrl_desc->num_custom_params = info->num_custom_params;
+
mutex_init(&info->mutex);
ipctl->info = info;
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.h b/drivers/pinctrl/freescale/pinctrl-imx.h
index ff2d3e56b7c5..880bba7fd1ab 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.h
+++ b/drivers/pinctrl/freescale/pinctrl-imx.h
@@ -15,6 +15,8 @@
#ifndef __DRIVERS_PINCTRL_IMX_H
#define __DRIVERS_PINCTRL_IMX_H
+#include <linux/pinctrl/pinconf-generic.h>
+
struct platform_device;
/**
@@ -44,6 +46,14 @@ struct imx_pin_reg {
s16 conf_reg;
};
+/* decode a generic config into raw register value */
+struct imx_cfg_params_decode {
+ enum pin_config_param param;
+ u32 mask;
+ u8 shift;
+ bool invert;
+};
+
struct imx_pinctrl_soc_info {
struct device *dev;
const struct pinctrl_pin_desc *pins;
@@ -53,8 +63,27 @@ struct imx_pinctrl_soc_info {
unsigned int flags;
const char *gpr_compatible;
struct mutex mutex;
+
+ /* MUX_MODE shift and mask in case SHARE_MUX_CONF_REG */
+ unsigned int mux_mask;
+ u8 mux_shift;
+
+ /* generic pinconf */
+ bool generic_pinconf;
+ const struct pinconf_generic_params *custom_params;
+ unsigned int num_custom_params;
+ struct imx_cfg_params_decode *decodes;
+ unsigned int num_decodes;
+ void (*fixup)(unsigned long *configs, unsigned int num_configs,
+ u32 *raw_config);
};
+#define IMX_CFG_PARAMS_DECODE(p, m, o) \
+ { .param = p, .mask = m, .shift = o, .invert = false, }
+
+#define IMX_CFG_PARAMS_DECODE_INVERT(p, m, o) \
+ { .param = p, .mask = m, .shift = o, .invert = true, }
+
#define SHARE_MUX_CONF_REG 0x1
#define ZERO_OFFSET_VALID 0x2
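
An illustrative soc_info fragment (not taken from any real SoC file) showing how a platform opts into the generic pinconf path added above; the register bit positions in the decode table are invented for the example:

/* Example only: field layout follows struct imx_pinctrl_soc_info above;
 * the bit positions here are made up. */
static struct imx_cfg_params_decode example_decodes[] = {
	IMX_CFG_PARAMS_DECODE(PIN_CONFIG_BIAS_PULL_UP, BIT(6), 6),
	IMX_CFG_PARAMS_DECODE_INVERT(PIN_CONFIG_DRIVE_OPEN_DRAIN, BIT(5), 5),
};

static struct imx_pinctrl_soc_info example_pinctrl_info = {
	/* .pins / .npins as for any other i.MX variant */
	.generic_pinconf = true,
	.decodes = example_decodes,
	.num_decodes = ARRAY_SIZE(example_decodes),
};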
diff --git a/drivers/pinctrl/freescale/pinctrl-imx7d.c b/drivers/pinctrl/freescale/pinctrl-imx7d.c
index a465a66c3ef4..754159ee7b1e 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx7d.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx7d.c
@@ -358,19 +358,19 @@ static const struct pinctrl_pin_desc imx7d_lpsr_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX7D_PAD_GPIO1_IO07),
};
-static struct imx_pinctrl_soc_info imx7d_pinctrl_info = {
+static const struct imx_pinctrl_soc_info imx7d_pinctrl_info = {
.pins = imx7d_pinctrl_pads,
.npins = ARRAY_SIZE(imx7d_pinctrl_pads),
.gpr_compatible = "fsl,imx7d-iomuxc-gpr",
};
-static struct imx_pinctrl_soc_info imx7d_lpsr_pinctrl_info = {
+static const struct imx_pinctrl_soc_info imx7d_lpsr_pinctrl_info = {
.pins = imx7d_lpsr_pinctrl_pads,
.npins = ARRAY_SIZE(imx7d_lpsr_pinctrl_pads),
.flags = ZERO_OFFSET_VALID,
};
-static struct of_device_id imx7d_pinctrl_of_match[] = {
+static const struct of_device_id imx7d_pinctrl_of_match[] = {
{ .compatible = "fsl,imx7d-iomuxc", .data = &imx7d_pinctrl_info, },
{ .compatible = "fsl,imx7d-iomuxc-lpsr", .data = &imx7d_lpsr_pinctrl_info },
{ /* sentinel */ }
diff --git a/drivers/pinctrl/freescale/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c
index 2b1e198e3092..3bd85564d1e4 100644
--- a/drivers/pinctrl/freescale/pinctrl-vf610.c
+++ b/drivers/pinctrl/freescale/pinctrl-vf610.c
@@ -299,6 +299,8 @@ static struct imx_pinctrl_soc_info vf610_pinctrl_info = {
.pins = vf610_pinctrl_pads,
.npins = ARRAY_SIZE(vf610_pinctrl_pads),
.flags = SHARE_MUX_CONF_REG | ZERO_OFFSET_VALID,
+ .mux_mask = 0x700000,
+ .mux_shift = 20,
};
static const struct of_device_id vf610_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index 396830a41127..b82d6ff3116f 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -56,6 +56,14 @@ config PINCTRL_BROXTON
Broxton pinctrl driver provides an interface that allows
configuring of SoC pins and using them as GPIOs.
+config PINCTRL_CANNONLAKE
+ tristate "Intel Cannon Lake PCH pinctrl and GPIO driver"
+ depends on ACPI
+ select PINCTRL_INTEL
+ help
+ This pinctrl driver provides an interface that allows configuring
+ of Intel Cannon Lake PCH pins and using them as GPIOs.
+
config PINCTRL_GEMINILAKE
tristate "Intel Gemini Lake SoC pinctrl and GPIO driver"
depends on ACPI
diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile
index 12f3af5b2ca5..81df3cf408e3 100644
--- a/drivers/pinctrl/intel/Makefile
+++ b/drivers/pinctrl/intel/Makefile
@@ -5,5 +5,6 @@ obj-$(CONFIG_PINCTRL_CHERRYVIEW) += pinctrl-cherryview.o
obj-$(CONFIG_PINCTRL_MERRIFIELD) += pinctrl-merrifield.o
obj-$(CONFIG_PINCTRL_INTEL) += pinctrl-intel.o
obj-$(CONFIG_PINCTRL_BROXTON) += pinctrl-broxton.o
+obj-$(CONFIG_PINCTRL_CANNONLAKE) += pinctrl-cannonlake.o
obj-$(CONFIG_PINCTRL_GEMINILAKE) += pinctrl-geminilake.o
obj-$(CONFIG_PINCTRL_SUNRISEPOINT) += pinctrl-sunrisepoint.o
diff --git a/drivers/pinctrl/intel/pinctrl-cannonlake.c b/drivers/pinctrl/intel/pinctrl-cannonlake.c
new file mode 100644
index 000000000000..3bc609b67dc2
--- /dev/null
+++ b/drivers/pinctrl/intel/pinctrl-cannonlake.c
@@ -0,0 +1,442 @@
+/*
+ * Intel Cannon Lake PCH pinctrl/GPIO driver
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Author: Mika Westerberg <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-intel.h"
+
+#define CNL_PAD_OWN 0x020
+#define CNL_PADCFGLOCK 0x080
+#define CNL_HOSTSW_OWN 0x0b0
+#define CNL_GPI_IE 0x120
+
+#define CNL_GPP(r, s, e) \
+ { \
+ .reg_num = (r), \
+ .base = (s), \
+ .size = ((e) - (s) + 1), \
+ }
+
+#define CNL_COMMUNITY(b, s, e, g) \
+ { \
+ .barno = (b), \
+ .padown_offset = CNL_PAD_OWN, \
+ .padcfglock_offset = CNL_PADCFGLOCK, \
+ .hostown_offset = CNL_HOSTSW_OWN, \
+ .ie_offset = CNL_GPI_IE, \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = ARRAY_SIZE(g), \
+ }
+
+/* Cannon Lake-LP */
+static const struct pinctrl_pin_desc cnllp_pins[] = {
+ /* GPP_A */
+ PINCTRL_PIN(0, "RCINB"),
+ PINCTRL_PIN(1, "LAD_0"),
+ PINCTRL_PIN(2, "LAD_1"),
+ PINCTRL_PIN(3, "LAD_2"),
+ PINCTRL_PIN(4, "LAD_3"),
+ PINCTRL_PIN(5, "LFRAMEB"),
+ PINCTRL_PIN(6, "SERIRQ"),
+ PINCTRL_PIN(7, "PIRQAB"),
+ PINCTRL_PIN(8, "CLKRUNB"),
+ PINCTRL_PIN(9, "CLKOUT_LPC_0"),
+ PINCTRL_PIN(10, "CLKOUT_LPC_1"),
+ PINCTRL_PIN(11, "PMEB"),
+ PINCTRL_PIN(12, "BM_BUSYB"),
+ PINCTRL_PIN(13, "SUSWARNB_SUSPWRDNACK"),
+ PINCTRL_PIN(14, "SUS_STATB"),
+ PINCTRL_PIN(15, "SUSACKB"),
+ PINCTRL_PIN(16, "SD_1P8_SEL"),
+ PINCTRL_PIN(17, "SD_PWR_EN_B"),
+ PINCTRL_PIN(18, "ISH_GP_0"),
+ PINCTRL_PIN(19, "ISH_GP_1"),
+ PINCTRL_PIN(20, "ISH_GP_2"),
+ PINCTRL_PIN(21, "ISH_GP_3"),
+ PINCTRL_PIN(22, "ISH_GP_4"),
+ PINCTRL_PIN(23, "ISH_GP_5"),
+ PINCTRL_PIN(24, "ESPI_CLK_LOOPBK"),
+ /* GPP_B */
+ PINCTRL_PIN(25, "CORE_VID_0"),
+ PINCTRL_PIN(26, "CORE_VID_1"),
+ PINCTRL_PIN(27, "VRALERTB"),
+ PINCTRL_PIN(28, "CPU_GP_2"),
+ PINCTRL_PIN(29, "CPU_GP_3"),
+ PINCTRL_PIN(30, "SRCCLKREQB_0"),
+ PINCTRL_PIN(31, "SRCCLKREQB_1"),
+ PINCTRL_PIN(32, "SRCCLKREQB_2"),
+ PINCTRL_PIN(33, "SRCCLKREQB_3"),
+ PINCTRL_PIN(34, "SRCCLKREQB_4"),
+ PINCTRL_PIN(35, "SRCCLKREQB_5"),
+ PINCTRL_PIN(36, "EXT_PWR_GATEB"),
+ PINCTRL_PIN(37, "SLP_S0B"),
+ PINCTRL_PIN(38, "PLTRSTB"),
+ PINCTRL_PIN(39, "SPKR"),
+ PINCTRL_PIN(40, "GSPI0_CS0B"),
+ PINCTRL_PIN(41, "GSPI0_CLK"),
+ PINCTRL_PIN(42, "GSPI0_MISO"),
+ PINCTRL_PIN(43, "GSPI0_MOSI"),
+ PINCTRL_PIN(44, "GSPI1_CS0B"),
+ PINCTRL_PIN(45, "GSPI1_CLK"),
+ PINCTRL_PIN(46, "GSPI1_MISO"),
+ PINCTRL_PIN(47, "GSPI1_MOSI"),
+ PINCTRL_PIN(48, "SML1ALERTB"),
+ PINCTRL_PIN(49, "GSPI0_CLK_LOOPBK"),
+ PINCTRL_PIN(50, "GSPI1_CLK_LOOPBK"),
+ /* GPP_G */
+ PINCTRL_PIN(51, "SD3_CMD"),
+ PINCTRL_PIN(52, "SD3_D0_SD4_RCLK_P"),
+ PINCTRL_PIN(53, "SD3_D1_SD4_RCLK_N"),
+ PINCTRL_PIN(54, "SD3_D2"),
+ PINCTRL_PIN(55, "SD3_D3"),
+ PINCTRL_PIN(56, "SD3_CDB"),
+ PINCTRL_PIN(57, "SD3_CLK"),
+ PINCTRL_PIN(58, "SD3_WP"),
+ /* SPI */
+ PINCTRL_PIN(59, "SPI0_IO_2"),
+ PINCTRL_PIN(60, "SPI0_IO_3"),
+ PINCTRL_PIN(61, "SPI0_MOSI_IO_0"),
+ PINCTRL_PIN(62, "SPI0_MISO_IO_1"),
+ PINCTRL_PIN(63, "SPI0_TPM_CSB"),
+ PINCTRL_PIN(64, "SPI0_FLASH_0_CSB"),
+ PINCTRL_PIN(65, "SPI0_FLASH_1_CSB"),
+ PINCTRL_PIN(66, "SPI0_CLK"),
+ PINCTRL_PIN(67, "SPI0_CLK_LOOPBK"),
+ /* GPP_D */
+ PINCTRL_PIN(68, "SPI1_CSB"),
+ PINCTRL_PIN(69, "SPI1_CLK"),
+ PINCTRL_PIN(70, "SPI1_MISO_IO_1"),
+ PINCTRL_PIN(71, "SPI1_MOSI_IO_0"),
+ PINCTRL_PIN(72, "IMGCLKOUT_0"),
+ PINCTRL_PIN(73, "ISH_I2C0_SDA"),
+ PINCTRL_PIN(74, "ISH_I2C0_SCL"),
+ PINCTRL_PIN(75, "ISH_I2C1_SDA"),
+ PINCTRL_PIN(76, "ISH_I2C1_SCL"),
+ PINCTRL_PIN(77, "ISH_SPI_CSB"),
+ PINCTRL_PIN(78, "ISH_SPI_CLK"),
+ PINCTRL_PIN(79, "ISH_SPI_MISO"),
+ PINCTRL_PIN(80, "ISH_SPI_MOSI"),
+ PINCTRL_PIN(81, "ISH_UART0_RXD"),
+ PINCTRL_PIN(82, "ISH_UART0_TXD"),
+ PINCTRL_PIN(83, "ISH_UART0_RTSB"),
+ PINCTRL_PIN(84, "ISH_UART0_CTSB"),
+ PINCTRL_PIN(85, "DMIC_CLK_1"),
+ PINCTRL_PIN(86, "DMIC_DATA_1"),
+ PINCTRL_PIN(87, "DMIC_CLK_0"),
+ PINCTRL_PIN(88, "DMIC_DATA_0"),
+ PINCTRL_PIN(89, "SPI1_IO_2"),
+ PINCTRL_PIN(90, "SPI1_IO_3"),
+ PINCTRL_PIN(91, "SSP_MCLK"),
+ PINCTRL_PIN(92, "GSPI2_CLK_LOOPBK"),
+ /* GPP_F */
+ PINCTRL_PIN(93, "CNV_GNSS_PA_BLANKING"),
+ PINCTRL_PIN(94, "CNV_GNSS_FTA"),
+ PINCTRL_PIN(95, "CNV_GNSS_SYSCK"),
+ PINCTRL_PIN(96, "EMMC_HIP_MON"),
+ PINCTRL_PIN(97, "CNV_BRI_DT"),
+ PINCTRL_PIN(98, "CNV_BRI_RSP"),
+ PINCTRL_PIN(99, "CNV_RGI_DT"),
+ PINCTRL_PIN(100, "CNV_RGI_RSP"),
+ PINCTRL_PIN(101, "CNV_MFUART2_RXD"),
+ PINCTRL_PIN(102, "CNV_MFUART2_TXD"),
+ PINCTRL_PIN(103, "GPP_F_10"),
+ PINCTRL_PIN(104, "EMMC_CMD"),
+ PINCTRL_PIN(105, "EMMC_DATA_0"),
+ PINCTRL_PIN(106, "EMMC_DATA_1"),
+ PINCTRL_PIN(107, "EMMC_DATA_2"),
+ PINCTRL_PIN(108, "EMMC_DATA_3"),
+ PINCTRL_PIN(109, "EMMC_DATA_4"),
+ PINCTRL_PIN(110, "EMMC_DATA_5"),
+ PINCTRL_PIN(111, "EMMC_DATA_6"),
+ PINCTRL_PIN(112, "EMMC_DATA_7"),
+ PINCTRL_PIN(113, "EMMC_RCLK"),
+ PINCTRL_PIN(114, "EMMC_CLK"),
+ PINCTRL_PIN(115, "EMMC_RESETB"),
+ PINCTRL_PIN(116, "A4WP_PRESENT"),
+ /* GPP_H */
+ PINCTRL_PIN(117, "SSP2_SCLK"),
+ PINCTRL_PIN(118, "SSP2_SFRM"),
+ PINCTRL_PIN(119, "SSP2_TXD"),
+ PINCTRL_PIN(120, "SSP2_RXD"),
+ PINCTRL_PIN(121, "I2C2_SDA"),
+ PINCTRL_PIN(122, "I2C2_SCL"),
+ PINCTRL_PIN(123, "I2C3_SDA"),
+ PINCTRL_PIN(124, "I2C3_SCL"),
+ PINCTRL_PIN(125, "I2C4_SDA"),
+ PINCTRL_PIN(126, "I2C4_SCL"),
+ PINCTRL_PIN(127, "I2C5_SDA"),
+ PINCTRL_PIN(128, "I2C5_SCL"),
+ PINCTRL_PIN(129, "M2_SKT2_CFG_0"),
+ PINCTRL_PIN(130, "M2_SKT2_CFG_1"),
+ PINCTRL_PIN(131, "M2_SKT2_CFG_2"),
+ PINCTRL_PIN(132, "M2_SKT2_CFG_3"),
+ PINCTRL_PIN(133, "DDPF_CTRLCLK"),
+ PINCTRL_PIN(134, "DDPF_CTRLDATA"),
+ PINCTRL_PIN(135, "CPU_VCCIO_PWR_GATEB"),
+ PINCTRL_PIN(136, "TIMESYNC_0"),
+ PINCTRL_PIN(137, "IMGCLKOUT_1"),
+ PINCTRL_PIN(138, "GPPC_H_21"),
+ PINCTRL_PIN(139, "GPPC_H_22"),
+ PINCTRL_PIN(140, "GPPC_H_23"),
+ /* vGPIO */
+ PINCTRL_PIN(141, "CNV_BTEN"),
+ PINCTRL_PIN(142, "CNV_GNEN"),
+ PINCTRL_PIN(143, "CNV_WFEN"),
+ PINCTRL_PIN(144, "CNV_WCEN"),
+ PINCTRL_PIN(145, "CNV_BT_HOST_WAKEB"),
+ PINCTRL_PIN(146, "CNV_BT_IF_SELECT"),
+ PINCTRL_PIN(147, "vCNV_BT_UART_TXD"),
+ PINCTRL_PIN(148, "vCNV_BT_UART_RXD"),
+ PINCTRL_PIN(149, "vCNV_BT_UART_CTS_B"),
+ PINCTRL_PIN(150, "vCNV_BT_UART_RTS_B"),
+ PINCTRL_PIN(151, "vCNV_MFUART1_TXD"),
+ PINCTRL_PIN(152, "vCNV_MFUART1_RXD"),
+ PINCTRL_PIN(153, "vCNV_MFUART1_CTS_B"),
+ PINCTRL_PIN(154, "vCNV_MFUART1_RTS_B"),
+ PINCTRL_PIN(155, "vCNV_GNSS_UART_TXD"),
+ PINCTRL_PIN(156, "vCNV_GNSS_UART_RXD"),
+ PINCTRL_PIN(157, "vCNV_GNSS_UART_CTS_B"),
+ PINCTRL_PIN(158, "vCNV_GNSS_UART_RTS_B"),
+ PINCTRL_PIN(159, "vUART0_TXD"),
+ PINCTRL_PIN(160, "vUART0_RXD"),
+ PINCTRL_PIN(161, "vUART0_CTS_B"),
+ PINCTRL_PIN(162, "vUART0_RTS_B"),
+ PINCTRL_PIN(163, "vISH_UART0_TXD"),
+ PINCTRL_PIN(164, "vISH_UART0_RXD"),
+ PINCTRL_PIN(165, "vISH_UART0_CTS_B"),
+ PINCTRL_PIN(166, "vISH_UART0_RTS_B"),
+ PINCTRL_PIN(167, "vISH_UART1_TXD"),
+ PINCTRL_PIN(168, "vISH_UART1_RXD"),
+ PINCTRL_PIN(169, "vISH_UART1_CTS_B"),
+ PINCTRL_PIN(170, "vISH_UART1_RTS_B"),
+ PINCTRL_PIN(171, "vCNV_BT_I2S_BCLK"),
+ PINCTRL_PIN(172, "vCNV_BT_I2S_WS_SYNC"),
+ PINCTRL_PIN(173, "vCNV_BT_I2S_SDO"),
+ PINCTRL_PIN(174, "vCNV_BT_I2S_SDI"),
+ PINCTRL_PIN(175, "vSSP2_SCLK"),
+ PINCTRL_PIN(176, "vSSP2_SFRM"),
+ PINCTRL_PIN(177, "vSSP2_TXD"),
+ PINCTRL_PIN(178, "vSSP2_RXD"),
+ PINCTRL_PIN(179, "vCNV_GNSS_HOST_WAKEB"),
+ PINCTRL_PIN(180, "vSD3_CD_B"),
+ /* GPP_C */
+ PINCTRL_PIN(181, "SMBCLK"),
+ PINCTRL_PIN(182, "SMBDATA"),
+ PINCTRL_PIN(183, "SMBALERTB"),
+ PINCTRL_PIN(184, "SML0CLK"),
+ PINCTRL_PIN(185, "SML0DATA"),
+ PINCTRL_PIN(186, "SML0ALERTB"),
+ PINCTRL_PIN(187, "SML1CLK"),
+ PINCTRL_PIN(188, "SML1DATA"),
+ PINCTRL_PIN(189, "UART0_RXD"),
+ PINCTRL_PIN(190, "UART0_TXD"),
+ PINCTRL_PIN(191, "UART0_RTSB"),
+ PINCTRL_PIN(192, "UART0_CTSB"),
+ PINCTRL_PIN(193, "UART1_RXD"),
+ PINCTRL_PIN(194, "UART1_TXD"),
+ PINCTRL_PIN(195, "UART1_RTSB"),
+ PINCTRL_PIN(196, "UART1_CTSB"),
+ PINCTRL_PIN(197, "I2C0_SDA"),
+ PINCTRL_PIN(198, "I2C0_SCL"),
+ PINCTRL_PIN(199, "I2C1_SDA"),
+ PINCTRL_PIN(200, "I2C1_SCL"),
+ PINCTRL_PIN(201, "UART2_RXD"),
+ PINCTRL_PIN(202, "UART2_TXD"),
+ PINCTRL_PIN(203, "UART2_RTSB"),
+ PINCTRL_PIN(204, "UART2_CTSB"),
+ /* GPP_E */
+ PINCTRL_PIN(205, "SATAXPCIE_0"),
+ PINCTRL_PIN(206, "SATAXPCIE_1"),
+ PINCTRL_PIN(207, "SATAXPCIE_2"),
+ PINCTRL_PIN(208, "CPU_GP_0"),
+ PINCTRL_PIN(209, "SATA_DEVSLP_0"),
+ PINCTRL_PIN(210, "SATA_DEVSLP_1"),
+ PINCTRL_PIN(211, "SATA_DEVSLP_2"),
+ PINCTRL_PIN(212, "CPU_GP_1"),
+ PINCTRL_PIN(213, "SATA_LEDB"),
+ PINCTRL_PIN(214, "USB2_OCB_0"),
+ PINCTRL_PIN(215, "USB2_OCB_1"),
+ PINCTRL_PIN(216, "USB2_OCB_2"),
+ PINCTRL_PIN(217, "USB2_OCB_3"),
+ PINCTRL_PIN(218, "DDSP_HPD_0"),
+ PINCTRL_PIN(219, "DDSP_HPD_1"),
+ PINCTRL_PIN(220, "DDSP_HPD_2"),
+ PINCTRL_PIN(221, "DDSP_HPD_3"),
+ PINCTRL_PIN(222, "EDP_HPD"),
+ PINCTRL_PIN(223, "DDPB_CTRLCLK"),
+ PINCTRL_PIN(224, "DDPB_CTRLDATA"),
+ PINCTRL_PIN(225, "DDPC_CTRLCLK"),
+ PINCTRL_PIN(226, "DDPC_CTRLDATA"),
+ PINCTRL_PIN(227, "DDPD_CTRLCLK"),
+ PINCTRL_PIN(228, "DDPD_CTRLDATA"),
+ /* JTAG */
+ PINCTRL_PIN(229, "JTAG_TDO"),
+ PINCTRL_PIN(230, "JTAGX"),
+ PINCTRL_PIN(231, "PRDYB"),
+ PINCTRL_PIN(232, "PREQB"),
+ PINCTRL_PIN(233, "CPU_TRSTB"),
+ PINCTRL_PIN(234, "JTAG_TDI"),
+ PINCTRL_PIN(235, "JTAG_TMS"),
+ PINCTRL_PIN(236, "JTAG_TCK"),
+ PINCTRL_PIN(237, "ITP_PMODE"),
+ /* HVCMOS */
+ PINCTRL_PIN(238, "L_BKLTEN"),
+ PINCTRL_PIN(239, "L_BKLTCTL"),
+ PINCTRL_PIN(240, "L_VDDEN"),
+ PINCTRL_PIN(241, "SYS_PWROK"),
+ PINCTRL_PIN(242, "SYS_RESETB"),
+ PINCTRL_PIN(243, "MLK_RSTB"),
+};
+
+static const unsigned int cnllp_spi0_pins[] = { 40, 41, 42, 43, 7 };
+static const unsigned int cnllp_spi0_modes[] = { 1, 1, 1, 1, 2 };
+static const unsigned int cnllp_spi1_pins[] = { 44, 45, 46, 47, 11 };
+static const unsigned int cnllp_spi1_modes[] = { 1, 1, 1, 1, 2 };
+static const unsigned int cnllp_spi2_pins[] = { 77, 78, 79, 80, 83 };
+static const unsigned int cnllp_spi2_modes[] = { 3, 3, 3, 3, 2 };
+
+static const unsigned int cnllp_i2c0_pins[] = { 197, 198 };
+static const unsigned int cnllp_i2c1_pins[] = { 199, 200 };
+static const unsigned int cnllp_i2c2_pins[] = { 121, 122 };
+static const unsigned int cnllp_i2c3_pins[] = { 123, 124 };
+static const unsigned int cnllp_i2c4_pins[] = { 125, 126 };
+static const unsigned int cnllp_i2c5_pins[] = { 127, 128 };
+
+static const unsigned int cnllp_uart0_pins[] = { 189, 190, 191, 192 };
+static const unsigned int cnllp_uart1_pins[] = { 193, 194, 195, 196 };
+static const unsigned int cnllp_uart2_pins[] = { 201, 202, 203, 204 };
+
+static const struct intel_pingroup cnllp_groups[] = {
+ PIN_GROUP("spi0_grp", cnllp_spi0_pins, cnllp_spi0_modes),
+ PIN_GROUP("spi1_grp", cnllp_spi1_pins, cnllp_spi1_modes),
+ PIN_GROUP("spi2_grp", cnllp_spi2_pins, cnllp_spi2_modes),
+ PIN_GROUP("i2c0_grp", cnllp_i2c0_pins, 1),
+ PIN_GROUP("i2c1_grp", cnllp_i2c1_pins, 1),
+ PIN_GROUP("i2c2_grp", cnllp_i2c2_pins, 1),
+ PIN_GROUP("i2c3_grp", cnllp_i2c3_pins, 1),
+ PIN_GROUP("i2c4_grp", cnllp_i2c4_pins, 1),
+ PIN_GROUP("i2c5_grp", cnllp_i2c5_pins, 1),
+ PIN_GROUP("uart0_grp", cnllp_uart0_pins, 1),
+ PIN_GROUP("uart1_grp", cnllp_uart1_pins, 1),
+ PIN_GROUP("uart2_grp", cnllp_uart2_pins, 1),
+};
+
+static const char * const cnllp_spi0_groups[] = { "spi0_grp" };
+static const char * const cnllp_spi1_groups[] = { "spi1_grp" };
+static const char * const cnllp_spi2_groups[] = { "spi2_grp" };
+static const char * const cnllp_i2c0_groups[] = { "i2c0_grp" };
+static const char * const cnllp_i2c1_groups[] = { "i2c1_grp" };
+static const char * const cnllp_i2c2_groups[] = { "i2c2_grp" };
+static const char * const cnllp_i2c3_groups[] = { "i2c3_grp" };
+static const char * const cnllp_i2c4_groups[] = { "i2c4_grp" };
+static const char * const cnllp_i2c5_groups[] = { "i2c5_grp" };
+static const char * const cnllp_uart0_groups[] = { "uart0_grp" };
+static const char * const cnllp_uart1_groups[] = { "uart1_grp" };
+static const char * const cnllp_uart2_groups[] = { "uart2_grp" };
+
+static const struct intel_function cnllp_functions[] = {
+ FUNCTION("spi0", cnllp_spi0_groups),
+ FUNCTION("spi1", cnllp_spi1_groups),
+ FUNCTION("spi2", cnllp_spi2_groups),
+ FUNCTION("i2c0", cnllp_i2c0_groups),
+ FUNCTION("i2c1", cnllp_i2c1_groups),
+ FUNCTION("i2c2", cnllp_i2c2_groups),
+ FUNCTION("i2c3", cnllp_i2c3_groups),
+ FUNCTION("i2c4", cnllp_i2c4_groups),
+ FUNCTION("i2c5", cnllp_i2c5_groups),
+ FUNCTION("uart0", cnllp_uart0_groups),
+ FUNCTION("uart1", cnllp_uart1_groups),
+ FUNCTION("uart2", cnllp_uart2_groups),
+};
+
+static const struct intel_padgroup cnllp_community0_gpps[] = {
+ CNL_GPP(0, 0, 24), /* GPP_A */
+ CNL_GPP(1, 25, 50), /* GPP_B */
+ CNL_GPP(2, 51, 58), /* GPP_G */
+ CNL_GPP(3, 59, 67), /* SPI */
+};
+
+static const struct intel_padgroup cnllp_community1_gpps[] = {
+ CNL_GPP(0, 68, 92), /* GPP_D */
+ CNL_GPP(1, 93, 116), /* GPP_F */
+ CNL_GPP(2, 117, 140), /* GPP_H */
+ CNL_GPP(3, 141, 172), /* vGPIO */
+ CNL_GPP(4, 173, 180), /* vGPIO */
+};
+
+static const struct intel_padgroup cnllp_community4_gpps[] = {
+ CNL_GPP(0, 181, 204), /* GPP_C */
+ CNL_GPP(1, 205, 228), /* GPP_E */
+ CNL_GPP(2, 229, 237), /* JTAG */
+ CNL_GPP(3, 238, 243), /* HVCMOS */
+};
+
+static const struct intel_community cnllp_communities[] = {
+ CNL_COMMUNITY(0, 0, 67, cnllp_community0_gpps),
+ CNL_COMMUNITY(1, 68, 180, cnllp_community1_gpps),
+ CNL_COMMUNITY(2, 181, 243, cnllp_community4_gpps),
+};
+
+static const struct intel_pinctrl_soc_data cnllp_soc_data = {
+ .pins = cnllp_pins,
+ .npins = ARRAY_SIZE(cnllp_pins),
+ .groups = cnllp_groups,
+ .ngroups = ARRAY_SIZE(cnllp_groups),
+ .functions = cnllp_functions,
+ .nfunctions = ARRAY_SIZE(cnllp_functions),
+ .communities = cnllp_communities,
+ .ncommunities = ARRAY_SIZE(cnllp_communities),
+};
+
+static const struct acpi_device_id cnl_pinctrl_acpi_match[] = {
+ { "INT34BB", (kernel_ulong_t)&cnllp_soc_data },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, cnl_pinctrl_acpi_match);
+
+static int cnl_pinctrl_probe(struct platform_device *pdev)
+{
+ const struct intel_pinctrl_soc_data *soc_data;
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(cnl_pinctrl_acpi_match, &pdev->dev);
+ if (!id || !id->driver_data)
+ return -ENODEV;
+
+ soc_data = (const struct intel_pinctrl_soc_data *)id->driver_data;
+ return intel_pinctrl_probe(pdev, soc_data);
+}
+
+static const struct dev_pm_ops cnl_pinctrl_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(intel_pinctrl_suspend,
+ intel_pinctrl_resume)
+};
+
+static struct platform_driver cnl_pinctrl_driver = {
+ .probe = cnl_pinctrl_probe,
+ .driver = {
+ .name = "cannonlake-pinctrl",
+ .acpi_match_table = cnl_pinctrl_acpi_match,
+ .pm = &cnl_pinctrl_pm_ops,
+ },
+};
+
+module_platform_driver(cnl_pinctrl_driver);
+
+MODULE_AUTHOR("Mika Westerberg <[email protected]>");
+MODULE_DESCRIPTION("Intel Cannon Lake PCH pinctrl/GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 592b465e981e..6dc1096d3d34 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -117,6 +117,7 @@ struct intel_pinctrl {
};
#define pin_to_padno(c, p) ((p) - (c)->pin_base)
+#define padgroup_offset(g, p) ((p) - (g)->base)
static struct intel_community *intel_get_community(struct intel_pinctrl *pctrl,
unsigned pin)
@@ -135,6 +136,22 @@ static struct intel_community *intel_get_community(struct intel_pinctrl *pctrl,
return NULL;
}
+static const struct intel_padgroup *
+intel_community_get_padgroup(const struct intel_community *community,
+ unsigned pin)
+{
+ int i;
+
+ for (i = 0; i < community->ngpps; i++) {
+ const struct intel_padgroup *padgrp = &community->gpps[i];
+
+ if (pin >= padgrp->base && pin < padgrp->base + padgrp->size)
+ return padgrp;
+ }
+
+ return NULL;
+}
+
static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl, unsigned pin,
unsigned reg)
{
@@ -158,7 +175,8 @@ static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl, unsigned pin,
static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin)
{
const struct intel_community *community;
- unsigned padno, gpp, offset, group;
+ const struct intel_padgroup *padgrp;
+ unsigned gpp, offset, gpp_offset;
void __iomem *padown;
community = intel_get_community(pctrl, pin);
@@ -167,19 +185,23 @@ static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin)
if (!community->padown_offset)
return true;
- padno = pin_to_padno(community, pin);
- group = padno / community->gpp_size;
- gpp = PADOWN_GPP(padno % community->gpp_size);
- offset = community->padown_offset + 0x10 * group + gpp * 4;
+ padgrp = intel_community_get_padgroup(community, pin);
+ if (!padgrp)
+ return false;
+
+ gpp_offset = padgroup_offset(padgrp, pin);
+ gpp = PADOWN_GPP(gpp_offset);
+ offset = community->padown_offset + padgrp->padown_num * 4 + gpp * 4;
padown = community->regs + offset;
- return !(readl(padown) & PADOWN_MASK(padno));
+ return !(readl(padown) & PADOWN_MASK(gpp_offset));
}
static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned pin)
{
const struct intel_community *community;
- unsigned padno, gpp, offset;
+ const struct intel_padgroup *padgrp;
+ unsigned offset, gpp_offset;
void __iomem *hostown;
community = intel_get_community(pctrl, pin);
@@ -188,18 +210,22 @@ static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned pin)
if (!community->hostown_offset)
return false;
- padno = pin_to_padno(community, pin);
- gpp = padno / community->gpp_size;
- offset = community->hostown_offset + gpp * 4;
+ padgrp = intel_community_get_padgroup(community, pin);
+ if (!padgrp)
+ return true;
+
+ gpp_offset = padgroup_offset(padgrp, pin);
+ offset = community->hostown_offset + padgrp->reg_num * 4;
hostown = community->regs + offset;
- return !(readl(hostown) & BIT(padno % community->gpp_size));
+ return !(readl(hostown) & BIT(gpp_offset));
}
static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
{
struct intel_community *community;
- unsigned padno, gpp, offset;
+ const struct intel_padgroup *padgrp;
+ unsigned offset, gpp_offset;
u32 value;
community = intel_get_community(pctrl, pin);
@@ -208,22 +234,25 @@ static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
if (!community->padcfglock_offset)
return false;
- padno = pin_to_padno(community, pin);
- gpp = padno / community->gpp_size;
+ padgrp = intel_community_get_padgroup(community, pin);
+ if (!padgrp)
+ return true;
+
+ gpp_offset = padgroup_offset(padgrp, pin);
/*
* If PADCFGLOCK and PADCFGLOCKTX bits are both clear for this pad,
* the pad is considered unlocked. Any other case means that it is
* either fully or partially locked and we don't touch it.
*/
- offset = community->padcfglock_offset + gpp * 8;
+ offset = community->padcfglock_offset + padgrp->reg_num * 8;
value = readl(community->regs + offset);
- if (value & BIT(pin % community->gpp_size))
+ if (value & BIT(gpp_offset))
return true;
- offset = community->padcfglock_offset + 4 + gpp * 8;
+ offset = community->padcfglock_offset + 4 + padgrp->reg_num * 8;
value = readl(community->regs + offset);
- if (value & BIT(pin % community->gpp_size))
+ if (value & BIT(gpp_offset))
return true;
return false;
@@ -369,7 +398,11 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
value = readl(padcfg0);
value &= ~PADCFG0_PMODE_MASK;
- value |= grp->mode << PADCFG0_PMODE_SHIFT;
+
+ if (grp->modes)
+ value |= grp->modes[i] << PADCFG0_PMODE_SHIFT;
+ else
+ value |= grp->mode << PADCFG0_PMODE_SHIFT;
writel(value, padcfg0);
}
@@ -777,18 +810,22 @@ static void intel_gpio_irq_ack(struct irq_data *d)
const struct intel_community *community;
unsigned pin = irqd_to_hwirq(d);
- raw_spin_lock(&pctrl->lock);
-
community = intel_get_community(pctrl, pin);
if (community) {
- unsigned padno = pin_to_padno(community, pin);
- unsigned gpp_offset = padno % community->gpp_size;
- unsigned gpp = padno / community->gpp_size;
+ const struct intel_padgroup *padgrp;
+ unsigned gpp, gpp_offset;
+
+ padgrp = intel_community_get_padgroup(community, pin);
+ if (!padgrp)
+ return;
+ gpp = padgrp->reg_num;
+ gpp_offset = padgroup_offset(padgrp, pin);
+
+ raw_spin_lock(&pctrl->lock);
writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
+ raw_spin_unlock(&pctrl->lock);
}
-
- raw_spin_unlock(&pctrl->lock);
}
static void intel_gpio_irq_enable(struct irq_data *d)
@@ -797,27 +834,30 @@ static void intel_gpio_irq_enable(struct irq_data *d)
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
const struct intel_community *community;
unsigned pin = irqd_to_hwirq(d);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&pctrl->lock, flags);
community = intel_get_community(pctrl, pin);
if (community) {
- unsigned padno = pin_to_padno(community, pin);
- unsigned gpp_size = community->gpp_size;
- unsigned gpp_offset = padno % gpp_size;
- unsigned gpp = padno / gpp_size;
+ const struct intel_padgroup *padgrp;
+ unsigned gpp, gpp_offset;
+ unsigned long flags;
u32 value;
+ padgrp = intel_community_get_padgroup(community, pin);
+ if (!padgrp)
+ return;
+
+ gpp = padgrp->reg_num;
+ gpp_offset = padgroup_offset(padgrp, pin);
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
/* Clear interrupt status first to avoid unexpected interrupt */
writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
value = readl(community->regs + community->ie_offset + gpp * 4);
value |= BIT(gpp_offset);
writel(value, community->regs + community->ie_offset + gpp * 4);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
-
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
@@ -826,28 +866,33 @@ static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
const struct intel_community *community;
unsigned pin = irqd_to_hwirq(d);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&pctrl->lock, flags);
community = intel_get_community(pctrl, pin);
if (community) {
- unsigned padno = pin_to_padno(community, pin);
- unsigned gpp_offset = padno % community->gpp_size;
- unsigned gpp = padno / community->gpp_size;
+ const struct intel_padgroup *padgrp;
+ unsigned gpp, gpp_offset;
+ unsigned long flags;
void __iomem *reg;
u32 value;
+ padgrp = intel_community_get_padgroup(community, pin);
+ if (!padgrp)
+ return;
+
+ gpp = padgrp->reg_num;
+ gpp_offset = padgroup_offset(padgrp, pin);
+
reg = community->regs + community->ie_offset + gpp * 4;
+
+ raw_spin_lock_irqsave(&pctrl->lock, flags);
value = readl(reg);
if (mask)
value &= ~BIT(gpp_offset);
else
value |= BIT(gpp_offset);
writel(value, reg);
+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
-
- raw_spin_unlock_irqrestore(&pctrl->lock, flags);
}
static void intel_gpio_irq_mask(struct irq_data *d)
@@ -938,23 +983,20 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
int gpp;
for (gpp = 0; gpp < community->ngpps; gpp++) {
+ const struct intel_padgroup *padgrp = &community->gpps[gpp];
unsigned long pending, enabled, gpp_offset;
- pending = readl(community->regs + GPI_IS + gpp * 4);
+ pending = readl(community->regs + GPI_IS + padgrp->reg_num * 4);
enabled = readl(community->regs + community->ie_offset +
- gpp * 4);
+ padgrp->reg_num * 4);
/* Only interrupts that are enabled */
pending &= enabled;
- for_each_set_bit(gpp_offset, &pending, community->gpp_size) {
+ for_each_set_bit(gpp_offset, &pending, padgrp->size) {
unsigned padno, irq;
- /*
- * The last group in community can have less pins
- * than NPADS_IN_GPP.
- */
- padno = gpp_offset + gpp * community->gpp_size;
+ padno = padgrp->base - community->pin_base + gpp_offset;
if (padno >= community->npins)
break;
@@ -1045,6 +1087,56 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
return 0;
}
+static int intel_pinctrl_add_padgroups(struct intel_pinctrl *pctrl,
+ struct intel_community *community)
+{
+ struct intel_padgroup *gpps;
+ unsigned npins = community->npins;
+ unsigned padown_num = 0;
+ size_t ngpps, i;
+
+ if (community->gpps)
+ ngpps = community->ngpps;
+ else
+ ngpps = DIV_ROUND_UP(community->npins, community->gpp_size);
+
+ gpps = devm_kcalloc(pctrl->dev, ngpps, sizeof(*gpps), GFP_KERNEL);
+ if (!gpps)
+ return -ENOMEM;
+
+ for (i = 0; i < ngpps; i++) {
+ if (community->gpps) {
+ gpps[i] = community->gpps[i];
+ } else {
+ unsigned gpp_size = community->gpp_size;
+
+ gpps[i].reg_num = i;
+ gpps[i].base = community->pin_base + i * gpp_size;
+ gpps[i].size = min(gpp_size, npins);
+ npins -= gpps[i].size;
+ }
+
+ if (gpps[i].size > 32)
+ return -EINVAL;
+
+ gpps[i].padown_num = padown_num;
+
+ /*
+ * In older hardware the number of padown registers per
+ * group is fixed regardless of the group size.
+ */
+ if (community->gpp_num_padown_regs)
+ padown_num += community->gpp_num_padown_regs;
+ else
+ padown_num += DIV_ROUND_UP(gpps[i].size * 4, 32);
+ }
+
+ community->ngpps = ngpps;
+ community->gpps = gpps;
+
+ return 0;
+}
+
static int intel_pinctrl_pm_init(struct intel_pinctrl *pctrl)
{
#ifdef CONFIG_PM_SLEEP
@@ -1142,8 +1234,10 @@ int intel_pinctrl_probe(struct platform_device *pdev,
community->regs = regs;
community->pad_regs = regs + padbar;
- community->ngpps = DIV_ROUND_UP(community->npins,
- community->gpp_size);
+
+ ret = intel_pinctrl_add_padgroups(pctrl, community);
+ if (ret)
+ return ret;
}
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index fe9521f345b5..7fdb07753c2d 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -22,13 +22,16 @@ struct device;
* @name: Name of the groups
* @pins: All pins in this group
* @npins: Number of pins in this groups
- * @mode: Native mode in which the group is muxed out @pins
+ * @mode: Native mode in which the group is muxed out @pins. Used if @modes
+ * is %NULL.
+ * @modes: If not %NULL, this holds the mode for each pin in @pins
*/
struct intel_pingroup {
const char *name;
const unsigned *pins;
size_t npins;
unsigned short mode;
+ const unsigned *modes;
};
/**
@@ -44,6 +47,23 @@ struct intel_function {
};
/**
+ * struct intel_padgroup - Hardware pad group information
+ * @reg_num: GPI_IS register number
+ * @base: Starting pin of this group
+ * @size: Size of this group (maximum is 32).
+ * @padown_num: PAD_OWN register number (assigned by the core driver)
+ *
+ * If pad groups of a community are not the same size, use this structure
+ * to specify them.
+ */
+struct intel_padgroup {
+ unsigned reg_num;
+ unsigned base;
+ unsigned size;
+ unsigned padown_num;
+};
+
+/**
* struct intel_community - Intel pin community description
* @barno: MMIO BAR number where registers for this community reside
* @padown_offset: Register offset of PAD_OWN register from @regs. If %0
@@ -56,13 +76,22 @@ struct intel_function {
* @ie_offset: Register offset of GPI_IE from @regs.
* @pin_base: Starting pin of pins in this community
* @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK,
- * HOSTSW_OWN, GPI_IS, GPI_IE, etc.
+ * HOSTSW_OWN, GPI_IS, GPI_IE, etc. Used when @gpps is %NULL.
+ * @gpp_num_padown_regs: Number of pad registers each pad group consumes at
+ * minimum. Use %0 if the number of registers can be
+ * determined by the size of the group.
* @npins: Number of pins in this community
* @features: Additional features supported by the hardware
+ * @gpps: Pad groups if the controller has variable size pad groups
+ * @ngpps: Number of pad groups in this community
* @regs: Community specific common registers (reserved for core driver)
* @pad_regs: Community specific pad registers (reserved for core driver)
- * @ngpps: Number of groups (hw groups) in this community (reserved for
- * core driver)
+ *
+ * On most Intel GPIO host controllers this driver supports, each pad group
+ * is of equal size (except possibly the last one). In that case the client
+ * driver can simply fill in @gpp_size and let the core driver handle the
+ * rest. If the controller has pad groups of variable size, the client
+ * driver can pass custom @gpps and @ngpps instead.
*/
struct intel_community {
unsigned barno;
@@ -72,23 +101,37 @@ struct intel_community {
unsigned ie_offset;
unsigned pin_base;
unsigned gpp_size;
+ unsigned gpp_num_padown_regs;
size_t npins;
unsigned features;
+ const struct intel_padgroup *gpps;
+ size_t ngpps;
+ /* Reserved for the core driver */
void __iomem *regs;
void __iomem *pad_regs;
- size_t ngpps;
};
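A hedged sketch of the variable-size path the new @gpps/@ngpps fields enable: the group layout below (three unequal groups, the sample_* names and register numbers) is invented for illustration, and the structure is a minimal local copy so the example builds on its own rather than against this header.

#include <stdio.h>
#include <stddef.h>

/* Minimal local copy of struct intel_padgroup, for a standalone example */
struct intel_padgroup {
	unsigned int reg_num;
	unsigned int base;
	unsigned int size;
	unsigned int padown_num;	/* filled in by the core driver */
};

#define ARRAY_SIZE(a)		(sizeof(a) / sizeof((a)[0]))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Hypothetical community with pad groups of unequal size */
static struct intel_padgroup sample_gpps[] = {
	{ .reg_num = 0, .base = 0,  .size = 25 },
	{ .reg_num = 1, .base = 25, .size = 18 },
	{ .reg_num = 2, .base = 43, .size = 8  },
};

int main(void)
{
	unsigned int padown_num = 0;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(sample_gpps); i++) {
		sample_gpps[i].padown_num = padown_num;
		/* 4 PAD_OWN bits per pad, packed into 32-bit registers */
		padown_num += DIV_ROUND_UP(sample_gpps[i].size * 4, 32);

		printf("group %u: base %u size %u padown_num %u\n",
		       sample_gpps[i].reg_num, sample_gpps[i].base,
		       sample_gpps[i].size, sample_gpps[i].padown_num);
	}

	return 0;
}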
/* Additional features supported by the hardware */
#define PINCTRL_FEATURE_DEBOUNCE BIT(0)
#define PINCTRL_FEATURE_1K_PD BIT(1)
-#define PIN_GROUP(n, p, m) \
- { \
- .name = (n), \
- .pins = (p), \
- .npins = ARRAY_SIZE((p)), \
- .mode = (m), \
+/**
+ * PIN_GROUP - Declare a pin group
+ * @n: Name of the group
+ * @p: An array of pins this group consists of
+ * @m: Mode to which the pins are put when this group is active. Can be
+ * either a single integer or an array of integers, in which case the
+ * mode is set per pin.
+ */
+#define PIN_GROUP(n, p, m) \
+ { \
+ .name = (n), \
+ .pins = (p), \
+ .npins = ARRAY_SIZE((p)), \
+ .mode = __builtin_choose_expr( \
+ __builtin_constant_p((m)), (m), 0), \
+ .modes = __builtin_choose_expr( \
+ __builtin_constant_p((m)), NULL, (m)), \
}
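To show how the reworked PIN_GROUP() distinguishes the two forms of @m, here is a standalone mock-up assuming GCC (which provides the __builtin_choose_expr/__builtin_constant_p builtins used above); the struct pingroup stand-in, the group names, pins and modes are all hypothetical.

#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* Local stand-in for struct intel_pingroup, just for this example */
struct pingroup {
	const char *name;
	const unsigned int *pins;
	size_t npins;
	unsigned short mode;
	const unsigned int *modes;
};

/* Same construction as the macro above, applied to the local struct */
#define PIN_GROUP(n, p, m)					\
	{							\
		.name = (n),					\
		.pins = (p),					\
		.npins = ARRAY_SIZE((p)),			\
		.mode = __builtin_choose_expr(			\
			__builtin_constant_p((m)), (m), 0),	\
		.modes = __builtin_choose_expr(			\
			__builtin_constant_p((m)), NULL, (m)),	\
	}

/* Hypothetical pins and modes */
static const unsigned int spi_pins[] = { 10, 11, 12, 13 };
static const unsigned int mixed_pins[] = { 20, 21 };
static const unsigned int mixed_modes[] = { 1, 2 };	/* one mode per pin */

static const struct pingroup groups[] = {
	PIN_GROUP("spi_grp", spi_pins, 1),		 /* single mode */
	PIN_GROUP("mixed_grp", mixed_pins, mixed_modes), /* per-pin modes */
};

int main(void)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(groups); i++)
		printf("%s: %s\n", groups[i].name,
		       groups[i].modes ? "per-pin modes" : "single mode");

	return 0;
}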
#define FUNCTION(n, g) \
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index 9877526c0807..8870a4100164 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -31,6 +31,7 @@
.hostown_offset = SPT_HOSTSW_OWN, \
.ie_offset = SPT_GPI_IE, \
.gpp_size = 24, \
+ .gpp_num_padown_regs = 4, \
.pin_base = (s), \
.npins = ((e) - (s) + 1), \
}
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 80fe3b48796c..fac9866311f3 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -11,18 +11,11 @@ config PINCTRL_MTK
# For ARMv7 SoCs
config PINCTRL_MT2701
bool "Mediatek MT2701 pin control"
- depends on MACH_MT2701 || COMPILE_TEST
+ depends on MACH_MT7623 || MACH_MT2701 || COMPILE_TEST
depends on OF
default MACH_MT2701
select PINCTRL_MTK
-config PINCTRL_MT7623
- bool "Mediatek MT7623 pin control"
- depends on MACH_MT7623 || COMPILE_TEST
- depends on OF
- default MACH_MT7623
- select PINCTRL_MTK_COMMON
-
config PINCTRL_MT8135
bool "Mediatek MT8135 pin control"
depends on MACH_MT8135 || COMPILE_TEST
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index 3e3390a14716..e59c613d4ddd 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -3,7 +3,6 @@ obj-y += pinctrl-mtk-common.o
# SoC Drivers
obj-$(CONFIG_PINCTRL_MT2701) += pinctrl-mt2701.o
-obj-$(CONFIG_PINCTRL_MT7623) += pinctrl-mt7623.o
obj-$(CONFIG_PINCTRL_MT8135) += pinctrl-mt8135.o
obj-$(CONFIG_PINCTRL_MT8127) += pinctrl-mt8127.o
obj-$(CONFIG_PINCTRL_MT8173) += pinctrl-mt8173.o
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt2701.c b/drivers/pinctrl/mediatek/pinctrl-mt2701.c
index 8d802fa7decd..f86f3b379607 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt2701.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt2701.c
@@ -565,6 +565,7 @@ static int mt2701_pinctrl_probe(struct platform_device *pdev)
static const struct of_device_id mt2701_pctrl_match[] = {
{ .compatible = "mediatek,mt2701-pinctrl", },
+ { .compatible = "mediatek,mt7623-pinctrl", },
{}
};
MODULE_DEVICE_TABLE(of, mt2701_pctrl_match);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7623.c b/drivers/pinctrl/mediatek/pinctrl-mt7623.c
deleted file mode 100644
index fa28dd6b871b..000000000000
--- a/drivers/pinctrl/mediatek/pinctrl-mt7623.c
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
- * Copyright (c) 2016 John Crispin <[email protected]>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <dt-bindings/pinctrl/mt65xx.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/regmap.h>
-
-#include "pinctrl-mtk-common.h"
-#include "pinctrl-mtk-mt7623.h"
-
-static const struct mtk_drv_group_desc mt7623_drv_grp[] = {
- /* 0E4E8SR 4/8/12/16 */
- MTK_DRV_GRP(4, 16, 1, 2, 4),
- /* 0E2E4SR 2/4/6/8 */
- MTK_DRV_GRP(2, 8, 1, 2, 2),
- /* E8E4E2 2/4/6/8/10/12/14/16 */
- MTK_DRV_GRP(2, 16, 0, 2, 2)
-};
-
-#define DRV_SEL0 0xf50
-#define DRV_SEL1 0xf60
-#define DRV_SEL2 0xf70
-#define DRV_SEL3 0xf80
-#define DRV_SEL4 0xf90
-#define DRV_SEL5 0xfa0
-#define DRV_SEL6 0xfb0
-#define DRV_SEL7 0xfe0
-#define DRV_SEL8 0xfd0
-#define DRV_SEL9 0xff0
-#define DRV_SEL10 0xf00
-
-#define MSDC0_CTRL0 0xcc0
-#define MSDC0_CTRL1 0xcd0
-#define MSDC0_CTRL2 0xce0
-#define MSDC0_CTRL3 0xcf0
-#define MSDC0_CTRL4 0xd00
-#define MSDC0_CTRL5 0xd10
-#define MSDC0_CTRL6 0xd20
-#define MSDC1_CTRL0 0xd30
-#define MSDC1_CTRL1 0xd40
-#define MSDC1_CTRL2 0xd50
-#define MSDC1_CTRL3 0xd60
-#define MSDC1_CTRL4 0xd70
-#define MSDC1_CTRL5 0xd80
-#define MSDC1_CTRL6 0xd90
-
-#define IES_EN0 0xb20
-#define IES_EN1 0xb30
-#define IES_EN2 0xb40
-
-#define SMT_EN0 0xb50
-#define SMT_EN1 0xb60
-#define SMT_EN2 0xb70
-
-static const struct mtk_pin_drv_grp mt7623_pin_drv[] = {
- MTK_PIN_DRV_GRP(0, DRV_SEL0, 0, 1),
- MTK_PIN_DRV_GRP(1, DRV_SEL0, 0, 1),
- MTK_PIN_DRV_GRP(2, DRV_SEL0, 0, 1),
- MTK_PIN_DRV_GRP(3, DRV_SEL0, 0, 1),
- MTK_PIN_DRV_GRP(4, DRV_SEL0, 0, 1),
- MTK_PIN_DRV_GRP(5, DRV_SEL0, 0, 1),
- MTK_PIN_DRV_GRP(6, DRV_SEL0, 0, 1),
- MTK_PIN_DRV_GRP(7, DRV_SEL0, 4, 1),
- MTK_PIN_DRV_GRP(8, DRV_SEL0, 4, 1),
- MTK_PIN_DRV_GRP(9, DRV_SEL0, 4, 1),
- MTK_PIN_DRV_GRP(10, DRV_SEL0, 8, 1),
- MTK_PIN_DRV_GRP(11, DRV_SEL0, 8, 1),
- MTK_PIN_DRV_GRP(12, DRV_SEL0, 8, 1),
- MTK_PIN_DRV_GRP(13, DRV_SEL0, 8, 1),
- MTK_PIN_DRV_GRP(14, DRV_SEL0, 12, 0),
- MTK_PIN_DRV_GRP(15, DRV_SEL0, 12, 0),
- MTK_PIN_DRV_GRP(18, DRV_SEL1, 4, 0),
- MTK_PIN_DRV_GRP(19, DRV_SEL1, 4, 0),
- MTK_PIN_DRV_GRP(20, DRV_SEL1, 4, 0),
- MTK_PIN_DRV_GRP(21, DRV_SEL1, 4, 0),
- MTK_PIN_DRV_GRP(22, DRV_SEL1, 8, 0),
- MTK_PIN_DRV_GRP(23, DRV_SEL1, 8, 0),
- MTK_PIN_DRV_GRP(24, DRV_SEL1, 8, 0),
- MTK_PIN_DRV_GRP(25, DRV_SEL1, 8, 0),
- MTK_PIN_DRV_GRP(26, DRV_SEL1, 8, 0),
- MTK_PIN_DRV_GRP(27, DRV_SEL1, 12, 0),
- MTK_PIN_DRV_GRP(28, DRV_SEL1, 12, 0),
- MTK_PIN_DRV_GRP(29, DRV_SEL1, 12, 0),
- MTK_PIN_DRV_GRP(33, DRV_SEL2, 0, 0),
- MTK_PIN_DRV_GRP(34, DRV_SEL2, 0, 0),
- MTK_PIN_DRV_GRP(35, DRV_SEL2, 0, 0),
- MTK_PIN_DRV_GRP(36, DRV_SEL2, 0, 0),
- MTK_PIN_DRV_GRP(37, DRV_SEL2, 0, 0),
- MTK_PIN_DRV_GRP(39, DRV_SEL2, 8, 1),
- MTK_PIN_DRV_GRP(40, DRV_SEL2, 8, 1),
- MTK_PIN_DRV_GRP(41, DRV_SEL2, 8, 1),
- MTK_PIN_DRV_GRP(42, DRV_SEL2, 8, 1),
- MTK_PIN_DRV_GRP(43, DRV_SEL2, 12, 0),
- MTK_PIN_DRV_GRP(44, DRV_SEL2, 12, 0),
- MTK_PIN_DRV_GRP(45, DRV_SEL2, 12, 0),
- MTK_PIN_DRV_GRP(47, DRV_SEL3, 0, 0),
- MTK_PIN_DRV_GRP(48, DRV_SEL3, 0, 0),
- MTK_PIN_DRV_GRP(49, DRV_SEL3, 4, 0),
- MTK_PIN_DRV_GRP(53, DRV_SEL3, 12, 0),
- MTK_PIN_DRV_GRP(54, DRV_SEL3, 12, 0),
- MTK_PIN_DRV_GRP(55, DRV_SEL3, 12, 0),
- MTK_PIN_DRV_GRP(56, DRV_SEL3, 12, 0),
- MTK_PIN_DRV_GRP(60, DRV_SEL4, 8, 1),
- MTK_PIN_DRV_GRP(61, DRV_SEL4, 8, 1),
- MTK_PIN_DRV_GRP(62, DRV_SEL4, 8, 1),
- MTK_PIN_DRV_GRP(63, DRV_SEL4, 12, 1),
- MTK_PIN_DRV_GRP(64, DRV_SEL4, 12, 1),
- MTK_PIN_DRV_GRP(65, DRV_SEL4, 12, 1),
- MTK_PIN_DRV_GRP(66, DRV_SEL5, 0, 1),
- MTK_PIN_DRV_GRP(67, DRV_SEL5, 0, 1),
- MTK_PIN_DRV_GRP(68, DRV_SEL5, 0, 1),
- MTK_PIN_DRV_GRP(69, DRV_SEL5, 0, 1),
- MTK_PIN_DRV_GRP(70, DRV_SEL5, 0, 1),
- MTK_PIN_DRV_GRP(71, DRV_SEL5, 0, 1),
- MTK_PIN_DRV_GRP(72, DRV_SEL3, 4, 0),
- MTK_PIN_DRV_GRP(73, DRV_SEL3, 4, 0),
- MTK_PIN_DRV_GRP(74, DRV_SEL3, 4, 0),
- MTK_PIN_DRV_GRP(83, DRV_SEL5, 0, 1),
- MTK_PIN_DRV_GRP(84, DRV_SEL5, 0, 1),
- MTK_PIN_DRV_GRP(105, MSDC1_CTRL1, 0, 1),
- MTK_PIN_DRV_GRP(106, MSDC1_CTRL0, 0, 1),
- MTK_PIN_DRV_GRP(107, MSDC1_CTRL2, 0, 1),
- MTK_PIN_DRV_GRP(108, MSDC1_CTRL2, 0, 1),
- MTK_PIN_DRV_GRP(109, MSDC1_CTRL2, 0, 1),
- MTK_PIN_DRV_GRP(110, MSDC1_CTRL2, 0, 1),
- MTK_PIN_DRV_GRP(111, MSDC0_CTRL2, 0, 1),
- MTK_PIN_DRV_GRP(112, MSDC0_CTRL2, 0, 1),
- MTK_PIN_DRV_GRP(113, MSDC0_CTRL2, 0, 1),
- MTK_PIN_DRV_GRP(114, MSDC0_CTRL2, 0, 1),
- MTK_PIN_DRV_GRP(115, MSDC0_CTRL2, 0, 1),
- MTK_PIN_DRV_GRP(116, MSDC0_CTRL1, 0, 1),
- MTK_PIN_DRV_GRP(117, MSDC0_CTRL0, 0, 1),
- MTK_PIN_DRV_GRP(118, MSDC0_CTRL2, 0, 1),
- MTK_PIN_DRV_GRP(119, MSDC0_CTRL2, 0, 1),
- MTK_PIN_DRV_GRP(120, MSDC0_CTRL2, 0, 1),
- MTK_PIN_DRV_GRP(121, MSDC0_CTRL2, 0, 1),
- MTK_PIN_DRV_GRP(126, DRV_SEL3, 4, 0),
- MTK_PIN_DRV_GRP(199, DRV_SEL0, 4, 1),
- MTK_PIN_DRV_GRP(200, DRV_SEL8, 0, 0),
- MTK_PIN_DRV_GRP(201, DRV_SEL8, 0, 0),
- MTK_PIN_DRV_GRP(203, DRV_SEL8, 4, 0),
- MTK_PIN_DRV_GRP(204, DRV_SEL8, 4, 0),
- MTK_PIN_DRV_GRP(205, DRV_SEL8, 4, 0),
- MTK_PIN_DRV_GRP(206, DRV_SEL8, 4, 0),
- MTK_PIN_DRV_GRP(207, DRV_SEL8, 4, 0),
- MTK_PIN_DRV_GRP(208, DRV_SEL8, 8, 0),
- MTK_PIN_DRV_GRP(209, DRV_SEL8, 8, 0),
- MTK_PIN_DRV_GRP(236, DRV_SEL9, 4, 0),
- MTK_PIN_DRV_GRP(237, DRV_SEL9, 4, 0),
- MTK_PIN_DRV_GRP(238, DRV_SEL9, 4, 0),
- MTK_PIN_DRV_GRP(239, DRV_SEL9, 4, 0),
- MTK_PIN_DRV_GRP(240, DRV_SEL9, 4, 0),
- MTK_PIN_DRV_GRP(241, DRV_SEL9, 4, 0),
- MTK_PIN_DRV_GRP(242, DRV_SEL9, 8, 0),
- MTK_PIN_DRV_GRP(243, DRV_SEL9, 8, 0),
- MTK_PIN_DRV_GRP(257, MSDC0_CTRL2, 0, 1),
- MTK_PIN_DRV_GRP(261, MSDC1_CTRL2, 0, 1),
- MTK_PIN_DRV_GRP(262, DRV_SEL10, 8, 0),
- MTK_PIN_DRV_GRP(263, DRV_SEL10, 8, 0),
- MTK_PIN_DRV_GRP(264, DRV_SEL10, 8, 0),
- MTK_PIN_DRV_GRP(265, DRV_SEL10, 8, 0),
- MTK_PIN_DRV_GRP(266, DRV_SEL10, 8, 0),
- MTK_PIN_DRV_GRP(267, DRV_SEL10, 8, 0),
- MTK_PIN_DRV_GRP(268, DRV_SEL10, 8, 0),
- MTK_PIN_DRV_GRP(269, DRV_SEL10, 8, 0),
- MTK_PIN_DRV_GRP(270, DRV_SEL10, 8, 0),
- MTK_PIN_DRV_GRP(271, DRV_SEL10, 8, 0),
- MTK_PIN_DRV_GRP(272, DRV_SEL10, 8, 0),
- MTK_PIN_DRV_GRP(274, DRV_SEL10, 8, 0),
- MTK_PIN_DRV_GRP(275, DRV_SEL10, 8, 0),
- MTK_PIN_DRV_GRP(276, DRV_SEL10, 8, 0),
- MTK_PIN_DRV_GRP(278, DRV_SEL2, 8, 1),
-};
-
-static const struct mtk_pin_spec_pupd_set_samereg mt7623_spec_pupd[] = {
- MTK_PIN_PUPD_SPEC_SR(105, MSDC1_CTRL1, 8, 9, 10),
- MTK_PIN_PUPD_SPEC_SR(106, MSDC1_CTRL0, 8, 9, 10),
- MTK_PIN_PUPD_SPEC_SR(107, MSDC1_CTRL3, 0, 1, 2),
- MTK_PIN_PUPD_SPEC_SR(108, MSDC1_CTRL3, 4, 5, 6),
- MTK_PIN_PUPD_SPEC_SR(109, MSDC1_CTRL3, 8, 9, 10),
- MTK_PIN_PUPD_SPEC_SR(110, MSDC1_CTRL3, 12, 13, 14),
- MTK_PIN_PUPD_SPEC_SR(111, MSDC0_CTRL4, 12, 13, 14),
- MTK_PIN_PUPD_SPEC_SR(112, MSDC0_CTRL4, 8, 9, 10),
- MTK_PIN_PUPD_SPEC_SR(113, MSDC0_CTRL4, 4, 5, 6),
- MTK_PIN_PUPD_SPEC_SR(114, MSDC0_CTRL4, 0, 1, 2),
- MTK_PIN_PUPD_SPEC_SR(115, MSDC0_CTRL5, 0, 1, 2),
- MTK_PIN_PUPD_SPEC_SR(116, MSDC0_CTRL1, 8, 9, 10),
- MTK_PIN_PUPD_SPEC_SR(117, MSDC0_CTRL0, 8, 9, 10),
- MTK_PIN_PUPD_SPEC_SR(118, MSDC0_CTRL3, 12, 13, 14),
- MTK_PIN_PUPD_SPEC_SR(119, MSDC0_CTRL3, 8, 9, 10),
- MTK_PIN_PUPD_SPEC_SR(120, MSDC0_CTRL3, 4, 5, 6),
- MTK_PIN_PUPD_SPEC_SR(121, MSDC0_CTRL3, 0, 1, 2),
-};
-
-static int mt7623_spec_pull_set(struct regmap *regmap, unsigned int pin,
- unsigned char align, bool isup, unsigned int r1r0)
-{
- return mtk_pctrl_spec_pull_set_samereg(regmap, mt7623_spec_pupd,
- ARRAY_SIZE(mt7623_spec_pupd), pin, align, isup, r1r0);
-}
-
-static const struct mtk_pin_ies_smt_set mt7623_ies_set[] = {
- MTK_PIN_IES_SMT_SPEC(0, 6, IES_EN0, 0),
- MTK_PIN_IES_SMT_SPEC(7, 9, IES_EN0, 1),
- MTK_PIN_IES_SMT_SPEC(10, 13, IES_EN0, 2),
- MTK_PIN_IES_SMT_SPEC(14, 15, IES_EN0, 3),
- MTK_PIN_IES_SMT_SPEC(18, 21, IES_EN0, 5),
- MTK_PIN_IES_SMT_SPEC(22, 26, IES_EN0, 6),
- MTK_PIN_IES_SMT_SPEC(27, 29, IES_EN0, 7),
- MTK_PIN_IES_SMT_SPEC(33, 37, IES_EN0, 8),
- MTK_PIN_IES_SMT_SPEC(39, 42, IES_EN0, 9),
- MTK_PIN_IES_SMT_SPEC(43, 45, IES_EN0, 10),
- MTK_PIN_IES_SMT_SPEC(47, 48, IES_EN0, 11),
- MTK_PIN_IES_SMT_SPEC(49, 49, IES_EN0, 12),
- MTK_PIN_IES_SMT_SPEC(53, 56, IES_EN0, 14),
- MTK_PIN_IES_SMT_SPEC(60, 62, IES_EN1, 0),
- MTK_PIN_IES_SMT_SPEC(63, 65, IES_EN1, 1),
- MTK_PIN_IES_SMT_SPEC(66, 71, IES_EN1, 2),
- MTK_PIN_IES_SMT_SPEC(72, 74, IES_EN0, 12),
- MTK_PIN_IES_SMT_SPEC(75, 76, IES_EN1, 3),
- MTK_PIN_IES_SMT_SPEC(83, 84, IES_EN1, 2),
- MTK_PIN_IES_SMT_SPEC(105, 121, MSDC1_CTRL1, 4),
- MTK_PIN_IES_SMT_SPEC(122, 125, IES_EN1, 7),
- MTK_PIN_IES_SMT_SPEC(126, 126, IES_EN0, 12),
- MTK_PIN_IES_SMT_SPEC(199, 201, IES_EN0, 1),
- MTK_PIN_IES_SMT_SPEC(203, 207, IES_EN2, 2),
- MTK_PIN_IES_SMT_SPEC(208, 209, IES_EN2, 3),
- MTK_PIN_IES_SMT_SPEC(236, 241, IES_EN2, 6),
- MTK_PIN_IES_SMT_SPEC(242, 243, IES_EN2, 7),
- MTK_PIN_IES_SMT_SPEC(261, 261, MSDC1_CTRL2, 4),
- MTK_PIN_IES_SMT_SPEC(262, 272, IES_EN2, 12),
- MTK_PIN_IES_SMT_SPEC(274, 276, IES_EN2, 12),
- MTK_PIN_IES_SMT_SPEC(278, 278, IES_EN2, 13),
-};
-
-static const struct mtk_pin_ies_smt_set mt7623_smt_set[] = {
- MTK_PIN_IES_SMT_SPEC(0, 6, SMT_EN0, 0),
- MTK_PIN_IES_SMT_SPEC(7, 9, SMT_EN0, 1),
- MTK_PIN_IES_SMT_SPEC(10, 13, SMT_EN0, 2),
- MTK_PIN_IES_SMT_SPEC(14, 15, SMT_EN0, 3),
- MTK_PIN_IES_SMT_SPEC(18, 21, SMT_EN0, 5),
- MTK_PIN_IES_SMT_SPEC(22, 26, SMT_EN0, 6),
- MTK_PIN_IES_SMT_SPEC(27, 29, SMT_EN0, 7),
- MTK_PIN_IES_SMT_SPEC(33, 37, SMT_EN0, 8),
- MTK_PIN_IES_SMT_SPEC(39, 42, SMT_EN0, 9),
- MTK_PIN_IES_SMT_SPEC(43, 45, SMT_EN0, 10),
- MTK_PIN_IES_SMT_SPEC(47, 48, SMT_EN0, 11),
- MTK_PIN_IES_SMT_SPEC(49, 49, SMT_EN0, 12),
- MTK_PIN_IES_SMT_SPEC(53, 56, SMT_EN0, 14),
- MTK_PIN_IES_SMT_SPEC(60, 62, SMT_EN1, 0),
- MTK_PIN_IES_SMT_SPEC(63, 65, SMT_EN1, 1),
- MTK_PIN_IES_SMT_SPEC(66, 71, SMT_EN1, 2),
- MTK_PIN_IES_SMT_SPEC(72, 74, SMT_EN0, 12),
- MTK_PIN_IES_SMT_SPEC(75, 76, SMT_EN1, 3),
- MTK_PIN_IES_SMT_SPEC(83, 84, SMT_EN1, 2),
- MTK_PIN_IES_SMT_SPEC(105, 106, MSDC1_CTRL1, 11),
- MTK_PIN_IES_SMT_SPEC(107, 107, MSDC1_CTRL3, 3),
- MTK_PIN_IES_SMT_SPEC(108, 108, MSDC1_CTRL3, 7),
- MTK_PIN_IES_SMT_SPEC(109, 109, MSDC1_CTRL3, 11),
- MTK_PIN_IES_SMT_SPEC(110, 111, MSDC1_CTRL3, 15),
- MTK_PIN_IES_SMT_SPEC(112, 112, MSDC0_CTRL4, 11),
- MTK_PIN_IES_SMT_SPEC(113, 113, MSDC0_CTRL4, 7),
- MTK_PIN_IES_SMT_SPEC(114, 115, MSDC0_CTRL4, 3),
- MTK_PIN_IES_SMT_SPEC(116, 117, MSDC0_CTRL1, 11),
- MTK_PIN_IES_SMT_SPEC(118, 118, MSDC0_CTRL3, 15),
- MTK_PIN_IES_SMT_SPEC(119, 119, MSDC0_CTRL3, 11),
- MTK_PIN_IES_SMT_SPEC(120, 120, MSDC0_CTRL3, 7),
- MTK_PIN_IES_SMT_SPEC(121, 121, MSDC0_CTRL3, 3),
- MTK_PIN_IES_SMT_SPEC(122, 125, SMT_EN1, 7),
- MTK_PIN_IES_SMT_SPEC(126, 126, SMT_EN0, 12),
- MTK_PIN_IES_SMT_SPEC(199, 201, SMT_EN0, 1),
- MTK_PIN_IES_SMT_SPEC(203, 207, SMT_EN2, 2),
- MTK_PIN_IES_SMT_SPEC(208, 209, SMT_EN2, 3),
- MTK_PIN_IES_SMT_SPEC(236, 241, SMT_EN2, 6),
- MTK_PIN_IES_SMT_SPEC(242, 243, SMT_EN2, 7),
- MTK_PIN_IES_SMT_SPEC(261, 261, MSDC1_CTRL6, 3),
- MTK_PIN_IES_SMT_SPEC(262, 272, SMT_EN2, 12),
- MTK_PIN_IES_SMT_SPEC(274, 276, SMT_EN2, 12),
- MTK_PIN_IES_SMT_SPEC(278, 278, SMT_EN2, 13),
-};
-
-static int mt7623_ies_smt_set(struct regmap *regmap, unsigned int pin,
- unsigned char align, int value, enum pin_config_param arg)
-{
- if (arg == PIN_CONFIG_INPUT_ENABLE)
- return mtk_pconf_spec_set_ies_smt_range(regmap, mt7623_ies_set,
- ARRAY_SIZE(mt7623_ies_set), pin, align, value);
- else if (arg == PIN_CONFIG_INPUT_SCHMITT_ENABLE)
- return mtk_pconf_spec_set_ies_smt_range(regmap, mt7623_smt_set,
- ARRAY_SIZE(mt7623_smt_set), pin, align, value);
- return -EINVAL;
-}
-
-static const struct mtk_pinctrl_devdata mt7623_pinctrl_data = {
- .pins = mtk_pins_mt7623,
- .npins = ARRAY_SIZE(mtk_pins_mt7623),
- .grp_desc = mt7623_drv_grp,
- .n_grp_cls = ARRAY_SIZE(mt7623_drv_grp),
- .pin_drv_grp = mt7623_pin_drv,
- .n_pin_drv_grps = ARRAY_SIZE(mt7623_pin_drv),
- .spec_pull_set = mt7623_spec_pull_set,
- .spec_ies_smt_set = mt7623_ies_smt_set,
- .dir_offset = 0x0000,
- .pullen_offset = 0x0150,
- .pullsel_offset = 0x0280,
- .dout_offset = 0x0500,
- .din_offset = 0x0630,
- .pinmux_offset = 0x0760,
- .type1_start = 280,
- .type1_end = 280,
- .port_shf = 4,
- .port_mask = 0x1f,
- .port_align = 4,
- .eint_offsets = {
- .name = "mt7623_eint",
- .stat = 0x000,
- .ack = 0x040,
- .mask = 0x080,
- .mask_set = 0x0c0,
- .mask_clr = 0x100,
- .sens = 0x140,
- .sens_set = 0x180,
- .sens_clr = 0x1c0,
- .soft = 0x200,
- .soft_set = 0x240,
- .soft_clr = 0x280,
- .pol = 0x300,
- .pol_set = 0x340,
- .pol_clr = 0x380,
- .dom_en = 0x400,
- .dbnc_ctrl = 0x500,
- .dbnc_set = 0x600,
- .dbnc_clr = 0x700,
- .port_mask = 6,
- .ports = 6,
- },
- .ap_num = 169,
- .db_cnt = 16,
-};
-
-static int mt7623_pinctrl_probe(struct platform_device *pdev)
-{
- return mtk_pctrl_init(pdev, &mt7623_pinctrl_data, NULL);
-}
-
-static const struct of_device_id mt7623_pctrl_match[] = {
- { .compatible = "mediatek,mt7623-pinctrl", },
- {}
-};
-MODULE_DEVICE_TABLE(of, mt7623_pctrl_match);
-
-static struct platform_driver mtk_pinctrl_driver = {
- .probe = mt7623_pinctrl_probe,
- .driver = {
- .name = "mediatek-mt7623-pinctrl",
- .of_match_table = mt7623_pctrl_match,
- },
-};
-
-static int __init mtk_pinctrl_init(void)
-{
- return platform_driver_register(&mtk_pinctrl_driver);
-}
-
-arch_initcall(mtk_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h
deleted file mode 100644
index e06cfc40da0f..000000000000
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-mt7623.h
+++ /dev/null
@@ -1,1936 +0,0 @@
-/*
- * Copyright (c) 2016 John Crispin <[email protected]>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __PINCTRL_MTK_MT7623_H
-#define __PINCTRL_MTK_MT7623_H
-
-#include <linux/pinctrl/pinctrl.h>
-#include "pinctrl-mtk-common.h"
-
-static const struct mtk_desc_pin mtk_pins_mt7623[] = {
- MTK_PIN(
- PINCTRL_PIN(0, "PWRAP_SPI0_MI"),
- "J20", "mt7623",
- MTK_EINT_FUNCTION(0, 148),
- MTK_FUNCTION(0, "GPIO0"),
- MTK_FUNCTION(1, "PWRAP_SPIDO"),
- MTK_FUNCTION(2, "PWRAP_SPIDI")
- ),
- MTK_PIN(
- PINCTRL_PIN(1, "PWRAP_SPI0_MO"),
- "D10", "mt7623",
- MTK_EINT_FUNCTION(0, 149),
- MTK_FUNCTION(0, "GPIO1"),
- MTK_FUNCTION(1, "PWRAP_SPIDI"),
- MTK_FUNCTION(2, "PWRAP_SPIDO")
- ),
- MTK_PIN(
- PINCTRL_PIN(2, "PWRAP_INT"),
- "E11", "mt7623",
- MTK_EINT_FUNCTION(0, 150),
- MTK_FUNCTION(0, "GPIO2"),
- MTK_FUNCTION(1, "PWRAP_INT")
- ),
- MTK_PIN(
- PINCTRL_PIN(3, "PWRAP_SPI0_CK"),
- "H12", "mt7623",
- MTK_EINT_FUNCTION(0, 151),
- MTK_FUNCTION(0, "GPIO3"),
- MTK_FUNCTION(1, "PWRAP_SPICK_I")
- ),
- MTK_PIN(
- PINCTRL_PIN(4, "PWRAP_SPI0_CSN"),
- "E12", "mt7623",
- MTK_EINT_FUNCTION(0, 152),
- MTK_FUNCTION(0, "GPIO4"),
- MTK_FUNCTION(1, "PWRAP_SPICS_B_I")
- ),
- MTK_PIN(
- PINCTRL_PIN(5, "PWRAP_SPI0_CK2"),
- "H11", "mt7623",
- MTK_EINT_FUNCTION(0, 155),
- MTK_FUNCTION(0, "GPIO5"),
- MTK_FUNCTION(1, "PWRAP_SPICK2_I")
- ),
- MTK_PIN(
- PINCTRL_PIN(6, "PWRAP_SPI0_CSN2"),
- "G11", "mt7623",
- MTK_EINT_FUNCTION(0, 156),
- MTK_FUNCTION(0, "GPIO6"),
- MTK_FUNCTION(1, "PWRAP_SPICS2_B_I")
- ),
- MTK_PIN(
- PINCTRL_PIN(7, "SPI1_CSN"),
- "G19", "mt7623",
- MTK_EINT_FUNCTION(0, 153),
- MTK_FUNCTION(0, "GPIO7"),
- MTK_FUNCTION(1, "SPI1_CS")
- ),
- MTK_PIN(
- PINCTRL_PIN(8, "SPI1_MI"),
- "F19", "mt7623",
- MTK_EINT_FUNCTION(0, 154),
- MTK_FUNCTION(0, "GPIO8"),
- MTK_FUNCTION(1, "SPI1_MI"),
- MTK_FUNCTION(2, "SPI1_MO")
- ),
- MTK_PIN(
- PINCTRL_PIN(9, "SPI1_MO"),
- "G20", "mt7623",
- MTK_EINT_FUNCTION(0, 157),
- MTK_FUNCTION(0, "GPIO9"),
- MTK_FUNCTION(1, "SPI1_MO"),
- MTK_FUNCTION(2, "SPI1_MI")
- ),
- MTK_PIN(
- PINCTRL_PIN(10, "RTC32K_CK"),
- "A13", "mt7623",
- MTK_EINT_FUNCTION(0, 158),
- MTK_FUNCTION(0, "GPIO10"),
- MTK_FUNCTION(1, "RTC32K_CK")
- ),
- MTK_PIN(
- PINCTRL_PIN(11, "WATCHDOG"),
- "D14", "mt7623",
- MTK_EINT_FUNCTION(0, 159),
- MTK_FUNCTION(0, "GPIO11"),
- MTK_FUNCTION(1, "WATCHDOG")
- ),
- MTK_PIN(
- PINCTRL_PIN(12, "SRCLKENA"),
- "C13", "mt7623",
- MTK_EINT_FUNCTION(0, 169),
- MTK_FUNCTION(0, "GPIO12"),
- MTK_FUNCTION(1, "SRCLKENA")
- ),
- MTK_PIN(
- PINCTRL_PIN(13, "SRCLKENAI"),
- "B13", "mt7623",
- MTK_EINT_FUNCTION(0, 161),
- MTK_FUNCTION(0, "GPIO13"),
- MTK_FUNCTION(1, "SRCLKENAI")
- ),
- MTK_PIN(
- PINCTRL_PIN(14, "GPIO14"),
- "E18", "mt7623",
- MTK_EINT_FUNCTION(0, 162),
- MTK_FUNCTION(0, "GPIO14"),
- MTK_FUNCTION(1, "URXD2"),
- MTK_FUNCTION(2, "UTXD2")
- ),
- MTK_PIN(
- PINCTRL_PIN(15, "GPIO15"),
- "E17", "mt7623",
- MTK_EINT_FUNCTION(0, 163),
- MTK_FUNCTION(0, "GPIO15"),
- MTK_FUNCTION(1, "UTXD2"),
- MTK_FUNCTION(2, "URXD2")
- ),
- MTK_PIN(
- PINCTRL_PIN(16, "GPIO16"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO16")
- ),
- MTK_PIN(
- PINCTRL_PIN(17, "GPIO17"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO17")
- ),
- MTK_PIN(
- PINCTRL_PIN(18, "PCM_CLK"),
- "C19", "mt7623",
- MTK_EINT_FUNCTION(0, 166),
- MTK_FUNCTION(0, "GPIO18"),
- MTK_FUNCTION(1, "PCM_CLK0"),
- MTK_FUNCTION(6, "AP_PCM_CLKO")
- ),
- MTK_PIN(
- PINCTRL_PIN(19, "PCM_SYNC"),
- "D19", "mt7623",
- MTK_EINT_FUNCTION(0, 167),
- MTK_FUNCTION(0, "GPIO19"),
- MTK_FUNCTION(1, "PCM_SYNC"),
- MTK_FUNCTION(6, "AP_PCM_SYNC")
- ),
- MTK_PIN(
- PINCTRL_PIN(20, "PCM_RX"),
- "D18", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO20"),
- MTK_FUNCTION(1, "PCM_RX"),
- MTK_FUNCTION(4, "PCM_TX"),
- MTK_FUNCTION(6, "AP_PCM_RX")
- ),
- MTK_PIN(
- PINCTRL_PIN(21, "PCM_TX"),
- "C18", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO21"),
- MTK_FUNCTION(1, "PCM_TX"),
- MTK_FUNCTION(4, "PCM_RX"),
- MTK_FUNCTION(6, "AP_PCM_TX")
- ),
- MTK_PIN(
- PINCTRL_PIN(22, "EINT0"),
- "H15", "mt7623",
- MTK_EINT_FUNCTION(0, 0),
- MTK_FUNCTION(0, "GPIO22"),
- MTK_FUNCTION(1, "UCTS0"),
- MTK_FUNCTION(2, "PCIE0_PERST_N")
- ),
- MTK_PIN(
- PINCTRL_PIN(23, "EINT1"),
- "J16", "mt7623",
- MTK_EINT_FUNCTION(0, 1),
- MTK_FUNCTION(0, "GPIO23"),
- MTK_FUNCTION(1, "URTS0"),
- MTK_FUNCTION(2, "PCIE1_PERST_N")
- ),
- MTK_PIN(
- PINCTRL_PIN(24, "EINT2"),
- "H16", "mt7623",
- MTK_EINT_FUNCTION(0, 2),
- MTK_FUNCTION(0, "GPIO24"),
- MTK_FUNCTION(1, "UCTS1"),
- MTK_FUNCTION(2, "PCIE2_PERST_N")
- ),
- MTK_PIN(
- PINCTRL_PIN(25, "EINT3"),
- "K15", "mt7623",
- MTK_EINT_FUNCTION(0, 3),
- MTK_FUNCTION(0, "GPIO25"),
- MTK_FUNCTION(1, "URTS1")
- ),
- MTK_PIN(
- PINCTRL_PIN(26, "EINT4"),
- "G15", "mt7623",
- MTK_EINT_FUNCTION(0, 4),
- MTK_FUNCTION(0, "GPIO26"),
- MTK_FUNCTION(1, "UCTS3"),
- MTK_FUNCTION(6, "PCIE2_WAKE_N")
- ),
- MTK_PIN(
- PINCTRL_PIN(27, "EINT5"),
- "F15", "mt7623",
- MTK_EINT_FUNCTION(0, 5),
- MTK_FUNCTION(0, "GPIO27"),
- MTK_FUNCTION(1, "URTS3"),
- MTK_FUNCTION(6, "PCIE1_WAKE_N")
- ),
- MTK_PIN(
- PINCTRL_PIN(28, "EINT6"),
- "J15", "mt7623",
- MTK_EINT_FUNCTION(0, 6),
- MTK_FUNCTION(0, "GPIO28"),
- MTK_FUNCTION(1, "DRV_VBUS"),
- MTK_FUNCTION(6, "PCIE0_WAKE_N")
- ),
- MTK_PIN(
- PINCTRL_PIN(29, "EINT7"),
- "E15", "mt7623",
- MTK_EINT_FUNCTION(0, 7),
- MTK_FUNCTION(0, "GPIO29"),
- MTK_FUNCTION(1, "IDDIG"),
- MTK_FUNCTION(2, "MSDC1_WP"),
- MTK_FUNCTION(6, "PCIE2_PERST_N")
- ),
- MTK_PIN(
- PINCTRL_PIN(30, "GPIO30"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO30")
- ),
- MTK_PIN(
- PINCTRL_PIN(31, "GPIO31"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO31")
- ),
- MTK_PIN(
- PINCTRL_PIN(32, "GPIO32"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO32")
- ),
- MTK_PIN(
- PINCTRL_PIN(33, "I2S1_DATA"),
- "Y18", "mt7623",
- MTK_EINT_FUNCTION(0, 15),
- MTK_FUNCTION(0, "GPIO33"),
- MTK_FUNCTION(1, "I2S1_DATA"),
- MTK_FUNCTION(3, "PCM_TX"),
- MTK_FUNCTION(6, "AP_PCM_TX")
- ),
- MTK_PIN(
- PINCTRL_PIN(34, "I2S1_DATA_IN"),
- "Y17", "mt7623",
- MTK_EINT_FUNCTION(0, 16),
- MTK_FUNCTION(0, "GPIO34"),
- MTK_FUNCTION(1, "I2S1_DATA_IN"),
- MTK_FUNCTION(3, "PCM_RX"),
- MTK_FUNCTION(6, "AP_PCM_RX")
- ),
- MTK_PIN(
- PINCTRL_PIN(35, "I2S1_BCK"),
- "V17", "mt7623",
- MTK_EINT_FUNCTION(0, 17),
- MTK_FUNCTION(0, "GPIO35"),
- MTK_FUNCTION(1, "I2S1_BCK"),
- MTK_FUNCTION(3, "PCM_CLK0"),
- MTK_FUNCTION(6, "AP_PCM_CLKO")
- ),
- MTK_PIN(
- PINCTRL_PIN(36, "I2S1_LRCK"),
- "W17", "mt7623",
- MTK_EINT_FUNCTION(0, 18),
- MTK_FUNCTION(0, "GPIO36"),
- MTK_FUNCTION(1, "I2S1_LRCK"),
- MTK_FUNCTION(3, "PCM_SYNC"),
- MTK_FUNCTION(6, "AP_PCM_SYNC")
- ),
- MTK_PIN(
- PINCTRL_PIN(37, "I2S1_MCLK"),
- "AA18", "mt7623",
- MTK_EINT_FUNCTION(0, 19),
- MTK_FUNCTION(0, "GPIO37"),
- MTK_FUNCTION(1, "I2S1_MCLK")
- ),
- MTK_PIN(
- PINCTRL_PIN(38, "GPIO38"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO38")
- ),
- MTK_PIN(
- PINCTRL_PIN(39, "JTMS"),
- "G21", "mt7623",
- MTK_EINT_FUNCTION(0, 21),
- MTK_FUNCTION(0, "GPIO39"),
- MTK_FUNCTION(1, "JTMS")
- ),
- MTK_PIN(
- PINCTRL_PIN(40, "GPIO40"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO40")
- ),
- MTK_PIN(
- PINCTRL_PIN(41, "JTDI"),
- "H22", "mt7623",
- MTK_EINT_FUNCTION(0, 23),
- MTK_FUNCTION(0, "GPIO41"),
- MTK_FUNCTION(1, "JTDI")
- ),
- MTK_PIN(
- PINCTRL_PIN(42, "JTDO"),
- "H21", "mt7623",
- MTK_EINT_FUNCTION(0, 24),
- MTK_FUNCTION(0, "GPIO42"),
- MTK_FUNCTION(1, "JTDO")
- ),
- MTK_PIN(
- PINCTRL_PIN(43, "NCLE"),
- "C7", "mt7623",
- MTK_EINT_FUNCTION(0, 25),
- MTK_FUNCTION(0, "GPIO43"),
- MTK_FUNCTION(1, "NCLE"),
- MTK_FUNCTION(2, "EXT_XCS2")
- ),
- MTK_PIN(
- PINCTRL_PIN(44, "NCEB1"),
- "C6", "mt7623",
- MTK_EINT_FUNCTION(0, 26),
- MTK_FUNCTION(0, "GPIO44"),
- MTK_FUNCTION(1, "NCEB1"),
- MTK_FUNCTION(2, "IDDIG")
- ),
- MTK_PIN(
- PINCTRL_PIN(45, "NCEB0"),
- "D7", "mt7623",
- MTK_EINT_FUNCTION(0, 27),
- MTK_FUNCTION(0, "GPIO45"),
- MTK_FUNCTION(1, "NCEB0"),
- MTK_FUNCTION(2, "DRV_VBUS")
- ),
- MTK_PIN(
- PINCTRL_PIN(46, "IR"),
- "D15", "mt7623",
- MTK_EINT_FUNCTION(0, 28),
- MTK_FUNCTION(0, "GPIO46"),
- MTK_FUNCTION(1, "IR")
- ),
- MTK_PIN(
- PINCTRL_PIN(47, "NREB"),
- "A6", "mt7623",
- MTK_EINT_FUNCTION(0, 29),
- MTK_FUNCTION(0, "GPIO47"),
- MTK_FUNCTION(1, "NREB")
- ),
- MTK_PIN(
- PINCTRL_PIN(48, "NRNB"),
- "B6", "mt7623",
- MTK_EINT_FUNCTION(0, 30),
- MTK_FUNCTION(0, "GPIO48"),
- MTK_FUNCTION(1, "NRNB")
- ),
- MTK_PIN(
- PINCTRL_PIN(49, "I2S0_DATA"),
- "AB18", "mt7623",
- MTK_EINT_FUNCTION(0, 31),
- MTK_FUNCTION(0, "GPIO49"),
- MTK_FUNCTION(1, "I2S0_DATA"),
- MTK_FUNCTION(3, "PCM_TX"),
- MTK_FUNCTION(6, "AP_I2S_DO")
- ),
- MTK_PIN(
- PINCTRL_PIN(50, "GPIO50"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO50")
- ),
- MTK_PIN(
- PINCTRL_PIN(51, "GPIO51"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO51")
- ),
- MTK_PIN(
- PINCTRL_PIN(52, "GPIO52"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO52")
- ),
- MTK_PIN(
- PINCTRL_PIN(53, "SPI0_CSN"),
- "E7", "mt7623",
- MTK_EINT_FUNCTION(0, 35),
- MTK_FUNCTION(0, "GPIO53"),
- MTK_FUNCTION(1, "SPI0_CS"),
- MTK_FUNCTION(5, "PWM1")
- ),
- MTK_PIN(
- PINCTRL_PIN(54, "SPI0_CK"),
- "F7", "mt7623",
- MTK_EINT_FUNCTION(0, 36),
- MTK_FUNCTION(0, "GPIO54"),
- MTK_FUNCTION(1, "SPI0_CK")
- ),
- MTK_PIN(
- PINCTRL_PIN(55, "SPI0_MI"),
- "E6", "mt7623",
- MTK_EINT_FUNCTION(0, 37),
- MTK_FUNCTION(0, "GPIO55"),
- MTK_FUNCTION(1, "SPI0_MI"),
- MTK_FUNCTION(2, "SPI0_MO"),
- MTK_FUNCTION(3, "MSDC1_WP"),
- MTK_FUNCTION(5, "PWM2")
- ),
- MTK_PIN(
- PINCTRL_PIN(56, "SPI0_MO"),
- "G7", "mt7623",
- MTK_EINT_FUNCTION(0, 38),
- MTK_FUNCTION(0, "GPIO56"),
- MTK_FUNCTION(1, "SPI0_MO"),
- MTK_FUNCTION(2, "SPI0_MI")
- ),
- MTK_PIN(
- PINCTRL_PIN(57, "GPIO57"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO57")
- ),
- MTK_PIN(
- PINCTRL_PIN(58, "GPIO58"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO58")
- ),
- MTK_PIN(
- PINCTRL_PIN(59, "GPIO59"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO59")
- ),
- MTK_PIN(
- PINCTRL_PIN(60, "WB_RSTB"),
- "Y21", "mt7623",
- MTK_EINT_FUNCTION(0, 41),
- MTK_FUNCTION(0, "GPIO60"),
- MTK_FUNCTION(1, "WB_RSTB")
- ),
- MTK_PIN(
- PINCTRL_PIN(61, "GPIO61"),
- "AA21", "mt7623",
- MTK_EINT_FUNCTION(0, 42),
- MTK_FUNCTION(0, "GPIO61"),
- MTK_FUNCTION(1, "TEST_FD")
- ),
- MTK_PIN(
- PINCTRL_PIN(62, "GPIO62"),
- "AB22", "mt7623",
- MTK_EINT_FUNCTION(0, 43),
- MTK_FUNCTION(0, "GPIO62"),
- MTK_FUNCTION(1, "TEST_FC")
- ),
- MTK_PIN(
- PINCTRL_PIN(63, "WB_SCLK"),
- "AC23", "mt7623",
- MTK_EINT_FUNCTION(0, 44),
- MTK_FUNCTION(0, "GPIO63"),
- MTK_FUNCTION(1, "WB_SCLK")
- ),
- MTK_PIN(
- PINCTRL_PIN(64, "WB_SDATA"),
- "AB21", "mt7623",
- MTK_EINT_FUNCTION(0, 45),
- MTK_FUNCTION(0, "GPIO64"),
- MTK_FUNCTION(1, "WB_SDATA")
- ),
- MTK_PIN(
- PINCTRL_PIN(65, "WB_SEN"),
- "AB24", "mt7623",
- MTK_EINT_FUNCTION(0, 46),
- MTK_FUNCTION(0, "GPIO65"),
- MTK_FUNCTION(1, "WB_SEN")
- ),
- MTK_PIN(
- PINCTRL_PIN(66, "WB_CRTL0"),
- "AB20", "mt7623",
- MTK_EINT_FUNCTION(0, 47),
- MTK_FUNCTION(0, "GPIO66"),
- MTK_FUNCTION(1, "WB_CRTL0")
- ),
- MTK_PIN(
- PINCTRL_PIN(67, "WB_CRTL1"),
- "AC20", "mt7623",
- MTK_EINT_FUNCTION(0, 48),
- MTK_FUNCTION(0, "GPIO67"),
- MTK_FUNCTION(1, "WB_CRTL1")
- ),
- MTK_PIN(
- PINCTRL_PIN(68, "WB_CRTL2"),
- "AB19", "mt7623",
- MTK_EINT_FUNCTION(0, 49),
- MTK_FUNCTION(0, "GPIO68"),
- MTK_FUNCTION(1, "WB_CRTL2")
- ),
- MTK_PIN(
- PINCTRL_PIN(69, "WB_CRTL3"),
- "AC19", "mt7623",
- MTK_EINT_FUNCTION(0, 50),
- MTK_FUNCTION(0, "GPIO69"),
- MTK_FUNCTION(1, "WB_CRTL3")
- ),
- MTK_PIN(
- PINCTRL_PIN(70, "WB_CRTL4"),
- "AD19", "mt7623",
- MTK_EINT_FUNCTION(0, 51),
- MTK_FUNCTION(0, "GPIO70"),
- MTK_FUNCTION(1, "WB_CRTL4")
- ),
- MTK_PIN(
- PINCTRL_PIN(71, "WB_CRTL5"),
- "AE19", "mt7623",
- MTK_EINT_FUNCTION(0, 52),
- MTK_FUNCTION(0, "GPIO71"),
- MTK_FUNCTION(1, "WB_CRTL5")
- ),
- MTK_PIN(
- PINCTRL_PIN(72, "I2S0_DATA_IN"),
- "AA20", "mt7623",
- MTK_EINT_FUNCTION(0, 53),
- MTK_FUNCTION(0, "GPIO72"),
- MTK_FUNCTION(1, "I2S0_DATA_IN"),
- MTK_FUNCTION(3, "PCM_RX"),
- MTK_FUNCTION(4, "PWM0"),
- MTK_FUNCTION(5, "DISP_PWM"),
- MTK_FUNCTION(6, "AP_I2S_DI")
- ),
- MTK_PIN(
- PINCTRL_PIN(73, "I2S0_LRCK"),
- "Y20", "mt7623",
- MTK_EINT_FUNCTION(0, 54),
- MTK_FUNCTION(0, "GPIO73"),
- MTK_FUNCTION(1, "I2S0_LRCK"),
- MTK_FUNCTION(3, "PCM_SYNC"),
- MTK_FUNCTION(6, "AP_I2S_LRCK")
- ),
- MTK_PIN(
- PINCTRL_PIN(74, "I2S0_BCK"),
- "Y19", "mt7623",
- MTK_EINT_FUNCTION(0, 55),
- MTK_FUNCTION(0, "GPIO74"),
- MTK_FUNCTION(1, "I2S0_BCK"),
- MTK_FUNCTION(3, "PCM_CLK0"),
- MTK_FUNCTION(6, "AP_I2S_BCK")
- ),
- MTK_PIN(
- PINCTRL_PIN(75, "SDA0"),
- "K19", "mt7623",
- MTK_EINT_FUNCTION(0, 56),
- MTK_FUNCTION(0, "GPIO75"),
- MTK_FUNCTION(1, "SDA0")
- ),
- MTK_PIN(
- PINCTRL_PIN(76, "SCL0"),
- "K20", "mt7623",
- MTK_EINT_FUNCTION(0, 57),
- MTK_FUNCTION(0, "GPIO76"),
- MTK_FUNCTION(1, "SCL0")
- ),
- MTK_PIN(
- PINCTRL_PIN(77, "GPIO77"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO77")
- ),
- MTK_PIN(
- PINCTRL_PIN(78, "GPIO78"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO78")
- ),
- MTK_PIN(
- PINCTRL_PIN(79, "GPIO79"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO79")
- ),
- MTK_PIN(
- PINCTRL_PIN(80, "GPIO80"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO80")
- ),
- MTK_PIN(
- PINCTRL_PIN(81, "GPIO81"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO81")
- ),
- MTK_PIN(
- PINCTRL_PIN(82, "GPIO82"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO82")
- ),
- MTK_PIN(
- PINCTRL_PIN(83, "LCM_RST"),
- "V16", "mt7623",
- MTK_EINT_FUNCTION(0, 64),
- MTK_FUNCTION(0, "GPIO83"),
- MTK_FUNCTION(1, "LCM_RST")
- ),
- MTK_PIN(
- PINCTRL_PIN(84, "DSI_TE"),
- "V14", "mt7623",
- MTK_EINT_FUNCTION(0, 65),
- MTK_FUNCTION(0, "GPIO84"),
- MTK_FUNCTION(1, "DSI_TE")
- ),
- MTK_PIN(
- PINCTRL_PIN(85, "GPIO85"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO85")
- ),
- MTK_PIN(
- PINCTRL_PIN(86, "GPIO86"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO86")
- ),
- MTK_PIN(
- PINCTRL_PIN(87, "GPIO87"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO87")
- ),
- MTK_PIN(
- PINCTRL_PIN(88, "GPIO88"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO88")
- ),
- MTK_PIN(
- PINCTRL_PIN(89, "GPIO89"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO89")
- ),
- MTK_PIN(
- PINCTRL_PIN(90, "GPIO90"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO90")
- ),
- MTK_PIN(
- PINCTRL_PIN(91, "GPIO91"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO91")
- ),
- MTK_PIN(
- PINCTRL_PIN(92, "GPIO92"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO92")
- ),
- MTK_PIN(
- PINCTRL_PIN(93, "GPIO93"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO93")
- ),
- MTK_PIN(
- PINCTRL_PIN(94, "GPIO94"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO94")
- ),
- MTK_PIN(
- PINCTRL_PIN(95, "MIPI_TCN"),
- "AB14", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO95"),
- MTK_FUNCTION(1, "TCN")
- ),
- MTK_PIN(
- PINCTRL_PIN(96, "MIPI_TCP"),
- "AC14", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO96"),
- MTK_FUNCTION(1, "TCP")
- ),
- MTK_PIN(
- PINCTRL_PIN(97, "MIPI_TDN1"),
- "AE15", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO97"),
- MTK_FUNCTION(1, "TDN1")
- ),
- MTK_PIN(
- PINCTRL_PIN(98, "MIPI_TDP1"),
- "AD15", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO98"),
- MTK_FUNCTION(1, "TDP1")
- ),
- MTK_PIN(
- PINCTRL_PIN(99, "MIPI_TDN0"),
- "AB15", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO99"),
- MTK_FUNCTION(1, "TDN0")
- ),
- MTK_PIN(
- PINCTRL_PIN(100, "MIPI_TDP0"),
- "AC15", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO100"),
- MTK_FUNCTION(1, "TDP0")
- ),
- MTK_PIN(
- PINCTRL_PIN(101, "GPIO101"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO101")
- ),
- MTK_PIN(
- PINCTRL_PIN(102, "GPIO102"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO102")
- ),
- MTK_PIN(
- PINCTRL_PIN(103, "GPIO103"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO103")
- ),
- MTK_PIN(
- PINCTRL_PIN(104, "GPIO104"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO104")
- ),
- MTK_PIN(
- PINCTRL_PIN(105, "MSDC1_CMD"),
- "AD2", "mt7623",
- MTK_EINT_FUNCTION(0, 78),
- MTK_FUNCTION(0, "GPIO105"),
- MTK_FUNCTION(1, "MSDC1_CMD"),
- MTK_FUNCTION(3, "SDA1"),
- MTK_FUNCTION(6, "I2SOUT_BCK")
- ),
- MTK_PIN(
- PINCTRL_PIN(106, "MSDC1_CLK"),
- "AD3", "mt7623",
- MTK_EINT_FUNCTION(0, 79),
- MTK_FUNCTION(0, "GPIO106"),
- MTK_FUNCTION(1, "MSDC1_CLK"),
- MTK_FUNCTION(3, "SCL1"),
- MTK_FUNCTION(6, "I2SOUT_LRCK")
- ),
- MTK_PIN(
- PINCTRL_PIN(107, "MSDC1_DAT0"),
- "AE2", "mt7623",
- MTK_EINT_FUNCTION(0, 80),
- MTK_FUNCTION(0, "GPIO107"),
- MTK_FUNCTION(1, "MSDC1_DAT0"),
- MTK_FUNCTION(5, "UTXD0"),
- MTK_FUNCTION(6, "I2SOUT_DATA_OUT")
- ),
- MTK_PIN(
- PINCTRL_PIN(108, "MSDC1_DAT1"),
- "AC1", "mt7623",
- MTK_EINT_FUNCTION(0, 81),
- MTK_FUNCTION(0, "GPIO108"),
- MTK_FUNCTION(1, "MSDC1_DAT1"),
- MTK_FUNCTION(3, "PWM0"),
- MTK_FUNCTION(5, "URXD0"),
- MTK_FUNCTION(6, "PWM1")
- ),
- MTK_PIN(
- PINCTRL_PIN(109, "MSDC1_DAT2"),
- "AC3", "mt7623",
- MTK_EINT_FUNCTION(0, 82),
- MTK_FUNCTION(0, "GPIO109"),
- MTK_FUNCTION(1, "MSDC1_DAT2"),
- MTK_FUNCTION(3, "SDA2"),
- MTK_FUNCTION(5, "UTXD1"),
- MTK_FUNCTION(6, "PWM2")
- ),
- MTK_PIN(
- PINCTRL_PIN(110, "MSDC1_DAT3"),
- "AC4", "mt7623",
- MTK_EINT_FUNCTION(0, 83),
- MTK_FUNCTION(0, "GPIO110"),
- MTK_FUNCTION(1, "MSDC1_DAT3"),
- MTK_FUNCTION(3, "SCL2"),
- MTK_FUNCTION(5, "URXD1"),
- MTK_FUNCTION(6, "PWM3")
- ),
- MTK_PIN(
- PINCTRL_PIN(111, "MSDC0_DAT7"),
- "A2", "mt7623",
- MTK_EINT_FUNCTION(0, 84),
- MTK_FUNCTION(0, "GPIO111"),
- MTK_FUNCTION(1, "MSDC0_DAT7"),
- MTK_FUNCTION(4, "NLD7")
- ),
- MTK_PIN(
- PINCTRL_PIN(112, "MSDC0_DAT6"),
- "B3", "mt7623",
- MTK_EINT_FUNCTION(0, 85),
- MTK_FUNCTION(0, "GPIO112"),
- MTK_FUNCTION(1, "MSDC0_DAT6"),
- MTK_FUNCTION(4, "NLD6")
- ),
- MTK_PIN(
- PINCTRL_PIN(113, "MSDC0_DAT5"),
- "C4", "mt7623",
- MTK_EINT_FUNCTION(0, 86),
- MTK_FUNCTION(0, "GPIO113"),
- MTK_FUNCTION(1, "MSDC0_DAT5"),
- MTK_FUNCTION(4, "NLD5")
- ),
- MTK_PIN(
- PINCTRL_PIN(114, "MSDC0_DAT4"),
- "A4", "mt7623",
- MTK_EINT_FUNCTION(0, 87),
- MTK_FUNCTION(0, "GPIO114"),
- MTK_FUNCTION(1, "MSDC0_DAT4"),
- MTK_FUNCTION(4, "NLD4")
- ),
- MTK_PIN(
- PINCTRL_PIN(115, "MSDC0_RSTB"),
- "C5", "mt7623",
- MTK_EINT_FUNCTION(0, 88),
- MTK_FUNCTION(0, "GPIO115"),
- MTK_FUNCTION(1, "MSDC0_RSTB"),
- MTK_FUNCTION(4, "NLD8")
- ),
- MTK_PIN(
- PINCTRL_PIN(116, "MSDC0_CMD"),
- "D5", "mt7623",
- MTK_EINT_FUNCTION(0, 89),
- MTK_FUNCTION(0, "GPIO116"),
- MTK_FUNCTION(1, "MSDC0_CMD"),
- MTK_FUNCTION(4, "NALE")
- ),
- MTK_PIN(
- PINCTRL_PIN(117, "MSDC0_CLK"),
- "B1", "mt7623",
- MTK_EINT_FUNCTION(0, 90),
- MTK_FUNCTION(0, "GPIO117"),
- MTK_FUNCTION(1, "MSDC0_CLK"),
- MTK_FUNCTION(4, "NWEB")
- ),
- MTK_PIN(
- PINCTRL_PIN(118, "MSDC0_DAT3"),
- "D6", "mt7623",
- MTK_EINT_FUNCTION(0, 91),
- MTK_FUNCTION(0, "GPIO118"),
- MTK_FUNCTION(1, "MSDC0_DAT3"),
- MTK_FUNCTION(4, "NLD3")
- ),
- MTK_PIN(
- PINCTRL_PIN(119, "MSDC0_DAT2"),
- "B2", "mt7623",
- MTK_EINT_FUNCTION(0, 92),
- MTK_FUNCTION(0, "GPIO119"),
- MTK_FUNCTION(1, "MSDC0_DAT2"),
- MTK_FUNCTION(4, "NLD2")
- ),
- MTK_PIN(
- PINCTRL_PIN(120, "MSDC0_DAT1"),
- "A3", "mt7623",
- MTK_EINT_FUNCTION(0, 93),
- MTK_FUNCTION(0, "GPIO120"),
- MTK_FUNCTION(1, "MSDC0_DAT1"),
- MTK_FUNCTION(4, "NLD1")
- ),
- MTK_PIN(
- PINCTRL_PIN(121, "MSDC0_DAT0"),
- "B4", "mt7623",
- MTK_EINT_FUNCTION(0, 94),
- MTK_FUNCTION(0, "GPIO121"),
- MTK_FUNCTION(1, "MSDC0_DAT0"),
- MTK_FUNCTION(4, "NLD0"),
- MTK_FUNCTION(5, "WATCHDOG")
- ),
- MTK_PIN(
- PINCTRL_PIN(122, "GPIO122"),
- "H17", "mt7623",
- MTK_EINT_FUNCTION(0, 95),
- MTK_FUNCTION(0, "GPIO122"),
- MTK_FUNCTION(1, "TEST"),
- MTK_FUNCTION(4, "SDA2"),
- MTK_FUNCTION(5, "URXD0")
- ),
- MTK_PIN(
- PINCTRL_PIN(123, "GPIO123"),
- "F17", "mt7623",
- MTK_EINT_FUNCTION(0, 96),
- MTK_FUNCTION(0, "GPIO123"),
- MTK_FUNCTION(1, "TEST"),
- MTK_FUNCTION(4, "SCL2"),
- MTK_FUNCTION(5, "UTXD0")
- ),
- MTK_PIN(
- PINCTRL_PIN(124, "GPIO124"),
- "H18", "mt7623",
- MTK_EINT_FUNCTION(0, 97),
- MTK_FUNCTION(0, "GPIO124"),
- MTK_FUNCTION(1, "TEST"),
- MTK_FUNCTION(4, "SDA1"),
- MTK_FUNCTION(5, "PWM3")
- ),
- MTK_PIN(
- PINCTRL_PIN(125, "GPIO125"),
- "G17", "mt7623",
- MTK_EINT_FUNCTION(0, 98),
- MTK_FUNCTION(0, "GPIO125"),
- MTK_FUNCTION(1, "TEST"),
- MTK_FUNCTION(4, "SCL1"),
- MTK_FUNCTION(5, "PWM4")
- ),
- MTK_PIN(
- PINCTRL_PIN(126, "I2S0_MCLK"),
- "AA19", "mt7623",
- MTK_EINT_FUNCTION(0, 99),
- MTK_FUNCTION(0, "GPIO126"),
- MTK_FUNCTION(1, "I2S0_MCLK"),
- MTK_FUNCTION(6, "AP_I2S_MCLK")
- ),
- MTK_PIN(
- PINCTRL_PIN(127, "GPIO127"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO127")
- ),
- MTK_PIN(
- PINCTRL_PIN(128, "GPIO128"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO128")
- ),
- MTK_PIN(
- PINCTRL_PIN(129, "GPIO129"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO129")
- ),
- MTK_PIN(
- PINCTRL_PIN(130, "GPIO130"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO130")
- ),
- MTK_PIN(
- PINCTRL_PIN(131, "GPIO131"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO131")
- ),
- MTK_PIN(
- PINCTRL_PIN(132, "GPIO132"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO132")
- ),
- MTK_PIN(
- PINCTRL_PIN(133, "GPIO133"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO133")
- ),
- MTK_PIN(
- PINCTRL_PIN(134, "GPIO134"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO134")
- ),
- MTK_PIN(
- PINCTRL_PIN(135, "GPIO135"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO135")
- ),
- MTK_PIN(
- PINCTRL_PIN(136, "GPIO136"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO136")
- ),
- MTK_PIN(
- PINCTRL_PIN(137, "GPIO137"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO137")
- ),
- MTK_PIN(
- PINCTRL_PIN(138, "GPIO138"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO138")
- ),
- MTK_PIN(
- PINCTRL_PIN(139, "GPIO139"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO139")
- ),
- MTK_PIN(
- PINCTRL_PIN(140, "GPIO140"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO140")
- ),
- MTK_PIN(
- PINCTRL_PIN(141, "GPIO141"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO141")
- ),
- MTK_PIN(
- PINCTRL_PIN(142, "GPIO142"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO142")
- ),
- MTK_PIN(
- PINCTRL_PIN(143, "GPIO143"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO143")
- ),
- MTK_PIN(
- PINCTRL_PIN(144, "GPIO144"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO144")
- ),
- MTK_PIN(
- PINCTRL_PIN(145, "GPIO145"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO145")
- ),
- MTK_PIN(
- PINCTRL_PIN(146, "GPIO146"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO146")
- ),
- MTK_PIN(
- PINCTRL_PIN(147, "GPIO147"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO147")
- ),
- MTK_PIN(
- PINCTRL_PIN(148, "GPIO148"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO148")
- ),
- MTK_PIN(
- PINCTRL_PIN(149, "GPIO149"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO149")
- ),
- MTK_PIN(
- PINCTRL_PIN(150, "GPIO150"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO150")
- ),
- MTK_PIN(
- PINCTRL_PIN(151, "GPIO151"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO151")
- ),
- MTK_PIN(
- PINCTRL_PIN(152, "GPIO152"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO152")
- ),
- MTK_PIN(
- PINCTRL_PIN(153, "GPIO153"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO153")
- ),
- MTK_PIN(
- PINCTRL_PIN(154, "GPIO154"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO154")
- ),
- MTK_PIN(
- PINCTRL_PIN(155, "GPIO155"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO155")
- ),
- MTK_PIN(
- PINCTRL_PIN(156, "GPIO156"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO156")
- ),
- MTK_PIN(
- PINCTRL_PIN(157, "GPIO157"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO157")
- ),
- MTK_PIN(
- PINCTRL_PIN(158, "GPIO158"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO158")
- ),
- MTK_PIN(
- PINCTRL_PIN(159, "GPIO159"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO159")
- ),
- MTK_PIN(
- PINCTRL_PIN(160, "GPIO160"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO160")
- ),
- MTK_PIN(
- PINCTRL_PIN(161, "GPIO161"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO161")
- ),
- MTK_PIN(
- PINCTRL_PIN(162, "GPIO162"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO162")
- ),
- MTK_PIN(
- PINCTRL_PIN(163, "GPIO163"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO163")
- ),
- MTK_PIN(
- PINCTRL_PIN(164, "GPIO164"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO164")
- ),
- MTK_PIN(
- PINCTRL_PIN(165, "GPIO165"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO165")
- ),
- MTK_PIN(
- PINCTRL_PIN(166, "GPIO166"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO166")
- ),
- MTK_PIN(
- PINCTRL_PIN(167, "GPIO167"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO167")
- ),
- MTK_PIN(
- PINCTRL_PIN(168, "GPIO168"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO168")
- ),
- MTK_PIN(
- PINCTRL_PIN(169, "GPIO169"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO169")
- ),
- MTK_PIN(
- PINCTRL_PIN(170, "GPIO170"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO170")
- ),
- MTK_PIN(
- PINCTRL_PIN(171, "GPIO171"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO171")
- ),
- MTK_PIN(
- PINCTRL_PIN(172, "GPIO172"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO172")
- ),
- MTK_PIN(
- PINCTRL_PIN(173, "GPIO173"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO173")
- ),
- MTK_PIN(
- PINCTRL_PIN(174, "GPIO174"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO174")
- ),
- MTK_PIN(
- PINCTRL_PIN(175, "GPIO175"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO175")
- ),
- MTK_PIN(
- PINCTRL_PIN(176, "GPIO176"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO176")
- ),
- MTK_PIN(
- PINCTRL_PIN(177, "GPIO177"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO177")
- ),
- MTK_PIN(
- PINCTRL_PIN(178, "GPIO178"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO178")
- ),
- MTK_PIN(
- PINCTRL_PIN(179, "GPIO179"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO179")
- ),
- MTK_PIN(
- PINCTRL_PIN(180, "GPIO180"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO180")
- ),
- MTK_PIN(
- PINCTRL_PIN(181, "GPIO181"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO181")
- ),
- MTK_PIN(
- PINCTRL_PIN(182, "GPIO182"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO182")
- ),
- MTK_PIN(
- PINCTRL_PIN(183, "GPIO183"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO183")
- ),
- MTK_PIN(
- PINCTRL_PIN(184, "GPIO184"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO184")
- ),
- MTK_PIN(
- PINCTRL_PIN(185, "GPIO185"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO185")
- ),
- MTK_PIN(
- PINCTRL_PIN(186, "GPIO186"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO186")
- ),
- MTK_PIN(
- PINCTRL_PIN(187, "GPIO187"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO187")
- ),
- MTK_PIN(
- PINCTRL_PIN(188, "GPIO188"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO188")
- ),
- MTK_PIN(
- PINCTRL_PIN(189, "GPIO189"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO189")
- ),
- MTK_PIN(
- PINCTRL_PIN(190, "GPIO190"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO190")
- ),
- MTK_PIN(
- PINCTRL_PIN(191, "GPIO191"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO191")
- ),
- MTK_PIN(
- PINCTRL_PIN(192, "GPIO192"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO192")
- ),
- MTK_PIN(
- PINCTRL_PIN(193, "GPIO193"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO193")
- ),
- MTK_PIN(
- PINCTRL_PIN(194, "GPIO194"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO194")
- ),
- MTK_PIN(
- PINCTRL_PIN(195, "GPIO195"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO195")
- ),
- MTK_PIN(
- PINCTRL_PIN(196, "GPIO196"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO196")
- ),
- MTK_PIN(
- PINCTRL_PIN(197, "GPIO197"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO197")
- ),
- MTK_PIN(
- PINCTRL_PIN(198, "GPIO198"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO198")
- ),
- MTK_PIN(
- PINCTRL_PIN(199, "SPI1_CK"),
- "E19", "mt7623",
- MTK_EINT_FUNCTION(0, 111),
- MTK_FUNCTION(0, "GPIO199"),
- MTK_FUNCTION(1, "SPI1_CK")
- ),
- MTK_PIN(
- PINCTRL_PIN(200, "URXD2"),
- "K18", "mt7623",
- MTK_EINT_FUNCTION(0, 112),
- MTK_FUNCTION(0, "GPIO200"),
- MTK_FUNCTION(6, "URXD2")
- ),
- MTK_PIN(
- PINCTRL_PIN(201, "UTXD2"),
- "L18", "mt7623",
- MTK_EINT_FUNCTION(0, 113),
- MTK_FUNCTION(0, "GPIO201"),
- MTK_FUNCTION(6, "UTXD2")
- ),
- MTK_PIN(
- PINCTRL_PIN(202, "GPIO202"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO202")
- ),
- MTK_PIN(
- PINCTRL_PIN(203, "PWM0"),
- "AA16", "mt7623",
- MTK_EINT_FUNCTION(0, 115),
- MTK_FUNCTION(0, "GPIO203"),
- MTK_FUNCTION(1, "PWM0"),
- MTK_FUNCTION(2, "DISP_PWM")
- ),
- MTK_PIN(
- PINCTRL_PIN(204, "PWM1"),
- "Y16", "mt7623",
- MTK_EINT_FUNCTION(0, 116),
- MTK_FUNCTION(0, "GPIO204"),
- MTK_FUNCTION(1, "PWM1")
- ),
- MTK_PIN(
- PINCTRL_PIN(205, "PWM2"),
- "AA15", "mt7623",
- MTK_EINT_FUNCTION(0, 117),
- MTK_FUNCTION(0, "GPIO205"),
- MTK_FUNCTION(1, "PWM2")
- ),
- MTK_PIN(
- PINCTRL_PIN(206, "PWM3"),
- "AA17", "mt7623",
- MTK_EINT_FUNCTION(0, 118),
- MTK_FUNCTION(0, "GPIO206"),
- MTK_FUNCTION(1, "PWM3")
- ),
- MTK_PIN(
- PINCTRL_PIN(207, "PWM4"),
- "Y15", "mt7623",
- MTK_EINT_FUNCTION(0, 119),
- MTK_FUNCTION(0, "GPIO207"),
- MTK_FUNCTION(1, "PWM4")
- ),
- MTK_PIN(
- PINCTRL_PIN(208, "AUD_EXT_CK1"),
- "W14", "mt7623",
- MTK_EINT_FUNCTION(0, 120),
- MTK_FUNCTION(0, "GPIO208"),
- MTK_FUNCTION(1, "AUD_EXT_CK1"),
- MTK_FUNCTION(2, "PWM0"),
- MTK_FUNCTION(3, "PCIE0_PERST_N"),
- MTK_FUNCTION(5, "DISP_PWM")
- ),
- MTK_PIN(
- PINCTRL_PIN(209, "AUD_EXT_CK2"),
- "V15", "mt7623",
- MTK_EINT_FUNCTION(0, 121),
- MTK_FUNCTION(0, "GPIO209"),
- MTK_FUNCTION(1, "AUD_EXT_CK2"),
- MTK_FUNCTION(2, "MSDC1_WP"),
- MTK_FUNCTION(3, "PCIE1_PERST_N"),
- MTK_FUNCTION(5, "PWM1")
- ),
- MTK_PIN(
- PINCTRL_PIN(210, "GPIO210"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO210")
- ),
- MTK_PIN(
- PINCTRL_PIN(211, "GPIO211"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO211")
- ),
- MTK_PIN(
- PINCTRL_PIN(212, "GPIO212"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO212")
- ),
- MTK_PIN(
- PINCTRL_PIN(213, "GPIO213"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO213")
- ),
- MTK_PIN(
- PINCTRL_PIN(214, "GPIO214"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO214")
- ),
- MTK_PIN(
- PINCTRL_PIN(215, "GPIO215"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO215")
- ),
- MTK_PIN(
- PINCTRL_PIN(216, "GPIO216"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO216")
- ),
- MTK_PIN(
- PINCTRL_PIN(217, "GPIO217"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO217")
- ),
- MTK_PIN(
- PINCTRL_PIN(218, "GPIO218"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO218")
- ),
- MTK_PIN(
- PINCTRL_PIN(219, "GPIO219"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO219")
- ),
- MTK_PIN(
- PINCTRL_PIN(220, "GPIO220"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO220")
- ),
- MTK_PIN(
- PINCTRL_PIN(221, "GPIO221"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO221")
- ),
- MTK_PIN(
- PINCTRL_PIN(222, "GPIO222"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO222")
- ),
- MTK_PIN(
- PINCTRL_PIN(223, "GPIO223"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO223")
- ),
- MTK_PIN(
- PINCTRL_PIN(224, "GPIO224"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO224")
- ),
- MTK_PIN(
- PINCTRL_PIN(225, "GPIO225"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO225")
- ),
- MTK_PIN(
- PINCTRL_PIN(226, "GPIO226"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO226")
- ),
- MTK_PIN(
- PINCTRL_PIN(227, "GPIO227"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO227")
- ),
- MTK_PIN(
- PINCTRL_PIN(228, "GPIO228"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO228")
- ),
- MTK_PIN(
- PINCTRL_PIN(229, "GPIO229"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO229")
- ),
- MTK_PIN(
- PINCTRL_PIN(230, "GPIO230"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO230")
- ),
- MTK_PIN(
- PINCTRL_PIN(231, "GPIO231"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO231")
- ),
- MTK_PIN(
- PINCTRL_PIN(232, "GPIO232"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO232")
- ),
- MTK_PIN(
- PINCTRL_PIN(233, "GPIO233"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO233")
- ),
- MTK_PIN(
- PINCTRL_PIN(234, "GPIO234"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO234")
- ),
- MTK_PIN(
- PINCTRL_PIN(235, "GPIO235"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO235")
- ),
- MTK_PIN(
- PINCTRL_PIN(236, "EXT_SDIO3"),
- "A8", "mt7623",
- MTK_EINT_FUNCTION(0, 122),
- MTK_FUNCTION(0, "GPIO236"),
- MTK_FUNCTION(1, "EXT_SDIO3"),
- MTK_FUNCTION(2, "IDDIG")
- ),
- MTK_PIN(
- PINCTRL_PIN(237, "EXT_SDIO2"),
- "D8", "mt7623",
- MTK_EINT_FUNCTION(0, 123),
- MTK_FUNCTION(0, "GPIO237"),
- MTK_FUNCTION(1, "EXT_SDIO2"),
- MTK_FUNCTION(2, "DRV_VBUS")
- ),
- MTK_PIN(
- PINCTRL_PIN(238, "EXT_SDIO1"),
- "D9", "mt7623",
- MTK_EINT_FUNCTION(0, 124),
- MTK_FUNCTION(0, "GPIO238"),
- MTK_FUNCTION(1, "EXT_SDIO1")
- ),
- MTK_PIN(
- PINCTRL_PIN(239, "EXT_SDIO0"),
- "B8", "mt7623",
- MTK_EINT_FUNCTION(0, 125),
- MTK_FUNCTION(0, "GPIO239"),
- MTK_FUNCTION(1, "EXT_SDIO0")
- ),
- MTK_PIN(
- PINCTRL_PIN(240, "EXT_XCS"),
- "C9", "mt7623",
- MTK_EINT_FUNCTION(0, 126),
- MTK_FUNCTION(0, "GPIO240"),
- MTK_FUNCTION(1, "EXT_XCS")
- ),
- MTK_PIN(
- PINCTRL_PIN(241, "EXT_SCK"),
- "C8", "mt7623",
- MTK_EINT_FUNCTION(0, 127),
- MTK_FUNCTION(0, "GPIO241"),
- MTK_FUNCTION(1, "EXT_SCK")
- ),
- MTK_PIN(
- PINCTRL_PIN(242, "URTS2"),
- "G18", "mt7623",
- MTK_EINT_FUNCTION(0, 128),
- MTK_FUNCTION(0, "GPIO242"),
- MTK_FUNCTION(1, "URTS2"),
- MTK_FUNCTION(2, "UTXD3"),
- MTK_FUNCTION(3, "URXD3"),
- MTK_FUNCTION(4, "SCL1")
- ),
- MTK_PIN(
- PINCTRL_PIN(243, "UCTS2"),
- "H19", "mt7623",
- MTK_EINT_FUNCTION(0, 129),
- MTK_FUNCTION(0, "GPIO243"),
- MTK_FUNCTION(1, "UCTS2"),
- MTK_FUNCTION(2, "URXD3"),
- MTK_FUNCTION(3, "UTXD3"),
- MTK_FUNCTION(4, "SDA1")
- ),
- MTK_PIN(
- PINCTRL_PIN(244, "GPIO244"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO244")
- ),
- MTK_PIN(
- PINCTRL_PIN(245, "GPIO245"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO245")
- ),
- MTK_PIN(
- PINCTRL_PIN(246, "GPIO246"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO246")
- ),
- MTK_PIN(
- PINCTRL_PIN(247, "GPIO247"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO247")
- ),
- MTK_PIN(
- PINCTRL_PIN(248, "GPIO248"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO248")
- ),
- MTK_PIN(
- PINCTRL_PIN(249, "GPIO249"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO249")
- ),
- MTK_PIN(
- PINCTRL_PIN(250, "GPIO250"),
- "A15", "mt7623",
- MTK_EINT_FUNCTION(0, 135),
- MTK_FUNCTION(0, "GPIO250"),
- MTK_FUNCTION(1, "TEST_MD7"),
- MTK_FUNCTION(6, "PCIE0_CLKREQ_N")
- ),
- MTK_PIN(
- PINCTRL_PIN(251, "GPIO251"),
- "B15", "mt7623",
- MTK_EINT_FUNCTION(0, 136),
- MTK_FUNCTION(0, "GPIO251"),
- MTK_FUNCTION(1, "TEST_MD6"),
- MTK_FUNCTION(6, "PCIE0_WAKE_N")
- ),
- MTK_PIN(
- PINCTRL_PIN(252, "GPIO252"),
- "C16", "mt7623",
- MTK_EINT_FUNCTION(0, 137),
- MTK_FUNCTION(0, "GPIO252"),
- MTK_FUNCTION(1, "TEST_MD5"),
- MTK_FUNCTION(6, "PCIE1_CLKREQ_N")
- ),
- MTK_PIN(
- PINCTRL_PIN(253, "GPIO253"),
- "D17", "mt7623",
- MTK_EINT_FUNCTION(0, 138),
- MTK_FUNCTION(0, "GPIO253"),
- MTK_FUNCTION(1, "TEST_MD4"),
- MTK_FUNCTION(6, "PCIE1_WAKE_N")
- ),
- MTK_PIN(
- PINCTRL_PIN(254, "GPIO254"),
- "D16", "mt7623",
- MTK_EINT_FUNCTION(0, 139),
- MTK_FUNCTION(0, "GPIO254"),
- MTK_FUNCTION(1, "TEST_MD3"),
- MTK_FUNCTION(6, "PCIE2_CLKREQ_N")
- ),
- MTK_PIN(
- PINCTRL_PIN(255, "GPIO255"),
- "C17", "mt7623",
- MTK_EINT_FUNCTION(0, 140),
- MTK_FUNCTION(0, "GPIO255"),
- MTK_FUNCTION(1, "TEST_MD2"),
- MTK_FUNCTION(6, "PCIE2_WAKE_N")
- ),
- MTK_PIN(
- PINCTRL_PIN(256, "GPIO256"),
- "B17", "mt7623",
- MTK_EINT_FUNCTION(0, 141),
- MTK_FUNCTION(0, "GPIO256"),
- MTK_FUNCTION(1, "TEST_MD1")
- ),
- MTK_PIN(
- PINCTRL_PIN(257, "GPIO257"),
- "C15", "mt7623",
- MTK_EINT_FUNCTION(0, 142),
- MTK_FUNCTION(0, "GPIO257"),
- MTK_FUNCTION(1, "TEST_MD0")
- ),
- MTK_PIN(
- PINCTRL_PIN(258, "GPIO258"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO258")
- ),
- MTK_PIN(
- PINCTRL_PIN(259, "GPIO259"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO259")
- ),
- MTK_PIN(
- PINCTRL_PIN(260, "GPIO260"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO260")
- ),
- MTK_PIN(
- PINCTRL_PIN(261, "MSDC1_INS"),
- "AD1", "mt7623",
- MTK_EINT_FUNCTION(0, 146),
- MTK_FUNCTION(0, "GPIO261"),
- MTK_FUNCTION(1, "MSDC1_INS")
- ),
- MTK_PIN(
- PINCTRL_PIN(262, "G2_TXEN"),
- "A23", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO262"),
- MTK_FUNCTION(1, "G2_TXEN")
- ),
- MTK_PIN(
- PINCTRL_PIN(263, "G2_TXD3"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO263"),
- MTK_FUNCTION(1, "G2_TXD3")
- ),
- MTK_PIN(
- PINCTRL_PIN(264, "G2_TXD2"),
- "C24", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO264"),
- MTK_FUNCTION(1, "G2_TXD2")
- ),
- MTK_PIN(
- PINCTRL_PIN(265, "G2_TXD1"),
- "B25", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO265"),
- MTK_FUNCTION(1, "G2_TXD1")
- ),
- MTK_PIN(
- PINCTRL_PIN(266, "G2_TXD0"),
- "A24", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO266"),
- MTK_FUNCTION(1, "G2_TXD0")
- ),
- MTK_PIN(
- PINCTRL_PIN(267, "G2_TXCLK"),
- "C23", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO267"),
- MTK_FUNCTION(1, "G2_TXC")
- ),
- MTK_PIN(
- PINCTRL_PIN(268, "G2_RXCLK"),
- "B23", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO268"),
- MTK_FUNCTION(1, "G2_RXC")
- ),
- MTK_PIN(
- PINCTRL_PIN(269, "G2_RXD0"),
- "D21", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO269"),
- MTK_FUNCTION(1, "G2_RXD0")
- ),
- MTK_PIN(
- PINCTRL_PIN(270, "G2_RXD1"),
- "B22", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO270"),
- MTK_FUNCTION(1, "G2_RXD1")
- ),
- MTK_PIN(
- PINCTRL_PIN(271, "G2_RXD2"),
- "A22", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO271"),
- MTK_FUNCTION(1, "G2_RXD2")
- ),
- MTK_PIN(
- PINCTRL_PIN(272, "G2_RXD3"),
- "C22", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO272"),
- MTK_FUNCTION(1, "G2_RXD3")
- ),
- MTK_PIN(
- PINCTRL_PIN(273, "GPIO273"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO273")
- ),
- MTK_PIN(
- PINCTRL_PIN(274, "G2_RXDV"),
- "C21", "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO274"),
- MTK_FUNCTION(1, "G2_RXDV")
- ),
- MTK_PIN(
- PINCTRL_PIN(275, "G2_MDC"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO275"),
- MTK_FUNCTION(1, "MDC")
- ),
- MTK_PIN(
- PINCTRL_PIN(276, "G2_MDIO"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO276"),
- MTK_FUNCTION(1, "MDIO")
- ),
- MTK_PIN(
- PINCTRL_PIN(277, "GPIO277"),
- NULL, "mt7623",
- MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT),
- MTK_FUNCTION(0, "GPIO277")
- ),
- MTK_PIN(
- PINCTRL_PIN(278, "JTAG_RESET"),
- "H20", "mt7623",
- MTK_EINT_FUNCTION(0, 147),
- MTK_FUNCTION(0, "GPIO278"),
- MTK_FUNCTION(1, "JTAG_RESET")
- ),
-};
-
-#endif /* __PINCTRL_MTK_MT7623_H */
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index 9b00be15d258..7bbc0d3cddcf 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -85,6 +85,7 @@ static const struct pinctrl_pin_desc meson_gxbb_periphs_pins[] = {
MESON_PIN(GPIODV_15, EE_OFF),
MESON_PIN(GPIODV_16, EE_OFF),
MESON_PIN(GPIODV_17, EE_OFF),
+ MESON_PIN(GPIODV_18, EE_OFF),
MESON_PIN(GPIODV_19, EE_OFF),
MESON_PIN(GPIODV_20, EE_OFF),
MESON_PIN(GPIODV_21, EE_OFF),
@@ -137,7 +138,6 @@ static const struct pinctrl_pin_desc meson_gxbb_periphs_pins[] = {
MESON_PIN(GPIOX_19, EE_OFF),
MESON_PIN(GPIOX_20, EE_OFF),
MESON_PIN(GPIOX_21, EE_OFF),
- MESON_PIN(GPIOX_22, EE_OFF),
MESON_PIN(GPIOCLK_0, EE_OFF),
MESON_PIN(GPIOCLK_1, EE_OFF),
@@ -161,6 +161,11 @@ static const unsigned int nor_q_pins[] = { PIN(BOOT_12, EE_OFF) };
static const unsigned int nor_c_pins[] = { PIN(BOOT_13, EE_OFF) };
static const unsigned int nor_cs_pins[] = { PIN(BOOT_15, EE_OFF) };
+static const unsigned int spi_sclk_pins[] = { PIN(GPIOZ_6, EE_OFF) };
+static const unsigned int spi_ss0_pins[] = { PIN(GPIOZ_7, EE_OFF) };
+static const unsigned int spi_miso_pins[] = { PIN(GPIOZ_12, EE_OFF) };
+static const unsigned int spi_mosi_pins[] = { PIN(GPIOZ_13, EE_OFF) };
+
static const unsigned int sdcard_d0_pins[] = { PIN(CARD_1, EE_OFF) };
static const unsigned int sdcard_d1_pins[] = { PIN(CARD_0, EE_OFF) };
static const unsigned int sdcard_d2_pins[] = { PIN(CARD_5, EE_OFF) };
@@ -290,6 +295,9 @@ static const unsigned int i2s_out_ch45_ao_pins[] = { PIN(GPIOAO_13, 0) };
static const unsigned int spdif_out_ao_6_pins[] = { PIN(GPIOAO_6, 0) };
static const unsigned int spdif_out_ao_13_pins[] = { PIN(GPIOAO_13, 0) };
+static const unsigned int ao_cec_pins[] = { PIN(GPIOAO_12, 0) };
+static const unsigned int ee_cec_pins[] = { PIN(GPIOAO_12, 0) };
+
static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
GPIO_GROUP(GPIOZ_0, EE_OFF),
GPIO_GROUP(GPIOZ_1, EE_OFF),
@@ -462,6 +470,10 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
GROUP(eth_txd1, 6, 4),
GROUP(eth_txd2, 6, 3),
GROUP(eth_txd3, 6, 2),
+ GROUP(spi_ss0, 5, 26),
+ GROUP(spi_sclk, 5, 27),
+ GROUP(spi_miso, 5, 28),
+ GROUP(spi_mosi, 5, 29),
/* Bank H */
GROUP(hdmi_hpd, 1, 26),
@@ -551,6 +563,8 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
GROUP(i2s_out_ch45_ao, 1, 1),
GROUP(spdif_out_ao_6, 0, 16),
GROUP(spdif_out_ao_13, 0, 4),
+ GROUP(ao_cec, 0, 15),
+ GROUP(ee_cec, 0, 14),
};
static const char * const gpio_periphs_groups[] = {
@@ -598,6 +612,10 @@ static const char * const nor_groups[] = {
"nor_d", "nor_q", "nor_c", "nor_cs",
};
+static const char * const spi_groups[] = {
+ "spi_mosi", "spi_miso", "spi_ss0", "spi_sclk",
+};
+
static const char * const sdcard_groups[] = {
"sdcard_d0", "sdcard_d1", "sdcard_d2", "sdcard_d3",
"sdcard_cmd", "sdcard_clk",
@@ -739,10 +757,15 @@ static const char * const spdif_out_ao_groups[] = {
"spdif_out_ao_6", "spdif_out_ao_13",
};
+static const char * const cec_ao_groups[] = {
+ "ao_cec", "ee_cec",
+};
+
static struct meson_pmx_func meson_gxbb_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
FUNCTION(nor),
+ FUNCTION(spi),
FUNCTION(sdcard),
FUNCTION(sdio),
FUNCTION(nand),
@@ -779,23 +802,24 @@ static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
FUNCTION(pwm_ao_b),
FUNCTION(i2s_out_ao),
FUNCTION(spdif_out_ao),
+ FUNCTION(cec_ao),
};
static struct meson_bank meson_gxbb_periphs_banks[] = {
- /* name first last pullen pull dir out in */
- BANK("X", PIN(GPIOX_0, EE_OFF), PIN(GPIOX_22, EE_OFF), 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
- BANK("Y", PIN(GPIOY_0, EE_OFF), PIN(GPIOY_16, EE_OFF), 1, 0, 1, 0, 3, 0, 4, 0, 5, 0),
- BANK("DV", PIN(GPIODV_0, EE_OFF), PIN(GPIODV_29, EE_OFF), 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
- BANK("H", PIN(GPIOH_0, EE_OFF), PIN(GPIOH_3, EE_OFF), 1, 20, 1, 20, 3, 20, 4, 20, 5, 20),
- BANK("Z", PIN(GPIOZ_0, EE_OFF), PIN(GPIOZ_15, EE_OFF), 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
- BANK("CARD", PIN(CARD_0, EE_OFF), PIN(CARD_6, EE_OFF), 2, 20, 2, 20, 6, 20, 7, 20, 8, 20),
- BANK("BOOT", PIN(BOOT_0, EE_OFF), PIN(BOOT_17, EE_OFF), 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
- BANK("CLK", PIN(GPIOCLK_0, EE_OFF), PIN(GPIOCLK_3, EE_OFF), 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
+ /* name first last irq pullen pull dir out in */
+ BANK("X", PIN(GPIOX_0, EE_OFF), PIN(GPIOX_22, EE_OFF), 106, 128, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
+ BANK("Y", PIN(GPIOY_0, EE_OFF), PIN(GPIOY_16, EE_OFF), 89, 105, 1, 0, 1, 0, 3, 0, 4, 0, 5, 0),
+ BANK("DV", PIN(GPIODV_0, EE_OFF), PIN(GPIODV_29, EE_OFF), 59, 88, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
+ BANK("H", PIN(GPIOH_0, EE_OFF), PIN(GPIOH_3, EE_OFF), 30, 33, 1, 20, 1, 20, 3, 20, 4, 20, 5, 20),
+ BANK("Z", PIN(GPIOZ_0, EE_OFF), PIN(GPIOZ_15, EE_OFF), 14, 29, 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
+ BANK("CARD", PIN(CARD_0, EE_OFF), PIN(CARD_6, EE_OFF), 52, 58, 2, 20, 2, 20, 6, 20, 7, 20, 8, 20),
+ BANK("BOOT", PIN(BOOT_0, EE_OFF), PIN(BOOT_17, EE_OFF), 34, 51, 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
+ BANK("CLK", PIN(GPIOCLK_0, EE_OFF), PIN(GPIOCLK_3, EE_OFF), 129, 132, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
};
static struct meson_bank meson_gxbb_aobus_banks[] = {
- /* name first last pullen pull dir out in */
- BANK("AO", PIN(GPIOAO_0, 0), PIN(GPIOAO_13, 0), 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("AO", PIN(GPIOAO_0, 0), PIN(GPIOAO_13, 0), 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 998210eacf37..36c14b85fc7c 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -89,6 +89,7 @@ static const struct pinctrl_pin_desc meson_gxl_periphs_pins[] = {
MESON_PIN(GPIODV_15, EE_OFF),
MESON_PIN(GPIODV_16, EE_OFF),
MESON_PIN(GPIODV_17, EE_OFF),
+ MESON_PIN(GPIODV_18, EE_OFF),
MESON_PIN(GPIODV_19, EE_OFF),
MESON_PIN(GPIODV_20, EE_OFF),
MESON_PIN(GPIODV_21, EE_OFF),
@@ -141,6 +142,11 @@ static const unsigned int nor_q_pins[] = { PIN(BOOT_12, EE_OFF) };
static const unsigned int nor_c_pins[] = { PIN(BOOT_13, EE_OFF) };
static const unsigned int nor_cs_pins[] = { PIN(BOOT_15, EE_OFF) };
+static const unsigned int spi_mosi_pins[] = { PIN(GPIOX_8, EE_OFF) };
+static const unsigned int spi_miso_pins[] = { PIN(GPIOX_9, EE_OFF) };
+static const unsigned int spi_ss0_pins[] = { PIN(GPIOX_10, EE_OFF) };
+static const unsigned int spi_sclk_pins[] = { PIN(GPIOX_11, EE_OFF) };
+
static const unsigned int sdcard_d0_pins[] = { PIN(CARD_1, EE_OFF) };
static const unsigned int sdcard_d1_pins[] = { PIN(CARD_0, EE_OFF) };
static const unsigned int sdcard_d2_pins[] = { PIN(CARD_5, EE_OFF) };
@@ -234,6 +240,28 @@ static const unsigned int i2s_out_ch67_z_pins[] = { PIN(GPIOZ_7, EE_OFF) };
static const unsigned int spdif_out_h_pins[] = { PIN(GPIOH_4, EE_OFF) };
+static const unsigned int eth_link_led_pins[] = { PIN(GPIOZ_14, EE_OFF) };
+static const unsigned int eth_act_led_pins[] = { PIN(GPIOZ_15, EE_OFF) };
+
+static const unsigned int tsin_a_d0_pins[] = { PIN(GPIODV_0, EE_OFF) };
+static const unsigned int tsin_a_d0_x_pins[] = { PIN(GPIOX_10, EE_OFF) };
+static const unsigned int tsin_a_clk_pins[] = { PIN(GPIODV_8, EE_OFF) };
+static const unsigned int tsin_a_clk_x_pins[] = { PIN(GPIOX_11, EE_OFF) };
+static const unsigned int tsin_a_sop_pins[] = { PIN(GPIODV_9, EE_OFF) };
+static const unsigned int tsin_a_sop_x_pins[] = { PIN(GPIOX_8, EE_OFF) };
+static const unsigned int tsin_a_d_valid_pins[] = { PIN(GPIODV_10, EE_OFF) };
+static const unsigned int tsin_a_d_valid_x_pins[] = { PIN(GPIOX_9, EE_OFF) };
+static const unsigned int tsin_a_fail_pins[] = { PIN(GPIODV_11, EE_OFF) };
+static const unsigned int tsin_a_dp_pins[] = {
+ PIN(GPIODV_1, EE_OFF),
+ PIN(GPIODV_2, EE_OFF),
+ PIN(GPIODV_3, EE_OFF),
+ PIN(GPIODV_4, EE_OFF),
+ PIN(GPIODV_5, EE_OFF),
+ PIN(GPIODV_6, EE_OFF),
+ PIN(GPIODV_7, EE_OFF),
+};
+
static const struct pinctrl_pin_desc meson_gxl_aobus_pins[] = {
MESON_PIN(GPIOAO_0, 0),
MESON_PIN(GPIOAO_1, 0),
@@ -271,11 +299,14 @@ static const unsigned int pwm_ao_a_8_pins[] = { PIN(GPIOAO_8, 0) };
static const unsigned int pwm_ao_b_pins[] = { PIN(GPIOAO_9, 0) };
static const unsigned int pwm_ao_b_6_pins[] = { PIN(GPIOAO_6, 0) };
-static const unsigned int i2s_out_ch23_ao_pins[] = { PIN(GPIOAO_8, EE_OFF) };
-static const unsigned int i2s_out_ch45_ao_pins[] = { PIN(GPIOAO_9, EE_OFF) };
+static const unsigned int i2s_out_ch23_ao_pins[] = { PIN(GPIOAO_8, 0) };
+static const unsigned int i2s_out_ch45_ao_pins[] = { PIN(GPIOAO_9, 0) };
-static const unsigned int spdif_out_ao_6_pins[] = { PIN(GPIOAO_6, EE_OFF) };
-static const unsigned int spdif_out_ao_9_pins[] = { PIN(GPIOAO_9, EE_OFF) };
+static const unsigned int spdif_out_ao_6_pins[] = { PIN(GPIOAO_6, 0) };
+static const unsigned int spdif_out_ao_9_pins[] = { PIN(GPIOAO_9, 0) };
+
+static const unsigned int ao_cec_pins[] = { PIN(GPIOAO_8, 0) };
+static const unsigned int ee_cec_pins[] = { PIN(GPIOAO_8, 0) };
static struct meson_pmx_group meson_gxl_periphs_groups[] = {
GPIO_GROUP(GPIOZ_0, EE_OFF),
@@ -405,6 +436,14 @@ static struct meson_pmx_group meson_gxl_periphs_groups[] = {
GROUP(pwm_a, 5, 25),
GROUP(pwm_e, 5, 15),
GROUP(pwm_f_x, 5, 14),
+ GROUP(spi_mosi, 5, 3),
+ GROUP(spi_miso, 5, 2),
+ GROUP(spi_ss0, 5, 1),
+ GROUP(spi_sclk, 5, 0),
+ GROUP(tsin_a_sop_x, 6, 3),
+ GROUP(tsin_a_d_valid_x, 6, 2),
+ GROUP(tsin_a_d0_x, 6, 1),
+ GROUP(tsin_a_clk_x, 6, 0),
/* Bank Z */
GROUP(eth_mdio, 4, 23),
@@ -425,6 +464,8 @@ static struct meson_pmx_group meson_gxl_periphs_groups[] = {
GROUP(i2s_out_ch23_z, 3, 26),
GROUP(i2s_out_ch45_z, 3, 25),
GROUP(i2s_out_ch67_z, 3, 24),
+ GROUP(eth_link_led, 4, 25),
+ GROUP(eth_act_led, 4, 24),
/* Bank H */
GROUP(hdmi_hpd, 6, 31),
@@ -451,6 +492,12 @@ static struct meson_pmx_group meson_gxl_periphs_groups[] = {
GROUP(i2c_sck_c, 1, 10),
GROUP(pwm_b, 2, 11),
GROUP(pwm_d, 2, 12),
+ GROUP(tsin_a_d0, 2, 4),
+ GROUP(tsin_a_dp, 2, 3),
+ GROUP(tsin_a_clk, 2, 2),
+ GROUP(tsin_a_sop, 2, 1),
+ GROUP(tsin_a_d_valid, 2, 0),
+ GROUP(tsin_a_fail, 1, 31),
/* Bank BOOT */
GROUP(emmc_nand_d07, 7, 31),
@@ -518,6 +565,8 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = {
GROUP(i2s_out_ch45_ao, 1, 1),
GROUP(spdif_out_ao_6, 0, 16),
GROUP(spdif_out_ao_9, 0, 4),
+ GROUP(ao_cec, 0, 15),
+ GROUP(ee_cec, 0, 14),
};
static const char * const gpio_periphs_groups[] = {
@@ -560,6 +609,10 @@ static const char * const nor_groups[] = {
"nor_d", "nor_q", "nor_c", "nor_cs",
};
+static const char * const spi_groups[] = {
+ "spi_mosi", "spi_miso", "spi_ss0", "spi_sclk",
+};
+
static const char * const sdcard_groups[] = {
"sdcard_d0", "sdcard_d1", "sdcard_d2", "sdcard_d3",
"sdcard_cmd", "sdcard_clk",
@@ -647,6 +700,16 @@ static const char * const spdif_out_groups[] = {
"spdif_out_h",
};
+static const char * const eth_led_groups[] = {
+ "eth_link_led", "eth_act_led",
+};
+
+static const char * const tsin_a_groups[] = {
+ "tsin_a_clk", "tsin_a_clk_x", "tsin_a_sop", "tsin_a_sop_x",
+ "tsin_a_d_valid", "tsin_a_d_valid_x", "tsin_a_d0", "tsin_a_d0_x",
+ "tsin_a_dp", "tsin_a_fail",
+};
+
static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
"GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
@@ -689,10 +752,15 @@ static const char * const spdif_out_ao_groups[] = {
"spdif_out_ao_6", "spdif_out_ao_9",
};
+static const char * const cec_ao_groups[] = {
+ "ao_cec", "ee_cec",
+};
+
static struct meson_pmx_func meson_gxl_periphs_functions[] = {
FUNCTION(gpio_periphs),
FUNCTION(emmc),
FUNCTION(nor),
+ FUNCTION(spi),
FUNCTION(sdcard),
FUNCTION(sdio),
FUNCTION(nand),
@@ -713,6 +781,8 @@ static struct meson_pmx_func meson_gxl_periphs_functions[] = {
FUNCTION(hdmi_i2c),
FUNCTION(i2s_out),
FUNCTION(spdif_out),
+ FUNCTION(eth_led),
+ FUNCTION(tsin_a),
};
static struct meson_pmx_func meson_gxl_aobus_functions[] = {
@@ -726,22 +796,23 @@ static struct meson_pmx_func meson_gxl_aobus_functions[] = {
FUNCTION(pwm_ao_b),
FUNCTION(i2s_out_ao),
FUNCTION(spdif_out_ao),
+ FUNCTION(cec_ao),
};
static struct meson_bank meson_gxl_periphs_banks[] = {
- /* name first last pullen pull dir out in */
- BANK("X", PIN(GPIOX_0, EE_OFF), PIN(GPIOX_18, EE_OFF), 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
- BANK("DV", PIN(GPIODV_0, EE_OFF), PIN(GPIODV_29, EE_OFF), 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
- BANK("H", PIN(GPIOH_0, EE_OFF), PIN(GPIOH_9, EE_OFF), 1, 20, 1, 20, 3, 20, 4, 20, 5, 20),
- BANK("Z", PIN(GPIOZ_0, EE_OFF), PIN(GPIOZ_15, EE_OFF), 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
- BANK("CARD", PIN(CARD_0, EE_OFF), PIN(CARD_6, EE_OFF), 2, 20, 2, 20, 6, 20, 7, 20, 8, 20),
- BANK("BOOT", PIN(BOOT_0, EE_OFF), PIN(BOOT_15, EE_OFF), 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
- BANK("CLK", PIN(GPIOCLK_0, EE_OFF), PIN(GPIOCLK_1, EE_OFF), 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
+ /* name first last irq pullen pull dir out in */
+ BANK("X", PIN(GPIOX_0, EE_OFF), PIN(GPIOX_18, EE_OFF), 89, 107, 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
+ BANK("DV", PIN(GPIODV_0, EE_OFF), PIN(GPIODV_29, EE_OFF), 83, 88, 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
+ BANK("H", PIN(GPIOH_0, EE_OFF), PIN(GPIOH_9, EE_OFF), 26, 35, 1, 20, 1, 20, 3, 20, 4, 20, 5, 20),
+ BANK("Z", PIN(GPIOZ_0, EE_OFF), PIN(GPIOZ_15, EE_OFF), 10, 25, 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
+ BANK("CARD", PIN(CARD_0, EE_OFF), PIN(CARD_6, EE_OFF), 52, 58, 2, 20, 2, 20, 6, 20, 7, 20, 8, 20),
+ BANK("BOOT", PIN(BOOT_0, EE_OFF), PIN(BOOT_15, EE_OFF), 36, 51, 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
+ BANK("CLK", PIN(GPIOCLK_0, EE_OFF), PIN(GPIOCLK_1, EE_OFF), 108, 109, 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
};
static struct meson_bank meson_gxl_aobus_banks[] = {
- /* name first last pullen pull dir out in */
- BANK("AO", PIN(GPIOAO_0, 0), PIN(GPIOAO_9, 0), 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("AO", PIN(GPIOAO_0, 0), PIN(GPIOAO_9, 0), 0, 9, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index 1aa871d5431e..890f296f5840 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -81,6 +81,7 @@ enum meson_reg_type {
* @name: bank name
* @first: first pin of the bank
* @last: last pin of the bank
+ * @irq: hwirq base number of the bank
* @regs: array of register descriptors
*
* A bank represents a set of pins controlled by a contiguous set of
@@ -92,6 +93,8 @@ struct meson_bank {
const char *name;
unsigned int first;
unsigned int last;
+ int irq_first;
+ int irq_last;
struct meson_reg_desc regs[NUM_REG];
};
@@ -147,12 +150,14 @@ struct meson_pinctrl {
.num_groups = ARRAY_SIZE(fn ## _groups), \
}
-#define BANK(n, f, l, per, peb, pr, pb, dr, db, or, ob, ir, ib) \
+#define BANK(n, f, l, fi, li, per, peb, pr, pb, dr, db, or, ob, ir, ib) \
{ \
- .name = n, \
- .first = f, \
- .last = l, \
- .regs = { \
+ .name = n, \
+ .first = f, \
+ .last = l, \
+ .irq_first = fi, \
+ .irq_last = li, \
+ .regs = { \
[REG_PULLEN] = { per, peb }, \
[REG_PULL] = { pr, pb }, \
[REG_DIR] = { dr, db }, \
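
Editorial aside, not part of the patch: with this change every meson_bank entry also carries the hwirq range it maps to. A minimal illustrative sketch of a bank written against the extended BANK() macro, mirroring the "AO" bank updated in the meson-gxbb hunk above, so the new argument order is easy to see:

/*
 * Illustrative only: the two new arguments ("0, 13" here) are
 * irq_first/irq_last, i.e. the first and last GPIO hwirq numbers
 * served by the bank; the register descriptors that follow keep
 * their previous order.
 */
static struct meson_bank example_aobus_banks[] = {
	/*   name  first             last               irq    pullen  pull   dir    out    in  */
	BANK("AO", PIN(GPIOAO_0, 0), PIN(GPIOAO_13, 0), 0, 13, 0,  0,  0, 16, 0, 0, 0, 16, 1, 0),
};
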
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index 07f1cb21c1b8..970f6f14502c 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -205,6 +205,9 @@ static const unsigned int i2c_sck_d0_pins[] = { PIN(GPIOX_17, 0) };
static const unsigned int xtal_32k_out_pins[] = { PIN(GPIOX_10, 0) };
static const unsigned int xtal_24m_out_pins[] = { PIN(GPIOX_11, 0) };
+static const unsigned int pwm_e_pins[] = { PIN(GPIOX_10, 0) };
+static const unsigned int pwm_b_x_pins[] = { PIN(GPIOX_11, 0) };
+
/* bank Y */
static const unsigned int uart_tx_c_pins[] = { PIN(GPIOY_0, 0) };
static const unsigned int uart_rx_c_pins[] = { PIN(GPIOY_1, 0) };
@@ -219,6 +222,20 @@ static const unsigned int pcm_clk_b_pins[] = { PIN(GPIOY_7, 0) };
static const unsigned int i2c_sda_c0_pins[] = { PIN(GPIOY_0, 0) };
static const unsigned int i2c_sck_c0_pins[] = { PIN(GPIOY_1, 0) };
+static const unsigned int pwm_a_y_pins[] = { PIN(GPIOY_16, 0) };
+
+static const unsigned int i2s_out_ch45_pins[] = { PIN(GPIOY_0, 0) };
+static const unsigned int i2s_out_ch23_pins[] = { PIN(GPIOY_1, 0) };
+static const unsigned int i2s_out_ch01_pins[] = { PIN(GPIOY_4, 0) };
+static const unsigned int i2s_in_ch01_pins[] = { PIN(GPIOY_5, 0) };
+static const unsigned int i2s_lr_clk_in_pins[] = { PIN(GPIOY_6, 0) };
+static const unsigned int i2s_ao_clk_in_pins[] = { PIN(GPIOY_7, 0) };
+static const unsigned int i2s_am_clk_pins[] = { PIN(GPIOY_8, 0) };
+static const unsigned int i2s_out_ch78_pins[] = { PIN(GPIOY_9, 0) };
+
+static const unsigned int spdif_in_pins[] = { PIN(GPIOY_2, 0) };
+static const unsigned int spdif_out_pins[] = { PIN(GPIOY_3, 0) };
+
/* bank DV */
static const unsigned int dvin_rgb_pins[] = { PIN(GPIODV_0, 0), PIN(GPIODV_1, 0),
PIN(GPIODV_2, 0), PIN(GPIODV_3, 0),
@@ -264,6 +281,10 @@ static const unsigned int uart_rts_b1_pins[] = { PIN(GPIODV_27, 0) };
static const unsigned int vga_vs_pins[] = { PIN(GPIODV_24, 0) };
static const unsigned int vga_hs_pins[] = { PIN(GPIODV_25, 0) };
+static const unsigned int pwm_c_dv9_pins[] = { PIN(GPIODV_9, 0) };
+static const unsigned int pwm_c_dv29_pins[] = { PIN(GPIODV_29, 0) };
+static const unsigned int pwm_d_pins[] = { PIN(GPIODV_28, 0) };
+
/* bank H */
static const unsigned int hdmi_hpd_pins[] = { PIN(GPIOH_0, 0) };
static const unsigned int hdmi_sda_pins[] = { PIN(GPIOH_1, 0) };
@@ -312,6 +333,11 @@ static const unsigned int i2c_sck_a1_pins[] = { PIN(GPIOZ_1, 0) };
static const unsigned int i2c_sda_a2_pins[] = { PIN(GPIOZ_0, 0) };
static const unsigned int i2c_sck_a2_pins[] = { PIN(GPIOZ_1, 0) };
+static const unsigned int pwm_a_z0_pins[] = { PIN(GPIOZ_0, 0) };
+static const unsigned int pwm_a_z7_pins[] = { PIN(GPIOZ_7, 0) };
+static const unsigned int pwm_b_z_pins[] = { PIN(GPIOZ_1, 0) };
+static const unsigned int pwm_c_z_pins[] = { PIN(GPIOZ_8, 0) };
+
/* bank BOOT */
static const unsigned int sd_d0_c_pins[] = { PIN(BOOT_0, 0) };
static const unsigned int sd_d1_c_pins[] = { PIN(BOOT_1, 0) };
@@ -369,6 +395,7 @@ static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, AO_OFF) };
static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, AO_OFF) };
static const unsigned int remote_input_pins[] = { PIN(GPIOAO_7, AO_OFF) };
+static const unsigned int remote_output_ao_pins[] = { PIN(GPIOAO_13, AO_OFF) };
static const unsigned int i2c_slave_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
static const unsigned int i2c_slave_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
@@ -382,6 +409,15 @@ static const unsigned int uart_rx_ao_b1_pins[] = { PIN(GPIOAO_5, AO_OFF) };
static const unsigned int i2c_mst_sck_ao_pins[] = { PIN(GPIOAO_4, AO_OFF) };
static const unsigned int i2c_mst_sda_ao_pins[] = { PIN(GPIOAO_5, AO_OFF) };
+static const unsigned int pwm_f_ao_pins[] = { PIN(GPIO_TEST_N, AO_OFF) };
+
+static const unsigned int i2s_am_clk_out_ao_pins[] = { PIN(GPIOAO_8, AO_OFF) };
+static const unsigned int i2s_ao_clk_out_ao_pins[] = { PIN(GPIOAO_9, AO_OFF) };
+static const unsigned int i2s_lr_clk_out_ao_pins[] = { PIN(GPIOAO_10, AO_OFF) };
+static const unsigned int i2s_out_ch01_ao_pins[] = { PIN(GPIOAO_11, AO_OFF) };
+
+static const unsigned int hdmi_cec_ao_pins[] = { PIN(GPIOAO_12, AO_OFF) };
+
static struct meson_pmx_group meson8_cbus_groups[] = {
GPIO_GROUP(GPIOX_0, 0),
GPIO_GROUP(GPIOX_1, 0),
@@ -523,6 +559,9 @@ static struct meson_pmx_group meson8_cbus_groups[] = {
GROUP(xtal_32k_out, 3, 22),
GROUP(xtal_24m_out, 3, 23),
+ GROUP(pwm_e, 9, 19),
+ GROUP(pwm_b_x, 2, 3),
+
/* bank Y */
GROUP(uart_tx_c, 1, 19),
GROUP(uart_rx_c, 1, 18),
@@ -537,6 +576,20 @@ static struct meson_pmx_group meson8_cbus_groups[] = {
GROUP(i2c_sda_c0, 1, 15),
GROUP(i2c_sck_c0, 1, 14),
+ GROUP(pwm_a_y, 9, 14),
+
+ GROUP(i2s_out_ch45, 1, 10),
+ GROUP(i2s_out_ch23, 1, 19),
+ GROUP(i2s_out_ch01, 1, 6),
+ GROUP(i2s_in_ch01, 1, 5),
+ GROUP(i2s_lr_clk_in, 1, 4),
+ GROUP(i2s_ao_clk_in, 1, 2),
+ GROUP(i2s_am_clk, 1, 0),
+ GROUP(i2s_out_ch78, 1, 11),
+
+ GROUP(spdif_in, 1, 8),
+ GROUP(spdif_out, 1, 7),
+
/* bank DV */
GROUP(dvin_rgb, 0, 6),
GROUP(dvin_vs, 0, 9),
@@ -571,6 +624,10 @@ static struct meson_pmx_group meson8_cbus_groups[] = {
GROUP(vga_vs, 0, 21),
GROUP(vga_hs, 0, 20),
+ GROUP(pwm_c_dv9, 3, 24),
+ GROUP(pwm_c_dv29, 3, 25),
+ GROUP(pwm_d, 3, 26),
+
/* bank H */
GROUP(hdmi_hpd, 1, 26),
GROUP(hdmi_sda, 1, 25),
@@ -619,6 +676,11 @@ static struct meson_pmx_group meson8_cbus_groups[] = {
GROUP(i2c_sda_a2, 5, 7),
GROUP(i2c_sck_a2, 5, 6),
+ GROUP(pwm_a_z0, 9, 16),
+ GROUP(pwm_a_z7, 2, 0),
+ GROUP(pwm_b_z, 9, 15),
+ GROUP(pwm_c_z, 2, 1),
+
/* bank BOOT */
GROUP(sd_d0_c, 6, 29),
GROUP(sd_d1_c, 6, 28),
@@ -689,6 +751,7 @@ static struct meson_pmx_group meson8_aobus_groups[] = {
GROUP(uart_rts_ao_a, 0, 9),
GROUP(remote_input, 0, 0),
+ GROUP(remote_output_ao, 0, 31),
GROUP(i2c_slave_sck_ao, 0, 2),
GROUP(i2c_slave_sda_ao, 0, 1),
@@ -701,6 +764,15 @@ static struct meson_pmx_group meson8_aobus_groups[] = {
GROUP(i2c_mst_sck_ao, 0, 6),
GROUP(i2c_mst_sda_ao, 0, 5),
+
+ GROUP(pwm_f_ao, 0, 19),
+
+ GROUP(i2s_am_clk_out_ao, 0, 30),
+ GROUP(i2s_ao_clk_out_ao, 0, 29),
+ GROUP(i2s_lr_clk_out_ao, 0, 28),
+ GROUP(i2s_out_ch01_ao, 0, 27),
+
+ GROUP(hdmi_cec_ao, 0, 17),
};
static const char * const gpio_groups[] = {
@@ -828,6 +900,12 @@ static const char * const i2c_b_groups[] = {
"i2c_sda_b", "i2c_sck_b"
};
+static const char * const i2s_groups[] = {
+	"i2s_out_ch45", "i2s_out_ch23", "i2s_out_ch01",
+	"i2s_in_ch01", "i2s_lr_clk_in", "i2s_ao_clk_in",
+	"i2s_am_clk", "i2s_out_ch78"
+};
+
static const char * const sd_c_groups[] = {
"sd_d0_c", "sd_d1_c", "sd_d2_c", "sd_d3_c",
"sd_cmd_c", "sd_clk_c"
@@ -849,6 +927,26 @@ static const char * const nor_groups[] = {
"nor_d", "nor_q", "nor_c", "nor_cs"
};
+static const char * const pwm_a_groups[] = {
+ "pwm_a_y", "pwm_a_z0", "pwm_a_z7"
+};
+
+static const char * const pwm_b_groups[] = {
+ "pwm_b_x", "pwm_b_z"
+};
+
+static const char * const pwm_c_groups[] = {
+ "pwm_c_dv9", "pwm_c_dv29", "pwm_c_z"
+};
+
+static const char * const pwm_d_groups[] = {
+ "pwm_d"
+};
+
+static const char * const pwm_e_groups[] = {
+ "pwm_e"
+};
+
static const char * const sd_b_groups[] = {
"sd_d1_b", "sd_d0_b", "sd_clk_b", "sd_cmd_b",
"sd_d3_b", "sd_d2_b"
@@ -858,12 +956,16 @@ static const char * const sdxc_b_groups[] = {
"sdxc_d13_b", "sdxc_d0_b", "sdxc_clk_b", "sdxc_cmd_b"
};
+static const char * const spdif_groups[] = {
+ "spdif_in", "spdif_out"
+};
+
static const char * const uart_ao_groups[] = {
"uart_tx_ao_a", "uart_rx_ao_a", "uart_cts_ao_a", "uart_rts_ao_a"
};
static const char * const remote_groups[] = {
- "remote_input"
+ "remote_input", "remote_output_ao"
};
static const char * const i2c_slave_ao_groups[] = {
@@ -878,6 +980,19 @@ static const char * const i2c_mst_ao_groups[] = {
"i2c_mst_sck_ao", "i2c_mst_sda_ao"
};
+static const char * const pwm_f_ao_groups[] = {
+ "pwm_f_ao"
+};
+
+static const char * const i2s_ao_groups[] = {
+ "i2s_am_clk_out_ao", "i2s_ao_clk_out_ao", "i2s_lr_clk_out_ao",
+ "i2s_out_ch01_ao"
+};
+
+static const char * const hdmi_cec_ao_groups[] = {
+ "hdmi_cec_ao"
+};
+
static struct meson_pmx_func meson8_cbus_functions[] = {
FUNCTION(gpio),
FUNCTION(sd_a),
@@ -905,6 +1020,13 @@ static struct meson_pmx_func meson8_cbus_functions[] = {
FUNCTION(nor),
FUNCTION(sd_b),
FUNCTION(sdxc_b),
+ FUNCTION(pwm_a),
+ FUNCTION(pwm_b),
+ FUNCTION(pwm_c),
+ FUNCTION(pwm_d),
+ FUNCTION(pwm_e),
+ FUNCTION(i2s),
+ FUNCTION(spdif),
};
static struct meson_pmx_func meson8_aobus_functions[] = {
@@ -913,22 +1035,25 @@ static struct meson_pmx_func meson8_aobus_functions[] = {
FUNCTION(i2c_slave_ao),
FUNCTION(uart_ao_b),
FUNCTION(i2c_mst_ao),
+ FUNCTION(pwm_f_ao),
+ FUNCTION(i2s_ao),
+ FUNCTION(hdmi_cec_ao),
};
static struct meson_bank meson8_cbus_banks[] = {
- /* name first last pullen pull dir out in */
- BANK("X", PIN(GPIOX_0, 0), PIN(GPIOX_21, 0), 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
- BANK("Y", PIN(GPIOY_0, 0), PIN(GPIOY_16, 0), 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
- BANK("DV", PIN(GPIODV_0, 0), PIN(GPIODV_29, 0), 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
- BANK("H", PIN(GPIOH_0, 0), PIN(GPIOH_9, 0), 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
- BANK("Z", PIN(GPIOZ_0, 0), PIN(GPIOZ_14, 0), 1, 0, 1, 0, 3, 17, 4, 17, 5, 17),
- BANK("CARD", PIN(CARD_0, 0), PIN(CARD_6, 0), 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
- BANK("BOOT", PIN(BOOT_0, 0), PIN(BOOT_18, 0), 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("X", PIN(GPIOX_0, 0), PIN(GPIOX_21, 0), 112, 133, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
+ BANK("Y", PIN(GPIOY_0, 0), PIN(GPIOY_16, 0), 95, 111, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
+ BANK("DV", PIN(GPIODV_0, 0), PIN(GPIODV_29, 0), 65, 94, 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
+ BANK("H", PIN(GPIOH_0, 0), PIN(GPIOH_9, 0), 29, 38, 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
+ BANK("Z", PIN(GPIOZ_0, 0), PIN(GPIOZ_14, 0), 14, 28, 1, 0, 1, 0, 3, 17, 4, 17, 5, 17),
+ BANK("CARD", PIN(CARD_0, 0), PIN(CARD_6, 0), 58, 64, 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
+ BANK("BOOT", PIN(BOOT_0, 0), PIN(BOOT_18, 0), 39, 57, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
};
static struct meson_bank meson8_aobus_banks[] = {
- /* name first last pullen pull dir out in */
- BANK("AO", PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("AO", PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index bf747eb1f3f4..71f216b5b0b9 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -124,6 +124,12 @@ static const struct pinctrl_pin_desc meson8b_aobus_pins[] = {
MESON_PIN(GPIOAO_11, AO_OFF),
MESON_PIN(GPIOAO_12, AO_OFF),
MESON_PIN(GPIOAO_13, AO_OFF),
+
+ /*
+	 * The following 2 pins are not mentioned in the public datasheet.
+	 * According to this datasheet, they can't be used with the gpio
+	 * interrupt controller.
+ */
MESON_PIN(GPIO_BSD_EN, AO_OFF),
MESON_PIN(GPIO_TEST_N, AO_OFF),
};
@@ -881,19 +887,25 @@ static struct meson_pmx_func meson8b_aobus_functions[] = {
};
static struct meson_bank meson8b_cbus_banks[] = {
- /* name first last pullen pull dir out in */
- BANK("X", PIN(GPIOX_0, 0), PIN(GPIOX_21, 0), 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
- BANK("Y", PIN(GPIOY_0, 0), PIN(GPIOY_14, 0), 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
- BANK("DV", PIN(GPIODV_9, 0), PIN(GPIODV_29, 0), 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
- BANK("H", PIN(GPIOH_0, 0), PIN(GPIOH_9, 0), 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
- BANK("CARD", PIN(CARD_0, 0), PIN(CARD_6, 0), 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
- BANK("BOOT", PIN(BOOT_0, 0), PIN(BOOT_18, 0), 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
- BANK("DIF", PIN(DIF_0_P, 0), PIN(DIF_4_N, 0), 5, 8, 5, 8, 12, 12, 13, 12, 14, 12),
+ /* name first last irq pullen pull dir out in */
+ BANK("X", PIN(GPIOX_0, 0), PIN(GPIOX_21, 0), 97, 118, 4, 0, 4, 0, 0, 0, 1, 0, 2, 0),
+ BANK("Y", PIN(GPIOY_0, 0), PIN(GPIOY_14, 0), 80, 96, 3, 0, 3, 0, 3, 0, 4, 0, 5, 0),
+ BANK("DV", PIN(GPIODV_9, 0), PIN(GPIODV_29, 0), 59, 79, 0, 0, 0, 0, 7, 0, 8, 0, 9, 0),
+ BANK("H", PIN(GPIOH_0, 0), PIN(GPIOH_9, 0), 14, 23, 1, 16, 1, 16, 9, 19, 10, 19, 11, 19),
+ BANK("CARD", PIN(CARD_0, 0), PIN(CARD_6, 0), 43, 49, 2, 20, 2, 20, 0, 22, 1, 22, 2, 22),
+ BANK("BOOT", PIN(BOOT_0, 0), PIN(BOOT_18, 0), 24, 42, 2, 0, 2, 0, 9, 0, 10, 0, 11, 0),
+
+ /*
+	 * The following bank is not mentioned in the public datasheet.
+	 * There is no information on whether it can be used with the gpio
+	 * interrupt controller.
+ */
+ BANK("DIF", PIN(DIF_0_P, 0), PIN(DIF_4_N, 0), -1, -1, 5, 8, 5, 8, 12, 12, 13, 12, 14, 12),
};
static struct meson_bank meson8b_aobus_banks[] = {
- /* name first last pullen pull dir out in */
- BANK("AO", PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+ /* name first last irq pullen pull dir out in */
+ BANK("AO", PIN(GPIOAO_0, AO_OFF), PIN(GPIO_TEST_N, AO_OFF), 0, 13, 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
};
struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
diff --git a/drivers/pinctrl/mvebu/Kconfig b/drivers/pinctrl/mvebu/Kconfig
index 5bade32d3089..d9773b77ff9f 100644
--- a/drivers/pinctrl/mvebu/Kconfig
+++ b/drivers/pinctrl/mvebu/Kconfig
@@ -1,5 +1,3 @@
-if PLAT_ORION
-
config PINCTRL_MVEBU
bool
select PINMUX
@@ -30,6 +28,14 @@ config PINCTRL_ARMADA_39X
bool
select PINCTRL_MVEBU
+config PINCTRL_ARMADA_AP806
+ bool
+ select PINCTRL_MVEBU
+
+config PINCTRL_ARMADA_CP110
+ bool
+ select PINCTRL_MVEBU
+
config PINCTRL_ARMADA_XP
bool
select PINCTRL_MVEBU
@@ -38,8 +44,6 @@ config PINCTRL_ORION
bool
select PINCTRL_MVEBU
-endif
-
config PINCTRL_ARMADA_37XX
bool
select GENERIC_PINCONF
diff --git a/drivers/pinctrl/mvebu/Makefile b/drivers/pinctrl/mvebu/Makefile
index 60c245a60f39..5b03fd55e28d 100644
--- a/drivers/pinctrl/mvebu/Makefile
+++ b/drivers/pinctrl/mvebu/Makefile
@@ -5,6 +5,8 @@ obj-$(CONFIG_PINCTRL_ARMADA_370) += pinctrl-armada-370.o
obj-$(CONFIG_PINCTRL_ARMADA_375) += pinctrl-armada-375.o
obj-$(CONFIG_PINCTRL_ARMADA_38X) += pinctrl-armada-38x.o
obj-$(CONFIG_PINCTRL_ARMADA_39X) += pinctrl-armada-39x.o
+obj-$(CONFIG_PINCTRL_ARMADA_AP806) += pinctrl-armada-ap806.o
+obj-$(CONFIG_PINCTRL_ARMADA_CP110) += pinctrl-armada-cp110.o
obj-$(CONFIG_PINCTRL_ARMADA_XP) += pinctrl-armada-xp.o
obj-$(CONFIG_PINCTRL_ARMADA_37XX) += pinctrl-armada-37xx.o
obj-$(CONFIG_PINCTRL_ORION) += pinctrl-orion.o
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 5c96f5558310..f024e25787fc 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -13,7 +13,9 @@
#include <linux/gpio/driver.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>
@@ -30,6 +32,11 @@
#define OUTPUT_CTL 0x20
#define SELECTION 0x30
+#define IRQ_EN 0x0
+#define IRQ_POL 0x08
+#define IRQ_STATUS 0x10
+#define IRQ_WKUP 0x18
+
#define NB_FUNCS 2
#define GPIO_PER_REG 32
@@ -75,9 +82,12 @@ struct armada_37xx_pmx_func {
struct armada_37xx_pinctrl {
struct regmap *regmap;
+ void __iomem *base;
const struct armada_37xx_pin_data *data;
struct device *dev;
struct gpio_chip gpio_chip;
+ struct irq_chip irq_chip;
+ spinlock_t irq_lock;
struct pinctrl_desc pctl;
struct pinctrl_dev *pctl_dev;
struct armada_37xx_pin_group *groups;
@@ -147,8 +157,9 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
PIN_GRP_GPIO("onewire", 4, 1, BIT(16), "onewire"),
PIN_GRP_GPIO("uart1", 25, 2, BIT(17), "uart"),
PIN_GRP_GPIO("spi_quad", 15, 2, BIT(18), "spi"),
- PIN_GRP_EXTRA("uart2", 9, 2, BIT(13) | BIT(14) | BIT(19),
- BIT(13) | BIT(14), BIT(19), 18, 2, "gpio", "uart"),
+ PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
+ BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
+ 18, 2, "gpio", "uart"),
PIN_GRP_GPIO("led0_od", 11, 1, BIT(20), "led"),
PIN_GRP_GPIO("led1_od", 12, 1, BIT(21), "led"),
PIN_GRP_GPIO("led2_od", 13, 1, BIT(22), "led"),
@@ -159,8 +170,8 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
PIN_GRP_GPIO("usb32_drvvbus0", 0, 1, BIT(0), "drvbus"),
PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"),
- PIN_GRP_GPIO("sdio_sb", 24, 5, BIT(2), "sdio"),
- PIN_GRP_EXTRA("rgmii", 6, 14, BIT(3), 0, BIT(3), 23, 1, "mii", "gpio"),
+ PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"),
+ PIN_GRP_EXTRA("rgmii", 6, 12, BIT(3), 0, BIT(3), 23, 1, "mii", "gpio"),
PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"),
PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"),
PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"),
@@ -346,6 +357,14 @@ static int armada_37xx_pmx_set(struct pinctrl_dev *pctldev,
return armada_37xx_pmx_set_by_name(pctldev, name, grp);
}
+static inline void armada_37xx_irq_update_reg(unsigned int *reg,
+ struct irq_data *d)
+{
+ int offset = irqd_to_hwirq(d);
+
+ armada_37xx_update_reg(reg, offset);
+}
+
static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
unsigned int offset)
{
@@ -468,6 +487,214 @@ static const struct gpio_chip armada_37xx_gpiolib_chip = {
.owner = THIS_MODULE,
};
+static void armada_37xx_irq_ack(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
+ u32 reg = IRQ_STATUS;
+ unsigned long flags;
+
+ armada_37xx_irq_update_reg(&reg, d);
+ spin_lock_irqsave(&info->irq_lock, flags);
+ writel(d->mask, info->base + reg);
+ spin_unlock_irqrestore(&info->irq_lock, flags);
+}
+
+static void armada_37xx_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
+ u32 val, reg = IRQ_EN;
+ unsigned long flags;
+
+ armada_37xx_irq_update_reg(&reg, d);
+ spin_lock_irqsave(&info->irq_lock, flags);
+ val = readl(info->base + reg);
+ writel(val & ~d->mask, info->base + reg);
+ spin_unlock_irqrestore(&info->irq_lock, flags);
+}
+
+static void armada_37xx_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
+ u32 val, reg = IRQ_EN;
+ unsigned long flags;
+
+ armada_37xx_irq_update_reg(&reg, d);
+ spin_lock_irqsave(&info->irq_lock, flags);
+ val = readl(info->base + reg);
+ writel(val | d->mask, info->base + reg);
+ spin_unlock_irqrestore(&info->irq_lock, flags);
+}
+
+static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
+ u32 val, reg = IRQ_WKUP;
+ unsigned long flags;
+
+ armada_37xx_irq_update_reg(&reg, d);
+ spin_lock_irqsave(&info->irq_lock, flags);
+ val = readl(info->base + reg);
+ if (on)
+ val |= d->mask;
+ else
+ val &= ~d->mask;
+ writel(val, info->base + reg);
+ spin_unlock_irqrestore(&info->irq_lock, flags);
+
+ return 0;
+}
+
+static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct armada_37xx_pinctrl *info = gpiochip_get_data(chip);
+ u32 val, reg = IRQ_POL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->irq_lock, flags);
+ armada_37xx_irq_update_reg(&reg, d);
+ val = readl(info->base + reg);
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ val &= ~d->mask;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val |= d->mask;
+ break;
+ default:
+ spin_unlock_irqrestore(&info->irq_lock, flags);
+ return -EINVAL;
+ }
+ writel(val, info->base + reg);
+ spin_unlock_irqrestore(&info->irq_lock, flags);
+
+ return 0;
+}
+
+
+static void armada_37xx_irq_handler(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct armada_37xx_pinctrl *info = gpiochip_get_data(gc);
+ struct irq_domain *d = gc->irqdomain;
+ int i;
+
+ chained_irq_enter(chip, desc);
+ for (i = 0; i <= d->revmap_size / GPIO_PER_REG; i++) {
+ u32 status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->irq_lock, flags);
+ status = readl_relaxed(info->base + IRQ_STATUS + 4 * i);
+		/* Only handle the interrupts that are enabled */
+ status &= readl_relaxed(info->base + IRQ_EN + 4 * i);
+ spin_unlock_irqrestore(&info->irq_lock, flags);
+ while (status) {
+ u32 hwirq = ffs(status) - 1;
+ u32 virq = irq_find_mapping(d, hwirq +
+ i * GPIO_PER_REG);
+
+ generic_handle_irq(virq);
+
+ /* Update status in case a new IRQ appears */
+ spin_lock_irqsave(&info->irq_lock, flags);
+ status = readl_relaxed(info->base +
+ IRQ_STATUS + 4 * i);
+			/* Only handle the interrupts that are enabled */
+ status &= readl_relaxed(info->base + IRQ_EN + 4 * i);
+ spin_unlock_irqrestore(&info->irq_lock, flags);
+ }
+ }
+ chained_irq_exit(chip, desc);
+}
+
+static int armada_37xx_irqchip_register(struct platform_device *pdev,
+ struct armada_37xx_pinctrl *info)
+{
+ struct device_node *np = info->dev->of_node;
+ int nrirqs = info->data->nr_pins;
+ struct gpio_chip *gc = &info->gpio_chip;
+ struct irq_chip *irqchip = &info->irq_chip;
+ struct resource res;
+ int ret = -ENODEV, i, nr_irq_parent;
+
+ /* Check if we have at least one gpio-controller child node */
+ for_each_child_of_node(info->dev->of_node, np) {
+ if (of_property_read_bool(np, "gpio-controller")) {
+ ret = 0;
+ break;
+ }
+	}
+ if (ret)
+ return ret;
+
+ nr_irq_parent = of_irq_count(np);
+ spin_lock_init(&info->irq_lock);
+
+ if (!nr_irq_parent) {
+ dev_err(&pdev->dev, "Invalid or no IRQ\n");
+ return 0;
+ }
+
+ if (of_address_to_resource(info->dev->of_node, 1, &res)) {
+ dev_err(info->dev, "cannot find IO resource\n");
+ return -ENOENT;
+ }
+
+ info->base = devm_ioremap_resource(info->dev, &res);
+ if (IS_ERR(info->base))
+ return PTR_ERR(info->base);
+
+ irqchip->irq_ack = armada_37xx_irq_ack;
+ irqchip->irq_mask = armada_37xx_irq_mask;
+ irqchip->irq_unmask = armada_37xx_irq_unmask;
+ irqchip->irq_set_wake = armada_37xx_irq_set_wake;
+ irqchip->irq_set_type = armada_37xx_irq_set_type;
+ irqchip->name = info->data->name;
+
+ ret = gpiochip_irqchip_add(gc, irqchip, 0,
+ handle_edge_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_info(&pdev->dev, "could not add irqchip\n");
+ return ret;
+ }
+
+ /*
+ * Many interrupts are connected to the parent interrupt
+	 * Many interrupt lines are connected to the parent interrupt
+	 * controller, but we do not take advantage of this: we simply
+	 * use the chained irq handler for all of them.
+ for (i = 0; i < nrirqs; i++) {
+ struct irq_data *d = irq_get_irq_data(gc->irq_base + i);
+
+ /*
+ * The mask field is a "precomputed bitmask for
+ * accessing the chip registers" which was introduced
+ * for the generic irqchip framework. As we don't use
+ * this framework, we can reuse this field for our own
+ * usage.
+ */
+ d->mask = BIT(i % GPIO_PER_REG);
+ }
+
+ for (i = 0; i < nr_irq_parent; i++) {
+ int irq = irq_of_parse_and_map(np, i);
+
+		if (!irq)
+ continue;
+
+ gpiochip_set_chained_irqchip(gc, irqchip, irq,
+ armada_37xx_irq_handler);
+ }
+
+ return 0;
+}
+
static int armada_37xx_gpiochip_register(struct platform_device *pdev,
struct armada_37xx_pinctrl *info)
{
@@ -496,6 +723,9 @@ static int armada_37xx_gpiochip_register(struct platform_device *pdev,
ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
if (ret)
return ret;
+ ret = armada_37xx_irqchip_register(pdev, info);
+ if (ret)
+ return ret;
return 0;
}
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c b/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c
new file mode 100644
index 000000000000..66e442260a4e
--- /dev/null
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-ap806.c
@@ -0,0 +1,140 @@
+/*
+ * Marvell Armada ap806 pinctrl driver based on mvebu pinctrl core
+ *
+ * Copyright (C) 2017 Marvell
+ *
+ * Thomas Petazzoni <[email protected]>
+ * Hanna Hawa <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-mvebu.h"
+
+static struct mvebu_mpp_mode armada_ap806_mpp_modes[] = {
+ MPP_MODE(0,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "clk"),
+ MPP_FUNCTION(3, "spi0", "clk")),
+ MPP_MODE(1,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "cmd"),
+ MPP_FUNCTION(3, "spi0", "miso")),
+ MPP_MODE(2,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "d0"),
+ MPP_FUNCTION(3, "spi0", "mosi")),
+ MPP_MODE(3,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "d1"),
+ MPP_FUNCTION(3, "spi0", "cs0n")),
+ MPP_MODE(4,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "d2"),
+ MPP_FUNCTION(3, "i2c0", "sda")),
+ MPP_MODE(5,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "d3"),
+ MPP_FUNCTION(3, "i2c0", "sdk")),
+ MPP_MODE(6,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "ds")),
+ MPP_MODE(7,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "d4"),
+ MPP_FUNCTION(3, "uart1", "rxd")),
+ MPP_MODE(8,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "d5"),
+ MPP_FUNCTION(3, "uart1", "txd")),
+ MPP_MODE(9,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "d6"),
+ MPP_FUNCTION(3, "spi0", "cs1n")),
+ MPP_MODE(10,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "d7")),
+ MPP_MODE(11,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(3, "uart0", "txd")),
+ MPP_MODE(12,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "pw_off"),
+ MPP_FUNCTION(2, "sdio", "hw_rst")),
+ MPP_MODE(13,
+ MPP_FUNCTION(0, "gpio", NULL)),
+ MPP_MODE(14,
+ MPP_FUNCTION(0, "gpio", NULL)),
+ MPP_MODE(15,
+ MPP_FUNCTION(0, "gpio", NULL)),
+ MPP_MODE(16,
+ MPP_FUNCTION(0, "gpio", NULL)),
+ MPP_MODE(17,
+ MPP_FUNCTION(0, "gpio", NULL)),
+ MPP_MODE(18,
+ MPP_FUNCTION(0, "gpio", NULL)),
+ MPP_MODE(19,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(3, "uart0", "rxd"),
+ MPP_FUNCTION(4, "sdio", "pw_off")),
+};
+
+static struct mvebu_pinctrl_soc_info armada_ap806_pinctrl_info;
+
+static const struct of_device_id armada_ap806_pinctrl_of_match[] = {
+ {
+ .compatible = "marvell,ap806-pinctrl",
+ },
+ { },
+};
+
+static const struct mvebu_mpp_ctrl armada_ap806_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 19, NULL, mvebu_regmap_mpp_ctrl),
+};
+
+static struct pinctrl_gpio_range armada_ap806_mpp_gpio_ranges[] = {
+ MPP_GPIO_RANGE(0, 0, 0, 20),
+};
+
+static int armada_ap806_pinctrl_probe(struct platform_device *pdev)
+{
+ struct mvebu_pinctrl_soc_info *soc = &armada_ap806_pinctrl_info;
+ const struct of_device_id *match =
+ of_match_device(armada_ap806_pinctrl_of_match, &pdev->dev);
+
+ if (!match || !pdev->dev.parent)
+ return -ENODEV;
+
+ soc->variant = 0; /* no variants for Armada AP806 */
+ soc->controls = armada_ap806_mpp_controls;
+ soc->ncontrols = ARRAY_SIZE(armada_ap806_mpp_controls);
+ soc->gpioranges = armada_ap806_mpp_gpio_ranges;
+ soc->ngpioranges = ARRAY_SIZE(armada_ap806_mpp_gpio_ranges);
+ soc->modes = armada_ap806_mpp_modes;
+ soc->nmodes = armada_ap806_mpp_controls[0].npins;
+
+ pdev->dev.platform_data = soc;
+
+ return mvebu_pinctrl_simple_regmap_probe(pdev, pdev->dev.parent, 0);
+}
+
+static struct platform_driver armada_ap806_pinctrl_driver = {
+ .driver = {
+ .name = "armada-ap806-pinctrl",
+ .of_match_table = of_match_ptr(armada_ap806_pinctrl_of_match),
+ },
+ .probe = armada_ap806_pinctrl_probe,
+};
+
+builtin_platform_driver(armada_ap806_pinctrl_driver);
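
From a peripheral driver's point of view nothing AP806-specific is needed to use these pins; a hedged sketch (not part of the patch, example_apply_default_pins() is a placeholder): the driver selects its default pinctrl state and the "sdio"/"spi0"/"uart0" functions declared in the MPP table above are applied from the device tree mapping.

#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

static int example_apply_default_pins(struct device *dev)
{
	/* binds and activates the "default" pinctrl state of this device */
	struct pinctrl *p = devm_pinctrl_get_select_default(dev);

	return PTR_ERR_OR_ZERO(p);
}

In practice the driver core already selects the default state at probe time, so an explicit call like this is only needed when switching to non-default states.
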
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c b/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c
new file mode 100644
index 000000000000..7f85beb45482
--- /dev/null
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-cp110.c
@@ -0,0 +1,687 @@
+/*
+ * Marvell Armada CP110 pinctrl driver based on mvebu pinctrl core
+ *
+ * Copyright (C) 2017 Marvell
+ *
+ * Hanna Hawa <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-mvebu.h"
+
+/*
+ * Even if the pin controller is the same, the MPPs available depend on the
+ * SoC integration.
+ *  - In Armada7K (single CP) almost all the MPPs are available (except
+ *    MPPs 39 to 43).
+ *  - In Armada8K (dual CP) the MPPs are split into 2 parts: MPPs 0-31 from
+ *    CPS, and MPPs 32-62 from CPM. The flags below (V_ARMADA_8K_CPM,
+ *    V_ARMADA_8K_CPS) set which MPPs are available to each CP.
+ * The V_ARMADA_7K_8K_CPx values mean that the MPP is available both for the
+ * corresponding CPx and for Armada70x0.
+ */
+enum {
+ V_ARMADA_7K = BIT(0),
+ V_ARMADA_8K_CPM = BIT(1),
+ V_ARMADA_8K_CPS = BIT(2),
+ V_ARMADA_7K_8K_CPM = (V_ARMADA_7K | V_ARMADA_8K_CPM),
+ V_ARMADA_7K_8K_CPS = (V_ARMADA_7K | V_ARMADA_8K_CPS),
+};
+
+static struct mvebu_mpp_mode armada_cp110_mpp_modes[] = {
+ MPP_MODE(0,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "ale1"),
+ MPP_FUNCTION(2, "au", "i2smclk"),
+ MPP_FUNCTION(3, "ge0", "rxd3"),
+ MPP_FUNCTION(4, "tdm", "pclk"),
+ MPP_FUNCTION(6, "ptp", "pulse"),
+ MPP_FUNCTION(7, "mss_i2c", "sda"),
+ MPP_FUNCTION(8, "uart0", "rxd"),
+ MPP_FUNCTION(9, "sata0", "present_act"),
+ MPP_FUNCTION(10, "ge", "mdio")),
+ MPP_MODE(1,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "ale0"),
+ MPP_FUNCTION(2, "au", "i2sdo_spdifo"),
+ MPP_FUNCTION(3, "ge0", "rxd2"),
+ MPP_FUNCTION(4, "tdm", "drx"),
+ MPP_FUNCTION(6, "ptp", "clk"),
+ MPP_FUNCTION(7, "mss_i2c", "sck"),
+ MPP_FUNCTION(8, "uart0", "txd"),
+ MPP_FUNCTION(9, "sata1", "present_act"),
+ MPP_FUNCTION(10, "ge", "mdc")),
+ MPP_MODE(2,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "ad15"),
+ MPP_FUNCTION(2, "au", "i2sextclk"),
+ MPP_FUNCTION(3, "ge0", "rxd1"),
+ MPP_FUNCTION(4, "tdm", "dtx"),
+ MPP_FUNCTION(5, "mss_uart", "rxd"),
+ MPP_FUNCTION(6, "ptp", "pclk_out"),
+ MPP_FUNCTION(7, "i2c1", "sck"),
+ MPP_FUNCTION(8, "uart1", "rxd"),
+ MPP_FUNCTION(9, "sata0", "present_act"),
+ MPP_FUNCTION(10, "xg", "mdc")),
+ MPP_MODE(3,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "ad14"),
+ MPP_FUNCTION(2, "au", "i2slrclk"),
+ MPP_FUNCTION(3, "ge0", "rxd0"),
+ MPP_FUNCTION(4, "tdm", "fsync"),
+ MPP_FUNCTION(5, "mss_uart", "txd"),
+ MPP_FUNCTION(6, "pcie", "rstoutn"),
+ MPP_FUNCTION(7, "i2c1", "sda"),
+ MPP_FUNCTION(8, "uart1", "txd"),
+ MPP_FUNCTION(9, "sata1", "present_act"),
+ MPP_FUNCTION(10, "xg", "mdio")),
+ MPP_MODE(4,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "ad13"),
+ MPP_FUNCTION(2, "au", "i2sbclk"),
+ MPP_FUNCTION(3, "ge0", "rxctl"),
+ MPP_FUNCTION(4, "tdm", "rstn"),
+ MPP_FUNCTION(5, "mss_uart", "rxd"),
+ MPP_FUNCTION(6, "uart1", "cts"),
+ MPP_FUNCTION(7, "pcie0", "clkreq"),
+ MPP_FUNCTION(8, "uart3", "rxd"),
+ MPP_FUNCTION(10, "ge", "mdc")),
+ MPP_MODE(5,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "ad12"),
+ MPP_FUNCTION(2, "au", "i2sdi"),
+ MPP_FUNCTION(3, "ge0", "rxclk"),
+ MPP_FUNCTION(4, "tdm", "intn"),
+ MPP_FUNCTION(5, "mss_uart", "txd"),
+ MPP_FUNCTION(6, "uart1", "rts"),
+ MPP_FUNCTION(7, "pcie1", "clkreq"),
+ MPP_FUNCTION(8, "uart3", "txd"),
+ MPP_FUNCTION(10, "ge", "mdio")),
+ MPP_MODE(6,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "ad11"),
+ MPP_FUNCTION(3, "ge0", "txd3"),
+ MPP_FUNCTION(4, "spi0", "csn2"),
+ MPP_FUNCTION(5, "au", "i2sextclk"),
+ MPP_FUNCTION(6, "sata1", "present_act"),
+ MPP_FUNCTION(7, "pcie2", "clkreq"),
+ MPP_FUNCTION(8, "uart0", "rxd"),
+ MPP_FUNCTION(9, "ptp", "pulse")),
+ MPP_MODE(7,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "ad10"),
+ MPP_FUNCTION(3, "ge0", "txd2"),
+ MPP_FUNCTION(4, "spi0", "csn1"),
+ MPP_FUNCTION(5, "spi1", "csn1"),
+ MPP_FUNCTION(6, "sata0", "present_act"),
+ MPP_FUNCTION(7, "led", "data"),
+ MPP_FUNCTION(8, "uart0", "txd"),
+ MPP_FUNCTION(9, "ptp", "clk")),
+ MPP_MODE(8,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "ad9"),
+ MPP_FUNCTION(3, "ge0", "txd1"),
+ MPP_FUNCTION(4, "spi0", "csn0"),
+ MPP_FUNCTION(5, "spi1", "csn0"),
+ MPP_FUNCTION(6, "uart0", "cts"),
+ MPP_FUNCTION(7, "led", "stb"),
+ MPP_FUNCTION(8, "uart2", "rxd"),
+ MPP_FUNCTION(9, "ptp", "pclk_out"),
+ MPP_FUNCTION(10, "synce1", "clk")),
+ MPP_MODE(9,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "ad8"),
+ MPP_FUNCTION(3, "ge0", "txd0"),
+ MPP_FUNCTION(4, "spi0", "mosi"),
+ MPP_FUNCTION(5, "spi1", "mosi"),
+ MPP_FUNCTION(7, "pcie", "rstoutn"),
+ MPP_FUNCTION(10, "synce2", "clk")),
+ MPP_MODE(10,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "readyn"),
+ MPP_FUNCTION(3, "ge0", "txctl"),
+ MPP_FUNCTION(4, "spi0", "miso"),
+ MPP_FUNCTION(5, "spi1", "miso"),
+ MPP_FUNCTION(6, "uart0", "cts"),
+ MPP_FUNCTION(7, "sata1", "present_act")),
+ MPP_MODE(11,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "wen1"),
+ MPP_FUNCTION(3, "ge0", "txclkout"),
+ MPP_FUNCTION(4, "spi0", "clk"),
+ MPP_FUNCTION(5, "spi1", "clk"),
+ MPP_FUNCTION(6, "uart0", "rts"),
+ MPP_FUNCTION(7, "led", "clk"),
+ MPP_FUNCTION(8, "uart2", "txd"),
+ MPP_FUNCTION(9, "sata0", "present_act")),
+ MPP_MODE(12,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "clk_out"),
+ MPP_FUNCTION(2, "nf", "rbn1"),
+ MPP_FUNCTION(3, "spi1", "csn1"),
+ MPP_FUNCTION(4, "ge0", "rxclk")),
+ MPP_MODE(13,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "burstn"),
+ MPP_FUNCTION(2, "nf", "rbn0"),
+ MPP_FUNCTION(3, "spi1", "miso"),
+ MPP_FUNCTION(4, "ge0", "rxctl"),
+ MPP_FUNCTION(8, "mss_spi", "miso")),
+ MPP_MODE(14,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "bootcsn"),
+ MPP_FUNCTION(2, "dev", "csn0"),
+ MPP_FUNCTION(3, "spi1", "csn0"),
+ MPP_FUNCTION(4, "spi0", "csn3"),
+ MPP_FUNCTION(5, "au", "i2sextclk"),
+ MPP_FUNCTION(6, "spi0", "miso"),
+ MPP_FUNCTION(7, "sata0", "present_act"),
+ MPP_FUNCTION(8, "mss_spi", "csn")),
+ MPP_MODE(15,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "ad7"),
+ MPP_FUNCTION(3, "spi1", "mosi"),
+ MPP_FUNCTION(6, "spi0", "mosi"),
+ MPP_FUNCTION(8, "mss_spi", "mosi"),
+ MPP_FUNCTION(11, "ptp", "pulse_cp2cp")),
+ MPP_MODE(16,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "ad6"),
+ MPP_FUNCTION(3, "spi1", "clk"),
+ MPP_FUNCTION(8, "mss_spi", "clk")),
+ MPP_MODE(17,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "ad5"),
+ MPP_FUNCTION(4, "ge0", "txd3")),
+ MPP_MODE(18,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "ad4"),
+ MPP_FUNCTION(4, "ge0", "txd2"),
+ MPP_FUNCTION(11, "ptp", "clk_cp2cp")),
+ MPP_MODE(19,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "ad3"),
+ MPP_FUNCTION(4, "ge0", "txd1"),
+ MPP_FUNCTION(11, "wakeup", "out_cp2cp")),
+ MPP_MODE(20,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "ad2"),
+ MPP_FUNCTION(4, "ge0", "txd0")),
+ MPP_MODE(21,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "ad1"),
+ MPP_FUNCTION(4, "ge0", "txctl"),
+ MPP_FUNCTION(11, "sei", "in_cp2cp")),
+ MPP_MODE(22,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "ad0"),
+ MPP_FUNCTION(4, "ge0", "txclkout"),
+ MPP_FUNCTION(11, "wakeup", "in_cp2cp")),
+ MPP_MODE(23,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "a1"),
+ MPP_FUNCTION(5, "au", "i2smclk"),
+ MPP_FUNCTION(11, "link", "rd_in_cp2cp")),
+ MPP_MODE(24,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "a0"),
+ MPP_FUNCTION(5, "au", "i2slrclk")),
+ MPP_MODE(25,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "oen"),
+ MPP_FUNCTION(5, "au", "i2sdo_spdifo")),
+ MPP_MODE(26,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "wen0"),
+ MPP_FUNCTION(5, "au", "i2sbclk")),
+ MPP_MODE(27,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "csn0"),
+ MPP_FUNCTION(2, "spi1", "miso"),
+ MPP_FUNCTION(3, "mss_gpio4", NULL),
+ MPP_FUNCTION(4, "ge0", "rxd3"),
+ MPP_FUNCTION(5, "spi0", "csn4"),
+ MPP_FUNCTION(8, "ge", "mdio"),
+ MPP_FUNCTION(9, "sata0", "present_act"),
+ MPP_FUNCTION(10, "uart0", "rts"),
+ MPP_FUNCTION(11, "rei", "in_cp2cp")),
+ MPP_MODE(28,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "csn1"),
+ MPP_FUNCTION(2, "spi1", "csn0"),
+ MPP_FUNCTION(3, "mss_gpio5", NULL),
+ MPP_FUNCTION(4, "ge0", "rxd2"),
+ MPP_FUNCTION(5, "spi0", "csn5"),
+ MPP_FUNCTION(6, "pcie2", "clkreq"),
+ MPP_FUNCTION(7, "ptp", "pulse"),
+ MPP_FUNCTION(8, "ge", "mdc"),
+ MPP_FUNCTION(9, "sata1", "present_act"),
+ MPP_FUNCTION(10, "uart0", "cts"),
+ MPP_FUNCTION(11, "led", "data")),
+ MPP_MODE(29,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "csn2"),
+ MPP_FUNCTION(2, "spi1", "mosi"),
+ MPP_FUNCTION(3, "mss_gpio6", NULL),
+ MPP_FUNCTION(4, "ge0", "rxd1"),
+ MPP_FUNCTION(5, "spi0", "csn6"),
+ MPP_FUNCTION(6, "pcie1", "clkreq"),
+ MPP_FUNCTION(7, "ptp", "clk"),
+ MPP_FUNCTION(8, "mss_i2c", "sda"),
+ MPP_FUNCTION(9, "sata0", "present_act"),
+ MPP_FUNCTION(10, "uart0", "rxd"),
+ MPP_FUNCTION(11, "led", "stb")),
+ MPP_MODE(30,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "csn3"),
+ MPP_FUNCTION(2, "spi1", "clk"),
+ MPP_FUNCTION(3, "mss_gpio7", NULL),
+ MPP_FUNCTION(4, "ge0", "rxd0"),
+ MPP_FUNCTION(5, "spi0", "csn7"),
+ MPP_FUNCTION(6, "pcie0", "clkreq"),
+ MPP_FUNCTION(7, "ptp", "pclk_out"),
+ MPP_FUNCTION(8, "mss_i2c", "sck"),
+ MPP_FUNCTION(9, "sata1", "present_act"),
+ MPP_FUNCTION(10, "uart0", "txd"),
+ MPP_FUNCTION(11, "led", "clk")),
+ MPP_MODE(31,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "dev", "a2"),
+ MPP_FUNCTION(3, "mss_gpio4", NULL),
+ MPP_FUNCTION(6, "pcie", "rstoutn"),
+ MPP_FUNCTION(8, "ge", "mdc")),
+ MPP_MODE(32,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "mii", "col"),
+ MPP_FUNCTION(2, "mii", "txerr"),
+ MPP_FUNCTION(3, "mss_spi", "miso"),
+ MPP_FUNCTION(4, "tdm", "drx"),
+ MPP_FUNCTION(5, "au", "i2sextclk"),
+ MPP_FUNCTION(6, "au", "i2sdi"),
+ MPP_FUNCTION(7, "ge", "mdio"),
+ MPP_FUNCTION(8, "sdio", "v18_en"),
+ MPP_FUNCTION(9, "pcie1", "clkreq"),
+ MPP_FUNCTION(10, "mss_gpio0", NULL)),
+ MPP_MODE(33,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "mii", "txclk"),
+ MPP_FUNCTION(2, "sdio", "pwr10"),
+ MPP_FUNCTION(3, "mss_spi", "csn"),
+ MPP_FUNCTION(4, "tdm", "fsync"),
+ MPP_FUNCTION(5, "au", "i2smclk"),
+ MPP_FUNCTION(6, "sdio", "bus_pwr"),
+ MPP_FUNCTION(8, "xg", "mdio"),
+ MPP_FUNCTION(9, "pcie2", "clkreq"),
+ MPP_FUNCTION(10, "mss_gpio1", NULL)),
+ MPP_MODE(34,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "mii", "rxerr"),
+ MPP_FUNCTION(2, "sdio", "pwr11"),
+ MPP_FUNCTION(3, "mss_spi", "mosi"),
+ MPP_FUNCTION(4, "tdm", "dtx"),
+ MPP_FUNCTION(5, "au", "i2slrclk"),
+ MPP_FUNCTION(6, "sdio", "wr_protect"),
+ MPP_FUNCTION(7, "ge", "mdc"),
+ MPP_FUNCTION(9, "pcie0", "clkreq"),
+ MPP_FUNCTION(10, "mss_gpio2", NULL)),
+ MPP_MODE(35,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sata1", "present_act"),
+ MPP_FUNCTION(2, "i2c1", "sda"),
+ MPP_FUNCTION(3, "mss_spi", "clk"),
+ MPP_FUNCTION(4, "tdm", "pclk"),
+ MPP_FUNCTION(5, "au", "i2sdo_spdifo"),
+ MPP_FUNCTION(6, "sdio", "card_detect"),
+ MPP_FUNCTION(7, "xg", "mdio"),
+ MPP_FUNCTION(8, "ge", "mdio"),
+ MPP_FUNCTION(9, "pcie", "rstoutn"),
+ MPP_FUNCTION(10, "mss_gpio3", NULL)),
+ MPP_MODE(36,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "synce2", "clk"),
+ MPP_FUNCTION(2, "i2c1", "sck"),
+ MPP_FUNCTION(3, "ptp", "clk"),
+ MPP_FUNCTION(4, "synce1", "clk"),
+ MPP_FUNCTION(5, "au", "i2sbclk"),
+ MPP_FUNCTION(6, "sata0", "present_act"),
+ MPP_FUNCTION(7, "xg", "mdc"),
+ MPP_FUNCTION(8, "ge", "mdc"),
+ MPP_FUNCTION(9, "pcie2", "clkreq"),
+ MPP_FUNCTION(10, "mss_gpio5", NULL)),
+ MPP_MODE(37,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "uart2", "rxd"),
+ MPP_FUNCTION(2, "i2c0", "sck"),
+ MPP_FUNCTION(3, "ptp", "pclk_out"),
+ MPP_FUNCTION(4, "tdm", "intn"),
+ MPP_FUNCTION(5, "mss_i2c", "sck"),
+ MPP_FUNCTION(6, "sata1", "present_act"),
+ MPP_FUNCTION(7, "ge", "mdc"),
+ MPP_FUNCTION(8, "xg", "mdc"),
+ MPP_FUNCTION(9, "pcie1", "clkreq"),
+ MPP_FUNCTION(10, "mss_gpio6", NULL),
+ MPP_FUNCTION(11, "link", "rd_out_cp2cp")),
+ MPP_MODE(38,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "uart2", "txd"),
+ MPP_FUNCTION(2, "i2c0", "sda"),
+ MPP_FUNCTION(3, "ptp", "pulse"),
+ MPP_FUNCTION(4, "tdm", "rstn"),
+ MPP_FUNCTION(5, "mss_i2c", "sda"),
+ MPP_FUNCTION(6, "sata0", "present_act"),
+ MPP_FUNCTION(7, "ge", "mdio"),
+ MPP_FUNCTION(8, "xg", "mdio"),
+ MPP_FUNCTION(9, "au", "i2sextclk"),
+ MPP_FUNCTION(10, "mss_gpio7", NULL),
+ MPP_FUNCTION(11, "ptp", "pulse_cp2cp")),
+ MPP_MODE(39,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "wr_protect"),
+ MPP_FUNCTION(4, "au", "i2sbclk"),
+ MPP_FUNCTION(5, "ptp", "clk"),
+ MPP_FUNCTION(6, "spi0", "csn1"),
+ MPP_FUNCTION(9, "sata1", "present_act"),
+ MPP_FUNCTION(10, "mss_gpio0", NULL)),
+ MPP_MODE(40,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "pwr11"),
+ MPP_FUNCTION(2, "synce1", "clk"),
+ MPP_FUNCTION(3, "mss_i2c", "sda"),
+ MPP_FUNCTION(4, "au", "i2sdo_spdifo"),
+ MPP_FUNCTION(5, "ptp", "pclk_out"),
+ MPP_FUNCTION(6, "spi0", "clk"),
+ MPP_FUNCTION(7, "uart1", "txd"),
+ MPP_FUNCTION(8, "ge", "mdio"),
+ MPP_FUNCTION(9, "sata0", "present_act"),
+ MPP_FUNCTION(10, "mss_gpio1", NULL)),
+ MPP_MODE(41,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "pwr10"),
+ MPP_FUNCTION(2, "sdio", "bus_pwr"),
+ MPP_FUNCTION(3, "mss_i2c", "sck"),
+ MPP_FUNCTION(4, "au", "i2slrclk"),
+ MPP_FUNCTION(5, "ptp", "pulse"),
+ MPP_FUNCTION(6, "spi0", "mosi"),
+ MPP_FUNCTION(7, "uart1", "rxd"),
+ MPP_FUNCTION(8, "ge", "mdc"),
+ MPP_FUNCTION(9, "sata1", "present_act"),
+ MPP_FUNCTION(10, "mss_gpio2", NULL),
+ MPP_FUNCTION(11, "rei", "out_cp2cp")),
+ MPP_MODE(42,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "v18_en"),
+ MPP_FUNCTION(2, "sdio", "wr_protect"),
+ MPP_FUNCTION(3, "synce2", "clk"),
+ MPP_FUNCTION(4, "au", "i2smclk"),
+ MPP_FUNCTION(5, "mss_uart", "txd"),
+ MPP_FUNCTION(6, "spi0", "miso"),
+ MPP_FUNCTION(7, "uart1", "cts"),
+ MPP_FUNCTION(8, "xg", "mdc"),
+ MPP_FUNCTION(9, "sata0", "present_act"),
+ MPP_FUNCTION(10, "mss_gpio4", NULL)),
+ MPP_MODE(43,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "sdio", "card_detect"),
+ MPP_FUNCTION(3, "synce1", "clk"),
+ MPP_FUNCTION(4, "au", "i2sextclk"),
+ MPP_FUNCTION(5, "mss_uart", "rxd"),
+ MPP_FUNCTION(6, "spi0", "csn0"),
+ MPP_FUNCTION(7, "uart1", "rts"),
+ MPP_FUNCTION(8, "xg", "mdio"),
+ MPP_FUNCTION(9, "sata1", "present_act"),
+ MPP_FUNCTION(10, "mss_gpio5", NULL),
+ MPP_FUNCTION(11, "wakeup", "out_cp2cp")),
+ MPP_MODE(44,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ge1", "txd2"),
+ MPP_FUNCTION(7, "uart0", "rts"),
+ MPP_FUNCTION(11, "ptp", "clk_cp2cp")),
+ MPP_MODE(45,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ge1", "txd3"),
+ MPP_FUNCTION(7, "uart0", "txd"),
+ MPP_FUNCTION(9, "pcie", "rstoutn")),
+ MPP_MODE(46,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ge1", "txd1"),
+ MPP_FUNCTION(7, "uart1", "rts")),
+ MPP_MODE(47,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ge1", "txd0"),
+ MPP_FUNCTION(5, "spi1", "clk"),
+ MPP_FUNCTION(7, "uart1", "txd"),
+ MPP_FUNCTION(8, "ge", "mdc")),
+ MPP_MODE(48,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ge1", "txctl_txen"),
+ MPP_FUNCTION(5, "spi1", "mosi"),
+ MPP_FUNCTION(8, "xg", "mdc"),
+ MPP_FUNCTION(11, "wakeup", "in_cp2cp")),
+ MPP_MODE(49,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ge1", "txclkout"),
+ MPP_FUNCTION(2, "mii", "crs"),
+ MPP_FUNCTION(5, "spi1", "miso"),
+ MPP_FUNCTION(7, "uart1", "rxd"),
+ MPP_FUNCTION(8, "ge", "mdio"),
+ MPP_FUNCTION(9, "pcie0", "clkreq"),
+ MPP_FUNCTION(10, "sdio", "v18_en"),
+ MPP_FUNCTION(11, "sei", "out_cp2cp")),
+ MPP_MODE(50,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ge1", "rxclk"),
+ MPP_FUNCTION(2, "mss_i2c", "sda"),
+ MPP_FUNCTION(5, "spi1", "csn0"),
+ MPP_FUNCTION(6, "uart2", "txd"),
+ MPP_FUNCTION(7, "uart0", "rxd"),
+ MPP_FUNCTION(8, "xg", "mdio"),
+ MPP_FUNCTION(10, "sdio", "pwr11")),
+ MPP_MODE(51,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ge1", "rxd0"),
+ MPP_FUNCTION(2, "mss_i2c", "sck"),
+ MPP_FUNCTION(5, "spi1", "csn1"),
+ MPP_FUNCTION(6, "uart2", "rxd"),
+ MPP_FUNCTION(7, "uart0", "cts"),
+ MPP_FUNCTION(10, "sdio", "pwr10")),
+ MPP_MODE(52,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ge1", "rxd1"),
+ MPP_FUNCTION(2, "synce1", "clk"),
+ MPP_FUNCTION(4, "synce2", "clk"),
+ MPP_FUNCTION(5, "spi1", "csn2"),
+ MPP_FUNCTION(7, "uart1", "cts"),
+ MPP_FUNCTION(8, "led", "clk"),
+ MPP_FUNCTION(9, "pcie", "rstoutn"),
+ MPP_FUNCTION(10, "pcie0", "clkreq")),
+ MPP_MODE(53,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ge1", "rxd2"),
+ MPP_FUNCTION(3, "ptp", "clk"),
+ MPP_FUNCTION(5, "spi1", "csn3"),
+ MPP_FUNCTION(7, "uart1", "rxd"),
+ MPP_FUNCTION(8, "led", "stb"),
+ MPP_FUNCTION(11, "sdio", "led")),
+ MPP_MODE(54,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ge1", "rxd3"),
+ MPP_FUNCTION(2, "synce2", "clk"),
+ MPP_FUNCTION(3, "ptp", "pclk_out"),
+ MPP_FUNCTION(4, "synce1", "clk"),
+ MPP_FUNCTION(8, "led", "data"),
+ MPP_FUNCTION(10, "sdio", "hw_rst"),
+ MPP_FUNCTION(11, "sdio", "wr_protect")),
+ MPP_MODE(55,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "ge1", "rxctl_rxdv"),
+ MPP_FUNCTION(3, "ptp", "pulse"),
+ MPP_FUNCTION(10, "sdio", "led"),
+ MPP_FUNCTION(11, "sdio", "card_detect")),
+ MPP_MODE(56,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(4, "tdm", "drx"),
+ MPP_FUNCTION(5, "au", "i2sdo_spdifo"),
+ MPP_FUNCTION(6, "spi0", "clk"),
+ MPP_FUNCTION(7, "uart1", "rxd"),
+ MPP_FUNCTION(9, "sata1", "present_act"),
+ MPP_FUNCTION(14, "sdio", "clk")),
+ MPP_MODE(57,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(2, "mss_i2c", "sda"),
+ MPP_FUNCTION(3, "ptp", "pclk_out"),
+ MPP_FUNCTION(4, "tdm", "intn"),
+ MPP_FUNCTION(5, "au", "i2sbclk"),
+ MPP_FUNCTION(6, "spi0", "mosi"),
+ MPP_FUNCTION(7, "uart1", "txd"),
+ MPP_FUNCTION(9, "sata0", "present_act"),
+ MPP_FUNCTION(14, "sdio", "cmd")),
+ MPP_MODE(58,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(2, "mss_i2c", "sck"),
+ MPP_FUNCTION(3, "ptp", "clk"),
+ MPP_FUNCTION(4, "tdm", "rstn"),
+ MPP_FUNCTION(5, "au", "i2sdi"),
+ MPP_FUNCTION(6, "spi0", "miso"),
+ MPP_FUNCTION(7, "uart1", "cts"),
+ MPP_FUNCTION(8, "led", "clk"),
+ MPP_FUNCTION(14, "sdio", "d0")),
+ MPP_MODE(59,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "mss_gpio7", NULL),
+ MPP_FUNCTION(2, "synce2", "clk"),
+ MPP_FUNCTION(4, "tdm", "fsync"),
+ MPP_FUNCTION(5, "au", "i2slrclk"),
+ MPP_FUNCTION(6, "spi0", "csn0"),
+ MPP_FUNCTION(7, "uart0", "cts"),
+ MPP_FUNCTION(8, "led", "stb"),
+ MPP_FUNCTION(9, "uart1", "txd"),
+ MPP_FUNCTION(14, "sdio", "d1")),
+ MPP_MODE(60,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "mss_gpio6", NULL),
+ MPP_FUNCTION(3, "ptp", "pulse"),
+ MPP_FUNCTION(4, "tdm", "dtx"),
+ MPP_FUNCTION(5, "au", "i2smclk"),
+ MPP_FUNCTION(6, "spi0", "csn1"),
+ MPP_FUNCTION(7, "uart0", "rts"),
+ MPP_FUNCTION(8, "led", "data"),
+ MPP_FUNCTION(9, "uart1", "rxd"),
+ MPP_FUNCTION(14, "sdio", "d2")),
+ MPP_MODE(61,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "mss_gpio5", NULL),
+ MPP_FUNCTION(3, "ptp", "clk"),
+ MPP_FUNCTION(4, "tdm", "pclk"),
+ MPP_FUNCTION(5, "au", "i2sextclk"),
+ MPP_FUNCTION(6, "spi0", "csn2"),
+ MPP_FUNCTION(7, "uart0", "txd"),
+ MPP_FUNCTION(8, "uart2", "txd"),
+ MPP_FUNCTION(9, "sata1", "present_act"),
+ MPP_FUNCTION(10, "ge", "mdio"),
+ MPP_FUNCTION(14, "sdio", "d3")),
+ MPP_MODE(62,
+ MPP_FUNCTION(0, "gpio", NULL),
+ MPP_FUNCTION(1, "mss_gpio4", NULL),
+ MPP_FUNCTION(2, "synce1", "clk"),
+ MPP_FUNCTION(3, "ptp", "pclk_out"),
+ MPP_FUNCTION(5, "sata1", "present_act"),
+ MPP_FUNCTION(6, "spi0", "csn3"),
+ MPP_FUNCTION(7, "uart0", "rxd"),
+ MPP_FUNCTION(8, "uart2", "rxd"),
+ MPP_FUNCTION(9, "sata0", "present_act"),
+ MPP_FUNCTION(10, "ge", "mdc")),
+};
+
+static const struct of_device_id armada_cp110_pinctrl_of_match[] = {
+ {
+ .compatible = "marvell,armada-7k-pinctrl",
+ .data = (void *) V_ARMADA_7K,
+ },
+ {
+ .compatible = "marvell,armada-8k-cpm-pinctrl",
+ .data = (void *) V_ARMADA_8K_CPM,
+ },
+ {
+ .compatible = "marvell,armada-8k-cps-pinctrl",
+ .data = (void *) V_ARMADA_8K_CPS,
+ },
+ { },
+};
+
+static const struct mvebu_mpp_ctrl armada_cp110_mpp_controls[] = {
+ MPP_FUNC_CTRL(0, 62, NULL, mvebu_regmap_mpp_ctrl),
+};
+
+static void mvebu_pinctrl_assign_variant(struct mvebu_mpp_mode *m,
+ u8 variant)
+{
+ struct mvebu_mpp_ctrl_setting *s;
+
+ for (s = m->settings ; s->name ; s++)
+ s->variant = variant;
+}
+
+static int armada_cp110_pinctrl_probe(struct platform_device *pdev)
+{
+ struct mvebu_pinctrl_soc_info *soc;
+ const struct of_device_id *match =
+ of_match_device(armada_cp110_pinctrl_of_match, &pdev->dev);
+ int i;
+
+ if (!pdev->dev.parent)
+ return -ENODEV;
+
+ soc = devm_kzalloc(&pdev->dev,
+ sizeof(struct mvebu_pinctrl_soc_info), GFP_KERNEL);
+ if (!soc)
+ return -ENOMEM;
+
+ soc->variant = (unsigned long) match->data & 0xff;
+ soc->controls = armada_cp110_mpp_controls;
+ soc->ncontrols = ARRAY_SIZE(armada_cp110_mpp_controls);
+ soc->modes = armada_cp110_mpp_modes;
+ soc->nmodes = ARRAY_SIZE(armada_cp110_mpp_modes);
+ for (i = 0; i < ARRAY_SIZE(armada_cp110_mpp_modes); i++) {
+ struct mvebu_mpp_mode *m = &armada_cp110_mpp_modes[i];
+
+ switch (i) {
+ case 0 ... 31:
+ mvebu_pinctrl_assign_variant(m, V_ARMADA_7K_8K_CPS);
+ break;
+ case 32 ... 38:
+ mvebu_pinctrl_assign_variant(m, V_ARMADA_7K_8K_CPM);
+ break;
+ case 39 ... 43:
+ mvebu_pinctrl_assign_variant(m, V_ARMADA_8K_CPM);
+ break;
+ case 44 ... 62:
+ mvebu_pinctrl_assign_variant(m, V_ARMADA_7K_8K_CPM);
+ break;
+ }
+ }
+ pdev->dev.platform_data = soc;
+
+ return mvebu_pinctrl_simple_regmap_probe(pdev, pdev->dev.parent, 0);
+}
+
+static struct platform_driver armada_cp110_pinctrl_driver = {
+ .driver = {
+ .name = "armada-cp110-pinctrl",
+ .of_match_table = of_match_ptr(armada_cp110_pinctrl_of_match),
+ },
+ .probe = armada_cp110_pinctrl_probe,
+};
+
+builtin_platform_driver(armada_cp110_pinctrl_driver);
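Each MPP mode above ends up with a per-setting variant mask (assigned in the probe loop), which is how one table serves the Armada 7K and both 8K co-processor dies: the shared mvebu core only offers a setting whose mask matches the variant stored in soc->variant. A rough sketch of that idea, written as a standalone helper purely for illustration (the real check lives in the common pinctrl-mvebu code, not in this file):

static bool cp110_setting_applies(const struct mvebu_mpp_ctrl_setting *s,
				  u8 soc_variant)
{
	/* a zero mask is taken to mean "valid on every variant" */
	return !s->variant || (s->variant & soc_variant);
}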
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index e4dda12d371a..163d4614b0f8 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -810,21 +810,17 @@ int mvebu_regmap_mpp_ctrl_set(struct mvebu_mpp_ctrl_data *data,
}
int mvebu_pinctrl_simple_regmap_probe(struct platform_device *pdev,
- struct device *syscon_dev)
+ struct device *syscon_dev, u32 offset)
{
struct mvebu_pinctrl_soc_info *soc = dev_get_platdata(&pdev->dev);
struct mvebu_mpp_ctrl_data *mpp_data;
struct regmap *regmap;
- u32 offset;
int i;
regmap = syscon_node_to_regmap(syscon_dev->of_node);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- if (of_property_read_u32(pdev->dev.of_node, "offset", &offset))
- return -EINVAL;
-
mpp_data = devm_kcalloc(&pdev->dev, soc->ncontrols, sizeof(*mpp_data),
GFP_KERNEL);
if (!mpp_data)
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.h b/drivers/pinctrl/mvebu/pinctrl-mvebu.h
index c90704e74884..75bba436bf59 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.h
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.h
@@ -210,6 +210,6 @@ int mvebu_regmap_mpp_ctrl_set(struct mvebu_mpp_ctrl_data *data, unsigned pid,
int mvebu_pinctrl_probe(struct platform_device *pdev);
int mvebu_pinctrl_simple_mmio_probe(struct platform_device *pdev);
int mvebu_pinctrl_simple_regmap_probe(struct platform_device *pdev,
- struct device *syscon_dev);
+ struct device *syscon_dev, u32 offset);
#endif
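The prototype change matches the new probe flow: instead of every caller describing its register window through an "offset" DT property, the wrapper driver now passes the offset in C. The CP110 probe above passes 0 because its MPP registers start at the beginning of the parent syscon; a hypothetical wrapper whose registers sit deeper in the block would simply pass the right constant (the 0x100 below is made up for illustration):

static int foo_pinctrl_probe(struct platform_device *pdev)
{
	/* platform_data / soc setup omitted, see armada_cp110_pinctrl_probe() */
	return mvebu_pinctrl_simple_regmap_probe(pdev, pdev->dev.parent, 0x100);
}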
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 720a19fd38d2..fc0c230aa11f 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -44,6 +44,7 @@ static const struct pin_config_item conf_items[] = {
PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL, false),
PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT_ENABLE, "input schmitt enabled", NULL, false),
PCONFDUMP(PIN_CONFIG_LOW_POWER_MODE, "pin low power", "mode", true),
+ PCONFDUMP(PIN_CONFIG_OUTPUT_ENABLE, "output enabled", NULL, false),
PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level", true),
PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector", true),
PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL, true),
@@ -172,6 +173,8 @@ static const struct pinconf_generic_params dt_params[] = {
{ "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 },
{ "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 },
{ "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 },
+ { "output-disable", PIN_CONFIG_OUTPUT_ENABLE, 0 },
+ { "output-enable", PIN_CONFIG_OUTPUT_ENABLE, 1 },
{ "output-high", PIN_CONFIG_OUTPUT, 1, },
{ "output-low", PIN_CONFIG_OUTPUT, 0, },
{ "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index a02dba35fcf3..7fc417e4ae96 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -87,9 +87,8 @@ int pin_config_group_get(const char *dev_name, const char *pin_group,
ops = pctldev->desc->confops;
if (!ops || !ops->pin_config_group_get) {
- dev_dbg(pctldev->dev, "cannot get configuration for pin "
- "group, missing group config get function in "
- "driver\n");
+ dev_dbg(pctldev->dev,
+ "cannot get configuration for pin group, missing group config get function in driver\n");
ret = -ENOTSUPP;
goto unlock;
}
@@ -232,7 +231,7 @@ static void pinconf_show_config(struct seq_file *s, struct pinctrl_dev *pctldev,
configs[i]);
else
seq_printf(s, "%08lx", configs[i]);
- seq_puts(s, "\n");
+ seq_putc(s, '\n');
}
}
@@ -244,10 +243,10 @@ void pinconf_show_map(struct seq_file *s, struct pinctrl_map const *map)
switch (map->type) {
case PIN_MAP_TYPE_CONFIGS_PIN:
- seq_printf(s, "pin ");
+ seq_puts(s, "pin ");
break;
case PIN_MAP_TYPE_CONFIGS_GROUP:
- seq_printf(s, "group ");
+ seq_puts(s, "group ");
break;
default:
break;
@@ -319,14 +318,13 @@ static int pinconf_pins_show(struct seq_file *s, void *what)
pin = pctldev->desc->pins[i].number;
desc = pin_desc_get(pctldev, pin);
/* Skip if we cannot search the pin */
- if (desc == NULL)
+ if (!desc)
continue;
seq_printf(s, "pin %d (%s): ", pin, desc->name);
pinconf_dump_pin(pctldev, s, pin);
-
- seq_printf(s, "\n");
+ seq_putc(s, '\n');
}
mutex_unlock(&pctldev->mutex);
@@ -361,8 +359,7 @@ static int pinconf_groups_show(struct seq_file *s, void *what)
seq_printf(s, "%u (%s): ", selector, gname);
pinconf_dump_group(pctldev, s, selector, gname);
- seq_printf(s, "\n");
-
+ seq_putc(s, '\n');
selector++;
}
@@ -397,9 +394,9 @@ static const struct file_operations pinconf_groups_ops = {
struct dbg_cfg {
enum pinctrl_map_type map_type;
- char dev_name[MAX_NAME_LEN+1];
- char state_name[MAX_NAME_LEN+1];
- char pin_name[MAX_NAME_LEN+1];
+ char dev_name[MAX_NAME_LEN + 1];
+ char state_name[MAX_NAME_LEN + 1];
+ char pin_name[MAX_NAME_LEN + 1];
};
/*
@@ -485,7 +482,7 @@ static ssize_t pinconf_dbg_config_write(struct file *file,
const struct pinconf_ops *confops = NULL;
struct dbg_cfg *dbg = &pinconf_dbg_conf;
const struct pinctrl_map_configs *configs;
- char config[MAX_NAME_LEN+1];
+ char config[MAX_NAME_LEN + 1];
char buf[128];
char *b = &buf[0];
int buf_size;
@@ -526,7 +523,7 @@ static ssize_t pinconf_dbg_config_write(struct file *file,
/* get arg 'device_name' */
token = strsep(&b, " ");
- if (token == NULL)
+ if (!token)
return -EINVAL;
if (strlen(token) >= MAX_NAME_LEN)
return -EINVAL;
@@ -534,7 +531,7 @@ static ssize_t pinconf_dbg_config_write(struct file *file,
/* get arg 'state_name' */
token = strsep(&b, " ");
- if (token == NULL)
+ if (!token)
return -EINVAL;
if (strlen(token) >= MAX_NAME_LEN)
return -EINVAL;
@@ -542,7 +539,7 @@ static ssize_t pinconf_dbg_config_write(struct file *file,
/* get arg 'pin_name' */
token = strsep(&b, " ");
- if (token == NULL)
+ if (!token)
return -EINVAL;
if (strlen(token) >= MAX_NAME_LEN)
return -EINVAL;
@@ -550,7 +547,7 @@ static ssize_t pinconf_dbg_config_write(struct file *file,
/* get new_value of config' */
token = strsep(&b, " ");
- if (token == NULL)
+ if (!token)
return -EINVAL;
if (strlen(token) >= MAX_NAME_LEN)
return -EINVAL;
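The hunks above are pure cleanups (seq_putc()/seq_puts() for fixed strings, "!token" instead of "token == NULL", consistent spacing), but they also expose the parsing scheme of the debugfs write handler: four strsep() calls peel off the device name, state name, pin name and new value, each bounded by MAX_NAME_LEN. A tiny standalone illustration of that strsep() pattern (ordinary user-space C, not kernel code):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "mydev default gpio17 0x4";	/* dev state pin value */
	char *b = buf, *token;

	/* each call returns the next space-separated token, or NULL at the end */
	while ((token = strsep(&b, " ")) != NULL)
		printf("token: %s\n", token);
	return 0;
}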
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index e432ec887479..e6779d4352a2 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -8,6 +8,10 @@
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
+ *
+ * Contact Information: Nehal Shah <[email protected]>
+ * Shyam Sundar S K <[email protected]>
+ *
*/
#include <linux/err.h>
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
new file mode 100644
index 000000000000..d8e8842967d6
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -0,0 +1,852 @@
+/*
+ * Ingenic SoCs pinctrl driver
+ *
+ * Copyright (c) 2017 Paul Cercueil <[email protected]>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/compiler.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include "pinconf.h"
+#include "pinmux.h"
+
+#define JZ4740_GPIO_DATA 0x10
+#define JZ4740_GPIO_PULL_DIS 0x30
+#define JZ4740_GPIO_FUNC 0x40
+#define JZ4740_GPIO_SELECT 0x50
+#define JZ4740_GPIO_DIR 0x60
+#define JZ4740_GPIO_TRIG 0x70
+#define JZ4740_GPIO_FLAG 0x80
+
+#define JZ4770_GPIO_INT 0x10
+#define JZ4770_GPIO_MSK 0x20
+#define JZ4770_GPIO_PAT1 0x30
+#define JZ4770_GPIO_PAT0 0x40
+#define JZ4770_GPIO_FLAG 0x50
+#define JZ4770_GPIO_PEN 0x70
+
+#define REG_SET(x) ((x) + 0x4)
+#define REG_CLEAR(x) ((x) + 0x8)
+
+#define PINS_PER_GPIO_CHIP 32
+
+enum jz_version {
+ ID_JZ4740,
+ ID_JZ4770,
+ ID_JZ4780,
+};
+
+struct ingenic_chip_info {
+ unsigned int num_chips;
+
+ const struct group_desc *groups;
+ unsigned int num_groups;
+
+ const struct function_desc *functions;
+ unsigned int num_functions;
+
+ const u32 *pull_ups, *pull_downs;
+};
+
+struct ingenic_pinctrl {
+ struct device *dev;
+ struct regmap *map;
+ struct pinctrl_dev *pctl;
+ struct pinctrl_pin_desc *pdesc;
+ enum jz_version version;
+
+ const struct ingenic_chip_info *info;
+};
+
+static const u32 jz4740_pull_ups[4] = {
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+};
+
+static const u32 jz4740_pull_downs[4] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+static int jz4740_mmc_1bit_pins[] = { 0x69, 0x68, 0x6a, };
+static int jz4740_mmc_4bit_pins[] = { 0x6b, 0x6c, 0x6d, };
+static int jz4740_uart0_data_pins[] = { 0x7a, 0x79, };
+static int jz4740_uart0_hwflow_pins[] = { 0x7e, 0x7f, };
+static int jz4740_uart1_data_pins[] = { 0x7e, 0x7f, };
+static int jz4740_lcd_8bit_pins[] = {
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x52, 0x53, 0x54,
+};
+static int jz4740_lcd_16bit_pins[] = {
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x55,
+};
+static int jz4740_lcd_18bit_pins[] = { 0x50, 0x51, };
+static int jz4740_lcd_18bit_tft_pins[] = { 0x56, 0x57, 0x31, 0x32, };
+static int jz4740_nand_cs1_pins[] = { 0x39, };
+static int jz4740_nand_cs2_pins[] = { 0x3a, };
+static int jz4740_nand_cs3_pins[] = { 0x3b, };
+static int jz4740_nand_cs4_pins[] = { 0x3c, };
+static int jz4740_pwm_pwm0_pins[] = { 0x77, };
+static int jz4740_pwm_pwm1_pins[] = { 0x78, };
+static int jz4740_pwm_pwm2_pins[] = { 0x79, };
+static int jz4740_pwm_pwm3_pins[] = { 0x7a, };
+static int jz4740_pwm_pwm4_pins[] = { 0x7b, };
+static int jz4740_pwm_pwm5_pins[] = { 0x7c, };
+static int jz4740_pwm_pwm6_pins[] = { 0x7e, };
+static int jz4740_pwm_pwm7_pins[] = { 0x7f, };
+
+static int jz4740_mmc_1bit_funcs[] = { 0, 0, 0, };
+static int jz4740_mmc_4bit_funcs[] = { 0, 0, 0, };
+static int jz4740_uart0_data_funcs[] = { 1, 1, };
+static int jz4740_uart0_hwflow_funcs[] = { 1, 1, };
+static int jz4740_uart1_data_funcs[] = { 2, 2, };
+static int jz4740_lcd_8bit_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
+static int jz4740_lcd_16bit_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, };
+static int jz4740_lcd_18bit_funcs[] = { 0, 0, };
+static int jz4740_lcd_18bit_tft_funcs[] = { 0, 0, 0, 0, };
+static int jz4740_nand_cs1_funcs[] = { 0, };
+static int jz4740_nand_cs2_funcs[] = { 0, };
+static int jz4740_nand_cs3_funcs[] = { 0, };
+static int jz4740_nand_cs4_funcs[] = { 0, };
+static int jz4740_pwm_pwm0_funcs[] = { 0, };
+static int jz4740_pwm_pwm1_funcs[] = { 0, };
+static int jz4740_pwm_pwm2_funcs[] = { 0, };
+static int jz4740_pwm_pwm3_funcs[] = { 0, };
+static int jz4740_pwm_pwm4_funcs[] = { 0, };
+static int jz4740_pwm_pwm5_funcs[] = { 0, };
+static int jz4740_pwm_pwm6_funcs[] = { 0, };
+static int jz4740_pwm_pwm7_funcs[] = { 0, };
+
+#define INGENIC_PIN_GROUP(name, id) \
+ { \
+ name, \
+ id##_pins, \
+ ARRAY_SIZE(id##_pins), \
+ id##_funcs, \
+ }
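INGENIC_PIN_GROUP relies on the id##_pins / id##_funcs naming convention used in the tables above and fills the group descriptor positionally (name, pin array, number of pins, and the per-pin function numbers as the group's data pointer). Expanded by hand, the first entry of the jz4740_groups table below becomes:

/* INGENIC_PIN_GROUP("mmc-1bit", jz4740_mmc_1bit) expands to: */
{
	"mmc-1bit",
	jz4740_mmc_1bit_pins,
	ARRAY_SIZE(jz4740_mmc_1bit_pins),
	jz4740_mmc_1bit_funcs,
}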
+
+static const struct group_desc jz4740_groups[] = {
+ INGENIC_PIN_GROUP("mmc-1bit", jz4740_mmc_1bit),
+ INGENIC_PIN_GROUP("mmc-4bit", jz4740_mmc_4bit),
+ INGENIC_PIN_GROUP("uart0-data", jz4740_uart0_data),
+ INGENIC_PIN_GROUP("uart0-hwflow", jz4740_uart0_hwflow),
+ INGENIC_PIN_GROUP("uart1-data", jz4740_uart1_data),
+ INGENIC_PIN_GROUP("lcd-8bit", jz4740_lcd_8bit),
+ INGENIC_PIN_GROUP("lcd-16bit", jz4740_lcd_16bit),
+ INGENIC_PIN_GROUP("lcd-18bit", jz4740_lcd_18bit),
+ INGENIC_PIN_GROUP("lcd-18bit-tft", jz4740_lcd_18bit_tft),
+ { "lcd-no-pins", },
+ INGENIC_PIN_GROUP("nand-cs1", jz4740_nand_cs1),
+ INGENIC_PIN_GROUP("nand-cs2", jz4740_nand_cs2),
+ INGENIC_PIN_GROUP("nand-cs3", jz4740_nand_cs3),
+ INGENIC_PIN_GROUP("nand-cs4", jz4740_nand_cs4),
+ INGENIC_PIN_GROUP("pwm0", jz4740_pwm_pwm0),
+ INGENIC_PIN_GROUP("pwm1", jz4740_pwm_pwm1),
+ INGENIC_PIN_GROUP("pwm2", jz4740_pwm_pwm2),
+ INGENIC_PIN_GROUP("pwm3", jz4740_pwm_pwm3),
+ INGENIC_PIN_GROUP("pwm4", jz4740_pwm_pwm4),
+ INGENIC_PIN_GROUP("pwm5", jz4740_pwm_pwm5),
+ INGENIC_PIN_GROUP("pwm6", jz4740_pwm_pwm6),
+ INGENIC_PIN_GROUP("pwm7", jz4740_pwm_pwm7),
+};
+
+static const char *jz4740_mmc_groups[] = { "mmc-1bit", "mmc-4bit", };
+static const char *jz4740_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
+static const char *jz4740_uart1_groups[] = { "uart1-data", };
+static const char *jz4740_lcd_groups[] = {
+ "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-18bit-tft", "lcd-no-pins",
+};
+static const char *jz4740_nand_groups[] = {
+ "nand-cs1", "nand-cs2", "nand-cs3", "nand-cs4",
+};
+static const char *jz4740_pwm0_groups[] = { "pwm0", };
+static const char *jz4740_pwm1_groups[] = { "pwm1", };
+static const char *jz4740_pwm2_groups[] = { "pwm2", };
+static const char *jz4740_pwm3_groups[] = { "pwm3", };
+static const char *jz4740_pwm4_groups[] = { "pwm4", };
+static const char *jz4740_pwm5_groups[] = { "pwm5", };
+static const char *jz4740_pwm6_groups[] = { "pwm6", };
+static const char *jz4740_pwm7_groups[] = { "pwm7", };
+
+static const struct function_desc jz4740_functions[] = {
+ { "mmc", jz4740_mmc_groups, ARRAY_SIZE(jz4740_mmc_groups), },
+ { "uart0", jz4740_uart0_groups, ARRAY_SIZE(jz4740_uart0_groups), },
+ { "uart1", jz4740_uart1_groups, ARRAY_SIZE(jz4740_uart1_groups), },
+ { "lcd", jz4740_lcd_groups, ARRAY_SIZE(jz4740_lcd_groups), },
+ { "nand", jz4740_nand_groups, ARRAY_SIZE(jz4740_nand_groups), },
+ { "pwm0", jz4740_pwm0_groups, ARRAY_SIZE(jz4740_pwm0_groups), },
+ { "pwm1", jz4740_pwm1_groups, ARRAY_SIZE(jz4740_pwm1_groups), },
+ { "pwm2", jz4740_pwm2_groups, ARRAY_SIZE(jz4740_pwm2_groups), },
+ { "pwm3", jz4740_pwm3_groups, ARRAY_SIZE(jz4740_pwm3_groups), },
+ { "pwm4", jz4740_pwm4_groups, ARRAY_SIZE(jz4740_pwm4_groups), },
+ { "pwm5", jz4740_pwm5_groups, ARRAY_SIZE(jz4740_pwm5_groups), },
+ { "pwm6", jz4740_pwm6_groups, ARRAY_SIZE(jz4740_pwm6_groups), },
+ { "pwm7", jz4740_pwm7_groups, ARRAY_SIZE(jz4740_pwm7_groups), },
+};
+
+static const struct ingenic_chip_info jz4740_chip_info = {
+ .num_chips = 4,
+ .groups = jz4740_groups,
+ .num_groups = ARRAY_SIZE(jz4740_groups),
+ .functions = jz4740_functions,
+ .num_functions = ARRAY_SIZE(jz4740_functions),
+ .pull_ups = jz4740_pull_ups,
+ .pull_downs = jz4740_pull_downs,
+};
+
+static const u32 jz4770_pull_ups[6] = {
+ 0x3fffffff, 0xfff0030c, 0xffffffff, 0xffff4fff, 0xfffffb7c, 0xffa7f00f,
+};
+
+static const u32 jz4770_pull_downs[6] = {
+ 0x00000000, 0x000f0c03, 0x00000000, 0x0000b000, 0x00000483, 0x00580ff0,
+};
+
+static int jz4770_uart0_data_pins[] = { 0xa0, 0xa3, };
+static int jz4770_uart0_hwflow_pins[] = { 0xa1, 0xa2, };
+static int jz4770_uart1_data_pins[] = { 0x7a, 0x7c, };
+static int jz4770_uart1_hwflow_pins[] = { 0x7b, 0x7d, };
+static int jz4770_uart2_data_pins[] = { 0x66, 0x67, };
+static int jz4770_uart2_hwflow_pins[] = { 0x65, 0x64, };
+static int jz4770_uart3_data_pins[] = { 0x6c, 0x85, };
+static int jz4770_uart3_hwflow_pins[] = { 0x88, 0x89, };
+static int jz4770_uart4_data_pins[] = { 0x54, 0x4a, };
+static int jz4770_mmc0_8bit_a_pins[] = { 0x04, 0x05, 0x06, 0x07, 0x18, };
+static int jz4770_mmc0_4bit_a_pins[] = { 0x15, 0x16, 0x17, };
+static int jz4770_mmc0_1bit_a_pins[] = { 0x12, 0x13, 0x14, };
+static int jz4770_mmc0_4bit_e_pins[] = { 0x95, 0x96, 0x97, };
+static int jz4770_mmc0_1bit_e_pins[] = { 0x9c, 0x9d, 0x94, };
+static int jz4770_mmc1_4bit_d_pins[] = { 0x75, 0x76, 0x77, };
+static int jz4770_mmc1_1bit_d_pins[] = { 0x78, 0x79, 0x74, };
+static int jz4770_mmc1_4bit_e_pins[] = { 0x95, 0x96, 0x97, };
+static int jz4770_mmc1_1bit_e_pins[] = { 0x9c, 0x9d, 0x94, };
+static int jz4770_nemc_data_pins[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+};
+static int jz4770_nemc_cle_ale_pins[] = { 0x20, 0x21, };
+static int jz4770_nemc_addr_pins[] = { 0x22, 0x23, 0x24, 0x25, };
+static int jz4770_nemc_rd_we_pins[] = { 0x10, 0x11, };
+static int jz4770_nemc_frd_fwe_pins[] = { 0x12, 0x13, };
+static int jz4770_nemc_cs1_pins[] = { 0x15, };
+static int jz4770_nemc_cs2_pins[] = { 0x16, };
+static int jz4770_nemc_cs3_pins[] = { 0x17, };
+static int jz4770_nemc_cs4_pins[] = { 0x18, };
+static int jz4770_nemc_cs5_pins[] = { 0x19, };
+static int jz4770_nemc_cs6_pins[] = { 0x1a, };
+static int jz4770_i2c0_pins[] = { 0x6e, 0x6f, };
+static int jz4770_i2c1_pins[] = { 0x8e, 0x8f, };
+static int jz4770_i2c2_pins[] = { 0xb0, 0xb1, };
+static int jz4770_i2c3_pins[] = { 0x6a, 0x6b, };
+static int jz4770_i2c4_e_pins[] = { 0x8c, 0x8d, };
+static int jz4770_i2c4_f_pins[] = { 0xb9, 0xb8, };
+static int jz4770_cim_pins[] = {
+ 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31,
+};
+static int jz4770_lcd_32bit_pins[] = {
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x51,
+};
+static int jz4770_pwm_pwm0_pins[] = { 0x80, };
+static int jz4770_pwm_pwm1_pins[] = { 0x81, };
+static int jz4770_pwm_pwm2_pins[] = { 0x82, };
+static int jz4770_pwm_pwm3_pins[] = { 0x83, };
+static int jz4770_pwm_pwm4_pins[] = { 0x84, };
+static int jz4770_pwm_pwm5_pins[] = { 0x85, };
+static int jz4770_pwm_pwm6_pins[] = { 0x6a, };
+static int jz4770_pwm_pwm7_pins[] = { 0x6b, };
+
+static int jz4770_uart0_data_funcs[] = { 0, 0, };
+static int jz4770_uart0_hwflow_funcs[] = { 0, 0, };
+static int jz4770_uart1_data_funcs[] = { 0, 0, };
+static int jz4770_uart1_hwflow_funcs[] = { 0, 0, };
+static int jz4770_uart2_data_funcs[] = { 1, 1, };
+static int jz4770_uart2_hwflow_funcs[] = { 1, 1, };
+static int jz4770_uart3_data_funcs[] = { 0, 1, };
+static int jz4770_uart3_hwflow_funcs[] = { 0, 0, };
+static int jz4770_uart4_data_funcs[] = { 2, 2, };
+static int jz4770_mmc0_8bit_a_funcs[] = { 1, 1, 1, 1, 1, };
+static int jz4770_mmc0_4bit_a_funcs[] = { 1, 1, 1, };
+static int jz4770_mmc0_1bit_a_funcs[] = { 1, 1, 0, };
+static int jz4770_mmc0_4bit_e_funcs[] = { 0, 0, 0, };
+static int jz4770_mmc0_1bit_e_funcs[] = { 0, 0, 0, };
+static int jz4770_mmc1_4bit_d_funcs[] = { 0, 0, 0, };
+static int jz4770_mmc1_1bit_d_funcs[] = { 0, 0, 0, };
+static int jz4770_mmc1_4bit_e_funcs[] = { 1, 1, 1, };
+static int jz4770_mmc1_1bit_e_funcs[] = { 1, 1, 1, };
+static int jz4770_nemc_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, };
+static int jz4770_nemc_cle_ale_funcs[] = { 0, 0, };
+static int jz4770_nemc_addr_funcs[] = { 0, 0, 0, 0, };
+static int jz4770_nemc_rd_we_funcs[] = { 0, 0, };
+static int jz4770_nemc_frd_fwe_funcs[] = { 0, 0, };
+static int jz4770_nemc_cs1_funcs[] = { 0, };
+static int jz4770_nemc_cs2_funcs[] = { 0, };
+static int jz4770_nemc_cs3_funcs[] = { 0, };
+static int jz4770_nemc_cs4_funcs[] = { 0, };
+static int jz4770_nemc_cs5_funcs[] = { 0, };
+static int jz4770_nemc_cs6_funcs[] = { 0, };
+static int jz4770_i2c0_funcs[] = { 0, 0, };
+static int jz4770_i2c1_funcs[] = { 0, 0, };
+static int jz4770_i2c2_funcs[] = { 2, 2, };
+static int jz4770_i2c3_funcs[] = { 1, 1, };
+static int jz4770_i2c4_e_funcs[] = { 1, 1, };
+static int jz4770_i2c4_f_funcs[] = { 1, 1, };
+static int jz4770_cim_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, };
+static int jz4770_lcd_32bit_funcs[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0,
+};
+static int jz4770_pwm_pwm0_funcs[] = { 0, };
+static int jz4770_pwm_pwm1_funcs[] = { 0, };
+static int jz4770_pwm_pwm2_funcs[] = { 0, };
+static int jz4770_pwm_pwm3_funcs[] = { 0, };
+static int jz4770_pwm_pwm4_funcs[] = { 0, };
+static int jz4770_pwm_pwm5_funcs[] = { 0, };
+static int jz4770_pwm_pwm6_funcs[] = { 0, };
+static int jz4770_pwm_pwm7_funcs[] = { 0, };
+
+static const struct group_desc jz4770_groups[] = {
+ INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data),
+ INGENIC_PIN_GROUP("uart0-hwflow", jz4770_uart0_hwflow),
+ INGENIC_PIN_GROUP("uart1-data", jz4770_uart1_data),
+ INGENIC_PIN_GROUP("uart1-hwflow", jz4770_uart1_hwflow),
+ INGENIC_PIN_GROUP("uart2-data", jz4770_uart2_data),
+ INGENIC_PIN_GROUP("uart2-hwflow", jz4770_uart2_hwflow),
+ INGENIC_PIN_GROUP("uart3-data", jz4770_uart3_data),
+ INGENIC_PIN_GROUP("uart3-hwflow", jz4770_uart3_hwflow),
+ INGENIC_PIN_GROUP("uart4-data", jz4770_uart4_data),
+ INGENIC_PIN_GROUP("mmc0-8bit-a", jz4770_mmc0_8bit_a),
+ INGENIC_PIN_GROUP("mmc0-4bit-a", jz4770_mmc0_4bit_a),
+ INGENIC_PIN_GROUP("mmc0-1bit-a", jz4770_mmc0_1bit_a),
+ INGENIC_PIN_GROUP("mmc0-4bit-e", jz4770_mmc0_4bit_e),
+ INGENIC_PIN_GROUP("mmc0-1bit-e", jz4770_mmc0_1bit_e),
+ INGENIC_PIN_GROUP("mmc1-4bit-d", jz4770_mmc1_4bit_d),
+ INGENIC_PIN_GROUP("mmc1-1bit-d", jz4770_mmc1_1bit_d),
+ INGENIC_PIN_GROUP("mmc1-4bit-e", jz4770_mmc1_4bit_e),
+ INGENIC_PIN_GROUP("mmc1-1bit-e", jz4770_mmc1_1bit_e),
+ INGENIC_PIN_GROUP("nemc-data", jz4770_nemc_data),
+ INGENIC_PIN_GROUP("nemc-cle-ale", jz4770_nemc_cle_ale),
+ INGENIC_PIN_GROUP("nemc-addr", jz4770_nemc_addr),
+ INGENIC_PIN_GROUP("nemc-rd-we", jz4770_nemc_rd_we),
+ INGENIC_PIN_GROUP("nemc-frd-fwe", jz4770_nemc_frd_fwe),
+ INGENIC_PIN_GROUP("nemc-cs1", jz4770_nemc_cs1),
+ INGENIC_PIN_GROUP("nemc-cs2", jz4770_nemc_cs2),
+ INGENIC_PIN_GROUP("nemc-cs3", jz4770_nemc_cs3),
+ INGENIC_PIN_GROUP("nemc-cs4", jz4770_nemc_cs4),
+ INGENIC_PIN_GROUP("nemc-cs5", jz4770_nemc_cs5),
+ INGENIC_PIN_GROUP("nemc-cs6", jz4770_nemc_cs6),
+ INGENIC_PIN_GROUP("i2c0-data", jz4770_i2c0),
+ INGENIC_PIN_GROUP("i2c1-data", jz4770_i2c1),
+ INGENIC_PIN_GROUP("i2c2-data", jz4770_i2c2),
+ INGENIC_PIN_GROUP("i2c3-data", jz4770_i2c3),
+ INGENIC_PIN_GROUP("i2c4-data-e", jz4770_i2c4_e),
+ INGENIC_PIN_GROUP("i2c4-data-f", jz4770_i2c4_f),
+ INGENIC_PIN_GROUP("cim-data", jz4770_cim),
+ INGENIC_PIN_GROUP("lcd-32bit", jz4770_lcd_32bit),
+ { "lcd-no-pins", },
+ INGENIC_PIN_GROUP("pwm0", jz4770_pwm_pwm0),
+ INGENIC_PIN_GROUP("pwm1", jz4770_pwm_pwm1),
+ INGENIC_PIN_GROUP("pwm2", jz4770_pwm_pwm2),
+ INGENIC_PIN_GROUP("pwm3", jz4770_pwm_pwm3),
+ INGENIC_PIN_GROUP("pwm4", jz4770_pwm_pwm4),
+ INGENIC_PIN_GROUP("pwm5", jz4770_pwm_pwm5),
+ INGENIC_PIN_GROUP("pwm6", jz4770_pwm_pwm6),
+ INGENIC_PIN_GROUP("pwm7", jz4770_pwm_pwm7),
+};
+
+static const char *jz4770_uart0_groups[] = { "uart0-data", "uart0-hwflow", };
+static const char *jz4770_uart1_groups[] = { "uart1-data", "uart1-hwflow", };
+static const char *jz4770_uart2_groups[] = { "uart2-data", "uart2-hwflow", };
+static const char *jz4770_uart3_groups[] = { "uart3-data", "uart3-hwflow", };
+static const char *jz4770_uart4_groups[] = { "uart4-data", };
+static const char *jz4770_mmc0_groups[] = {
+ "mmc0-8bit-a", "mmc0-4bit-a", "mmc0-1bit-a",
+ "mmc0-1bit-e", "mmc0-4bit-e",
+};
+static const char *jz4770_mmc1_groups[] = {
+ "mmc1-1bit-d", "mmc1-4bit-d", "mmc1-1bit-e", "mmc1-4bit-e",
+};
+static const char *jz4770_nemc_groups[] = {
+ "nemc-data", "nemc-cle-ale", "nemc-addr", "nemc-rd-we", "nemc-frd-fwe",
+};
+static const char *jz4770_cs1_groups[] = { "nemc-cs1", };
+static const char *jz4770_cs6_groups[] = { "nemc-cs6", };
+static const char *jz4770_i2c0_groups[] = { "i2c0-data", };
+static const char *jz4770_i2c1_groups[] = { "i2c1-data", };
+static const char *jz4770_i2c2_groups[] = { "i2c2-data", };
+static const char *jz4770_i2c3_groups[] = { "i2c3-data", };
+static const char *jz4770_i2c4_groups[] = { "i2c4-data-e", "i2c4-data-f", };
+static const char *jz4770_cim_groups[] = { "cim-data", };
+static const char *jz4770_lcd_groups[] = { "lcd-32bit", "lcd-no-pins", };
+static const char *jz4770_pwm0_groups[] = { "pwm0", };
+static const char *jz4770_pwm1_groups[] = { "pwm1", };
+static const char *jz4770_pwm2_groups[] = { "pwm2", };
+static const char *jz4770_pwm3_groups[] = { "pwm3", };
+static const char *jz4770_pwm4_groups[] = { "pwm4", };
+static const char *jz4770_pwm5_groups[] = { "pwm5", };
+static const char *jz4770_pwm6_groups[] = { "pwm6", };
+static const char *jz4770_pwm7_groups[] = { "pwm7", };
+
+static const struct function_desc jz4770_functions[] = {
+ { "uart0", jz4770_uart0_groups, ARRAY_SIZE(jz4770_uart0_groups), },
+ { "uart1", jz4770_uart1_groups, ARRAY_SIZE(jz4770_uart1_groups), },
+ { "uart2", jz4770_uart2_groups, ARRAY_SIZE(jz4770_uart2_groups), },
+ { "uart3", jz4770_uart3_groups, ARRAY_SIZE(jz4770_uart3_groups), },
+ { "uart4", jz4770_uart4_groups, ARRAY_SIZE(jz4770_uart4_groups), },
+ { "mmc0", jz4770_mmc0_groups, ARRAY_SIZE(jz4770_mmc0_groups), },
+ { "mmc1", jz4770_mmc1_groups, ARRAY_SIZE(jz4770_mmc1_groups), },
+ { "nemc", jz4770_nemc_groups, ARRAY_SIZE(jz4770_nemc_groups), },
+ { "nemc-cs1", jz4770_cs1_groups, ARRAY_SIZE(jz4770_cs1_groups), },
+ { "nemc-cs6", jz4770_cs6_groups, ARRAY_SIZE(jz4770_cs6_groups), },
+ { "i2c0", jz4770_i2c0_groups, ARRAY_SIZE(jz4770_i2c0_groups), },
+ { "i2c1", jz4770_i2c1_groups, ARRAY_SIZE(jz4770_i2c1_groups), },
+ { "i2c2", jz4770_i2c2_groups, ARRAY_SIZE(jz4770_i2c2_groups), },
+ { "i2c3", jz4770_i2c3_groups, ARRAY_SIZE(jz4770_i2c3_groups), },
+ { "i2c4", jz4770_i2c4_groups, ARRAY_SIZE(jz4770_i2c4_groups), },
+ { "cim", jz4770_cim_groups, ARRAY_SIZE(jz4770_cim_groups), },
+ { "lcd", jz4770_lcd_groups, ARRAY_SIZE(jz4770_lcd_groups), },
+ { "pwm0", jz4770_pwm0_groups, ARRAY_SIZE(jz4770_pwm0_groups), },
+ { "pwm1", jz4770_pwm1_groups, ARRAY_SIZE(jz4770_pwm1_groups), },
+ { "pwm2", jz4770_pwm2_groups, ARRAY_SIZE(jz4770_pwm2_groups), },
+ { "pwm3", jz4770_pwm3_groups, ARRAY_SIZE(jz4770_pwm3_groups), },
+ { "pwm4", jz4770_pwm4_groups, ARRAY_SIZE(jz4770_pwm4_groups), },
+ { "pwm5", jz4770_pwm5_groups, ARRAY_SIZE(jz4770_pwm5_groups), },
+ { "pwm6", jz4770_pwm6_groups, ARRAY_SIZE(jz4770_pwm6_groups), },
+ { "pwm7", jz4770_pwm7_groups, ARRAY_SIZE(jz4770_pwm7_groups), },
+};
+
+static const struct ingenic_chip_info jz4770_chip_info = {
+ .num_chips = 6,
+ .groups = jz4770_groups,
+ .num_groups = ARRAY_SIZE(jz4770_groups),
+ .functions = jz4770_functions,
+ .num_functions = ARRAY_SIZE(jz4770_functions),
+ .pull_ups = jz4770_pull_ups,
+ .pull_downs = jz4770_pull_downs,
+};
+
+static inline void ingenic_config_pin(struct ingenic_pinctrl *jzpc,
+ unsigned int pin, u8 reg, bool set)
+{
+ unsigned int idx = pin % PINS_PER_GPIO_CHIP;
+ unsigned int offt = pin / PINS_PER_GPIO_CHIP;
+
+ regmap_write(jzpc->map, offt * 0x100 +
+ (set ? REG_SET(reg) : REG_CLEAR(reg)), BIT(idx));
+}
+
+static inline bool ingenic_get_pin_config(struct ingenic_pinctrl *jzpc,
+ unsigned int pin, u8 reg)
+{
+ unsigned int idx = pin % PINS_PER_GPIO_CHIP;
+ unsigned int offt = pin / PINS_PER_GPIO_CHIP;
+ unsigned int val;
+
+ regmap_read(jzpc->map, offt * 0x100 + reg, &val);
+
+ return val & BIT(idx);
+}
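Both helpers assume the same layout: each GPIO bank occupies a 0x100-byte window in the regmap, and the configuration registers used here have write-only "set" and "clear" companions at +0x4 and +0x8, so ingenic_config_pin() can flip a single bit with one write and no read-modify-write cycle. For example, masking the interrupt function of pin 37 (PB5) comes down to roughly this:

/* what ingenic_config_pin(jzpc, 37, JZ4770_GPIO_MSK, true) boils down to:
 * bank B starts at 0x100, MSK sits at 0x20, its "set" register at 0x24 */
regmap_write(jzpc->map, 0x100 + REG_SET(JZ4770_GPIO_MSK), BIT(37 % 32));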
+
+static struct pinctrl_ops ingenic_pctlops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
+ int pin, int func)
+{
+ unsigned int idx = pin % PINS_PER_GPIO_CHIP;
+ unsigned int offt = pin / PINS_PER_GPIO_CHIP;
+
+ dev_dbg(jzpc->dev, "set pin P%c%u to function %u\n",
+ 'A' + offt, idx, func);
+
+ if (jzpc->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_MSK, false);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
+ } else {
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, true);
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_TRIG, func & 0x2);
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func > 0);
+ }
+
+ return 0;
+}
+
+static int ingenic_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int selector, unsigned int group)
+{
+ struct ingenic_pinctrl *jzpc = pinctrl_dev_get_drvdata(pctldev);
+ struct function_desc *func;
+ struct group_desc *grp;
+ unsigned int i;
+
+ func = pinmux_generic_get_function(pctldev, selector);
+ if (!func)
+ return -EINVAL;
+
+ grp = pinctrl_generic_get_group(pctldev, group);
+ if (!grp)
+ return -EINVAL;
+
+ dev_dbg(pctldev->dev, "enable function %s group %s\n",
+ func->name, grp->name);
+
+ for (i = 0; i < grp->num_pins; i++) {
+ int *pin_modes = grp->data;
+
+ ingenic_pinmux_set_pin_fn(jzpc, grp->pins[i], pin_modes[i]);
+ }
+
+ return 0;
+}
+
+static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin, bool input)
+{
+ struct ingenic_pinctrl *jzpc = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int idx = pin % PINS_PER_GPIO_CHIP;
+ unsigned int offt = pin / PINS_PER_GPIO_CHIP;
+
+ dev_dbg(pctldev->dev, "set pin P%c%u to %sput\n",
+ 'A' + offt, idx, input ? "in" : "out");
+
+ if (jzpc->version >= ID_JZ4770) {
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_MSK, true);
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
+ } else {
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, input);
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, false);
+ }
+
+ return 0;
+}
+
+static struct pinmux_ops ingenic_pmxops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = ingenic_pinmux_set_mux,
+ .gpio_set_direction = ingenic_pinmux_gpio_set_direction,
+};
+
+static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ struct ingenic_pinctrl *jzpc = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ unsigned int idx = pin % PINS_PER_GPIO_CHIP;
+ unsigned int offt = pin / PINS_PER_GPIO_CHIP;
+ bool pull;
+
+ if (jzpc->version >= ID_JZ4770)
+ pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
+ else
+ pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (pull)
+ return -EINVAL;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (!pull || !(jzpc->info->pull_ups[offt] & BIT(idx)))
+ return -EINVAL;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (!pull || !(jzpc->info->pull_downs[offt] & BIT(idx)))
+ return -EINVAL;
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, 1);
+ return 0;
+}
+
+static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
+ unsigned int pin, bool enabled)
+{
+ if (jzpc->version >= ID_JZ4770)
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PEN, !enabled);
+ else
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !enabled);
+}
+
+static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct ingenic_pinctrl *jzpc = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int idx = pin % PINS_PER_GPIO_CHIP;
+ unsigned int offt = pin / PINS_PER_GPIO_CHIP;
+ unsigned int cfg;
+
+ for (cfg = 0; cfg < num_configs; cfg++) {
+ switch (pinconf_to_config_param(configs[cfg])) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ continue;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+
+ for (cfg = 0; cfg < num_configs; cfg++) {
+ switch (pinconf_to_config_param(configs[cfg])) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ dev_dbg(jzpc->dev, "disable pull-over for pin P%c%u\n",
+ 'A' + offt, idx);
+ ingenic_set_bias(jzpc, pin, false);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (!(jzpc->info->pull_ups[offt] & BIT(idx)))
+ return -EINVAL;
+ dev_dbg(jzpc->dev, "set pull-up for pin P%c%u\n",
+ 'A' + offt, idx);
+ ingenic_set_bias(jzpc, pin, true);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (!(jzpc->info->pull_downs[offt] & BIT(idx)))
+ return -EINVAL;
+ dev_dbg(jzpc->dev, "set pull-down for pin P%c%u\n",
+ 'A' + offt, idx);
+ ingenic_set_bias(jzpc, pin, true);
+ break;
+
+ default:
+ unreachable();
+ }
+ }
+
+ return 0;
+}
+
+static int ingenic_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int group, unsigned long *config)
+{
+ const unsigned int *pins;
+ unsigned int i, npins, old = 0;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ if (ingenic_pinconf_get(pctldev, pins[i], config))
+ return -ENOTSUPP;
+
+ /* configs do not match between two pins */
+ if (i && (old != *config))
+ return -ENOTSUPP;
+
+ old = *config;
+ }
+
+ return 0;
+}
+
+static int ingenic_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int group, unsigned long *configs,
+ unsigned int num_configs)
+{
+ const unsigned int *pins;
+ unsigned int i, npins;
+ int ret;
+
+ ret = pinctrl_generic_get_group_pins(pctldev, group, &pins, &npins);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < npins; i++) {
+ ret = ingenic_pinconf_set(pctldev,
+ pins[i], configs, num_configs);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct pinconf_ops ingenic_confops = {
+ .is_generic = true,
+ .pin_config_get = ingenic_pinconf_get,
+ .pin_config_set = ingenic_pinconf_set,
+ .pin_config_group_get = ingenic_pinconf_group_get,
+ .pin_config_group_set = ingenic_pinconf_group_set,
+};
+
+static const struct regmap_config ingenic_pinctrl_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static const struct of_device_id ingenic_pinctrl_of_match[] = {
+ { .compatible = "ingenic,jz4740-pinctrl", .data = (void *) ID_JZ4740 },
+ { .compatible = "ingenic,jz4770-pinctrl", .data = (void *) ID_JZ4770 },
+ { .compatible = "ingenic,jz4780-pinctrl", .data = (void *) ID_JZ4780 },
+ {},
+};
+
+int ingenic_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ingenic_pinctrl *jzpc;
+ struct pinctrl_desc *pctl_desc;
+ void __iomem *base;
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+ const struct of_device_id *of_id = of_match_device(
+ ingenic_pinctrl_of_match, dev);
+ const struct ingenic_chip_info *chip_info;
+ unsigned int i;
+ int err;
+
+ jzpc = devm_kzalloc(dev, sizeof(*jzpc), GFP_KERNEL);
+ if (!jzpc)
+ return -ENOMEM;
+
+ base = devm_ioremap_resource(dev,
+ platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ if (IS_ERR(base)) {
+ dev_err(dev, "Failed to ioremap registers\n");
+ return PTR_ERR(base);
+ }
+
+ jzpc->map = devm_regmap_init_mmio(dev, base,
+ &ingenic_pinctrl_regmap_config);
+ if (IS_ERR(jzpc->map)) {
+ dev_err(dev, "Failed to create regmap\n");
+ return PTR_ERR(jzpc->map);
+ }
+
+ jzpc->dev = dev;
+
+ if (of_id)
+ jzpc->version = (enum jz_version)of_id->data;
+ else
+ jzpc->version = (enum jz_version)id->driver_data;
+
+ if (jzpc->version >= ID_JZ4770)
+ chip_info = &jz4770_chip_info;
+ else
+ chip_info = &jz4740_chip_info;
+ jzpc->info = chip_info;
+
+ pctl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctl_desc), GFP_KERNEL);
+ if (!pctl_desc)
+ return -ENOMEM;
+
+ /* fill in pinctrl_desc structure */
+ pctl_desc->name = dev_name(dev);
+ pctl_desc->owner = THIS_MODULE;
+ pctl_desc->pctlops = &ingenic_pctlops;
+ pctl_desc->pmxops = &ingenic_pmxops;
+ pctl_desc->confops = &ingenic_confops;
+ pctl_desc->npins = chip_info->num_chips * PINS_PER_GPIO_CHIP;
+ pctl_desc->pins = jzpc->pdesc = devm_kzalloc(&pdev->dev,
+ sizeof(*jzpc->pdesc) * pctl_desc->npins, GFP_KERNEL);
+ if (!jzpc->pdesc)
+ return -ENOMEM;
+
+ for (i = 0; i < pctl_desc->npins; i++) {
+ jzpc->pdesc[i].number = i;
+ jzpc->pdesc[i].name = kasprintf(GFP_KERNEL, "P%c%d",
+ 'A' + (i / PINS_PER_GPIO_CHIP),
+ i % PINS_PER_GPIO_CHIP);
+ }
+
+ jzpc->pctl = devm_pinctrl_register(dev, pctl_desc, jzpc);
+ if (IS_ERR(jzpc->pctl)) {
+ dev_err(dev, "Failed to register pinctrl\n");
+ return PTR_ERR(jzpc->pctl);
+ }
+
+ for (i = 0; i < chip_info->num_groups; i++) {
+ const struct group_desc *group = &chip_info->groups[i];
+
+ err = pinctrl_generic_add_group(jzpc->pctl, group->name,
+ group->pins, group->num_pins, group->data);
+ if (err) {
+ dev_err(dev, "Failed to register group %s\n",
+ group->name);
+ return err;
+ }
+ }
+
+ for (i = 0; i < chip_info->num_functions; i++) {
+ const struct function_desc *func = &chip_info->functions[i];
+
+ err = pinmux_generic_add_function(jzpc->pctl, func->name,
+ func->group_names, func->num_group_names,
+ func->data);
+ if (err) {
+ dev_err(dev, "Failed to register function %s\n",
+ func->name);
+ return err;
+ }
+ }
+
+ dev_set_drvdata(dev, jzpc->map);
+
+ if (dev->of_node) {
+ err = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (err) {
+ dev_err(dev, "Failed to probe GPIO devices\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id ingenic_pinctrl_ids[] = {
+ { "jz4740-pinctrl", ID_JZ4740 },
+ { "jz4770-pinctrl", ID_JZ4770 },
+ { "jz4780-pinctrl", ID_JZ4780 },
+ {},
+};
+
+static struct platform_driver ingenic_pinctrl_driver = {
+ .driver = {
+ .name = "pinctrl-ingenic",
+ .of_match_table = of_match_ptr(ingenic_pinctrl_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = ingenic_pinctrl_probe,
+ .id_table = ingenic_pinctrl_ids,
+};
+
+static int __init ingenic_pinctrl_drv_register(void)
+{
+ return platform_driver_register(&ingenic_pinctrl_driver);
+}
+postcore_initcall(ingenic_pinctrl_drv_register);
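The probe routine names every pin "P<bank><index>" with 32 pins per bank, which is also the flat numbering the group tables use: 0x7a in jz4740_uart0_data_pins is bank 3, index 26, i.e. PD26. The arithmetic, written out as a small helper purely for illustration (it is not part of the driver):

static void ingenic_pin_name(unsigned int pin, char *buf, size_t len)
{
	/* same convention as the kasprintf() in ingenic_pinctrl_probe() */
	snprintf(buf, len, "P%c%u",
		 'A' + pin / PINS_PER_GPIO_CHIP,	/* bank letter */
		 pin % PINS_PER_GPIO_CHIP);		/* index within the bank */
}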
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 2a57d024481d..3e40d4245512 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -1,14 +1,4 @@
-/*
- * MCP23S08 SPI/I2C GPIO gpio expander driver
- *
- * The inputs and outputs of the mcp23s08, mcp23s17, mcp23008 and mcp23017 are
- * supported.
- * For the I2C versions of the chips (mcp23008 and mcp23017) generation of
- * interrupts is also supported.
- * The hardware of the SPI versions of the chips (mcp23s08 and mcp23s17) is
- * also capable of generating interrupts, but the linux driver does not
- * support that yet.
- */
+/* MCP23S08 SPI/I2C GPIO driver */
#include <linux/kernel.h>
#include <linux/device.h>
@@ -21,11 +11,13 @@
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/interrupt.h>
-#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
-/**
+/*
* MCP types supported by driver
*/
#define MCP_TYPE_S08 0
@@ -34,6 +26,8 @@
#define MCP_TYPE_017 3
#define MCP_TYPE_S18 4
+#define MCP_MAX_DEV_PER_CS 8
+
/* Registers are all 8 bits wide.
*
* The mcp23s17 has twice as many bits, and can be configured to work
@@ -64,19 +58,52 @@ struct mcp23s08 {
bool irq_active_high;
bool reg_shift;
- u16 cache[11];
u16 irq_rise;
u16 irq_fall;
int irq;
bool irq_controller;
- /* lock protects the cached values */
+ int cached_gpio;
+ /* lock protects regmap access with bypass/cache flags */
struct mutex lock;
- struct mutex irq_lock;
struct gpio_chip chip;
struct regmap *regmap;
struct device *dev;
+
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_desc pinctrl_desc;
+};
+
+static const struct reg_default mcp23x08_defaults[] = {
+ {.reg = MCP_IODIR, .def = 0xff},
+ {.reg = MCP_IPOL, .def = 0x00},
+ {.reg = MCP_GPINTEN, .def = 0x00},
+ {.reg = MCP_DEFVAL, .def = 0x00},
+ {.reg = MCP_INTCON, .def = 0x00},
+ {.reg = MCP_IOCON, .def = 0x00},
+ {.reg = MCP_GPPU, .def = 0x00},
+ {.reg = MCP_OLAT, .def = 0x00},
+};
+
+static const struct regmap_range mcp23x08_volatile_range = {
+ .range_min = MCP_INTF,
+ .range_max = MCP_GPIO,
+};
+
+static const struct regmap_access_table mcp23x08_volatile_table = {
+ .yes_ranges = &mcp23x08_volatile_range,
+ .n_yes_ranges = 1,
+};
+
+static const struct regmap_range mcp23x08_precious_range = {
+ .range_min = MCP_GPIO,
+ .range_max = MCP_GPIO,
+};
+
+static const struct regmap_access_table mcp23x08_precious_table = {
+ .yes_ranges = &mcp23x08_precious_range,
+ .n_yes_ranges = 1,
};
static const struct regmap_config mcp23x08_regmap = {
@@ -84,18 +111,203 @@ static const struct regmap_config mcp23x08_regmap = {
.val_bits = 8,
.reg_stride = 1,
+ .volatile_table = &mcp23x08_volatile_table,
+ .precious_table = &mcp23x08_precious_table,
+ .reg_defaults = mcp23x08_defaults,
+ .num_reg_defaults = ARRAY_SIZE(mcp23x08_defaults),
+ .cache_type = REGCACHE_FLAT,
.max_register = MCP_OLAT,
};
+static const struct reg_default mcp23x16_defaults[] = {
+ {.reg = MCP_IODIR << 1, .def = 0xffff},
+ {.reg = MCP_IPOL << 1, .def = 0x0000},
+ {.reg = MCP_GPINTEN << 1, .def = 0x0000},
+ {.reg = MCP_DEFVAL << 1, .def = 0x0000},
+ {.reg = MCP_INTCON << 1, .def = 0x0000},
+ {.reg = MCP_IOCON << 1, .def = 0x0000},
+ {.reg = MCP_GPPU << 1, .def = 0x0000},
+ {.reg = MCP_OLAT << 1, .def = 0x0000},
+};
+
+static const struct regmap_range mcp23x16_volatile_range = {
+ .range_min = MCP_INTF << 1,
+ .range_max = MCP_GPIO << 1,
+};
+
+static const struct regmap_access_table mcp23x16_volatile_table = {
+ .yes_ranges = &mcp23x16_volatile_range,
+ .n_yes_ranges = 1,
+};
+
+static const struct regmap_range mcp23x16_precious_range = {
+ .range_min = MCP_GPIO << 1,
+ .range_max = MCP_GPIO << 1,
+};
+
+static const struct regmap_access_table mcp23x16_precious_table = {
+ .yes_ranges = &mcp23x16_precious_range,
+ .n_yes_ranges = 1,
+};
+
static const struct regmap_config mcp23x17_regmap = {
.reg_bits = 8,
.val_bits = 16,
.reg_stride = 2,
.max_register = MCP_OLAT << 1,
+ .volatile_table = &mcp23x16_volatile_table,
+ .precious_table = &mcp23x16_precious_table,
+ .reg_defaults = mcp23x16_defaults,
+ .num_reg_defaults = ARRAY_SIZE(mcp23x16_defaults),
+ .cache_type = REGCACHE_FLAT,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
+static int mcp_read(struct mcp23s08 *mcp, unsigned int reg, unsigned int *val)
+{
+ return regmap_read(mcp->regmap, reg << mcp->reg_shift, val);
+}
+
+static int mcp_write(struct mcp23s08 *mcp, unsigned int reg, unsigned int val)
+{
+ return regmap_write(mcp->regmap, reg << mcp->reg_shift, val);
+}
+
+static int mcp_set_mask(struct mcp23s08 *mcp, unsigned int reg,
+ unsigned int mask, bool enabled)
+{
+ u16 val = enabled ? 0xffff : 0x0000;
+ return regmap_update_bits(mcp->regmap, reg << mcp->reg_shift,
+ mask, val);
+}
+
+static int mcp_set_bit(struct mcp23s08 *mcp, unsigned int reg,
+ unsigned int pin, bool enabled)
+{
+ u16 mask = BIT(pin);
+ return mcp_set_mask(mcp, reg, mask, enabled);
+}
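The mcp_read()/mcp_write()/mcp_set_bit() helpers above work on logical register indexes and shift them by reg_shift, so one code path serves both the 8-bit parts (reg_stride 1, reg_shift 0) and the 16-bit parts (reg_stride 2, reg_shift 1); mcp_set_mask() can pass 0xffff as the value because regmap_update_bits() only applies the bits selected by the mask. A minimal sketch of what a single call boils down to (the function name is hypothetical, for illustration only):

	/* what mcp_set_bit(mcp, MCP_GPPU, 3, true) effectively performs */
	static int mcp_example_pull_up_bit3(struct mcp23s08 *mcp)
	{
		return regmap_update_bits(mcp->regmap, MCP_GPPU << mcp->reg_shift,
					  BIT(3), 0xffff);
	}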
+
+static const struct pinctrl_pin_desc mcp23x08_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+};
+
+static const struct pinctrl_pin_desc mcp23x17_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+};
+
+static int mcp_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return 0;
+}
+
+static const char *mcp_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group)
+{
+ return NULL;
+}
+
+static int mcp_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int group,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ return -ENOTSUPP;
+}
+
+static const struct pinctrl_ops mcp_pinctrl_ops = {
+ .get_groups_count = mcp_pinctrl_get_groups_count,
+ .get_group_name = mcp_pinctrl_get_group_name,
+ .get_group_pins = mcp_pinctrl_get_group_pins,
+#ifdef CONFIG_OF
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinconf_generic_dt_free_map,
+#endif
+};
+
+static int mcp_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct mcp23s08 *mcp = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ unsigned int data, status;
+ int ret;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = mcp_read(mcp, MCP_GPPU, &data);
+ if (ret < 0)
+ return ret;
+ status = (data & BIT(pin)) ? 1 : 0;
+ break;
+ default:
+ dev_err(mcp->dev, "Invalid config param %04x\n", param);
+ return -ENOTSUPP;
+ }
+
+ *config = 0;
+
+ return status ? 0 : -EINVAL;
+}
+
+static int mcp_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct mcp23s08 *mcp = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param;
+ u32 arg, mask;
+ u16 val;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ val = arg ? 0xFFFF : 0x0000;
+ mask = BIT(pin);
+ ret = mcp_set_bit(mcp, MCP_GPPU, pin, arg);
+ break;
+ default:
+ dev_err(mcp->dev, "Invalid config param %04x\n", param);
+ return -ENOTSUPP;
+ }
+ }
+
+ return ret;
+}
+
+static const struct pinconf_ops mcp_pinconf_ops = {
+ .pin_config_get = mcp_pinconf_get,
+ .pin_config_set = mcp_pinconf_set,
+ .is_generic = true,
+};
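The pinconf callbacks above receive each setting as a packed unsigned long; the generic pinconf helpers keep the parameter in the low bits and the argument above it. A minimal sketch of the round trip, assuming the helpers from <linux/pinctrl/pinconf-generic.h> (the function name is hypothetical):

	static u32 mcp_example_pack_pull_up(void)
	{
		/* pack PIN_CONFIG_BIAS_PULL_UP with argument 1 ... */
		unsigned long cfg = pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_UP, 1);

		/* ... and unpack the two halves as mcp_pinconf_set() does */
		if (pinconf_to_config_param(cfg) == PIN_CONFIG_BIAS_PULL_UP)
			return pinconf_to_config_argument(cfg);	/* 1 */

		return 0;
	}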
+
/*----------------------------------------------------------------------*/
#ifdef CONFIG_SPI_MASTER
@@ -158,30 +370,6 @@ static const struct regmap_bus mcp23sxx_spi_regmap = {
#endif /* CONFIG_SPI_MASTER */
-static int mcp_read(struct mcp23s08 *mcp, unsigned int reg, unsigned int *val)
-{
- return regmap_read(mcp->regmap, reg << mcp->reg_shift, val);
-}
-
-static int mcp_write(struct mcp23s08 *mcp, unsigned int reg, unsigned int val)
-{
- return regmap_write(mcp->regmap, reg << mcp->reg_shift, val);
-}
-
-static int mcp_update_cache(struct mcp23s08 *mcp)
-{
- int ret, reg, i;
-
- for (i = 0; i < ARRAY_SIZE(mcp->cache); i++) {
- ret = mcp_read(mcp, i, &reg);
- if (ret < 0)
- return ret;
- mcp->cache[i] = reg;
- }
-
- return 0;
-}
-
/*----------------------------------------------------------------------*/
/* A given spi_device can represent up to eight mcp23sxx chips
@@ -202,9 +390,9 @@ static int mcp23s08_direction_input(struct gpio_chip *chip, unsigned offset)
int status;
mutex_lock(&mcp->lock);
- mcp->cache[MCP_IODIR] |= (1 << offset);
- status = mcp_write(mcp, MCP_IODIR, mcp->cache[MCP_IODIR]);
+ status = mcp_set_bit(mcp, MCP_IODIR, offset, true);
mutex_unlock(&mcp->lock);
+
return status;
}
@@ -219,33 +407,27 @@ static int mcp23s08_get(struct gpio_chip *chip, unsigned offset)
ret = mcp_read(mcp, MCP_GPIO, &status);
if (ret < 0)
status = 0;
- else {
- mcp->cache[MCP_GPIO] = status;
+ else
status = !!(status & (1 << offset));
- }
+
+ mcp->cached_gpio = status;
+
mutex_unlock(&mcp->lock);
return status;
}
-static int __mcp23s08_set(struct mcp23s08 *mcp, unsigned mask, int value)
+static int __mcp23s08_set(struct mcp23s08 *mcp, unsigned mask, bool value)
{
- unsigned olat = mcp->cache[MCP_OLAT];
-
- if (value)
- olat |= mask;
- else
- olat &= ~mask;
- mcp->cache[MCP_OLAT] = olat;
- return mcp_write(mcp, MCP_OLAT, olat);
+ return mcp_set_mask(mcp, MCP_OLAT, mask, value);
}
static void mcp23s08_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct mcp23s08 *mcp = gpiochip_get_data(chip);
- unsigned mask = 1 << offset;
+ unsigned mask = BIT(offset);
mutex_lock(&mcp->lock);
- __mcp23s08_set(mcp, mask, value);
+ __mcp23s08_set(mcp, mask, !!value);
mutex_unlock(&mcp->lock);
}
@@ -253,14 +435,13 @@ static int
mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value)
{
struct mcp23s08 *mcp = gpiochip_get_data(chip);
- unsigned mask = 1 << offset;
+ unsigned mask = BIT(offset);
int status;
mutex_lock(&mcp->lock);
status = __mcp23s08_set(mcp, mask, value);
if (status == 0) {
- mcp->cache[MCP_IODIR] &= ~mask;
- status = mcp_write(mcp, MCP_IODIR, mcp->cache[MCP_IODIR]);
+ status = mcp_set_mask(mcp, MCP_IODIR, mask, false);
}
mutex_unlock(&mcp->lock);
return status;
@@ -270,7 +451,7 @@ mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value)
static irqreturn_t mcp23s08_irq(int irq, void *data)
{
struct mcp23s08 *mcp = data;
- int intcap, intf, i, gpio, gpio_orig, intcap_mask;
+ int intcap, intcon, intf, i, gpio, gpio_orig, intcap_mask, defval;
unsigned int child_irq;
bool intf_set, intcap_changed, gpio_bit_changed,
defval_changed, gpio_set;
@@ -281,25 +462,31 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
return IRQ_HANDLED;
}
- mcp->cache[MCP_INTF] = intf;
-
if (mcp_read(mcp, MCP_INTCAP, &intcap) < 0) {
mutex_unlock(&mcp->lock);
return IRQ_HANDLED;
}
- mcp->cache[MCP_INTCAP] = intcap;
+ if (mcp_read(mcp, MCP_INTCON, &intcon) < 0) {
+ mutex_unlock(&mcp->lock);
+ return IRQ_HANDLED;
+ }
+
+ if (mcp_read(mcp, MCP_DEFVAL, &defval) < 0) {
+ mutex_unlock(&mcp->lock);
+ return IRQ_HANDLED;
+ }
/* This clears the interrupt(configurable on S18) */
if (mcp_read(mcp, MCP_GPIO, &gpio) < 0) {
mutex_unlock(&mcp->lock);
return IRQ_HANDLED;
}
- gpio_orig = mcp->cache[MCP_GPIO];
- mcp->cache[MCP_GPIO] = gpio;
+ gpio_orig = mcp->cached_gpio;
+ mcp->cached_gpio = gpio;
mutex_unlock(&mcp->lock);
- if (mcp->cache[MCP_INTF] == 0) {
+ if (intf == 0) {
/* There is no interrupt pending */
return IRQ_HANDLED;
}
@@ -327,7 +514,7 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
* to see if the input has changed.
*/
- intf_set = BIT(i) & mcp->cache[MCP_INTF];
+ intf_set = intf & BIT(i);
if (i < 8 && intf_set)
intcap_mask = 0x00FF;
else if (i >= 8 && intf_set)
@@ -336,14 +523,14 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
intcap_mask = 0x00;
intcap_changed = (intcap_mask &
- (BIT(i) & mcp->cache[MCP_INTCAP])) !=
+ (intcap & BIT(i))) !=
(intcap_mask & (BIT(i) & gpio_orig));
- gpio_set = BIT(i) & mcp->cache[MCP_GPIO];
+ gpio_set = BIT(i) & gpio;
gpio_bit_changed = (BIT(i) & gpio_orig) !=
- (BIT(i) & mcp->cache[MCP_GPIO]);
- defval_changed = (BIT(i) & mcp->cache[MCP_INTCON]) &&
- ((BIT(i) & mcp->cache[MCP_GPIO]) !=
- (BIT(i) & mcp->cache[MCP_DEFVAL]));
+ (BIT(i) & gpio);
+ defval_changed = (BIT(i) & intcon) &&
+ ((BIT(i) & gpio) !=
+ (BIT(i) & defval));
if (((gpio_bit_changed || intcap_changed) &&
(BIT(i) & mcp->irq_rise) && gpio_set) ||
@@ -364,7 +551,7 @@ static void mcp23s08_irq_mask(struct irq_data *data)
struct mcp23s08 *mcp = gpiochip_get_data(gc);
unsigned int pos = data->hwirq;
- mcp->cache[MCP_GPINTEN] &= ~BIT(pos);
+ mcp_set_bit(mcp, MCP_GPINTEN, pos, false);
}
static void mcp23s08_irq_unmask(struct irq_data *data)
@@ -373,7 +560,7 @@ static void mcp23s08_irq_unmask(struct irq_data *data)
struct mcp23s08 *mcp = gpiochip_get_data(gc);
unsigned int pos = data->hwirq;
- mcp->cache[MCP_GPINTEN] |= BIT(pos);
+ mcp_set_bit(mcp, MCP_GPINTEN, pos, true);
}
static int mcp23s08_irq_set_type(struct irq_data *data, unsigned int type)
@@ -384,23 +571,23 @@ static int mcp23s08_irq_set_type(struct irq_data *data, unsigned int type)
int status = 0;
if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
- mcp->cache[MCP_INTCON] &= ~BIT(pos);
+ mcp_set_bit(mcp, MCP_INTCON, pos, false);
mcp->irq_rise |= BIT(pos);
mcp->irq_fall |= BIT(pos);
} else if (type & IRQ_TYPE_EDGE_RISING) {
- mcp->cache[MCP_INTCON] &= ~BIT(pos);
+ mcp_set_bit(mcp, MCP_INTCON, pos, false);
mcp->irq_rise |= BIT(pos);
mcp->irq_fall &= ~BIT(pos);
} else if (type & IRQ_TYPE_EDGE_FALLING) {
- mcp->cache[MCP_INTCON] &= ~BIT(pos);
+ mcp_set_bit(mcp, MCP_INTCON, pos, false);
mcp->irq_rise &= ~BIT(pos);
mcp->irq_fall |= BIT(pos);
} else if (type & IRQ_TYPE_LEVEL_HIGH) {
- mcp->cache[MCP_INTCON] |= BIT(pos);
- mcp->cache[MCP_DEFVAL] &= ~BIT(pos);
+ mcp_set_bit(mcp, MCP_INTCON, pos, true);
+ mcp_set_bit(mcp, MCP_DEFVAL, pos, false);
} else if (type & IRQ_TYPE_LEVEL_LOW) {
- mcp->cache[MCP_INTCON] |= BIT(pos);
- mcp->cache[MCP_DEFVAL] |= BIT(pos);
+ mcp_set_bit(mcp, MCP_INTCON, pos, true);
+ mcp_set_bit(mcp, MCP_DEFVAL, pos, true);
} else
return -EINVAL;
@@ -412,7 +599,8 @@ static void mcp23s08_irq_bus_lock(struct irq_data *data)
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
struct mcp23s08 *mcp = gpiochip_get_data(gc);
- mutex_lock(&mcp->irq_lock);
+ mutex_lock(&mcp->lock);
+ regcache_cache_only(mcp->regmap, true);
}
static void mcp23s08_irq_bus_unlock(struct irq_data *data)
@@ -420,12 +608,10 @@ static void mcp23s08_irq_bus_unlock(struct irq_data *data)
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
struct mcp23s08 *mcp = gpiochip_get_data(gc);
- mutex_lock(&mcp->lock);
- mcp_write(mcp, MCP_GPINTEN, mcp->cache[MCP_GPINTEN]);
- mcp_write(mcp, MCP_DEFVAL, mcp->cache[MCP_DEFVAL]);
- mcp_write(mcp, MCP_INTCON, mcp->cache[MCP_INTCON]);
+ regcache_cache_only(mcp->regmap, false);
+ regcache_sync(mcp->regmap);
+
mutex_unlock(&mcp->lock);
- mutex_unlock(&mcp->irq_lock);
}
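Dropping the private irq_lock works because the regmap cache now plays the role of the old shadow registers: while the irq bus is locked, regcache_cache_only() makes the mask/unmask/set_type updates land in the cache only, and regcache_sync() flushes every dirty register in one batch when the bus is released. A minimal sketch of the same idiom in isolation (hypothetical function, plain 8-bit register indexes used for brevity where the driver itself goes through mcp_set_bit()):

	static void mcp_example_batched_irq_config(struct regmap *map)
	{
		regcache_cache_only(map, true);		/* writes land in the cache */
		regmap_update_bits(map, MCP_GPINTEN, BIT(3), BIT(3));
		regmap_update_bits(map, MCP_DEFVAL, BIT(3), 0);
		regcache_cache_only(map, false);
		regcache_sync(map);			/* flush dirty registers    */
	}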
static struct irq_chip mcp23s08_irq_chip = {
@@ -443,8 +629,6 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
int err;
unsigned long irqflags = IRQF_ONESHOT | IRQF_SHARED;
- mutex_init(&mcp->irq_lock);
-
if (mcp->irq_active_high)
irqflags |= IRQF_TRIGGER_HIGH;
else
@@ -484,6 +668,47 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
#include <linux/seq_file.h>
/*
+ * This compares the chip's registers with the register
+ * cache and corrects any incorrectly set register. This
+ * can be used to fix the state of an MCP23xxx that has
+ * temporarily lost its power supply.
+ */
+#define MCP23S08_CONFIG_REGS 8
+static int __check_mcp23s08_reg_cache(struct mcp23s08 *mcp)
+{
+ int cached[MCP23S08_CONFIG_REGS];
+ int err = 0, i;
+
+ /* read cached config registers */
+ for (i = 0; i < MCP23S08_CONFIG_REGS; i++) {
+ err = mcp_read(mcp, i, &cached[i]);
+ if (err)
+ goto out;
+ }
+
+ regcache_cache_bypass(mcp->regmap, true);
+
+ for (i = 0; i < MCP23S08_CONFIG_REGS; i++) {
+ int uncached;
+ err = mcp_read(mcp, i, &uncached);
+ if (err)
+ goto out;
+
+ if (uncached != cached[i]) {
+ dev_err(mcp->dev, "restoring reg 0x%02x from 0x%04x to 0x%04x (power-loss?)\n",
+ i, uncached, cached[i]);
+ mcp_write(mcp, i, cached[i]);
+ }
+ }
+
+out:
+ if (err)
+ dev_err(mcp->dev, "read error: reg=%02x, err=%d", i, err);
+ regcache_cache_bypass(mcp->regmap, false);
+ return err;
+}
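The helper above relies on the difference between cached and bypassed accesses: a normal read of a non-volatile register is served from the regmap cache, while regcache_cache_bypass() forces the access onto the bus without touching the cache, which is what allows comparing the chip's real contents with the last known-good values. A condensed sketch of that pattern for a single register (hypothetical function name, error handling omitted):

	static void mcp_example_verify_reg(struct mcp23s08 *mcp, unsigned int reg)
	{
		unsigned int cached, hw;

		mcp_read(mcp, reg, &cached);		/* served from the cache */

		regcache_cache_bypass(mcp->regmap, true);
		mcp_read(mcp, reg, &hw);		/* forced onto the bus   */
		if (hw != cached)
			mcp_write(mcp, reg, cached);	/* restore the hardware  */
		regcache_cache_bypass(mcp->regmap, false);
	}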
+
+/*
* This shows more info than the generic gpio dump code:
* pullups, deglitching, open drain drive.
*/
@@ -493,6 +718,7 @@ static void mcp23s08_dbg_show(struct seq_file *s, struct gpio_chip *chip)
char bank;
int t;
unsigned mask;
+ int iodir, gpio, gppu;
mcp = gpiochip_get_data(chip);
@@ -500,14 +726,30 @@ static void mcp23s08_dbg_show(struct seq_file *s, struct gpio_chip *chip)
bank = '0' + ((mcp->addr >> 1) & 0x7);
mutex_lock(&mcp->lock);
- t = mcp_update_cache(mcp);
- if (t < 0) {
- seq_printf(s, " I/O ERROR %d\n", t);
+
+ t = __check_mcp23s08_reg_cache(mcp);
+ if (t) {
+ seq_printf(s, " I/O Error\n");
+ goto done;
+ }
+ t = mcp_read(mcp, MCP_IODIR, &iodir);
+ if (t) {
+ seq_printf(s, " I/O Error\n");
+ goto done;
+ }
+ t = mcp_read(mcp, MCP_GPIO, &gpio);
+ if (t) {
+ seq_printf(s, " I/O Error\n");
+ goto done;
+ }
+ t = mcp_read(mcp, MCP_GPPU, &gppu);
+ if (t) {
+ seq_printf(s, " I/O Error\n");
goto done;
}
- for (t = 0, mask = 1; t < chip->ngpio; t++, mask <<= 1) {
- const char *label;
+ for (t = 0, mask = BIT(0); t < chip->ngpio; t++, mask <<= 1) {
+ const char *label;
label = gpiochip_is_requested(chip, t);
if (!label)
@@ -515,9 +757,9 @@ static void mcp23s08_dbg_show(struct seq_file *s, struct gpio_chip *chip)
seq_printf(s, " gpio-%-3d P%c.%d (%-12s) %s %s %s",
chip->base + t, bank, t, label,
- (mcp->cache[MCP_IODIR] & mask) ? "in " : "out",
- (mcp->cache[MCP_GPIO] & mask) ? "hi" : "lo",
- (mcp->cache[MCP_GPPU] & mask) ? "up" : " ");
+ (iodir & mask) ? "in " : "out",
+ (gpio & mask) ? "hi" : "lo",
+ (gppu & mask) ? "up" : " ");
/* NOTE: ignoring the irq-related registers */
seq_puts(s, "\n");
}
@@ -533,7 +775,7 @@ done:
static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
void *data, unsigned addr, unsigned type,
- struct mcp23s08_platform_data *pdata, int cs)
+ unsigned int base, int cs)
{
int status, ret;
bool mirror = false;
@@ -605,7 +847,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
if (IS_ERR(mcp->regmap))
return PTR_ERR(mcp->regmap);
- mcp->chip.base = pdata->base;
+ mcp->chip.base = base;
mcp->chip.can_sleep = true;
mcp->chip.parent = dev;
mcp->chip.owner = THIS_MODULE;
@@ -618,13 +860,14 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
if (ret < 0)
goto fail;
- mcp->irq_controller = pdata->irq_controller;
+ mcp->irq_controller =
+ device_property_read_bool(dev, "interrupt-controller");
if (mcp->irq && mcp->irq_controller) {
mcp->irq_active_high =
- of_property_read_bool(mcp->chip.parent->of_node,
+ device_property_read_bool(dev,
"microchip,irq-active-high");
- mirror = pdata->mirror;
+ mirror = device_property_read_bool(dev, "microchip,irq-mirror");
}
if ((status & IOCON_SEQOP) || !(status & IOCON_HAEN) || mirror ||
@@ -648,32 +891,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
goto fail;
}
- /* configure ~100K pullups */
- ret = mcp_write(mcp, MCP_GPPU, pdata->chip[cs].pullups);
- if (ret < 0)
- goto fail;
-
- ret = mcp_update_cache(mcp);
- if (ret < 0)
- goto fail;
-
- /* disable inverter on input */
- if (mcp->cache[MCP_IPOL] != 0) {
- mcp->cache[MCP_IPOL] = 0;
- ret = mcp_write(mcp, MCP_IPOL, 0);
- if (ret < 0)
- goto fail;
- }
-
- /* disable irqs */
- if (mcp->cache[MCP_GPINTEN] != 0) {
- mcp->cache[MCP_GPINTEN] = 0;
- ret = mcp_write(mcp, MCP_GPINTEN, 0);
- if (ret < 0)
- goto fail;
- }
-
- ret = gpiochip_add_data(&mcp->chip, mcp);
+ ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
if (ret < 0)
goto fail;
@@ -682,6 +900,23 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
if (ret)
goto fail;
}
+
+ mcp->pinctrl_desc.name = "mcp23xxx-pinctrl";
+ mcp->pinctrl_desc.pctlops = &mcp_pinctrl_ops;
+ mcp->pinctrl_desc.confops = &mcp_pinconf_ops;
+ mcp->pinctrl_desc.npins = mcp->chip.ngpio;
+ if (mcp->pinctrl_desc.npins == 8)
+ mcp->pinctrl_desc.pins = mcp23x08_pins;
+ else if (mcp->pinctrl_desc.npins == 16)
+ mcp->pinctrl_desc.pins = mcp23x17_pins;
+ mcp->pinctrl_desc.owner = THIS_MODULE;
+
+ mcp->pctldev = devm_pinctrl_register(dev, &mcp->pinctrl_desc, mcp);
+ if (IS_ERR(mcp->pctldev)) {
+ ret = PTR_ERR(mcp->pctldev);
+ goto fail;
+ }
+
fail:
if (ret < 0)
dev_dbg(dev, "can't setup chip %d, --> %d\n", addr, ret);
@@ -753,60 +988,26 @@ static int mcp230xx_probe(struct i2c_client *client,
struct mcp23s08_platform_data *pdata, local_pdata;
struct mcp23s08 *mcp;
int status;
- const struct of_device_id *match;
- match = of_match_device(of_match_ptr(mcp23s08_i2c_of_match),
- &client->dev);
- if (match) {
+ pdata = dev_get_platdata(&client->dev);
+ if (!pdata) {
pdata = &local_pdata;
pdata->base = -1;
- pdata->chip[0].pullups = 0;
- pdata->irq_controller = of_property_read_bool(
- client->dev.of_node,
- "interrupt-controller");
- pdata->mirror = of_property_read_bool(client->dev.of_node,
- "microchip,irq-mirror");
- client->irq = irq_of_parse_and_map(client->dev.of_node, 0);
- } else {
- pdata = dev_get_platdata(&client->dev);
- if (!pdata) {
- pdata = devm_kzalloc(&client->dev,
- sizeof(struct mcp23s08_platform_data),
- GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
- pdata->base = -1;
- }
}
- mcp = kzalloc(sizeof(*mcp), GFP_KERNEL);
+ mcp = devm_kzalloc(&client->dev, sizeof(*mcp), GFP_KERNEL);
if (!mcp)
return -ENOMEM;
mcp->irq = client->irq;
status = mcp23s08_probe_one(mcp, &client->dev, client, client->addr,
- id->driver_data, pdata, 0);
+ id->driver_data, pdata->base, 0);
if (status)
- goto fail;
+ return status;
i2c_set_clientdata(client, mcp);
return 0;
-
-fail:
- kfree(mcp);
-
- return status;
-}
-
-static int mcp230xx_remove(struct i2c_client *client)
-{
- struct mcp23s08 *mcp = i2c_get_clientdata(client);
-
- gpiochip_remove(&mcp->chip);
- kfree(mcp);
-
- return 0;
}
static const struct i2c_device_id mcp230xx_id[] = {
@@ -822,7 +1023,6 @@ static struct i2c_driver mcp230xx_driver = {
.of_match_table = of_match_ptr(mcp23s08_i2c_of_match),
},
.probe = mcp230xx_probe,
- .remove = mcp230xx_remove,
.id_table = mcp230xx_id,
};
@@ -856,60 +1056,40 @@ static int mcp23s08_probe(struct spi_device *spi)
int status, type;
unsigned ngpio = 0;
const struct of_device_id *match;
- u32 spi_present_mask = 0;
match = of_match_device(of_match_ptr(mcp23s08_spi_of_match), &spi->dev);
- if (match) {
+ if (match)
type = (int)(uintptr_t)match->data;
- status = of_property_read_u32(spi->dev.of_node,
- "microchip,spi-present-mask", &spi_present_mask);
+ else
+ type = spi_get_device_id(spi)->driver_data;
+
+ pdata = dev_get_platdata(&spi->dev);
+ if (!pdata) {
+ pdata = &local_pdata;
+ pdata->base = -1;
+
+ status = device_property_read_u32(&spi->dev,
+ "microchip,spi-present-mask", &pdata->spi_present_mask);
if (status) {
- status = of_property_read_u32(spi->dev.of_node,
- "mcp,spi-present-mask", &spi_present_mask);
+ status = device_property_read_u32(&spi->dev,
+ "mcp,spi-present-mask",
+ &pdata->spi_present_mask);
+
if (status) {
- dev_err(&spi->dev,
- "DT has no spi-present-mask\n");
+ dev_err(&spi->dev, "missing spi-present-mask");
return -ENODEV;
}
}
- if ((spi_present_mask <= 0) || (spi_present_mask >= 256)) {
- dev_err(&spi->dev, "invalid spi-present-mask\n");
- return -ENODEV;
- }
+ }
- pdata = &local_pdata;
- pdata->base = -1;
- for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
- pdata->chip[addr].pullups = 0;
- if (spi_present_mask & (1 << addr))
- chips++;
- }
- pdata->irq_controller = of_property_read_bool(
- spi->dev.of_node,
- "interrupt-controller");
- pdata->mirror = of_property_read_bool(spi->dev.of_node,
- "microchip,irq-mirror");
- } else {
- type = spi_get_device_id(spi)->driver_data;
- pdata = dev_get_platdata(&spi->dev);
- if (!pdata) {
- pdata = devm_kzalloc(&spi->dev,
- sizeof(struct mcp23s08_platform_data),
- GFP_KERNEL);
- pdata->base = -1;
- }
+ if (!pdata->spi_present_mask || pdata->spi_present_mask > 0xff) {
+ dev_err(&spi->dev, "invalid spi-present-mask");
+ return -ENODEV;
+ }
- for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
- if (!pdata->chip[addr].is_present)
- continue;
+ for (addr = 0; addr < MCP_MAX_DEV_PER_CS; addr++) {
+ if (pdata->spi_present_mask & BIT(addr))
chips++;
- if ((type == MCP_TYPE_S08) && (addr > 3)) {
- dev_err(&spi->dev,
- "mcp23s08 only supports address 0..3\n");
- return -EINVAL;
- }
- spi_present_mask |= 1 << addr;
- }
}
if (!chips)
@@ -923,19 +1103,17 @@ static int mcp23s08_probe(struct spi_device *spi)
spi_set_drvdata(spi, data);
- spi->irq = irq_of_parse_and_map(spi->dev.of_node, 0);
-
- for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
- if (!(spi_present_mask & (1 << addr)))
+ for (addr = 0; addr < MCP_MAX_DEV_PER_CS; addr++) {
+ if (!(pdata->spi_present_mask & BIT(addr)))
continue;
chips--;
data->mcp[addr] = &data->chip[chips];
data->mcp[addr]->irq = spi->irq;
status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
- 0x40 | (addr << 1), type, pdata,
- addr);
+ 0x40 | (addr << 1), type,
+ pdata->base, addr);
if (status < 0)
- goto fail;
+ return status;
if (pdata->base != -1)
pdata->base += data->mcp[addr]->chip.ngpio;
@@ -943,36 +1121,6 @@ static int mcp23s08_probe(struct spi_device *spi)
}
data->ngpio = ngpio;
- /* NOTE: these chips have a relatively sane IRQ framework, with
- * per-signal masking and level/edge triggering. It's not yet
- * handled here...
- */
-
- return 0;
-
-fail:
- for (addr = 0; addr < ARRAY_SIZE(data->mcp); addr++) {
-
- if (!data->mcp[addr])
- continue;
- gpiochip_remove(&data->mcp[addr]->chip);
- }
- return status;
-}
-
-static int mcp23s08_remove(struct spi_device *spi)
-{
- struct mcp23s08_driver_data *data = spi_get_drvdata(spi);
- unsigned addr;
-
- for (addr = 0; addr < ARRAY_SIZE(data->mcp); addr++) {
-
- if (!data->mcp[addr])
- continue;
-
- gpiochip_remove(&data->mcp[addr]->chip);
- }
-
return 0;
}
@@ -986,7 +1134,6 @@ MODULE_DEVICE_TABLE(spi, mcp23s08_ids);
static struct spi_driver mcp23s08_driver = {
.probe = mcp23s08_probe,
- .remove = mcp23s08_remove,
.id_table = mcp23s08_ids,
.driver = {
.name = "mcp23s08",
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 9dd981ddbb17..e831647c56a6 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -143,6 +143,7 @@ struct rockchip_drv {
* @gpio_chip: gpiolib chip
* @grange: gpio range
* @slock: spinlock for the gpio bank
+ * @route_mask: bit mask of pins in this bank that have iomux routing options
*/
struct rockchip_pin_bank {
void __iomem *reg_base;
@@ -165,6 +166,7 @@ struct rockchip_pin_bank {
struct pinctrl_gpio_range grange;
raw_spinlock_t slock;
u32 toggle_edge_mode;
+ u32 route_mask;
};
#define PIN_BANK(id, pins, label) \
@@ -288,6 +290,22 @@ struct rockchip_pin_bank {
}
/**
+ * struct rockchip_mux_route_data: route register data for a pin iomux.
+ * @bank_num: bank number.
+ * @pin: index of the pin within the bank.
+ * @func: the iomux function that requires this route setting.
+ * @route_offset: register offset of the route register.
+ * @route_val: value to write to the route register.
+ */
+struct rockchip_mux_route_data {
+ u8 bank_num;
+ u8 pin;
+ u8 func;
+ u32 route_offset;
+ u32 route_val;
+};
+
+/**
*/
struct rockchip_pin_ctrl {
struct rockchip_pin_bank *pin_banks;
@@ -299,6 +317,8 @@ struct rockchip_pin_ctrl {
int pmu_mux_offset;
int grf_drv_offset;
int pmu_drv_offset;
+ struct rockchip_mux_route_data *iomux_routes;
+ u32 niomux_routes;
void (*pull_calc_reg)(struct rockchip_pin_bank *bank,
int pin_num, struct regmap **regmap,
@@ -580,6 +600,280 @@ static void rk3328_recalc_mux(u8 bank_num, int pin, int *reg,
*bit = data->bit;
}
+static struct rockchip_mux_route_data rk3228_mux_route_data[] = {
+ {
+ /* pwm0-0 */
+ .bank_num = 0,
+ .pin = 26,
+ .func = 1,
+ .route_offset = 0x50,
+ .route_val = BIT(16),
+ }, {
+ /* pwm0-1 */
+ .bank_num = 3,
+ .pin = 21,
+ .func = 1,
+ .route_offset = 0x50,
+ .route_val = BIT(16) | BIT(0),
+ }, {
+ /* pwm1-0 */
+ .bank_num = 0,
+ .pin = 27,
+ .func = 1,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 1),
+ }, {
+ /* pwm1-1 */
+ .bank_num = 0,
+ .pin = 30,
+ .func = 2,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 1) | BIT(1),
+ }, {
+ /* pwm2-0 */
+ .bank_num = 0,
+ .pin = 28,
+ .func = 1,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 2),
+ }, {
+ /* pwm2-1 */
+ .bank_num = 1,
+ .pin = 12,
+ .func = 2,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 2) | BIT(2),
+ }, {
+ /* pwm3-0 */
+ .bank_num = 3,
+ .pin = 26,
+ .func = 1,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 3),
+ }, {
+ /* pwm3-1 */
+ .bank_num = 1,
+ .pin = 11,
+ .func = 2,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 3) | BIT(3),
+ }, {
+ /* sdio-0_d0 */
+ .bank_num = 1,
+ .pin = 1,
+ .func = 1,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 4),
+ }, {
+ /* sdio-1_d0 */
+ .bank_num = 3,
+ .pin = 2,
+ .func = 1,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 4) | BIT(4),
+ }, {
+ /* spi-0_rx */
+ .bank_num = 0,
+ .pin = 13,
+ .func = 2,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 5),
+ }, {
+ /* spi-1_rx */
+ .bank_num = 2,
+ .pin = 0,
+ .func = 2,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 5) | BIT(5),
+ }, {
+ /* emmc-0_cmd */
+ .bank_num = 1,
+ .pin = 22,
+ .func = 2,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 7),
+ }, {
+ /* emmc-1_cmd */
+ .bank_num = 2,
+ .pin = 4,
+ .func = 2,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 7) | BIT(7),
+ }, {
+ /* uart2-0_rx */
+ .bank_num = 1,
+ .pin = 19,
+ .func = 2,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 8),
+ }, {
+ /* uart2-1_rx */
+ .bank_num = 1,
+ .pin = 10,
+ .func = 2,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 8) | BIT(8),
+ }, {
+ /* uart1-0_rx */
+ .bank_num = 1,
+ .pin = 10,
+ .func = 1,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 11),
+ }, {
+ /* uart1-1_rx */
+ .bank_num = 3,
+ .pin = 13,
+ .func = 1,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 11) | BIT(11),
+ },
+};
+
+static struct rockchip_mux_route_data rk3328_mux_route_data[] = {
+ {
+ /* uart2dbg_rxm0 */
+ .bank_num = 1,
+ .pin = 1,
+ .func = 2,
+ .route_offset = 0x50,
+ .route_val = BIT(16) | BIT(16 + 1),
+ }, {
+ /* uart2dbg_rxm1 */
+ .bank_num = 2,
+ .pin = 1,
+ .func = 1,
+ .route_offset = 0x50,
+ .route_val = BIT(16) | BIT(16 + 1) | BIT(0),
+ }, {
+ /* gmac-m1-optimized_rxd0 */
+ .bank_num = 1,
+ .pin = 11,
+ .func = 2,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 2) | BIT(16 + 10) | BIT(2) | BIT(10),
+ }, {
+ /* pdm_sdi0m0 */
+ .bank_num = 2,
+ .pin = 19,
+ .func = 2,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 3),
+ }, {
+ /* pdm_sdi0m1 */
+ .bank_num = 1,
+ .pin = 23,
+ .func = 3,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 3) | BIT(3),
+ }, {
+ /* spi_rxdm2 */
+ .bank_num = 3,
+ .pin = 2,
+ .func = 4,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 4) | BIT(16 + 5) | BIT(5),
+ }, {
+ /* i2s2_sdim0 */
+ .bank_num = 1,
+ .pin = 24,
+ .func = 1,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 6),
+ }, {
+ /* i2s2_sdim1 */
+ .bank_num = 3,
+ .pin = 2,
+ .func = 6,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 6) | BIT(6),
+ }, {
+ /* card_iom1 */
+ .bank_num = 2,
+ .pin = 22,
+ .func = 3,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 7) | BIT(7),
+ }, {
+ /* tsp_d5m1 */
+ .bank_num = 2,
+ .pin = 16,
+ .func = 3,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 8) | BIT(8),
+ }, {
+ /* cif_data5m1 */
+ .bank_num = 2,
+ .pin = 16,
+ .func = 4,
+ .route_offset = 0x50,
+ .route_val = BIT(16 + 9) | BIT(9),
+ },
+};
+
+static struct rockchip_mux_route_data rk3399_mux_route_data[] = {
+ {
+ /* uart2dbga_rx */
+ .bank_num = 4,
+ .pin = 8,
+ .func = 2,
+ .route_offset = 0xe21c,
+ .route_val = BIT(16 + 10) | BIT(16 + 11),
+ }, {
+ /* uart2dbgb_rx */
+ .bank_num = 4,
+ .pin = 16,
+ .func = 2,
+ .route_offset = 0xe21c,
+ .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(10),
+ }, {
+ /* uart2dbgc_rx */
+ .bank_num = 4,
+ .pin = 19,
+ .func = 1,
+ .route_offset = 0xe21c,
+ .route_val = BIT(16 + 10) | BIT(16 + 11) | BIT(11),
+ }, {
+ /* pcie_clkreqn */
+ .bank_num = 2,
+ .pin = 26,
+ .func = 2,
+ .route_offset = 0xe21c,
+ .route_val = BIT(16 + 14),
+ }, {
+ /* pcie_clkreqnb */
+ .bank_num = 4,
+ .pin = 24,
+ .func = 1,
+ .route_offset = 0xe21c,
+ .route_val = BIT(16 + 14) | BIT(14),
+ },
+};
+
+static bool rockchip_get_mux_route(struct rockchip_pin_bank *bank, int pin,
+ int mux, u32 *reg, u32 *value)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+ struct rockchip_pin_ctrl *ctrl = info->ctrl;
+ struct rockchip_mux_route_data *data;
+ int i;
+
+ for (i = 0; i < ctrl->niomux_routes; i++) {
+ data = &ctrl->iomux_routes[i];
+ if ((data->bank_num == bank->bank_num) &&
+ (data->pin == pin) && (data->func == mux))
+ break;
+ }
+
+ if (i >= ctrl->niomux_routes)
+ return false;
+
+ *reg = data->route_offset;
+ *value = data->route_val;
+
+ return true;
+}
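Each route_val in the tables above follows the Rockchip GRF hiword-mask convention: the upper 16 bits select which of the lower 16 bits the write is allowed to change, so a single regmap_write() flips one routing field without a read-modify-write. For the rk3228 entries, for instance, BIT(16) alone routes pwm0 to its "-0" pin (bit 0 written as 0) while BIT(16) | BIT(0) routes it to the "-1" pin. A tiny sketch of how such a value could be built (hypothetical helper, not used by the driver):

	static u32 example_hiword_route_val(unsigned int bit, bool set)
	{
		/* write-enable in the high half-word, new value in the low one */
		return BIT(bit + 16) | (set ? BIT(bit) : 0);
	}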
+
static int rockchip_get_mux(struct rockchip_pin_bank *bank, int pin)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -678,7 +972,7 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
struct regmap *regmap;
int reg, ret, mask, mux_type;
u8 bit;
- u32 data, rmask;
+ u32 data, rmask, route_reg, route_val;
ret = rockchip_verify_mux(bank, pin, mux);
if (ret < 0)
@@ -714,6 +1008,15 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
if (ctrl->iomux_recalc && (mux_type & IOMUX_RECALCED))
ctrl->iomux_recalc(bank->bank_num, pin, &reg, &bit, &mask);
+ if (bank->route_mask & BIT(pin)) {
+ if (rockchip_get_mux_route(bank, pin, mux, &route_reg,
+ &route_val)) {
+ ret = regmap_write(regmap, route_reg, route_val);
+ if (ret)
+ return ret;
+ }
+ }
+
data = (mask << (bit + 16));
rmask = data | (data >> 16);
data |= (mux & mask) << bit;
@@ -2549,6 +2852,16 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
bank_pins += 8;
}
+
+ /* calculate the per-bank route_mask */
+ for (j = 0; j < ctrl->niomux_routes; j++) {
+ int pin = 0;
+
+ if (ctrl->iomux_routes[j].bank_num == bank->bank_num) {
+ pin = ctrl->iomux_routes[j].pin;
+ bank->route_mask |= BIT(pin);
+ }
+ }
}
return ctrl;
@@ -2799,6 +3112,8 @@ static struct rockchip_pin_ctrl rk3228_pin_ctrl = {
.label = "RK3228-GPIO",
.type = RK3288,
.grf_mux_offset = 0x0,
+ .iomux_routes = rk3228_mux_route_data,
+ .niomux_routes = ARRAY_SIZE(rk3228_mux_route_data),
.pull_calc_reg = rk3228_calc_pull_reg_and_bit,
.drv_calc_reg = rk3228_calc_drv_reg_and_bit,
};
@@ -2866,6 +3181,8 @@ static struct rockchip_pin_ctrl rk3328_pin_ctrl = {
.label = "RK3328-GPIO",
.type = RK3288,
.grf_mux_offset = 0x0,
+ .iomux_routes = rk3328_mux_route_data,
+ .niomux_routes = ARRAY_SIZE(rk3328_mux_route_data),
.pull_calc_reg = rk3228_calc_pull_reg_and_bit,
.drv_calc_reg = rk3228_calc_drv_reg_and_bit,
.iomux_recalc = rk3328_recalc_mux,
@@ -2956,33 +3273,35 @@ static struct rockchip_pin_ctrl rk3399_pin_ctrl = {
.pmu_mux_offset = 0x0,
.grf_drv_offset = 0xe100,
.pmu_drv_offset = 0x80,
+ .iomux_routes = rk3399_mux_route_data,
+ .niomux_routes = ARRAY_SIZE(rk3399_mux_route_data),
.pull_calc_reg = rk3399_calc_pull_reg_and_bit,
.drv_calc_reg = rk3399_calc_drv_reg_and_bit,
};
static const struct of_device_id rockchip_pinctrl_dt_match[] = {
{ .compatible = "rockchip,rv1108-pinctrl",
- .data = (void *)&rv1108_pin_ctrl },
+ .data = &rv1108_pin_ctrl },
{ .compatible = "rockchip,rk2928-pinctrl",
- .data = (void *)&rk2928_pin_ctrl },
+ .data = &rk2928_pin_ctrl },
{ .compatible = "rockchip,rk3036-pinctrl",
- .data = (void *)&rk3036_pin_ctrl },
+ .data = &rk3036_pin_ctrl },
{ .compatible = "rockchip,rk3066a-pinctrl",
- .data = (void *)&rk3066a_pin_ctrl },
+ .data = &rk3066a_pin_ctrl },
{ .compatible = "rockchip,rk3066b-pinctrl",
- .data = (void *)&rk3066b_pin_ctrl },
+ .data = &rk3066b_pin_ctrl },
{ .compatible = "rockchip,rk3188-pinctrl",
- .data = (void *)&rk3188_pin_ctrl },
+ .data = &rk3188_pin_ctrl },
{ .compatible = "rockchip,rk3228-pinctrl",
- .data = (void *)&rk3228_pin_ctrl },
+ .data = &rk3228_pin_ctrl },
{ .compatible = "rockchip,rk3288-pinctrl",
- .data = (void *)&rk3288_pin_ctrl },
+ .data = &rk3288_pin_ctrl },
{ .compatible = "rockchip,rk3328-pinctrl",
- .data = (void *)&rk3328_pin_ctrl },
+ .data = &rk3328_pin_ctrl },
{ .compatible = "rockchip,rk3368-pinctrl",
- .data = (void *)&rk3368_pin_ctrl },
+ .data = &rk3368_pin_ctrl },
{ .compatible = "rockchip,rk3399-pinctrl",
- .data = (void *)&rk3399_pin_ctrl },
+ .data = &rk3399_pin_ctrl },
{},
};
diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c
new file mode 100644
index 000000000000..dc164da10446
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-rza1.c
@@ -0,0 +1,1308 @@
+/*
+ * Combined GPIO and pin controller support for Renesas RZ/A1 (r7s72100) SoC
+ *
+ * Copyright (C) 2017 Jacopo Mondi
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/*
+ * This combined pin controller/gpio driver supports Renesas devices of the
+ * RZ/A1 family.
+ * This includes SoCs which are sub- or super-sets of this particular line,
+ * such as RZ/A1H (r7s721000), RZ/A1M (r7s721010) and RZ/A1L (r7s721020).
+ */
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include "devicetree.h"
+#include "pinconf.h"
+#include "pinmux.h"
+
+#define DRIVER_NAME "pinctrl-rza1"
+
+#define RZA1_P_REG 0x0000
+#define RZA1_PPR_REG 0x0200
+#define RZA1_PM_REG 0x0300
+#define RZA1_PMC_REG 0x0400
+#define RZA1_PFC_REG 0x0500
+#define RZA1_PFCE_REG 0x0600
+#define RZA1_PFCEA_REG 0x0a00
+#define RZA1_PIBC_REG 0x4000
+#define RZA1_PBDC_REG 0x4100
+#define RZA1_PIPC_REG 0x4200
+
+#define RZA1_ADDR(mem, reg, port) ((mem) + (reg) + ((port) * 4))
+
+#define RZA1_NPORTS 12
+#define RZA1_PINS_PER_PORT 16
+#define RZA1_NPINS (RZA1_PINS_PER_PORT * RZA1_NPORTS)
+#define RZA1_PIN_ID_TO_PORT(id) ((id) / RZA1_PINS_PER_PORT)
+#define RZA1_PIN_ID_TO_PIN(id) ((id) % RZA1_PINS_PER_PORT)
+
+/*
+ * Use 16 lower bits [15:0] for pin identifier
+ * Use 16 higher bits [31:16] for pin mux function
+ */
+#define MUX_PIN_ID_MASK GENMASK(15, 0)
+#define MUX_FUNC_MASK GENMASK(31, 16)
+
+#define MUX_FUNC_OFFS 16
+#define MUX_FUNC(pinconf) \
+ ((pinconf & MUX_FUNC_MASK) >> MUX_FUNC_OFFS)
+#define MUX_FUNC_PFC_MASK BIT(0)
+#define MUX_FUNC_PFCE_MASK BIT(1)
+#define MUX_FUNC_PFCEA_MASK BIT(2)
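As a worked example of the packing described above, a cell value of 0x00040035 carries pin id 0x35 (decimal 53, i.e. port 3, pin 5) in its lower half and alternate function 4 in its upper half. A minimal sketch of the unpacking, using only the macros defined here (the function name and the example value are illustrative):

	static void rza1_example_unpack_pinmux(void)
	{
		u32 pinconf = 0x00040035;
		u16 id = pinconf & MUX_PIN_ID_MASK;	/* 53            */
		u8 port = RZA1_PIN_ID_TO_PORT(id);	/* 53 / 16 == 3  */
		u8 pin = RZA1_PIN_ID_TO_PIN(id);	/* 53 % 16 == 5  */
		u8 func = MUX_FUNC(pinconf);		/* 4             */

		pr_debug("port %u pin %u func %u\n", port, pin, func);
	}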
+
+/* Pin mux flags */
+#define MUX_FLAGS_BIDIR BIT(0)
+#define MUX_FLAGS_SWIO_INPUT BIT(1)
+#define MUX_FLAGS_SWIO_OUTPUT BIT(2)
+
+/* ----------------------------------------------------------------------------
+ * RZ/A1 pinmux flags
+ */
+
+/**
+ * rza1_bidir_pin - describes a single pin that needs the bidir flag applied.
+ */
+struct rza1_bidir_pin {
+ u8 pin: 4;
+ u8 func: 4;
+};
+
+/**
+ * rza1_bidir_entry - describes a list of pins that need the bidir flag applied.
+ * Each struct rza1_bidir_entry describes a port.
+ */
+struct rza1_bidir_entry {
+ const unsigned int npins;
+ const struct rza1_bidir_pin *pins;
+};
+
+/**
+ * rza1_swio_pin - describes a single pin that needs the swio flag applied.
+ */
+struct rza1_swio_pin {
+ u16 pin: 4;
+ u16 port: 4;
+ u16 func: 4;
+ u16 input: 1;
+};
+
+/**
+ * rza1_swio_entry - describes a list of pins that need the swio flag applied
+ */
+struct rza1_swio_entry {
+ const unsigned int npins;
+ const struct rza1_swio_pin *pins;
+};
+
+/**
+ * rza1_pinmux_conf - group together bidir and swio pinmux flag tables
+ */
+struct rza1_pinmux_conf {
+ const struct rza1_bidir_entry *bidir_entries;
+ const struct rza1_swio_entry *swio_entries;
+};
+
+/* ----------------------------------------------------------------------------
+ * RZ/A1H (r7s72100) pinmux flags
+ */
+
+static const struct rza1_bidir_pin rza1h_bidir_pins_p1[] = {
+ { .pin = 0, .func = 1 },
+ { .pin = 1, .func = 1 },
+ { .pin = 2, .func = 1 },
+ { .pin = 3, .func = 1 },
+ { .pin = 4, .func = 1 },
+ { .pin = 5, .func = 1 },
+ { .pin = 6, .func = 1 },
+ { .pin = 7, .func = 1 },
+};
+
+static const struct rza1_bidir_pin rza1h_bidir_pins_p2[] = {
+ { .pin = 0, .func = 1 },
+ { .pin = 1, .func = 1 },
+ { .pin = 2, .func = 1 },
+ { .pin = 3, .func = 1 },
+ { .pin = 4, .func = 1 },
+ { .pin = 0, .func = 4 },
+ { .pin = 1, .func = 4 },
+ { .pin = 2, .func = 4 },
+ { .pin = 3, .func = 4 },
+ { .pin = 5, .func = 1 },
+ { .pin = 6, .func = 1 },
+ { .pin = 7, .func = 1 },
+ { .pin = 8, .func = 1 },
+ { .pin = 9, .func = 1 },
+ { .pin = 10, .func = 1 },
+ { .pin = 11, .func = 1 },
+ { .pin = 12, .func = 1 },
+ { .pin = 13, .func = 1 },
+ { .pin = 14, .func = 1 },
+ { .pin = 15, .func = 1 },
+ { .pin = 12, .func = 4 },
+ { .pin = 13, .func = 4 },
+ { .pin = 14, .func = 4 },
+ { .pin = 15, .func = 4 },
+};
+
+static const struct rza1_bidir_pin rza1h_bidir_pins_p3[] = {
+ { .pin = 3, .func = 2 },
+ { .pin = 10, .func = 7 },
+ { .pin = 11, .func = 7 },
+ { .pin = 13, .func = 7 },
+ { .pin = 14, .func = 7 },
+ { .pin = 15, .func = 7 },
+ { .pin = 10, .func = 8 },
+ { .pin = 11, .func = 8 },
+ { .pin = 13, .func = 8 },
+ { .pin = 14, .func = 8 },
+ { .pin = 15, .func = 8 },
+};
+
+static const struct rza1_bidir_pin rza1h_bidir_pins_p4[] = {
+ { .pin = 0, .func = 8 },
+ { .pin = 1, .func = 8 },
+ { .pin = 2, .func = 8 },
+ { .pin = 3, .func = 8 },
+ { .pin = 10, .func = 3 },
+ { .pin = 11, .func = 3 },
+ { .pin = 13, .func = 3 },
+ { .pin = 14, .func = 3 },
+ { .pin = 15, .func = 3 },
+ { .pin = 10, .func = 4 },
+ { .pin = 11, .func = 4 },
+ { .pin = 13, .func = 4 },
+ { .pin = 14, .func = 4 },
+ { .pin = 15, .func = 4 },
+ { .pin = 12, .func = 5 },
+ { .pin = 13, .func = 5 },
+ { .pin = 14, .func = 5 },
+ { .pin = 15, .func = 5 },
+};
+
+static const struct rza1_bidir_pin rza1h_bidir_pins_p6[] = {
+ { .pin = 0, .func = 1 },
+ { .pin = 1, .func = 1 },
+ { .pin = 2, .func = 1 },
+ { .pin = 3, .func = 1 },
+ { .pin = 4, .func = 1 },
+ { .pin = 5, .func = 1 },
+ { .pin = 6, .func = 1 },
+ { .pin = 7, .func = 1 },
+ { .pin = 8, .func = 1 },
+ { .pin = 9, .func = 1 },
+ { .pin = 10, .func = 1 },
+ { .pin = 11, .func = 1 },
+ { .pin = 12, .func = 1 },
+ { .pin = 13, .func = 1 },
+ { .pin = 14, .func = 1 },
+ { .pin = 15, .func = 1 },
+};
+
+static const struct rza1_bidir_pin rza1h_bidir_pins_p7[] = {
+ { .pin = 13, .func = 3 },
+};
+
+static const struct rza1_bidir_pin rza1h_bidir_pins_p8[] = {
+ { .pin = 8, .func = 3 },
+ { .pin = 9, .func = 3 },
+ { .pin = 10, .func = 3 },
+ { .pin = 11, .func = 3 },
+ { .pin = 14, .func = 2 },
+ { .pin = 15, .func = 2 },
+ { .pin = 14, .func = 3 },
+ { .pin = 15, .func = 3 },
+};
+
+static const struct rza1_bidir_pin rza1h_bidir_pins_p9[] = {
+ { .pin = 0, .func = 2 },
+ { .pin = 1, .func = 2 },
+ { .pin = 4, .func = 2 },
+ { .pin = 5, .func = 2 },
+ { .pin = 6, .func = 2 },
+ { .pin = 7, .func = 2 },
+};
+
+static const struct rza1_bidir_pin rza1h_bidir_pins_p11[] = {
+ { .pin = 6, .func = 2 },
+ { .pin = 7, .func = 2 },
+ { .pin = 9, .func = 2 },
+ { .pin = 6, .func = 4 },
+ { .pin = 7, .func = 4 },
+ { .pin = 9, .func = 4 },
+ { .pin = 10, .func = 2 },
+ { .pin = 11, .func = 2 },
+ { .pin = 10, .func = 4 },
+ { .pin = 11, .func = 4 },
+ { .pin = 12, .func = 4 },
+ { .pin = 13, .func = 4 },
+ { .pin = 14, .func = 4 },
+ { .pin = 15, .func = 4 },
+};
+
+static const struct rza1_swio_pin rza1h_swio_pins[] = {
+ { .port = 2, .pin = 7, .func = 4, .input = 0 },
+ { .port = 2, .pin = 11, .func = 4, .input = 0 },
+ { .port = 3, .pin = 7, .func = 3, .input = 0 },
+ { .port = 3, .pin = 7, .func = 8, .input = 0 },
+ { .port = 4, .pin = 7, .func = 5, .input = 0 },
+ { .port = 4, .pin = 7, .func = 11, .input = 0 },
+ { .port = 4, .pin = 15, .func = 6, .input = 0 },
+ { .port = 5, .pin = 0, .func = 1, .input = 1 },
+ { .port = 5, .pin = 1, .func = 1, .input = 1 },
+ { .port = 5, .pin = 2, .func = 1, .input = 1 },
+ { .port = 5, .pin = 3, .func = 1, .input = 1 },
+ { .port = 5, .pin = 4, .func = 1, .input = 1 },
+ { .port = 5, .pin = 5, .func = 1, .input = 1 },
+ { .port = 5, .pin = 6, .func = 1, .input = 1 },
+ { .port = 5, .pin = 7, .func = 1, .input = 1 },
+ { .port = 7, .pin = 4, .func = 6, .input = 0 },
+ { .port = 7, .pin = 11, .func = 2, .input = 0 },
+ { .port = 8, .pin = 10, .func = 8, .input = 0 },
+ { .port = 10, .pin = 15, .func = 2, .input = 0 },
+};
+
+static const struct rza1_bidir_entry rza1h_bidir_entries[RZA1_NPORTS] = {
+ [1] = { ARRAY_SIZE(rza1h_bidir_pins_p1), rza1h_bidir_pins_p1 },
+ [2] = { ARRAY_SIZE(rza1h_bidir_pins_p2), rza1h_bidir_pins_p2 },
+ [3] = { ARRAY_SIZE(rza1h_bidir_pins_p3), rza1h_bidir_pins_p3 },
+ [4] = { ARRAY_SIZE(rza1h_bidir_pins_p4), rza1h_bidir_pins_p4 },
+ [6] = { ARRAY_SIZE(rza1h_bidir_pins_p6), rza1h_bidir_pins_p6 },
+ [7] = { ARRAY_SIZE(rza1h_bidir_pins_p7), rza1h_bidir_pins_p7 },
+ [8] = { ARRAY_SIZE(rza1h_bidir_pins_p8), rza1h_bidir_pins_p8 },
+ [9] = { ARRAY_SIZE(rza1h_bidir_pins_p9), rza1h_bidir_pins_p9 },
+ [11] = { ARRAY_SIZE(rza1h_bidir_pins_p11), rza1h_bidir_pins_p11 },
+};
+
+static const struct rza1_swio_entry rza1h_swio_entries[] = {
+ [0] = { ARRAY_SIZE(rza1h_swio_pins), rza1h_swio_pins },
+};
+
+/* RZ/A1H (r7s72100x) pinmux flags table */
+static const struct rza1_pinmux_conf rza1h_pmx_conf = {
+ .bidir_entries = rza1h_bidir_entries,
+ .swio_entries = rza1h_swio_entries,
+};
+
+/* ----------------------------------------------------------------------------
+ * RZ/A1 types
+ */
+/**
+ * rza1_mux_conf - describes a pin multiplexing operation
+ *
+ * @id: the pin identifier from 0 to RZA1_NPINS
+ * @port: the port the pin sits on
+ * @pin: pin id
+ * @mux_func: alternate function id number
+ * @mux_flags: alternate function flags
+ * @value: output value to set the pin to
+ */
+struct rza1_mux_conf {
+ u16 id;
+ u8 port;
+ u8 pin;
+ u8 mux_func;
+ u8 mux_flags;
+ u8 value;
+};
+
+/**
+ * rza1_port - describes a pin port
+ *
+ * This is mostly useful to lock register writes per-bank and not globally.
+ *
+ * @lock: protect access to HW registers
+ * @id: port number
+ * @base: logical address base
+ * @pins: pins sitting on this port
+ */
+struct rza1_port {
+ spinlock_t lock;
+ unsigned int id;
+ void __iomem *base;
+ struct pinctrl_pin_desc *pins;
+};
+
+/**
+ * rza1_pinctrl - RZ pincontroller device
+ *
+ * @dev: parent device structure
+ * @mutex: protect [pinctrl|pinmux]_generic functions
+ * @base: logical address base
+ * @nport: number of pin controller ports
+ * @ports: pin controller banks
+ * @pins: pin array for pinctrl core
+ * @desc: pincontroller desc for pinctrl core
+ * @pctl: pinctrl device
+ * @data: device specific data
+ */
+struct rza1_pinctrl {
+ struct device *dev;
+
+ struct mutex mutex;
+
+ void __iomem *base;
+
+ unsigned int nport;
+ struct rza1_port *ports;
+
+ struct pinctrl_pin_desc *pins;
+ struct pinctrl_desc desc;
+ struct pinctrl_dev *pctl;
+
+ const void *data;
+};
+
+/* ----------------------------------------------------------------------------
+ * RZ/A1 pinmux flags
+ */
+static inline bool rza1_pinmux_get_bidir(unsigned int port,
+ unsigned int pin,
+ unsigned int func,
+ const struct rza1_bidir_entry *table)
+{
+ const struct rza1_bidir_entry *entry = &table[port];
+ const struct rza1_bidir_pin *bidir_pin;
+ unsigned int i;
+
+ for (i = 0; i < entry->npins; ++i) {
+ bidir_pin = &entry->pins[i];
+ if (bidir_pin->pin == pin && bidir_pin->func == func)
+ return true;
+ }
+
+ return false;
+}
+
+static inline int rza1_pinmux_get_swio(unsigned int port,
+ unsigned int pin,
+ unsigned int func,
+ const struct rza1_swio_entry *table)
+{
+ const struct rza1_swio_pin *swio_pin;
+ unsigned int i;
+
+
+ swio_pin = &table->pins[i];
+ if (swio_pin->port == port && swio_pin->pin == pin &&
+ swio_pin->func == func)
+ return swio_pin->input;
+ }
+
+ return -ENOENT;
+}
+
+/**
+ * rza1_pinmux_get_flags() - return pinmux flags associated with a pin
+ */
+static unsigned int rza1_pinmux_get_flags(unsigned int port, unsigned int pin,
+ unsigned int func,
+ struct rza1_pinctrl *rza1_pctl)
+
+{
+ const struct rza1_pinmux_conf *pmx_conf = rza1_pctl->data;
+ const struct rza1_bidir_entry *bidir_entries = pmx_conf->bidir_entries;
+ const struct rza1_swio_entry *swio_entries = pmx_conf->swio_entries;
+ unsigned int pmx_flags = 0;
+ int ret;
+
+ if (rza1_pinmux_get_bidir(port, pin, func, bidir_entries))
+ pmx_flags |= MUX_FLAGS_BIDIR;
+
+ ret = rza1_pinmux_get_swio(port, pin, func, swio_entries);
+ if (ret == 0)
+ pmx_flags |= MUX_FLAGS_SWIO_OUTPUT;
+ else if (ret > 0)
+ pmx_flags |= MUX_FLAGS_SWIO_INPUT;
+
+ return pmx_flags;
+}
+
+/* ----------------------------------------------------------------------------
+ * RZ/A1 SoC operations
+ */
+
+/**
+ * rza1_set_bit() - unlocked set/clear of a single bit in pin configuration
+ * registers
+ */
+static inline void rza1_set_bit(struct rza1_port *port, unsigned int reg,
+ unsigned int bit, bool set)
+{
+ void __iomem *mem = RZA1_ADDR(port->base, reg, port->id);
+ u16 val = ioread16(mem);
+
+ if (set)
+ val |= BIT(bit);
+ else
+ val &= ~BIT(bit);
+
+ iowrite16(val, mem);
+}
+
+static inline unsigned int rza1_get_bit(struct rza1_port *port,
+ unsigned int reg, unsigned int bit)
+{
+ void __iomem *mem = RZA1_ADDR(port->base, reg, port->id);
+
+ return ioread16(mem) & BIT(bit);
+}
+
+/**
+ * rza1_pin_reset() - reset a pin to default initial state
+ *
+ * Reset the pin state, disabling the input buffer and bi-directional control,
+ * and configure it as an input port.
+ * Note that the pin is then configured as an input but with the input buffer
+ * disabled, which implies its value cannot be read in this state.
+ *
+ * @port: port the pin sits on
+ * @pin: pin offset
+ */
+static void rza1_pin_reset(struct rza1_port *port, unsigned int pin)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&port->lock, irqflags);
+ rza1_set_bit(port, RZA1_PIBC_REG, pin, 0);
+ rza1_set_bit(port, RZA1_PBDC_REG, pin, 0);
+
+ rza1_set_bit(port, RZA1_PM_REG, pin, 1);
+ rza1_set_bit(port, RZA1_PMC_REG, pin, 0);
+ rza1_set_bit(port, RZA1_PIPC_REG, pin, 0);
+ spin_unlock_irqrestore(&port->lock, irqflags);
+}
+
+static inline int rza1_pin_get_direction(struct rza1_port *port,
+ unsigned int pin)
+{
+ unsigned long irqflags;
+ int input;
+
+ spin_lock_irqsave(&port->lock, irqflags);
+ input = rza1_get_bit(port, RZA1_PM_REG, pin);
+ spin_unlock_irqrestore(&port->lock, irqflags);
+
+ return !!input;
+}
+
+/**
+ * rza1_pin_set_direction() - set I/O direction on a pin in port mode
+ *
+ * When running in output port mode keep PBDC enabled to allow reading the
+ * pin value from PPR.
+ *
+ * @port: port the pin sits on
+ * @pin: pin offset
+ * @input: input enable/disable flag
+ */
+static inline void rza1_pin_set_direction(struct rza1_port *port,
+ unsigned int pin, bool input)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&port->lock, irqflags);
+
+ rza1_set_bit(port, RZA1_PIBC_REG, pin, 1);
+ if (input) {
+ rza1_set_bit(port, RZA1_PM_REG, pin, 1);
+ rza1_set_bit(port, RZA1_PBDC_REG, pin, 0);
+ } else {
+ rza1_set_bit(port, RZA1_PM_REG, pin, 0);
+ rza1_set_bit(port, RZA1_PBDC_REG, pin, 1);
+ }
+
+ spin_unlock_irqrestore(&port->lock, irqflags);
+}
+
+static inline void rza1_pin_set(struct rza1_port *port, unsigned int pin,
+ unsigned int value)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&port->lock, irqflags);
+ rza1_set_bit(port, RZA1_P_REG, pin, !!value);
+ spin_unlock_irqrestore(&port->lock, irqflags);
+}
+
+static inline int rza1_pin_get(struct rza1_port *port, unsigned int pin)
+{
+ unsigned long irqflags;
+ int val;
+
+ spin_lock_irqsave(&port->lock, irqflags);
+ val = rza1_get_bit(port, RZA1_PPR_REG, pin);
+ spin_unlock_irqrestore(&port->lock, irqflags);
+
+ return val;
+}
+
+/**
+ * rza1_pin_mux_single() - configure pin multiplexing on a single pin
+ *
+ * @rza1_pctl: RZ/A1 pin controller device
+ * @mux_conf: pin multiplexing descriptor
+ */
+static int rza1_pin_mux_single(struct rza1_pinctrl *rza1_pctl,
+ struct rza1_mux_conf *mux_conf)
+{
+ struct rza1_port *port = &rza1_pctl->ports[mux_conf->port];
+ unsigned int pin = mux_conf->pin;
+ u8 mux_func = mux_conf->mux_func;
+ u8 mux_flags = mux_conf->mux_flags;
+ u8 mux_flags_from_table;
+
+ rza1_pin_reset(port, pin);
+
+ /* SWIO pinmux flags coming from DT take precedence */
+ mux_flags_from_table = rza1_pinmux_get_flags(port->id, pin, mux_func,
+ rza1_pctl);
+ if (mux_flags)
+ mux_flags |= (mux_flags_from_table & MUX_FLAGS_BIDIR);
+ else
+ mux_flags = mux_flags_from_table;
+
+ if (mux_flags & MUX_FLAGS_BIDIR)
+ rza1_set_bit(port, RZA1_PBDC_REG, pin, 1);
+
+ /*
+ * Enable alternate function mode and select it.
+ *
+ * Be careful here: the pin mux sub-nodes in device tree
+ * enumerate alternate functions from 1 to 8;
+ * subtract 1 before using macros to match registers configuration
+ * which expects numbers from 0 to 7 instead.
+ *
+ * ----------------------------------------------------
+ * Alternate mode selection table:
+ *
+ * PMC PFC PFCE PFCAE (mux_func - 1)
+ * 1 0 0 0 0
+ * 1 1 0 0 1
+ * 1 0 1 0 2
+ * 1 1 1 0 3
+ * 1 0 0 1 4
+ * 1 1 0 1 5
+ * 1 0 1 1 6
+ * 1 1 1 1 7
+ * ----------------------------------------------------
+ */
+ mux_func -= 1;
+ rza1_set_bit(port, RZA1_PFC_REG, pin, mux_func & MUX_FUNC_PFC_MASK);
+ rza1_set_bit(port, RZA1_PFCE_REG, pin, mux_func & MUX_FUNC_PFCE_MASK);
+ rza1_set_bit(port, RZA1_PFCEA_REG, pin, mux_func & MUX_FUNC_PFCEA_MASK);
+
+ /*
+ * All alternate functions except a few need PIPCn = 1.
+ * If PIPCn has to stay disabled (SW IO mode), configure PMn according
+ * to I/O direction specified by pin configuration -after- PMC has been
+ * set to one.
+ */
+ if (mux_flags & (MUX_FLAGS_SWIO_INPUT | MUX_FLAGS_SWIO_OUTPUT))
+ rza1_set_bit(port, RZA1_PM_REG, pin,
+ mux_flags & MUX_FLAGS_SWIO_INPUT);
+ else
+ rza1_set_bit(port, RZA1_PIPC_REG, pin, 1);
+
+ rza1_set_bit(port, RZA1_PMC_REG, pin, 1);
+
+ return 0;
+}
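As a concrete reading of the selection table above: choosing alternate function 6 from the device tree gives mux_func - 1 = 5 = 0b101, so the three rza1_set_bit() calls program PFC = 1, PFCE = 0 and PFCAE = 1, which is exactly the "5" row of the table. The bit decomposition in isolation (hypothetical function name):

	static void rza1_example_decode_func6(void)
	{
		u8 func = 6 - 1;				/* 0b101 */
		bool pfc = func & MUX_FUNC_PFC_MASK;		/* 1 */
		bool pfce = func & MUX_FUNC_PFCE_MASK;		/* 0 */
		bool pfcae = func & MUX_FUNC_PFCEA_MASK;	/* 1 */

		pr_debug("PFC=%d PFCE=%d PFCAE=%d\n", pfc, pfce, pfcae);
	}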
+
+/* ----------------------------------------------------------------------------
+ * gpio operations
+ */
+
+/**
+ * rza1_gpio_request() - configure pin in port mode
+ *
+ * Configure a pin as gpio (port mode).
+ * After reset, the pin is in input mode with input buffer disabled.
+ * To use the pin as input or output, set_direction shall be called first.
+ *
+ * @chip: gpio chip the gpio belongs to
+ * @gpio: gpio offset
+ */
+static int rza1_gpio_request(struct gpio_chip *chip, unsigned int gpio)
+{
+ struct rza1_port *port = gpiochip_get_data(chip);
+
+ rza1_pin_reset(port, gpio);
+
+ return 0;
+}
+
+/**
+ * rza1_gpio_free() - reset a pin
+ *
+ * Perhaps surprisingly, freeing a gpio is equivalent to requesting it:
+ * the pin is reset to port mode with the input buffer disabled. This
+ * overwrites any port direction settings applied with set_direction.
+ *
+ * @chip: gpio chip the gpio belongs to
+ * @gpio: gpio offset
+ */
+static void rza1_gpio_free(struct gpio_chip *chip, unsigned int gpio)
+{
+ struct rza1_port *port = gpiochip_get_data(chip);
+
+ rza1_pin_reset(port, gpio);
+}
+
+static int rza1_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio)
+{
+ struct rza1_port *port = gpiochip_get_data(chip);
+
+ return rza1_pin_get_direction(port, gpio);
+}
+
+static int rza1_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int gpio)
+{
+ struct rza1_port *port = gpiochip_get_data(chip);
+
+ rza1_pin_set_direction(port, gpio, true);
+
+ return 0;
+}
+
+static int rza1_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int gpio,
+ int value)
+{
+ struct rza1_port *port = gpiochip_get_data(chip);
+
+ /* Set value before driving pin direction */
+ rza1_pin_set(port, gpio, value);
+ rza1_pin_set_direction(port, gpio, false);
+
+ return 0;
+}
+
+/**
+ * rza1_gpio_get() - read a gpio pin value
+ *
+ * Read gpio pin value through PPR register.
+ * Bi-directional mode is required to read the value of a pin that is
+ * configured in output mode.
+ *
+ * @chip: gpio chip the gpio belongs to
+ * @gpio: gpio offset
+ */
+static int rza1_gpio_get(struct gpio_chip *chip, unsigned int gpio)
+{
+ struct rza1_port *port = gpiochip_get_data(chip);
+
+ return rza1_pin_get(port, gpio);
+}
+
+static void rza1_gpio_set(struct gpio_chip *chip, unsigned int gpio,
+ int value)
+{
+ struct rza1_port *port = gpiochip_get_data(chip);
+
+ rza1_pin_set(port, gpio, value);
+}
+
+static struct gpio_chip rza1_gpiochip_template = {
+ .request = rza1_gpio_request,
+ .free = rza1_gpio_free,
+ .get_direction = rza1_gpio_get_direction,
+ .direction_input = rza1_gpio_direction_input,
+ .direction_output = rza1_gpio_direction_output,
+ .get = rza1_gpio_get,
+ .set = rza1_gpio_set,
+};
+/* ----------------------------------------------------------------------------
+ * pinctrl operations
+ */
+
+/**
+ * rza1_dt_node_pin_count() - Count number of pins in a dt node or in all its
+ * children sub-nodes
+ *
+ * @np: device tree node to parse
+ */
+static int rza1_dt_node_pin_count(struct device_node *np)
+{
+ struct device_node *child;
+ struct property *of_pins;
+ unsigned int npins;
+
+ of_pins = of_find_property(np, "pinmux", NULL);
+ if (of_pins)
+ return of_pins->length / sizeof(u32);
+
+ npins = 0;
+ for_each_child_of_node(np, child) {
+ of_pins = of_find_property(child, "pinmux", NULL);
+ if (!of_pins)
+ return -EINVAL;
+
+ npins += of_pins->length / sizeof(u32);
+ }
+
+ return npins;
+}
+
+/**
+ * rza1_parse_pinmux_node() - parse a pin mux sub-node
+ *
+ * @rza1_pctl: RZ/A1 pin controller device
+ * @np: of pmx sub-node
+ * @mux_confs: array of pin mux configurations to fill with parsed info
+ * @grpins: array of pin ids to mux
+ */
+static int rza1_parse_pinmux_node(struct rza1_pinctrl *rza1_pctl,
+ struct device_node *np,
+ struct rza1_mux_conf *mux_confs,
+ unsigned int *grpins)
+{
+ struct pinctrl_dev *pctldev = rza1_pctl->pctl;
+ char const *prop_name = "pinmux";
+ unsigned long *pin_configs;
+ unsigned int npin_configs;
+ struct property *of_pins;
+ unsigned int npins;
+ u8 pinmux_flags;
+ unsigned int i;
+ int ret;
+
+ of_pins = of_find_property(np, prop_name, NULL);
+ if (!of_pins) {
+ dev_dbg(rza1_pctl->dev, "Missing %s property\n", prop_name);
+ return -ENOENT;
+ }
+ npins = of_pins->length / sizeof(u32);
+
+ /*
+ * Collect pin configuration properties: they apply to all pins in
+ * this sub-node
+ */
+ ret = pinconf_generic_parse_dt_config(np, pctldev, &pin_configs,
+ &npin_configs);
+ if (ret) {
+ dev_err(rza1_pctl->dev,
+ "Unable to parse pin configuration options for %s\n",
+ np->name);
+ return ret;
+ }
+
+ /*
+ * Create a mask with pinmux flags from pin configuration;
+ * very few pins (TIOC[0-4][A|B|C|D]) require the SWIO direction to be
+ * specified in the device tree.
+ */
+ pinmux_flags = 0;
+ for (i = 0; i < npin_configs && pinmux_flags == 0; i++)
+ switch (pinconf_to_config_param(pin_configs[i])) {
+ case PIN_CONFIG_INPUT_ENABLE:
+ pinmux_flags |= MUX_FLAGS_SWIO_INPUT;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ pinmux_flags |= MUX_FLAGS_SWIO_OUTPUT;
+ break;
+ default:
+ break;
+
+ }
+
+ kfree(pin_configs);
+
+ /* Collect pin positions and their mux settings. */
+ for (i = 0; i < npins; ++i) {
+ u32 of_pinconf;
+ struct rza1_mux_conf *mux_conf = &mux_confs[i];
+
+ ret = of_property_read_u32_index(np, prop_name, i, &of_pinconf);
+ if (ret)
+ return ret;
+
+ mux_conf->id = of_pinconf & MUX_PIN_ID_MASK;
+ mux_conf->port = RZA1_PIN_ID_TO_PORT(mux_conf->id);
+ mux_conf->pin = RZA1_PIN_ID_TO_PIN(mux_conf->id);
+ mux_conf->mux_func = MUX_FUNC(of_pinconf);
+ mux_conf->mux_flags = pinmux_flags;
+
+ if (mux_conf->port >= RZA1_NPORTS ||
+ mux_conf->pin >= RZA1_PINS_PER_PORT) {
+ dev_err(rza1_pctl->dev,
+ "Wrong port %u pin %u for %s property\n",
+ mux_conf->port, mux_conf->pin, prop_name);
+ return -EINVAL;
+ }
+
+ grpins[i] = mux_conf->id;
+ }
+
+ return npins;
+}
+
+/**
+ * rza1_dt_node_to_map() - map a pin mux node to a function/group
+ *
+ * Parse and register a pin mux function.
+ *
+ * @pctldev: pin controller device
+ * @np: device tree node to parse
+ * @map: pointer to pin map (output)
+ * @num_maps: number of collected maps (output)
+ */
+static int rza1_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned int *num_maps)
+{
+ struct rza1_pinctrl *rza1_pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct rza1_mux_conf *mux_confs, *mux_conf;
+ unsigned int *grpins, *grpin;
+ struct device_node *child;
+ const char *grpname;
+ const char **fngrps;
+ int ret, npins;
+
+ npins = rza1_dt_node_pin_count(np);
+ if (npins < 0) {
+ dev_err(rza1_pctl->dev, "invalid pinmux node structure\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Functions are made of 1 group only;
+ * in fact, functions and groups are identical for this pin controller
+ * except that functions carry an array of per-pin mux configuration
+ * settings.
+ */
+ mux_confs = devm_kcalloc(rza1_pctl->dev, npins, sizeof(*mux_confs),
+ GFP_KERNEL);
+ grpins = devm_kcalloc(rza1_pctl->dev, npins, sizeof(*grpins),
+ GFP_KERNEL);
+ fngrps = devm_kzalloc(rza1_pctl->dev, sizeof(*fngrps), GFP_KERNEL);
+
+ if (!mux_confs || !grpins || !fngrps)
+ return -ENOMEM;
+
+ /*
+ * Parse the pinmux node.
+ * If the node does not contain a "pinmux" property (-ENOENT), that
+ * property shall be specified in all of its child sub-nodes.
+ */
+ mux_conf = &mux_confs[0];
+ grpin = &grpins[0];
+
+ ret = rza1_parse_pinmux_node(rza1_pctl, np, mux_conf, grpin);
+ if (ret == -ENOENT)
+ for_each_child_of_node(np, child) {
+ ret = rza1_parse_pinmux_node(rza1_pctl, child, mux_conf,
+ grpin);
+ if (ret < 0)
+ return ret;
+
+ grpin += ret;
+ mux_conf += ret;
+ }
+ else if (ret < 0)
+ return ret;
+
+ /* Register pin group and function name to pinctrl_generic */
+ grpname = np->name;
+ fngrps[0] = grpname;
+
+ mutex_lock(&rza1_pctl->mutex);
+ ret = pinctrl_generic_add_group(pctldev, grpname, grpins, npins,
+ NULL);
+ if (ret) {
+ mutex_unlock(&rza1_pctl->mutex);
+ return ret;
+ }
+
+ ret = pinmux_generic_add_function(pctldev, grpname, fngrps, 1,
+ mux_confs);
+ if (ret)
+ goto remove_group;
+ mutex_unlock(&rza1_pctl->mutex);
+
+ dev_info(rza1_pctl->dev, "Parsed function and group %s with %d pins\n",
+ grpname, npins);
+
+ /* Create a map from which to retrieve function and mux settings */
+ *num_maps = 0;
+ *map = kzalloc(sizeof(**map), GFP_KERNEL);
+ if (!*map) {
+ ret = -ENOMEM;
+ goto remove_function;
+ }
+
+ (*map)->type = PIN_MAP_TYPE_MUX_GROUP;
+ (*map)->data.mux.group = np->name;
+ (*map)->data.mux.function = np->name;
+ *num_maps = 1;
+
+ return 0;
+
+remove_function:
+ mutex_lock(&rza1_pctl->mutex);
+ pinmux_generic_remove_last_function(pctldev);
+
+remove_group:
+ pinctrl_generic_remove_last_group(pctldev);
+ mutex_unlock(&rza1_pctl->mutex);
+
+ dev_info(rza1_pctl->dev, "Unable to parse function and group %s\n",
+ grpname);
+
+ return ret;
+}
+
+static void rza1_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map, unsigned int num_maps)
+{
+ kfree(map);
+}
+
+static const struct pinctrl_ops rza1_pinctrl_ops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .dt_node_to_map = rza1_dt_node_to_map,
+ .dt_free_map = rza1_dt_free_map,
+};
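
As a side note on the two pinctrl operations above: the maps emitted by rza1_dt_node_to_map() are not called directly by client drivers; they are consumed through the generic pinctrl consumer API, and applying a state lands in the driver's .set_mux operation (rza1_set_mux(), defined just below). A minimal sketch, assuming a hypothetical client device and the standard "default" state; the names are illustrative and not part of this patch:

#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

/* Illustrative only: selecting the default state ends up in rza1_set_mux() */
static int example_consumer_probe(struct device *dev)
{
	struct pinctrl *p;
	struct pinctrl_state *s;

	p = devm_pinctrl_get(dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	s = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT);
	if (IS_ERR(s))
		return PTR_ERR(s);

	return pinctrl_select_state(p, s);
}

In practice the driver core usually selects the default state before probe, so most clients never issue these calls by hand.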
+
+/* ----------------------------------------------------------------------------
+ * pinmux operations
+ */
+
+/**
+ * rza1_set_mux() - retrieve pins from a group and apply their mux settings
+ *
+ * @pctldev: pin controller device
+ * @selector: function selector
+ * @group: group selector
+ */
+static int rza1_set_mux(struct pinctrl_dev *pctldev, unsigned int selector,
+ unsigned int group)
+{
+ struct rza1_pinctrl *rza1_pctl = pinctrl_dev_get_drvdata(pctldev);
+ struct rza1_mux_conf *mux_confs;
+ struct function_desc *func;
+ struct group_desc *grp;
+ int i;
+
+ grp = pinctrl_generic_get_group(pctldev, group);
+ if (!grp)
+ return -EINVAL;
+
+ func = pinmux_generic_get_function(pctldev, selector);
+ if (!func)
+ return -EINVAL;
+
+ mux_confs = (struct rza1_mux_conf *)func->data;
+ for (i = 0; i < grp->num_pins; ++i) {
+ int ret;
+
+ ret = rza1_pin_mux_single(rza1_pctl, &mux_confs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct pinmux_ops rza1_pinmux_ops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = rza1_set_mux,
+ .strict = true,
+};
+
+/* ----------------------------------------------------------------------------
+ * RZ/A1 pin controller driver operations
+ */
+
+static unsigned int rza1_count_gpio_chips(struct device_node *np)
+{
+ struct device_node *child;
+ unsigned int count = 0;
+
+ for_each_child_of_node(np, child) {
+ if (!of_property_read_bool(child, "gpio-controller"))
+ continue;
+
+ count++;
+ }
+
+ return count;
+}
+
+/**
+ * rza1_parse_gpiochip() - parse and register a gpio chip and pin range
+ *
+ * The gpio controller subnode shall provide a "gpio-ranges" list property as
+ * defined by the gpio device tree binding documentation.
+ *
+ * @rza1_pctl: RZ/A1 pin controller device
+ * @np: of gpio-controller node
+ * @chip: gpio chip to register to gpiolib
+ * @range: pin range to register to pinctrl core
+ */
+static int rza1_parse_gpiochip(struct rza1_pinctrl *rza1_pctl,
+ struct device_node *np,
+ struct gpio_chip *chip,
+ struct pinctrl_gpio_range *range)
+{
+ const char *list_name = "gpio-ranges";
+ struct of_phandle_args of_args;
+ unsigned int gpioport;
+ u32 pinctrl_base;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(np, list_name, 3, 0, &of_args);
+ if (ret) {
+ dev_err(rza1_pctl->dev, "Unable to parse %s list property\n",
+ list_name);
+ return ret;
+ }
+
+ /*
+ * Find out which port this gpio-chip maps to by inspecting the
+ * second argument of the "gpio-ranges" property.
+ */
+ pinctrl_base = of_args.args[1];
+ gpioport = RZA1_PIN_ID_TO_PORT(pinctrl_base);
+ if (gpioport >= RZA1_NPORTS) {
+ dev_err(rza1_pctl->dev,
+ "Invalid values in property %s\n", list_name);
+ return -EINVAL;
+ }
+
+ *chip = rza1_gpiochip_template;
+ chip->base = -1;
+ chip->label = devm_kasprintf(rza1_pctl->dev, GFP_KERNEL, "%s-%u",
+ np->name, gpioport);
+ chip->ngpio = of_args.args[2];
+ chip->of_node = np;
+ chip->parent = rza1_pctl->dev;
+
+ range->id = gpioport;
+ range->name = chip->label;
+ range->pin_base = range->base = pinctrl_base;
+ range->npins = of_args.args[2];
+ range->gc = chip;
+
+ ret = devm_gpiochip_add_data(rza1_pctl->dev, chip,
+ &rza1_pctl->ports[gpioport]);
+ if (ret)
+ return ret;
+
+ pinctrl_add_gpio_range(rza1_pctl->pctl, range);
+
+ dev_info(rza1_pctl->dev, "Parsed gpiochip %s with %d pins\n",
+ chip->label, chip->ngpio);
+
+ return 0;
+}
+
+/**
+ * rza1_gpio_register() - parse DT to collect gpio-chips and gpio-ranges
+ *
+ * @rza1_pctl: RZ/A1 pin controller device
+ */
+static int rza1_gpio_register(struct rza1_pinctrl *rza1_pctl)
+{
+ struct device_node *np = rza1_pctl->dev->of_node;
+ struct pinctrl_gpio_range *gpio_ranges;
+ struct gpio_chip *gpio_chips;
+ struct device_node *child;
+ unsigned int ngpiochips;
+ unsigned int i;
+ int ret;
+
+ ngpiochips = rza1_count_gpio_chips(np);
+ if (ngpiochips == 0) {
+ dev_dbg(rza1_pctl->dev, "No gpiochip registered\n");
+ return 0;
+ }
+
+ gpio_chips = devm_kcalloc(rza1_pctl->dev, ngpiochips,
+ sizeof(*gpio_chips), GFP_KERNEL);
+ gpio_ranges = devm_kcalloc(rza1_pctl->dev, ngpiochips,
+ sizeof(*gpio_ranges), GFP_KERNEL);
+ if (!gpio_chips || !gpio_ranges)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_child_of_node(np, child) {
+ if (!of_property_read_bool(child, "gpio-controller"))
+ continue;
+
+ ret = rza1_parse_gpiochip(rza1_pctl, child, &gpio_chips[i],
+ &gpio_ranges[i]);
+ if (ret)
+ goto gpiochip_remove;
+
+ ++i;
+ }
+
+ dev_info(rza1_pctl->dev, "Registered %u gpio controllers\n", i);
+
+ return 0;
+
+gpiochip_remove:
+ for (; i > 0; i--)
+ devm_gpiochip_remove(rza1_pctl->dev, &gpio_chips[i - 1]);
+
+ return ret;
+}
+
+/**
+ * rza1_pinctrl_register() - Enumerate pins, ports and gpiochips; register
+ * them with the pinctrl and gpio cores.
+ *
+ * @rza1_pctl: RZ/A1 pin controller device
+ */
+static int rza1_pinctrl_register(struct rza1_pinctrl *rza1_pctl)
+{
+ struct pinctrl_pin_desc *pins;
+ struct rza1_port *ports;
+ unsigned int i;
+ int ret;
+
+ pins = devm_kcalloc(rza1_pctl->dev, RZA1_NPINS, sizeof(*pins),
+ GFP_KERNEL);
+ ports = devm_kcalloc(rza1_pctl->dev, RZA1_NPORTS, sizeof(*ports),
+ GFP_KERNEL);
+ if (!pins || !ports)
+ return -ENOMEM;
+
+ rza1_pctl->pins = pins;
+ rza1_pctl->desc.pins = pins;
+ rza1_pctl->desc.npins = RZA1_NPINS;
+ rza1_pctl->ports = ports;
+
+ for (i = 0; i < RZA1_NPINS; ++i) {
+ unsigned int pin = RZA1_PIN_ID_TO_PIN(i);
+ unsigned int port = RZA1_PIN_ID_TO_PORT(i);
+
+ pins[i].number = i;
+ pins[i].name = devm_kasprintf(rza1_pctl->dev, GFP_KERNEL,
+ "P%u-%u", port, pin);
+
+ if (i % RZA1_PINS_PER_PORT == 0) {
+ /*
+ * Set up ports;
+ * they provide the per-port lock and logical base address.
+ */
+ unsigned int port_id = RZA1_PIN_ID_TO_PORT(i);
+
+ ports[port_id].id = port_id;
+ ports[port_id].base = rza1_pctl->base;
+ ports[port_id].pins = &pins[i];
+ spin_lock_init(&ports[port_id].lock);
+ }
+ }
+
+ ret = devm_pinctrl_register_and_init(rza1_pctl->dev, &rza1_pctl->desc,
+ rza1_pctl, &rza1_pctl->pctl);
+ if (ret) {
+ dev_err(rza1_pctl->dev,
+ "RZ/A1 pin controller registration failed\n");
+ return ret;
+ }
+
+ ret = pinctrl_enable(rza1_pctl->pctl);
+ if (ret) {
+ dev_err(rza1_pctl->dev,
+ "RZ/A1 pin controller failed to start\n");
+ return ret;
+ }
+
+ ret = rza1_gpio_register(rza1_pctl);
+ if (ret) {
+ dev_err(rza1_pctl->dev, "RZ/A1 GPIO registration failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rza1_pinctrl_probe(struct platform_device *pdev)
+{
+ struct rza1_pinctrl *rza1_pctl;
+ struct resource *res;
+ int ret;
+
+ rza1_pctl = devm_kzalloc(&pdev->dev, sizeof(*rza1_pctl), GFP_KERNEL);
+ if (!rza1_pctl)
+ return -ENOMEM;
+
+ rza1_pctl->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rza1_pctl->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rza1_pctl->base))
+ return PTR_ERR(rza1_pctl->base);
+
+ mutex_init(&rza1_pctl->mutex);
+
+ platform_set_drvdata(pdev, rza1_pctl);
+
+ rza1_pctl->desc.name = DRIVER_NAME;
+ rza1_pctl->desc.pctlops = &rza1_pinctrl_ops;
+ rza1_pctl->desc.pmxops = &rza1_pinmux_ops;
+ rza1_pctl->desc.owner = THIS_MODULE;
+ rza1_pctl->data = of_device_get_match_data(&pdev->dev);
+
+ ret = rza1_pinctrl_register(rza1_pctl);
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev,
+ "RZ/A1 pin controller and gpio successfully registered\n");
+
+ return 0;
+}
+
+static const struct of_device_id rza1_pinctrl_of_match[] = {
+ {
+ .compatible = "renesas,r7s72100-ports",
+ .data = &rza1h_pmx_conf,
+ },
+ { }
+};
+
+static struct platform_driver rza1_pinctrl_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = rza1_pinctrl_of_match,
+ },
+ .probe = rza1_pinctrl_probe,
+};
+
+static int __init rza1_pinctrl_init(void)
+{
+ return platform_driver_register(&rza1_pinctrl_driver);
+}
+core_initcall(rza1_pinctrl_init);
+
+MODULE_AUTHOR("Jacopo Mondi <[email protected]");
+MODULE_DESCRIPTION("Pin and gpio controller driver for Reneas RZ/A1 SoC");
+MODULE_LICENSE("GPL v2");
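
The gpio_chip operations registered in rza1_gpiochip_template are likewise reached through the gpiod consumer interface. A minimal sketch, assuming a made-up "enable" connection id (i.e. an enable-gpios property in the client's DT node); it is not part of this patch:

#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Illustrative only: gpiod calls resolve to rza1_gpio_set()/rza1_gpio_get() */
static int example_gpio_user(struct device *dev)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	gpiod_set_value(gpiod, 1);	/* -> rza1_pin_set() via rza1_gpio_set() */

	return gpiod_get_value(gpiod);	/* PPR read via rza1_gpio_get() */
}

Reading back the value of a pin driven as output relies on the bi-directional mode noted in the rza1_gpio_get() kerneldoc.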
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 9c267dcda094..b8b3d932cd73 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1270,8 +1270,6 @@ static void pcs_free_resources(struct pcs_device *pcs)
#endif
}
-static const struct of_device_id pcs_of_match[];
-
static int pcs_add_gpio_func(struct device_node *node, struct pcs_device *pcs)
{
const char *propname = "pinctrl-single,gpio-range";
@@ -1637,15 +1635,14 @@ static int pcs_quirk_missing_pinctrl_cells(struct pcs_device *pcs,
static int pcs_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *match;
struct pcs_pdata *pdata;
struct resource *res;
struct pcs_device *pcs;
const struct pcs_soc_data *soc;
int ret;
- match = of_match_device(pcs_of_match, &pdev->dev);
- if (!match)
+ soc = of_device_get_match_data(&pdev->dev);
+ if (WARN_ON(!soc))
return -EINVAL;
pcs = devm_kzalloc(&pdev->dev, sizeof(*pcs), GFP_KERNEL);
@@ -1658,7 +1655,6 @@ static int pcs_probe(struct platform_device *pdev)
raw_spin_lock_init(&pcs->lock);
mutex_init(&pcs->mutex);
INIT_LIST_HEAD(&pcs->gpiofuncs);
- soc = match->data;
pcs->flags = soc->flags;
memcpy(&pcs->socdata, soc, sizeof(*soc));
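
The hunks above replace the of_match_device()/match->data pair with of_device_get_match_data(), which returns the .data pointer of the matching of_device_id (or NULL) in one call, making the forward declaration of pcs_of_match unnecessary. A minimal sketch of the pattern with made-up "foo" names, not taken from pinctrl-single:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_soc_data {
	unsigned int flags;
};

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_soc_data *soc;

	/* NULL if probed without a DT match entry carrying .data */
	soc = of_device_get_match_data(&pdev->dev);
	if (!soc)
		return -EINVAL;

	/* ... use soc->flags much as pcs_probe() uses pcs->flags ... */
	return 0;
}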
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index d4167e2c173a..f9e98a7d4f0c 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -1028,7 +1028,7 @@ static const struct ltq_pin_group xrx200_grps[] = {
GRP_MUX("spi_cs5", SPI, xrx200_pins_spi_cs5),
GRP_MUX("spi_cs6", SPI, xrx200_pins_spi_cs6),
GRP_MUX("usif uart_rx", USIF, xrx200_pins_usif_uart_rx),
- GRP_MUX("usif uart_rx", USIF, xrx200_pins_usif_uart_tx),
+ GRP_MUX("usif uart_tx", USIF, xrx200_pins_usif_uart_tx),
GRP_MUX("usif uart_rts", USIF, xrx200_pins_usif_uart_rts),
GRP_MUX("usif uart_cts", USIF, xrx200_pins_usif_uart_cts),
GRP_MUX("usif uart_dtr", USIF, xrx200_pins_usif_uart_dtr),
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 3ebdc01f53c0..9e504dbc7fb5 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -39,6 +39,16 @@ config PINCTRL_IPQ8064
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm IPQ8064 platform.
+config PINCTRL_IPQ8074
+ tristate "Qualcomm Technologies, Inc. IPQ8074 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for
+ the Qualcomm Technologies Inc. TLMM block found on the
+ Qualcomm Technologies Inc. IPQ8074 platform. Select this for
+ IPQ8074.
+
config PINCTRL_MSM8660
tristate "Qualcomm 8660 pin controller driver"
depends on GPIOLIB && OF
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index ab47764dbc5c..06c8b2ace05f 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_PINCTRL_APQ8064) += pinctrl-apq8064.o
obj-$(CONFIG_PINCTRL_APQ8084) += pinctrl-apq8084.o
obj-$(CONFIG_PINCTRL_IPQ4019) += pinctrl-ipq4019.o
obj-$(CONFIG_PINCTRL_IPQ8064) += pinctrl-ipq8064.o
+obj-$(CONFIG_PINCTRL_IPQ8074) += pinctrl-ipq8074.o
obj-$(CONFIG_PINCTRL_MSM8660) += pinctrl-msm8660.o
obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8074.c b/drivers/pinctrl/qcom/pinctrl-ipq8074.c
new file mode 100644
index 000000000000..10fb076e2456
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-ipq8074.c
@@ -0,0 +1,1076 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+static const struct pinctrl_pin_desc ipq8074_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+
+enum ipq8074_functions {
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_audio_rxbclk,
+ msm_mux_audio_rxd,
+ msm_mux_audio_rxfsync,
+ msm_mux_audio_rxmclk,
+ msm_mux_audio_txbclk,
+ msm_mux_audio_txd,
+ msm_mux_audio_txfsync,
+ msm_mux_audio_txmclk,
+ msm_mux_blsp0_i2c,
+ msm_mux_blsp0_spi,
+ msm_mux_blsp0_uart,
+ msm_mux_blsp1_i2c,
+ msm_mux_blsp1_spi,
+ msm_mux_blsp1_uart,
+ msm_mux_blsp2_i2c,
+ msm_mux_blsp2_spi,
+ msm_mux_blsp2_uart,
+ msm_mux_blsp3_i2c,
+ msm_mux_blsp3_spi,
+ msm_mux_blsp3_spi0,
+ msm_mux_blsp3_spi1,
+ msm_mux_blsp3_spi2,
+ msm_mux_blsp3_spi3,
+ msm_mux_blsp3_uart,
+ msm_mux_blsp4_i2c0,
+ msm_mux_blsp4_i2c1,
+ msm_mux_blsp4_spi0,
+ msm_mux_blsp4_spi1,
+ msm_mux_blsp4_uart0,
+ msm_mux_blsp4_uart1,
+ msm_mux_blsp5_i2c,
+ msm_mux_blsp5_spi,
+ msm_mux_blsp5_uart,
+ msm_mux_burn0,
+ msm_mux_burn1,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_cxc0,
+ msm_mux_cxc1,
+ msm_mux_dbg_out,
+ msm_mux_gcc_plltest,
+ msm_mux_gcc_tlmm,
+ msm_mux_gpio,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_led0,
+ msm_mux_led1,
+ msm_mux_led2,
+ msm_mux_mac0_sa0,
+ msm_mux_mac0_sa1,
+ msm_mux_mac1_sa0,
+ msm_mux_mac1_sa1,
+ msm_mux_mac1_sa2,
+ msm_mux_mac1_sa3,
+ msm_mux_mac2_sa0,
+ msm_mux_mac2_sa1,
+ msm_mux_mdc,
+ msm_mux_mdio,
+ msm_mux_pcie0_clk,
+ msm_mux_pcie0_rst,
+ msm_mux_pcie0_wake,
+ msm_mux_pcie1_clk,
+ msm_mux_pcie1_rst,
+ msm_mux_pcie1_wake,
+ msm_mux_pcm_drx,
+ msm_mux_pcm_dtx,
+ msm_mux_pcm_fsync,
+ msm_mux_pcm_pclk,
+ msm_mux_pcm_zsi0,
+ msm_mux_pcm_zsi1,
+ msm_mux_prng_rosc,
+ msm_mux_pta1_0,
+ msm_mux_pta1_1,
+ msm_mux_pta1_2,
+ msm_mux_pta2_0,
+ msm_mux_pta2_1,
+ msm_mux_pta2_2,
+ msm_mux_pwm0,
+ msm_mux_pwm1,
+ msm_mux_pwm2,
+ msm_mux_pwm3,
+ msm_mux_qdss_cti_trig_in_a0,
+ msm_mux_qdss_cti_trig_in_a1,
+ msm_mux_qdss_cti_trig_in_b0,
+ msm_mux_qdss_cti_trig_in_b1,
+ msm_mux_qdss_cti_trig_out_a0,
+ msm_mux_qdss_cti_trig_out_a1,
+ msm_mux_qdss_cti_trig_out_b0,
+ msm_mux_qdss_cti_trig_out_b1,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_qdss_traceclk_b,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_qdss_tracectl_b,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_qdss_tracedata_b,
+ msm_mux_qpic,
+ msm_mux_rx0,
+ msm_mux_rx1,
+ msm_mux_rx2,
+ msm_mux_sd_card,
+ msm_mux_sd_write,
+ msm_mux_tsens_max,
+ msm_mux_wci2a,
+ msm_mux_wci2b,
+ msm_mux_wci2c,
+ msm_mux_wci2d,
+ msm_mux_NA,
+};
+
+static const char * const qpic_groups[] = {
+ "gpio0", /* LCD_TE */
+ "gpio1", /* BUSY_N */
+ "gpio2", /* LCD_RS_N */
+ "gpio3", /* WE_N */
+ "gpio4", /* OE_N */
+ "gpio5", /* DATA[0] */
+ "gpio6", /* DATA[1] */
+ "gpio7", /* DATA[2] */
+ "gpio8", /* DATA[3] */
+ "gpio9", /* CS_CSR_LCD */
+ "gpio10", /* CLE */
+ "gpio11", /* NAND_CS_N */
+ "gpio12", /* DATA[4] */
+ "gpio13", /* DATA[5] */
+ "gpio14", /* DATA[6] */
+ "gpio15", /* DATA[7] */
+ "gpio16", /* DATA[8] */
+ "gpio17", /* ALE */
+};
+
+static const char * const blsp5_i2c_groups[] = {
+ "gpio0", "gpio2",
+};
+
+static const char * const blsp5_spi_groups[] = {
+ "gpio0", "gpio2", "gpio9", "gpio16",
+};
+
+static const char * const wci2a_groups[] = {
+ "gpio0", "gpio2",
+};
+
+static const char * const blsp3_spi3_groups[] = {
+ "gpio0", "gpio2", "gpio9",
+};
+
+static const char * const burn0_groups[] = {
+ "gpio0",
+};
+
+static const char * const pcm_zsi0_groups[] = {
+ "gpio1",
+};
+
+static const char * const blsp5_uart_groups[] = {
+ "gpio0", "gpio2", "gpio9", "gpio16",
+};
+
+static const char * const mac1_sa2_groups[] = {
+ "gpio1", "gpio11",
+};
+
+static const char * const blsp3_spi0_groups[] = {
+ "gpio1", "gpio3", "gpio4",
+};
+
+static const char * const burn1_groups[] = {
+ "gpio1",
+};
+
+static const char * const mac0_sa1_groups[] = {
+ "gpio3", "gpio4",
+};
+
+static const char * const qdss_cti_trig_out_b0_groups[] = {
+ "gpio3",
+};
+
+static const char * const qdss_cti_trig_in_b0_groups[] = {
+ "gpio4",
+};
+
+static const char * const blsp4_uart0_groups[] = {
+ "gpio5", "gpio6", "gpio7", "gpio8",
+};
+
+static const char * const blsp4_i2c0_groups[] = {
+ "gpio5", "gpio6",
+};
+
+static const char * const blsp4_spi0_groups[] = {
+ "gpio5", "gpio6", "gpio7", "gpio8",
+};
+
+static const char * const mac2_sa1_groups[] = {
+ "gpio5", "gpio6",
+};
+
+static const char * const qdss_cti_trig_out_b1_groups[] = {
+ "gpio5",
+};
+
+static const char * const qdss_cti_trig_in_b1_groups[] = {
+ "gpio6",
+};
+
+static const char * const cxc0_groups[] = {
+ "gpio9", "gpio16",
+};
+
+static const char * const mac1_sa3_groups[] = {
+ "gpio9", "gpio16",
+};
+
+static const char * const qdss_cti_trig_in_a1_groups[] = {
+ "gpio9",
+};
+
+static const char * const qdss_cti_trig_out_a1_groups[] = {
+ "gpio10",
+};
+
+static const char * const wci2c_groups[] = {
+ "gpio11", "gpio17",
+};
+
+static const char * const qdss_cti_trig_in_a0_groups[] = {
+ "gpio11",
+};
+
+static const char * const qdss_cti_trig_out_a0_groups[] = {
+ "gpio12",
+};
+
+static const char * const qdss_traceclk_b_groups[] = {
+ "gpio14",
+};
+
+static const char * const qdss_tracectl_b_groups[] = {
+ "gpio15",
+};
+
+static const char * const pcm_zsi1_groups[] = {
+ "gpio16",
+};
+
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+ "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29",
+ "gpio30", "gpio31",
+};
+
+static const char * const led0_groups[] = {
+ "gpio18",
+};
+
+static const char * const pwm0_groups[] = {
+ "gpio18", "gpio21", "gpio25", "gpio29", "gpio63",
+};
+
+static const char * const led1_groups[] = {
+ "gpio19",
+};
+
+static const char * const pwm1_groups[] = {
+ "gpio19", "gpio22", "gpio26", "gpio30", "gpio64",
+};
+
+static const char * const led2_groups[] = {
+ "gpio20",
+};
+
+static const char * const pwm2_groups[] = {
+ "gpio20", "gpio23", "gpio27", "gpio31", "gpio66",
+};
+
+static const char * const blsp4_uart1_groups[] = {
+ "gpio21", "gpio22", "gpio23", "gpio24",
+};
+
+static const char * const blsp4_i2c1_groups[] = {
+ "gpio21", "gpio22",
+};
+
+static const char * const blsp4_spi1_groups[] = {
+ "gpio21", "gpio22", "gpio23", "gpio24",
+};
+
+static const char * const wci2d_groups[] = {
+ "gpio21", "gpio22",
+};
+
+static const char * const mac1_sa1_groups[] = {
+ "gpio21", "gpio22",
+};
+
+static const char * const blsp3_spi2_groups[] = {
+ "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const pwm3_groups[] = {
+ "gpio24", "gpio28", "gpio32", "gpio67",
+};
+
+static const char * const audio_txmclk_groups[] = {
+ "gpio25",
+};
+
+static const char * const audio_txbclk_groups[] = {
+ "gpio26",
+};
+
+static const char * const audio_txfsync_groups[] = {
+ "gpio27",
+};
+
+static const char * const audio_txd_groups[] = {
+ "gpio28",
+};
+
+static const char * const audio_rxmclk_groups[] = {
+ "gpio29",
+};
+
+static const char * const atest_char0_groups[] = {
+ "gpio29",
+};
+
+static const char * const audio_rxbclk_groups[] = {
+ "gpio30",
+};
+
+static const char * const atest_char1_groups[] = {
+ "gpio30",
+};
+
+static const char * const audio_rxfsync_groups[] = {
+ "gpio31",
+};
+
+static const char * const atest_char2_groups[] = {
+ "gpio31",
+};
+
+static const char * const audio_rxd_groups[] = {
+ "gpio32",
+};
+
+static const char * const atest_char3_groups[] = {
+ "gpio32",
+};
+
+static const char * const pcm_drx_groups[] = {
+ "gpio33",
+};
+
+static const char * const mac1_sa0_groups[] = {
+ "gpio33", "gpio34",
+};
+
+static const char * const mac0_sa0_groups[] = {
+ "gpio33", "gpio34",
+};
+
+static const char * const pcm_dtx_groups[] = {
+ "gpio34",
+};
+
+static const char * const pcm_fsync_groups[] = {
+ "gpio35",
+};
+
+static const char * const mac2_sa0_groups[] = {
+ "gpio35", "gpio36",
+};
+
+static const char * const qdss_traceclk_a_groups[] = {
+ "gpio35",
+};
+
+static const char * const pcm_pclk_groups[] = {
+ "gpio36",
+};
+
+static const char * const qdss_tracectl_a_groups[] = {
+ "gpio36",
+};
+
+static const char * const atest_char_groups[] = {
+ "gpio37",
+};
+
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49", "gpio50",
+ "gpio51", "gpio52",
+};
+
+static const char * const blsp0_uart_groups[] = {
+ "gpio38", "gpio39", "gpio40", "gpio41",
+};
+
+static const char * const blsp0_i2c_groups[] = {
+ "gpio38", "gpio39",
+};
+
+static const char * const blsp0_spi_groups[] = {
+ "gpio38", "gpio39", "gpio40", "gpio41",
+};
+
+static const char * const blsp1_uart_groups[] = {
+ "gpio42", "gpio43", "gpio44", "gpio45",
+};
+
+static const char * const blsp1_i2c_groups[] = {
+ "gpio42", "gpio43",
+};
+
+static const char * const blsp1_spi_groups[] = {
+ "gpio42", "gpio43", "gpio44", "gpio45",
+};
+
+static const char * const blsp2_uart_groups[] = {
+ "gpio46", "gpio47", "gpio48", "gpio49",
+};
+
+static const char * const blsp2_i2c_groups[] = {
+ "gpio46", "gpio47",
+};
+
+static const char * const blsp2_spi_groups[] = {
+ "gpio46", "gpio47", "gpio48", "gpio49",
+};
+
+static const char * const blsp3_uart_groups[] = {
+ "gpio50", "gpio51", "gpio52", "gpio53",
+};
+
+static const char * const blsp3_i2c_groups[] = {
+ "gpio50", "gpio51",
+};
+
+static const char * const blsp3_spi_groups[] = {
+ "gpio50", "gpio51", "gpio52", "gpio53",
+};
+
+static const char * const pta2_0_groups[] = {
+ "gpio54",
+};
+
+static const char * const wci2b_groups[] = {
+ "gpio54", "gpio56",
+};
+
+static const char * const cxc1_groups[] = {
+ "gpio54", "gpio56",
+};
+
+static const char * const blsp3_spi1_groups[] = {
+ "gpio54", "gpio55", "gpio56",
+};
+
+static const char * const pta2_1_groups[] = {
+ "gpio55",
+};
+
+static const char * const pta2_2_groups[] = {
+ "gpio56",
+};
+
+static const char * const pcie0_clk_groups[] = {
+ "gpio57",
+};
+
+static const char * const dbg_out_groups[] = {
+ "gpio57",
+};
+
+static const char * const cri_trng0_groups[] = {
+ "gpio57",
+};
+
+static const char * const pcie0_rst_groups[] = {
+ "gpio58",
+};
+
+static const char * const cri_trng1_groups[] = {
+ "gpio58",
+};
+
+static const char * const pcie0_wake_groups[] = {
+ "gpio59",
+};
+
+static const char * const cri_trng_groups[] = {
+ "gpio59",
+};
+
+static const char * const pcie1_clk_groups[] = {
+ "gpio60",
+};
+
+static const char * const rx2_groups[] = {
+ "gpio60",
+};
+
+static const char * const ldo_update_groups[] = {
+ "gpio60",
+};
+
+static const char * const pcie1_rst_groups[] = {
+ "gpio61",
+};
+
+static const char * const ldo_en_groups[] = {
+ "gpio61",
+};
+
+static const char * const pcie1_wake_groups[] = {
+ "gpio62",
+};
+
+static const char * const gcc_plltest_groups[] = {
+ "gpio62", "gpio63",
+};
+
+static const char * const sd_card_groups[] = {
+ "gpio63",
+};
+
+static const char * const pta1_1_groups[] = {
+ "gpio64",
+};
+
+static const char * const rx1_groups[] = {
+ "gpio64",
+};
+
+static const char * const pta1_2_groups[] = {
+ "gpio65",
+};
+
+static const char * const gcc_tlmm_groups[] = {
+ "gpio65",
+};
+
+static const char * const pta1_0_groups[] = {
+ "gpio66",
+};
+
+static const char * const prng_rosc_groups[] = {
+ "gpio66",
+};
+
+static const char * const sd_write_groups[] = {
+ "gpio67",
+};
+
+static const char * const rx0_groups[] = {
+ "gpio67",
+};
+
+static const char * const tsens_max_groups[] = {
+ "gpio67",
+};
+
+static const char * const mdc_groups[] = {
+ "gpio68",
+};
+
+static const char * const mdio_groups[] = {
+ "gpio69",
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69",
+};
+
+static const struct msm_function ipq8074_functions[] = {
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(audio_rxbclk),
+ FUNCTION(audio_rxd),
+ FUNCTION(audio_rxfsync),
+ FUNCTION(audio_rxmclk),
+ FUNCTION(audio_txbclk),
+ FUNCTION(audio_txd),
+ FUNCTION(audio_txfsync),
+ FUNCTION(audio_txmclk),
+ FUNCTION(blsp0_i2c),
+ FUNCTION(blsp0_spi),
+ FUNCTION(blsp0_uart),
+ FUNCTION(blsp1_i2c),
+ FUNCTION(blsp1_spi),
+ FUNCTION(blsp1_uart),
+ FUNCTION(blsp2_i2c),
+ FUNCTION(blsp2_spi),
+ FUNCTION(blsp2_uart),
+ FUNCTION(blsp3_i2c),
+ FUNCTION(blsp3_spi),
+ FUNCTION(blsp3_spi0),
+ FUNCTION(blsp3_spi1),
+ FUNCTION(blsp3_spi2),
+ FUNCTION(blsp3_spi3),
+ FUNCTION(blsp3_uart),
+ FUNCTION(blsp4_i2c0),
+ FUNCTION(blsp4_i2c1),
+ FUNCTION(blsp4_spi0),
+ FUNCTION(blsp4_spi1),
+ FUNCTION(blsp4_uart0),
+ FUNCTION(blsp4_uart1),
+ FUNCTION(blsp5_i2c),
+ FUNCTION(blsp5_spi),
+ FUNCTION(blsp5_uart),
+ FUNCTION(burn0),
+ FUNCTION(burn1),
+ FUNCTION(cri_trng),
+ FUNCTION(cri_trng0),
+ FUNCTION(cri_trng1),
+ FUNCTION(cxc0),
+ FUNCTION(cxc1),
+ FUNCTION(dbg_out),
+ FUNCTION(gcc_plltest),
+ FUNCTION(gcc_tlmm),
+ FUNCTION(gpio),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(led0),
+ FUNCTION(led1),
+ FUNCTION(led2),
+ FUNCTION(mac0_sa0),
+ FUNCTION(mac0_sa1),
+ FUNCTION(mac1_sa0),
+ FUNCTION(mac1_sa1),
+ FUNCTION(mac1_sa2),
+ FUNCTION(mac1_sa3),
+ FUNCTION(mac2_sa0),
+ FUNCTION(mac2_sa1),
+ FUNCTION(mdc),
+ FUNCTION(mdio),
+ FUNCTION(pcie0_clk),
+ FUNCTION(pcie0_rst),
+ FUNCTION(pcie0_wake),
+ FUNCTION(pcie1_clk),
+ FUNCTION(pcie1_rst),
+ FUNCTION(pcie1_wake),
+ FUNCTION(pcm_drx),
+ FUNCTION(pcm_dtx),
+ FUNCTION(pcm_fsync),
+ FUNCTION(pcm_pclk),
+ FUNCTION(pcm_zsi0),
+ FUNCTION(pcm_zsi1),
+ FUNCTION(prng_rosc),
+ FUNCTION(pta1_0),
+ FUNCTION(pta1_1),
+ FUNCTION(pta1_2),
+ FUNCTION(pta2_0),
+ FUNCTION(pta2_1),
+ FUNCTION(pta2_2),
+ FUNCTION(pwm0),
+ FUNCTION(pwm1),
+ FUNCTION(pwm2),
+ FUNCTION(pwm3),
+ FUNCTION(qdss_cti_trig_in_a0),
+ FUNCTION(qdss_cti_trig_in_a1),
+ FUNCTION(qdss_cti_trig_in_b0),
+ FUNCTION(qdss_cti_trig_in_b1),
+ FUNCTION(qdss_cti_trig_out_a0),
+ FUNCTION(qdss_cti_trig_out_a1),
+ FUNCTION(qdss_cti_trig_out_b0),
+ FUNCTION(qdss_cti_trig_out_b1),
+ FUNCTION(qdss_traceclk_a),
+ FUNCTION(qdss_traceclk_b),
+ FUNCTION(qdss_tracectl_a),
+ FUNCTION(qdss_tracectl_b),
+ FUNCTION(qdss_tracedata_a),
+ FUNCTION(qdss_tracedata_b),
+ FUNCTION(qpic),
+ FUNCTION(rx0),
+ FUNCTION(rx1),
+ FUNCTION(rx2),
+ FUNCTION(sd_card),
+ FUNCTION(sd_write),
+ FUNCTION(tsens_max),
+ FUNCTION(wci2a),
+ FUNCTION(wci2b),
+ FUNCTION(wci2c),
+ FUNCTION(wci2d),
+};
+
+static const struct msm_pingroup ipq8074_groups[] = {
+ PINGROUP(0, qpic, blsp5_uart, blsp5_i2c, blsp5_spi, wci2a,
+ blsp3_spi3, NA, burn0, NA),
+ PINGROUP(1, qpic, pcm_zsi0, mac1_sa2, blsp3_spi0, NA, burn1, NA, NA,
+ NA),
+ PINGROUP(2, qpic, blsp5_uart, blsp5_i2c, blsp5_spi, wci2a,
+ blsp3_spi3, NA, NA, NA),
+ PINGROUP(3, qpic, mac0_sa1, blsp3_spi0, qdss_cti_trig_out_b0, NA, NA,
+ NA, NA, NA),
+ PINGROUP(4, qpic, mac0_sa1, blsp3_spi0, qdss_cti_trig_in_b0, NA, NA,
+ NA, NA, NA),
+ PINGROUP(5, qpic, blsp4_uart0, blsp4_i2c0, blsp4_spi0, mac2_sa1,
+ qdss_cti_trig_out_b1, NA, NA, NA),
+ PINGROUP(6, qpic, blsp4_uart0, blsp4_i2c0, blsp4_spi0, mac2_sa1,
+ qdss_cti_trig_in_b1, NA, NA, NA),
+ PINGROUP(7, qpic, blsp4_uart0, blsp4_spi0, NA, NA, NA, NA, NA, NA),
+ PINGROUP(8, qpic, blsp4_uart0, blsp4_spi0, NA, NA, NA, NA, NA, NA),
+ PINGROUP(9, qpic, blsp5_uart, blsp5_spi, cxc0, mac1_sa3, blsp3_spi3,
+ qdss_cti_trig_in_a1, NA, NA),
+ PINGROUP(10, qpic, qdss_cti_trig_out_a1, NA, NA, NA, NA, NA, NA,
+ NA),
+ PINGROUP(11, qpic, wci2c, mac1_sa2, qdss_cti_trig_in_a0, NA, NA, NA,
+ NA, NA),
+ PINGROUP(12, qpic, qdss_cti_trig_out_a0, NA, NA, NA, NA, NA, NA,
+ NA),
+ PINGROUP(13, qpic, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(14, qpic, qdss_traceclk_b, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(15, qpic, qdss_tracectl_b, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(16, qpic, blsp5_uart, pcm_zsi1, blsp5_spi, cxc0, mac1_sa3,
+ qdss_tracedata_b, NA, NA),
+ PINGROUP(17, qpic, wci2c, qdss_tracedata_b, NA, NA, NA, NA, NA, NA),
+ PINGROUP(18, led0, pwm0, qdss_tracedata_b, NA, NA, NA, NA, NA, NA),
+ PINGROUP(19, led1, pwm1, NA, qdss_tracedata_b, NA, NA, NA, NA, NA),
+ PINGROUP(20, led2, pwm2, NA, qdss_tracedata_b, NA, NA, NA, NA, NA),
+ PINGROUP(21, pwm0, blsp4_uart1, blsp4_i2c1, blsp4_spi1, wci2d, mac1_sa1,
+ blsp3_spi2, NA, qdss_tracedata_b),
+ PINGROUP(22, pwm1, blsp4_uart1, blsp4_i2c1, blsp4_spi1, wci2d, mac1_sa1,
+ blsp3_spi2, NA, qdss_tracedata_b),
+ PINGROUP(23, pwm2, blsp4_uart1, blsp4_spi1, blsp3_spi2, NA,
+ qdss_tracedata_b, NA, NA, NA),
+ PINGROUP(24, pwm3, blsp4_uart1, blsp4_spi1, NA, qdss_tracedata_b, NA,
+ NA, NA, NA),
+ PINGROUP(25, audio_txmclk, pwm0, NA, qdss_tracedata_b, NA, NA, NA, NA,
+ NA),
+ PINGROUP(26, audio_txbclk, pwm1, NA, qdss_tracedata_b, NA, NA, NA, NA,
+ NA),
+ PINGROUP(27, audio_txfsync, pwm2, NA, qdss_tracedata_b, NA, NA, NA,
+ NA, NA),
+ PINGROUP(28, audio_txd, pwm3, NA, qdss_tracedata_b, NA, NA, NA, NA,
+ NA),
+ PINGROUP(29, audio_rxmclk, pwm0, atest_char0, NA, qdss_tracedata_b,
+ NA, NA, NA, NA),
+ PINGROUP(30, audio_rxbclk, pwm1, atest_char1, NA, qdss_tracedata_b,
+ NA, NA, NA, NA),
+ PINGROUP(31, audio_rxfsync, pwm2, atest_char2, NA, qdss_tracedata_b,
+ NA, NA, NA, NA),
+ PINGROUP(32, audio_rxd, pwm3, atest_char3, NA, NA, NA, NA, NA, NA),
+ PINGROUP(33, pcm_drx, mac1_sa0, mac0_sa0, NA, NA, NA, NA, NA, NA),
+ PINGROUP(34, pcm_dtx, mac1_sa0, mac0_sa0, NA, NA, NA, NA, NA, NA),
+ PINGROUP(35, pcm_fsync, mac2_sa0, qdss_traceclk_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(36, pcm_pclk, mac2_sa0, NA, qdss_tracectl_a, NA, NA, NA, NA, NA),
+ PINGROUP(37, atest_char, NA, qdss_tracedata_a, NA, NA, NA, NA, NA, NA),
+ PINGROUP(38, blsp0_uart, blsp0_i2c, blsp0_spi, NA, qdss_tracedata_a,
+ NA, NA, NA, NA),
+ PINGROUP(39, blsp0_uart, blsp0_i2c, blsp0_spi, NA, qdss_tracedata_a,
+ NA, NA, NA, NA),
+ PINGROUP(40, blsp0_uart, blsp0_spi, NA, qdss_tracedata_a, NA, NA, NA,
+ NA, NA),
+ PINGROUP(41, blsp0_uart, blsp0_spi, NA, qdss_tracedata_a, NA, NA, NA,
+ NA, NA),
+ PINGROUP(42, blsp1_uart, blsp1_i2c, blsp1_spi, NA, qdss_tracedata_a,
+ NA, NA, NA, NA),
+ PINGROUP(43, blsp1_uart, blsp1_i2c, blsp1_spi, NA, qdss_tracedata_a,
+ NA, NA, NA, NA),
+ PINGROUP(44, blsp1_uart, blsp1_spi, NA, qdss_tracedata_a, NA, NA, NA,
+ NA, NA),
+ PINGROUP(45, blsp1_uart, blsp1_spi, qdss_tracedata_a, NA, NA, NA, NA,
+ NA, NA),
+ PINGROUP(46, blsp2_uart, blsp2_i2c, blsp2_spi, qdss_tracedata_a, NA,
+ NA, NA, NA, NA),
+ PINGROUP(47, blsp2_uart, blsp2_i2c, blsp2_spi, NA, qdss_tracedata_a,
+ NA, NA, NA, NA),
+ PINGROUP(48, blsp2_uart, blsp2_spi, NA, qdss_tracedata_a, NA, NA, NA,
+ NA, NA),
+ PINGROUP(49, blsp2_uart, blsp2_spi, NA, qdss_tracedata_a, NA, NA, NA,
+ NA, NA),
+ PINGROUP(50, blsp3_uart, blsp3_i2c, blsp3_spi, NA, qdss_tracedata_a,
+ NA, NA, NA, NA),
+ PINGROUP(51, blsp3_uart, blsp3_i2c, blsp3_spi, NA, qdss_tracedata_a,
+ NA, NA, NA, NA),
+ PINGROUP(52, blsp3_uart, blsp3_spi, NA, qdss_tracedata_a, NA, NA, NA,
+ NA, NA),
+ PINGROUP(53, blsp3_uart, blsp3_spi, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(54, pta2_0, wci2b, cxc1, blsp3_spi1, NA, NA, NA, NA, NA),
+ PINGROUP(55, pta2_1, blsp3_spi1, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(56, pta2_2, wci2b, cxc1, blsp3_spi1, NA, NA, NA, NA, NA),
+ PINGROUP(57, pcie0_clk, NA, dbg_out, cri_trng0, NA, NA, NA, NA, NA),
+ PINGROUP(58, pcie0_rst, NA, cri_trng1, NA, NA, NA, NA, NA, NA),
+ PINGROUP(59, pcie0_wake, NA, cri_trng, NA, NA, NA, NA, NA, NA),
+ PINGROUP(60, pcie1_clk, rx2, ldo_update, NA, NA, NA, NA, NA, NA),
+ PINGROUP(61, pcie1_rst, ldo_en, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(62, pcie1_wake, gcc_plltest, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(63, sd_card, pwm0, NA, gcc_plltest, NA, NA, NA, NA, NA),
+ PINGROUP(64, pta1_1, pwm1, NA, rx1, NA, NA, NA, NA, NA),
+ PINGROUP(65, pta1_2, NA, gcc_tlmm, NA, NA, NA, NA, NA, NA),
+ PINGROUP(66, pta1_0, pwm2, prng_rosc, NA, NA, NA, NA, NA, NA),
+ PINGROUP(67, sd_write, pwm3, rx0, tsens_max, NA, NA, NA, NA, NA),
+ PINGROUP(68, mdc, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(69, mdio, NA, NA, NA, NA, NA, NA, NA, NA),
+};
+
+static const struct msm_pinctrl_soc_data ipq8074_pinctrl = {
+ .pins = ipq8074_pins,
+ .npins = ARRAY_SIZE(ipq8074_pins),
+ .functions = ipq8074_functions,
+ .nfunctions = ARRAY_SIZE(ipq8074_functions),
+ .groups = ipq8074_groups,
+ .ngroups = ARRAY_SIZE(ipq8074_groups),
+ .ngpios = 70,
+};
+
+static int ipq8074_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &ipq8074_pinctrl);
+}
+
+static const struct of_device_id ipq8074_pinctrl_of_match[] = {
+ { .compatible = "qcom,ipq8074-pinctrl", },
+ { },
+};
+
+static struct platform_driver ipq8074_pinctrl_driver = {
+ .driver = {
+ .name = "ipq8074-pinctrl",
+ .of_match_table = ipq8074_pinctrl_of_match,
+ },
+ .probe = ipq8074_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init ipq8074_pinctrl_init(void)
+{
+ return platform_driver_register(&ipq8074_pinctrl_driver);
+}
+arch_initcall(ipq8074_pinctrl_init);
+
+static void __exit ipq8074_pinctrl_exit(void)
+{
+ platform_driver_unregister(&ipq8074_pinctrl_driver);
+}
+module_exit(ipq8074_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm ipq8074 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, ipq8074_pinctrl_of_match);
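
For readers decoding the PINGROUP() macro near the top of this file: each GPIO gets its own REG_SIZE (0x1000) register window, so the per-pin offsets are plain arithmetic on the pin id. A worked sketch, illustration only, mirroring the macro rather than adding anything new:

#define EXAMPLE_REG_SIZE	0x1000

/* For gpio id n: ctl at n*0x1000, io at +0x4, intr cfg at +0x8, status at +0xc */
static inline unsigned int example_ctl_reg(unsigned int n)
{
	return EXAMPLE_REG_SIZE * n;		/* e.g. gpio5 -> 0x5000 */
}

static inline unsigned int example_io_reg(unsigned int n)
{
	return 0x4 + EXAMPLE_REG_SIZE * n;	/* e.g. gpio5 -> 0x5004 */
}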
diff --git a/drivers/pinctrl/samsung/Kconfig b/drivers/pinctrl/samsung/Kconfig
index d0461cd5d707..0357f9701eb9 100644
--- a/drivers/pinctrl/samsung/Kconfig
+++ b/drivers/pinctrl/samsung/Kconfig
@@ -10,6 +10,16 @@ config PINCTRL_EXYNOS
bool "Pinctrl driver data for Samsung EXYNOS SoCs other than 5440"
depends on OF && GPIOLIB && (ARCH_EXYNOS || ARCH_S5PV210)
select PINCTRL_SAMSUNG
+ select PINCTRL_EXYNOS_ARM if ARM && (ARCH_EXYNOS || ARCH_S5PV210)
+ select PINCTRL_EXYNOS_ARM64 if ARM64 && ARCH_EXYNOS
+
+config PINCTRL_EXYNOS_ARM
+ bool "ARMv7-specific pinctrl driver data for Exynos (except Exynos5440)" if COMPILE_TEST
+ depends on PINCTRL_EXYNOS
+
+config PINCTRL_EXYNOS_ARM64
+ bool "ARMv8-specific pinctrl driver data for Exynos" if COMPILE_TEST
+ depends on PINCTRL_EXYNOS
config PINCTRL_EXYNOS5440
bool "Samsung EXYNOS5440 SoC pinctrl driver"
diff --git a/drivers/pinctrl/samsung/Makefile b/drivers/pinctrl/samsung/Makefile
index 70160c059edd..595995851ea5 100644
--- a/drivers/pinctrl/samsung/Makefile
+++ b/drivers/pinctrl/samsung/Makefile
@@ -2,6 +2,8 @@
obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o
obj-$(CONFIG_PINCTRL_EXYNOS) += pinctrl-exynos.o
+obj-$(CONFIG_PINCTRL_EXYNOS_ARM) += pinctrl-exynos-arm.o
+obj-$(CONFIG_PINCTRL_EXYNOS_ARM64) += pinctrl-exynos-arm64.o
obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o
obj-$(CONFIG_PINCTRL_S3C24XX) += pinctrl-s3c24xx.o
obj-$(CONFIG_PINCTRL_S3C64XX) += pinctrl-s3c64xx.o
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
new file mode 100644
index 000000000000..071084d3ee9c
--- /dev/null
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
@@ -0,0 +1,815 @@
+/*
+ * Exynos-specific support for the Samsung pinctrl/gpiolib driver with eint support.
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ * Copyright (c) 2012 Linaro Ltd
+ * http://www.linaro.org
+ *
+ * Author: Thomas Abraham <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This file contains the Samsung Exynos specific information required by the
+ * Samsung pinctrl/gpiolib driver. It also includes the implementation of
+ * external gpio and wakeup interrupt support.
+ */
+
+#include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+
+#include "pinctrl-samsung.h"
+#include "pinctrl-exynos.h"
+
+static const struct samsung_pin_bank_type bank_type_off = {
+ .fld_width = { 4, 1, 2, 2, 2, 2, },
+ .reg_offset = { 0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, },
+};
+
+static const struct samsung_pin_bank_type bank_type_alive = {
+ .fld_width = { 4, 1, 2, 2, },
+ .reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
+};
+
+/* Retention control for S5PV210 is located at the end of the clock controller */
+#define S5P_OTHERS 0xE000
+
+#define S5P_OTHERS_RET_IO (1 << 31)
+#define S5P_OTHERS_RET_CF (1 << 30)
+#define S5P_OTHERS_RET_MMC (1 << 29)
+#define S5P_OTHERS_RET_UART (1 << 28)
+
+static void s5pv210_retention_disable(struct samsung_pinctrl_drv_data *drvdata)
+{
+ void __iomem *clk_base = (void __iomem *)drvdata->retention_ctrl->priv;
+ u32 tmp;
+
+ tmp = __raw_readl(clk_base + S5P_OTHERS);
+ tmp |= (S5P_OTHERS_RET_IO | S5P_OTHERS_RET_CF | S5P_OTHERS_RET_MMC |
+ S5P_OTHERS_RET_UART);
+ __raw_writel(tmp, clk_base + S5P_OTHERS);
+}
+
+static struct samsung_retention_ctrl *
+s5pv210_retention_init(struct samsung_pinctrl_drv_data *drvdata,
+ const struct samsung_retention_data *data)
+{
+ struct samsung_retention_ctrl *ctrl;
+ struct device_node *np;
+ void __iomem *clk_base;
+
+ ctrl = devm_kzalloc(drvdata->dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+
+ np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
+ if (!np) {
+ pr_err("%s: failed to find clock controller DT node\n",
+ __func__);
+ return ERR_PTR(-ENODEV);
+ }
+
+ clk_base = of_iomap(np, 0);
+ if (!clk_base) {
+ pr_err("%s: failed to map clock registers\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctrl->priv = (void __force *)clk_base;
+ ctrl->disable = s5pv210_retention_disable;
+
+ return ctrl;
+}
+
+static const struct samsung_retention_data s5pv210_retention_data __initconst = {
+ .init = s5pv210_retention_init,
+};
+
+/* pin banks of s5pv210 pin-controller */
+static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpa1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
+ EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14),
+ EXYNOS_PIN_BANK_EINTG(6, 0x0c0, "gpd1", 0x18),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpe0", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(5, 0x100, "gpe1", 0x20),
+ EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpf0", 0x24),
+ EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpf1", 0x28),
+ EXYNOS_PIN_BANK_EINTG(8, 0x160, "gpf2", 0x2c),
+ EXYNOS_PIN_BANK_EINTG(6, 0x180, "gpf3", 0x30),
+ EXYNOS_PIN_BANK_EINTG(7, 0x1a0, "gpg0", 0x34),
+ EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38),
+ EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c),
+ EXYNOS_PIN_BANK_EINTG(7, 0x200, "gpg3", 0x40),
+ EXYNOS_PIN_BANK_EINTN(7, 0x220, "gpi"),
+ EXYNOS_PIN_BANK_EINTG(8, 0x240, "gpj0", 0x44),
+ EXYNOS_PIN_BANK_EINTG(6, 0x260, "gpj1", 0x48),
+ EXYNOS_PIN_BANK_EINTG(8, 0x280, "gpj2", 0x4c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x2a0, "gpj3", 0x50),
+ EXYNOS_PIN_BANK_EINTG(5, 0x2c0, "gpj4", 0x54),
+ EXYNOS_PIN_BANK_EINTN(8, 0x2e0, "mp01"),
+ EXYNOS_PIN_BANK_EINTN(4, 0x300, "mp02"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x320, "mp03"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x340, "mp04"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x360, "mp05"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x380, "mp06"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x3a0, "mp07"),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc00, "gph0", 0x00),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc20, "gph1", 0x04),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc40, "gph2", 0x08),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gph3", 0x0c),
+};
+
+const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = {
+ {
+ /* pin-controller instance 0 data */
+ .pin_banks = s5pv210_pin_bank,
+ .nr_banks = ARRAY_SIZE(s5pv210_pin_bank),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &s5pv210_retention_data,
+ },
+};
+
+/* Pad retention control code for accessing PMU regmap */
+static atomic_t exynos_shared_retention_refcnt;
+
+/* pin banks of exynos3250 pin-controller 0 */
+static const struct samsung_pin_bank_data exynos3250_pin_banks0[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
+ EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0c0, "gpd1", 0x18),
+};
+
+/* pin banks of exynos3250 pin-controller 1 */
+static const struct samsung_pin_bank_data exynos3250_pin_banks1[] __initconst = {
+ EXYNOS_PIN_BANK_EINTN(8, 0x120, "gpe0"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x140, "gpe1"),
+ EXYNOS_PIN_BANK_EINTN(3, 0x180, "gpe2"),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpk0", 0x08),
+ EXYNOS_PIN_BANK_EINTG(7, 0x060, "gpk1", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(7, 0x080, "gpk2", 0x10),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0c0, "gpl0", 0x18),
+ EXYNOS_PIN_BANK_EINTG(8, 0x260, "gpm0", 0x24),
+ EXYNOS_PIN_BANK_EINTG(7, 0x280, "gpm1", 0x28),
+ EXYNOS_PIN_BANK_EINTG(5, 0x2a0, "gpm2", 0x2c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x2c0, "gpm3", 0x30),
+ EXYNOS_PIN_BANK_EINTG(8, 0x2e0, "gpm4", 0x34),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc00, "gpx0", 0x00),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc20, "gpx1", 0x04),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc40, "gpx2", 0x08),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gpx3", 0x0c),
+};
+
+/*
+ * PMU pad retention groups for Exynos3250 don't match the pin banks, so handle
+ * them all together.
+ */
+static const u32 exynos3250_retention_regs[] = {
+ S5P_PAD_RET_MAUDIO_OPTION,
+ S5P_PAD_RET_GPIO_OPTION,
+ S5P_PAD_RET_UART_OPTION,
+ S5P_PAD_RET_MMCA_OPTION,
+ S5P_PAD_RET_MMCB_OPTION,
+ S5P_PAD_RET_EBIA_OPTION,
+ S5P_PAD_RET_EBIB_OPTION,
+ S5P_PAD_RET_MMC2_OPTION,
+ S5P_PAD_RET_SPI_OPTION,
+};
+
+static const struct samsung_retention_data exynos3250_retention_data __initconst = {
+ .regs = exynos3250_retention_regs,
+ .nr_regs = ARRAY_SIZE(exynos3250_retention_regs),
+ .value = EXYNOS_WAKEUP_FROM_LOWPWR,
+ .refcnt = &exynos_shared_retention_refcnt,
+ .init = exynos_retention_init,
+};
+
+/*
+ * Samsung pinctrl driver data for Exynos3250 SoC. Exynos3250 SoC includes
+ * two gpio/pin-mux/pinconfig controllers.
+ */
+const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = {
+ {
+ /* pin-controller instance 0 data */
+ .pin_banks = exynos3250_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos3250_pin_banks0),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos3250_retention_data,
+ }, {
+ /* pin-controller instance 1 data */
+ .pin_banks = exynos3250_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos3250_pin_banks1),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos3250_retention_data,
+ },
+};
+
+/* pin banks of exynos4210 pin-controller 0 */
+static const struct samsung_pin_bank_data exynos4210_pin_banks0[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
+ EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0A0, "gpd0", 0x14),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0C0, "gpd1", 0x18),
+ EXYNOS_PIN_BANK_EINTG(5, 0x0E0, "gpe0", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpe1", 0x20),
+ EXYNOS_PIN_BANK_EINTG(6, 0x120, "gpe2", 0x24),
+ EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpe3", 0x28),
+ EXYNOS_PIN_BANK_EINTG(8, 0x160, "gpe4", 0x2c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpf0", 0x30),
+ EXYNOS_PIN_BANK_EINTG(8, 0x1A0, "gpf1", 0x34),
+ EXYNOS_PIN_BANK_EINTG(8, 0x1C0, "gpf2", 0x38),
+ EXYNOS_PIN_BANK_EINTG(6, 0x1E0, "gpf3", 0x3c),
+};
+
+/* pin banks of exynos4210 pin-controller 1 */
+static const struct samsung_pin_bank_data exynos4210_pin_banks1[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpj0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpj1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpk0", 0x08),
+ EXYNOS_PIN_BANK_EINTG(7, 0x060, "gpk1", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(7, 0x080, "gpk2", 0x10),
+ EXYNOS_PIN_BANK_EINTG(7, 0x0A0, "gpk3", 0x14),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0C0, "gpl0", 0x18),
+ EXYNOS_PIN_BANK_EINTG(3, 0x0E0, "gpl1", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpl2", 0x20),
+ EXYNOS_PIN_BANK_EINTN(6, 0x120, "gpy0"),
+ EXYNOS_PIN_BANK_EINTN(4, 0x140, "gpy1"),
+ EXYNOS_PIN_BANK_EINTN(6, 0x160, "gpy2"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x180, "gpy3"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x1A0, "gpy4"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x1C0, "gpy5"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x1E0, "gpy6"),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC40, "gpx2", 0x08),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC60, "gpx3", 0x0c),
+};
+
+/* pin banks of exynos4210 pin-controller 2 */
+static const struct samsung_pin_bank_data exynos4210_pin_banks2[] __initconst = {
+ EXYNOS_PIN_BANK_EINTN(7, 0x000, "gpz"),
+};
+
+/* PMU pad retention group registers for Exynos4 (without audio) */
+static const u32 exynos4_retention_regs[] = {
+ S5P_PAD_RET_GPIO_OPTION,
+ S5P_PAD_RET_UART_OPTION,
+ S5P_PAD_RET_MMCA_OPTION,
+ S5P_PAD_RET_MMCB_OPTION,
+ S5P_PAD_RET_EBIA_OPTION,
+ S5P_PAD_RET_EBIB_OPTION,
+};
+
+static const struct samsung_retention_data exynos4_retention_data __initconst = {
+ .regs = exynos4_retention_regs,
+ .nr_regs = ARRAY_SIZE(exynos4_retention_regs),
+ .value = EXYNOS_WAKEUP_FROM_LOWPWR,
+ .refcnt = &exynos_shared_retention_refcnt,
+ .init = exynos_retention_init,
+};
+
+/* PMU retention control for audio pins can be tied to the audio pin bank */
+static const u32 exynos4_audio_retention_regs[] = {
+ S5P_PAD_RET_MAUDIO_OPTION,
+};
+
+static const struct samsung_retention_data exynos4_audio_retention_data __initconst = {
+ .regs = exynos4_audio_retention_regs,
+ .nr_regs = ARRAY_SIZE(exynos4_audio_retention_regs),
+ .value = EXYNOS_WAKEUP_FROM_LOWPWR,
+ .init = exynos_retention_init,
+};
+
+/*
+ * Samsung pinctrl driver data for Exynos4210 SoC. Exynos4210 SoC includes
+ * three gpio/pin-mux/pinconfig controllers.
+ */
+const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = {
+ {
+ /* pin-controller instance 0 data */
+ .pin_banks = exynos4210_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos4210_pin_banks0),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_retention_data,
+ }, {
+ /* pin-controller instance 1 data */
+ .pin_banks = exynos4210_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos4210_pin_banks1),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_retention_data,
+ }, {
+ /* pin-controller instance 2 data */
+ .pin_banks = exynos4210_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos4210_pin_banks2),
+ .retention_data = &exynos4_audio_retention_data,
+ },
+};
+
+/* pin banks of exynos4x12 pin-controller 0 */
+static const struct samsung_pin_bank_data exynos4x12_pin_banks0[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
+ EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0A0, "gpd0", 0x14),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0C0, "gpd1", 0x18),
+ EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpf0", 0x30),
+ EXYNOS_PIN_BANK_EINTG(8, 0x1A0, "gpf1", 0x34),
+ EXYNOS_PIN_BANK_EINTG(8, 0x1C0, "gpf2", 0x38),
+ EXYNOS_PIN_BANK_EINTG(6, 0x1E0, "gpf3", 0x3c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x240, "gpj0", 0x40),
+ EXYNOS_PIN_BANK_EINTG(5, 0x260, "gpj1", 0x44),
+};
+
+/* pin banks of exynos4x12 pin-controller 1 */
+static const struct samsung_pin_bank_data exynos4x12_pin_banks1[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpk0", 0x08),
+ EXYNOS_PIN_BANK_EINTG(7, 0x060, "gpk1", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(7, 0x080, "gpk2", 0x10),
+ EXYNOS_PIN_BANK_EINTG(7, 0x0A0, "gpk3", 0x14),
+ EXYNOS_PIN_BANK_EINTG(7, 0x0C0, "gpl0", 0x18),
+ EXYNOS_PIN_BANK_EINTG(2, 0x0E0, "gpl1", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpl2", 0x20),
+ EXYNOS_PIN_BANK_EINTG(8, 0x260, "gpm0", 0x24),
+ EXYNOS_PIN_BANK_EINTG(7, 0x280, "gpm1", 0x28),
+ EXYNOS_PIN_BANK_EINTG(5, 0x2A0, "gpm2", 0x2c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x2C0, "gpm3", 0x30),
+ EXYNOS_PIN_BANK_EINTG(8, 0x2E0, "gpm4", 0x34),
+ EXYNOS_PIN_BANK_EINTN(6, 0x120, "gpy0"),
+ EXYNOS_PIN_BANK_EINTN(4, 0x140, "gpy1"),
+ EXYNOS_PIN_BANK_EINTN(6, 0x160, "gpy2"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x180, "gpy3"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x1A0, "gpy4"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x1C0, "gpy5"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x1E0, "gpy6"),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC40, "gpx2", 0x08),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC60, "gpx3", 0x0c),
+};
+
+/* pin banks of exynos4x12 pin-controller 2 */
+static const struct samsung_pin_bank_data exynos4x12_pin_banks2[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
+};
+
+/* pin banks of exynos4x12 pin-controller 3 */
+static const struct samsung_pin_bank_data exynos4x12_pin_banks3[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpv2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpv3", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpv4", 0x10),
+};
+
+/*
+ * Samsung pinctrl driver data for Exynos4x12 SoC. Exynos4x12 SoC includes
+ * four gpio/pin-mux/pinconfig controllers.
+ */
+const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = {
+ {
+ /* pin-controller instance 0 data */
+ .pin_banks = exynos4x12_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos4x12_pin_banks0),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_retention_data,
+ }, {
+ /* pin-controller instance 1 data */
+ .pin_banks = exynos4x12_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos4x12_pin_banks1),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_retention_data,
+ }, {
+ /* pin-controller instance 2 data */
+ .pin_banks = exynos4x12_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos4x12_pin_banks2),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_audio_retention_data,
+ }, {
+ /* pin-controller instance 3 data */
+ .pin_banks = exynos4x12_pin_banks3,
+ .nr_banks = ARRAY_SIZE(exynos4x12_pin_banks3),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ },
+};
+
+/* pin banks of exynos5250 pin-controller 0 */
+static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpb0", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpb1", 0x10),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0A0, "gpb2", 0x14),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0C0, "gpb3", 0x18),
+ EXYNOS_PIN_BANK_EINTG(7, 0x0E0, "gpc0", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(4, 0x100, "gpc1", 0x20),
+ EXYNOS_PIN_BANK_EINTG(7, 0x120, "gpc2", 0x24),
+ EXYNOS_PIN_BANK_EINTG(7, 0x140, "gpc3", 0x28),
+ EXYNOS_PIN_BANK_EINTG(4, 0x160, "gpd0", 0x2c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpd1", 0x30),
+ EXYNOS_PIN_BANK_EINTG(7, 0x2E0, "gpc4", 0x34),
+ EXYNOS_PIN_BANK_EINTN(6, 0x1A0, "gpy0"),
+ EXYNOS_PIN_BANK_EINTN(4, 0x1C0, "gpy1"),
+ EXYNOS_PIN_BANK_EINTN(6, 0x1E0, "gpy2"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x200, "gpy3"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x220, "gpy4"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x240, "gpy5"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x260, "gpy6"),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC40, "gpx2", 0x08),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC60, "gpx3", 0x0c),
+};
+
+/* pin banks of exynos5250 pin-controller 1 */
+static const struct samsung_pin_bank_data exynos5250_pin_banks1[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpe0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(2, 0x020, "gpe1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(4, 0x040, "gpf0", 0x08),
+ EXYNOS_PIN_BANK_EINTG(4, 0x060, "gpf1", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x080, "gpg0", 0x10),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0A0, "gpg1", 0x14),
+ EXYNOS_PIN_BANK_EINTG(2, 0x0C0, "gpg2", 0x18),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0E0, "gph0", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x100, "gph1", 0x20),
+};
+
+/* pin banks of exynos5250 pin-controller 2 */
+static const struct samsung_pin_bank_data exynos5250_pin_banks2[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpv2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(8, 0x080, "gpv3", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(2, 0x0C0, "gpv4", 0x10),
+};
+
+/* pin banks of exynos5250 pin-controller 3 */
+static const struct samsung_pin_bank_data exynos5250_pin_banks3[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
+};
+
+/*
+ * Samsung pinctrl driver data for Exynos5250 SoC. Exynos5250 SoC includes
+ * four gpio/pin-mux/pinconfig controllers.
+ */
+const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = {
+ {
+ /* pin-controller instance 0 data */
+ .pin_banks = exynos5250_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos5250_pin_banks0),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_retention_data,
+ }, {
+ /* pin-controller instance 1 data */
+ .pin_banks = exynos5250_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos5250_pin_banks1),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_retention_data,
+ }, {
+ /* pin-controller instance 2 data */
+ .pin_banks = exynos5250_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos5250_pin_banks2),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 3 data */
+ .pin_banks = exynos5250_pin_banks3,
+ .nr_banks = ARRAY_SIZE(exynos5250_pin_banks3),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos4_audio_retention_data,
+ },
+};
+
+/* pin banks of exynos5260 pin-controller 0 */
+static const struct samsung_pin_bank_data exynos5260_pin_banks0[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpa1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpb0", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(4, 0x080, "gpb1", 0x10),
+ EXYNOS_PIN_BANK_EINTG(5, 0x0a0, "gpb2", 0x14),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0c0, "gpb3", 0x18),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpb4", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpb5", 0x20),
+ EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd0", 0x24),
+ EXYNOS_PIN_BANK_EINTG(7, 0x140, "gpd1", 0x28),
+ EXYNOS_PIN_BANK_EINTG(5, 0x160, "gpd2", 0x2c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpe0", 0x30),
+ EXYNOS_PIN_BANK_EINTG(5, 0x1a0, "gpe1", 0x34),
+ EXYNOS_PIN_BANK_EINTG(4, 0x1c0, "gpf0", 0x38),
+ EXYNOS_PIN_BANK_EINTG(8, 0x1e0, "gpf1", 0x3c),
+ EXYNOS_PIN_BANK_EINTG(2, 0x200, "gpk0", 0x40),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc00, "gpx0", 0x00),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc20, "gpx1", 0x04),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc40, "gpx2", 0x08),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gpx3", 0x0c),
+};
+
+/* pin banks of exynos5260 pin-controller 1 */
+static const struct samsung_pin_bank_data exynos5260_pin_banks1[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpc0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpc1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpc2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(4, 0x060, "gpc3", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(4, 0x080, "gpc4", 0x10),
+};
+
+/* pin banks of exynos5260 pin-controller 2 */
+static const struct samsung_pin_bank_data exynos5260_pin_banks2[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
+};
+
+/*
+ * Samsung pinctrl driver data for Exynos5260 SoC. Exynos5260 SoC includes
+ * three gpio/pin-mux/pinconfig controllers.
+ */
+const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = {
+ {
+ /* pin-controller instance 0 data */
+ .pin_banks = exynos5260_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos5260_pin_banks0),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ }, {
+ /* pin-controller instance 1 data */
+ .pin_banks = exynos5260_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos5260_pin_banks1),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 2 data */
+ .pin_banks = exynos5260_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos5260_pin_banks2),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ },
+};
+
+/* pin banks of exynos5410 pin-controller 0 */
+static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpb0", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpb1", 0x10),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0A0, "gpb2", 0x14),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0C0, "gpb3", 0x18),
+ EXYNOS_PIN_BANK_EINTG(7, 0x0E0, "gpc0", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(4, 0x100, "gpc3", 0x20),
+ EXYNOS_PIN_BANK_EINTG(7, 0x120, "gpc1", 0x24),
+ EXYNOS_PIN_BANK_EINTG(7, 0x140, "gpc2", 0x28),
+ EXYNOS_PIN_BANK_EINTN(2, 0x160, "gpm5"),
+ EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpd1", 0x2c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x1A0, "gpe0", 0x30),
+ EXYNOS_PIN_BANK_EINTG(2, 0x1C0, "gpe1", 0x34),
+ EXYNOS_PIN_BANK_EINTG(6, 0x1E0, "gpf0", 0x38),
+ EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpf1", 0x3c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x220, "gpg0", 0x40),
+ EXYNOS_PIN_BANK_EINTG(8, 0x240, "gpg1", 0x44),
+ EXYNOS_PIN_BANK_EINTG(2, 0x260, "gpg2", 0x48),
+ EXYNOS_PIN_BANK_EINTG(4, 0x280, "gph0", 0x4c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x2A0, "gph1", 0x50),
+ EXYNOS_PIN_BANK_EINTN(8, 0x2C0, "gpm7"),
+ EXYNOS_PIN_BANK_EINTN(6, 0x2E0, "gpy0"),
+ EXYNOS_PIN_BANK_EINTN(4, 0x300, "gpy1"),
+ EXYNOS_PIN_BANK_EINTN(6, 0x320, "gpy2"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x340, "gpy3"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x360, "gpy4"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x380, "gpy5"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x3A0, "gpy6"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x3C0, "gpy7"),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC40, "gpx2", 0x08),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC60, "gpx3", 0x0c),
+};
+
+/* pin banks of exynos5410 pin-controller 1 */
+static const struct samsung_pin_bank_data exynos5410_pin_banks1[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(5, 0x000, "gpj0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpj1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpj2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpj3", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpj4", 0x10),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0A0, "gpk0", 0x14),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0C0, "gpk1", 0x18),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0E0, "gpk2", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(7, 0x100, "gpk3", 0x20),
+};
+
+/* pin banks of exynos5410 pin-controller 2 */
+static const struct samsung_pin_bank_data exynos5410_pin_banks2[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpv2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(8, 0x080, "gpv3", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(2, 0x0C0, "gpv4", 0x10),
+};
+
+/* pin banks of exynos5410 pin-controller 3 */
+static const struct samsung_pin_bank_data exynos5410_pin_banks3[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
+};
+
+/*
+ * Samsung pinctrl driver data for Exynos5410 SoC. Exynos5410 SoC includes
+ * four gpio/pin-mux/pinconfig controllers.
+ */
+const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = {
+ {
+ /* pin-controller instance 0 data */
+ .pin_banks = exynos5410_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos5410_pin_banks0),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 1 data */
+ .pin_banks = exynos5410_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos5410_pin_banks1),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 2 data */
+ .pin_banks = exynos5410_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos5410_pin_banks2),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 3 data */
+ .pin_banks = exynos5410_pin_banks3,
+ .nr_banks = ARRAY_SIZE(exynos5410_pin_banks3),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ },
+};
+
+/* pin banks of exynos5420 pin-controller 0 */
+static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC40, "gpx2", 0x08),
+ EXYNOS_PIN_BANK_EINTW(8, 0xC60, "gpx3", 0x0c),
+};
+
+/* pin banks of exynos5420 pin-controller 1 */
+static const struct samsung_pin_bank_data exynos5420_pin_banks1[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpc0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpc1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpc2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(4, 0x060, "gpc3", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpc4", 0x10),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0A0, "gpd1", 0x14),
+ EXYNOS_PIN_BANK_EINTN(6, 0x0C0, "gpy0"),
+ EXYNOS_PIN_BANK_EINTN(4, 0x0E0, "gpy1"),
+ EXYNOS_PIN_BANK_EINTN(6, 0x100, "gpy2"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x120, "gpy3"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x140, "gpy4"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x160, "gpy5"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x180, "gpy6"),
+};
+
+/* pin banks of exynos5420 pin-controller 2 */
+static const struct samsung_pin_bank_data exynos5420_pin_banks2[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpe0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(2, 0x020, "gpe1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(6, 0x040, "gpf0", 0x08),
+ EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpf1", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x080, "gpg0", 0x10),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0A0, "gpg1", 0x14),
+ EXYNOS_PIN_BANK_EINTG(2, 0x0C0, "gpg2", 0x18),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0E0, "gpj4", 0x1c),
+};
+
+/* pin banks of exynos5420 pin-controller 3 */
+static const struct samsung_pin_bank_data exynos5420_pin_banks3[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpb0", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpb1", 0x10),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0A0, "gpb2", 0x14),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0C0, "gpb3", 0x18),
+ EXYNOS_PIN_BANK_EINTG(2, 0x0E0, "gpb4", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x100, "gph0", 0x20),
+};
+
+/* pin banks of exynos5420 pin-controller 4 */
+static const struct samsung_pin_bank_data exynos5420_pin_banks4[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
+};
+
+/* PMU pad retention group registers for Exynos5420 (without audio) */
+static const u32 exynos5420_retention_regs[] = {
+ EXYNOS_PAD_RET_DRAM_OPTION,
+ EXYNOS_PAD_RET_JTAG_OPTION,
+ EXYNOS5420_PAD_RET_GPIO_OPTION,
+ EXYNOS5420_PAD_RET_UART_OPTION,
+ EXYNOS5420_PAD_RET_MMCA_OPTION,
+ EXYNOS5420_PAD_RET_MMCB_OPTION,
+ EXYNOS5420_PAD_RET_MMCC_OPTION,
+ EXYNOS5420_PAD_RET_HSI_OPTION,
+ EXYNOS_PAD_RET_EBIA_OPTION,
+ EXYNOS_PAD_RET_EBIB_OPTION,
+ EXYNOS5420_PAD_RET_SPI_OPTION,
+ EXYNOS5420_PAD_RET_DRAM_COREBLK_OPTION,
+};
+
+static const struct samsung_retention_data exynos5420_retention_data __initconst = {
+ .regs = exynos5420_retention_regs,
+ .nr_regs = ARRAY_SIZE(exynos5420_retention_regs),
+ .value = EXYNOS_WAKEUP_FROM_LOWPWR,
+ .refcnt = &exynos_shared_retention_refcnt,
+ .init = exynos_retention_init,
+};
+
+/*
+ * Samsung pinctrl driver data for Exynos5420 SoC. Exynos5420 SoC includes
+ * five gpio/pin-mux/pinconfig controllers.
+ */
+const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
+ {
+ /* pin-controller instance 0 data */
+ .pin_banks = exynos5420_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos5420_pin_banks0),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .retention_data = &exynos5420_retention_data,
+ }, {
+ /* pin-controller instance 1 data */
+ .pin_banks = exynos5420_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos5420_pin_banks1),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .retention_data = &exynos5420_retention_data,
+ }, {
+ /* pin-controller instance 2 data */
+ .pin_banks = exynos5420_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos5420_pin_banks2),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .retention_data = &exynos5420_retention_data,
+ }, {
+ /* pin-controller instance 3 data */
+ .pin_banks = exynos5420_pin_banks3,
+ .nr_banks = ARRAY_SIZE(exynos5420_pin_banks3),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .retention_data = &exynos5420_retention_data,
+ }, {
+ /* pin-controller instance 4 data */
+ .pin_banks = exynos5420_pin_banks4,
+ .nr_banks = ARRAY_SIZE(exynos5420_pin_banks4),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .retention_data = &exynos4_audio_retention_data,
+ },
+};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
new file mode 100644
index 000000000000..08e9fdb58fd2
--- /dev/null
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
@@ -0,0 +1,399 @@
+/*
+ * Exynos ARMv8-specific support for the Samsung pinctrl/gpiolib driver
+ * with eint support.
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ * Copyright (c) 2012 Linaro Ltd
+ * http://www.linaro.org
+ * Copyright (c) 2017 Krzysztof Kozlowski <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This file contains the Samsung Exynos-specific information required by the
+ * Samsung pinctrl/gpiolib driver. It also includes the implementation of
+ * external gpio and wakeup interrupt support.
+ */
+
+#include <linux/slab.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+
+#include "pinctrl-samsung.h"
+#include "pinctrl-exynos.h"
+
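+/*
+ * The fld_width[]/reg_offset[] arrays are indexed by PINCFG_TYPE_* (FUNC,
+ * DAT, PUD, DRV, CON_PDN, PUD_PDN). Alive banks have no power-down
+ * configuration registers, hence the shorter arrays.
+ */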
+static const struct samsung_pin_bank_type bank_type_off = {
+ .fld_width = { 4, 1, 2, 2, 2, 2, },
+ .reg_offset = { 0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, },
+};
+
+static const struct samsung_pin_bank_type bank_type_alive = {
+ .fld_width = { 4, 1, 2, 2, },
+ .reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
+};
+
+/* Exynos5433 has 4-bit wide PINCFG_TYPE_DRV bitfields. */
+static const struct samsung_pin_bank_type exynos5433_bank_type_off = {
+ .fld_width = { 4, 1, 2, 4, 2, 2, },
+ .reg_offset = { 0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, },
+};
+
+static const struct samsung_pin_bank_type exynos5433_bank_type_alive = {
+ .fld_width = { 4, 1, 2, 4, },
+ .reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
+};
+
+/* Pad retention control code for accessing PMU regmap */
+static atomic_t exynos_shared_retention_refcnt;
+
+/* pin banks of exynos5433 pin-controller - ALIVE */
+static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = {
+ EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
+ EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
+ EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
+ EXYNOS5433_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
+ EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
+ EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
+ EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
+ EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
+ EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
+};
+
+/* pin banks of exynos5433 pin-controller - AUD */
+static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = {
+ EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
+};
+
+/* pin banks of exynos5433 pin-controller - CPIF */
+static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = {
+ EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
+};
+
+/* pin banks of exynos5433 pin-controller - eSE */
+static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = {
+ EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
+};
+
+/* pin banks of exynos5433 pin-controller - FINGER */
+static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = {
+ EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
+};
+
+/* pin banks of exynos5433 pin-controller - FSYS */
+static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = {
+ EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
+ EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
+ EXYNOS5433_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
+};
+
+/* pin banks of exynos5433 pin-controller - IMEM */
+static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = {
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
+};
+
+/* pin banks of exynos5433 pin-controller - NFC */
+static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = {
+ EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
+};
+
+/* pin banks of exynos5433 pin-controller - PERIC */
+static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = {
+ EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
+ EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
+ EXYNOS5433_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
+ EXYNOS5433_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
+ EXYNOS5433_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
+ EXYNOS5433_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
+ EXYNOS5433_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
+ EXYNOS5433_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
+ EXYNOS5433_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
+ EXYNOS5433_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
+ EXYNOS5433_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
+ EXYNOS5433_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
+ EXYNOS5433_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
+ EXYNOS5433_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
+};
+
+/* pin banks of exynos5433 pin-controller - TOUCH */
+static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = {
+ EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
+};
+
+/* PMU pad retention group registers for Exynos5433 (without audio & fsys) */
+static const u32 exynos5433_retention_regs[] = {
+ EXYNOS5433_PAD_RETENTION_TOP_OPTION,
+ EXYNOS5433_PAD_RETENTION_UART_OPTION,
+ EXYNOS5433_PAD_RETENTION_EBIA_OPTION,
+ EXYNOS5433_PAD_RETENTION_EBIB_OPTION,
+ EXYNOS5433_PAD_RETENTION_SPI_OPTION,
+ EXYNOS5433_PAD_RETENTION_MIF_OPTION,
+ EXYNOS5433_PAD_RETENTION_USBXTI_OPTION,
+ EXYNOS5433_PAD_RETENTION_BOOTLDO_OPTION,
+ EXYNOS5433_PAD_RETENTION_UFS_OPTION,
+ EXYNOS5433_PAD_RETENTION_FSYSGENIO_OPTION,
+};
+
+static const struct samsung_retention_data exynos5433_retention_data __initconst = {
+ .regs = exynos5433_retention_regs,
+ .nr_regs = ARRAY_SIZE(exynos5433_retention_regs),
+ .value = EXYNOS_WAKEUP_FROM_LOWPWR,
+ .refcnt = &exynos_shared_retention_refcnt,
+ .init = exynos_retention_init,
+};
+
+/* PMU retention control for audio pins can be tied to the audio pin bank */
+static const u32 exynos5433_audio_retention_regs[] = {
+ EXYNOS5433_PAD_RETENTION_AUD_OPTION,
+};
+
+static const struct samsung_retention_data exynos5433_audio_retention_data __initconst = {
+ .regs = exynos5433_audio_retention_regs,
+ .nr_regs = ARRAY_SIZE(exynos5433_audio_retention_regs),
+ .value = EXYNOS_WAKEUP_FROM_LOWPWR,
+ .init = exynos_retention_init,
+};
+
+/* PMU retention control for MMC pins can be tied to the fsys pin bank */
+static const u32 exynos5433_fsys_retention_regs[] = {
+ EXYNOS5433_PAD_RETENTION_MMC0_OPTION,
+ EXYNOS5433_PAD_RETENTION_MMC1_OPTION,
+ EXYNOS5433_PAD_RETENTION_MMC2_OPTION,
+};
+
+static const struct samsung_retention_data exynos5433_fsys_retention_data __initconst = {
+ .regs = exynos5433_fsys_retention_regs,
+ .nr_regs = ARRAY_SIZE(exynos5433_fsys_retention_regs),
+ .value = EXYNOS_WAKEUP_FROM_LOWPWR,
+ .init = exynos_retention_init,
+};
+
+/*
+ * Samsung pinctrl driver data for Exynos5433 SoC. Exynos5433 SoC includes
+ * ten gpio/pin-mux/pinconfig controllers.
+ */
+const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = {
+ {
+ /* pin-controller instance 0 data */
+ .pin_banks = exynos5433_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks0),
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .nr_ext_resources = 1,
+ .retention_data = &exynos5433_retention_data,
+ }, {
+ /* pin-controller instance 1 data */
+ .pin_banks = exynos5433_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks1),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos5433_audio_retention_data,
+ }, {
+ /* pin-controller instance 2 data */
+ .pin_banks = exynos5433_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks2),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos5433_retention_data,
+ }, {
+ /* pin-controller instance 3 data */
+ .pin_banks = exynos5433_pin_banks3,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks3),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos5433_retention_data,
+ }, {
+ /* pin-controller instance 4 data */
+ .pin_banks = exynos5433_pin_banks4,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks4),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos5433_retention_data,
+ }, {
+ /* pin-controller instance 5 data */
+ .pin_banks = exynos5433_pin_banks5,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks5),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos5433_fsys_retention_data,
+ }, {
+ /* pin-controller instance 6 data */
+ .pin_banks = exynos5433_pin_banks6,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks6),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos5433_retention_data,
+ }, {
+ /* pin-controller instance 7 data */
+ .pin_banks = exynos5433_pin_banks7,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks7),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos5433_retention_data,
+ }, {
+ /* pin-controller instance 8 data */
+ .pin_banks = exynos5433_pin_banks8,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks8),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos5433_retention_data,
+ }, {
+ /* pin-controller instance 9 data */
+ .pin_banks = exynos5433_pin_banks9,
+ .nr_banks = ARRAY_SIZE(exynos5433_pin_banks9),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .retention_data = &exynos5433_retention_data,
+ },
+};
+
+/* pin banks of exynos7 pin-controller - ALIVE */
+static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = {
+ EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
+ EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
+ EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
+ EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
+};
+
+/* pin banks of exynos7 pin-controller - BUS0 */
+static const struct samsung_pin_bank_data exynos7_pin_banks1[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(5, 0x000, "gpb0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpc0", 0x04),
+ EXYNOS_PIN_BANK_EINTG(2, 0x040, "gpc1", 0x08),
+ EXYNOS_PIN_BANK_EINTG(6, 0x060, "gpc2", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x080, "gpc3", 0x10),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14),
+ EXYNOS_PIN_BANK_EINTG(6, 0x0c0, "gpd1", 0x18),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpd2", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(5, 0x100, "gpd4", 0x20),
+ EXYNOS_PIN_BANK_EINTG(4, 0x120, "gpd5", 0x24),
+ EXYNOS_PIN_BANK_EINTG(6, 0x140, "gpd6", 0x28),
+ EXYNOS_PIN_BANK_EINTG(3, 0x160, "gpd7", 0x2c),
+ EXYNOS_PIN_BANK_EINTG(2, 0x180, "gpd8", 0x30),
+ EXYNOS_PIN_BANK_EINTG(2, 0x1a0, "gpg0", 0x34),
+ EXYNOS_PIN_BANK_EINTG(4, 0x1c0, "gpg3", 0x38),
+};
+
+/* pin banks of exynos7 pin-controller - NFC */
+static const struct samsung_pin_bank_data exynos7_pin_banks2[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
+};
+
+/* pin banks of exynos7 pin-controller - TOUCH */
+static const struct samsung_pin_bank_data exynos7_pin_banks3[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
+};
+
+/* pin banks of exynos7 pin-controller - FF */
+static const struct samsung_pin_bank_data exynos7_pin_banks4[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpg4", 0x00),
+};
+
+/* pin banks of exynos7 pin-controller - ESE */
+static const struct samsung_pin_bank_data exynos7_pin_banks5[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(5, 0x000, "gpv7", 0x00),
+};
+
+/* pin banks of exynos7 pin-controller - FSYS0 */
+static const struct samsung_pin_bank_data exynos7_pin_banks6[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpr4", 0x00),
+};
+
+/* pin banks of exynos7 pin-controller - FSYS1 */
+static const struct samsung_pin_bank_data exynos7_pin_banks7[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpr0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpr1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr3", 0x0c),
+};
+
+/* pin banks of exynos7 pin-controller - BUS1 */
+static const struct samsung_pin_bank_data exynos7_pin_banks8[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpf0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpf1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(4, 0x060, "gpf2", 0x08),
+ EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpf3", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpf4", 0x10),
+ EXYNOS_PIN_BANK_EINTG(8, 0x0c0, "gpf5", 0x14),
+ EXYNOS_PIN_BANK_EINTG(5, 0x0e0, "gpg1", 0x18),
+ EXYNOS_PIN_BANK_EINTG(5, 0x100, "gpg2", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(6, 0x120, "gph1", 0x20),
+ EXYNOS_PIN_BANK_EINTG(3, 0x140, "gpv6", 0x24),
+};
+
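+/* pin banks of exynos7 pin-controller - AUD */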
+static const struct samsung_pin_bank_data exynos7_pin_banks9[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
+};
+
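+/*
+ * Samsung pinctrl driver data for Exynos7 SoC. Exynos7 SoC includes
+ * ten gpio/pin-mux/pinconfig controllers.
+ */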
+const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = {
+ {
+ /* pin-controller instance 0 Alive data */
+ .pin_banks = exynos7_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks0),
+ .eint_wkup_init = exynos_eint_wkup_init,
+ }, {
+ /* pin-controller instance 1 BUS0 data */
+ .pin_banks = exynos7_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks1),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 2 NFC data */
+ .pin_banks = exynos7_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks2),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 3 TOUCH data */
+ .pin_banks = exynos7_pin_banks3,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks3),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 4 FF data */
+ .pin_banks = exynos7_pin_banks4,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks4),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 5 ESE data */
+ .pin_banks = exynos7_pin_banks5,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks5),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 6 FSYS0 data */
+ .pin_banks = exynos7_pin_banks6,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks6),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 7 FSYS1 data */
+ .pin_banks = exynos7_pin_banks7,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks7),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 8 BUS1 data */
+ .pin_banks = exynos7_pin_banks8,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks8),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ }, {
+ /* pin-controller instance 9 AUD data */
+ .pin_banks = exynos7_pin_banks9,
+ .nr_banks = ARRAY_SIZE(exynos7_pin_banks9),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ },
+};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 7b0e6cc35e04..731530a9ce38 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -18,21 +18,18 @@
* external gpio and wakeup interrupt support.
*/
-#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/err.h>
#include <linux/soc/samsung/exynos-pmu.h>
-#include <linux/soc/samsung/exynos-regs-pmu.h>
#include "pinctrl-samsung.h"
#include "pinctrl-exynos.h"
@@ -50,27 +47,6 @@ static inline struct exynos_irq_chip *to_exynos_irq_chip(struct irq_chip *chip)
return container_of(chip, struct exynos_irq_chip, chip);
}
-static const struct samsung_pin_bank_type bank_type_off = {
- .fld_width = { 4, 1, 2, 2, 2, 2, },
- .reg_offset = { 0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, },
-};
-
-static const struct samsung_pin_bank_type bank_type_alive = {
- .fld_width = { 4, 1, 2, 2, },
- .reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
-};
-
-/* Exynos5433 has the 4bit widths for PINCFG_TYPE_DRV bitfields. */
-static const struct samsung_pin_bank_type exynos5433_bank_type_off = {
- .fld_width = { 4, 1, 2, 4, 2, 2, },
- .reg_offset = { 0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, },
-};
-
-static const struct samsung_pin_bank_type exynos5433_bank_type_alive = {
- .fld_width = { 4, 1, 2, 4, },
- .reg_offset = { 0x00, 0x04, 0x08, 0x0c, },
-};
-
static void exynos_irq_mask(struct irq_data *irqd)
{
struct irq_chip *chip = irq_data_get_irq_chip(irqd);
@@ -205,8 +181,6 @@ static int exynos_irq_request_resources(struct irq_data *irqd)
spin_unlock_irqrestore(&bank->slock, flags);
- exynos_irq_unmask(irqd);
-
return 0;
}
@@ -226,8 +200,6 @@ static void exynos_irq_release_resources(struct irq_data *irqd)
shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC];
mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
- exynos_irq_mask(irqd);
-
spin_lock_irqsave(&bank->slock, flags);
con = readl(bank->eint_base + reg_con);
@@ -308,7 +280,7 @@ struct exynos_eint_gpio_save {
* exynos_eint_gpio_init() - setup handling of external gpio interrupts.
* @d: driver data of samsung pinctrl driver.
*/
-static int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
+int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d)
{
struct samsung_pin_bank *bank;
struct device *dev = d->dev;
@@ -387,7 +359,7 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
/*
* irq_chip for wakeup interrupts
*/
-static struct exynos_irq_chip exynos4210_wkup_irq_chip __initdata = {
+static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = {
.chip = {
.name = "exynos4210_wkup_irq_chip",
.irq_unmask = exynos_irq_unmask,
@@ -403,7 +375,7 @@ static struct exynos_irq_chip exynos4210_wkup_irq_chip __initdata = {
.eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
};
-static struct exynos_irq_chip exynos7_wkup_irq_chip __initdata = {
+static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = {
.chip = {
.name = "exynos7_wkup_irq_chip",
.irq_unmask = exynos_irq_unmask,
@@ -483,7 +455,7 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
* exynos_eint_wkup_init() - setup handling of external wakeup interrupts.
* @d: driver data of samsung pinctrl driver.
*/
-static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
+int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
{
struct device *dev = d->dev;
struct device_node *wkup_np = NULL;
@@ -503,6 +475,8 @@ static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d)
if (match) {
irq_chip = kmemdup(match->data,
sizeof(*irq_chip), GFP_KERNEL);
+ if (!irq_chip)
+ return -ENOMEM;
wkup_np = np;
break;
}
@@ -599,7 +573,7 @@ static void exynos_pinctrl_suspend_bank(
pr_debug("%s: save fltcon1 %#010x\n", bank->name, save->eint_fltcon1);
}
-static void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
+void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata)
{
struct samsung_pin_bank *bank = drvdata->pin_banks;
int i;
@@ -634,7 +608,7 @@ static void exynos_pinctrl_resume_bank(
+ 2 * bank->eint_offset + 4);
}
-static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
+void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
{
struct samsung_pin_bank *bank = drvdata->pin_banks;
int i;
@@ -644,114 +618,6 @@ static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
exynos_pinctrl_resume_bank(drvdata, bank);
}
-/* Retention control for S5PV210 are located at the end of clock controller */
-#define S5P_OTHERS 0xE000
-
-#define S5P_OTHERS_RET_IO (1 << 31)
-#define S5P_OTHERS_RET_CF (1 << 30)
-#define S5P_OTHERS_RET_MMC (1 << 29)
-#define S5P_OTHERS_RET_UART (1 << 28)
-
-static void s5pv210_retention_disable(struct samsung_pinctrl_drv_data *drvdata)
-{
- void *clk_base = drvdata->retention_ctrl->priv;
- u32 tmp;
-
- tmp = __raw_readl(clk_base + S5P_OTHERS);
- tmp |= (S5P_OTHERS_RET_IO | S5P_OTHERS_RET_CF | S5P_OTHERS_RET_MMC |
- S5P_OTHERS_RET_UART);
- __raw_writel(tmp, clk_base + S5P_OTHERS);
-}
-
-static struct samsung_retention_ctrl *
-s5pv210_retention_init(struct samsung_pinctrl_drv_data *drvdata,
- const struct samsung_retention_data *data)
-{
- struct samsung_retention_ctrl *ctrl;
- struct device_node *np;
- void *clk_base;
-
- ctrl = devm_kzalloc(drvdata->dev, sizeof(*ctrl), GFP_KERNEL);
- if (!ctrl)
- return ERR_PTR(-ENOMEM);
-
- np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
- if (!np) {
- pr_err("%s: failed to find clock controller DT node\n",
- __func__);
- return ERR_PTR(-ENODEV);
- }
-
- clk_base = of_iomap(np, 0);
- if (!clk_base) {
- pr_err("%s: failed to map clock registers\n", __func__);
- return ERR_PTR(-EINVAL);
- }
-
- ctrl->priv = clk_base;
- ctrl->disable = s5pv210_retention_disable;
-
- return ctrl;
-}
-
-static const struct samsung_retention_data s5pv210_retention_data __initconst = {
- .init = s5pv210_retention_init,
-};
-
-/* pin banks of s5pv210 pin-controller */
-static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
- EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpa1", 0x04),
- EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
- EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
- EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
- EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14),
- EXYNOS_PIN_BANK_EINTG(6, 0x0c0, "gpd1", 0x18),
- EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpe0", 0x1c),
- EXYNOS_PIN_BANK_EINTG(5, 0x100, "gpe1", 0x20),
- EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpf0", 0x24),
- EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpf1", 0x28),
- EXYNOS_PIN_BANK_EINTG(8, 0x160, "gpf2", 0x2c),
- EXYNOS_PIN_BANK_EINTG(6, 0x180, "gpf3", 0x30),
- EXYNOS_PIN_BANK_EINTG(7, 0x1a0, "gpg0", 0x34),
- EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38),
- EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c),
- EXYNOS_PIN_BANK_EINTG(7, 0x200, "gpg3", 0x40),
- EXYNOS_PIN_BANK_EINTN(7, 0x220, "gpi"),
- EXYNOS_PIN_BANK_EINTG(8, 0x240, "gpj0", 0x44),
- EXYNOS_PIN_BANK_EINTG(6, 0x260, "gpj1", 0x48),
- EXYNOS_PIN_BANK_EINTG(8, 0x280, "gpj2", 0x4c),
- EXYNOS_PIN_BANK_EINTG(8, 0x2a0, "gpj3", 0x50),
- EXYNOS_PIN_BANK_EINTG(5, 0x2c0, "gpj4", 0x54),
- EXYNOS_PIN_BANK_EINTN(8, 0x2e0, "mp01"),
- EXYNOS_PIN_BANK_EINTN(4, 0x300, "mp02"),
- EXYNOS_PIN_BANK_EINTN(8, 0x320, "mp03"),
- EXYNOS_PIN_BANK_EINTN(8, 0x340, "mp04"),
- EXYNOS_PIN_BANK_EINTN(8, 0x360, "mp05"),
- EXYNOS_PIN_BANK_EINTN(8, 0x380, "mp06"),
- EXYNOS_PIN_BANK_EINTN(8, 0x3a0, "mp07"),
- EXYNOS_PIN_BANK_EINTW(8, 0xc00, "gph0", 0x00),
- EXYNOS_PIN_BANK_EINTW(8, 0xc20, "gph1", 0x04),
- EXYNOS_PIN_BANK_EINTW(8, 0xc40, "gph2", 0x08),
- EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gph3", 0x0c),
-};
-
-const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = {
- {
- /* pin-controller instance 0 data */
- .pin_banks = s5pv210_pin_bank,
- .nr_banks = ARRAY_SIZE(s5pv210_pin_bank),
- .eint_gpio_init = exynos_eint_gpio_init,
- .eint_wkup_init = exynos_eint_wkup_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &s5pv210_retention_data,
- },
-};
-
-/* Pad retention control code for accessing PMU regmap */
-static atomic_t exynos_shared_retention_refcnt;
-
static void exynos_retention_enable(struct samsung_pinctrl_drv_data *drvdata)
{
if (drvdata->retention_ctrl->refcnt)
@@ -771,7 +637,7 @@ static void exynos_retention_disable(struct samsung_pinctrl_drv_data *drvdata)
regmap_write(pmu_regs, ctrl->regs[i], ctrl->value);
}
-static struct samsung_retention_ctrl *
+struct samsung_retention_ctrl *
exynos_retention_init(struct samsung_pinctrl_drv_data *drvdata,
const struct samsung_retention_data *data)
{
@@ -801,1022 +667,3 @@ exynos_retention_init(struct samsung_pinctrl_drv_data *drvdata,
return ctrl;
}
-
-/* pin banks of exynos3250 pin-controller 0 */
-static const struct samsung_pin_bank_data exynos3250_pin_banks0[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
- EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
- EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
- EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
- EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
- EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14),
- EXYNOS_PIN_BANK_EINTG(4, 0x0c0, "gpd1", 0x18),
-};
-
-/* pin banks of exynos3250 pin-controller 1 */
-static const struct samsung_pin_bank_data exynos3250_pin_banks1[] __initconst = {
- EXYNOS_PIN_BANK_EINTN(8, 0x120, "gpe0"),
- EXYNOS_PIN_BANK_EINTN(8, 0x140, "gpe1"),
- EXYNOS_PIN_BANK_EINTN(3, 0x180, "gpe2"),
- EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpk0", 0x08),
- EXYNOS_PIN_BANK_EINTG(7, 0x060, "gpk1", 0x0c),
- EXYNOS_PIN_BANK_EINTG(7, 0x080, "gpk2", 0x10),
- EXYNOS_PIN_BANK_EINTG(4, 0x0c0, "gpl0", 0x18),
- EXYNOS_PIN_BANK_EINTG(8, 0x260, "gpm0", 0x24),
- EXYNOS_PIN_BANK_EINTG(7, 0x280, "gpm1", 0x28),
- EXYNOS_PIN_BANK_EINTG(5, 0x2a0, "gpm2", 0x2c),
- EXYNOS_PIN_BANK_EINTG(8, 0x2c0, "gpm3", 0x30),
- EXYNOS_PIN_BANK_EINTG(8, 0x2e0, "gpm4", 0x34),
- EXYNOS_PIN_BANK_EINTW(8, 0xc00, "gpx0", 0x00),
- EXYNOS_PIN_BANK_EINTW(8, 0xc20, "gpx1", 0x04),
- EXYNOS_PIN_BANK_EINTW(8, 0xc40, "gpx2", 0x08),
- EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gpx3", 0x0c),
-};
-
-/*
- * PMU pad retention groups for Exynos3250 doesn't match pin banks, so handle
- * them all together
- */
-static const u32 exynos3250_retention_regs[] = {
- S5P_PAD_RET_MAUDIO_OPTION,
- S5P_PAD_RET_GPIO_OPTION,
- S5P_PAD_RET_UART_OPTION,
- S5P_PAD_RET_MMCA_OPTION,
- S5P_PAD_RET_MMCB_OPTION,
- S5P_PAD_RET_EBIA_OPTION,
- S5P_PAD_RET_EBIB_OPTION,
- S5P_PAD_RET_MMC2_OPTION,
- S5P_PAD_RET_SPI_OPTION,
-};
-
-static const struct samsung_retention_data exynos3250_retention_data __initconst = {
- .regs = exynos3250_retention_regs,
- .nr_regs = ARRAY_SIZE(exynos3250_retention_regs),
- .value = EXYNOS_WAKEUP_FROM_LOWPWR,
- .refcnt = &exynos_shared_retention_refcnt,
- .init = exynos_retention_init,
-};
-
-/*
- * Samsung pinctrl driver data for Exynos3250 SoC. Exynos3250 SoC includes
- * two gpio/pin-mux/pinconfig controllers.
- */
-const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = {
- {
- /* pin-controller instance 0 data */
- .pin_banks = exynos3250_pin_banks0,
- .nr_banks = ARRAY_SIZE(exynos3250_pin_banks0),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos3250_retention_data,
- }, {
- /* pin-controller instance 1 data */
- .pin_banks = exynos3250_pin_banks1,
- .nr_banks = ARRAY_SIZE(exynos3250_pin_banks1),
- .eint_gpio_init = exynos_eint_gpio_init,
- .eint_wkup_init = exynos_eint_wkup_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos3250_retention_data,
- },
-};
-
-/* pin banks of exynos4210 pin-controller 0 */
-static const struct samsung_pin_bank_data exynos4210_pin_banks0[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
- EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
- EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
- EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
- EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
- EXYNOS_PIN_BANK_EINTG(4, 0x0A0, "gpd0", 0x14),
- EXYNOS_PIN_BANK_EINTG(4, 0x0C0, "gpd1", 0x18),
- EXYNOS_PIN_BANK_EINTG(5, 0x0E0, "gpe0", 0x1c),
- EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpe1", 0x20),
- EXYNOS_PIN_BANK_EINTG(6, 0x120, "gpe2", 0x24),
- EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpe3", 0x28),
- EXYNOS_PIN_BANK_EINTG(8, 0x160, "gpe4", 0x2c),
- EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpf0", 0x30),
- EXYNOS_PIN_BANK_EINTG(8, 0x1A0, "gpf1", 0x34),
- EXYNOS_PIN_BANK_EINTG(8, 0x1C0, "gpf2", 0x38),
- EXYNOS_PIN_BANK_EINTG(6, 0x1E0, "gpf3", 0x3c),
-};
-
-/* pin banks of exynos4210 pin-controller 1 */
-static const struct samsung_pin_bank_data exynos4210_pin_banks1[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpj0", 0x00),
- EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpj1", 0x04),
- EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpk0", 0x08),
- EXYNOS_PIN_BANK_EINTG(7, 0x060, "gpk1", 0x0c),
- EXYNOS_PIN_BANK_EINTG(7, 0x080, "gpk2", 0x10),
- EXYNOS_PIN_BANK_EINTG(7, 0x0A0, "gpk3", 0x14),
- EXYNOS_PIN_BANK_EINTG(8, 0x0C0, "gpl0", 0x18),
- EXYNOS_PIN_BANK_EINTG(3, 0x0E0, "gpl1", 0x1c),
- EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpl2", 0x20),
- EXYNOS_PIN_BANK_EINTN(6, 0x120, "gpy0"),
- EXYNOS_PIN_BANK_EINTN(4, 0x140, "gpy1"),
- EXYNOS_PIN_BANK_EINTN(6, 0x160, "gpy2"),
- EXYNOS_PIN_BANK_EINTN(8, 0x180, "gpy3"),
- EXYNOS_PIN_BANK_EINTN(8, 0x1A0, "gpy4"),
- EXYNOS_PIN_BANK_EINTN(8, 0x1C0, "gpy5"),
- EXYNOS_PIN_BANK_EINTN(8, 0x1E0, "gpy6"),
- EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
- EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
- EXYNOS_PIN_BANK_EINTW(8, 0xC40, "gpx2", 0x08),
- EXYNOS_PIN_BANK_EINTW(8, 0xC60, "gpx3", 0x0c),
-};
-
-/* pin banks of exynos4210 pin-controller 2 */
-static const struct samsung_pin_bank_data exynos4210_pin_banks2[] __initconst = {
- EXYNOS_PIN_BANK_EINTN(7, 0x000, "gpz"),
-};
-
-/* PMU pad retention groups registers for Exynos4 (without audio) */
-static const u32 exynos4_retention_regs[] = {
- S5P_PAD_RET_GPIO_OPTION,
- S5P_PAD_RET_UART_OPTION,
- S5P_PAD_RET_MMCA_OPTION,
- S5P_PAD_RET_MMCB_OPTION,
- S5P_PAD_RET_EBIA_OPTION,
- S5P_PAD_RET_EBIB_OPTION,
-};
-
-static const struct samsung_retention_data exynos4_retention_data __initconst = {
- .regs = exynos4_retention_regs,
- .nr_regs = ARRAY_SIZE(exynos4_retention_regs),
- .value = EXYNOS_WAKEUP_FROM_LOWPWR,
- .refcnt = &exynos_shared_retention_refcnt,
- .init = exynos_retention_init,
-};
-
-/* PMU retention control for audio pins can be tied to audio pin bank */
-static const u32 exynos4_audio_retention_regs[] = {
- S5P_PAD_RET_MAUDIO_OPTION,
-};
-
-static const struct samsung_retention_data exynos4_audio_retention_data __initconst = {
- .regs = exynos4_audio_retention_regs,
- .nr_regs = ARRAY_SIZE(exynos4_audio_retention_regs),
- .value = EXYNOS_WAKEUP_FROM_LOWPWR,
- .init = exynos_retention_init,
-};
-
-/*
- * Samsung pinctrl driver data for Exynos4210 SoC. Exynos4210 SoC includes
- * three gpio/pin-mux/pinconfig controllers.
- */
-const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = {
- {
- /* pin-controller instance 0 data */
- .pin_banks = exynos4210_pin_banks0,
- .nr_banks = ARRAY_SIZE(exynos4210_pin_banks0),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos4_retention_data,
- }, {
- /* pin-controller instance 1 data */
- .pin_banks = exynos4210_pin_banks1,
- .nr_banks = ARRAY_SIZE(exynos4210_pin_banks1),
- .eint_gpio_init = exynos_eint_gpio_init,
- .eint_wkup_init = exynos_eint_wkup_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos4_retention_data,
- }, {
- /* pin-controller instance 2 data */
- .pin_banks = exynos4210_pin_banks2,
- .nr_banks = ARRAY_SIZE(exynos4210_pin_banks2),
- .retention_data = &exynos4_audio_retention_data,
- },
-};
-
-/* pin banks of exynos4x12 pin-controller 0 */
-static const struct samsung_pin_bank_data exynos4x12_pin_banks0[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
- EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
- EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
- EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
- EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
- EXYNOS_PIN_BANK_EINTG(4, 0x0A0, "gpd0", 0x14),
- EXYNOS_PIN_BANK_EINTG(4, 0x0C0, "gpd1", 0x18),
- EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpf0", 0x30),
- EXYNOS_PIN_BANK_EINTG(8, 0x1A0, "gpf1", 0x34),
- EXYNOS_PIN_BANK_EINTG(8, 0x1C0, "gpf2", 0x38),
- EXYNOS_PIN_BANK_EINTG(6, 0x1E0, "gpf3", 0x3c),
- EXYNOS_PIN_BANK_EINTG(8, 0x240, "gpj0", 0x40),
- EXYNOS_PIN_BANK_EINTG(5, 0x260, "gpj1", 0x44),
-};
-
-/* pin banks of exynos4x12 pin-controller 1 */
-static const struct samsung_pin_bank_data exynos4x12_pin_banks1[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpk0", 0x08),
- EXYNOS_PIN_BANK_EINTG(7, 0x060, "gpk1", 0x0c),
- EXYNOS_PIN_BANK_EINTG(7, 0x080, "gpk2", 0x10),
- EXYNOS_PIN_BANK_EINTG(7, 0x0A0, "gpk3", 0x14),
- EXYNOS_PIN_BANK_EINTG(7, 0x0C0, "gpl0", 0x18),
- EXYNOS_PIN_BANK_EINTG(2, 0x0E0, "gpl1", 0x1c),
- EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpl2", 0x20),
- EXYNOS_PIN_BANK_EINTG(8, 0x260, "gpm0", 0x24),
- EXYNOS_PIN_BANK_EINTG(7, 0x280, "gpm1", 0x28),
- EXYNOS_PIN_BANK_EINTG(5, 0x2A0, "gpm2", 0x2c),
- EXYNOS_PIN_BANK_EINTG(8, 0x2C0, "gpm3", 0x30),
- EXYNOS_PIN_BANK_EINTG(8, 0x2E0, "gpm4", 0x34),
- EXYNOS_PIN_BANK_EINTN(6, 0x120, "gpy0"),
- EXYNOS_PIN_BANK_EINTN(4, 0x140, "gpy1"),
- EXYNOS_PIN_BANK_EINTN(6, 0x160, "gpy2"),
- EXYNOS_PIN_BANK_EINTN(8, 0x180, "gpy3"),
- EXYNOS_PIN_BANK_EINTN(8, 0x1A0, "gpy4"),
- EXYNOS_PIN_BANK_EINTN(8, 0x1C0, "gpy5"),
- EXYNOS_PIN_BANK_EINTN(8, 0x1E0, "gpy6"),
- EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
- EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
- EXYNOS_PIN_BANK_EINTW(8, 0xC40, "gpx2", 0x08),
- EXYNOS_PIN_BANK_EINTW(8, 0xC60, "gpx3", 0x0c),
-};
-
-/* pin banks of exynos4x12 pin-controller 2 */
-static const struct samsung_pin_bank_data exynos4x12_pin_banks2[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
-};
-
-/* pin banks of exynos4x12 pin-controller 3 */
-static const struct samsung_pin_bank_data exynos4x12_pin_banks3[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00),
- EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04),
- EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpv2", 0x08),
- EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpv3", 0x0c),
- EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpv4", 0x10),
-};
-
-/*
- * Samsung pinctrl driver data for Exynos4x12 SoC. Exynos4x12 SoC includes
- * four gpio/pin-mux/pinconfig controllers.
- */
-const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = {
- {
- /* pin-controller instance 0 data */
- .pin_banks = exynos4x12_pin_banks0,
- .nr_banks = ARRAY_SIZE(exynos4x12_pin_banks0),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos4_retention_data,
- }, {
- /* pin-controller instance 1 data */
- .pin_banks = exynos4x12_pin_banks1,
- .nr_banks = ARRAY_SIZE(exynos4x12_pin_banks1),
- .eint_gpio_init = exynos_eint_gpio_init,
- .eint_wkup_init = exynos_eint_wkup_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos4_retention_data,
- }, {
- /* pin-controller instance 2 data */
- .pin_banks = exynos4x12_pin_banks2,
- .nr_banks = ARRAY_SIZE(exynos4x12_pin_banks2),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos4_audio_retention_data,
- }, {
- /* pin-controller instance 3 data */
- .pin_banks = exynos4x12_pin_banks3,
- .nr_banks = ARRAY_SIZE(exynos4x12_pin_banks3),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- },
-};
-
-/* pin banks of exynos5250 pin-controller 0 */
-static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
- EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
- EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
- EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpb0", 0x0c),
- EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpb1", 0x10),
- EXYNOS_PIN_BANK_EINTG(4, 0x0A0, "gpb2", 0x14),
- EXYNOS_PIN_BANK_EINTG(4, 0x0C0, "gpb3", 0x18),
- EXYNOS_PIN_BANK_EINTG(7, 0x0E0, "gpc0", 0x1c),
- EXYNOS_PIN_BANK_EINTG(4, 0x100, "gpc1", 0x20),
- EXYNOS_PIN_BANK_EINTG(7, 0x120, "gpc2", 0x24),
- EXYNOS_PIN_BANK_EINTG(7, 0x140, "gpc3", 0x28),
- EXYNOS_PIN_BANK_EINTG(4, 0x160, "gpd0", 0x2c),
- EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpd1", 0x30),
- EXYNOS_PIN_BANK_EINTG(7, 0x2E0, "gpc4", 0x34),
- EXYNOS_PIN_BANK_EINTN(6, 0x1A0, "gpy0"),
- EXYNOS_PIN_BANK_EINTN(4, 0x1C0, "gpy1"),
- EXYNOS_PIN_BANK_EINTN(6, 0x1E0, "gpy2"),
- EXYNOS_PIN_BANK_EINTN(8, 0x200, "gpy3"),
- EXYNOS_PIN_BANK_EINTN(8, 0x220, "gpy4"),
- EXYNOS_PIN_BANK_EINTN(8, 0x240, "gpy5"),
- EXYNOS_PIN_BANK_EINTN(8, 0x260, "gpy6"),
- EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
- EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
- EXYNOS_PIN_BANK_EINTW(8, 0xC40, "gpx2", 0x08),
- EXYNOS_PIN_BANK_EINTW(8, 0xC60, "gpx3", 0x0c),
-};
-
-/* pin banks of exynos5250 pin-controller 1 */
-static const struct samsung_pin_bank_data exynos5250_pin_banks1[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpe0", 0x00),
- EXYNOS_PIN_BANK_EINTG(2, 0x020, "gpe1", 0x04),
- EXYNOS_PIN_BANK_EINTG(4, 0x040, "gpf0", 0x08),
- EXYNOS_PIN_BANK_EINTG(4, 0x060, "gpf1", 0x0c),
- EXYNOS_PIN_BANK_EINTG(8, 0x080, "gpg0", 0x10),
- EXYNOS_PIN_BANK_EINTG(8, 0x0A0, "gpg1", 0x14),
- EXYNOS_PIN_BANK_EINTG(2, 0x0C0, "gpg2", 0x18),
- EXYNOS_PIN_BANK_EINTG(4, 0x0E0, "gph0", 0x1c),
- EXYNOS_PIN_BANK_EINTG(8, 0x100, "gph1", 0x20),
-};
-
-/* pin banks of exynos5250 pin-controller 2 */
-static const struct samsung_pin_bank_data exynos5250_pin_banks2[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00),
- EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04),
- EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpv2", 0x08),
- EXYNOS_PIN_BANK_EINTG(8, 0x080, "gpv3", 0x0c),
- EXYNOS_PIN_BANK_EINTG(2, 0x0C0, "gpv4", 0x10),
-};
-
-/* pin banks of exynos5250 pin-controller 3 */
-static const struct samsung_pin_bank_data exynos5250_pin_banks3[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
-};
-
-/*
- * Samsung pinctrl driver data for Exynos5250 SoC. Exynos5250 SoC includes
- * four gpio/pin-mux/pinconfig controllers.
- */
-const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = {
- {
- /* pin-controller instance 0 data */
- .pin_banks = exynos5250_pin_banks0,
- .nr_banks = ARRAY_SIZE(exynos5250_pin_banks0),
- .eint_gpio_init = exynos_eint_gpio_init,
- .eint_wkup_init = exynos_eint_wkup_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos4_retention_data,
- }, {
- /* pin-controller instance 1 data */
- .pin_banks = exynos5250_pin_banks1,
- .nr_banks = ARRAY_SIZE(exynos5250_pin_banks1),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos4_retention_data,
- }, {
- /* pin-controller instance 2 data */
- .pin_banks = exynos5250_pin_banks2,
- .nr_banks = ARRAY_SIZE(exynos5250_pin_banks2),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- }, {
- /* pin-controller instance 3 data */
- .pin_banks = exynos5250_pin_banks3,
- .nr_banks = ARRAY_SIZE(exynos5250_pin_banks3),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos4_audio_retention_data,
- },
-};
-
-/* pin banks of exynos5260 pin-controller 0 */
-static const struct samsung_pin_bank_data exynos5260_pin_banks0[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00),
- EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpa1", 0x04),
- EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
- EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpb0", 0x0c),
- EXYNOS_PIN_BANK_EINTG(4, 0x080, "gpb1", 0x10),
- EXYNOS_PIN_BANK_EINTG(5, 0x0a0, "gpb2", 0x14),
- EXYNOS_PIN_BANK_EINTG(8, 0x0c0, "gpb3", 0x18),
- EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpb4", 0x1c),
- EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpb5", 0x20),
- EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd0", 0x24),
- EXYNOS_PIN_BANK_EINTG(7, 0x140, "gpd1", 0x28),
- EXYNOS_PIN_BANK_EINTG(5, 0x160, "gpd2", 0x2c),
- EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpe0", 0x30),
- EXYNOS_PIN_BANK_EINTG(5, 0x1a0, "gpe1", 0x34),
- EXYNOS_PIN_BANK_EINTG(4, 0x1c0, "gpf0", 0x38),
- EXYNOS_PIN_BANK_EINTG(8, 0x1e0, "gpf1", 0x3c),
- EXYNOS_PIN_BANK_EINTG(2, 0x200, "gpk0", 0x40),
- EXYNOS_PIN_BANK_EINTW(8, 0xc00, "gpx0", 0x00),
- EXYNOS_PIN_BANK_EINTW(8, 0xc20, "gpx1", 0x04),
- EXYNOS_PIN_BANK_EINTW(8, 0xc40, "gpx2", 0x08),
- EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gpx3", 0x0c),
-};
-
-/* pin banks of exynos5260 pin-controller 1 */
-static const struct samsung_pin_bank_data exynos5260_pin_banks1[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpc0", 0x00),
- EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpc1", 0x04),
- EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpc2", 0x08),
- EXYNOS_PIN_BANK_EINTG(4, 0x060, "gpc3", 0x0c),
- EXYNOS_PIN_BANK_EINTG(4, 0x080, "gpc4", 0x10),
-};
-
-/* pin banks of exynos5260 pin-controller 2 */
-static const struct samsung_pin_bank_data exynos5260_pin_banks2[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
- EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
-};
-
-/*
- * Samsung pinctrl driver data for Exynos5260 SoC. Exynos5260 SoC includes
- * three gpio/pin-mux/pinconfig controllers.
- */
-const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = {
- {
- /* pin-controller instance 0 data */
- .pin_banks = exynos5260_pin_banks0,
- .nr_banks = ARRAY_SIZE(exynos5260_pin_banks0),
- .eint_gpio_init = exynos_eint_gpio_init,
- .eint_wkup_init = exynos_eint_wkup_init,
- }, {
- /* pin-controller instance 1 data */
- .pin_banks = exynos5260_pin_banks1,
- .nr_banks = ARRAY_SIZE(exynos5260_pin_banks1),
- .eint_gpio_init = exynos_eint_gpio_init,
- }, {
- /* pin-controller instance 2 data */
- .pin_banks = exynos5260_pin_banks2,
- .nr_banks = ARRAY_SIZE(exynos5260_pin_banks2),
- .eint_gpio_init = exynos_eint_gpio_init,
- },
-};
-
-/* pin banks of exynos5410 pin-controller 0 */
-static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
- EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
- EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
- EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpb0", 0x0c),
- EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpb1", 0x10),
- EXYNOS_PIN_BANK_EINTG(4, 0x0A0, "gpb2", 0x14),
- EXYNOS_PIN_BANK_EINTG(4, 0x0C0, "gpb3", 0x18),
- EXYNOS_PIN_BANK_EINTG(7, 0x0E0, "gpc0", 0x1c),
- EXYNOS_PIN_BANK_EINTG(4, 0x100, "gpc3", 0x20),
- EXYNOS_PIN_BANK_EINTG(7, 0x120, "gpc1", 0x24),
- EXYNOS_PIN_BANK_EINTG(7, 0x140, "gpc2", 0x28),
- EXYNOS_PIN_BANK_EINTN(2, 0x160, "gpm5"),
- EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpd1", 0x2c),
- EXYNOS_PIN_BANK_EINTG(8, 0x1A0, "gpe0", 0x30),
- EXYNOS_PIN_BANK_EINTG(2, 0x1C0, "gpe1", 0x34),
- EXYNOS_PIN_BANK_EINTG(6, 0x1E0, "gpf0", 0x38),
- EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpf1", 0x3c),
- EXYNOS_PIN_BANK_EINTG(8, 0x220, "gpg0", 0x40),
- EXYNOS_PIN_BANK_EINTG(8, 0x240, "gpg1", 0x44),
- EXYNOS_PIN_BANK_EINTG(2, 0x260, "gpg2", 0x48),
- EXYNOS_PIN_BANK_EINTG(4, 0x280, "gph0", 0x4c),
- EXYNOS_PIN_BANK_EINTG(8, 0x2A0, "gph1", 0x50),
- EXYNOS_PIN_BANK_EINTN(8, 0x2C0, "gpm7"),
- EXYNOS_PIN_BANK_EINTN(6, 0x2E0, "gpy0"),
- EXYNOS_PIN_BANK_EINTN(4, 0x300, "gpy1"),
- EXYNOS_PIN_BANK_EINTN(6, 0x320, "gpy2"),
- EXYNOS_PIN_BANK_EINTN(8, 0x340, "gpy3"),
- EXYNOS_PIN_BANK_EINTN(8, 0x360, "gpy4"),
- EXYNOS_PIN_BANK_EINTN(8, 0x380, "gpy5"),
- EXYNOS_PIN_BANK_EINTN(8, 0x3A0, "gpy6"),
- EXYNOS_PIN_BANK_EINTN(8, 0x3C0, "gpy7"),
- EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
- EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
- EXYNOS_PIN_BANK_EINTW(8, 0xC40, "gpx2", 0x08),
- EXYNOS_PIN_BANK_EINTW(8, 0xC60, "gpx3", 0x0c),
-};
-
-/* pin banks of exynos5410 pin-controller 1 */
-static const struct samsung_pin_bank_data exynos5410_pin_banks1[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(5, 0x000, "gpj0", 0x00),
- EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpj1", 0x04),
- EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpj2", 0x08),
- EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpj3", 0x0c),
- EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpj4", 0x10),
- EXYNOS_PIN_BANK_EINTG(8, 0x0A0, "gpk0", 0x14),
- EXYNOS_PIN_BANK_EINTG(8, 0x0C0, "gpk1", 0x18),
- EXYNOS_PIN_BANK_EINTG(8, 0x0E0, "gpk2", 0x1c),
- EXYNOS_PIN_BANK_EINTG(7, 0x100, "gpk3", 0x20),
-};
-
-/* pin banks of exynos5410 pin-controller 2 */
-static const struct samsung_pin_bank_data exynos5410_pin_banks2[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpv0", 0x00),
- EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpv1", 0x04),
- EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpv2", 0x08),
- EXYNOS_PIN_BANK_EINTG(8, 0x080, "gpv3", 0x0c),
- EXYNOS_PIN_BANK_EINTG(2, 0x0C0, "gpv4", 0x10),
-};
-
-/* pin banks of exynos5410 pin-controller 3 */
-static const struct samsung_pin_bank_data exynos5410_pin_banks3[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
-};
-
-/*
- * Samsung pinctrl driver data for Exynos5410 SoC. Exynos5410 SoC includes
- * four gpio/pin-mux/pinconfig controllers.
- */
-const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = {
- {
- /* pin-controller instance 0 data */
- .pin_banks = exynos5410_pin_banks0,
- .nr_banks = ARRAY_SIZE(exynos5410_pin_banks0),
- .eint_gpio_init = exynos_eint_gpio_init,
- .eint_wkup_init = exynos_eint_wkup_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- }, {
- /* pin-controller instance 1 data */
- .pin_banks = exynos5410_pin_banks1,
- .nr_banks = ARRAY_SIZE(exynos5410_pin_banks1),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- }, {
- /* pin-controller instance 2 data */
- .pin_banks = exynos5410_pin_banks2,
- .nr_banks = ARRAY_SIZE(exynos5410_pin_banks2),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- }, {
- /* pin-controller instance 3 data */
- .pin_banks = exynos5410_pin_banks3,
- .nr_banks = ARRAY_SIZE(exynos5410_pin_banks3),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- },
-};
-
-/* pin banks of exynos5420 pin-controller 0 */
-static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00),
- EXYNOS_PIN_BANK_EINTW(8, 0xC00, "gpx0", 0x00),
- EXYNOS_PIN_BANK_EINTW(8, 0xC20, "gpx1", 0x04),
- EXYNOS_PIN_BANK_EINTW(8, 0xC40, "gpx2", 0x08),
- EXYNOS_PIN_BANK_EINTW(8, 0xC60, "gpx3", 0x0c),
-};
-
-/* pin banks of exynos5420 pin-controller 1 */
-static const struct samsung_pin_bank_data exynos5420_pin_banks1[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpc0", 0x00),
- EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpc1", 0x04),
- EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpc2", 0x08),
- EXYNOS_PIN_BANK_EINTG(4, 0x060, "gpc3", 0x0c),
- EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpc4", 0x10),
- EXYNOS_PIN_BANK_EINTG(8, 0x0A0, "gpd1", 0x14),
- EXYNOS_PIN_BANK_EINTN(6, 0x0C0, "gpy0"),
- EXYNOS_PIN_BANK_EINTN(4, 0x0E0, "gpy1"),
- EXYNOS_PIN_BANK_EINTN(6, 0x100, "gpy2"),
- EXYNOS_PIN_BANK_EINTN(8, 0x120, "gpy3"),
- EXYNOS_PIN_BANK_EINTN(8, 0x140, "gpy4"),
- EXYNOS_PIN_BANK_EINTN(8, 0x160, "gpy5"),
- EXYNOS_PIN_BANK_EINTN(8, 0x180, "gpy6"),
-};
-
-/* pin banks of exynos5420 pin-controller 2 */
-static const struct samsung_pin_bank_data exynos5420_pin_banks2[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpe0", 0x00),
- EXYNOS_PIN_BANK_EINTG(2, 0x020, "gpe1", 0x04),
- EXYNOS_PIN_BANK_EINTG(6, 0x040, "gpf0", 0x08),
- EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpf1", 0x0c),
- EXYNOS_PIN_BANK_EINTG(8, 0x080, "gpg0", 0x10),
- EXYNOS_PIN_BANK_EINTG(8, 0x0A0, "gpg1", 0x14),
- EXYNOS_PIN_BANK_EINTG(2, 0x0C0, "gpg2", 0x18),
- EXYNOS_PIN_BANK_EINTG(4, 0x0E0, "gpj4", 0x1c),
-};
-
-/* pin banks of exynos5420 pin-controller 3 */
-static const struct samsung_pin_bank_data exynos5420_pin_banks3[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
- EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
- EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
- EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpb0", 0x0c),
- EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpb1", 0x10),
- EXYNOS_PIN_BANK_EINTG(4, 0x0A0, "gpb2", 0x14),
- EXYNOS_PIN_BANK_EINTG(8, 0x0C0, "gpb3", 0x18),
- EXYNOS_PIN_BANK_EINTG(2, 0x0E0, "gpb4", 0x1c),
- EXYNOS_PIN_BANK_EINTG(8, 0x100, "gph0", 0x20),
-};
-
-/* pin banks of exynos5420 pin-controller 4 */
-static const struct samsung_pin_bank_data exynos5420_pin_banks4[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz", 0x00),
-};
-
-/* PMU pad retention groups registers for Exynos5420 (without audio) */
-static const u32 exynos5420_retention_regs[] = {
- EXYNOS_PAD_RET_DRAM_OPTION,
- EXYNOS_PAD_RET_JTAG_OPTION,
- EXYNOS5420_PAD_RET_GPIO_OPTION,
- EXYNOS5420_PAD_RET_UART_OPTION,
- EXYNOS5420_PAD_RET_MMCA_OPTION,
- EXYNOS5420_PAD_RET_MMCB_OPTION,
- EXYNOS5420_PAD_RET_MMCC_OPTION,
- EXYNOS5420_PAD_RET_HSI_OPTION,
- EXYNOS_PAD_RET_EBIA_OPTION,
- EXYNOS_PAD_RET_EBIB_OPTION,
- EXYNOS5420_PAD_RET_SPI_OPTION,
- EXYNOS5420_PAD_RET_DRAM_COREBLK_OPTION,
-};
-
-static const struct samsung_retention_data exynos5420_retention_data __initconst = {
- .regs = exynos5420_retention_regs,
- .nr_regs = ARRAY_SIZE(exynos5420_retention_regs),
- .value = EXYNOS_WAKEUP_FROM_LOWPWR,
- .refcnt = &exynos_shared_retention_refcnt,
- .init = exynos_retention_init,
-};
-
-/*
- * Samsung pinctrl driver data for Exynos5420 SoC. Exynos5420 SoC includes
- * four gpio/pin-mux/pinconfig controllers.
- */
-const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
- {
- /* pin-controller instance 0 data */
- .pin_banks = exynos5420_pin_banks0,
- .nr_banks = ARRAY_SIZE(exynos5420_pin_banks0),
- .eint_gpio_init = exynos_eint_gpio_init,
- .eint_wkup_init = exynos_eint_wkup_init,
- .retention_data = &exynos5420_retention_data,
- }, {
- /* pin-controller instance 1 data */
- .pin_banks = exynos5420_pin_banks1,
- .nr_banks = ARRAY_SIZE(exynos5420_pin_banks1),
- .eint_gpio_init = exynos_eint_gpio_init,
- .retention_data = &exynos5420_retention_data,
- }, {
- /* pin-controller instance 2 data */
- .pin_banks = exynos5420_pin_banks2,
- .nr_banks = ARRAY_SIZE(exynos5420_pin_banks2),
- .eint_gpio_init = exynos_eint_gpio_init,
- .retention_data = &exynos5420_retention_data,
- }, {
- /* pin-controller instance 3 data */
- .pin_banks = exynos5420_pin_banks3,
- .nr_banks = ARRAY_SIZE(exynos5420_pin_banks3),
- .eint_gpio_init = exynos_eint_gpio_init,
- .retention_data = &exynos5420_retention_data,
- }, {
- /* pin-controller instance 4 data */
- .pin_banks = exynos5420_pin_banks4,
- .nr_banks = ARRAY_SIZE(exynos5420_pin_banks4),
- .eint_gpio_init = exynos_eint_gpio_init,
- .retention_data = &exynos4_audio_retention_data,
- },
-};
-
-/* pin banks of exynos5433 pin-controller - ALIVE */
-static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = {
- EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
- EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
- EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
- EXYNOS5433_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
- EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
- EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
- EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
- EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
- EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
-};
-
-/* pin banks of exynos5433 pin-controller - AUD */
-static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = {
- EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
- EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
-};
-
-/* pin banks of exynos5433 pin-controller - CPIF */
-static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = {
- EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
-};
-
-/* pin banks of exynos5433 pin-controller - eSE */
-static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = {
- EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
-};
-
-/* pin banks of exynos5433 pin-controller - FINGER */
-static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = {
- EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
-};
-
-/* pin banks of exynos5433 pin-controller - FSYS */
-static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = {
- EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
- EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
- EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
- EXYNOS5433_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
- EXYNOS5433_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
- EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
-};
-
-/* pin banks of exynos5433 pin-controller - IMEM */
-static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = {
- EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
-};
-
-/* pin banks of exynos5433 pin-controller - NFC */
-static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = {
- EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
-};
-
-/* pin banks of exynos5433 pin-controller - PERIC */
-static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = {
- EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
- EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
- EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
- EXYNOS5433_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
- EXYNOS5433_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
- EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
- EXYNOS5433_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
- EXYNOS5433_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
- EXYNOS5433_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
- EXYNOS5433_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
- EXYNOS5433_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
- EXYNOS5433_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
- EXYNOS5433_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
- EXYNOS5433_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
- EXYNOS5433_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
- EXYNOS5433_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
- EXYNOS5433_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
-};
-
-/* pin banks of exynos5433 pin-controller - TOUCH */
-static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = {
- EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
-};
-
-/* PMU pin retention groups registers for Exynos5433 (without audio & fsys) */
-static const u32 exynos5433_retention_regs[] = {
- EXYNOS5433_PAD_RETENTION_TOP_OPTION,
- EXYNOS5433_PAD_RETENTION_UART_OPTION,
- EXYNOS5433_PAD_RETENTION_EBIA_OPTION,
- EXYNOS5433_PAD_RETENTION_EBIB_OPTION,
- EXYNOS5433_PAD_RETENTION_SPI_OPTION,
- EXYNOS5433_PAD_RETENTION_MIF_OPTION,
- EXYNOS5433_PAD_RETENTION_USBXTI_OPTION,
- EXYNOS5433_PAD_RETENTION_BOOTLDO_OPTION,
- EXYNOS5433_PAD_RETENTION_UFS_OPTION,
- EXYNOS5433_PAD_RETENTION_FSYSGENIO_OPTION,
-};
-
-static const struct samsung_retention_data exynos5433_retention_data __initconst = {
- .regs = exynos5433_retention_regs,
- .nr_regs = ARRAY_SIZE(exynos5433_retention_regs),
- .value = EXYNOS_WAKEUP_FROM_LOWPWR,
- .refcnt = &exynos_shared_retention_refcnt,
- .init = exynos_retention_init,
-};
-
-/* PMU retention control for audio pins can be tied to audio pin bank */
-static const u32 exynos5433_audio_retention_regs[] = {
- EXYNOS5433_PAD_RETENTION_AUD_OPTION,
-};
-
-static const struct samsung_retention_data exynos5433_audio_retention_data __initconst = {
- .regs = exynos5433_audio_retention_regs,
- .nr_regs = ARRAY_SIZE(exynos5433_audio_retention_regs),
- .value = EXYNOS_WAKEUP_FROM_LOWPWR,
- .init = exynos_retention_init,
-};
-
-/* PMU retention control for mmc pins can be tied to fsys pin bank */
-static const u32 exynos5433_fsys_retention_regs[] = {
- EXYNOS5433_PAD_RETENTION_MMC0_OPTION,
- EXYNOS5433_PAD_RETENTION_MMC1_OPTION,
- EXYNOS5433_PAD_RETENTION_MMC2_OPTION,
-};
-
-static const struct samsung_retention_data exynos5433_fsys_retention_data __initconst = {
- .regs = exynos5433_fsys_retention_regs,
- .nr_regs = ARRAY_SIZE(exynos5433_fsys_retention_regs),
- .value = EXYNOS_WAKEUP_FROM_LOWPWR,
- .init = exynos_retention_init,
-};
-
-/*
- * Samsung pinctrl driver data for Exynos5433 SoC. Exynos5433 SoC includes
- * ten gpio/pin-mux/pinconfig controllers.
- */
-const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = {
- {
- /* pin-controller instance 0 data */
- .pin_banks = exynos5433_pin_banks0,
- .nr_banks = ARRAY_SIZE(exynos5433_pin_banks0),
- .eint_wkup_init = exynos_eint_wkup_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .nr_ext_resources = 1,
- .retention_data = &exynos5433_retention_data,
- }, {
- /* pin-controller instance 1 data */
- .pin_banks = exynos5433_pin_banks1,
- .nr_banks = ARRAY_SIZE(exynos5433_pin_banks1),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos5433_audio_retention_data,
- }, {
- /* pin-controller instance 2 data */
- .pin_banks = exynos5433_pin_banks2,
- .nr_banks = ARRAY_SIZE(exynos5433_pin_banks2),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos5433_retention_data,
- }, {
- /* pin-controller instance 3 data */
- .pin_banks = exynos5433_pin_banks3,
- .nr_banks = ARRAY_SIZE(exynos5433_pin_banks3),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos5433_retention_data,
- }, {
- /* pin-controller instance 4 data */
- .pin_banks = exynos5433_pin_banks4,
- .nr_banks = ARRAY_SIZE(exynos5433_pin_banks4),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos5433_retention_data,
- }, {
- /* pin-controller instance 5 data */
- .pin_banks = exynos5433_pin_banks5,
- .nr_banks = ARRAY_SIZE(exynos5433_pin_banks5),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos5433_fsys_retention_data,
- }, {
- /* pin-controller instance 6 data */
- .pin_banks = exynos5433_pin_banks6,
- .nr_banks = ARRAY_SIZE(exynos5433_pin_banks6),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos5433_retention_data,
- }, {
- /* pin-controller instance 7 data */
- .pin_banks = exynos5433_pin_banks7,
- .nr_banks = ARRAY_SIZE(exynos5433_pin_banks7),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos5433_retention_data,
- }, {
- /* pin-controller instance 8 data */
- .pin_banks = exynos5433_pin_banks8,
- .nr_banks = ARRAY_SIZE(exynos5433_pin_banks8),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos5433_retention_data,
- }, {
- /* pin-controller instance 9 data */
- .pin_banks = exynos5433_pin_banks9,
- .nr_banks = ARRAY_SIZE(exynos5433_pin_banks9),
- .eint_gpio_init = exynos_eint_gpio_init,
- .suspend = exynos_pinctrl_suspend,
- .resume = exynos_pinctrl_resume,
- .retention_data = &exynos5433_retention_data,
- },
-};
-
-/* pin banks of exynos7 pin-controller - ALIVE */
-static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = {
- EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
- EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
- EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
- EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
-};
-
-/* pin banks of exynos7 pin-controller - BUS0 */
-static const struct samsung_pin_bank_data exynos7_pin_banks1[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(5, 0x000, "gpb0", 0x00),
- EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpc0", 0x04),
- EXYNOS_PIN_BANK_EINTG(2, 0x040, "gpc1", 0x08),
- EXYNOS_PIN_BANK_EINTG(6, 0x060, "gpc2", 0x0c),
- EXYNOS_PIN_BANK_EINTG(8, 0x080, "gpc3", 0x10),
- EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14),
- EXYNOS_PIN_BANK_EINTG(6, 0x0c0, "gpd1", 0x18),
- EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpd2", 0x1c),
- EXYNOS_PIN_BANK_EINTG(5, 0x100, "gpd4", 0x20),
- EXYNOS_PIN_BANK_EINTG(4, 0x120, "gpd5", 0x24),
- EXYNOS_PIN_BANK_EINTG(6, 0x140, "gpd6", 0x28),
- EXYNOS_PIN_BANK_EINTG(3, 0x160, "gpd7", 0x2c),
- EXYNOS_PIN_BANK_EINTG(2, 0x180, "gpd8", 0x30),
- EXYNOS_PIN_BANK_EINTG(2, 0x1a0, "gpg0", 0x34),
- EXYNOS_PIN_BANK_EINTG(4, 0x1c0, "gpg3", 0x38),
-};
-
-/* pin banks of exynos7 pin-controller - NFC */
-static const struct samsung_pin_bank_data exynos7_pin_banks2[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
-};
-
-/* pin banks of exynos7 pin-controller - TOUCH */
-static const struct samsung_pin_bank_data exynos7_pin_banks3[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
-};
-
-/* pin banks of exynos7 pin-controller - FF */
-static const struct samsung_pin_bank_data exynos7_pin_banks4[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpg4", 0x00),
-};
-
-/* pin banks of exynos7 pin-controller - ESE */
-static const struct samsung_pin_bank_data exynos7_pin_banks5[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(5, 0x000, "gpv7", 0x00),
-};
-
-/* pin banks of exynos7 pin-controller - FSYS0 */
-static const struct samsung_pin_bank_data exynos7_pin_banks6[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpr4", 0x00),
-};
-
-/* pin banks of exynos7 pin-controller - FSYS1 */
-static const struct samsung_pin_bank_data exynos7_pin_banks7[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpr0", 0x00),
- EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpr1", 0x04),
- EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr2", 0x08),
- EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr3", 0x0c),
-};
-
-/* pin banks of exynos7 pin-controller - BUS1 */
-static const struct samsung_pin_bank_data exynos7_pin_banks8[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(8, 0x020, "gpf0", 0x00),
- EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpf1", 0x04),
- EXYNOS_PIN_BANK_EINTG(4, 0x060, "gpf2", 0x08),
- EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpf3", 0x0c),
- EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpf4", 0x10),
- EXYNOS_PIN_BANK_EINTG(8, 0x0c0, "gpf5", 0x14),
- EXYNOS_PIN_BANK_EINTG(5, 0x0e0, "gpg1", 0x18),
- EXYNOS_PIN_BANK_EINTG(5, 0x100, "gpg2", 0x1c),
- EXYNOS_PIN_BANK_EINTG(6, 0x120, "gph1", 0x20),
- EXYNOS_PIN_BANK_EINTG(3, 0x140, "gpv6", 0x24),
-};
-
-static const struct samsung_pin_bank_data exynos7_pin_banks9[] __initconst = {
- EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
- EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
-};
-
-const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = {
- {
- /* pin-controller instance 0 Alive data */
- .pin_banks = exynos7_pin_banks0,
- .nr_banks = ARRAY_SIZE(exynos7_pin_banks0),
- .eint_wkup_init = exynos_eint_wkup_init,
- }, {
- /* pin-controller instance 1 BUS0 data */
- .pin_banks = exynos7_pin_banks1,
- .nr_banks = ARRAY_SIZE(exynos7_pin_banks1),
- .eint_gpio_init = exynos_eint_gpio_init,
- }, {
- /* pin-controller instance 2 NFC data */
- .pin_banks = exynos7_pin_banks2,
- .nr_banks = ARRAY_SIZE(exynos7_pin_banks2),
- .eint_gpio_init = exynos_eint_gpio_init,
- }, {
- /* pin-controller instance 3 TOUCH data */
- .pin_banks = exynos7_pin_banks3,
- .nr_banks = ARRAY_SIZE(exynos7_pin_banks3),
- .eint_gpio_init = exynos_eint_gpio_init,
- }, {
- /* pin-controller instance 4 FF data */
- .pin_banks = exynos7_pin_banks4,
- .nr_banks = ARRAY_SIZE(exynos7_pin_banks4),
- .eint_gpio_init = exynos_eint_gpio_init,
- }, {
- /* pin-controller instance 5 ESE data */
- .pin_banks = exynos7_pin_banks5,
- .nr_banks = ARRAY_SIZE(exynos7_pin_banks5),
- .eint_gpio_init = exynos_eint_gpio_init,
- }, {
- /* pin-controller instance 6 FSYS0 data */
- .pin_banks = exynos7_pin_banks6,
- .nr_banks = ARRAY_SIZE(exynos7_pin_banks6),
- .eint_gpio_init = exynos_eint_gpio_init,
- }, {
- /* pin-controller instance 7 FSYS1 data */
- .pin_banks = exynos7_pin_banks7,
- .nr_banks = ARRAY_SIZE(exynos7_pin_banks7),
- .eint_gpio_init = exynos_eint_gpio_init,
- }, {
- /* pin-controller instance 8 BUS1 data */
- .pin_banks = exynos7_pin_banks8,
- .nr_banks = ARRAY_SIZE(exynos7_pin_banks8),
- .eint_gpio_init = exynos_eint_gpio_init,
- }, {
- /* pin-controller instance 9 AUD data */
- .pin_banks = exynos7_pin_banks9,
- .nr_banks = ARRAY_SIZE(exynos7_pin_banks9),
- .eint_gpio_init = exynos_eint_gpio_init,
- },
-};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index cd046eb7d705..b90139715c8f 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
@@ -17,6 +17,9 @@
* (at your option) any later version.
*/
+#ifndef __PINCTRL_SAMSUNG_EXYNOS_H
+#define __PINCTRL_SAMSUNG_EXYNOS_H
+
/* External GPIO and wakeup interrupt related definitions */
#define EXYNOS_GPIO_ECON_OFFSET 0x700
#define EXYNOS_GPIO_EFLTCON_OFFSET 0x800
@@ -131,3 +134,13 @@ struct exynos_muxed_weint_data {
unsigned int nr_banks;
struct samsung_pin_bank *banks[];
};
+
+int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d);
+int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d);
+void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata);
+void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata);
+struct samsung_retention_ctrl *
+exynos_retention_init(struct samsung_pinctrl_drv_data *drvdata,
+ const struct samsung_retention_data *data);
+
+#endif /* __PINCTRL_SAMSUNG_EXYNOS_H */
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
index 3000df80709f..32a3a9fd65c4 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
@@ -1,6 +1,8 @@
/*
* pin-controller/pin-mux/pin-config/gpio-driver for Samsung's EXYNOS5440 SoC.
*
+ * Author: Thomas Abraham <[email protected]>
+ *
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
@@ -10,7 +12,7 @@
* (at your option) any later version.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -991,7 +993,6 @@ static const struct of_device_id exynos5440_pinctrl_dt_match[] = {
{ .compatible = "samsung,exynos5440-pinctrl" },
{},
};
-MODULE_DEVICE_TABLE(of, exynos5440_pinctrl_dt_match);
static struct platform_driver exynos5440_pinctrl_driver = {
.probe = exynos5440_pinctrl_probe,
@@ -1007,13 +1008,3 @@ static int __init exynos5440_pinctrl_drv_register(void)
return platform_driver_register(&exynos5440_pinctrl_driver);
}
postcore_initcall(exynos5440_pinctrl_drv_register);
-
-static void __exit exynos5440_pinctrl_drv_unregister(void)
-{
- platform_driver_unregister(&exynos5440_pinctrl_driver);
-}
-module_exit(exynos5440_pinctrl_drv_unregister);
-
-MODULE_AUTHOR("Thomas Abraham <[email protected]>");
-MODULE_DESCRIPTION("Samsung EXYNOS5440 SoC pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index b82a003546ae..49774851e84a 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -13,7 +13,7 @@
* external gpio and wakeup interrupt support.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index f17890aa6e25..4a88d7446e87 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -15,7 +15,7 @@
* external gpio and wakeup interrupt support.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index a4a0da5d2a32..f542642eed8d 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -20,7 +20,7 @@
* and wakeup interrupts can be hooked to.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -1183,27 +1183,29 @@ static int __maybe_unused samsung_pinctrl_resume(struct device *dev)
}
static const struct of_device_id samsung_pinctrl_dt_match[] = {
-#ifdef CONFIG_PINCTRL_EXYNOS
+#ifdef CONFIG_PINCTRL_EXYNOS_ARM
{ .compatible = "samsung,exynos3250-pinctrl",
- .data = (void *)exynos3250_pin_ctrl },
+ .data = exynos3250_pin_ctrl },
{ .compatible = "samsung,exynos4210-pinctrl",
- .data = (void *)exynos4210_pin_ctrl },
+ .data = exynos4210_pin_ctrl },
{ .compatible = "samsung,exynos4x12-pinctrl",
- .data = (void *)exynos4x12_pin_ctrl },
+ .data = exynos4x12_pin_ctrl },
{ .compatible = "samsung,exynos5250-pinctrl",
- .data = (void *)exynos5250_pin_ctrl },
+ .data = exynos5250_pin_ctrl },
{ .compatible = "samsung,exynos5260-pinctrl",
- .data = (void *)exynos5260_pin_ctrl },
+ .data = exynos5260_pin_ctrl },
{ .compatible = "samsung,exynos5410-pinctrl",
- .data = (void *)exynos5410_pin_ctrl },
+ .data = exynos5410_pin_ctrl },
{ .compatible = "samsung,exynos5420-pinctrl",
- .data = (void *)exynos5420_pin_ctrl },
- { .compatible = "samsung,exynos5433-pinctrl",
- .data = (void *)exynos5433_pin_ctrl },
+ .data = exynos5420_pin_ctrl },
{ .compatible = "samsung,s5pv210-pinctrl",
- .data = (void *)s5pv210_pin_ctrl },
+ .data = s5pv210_pin_ctrl },
+#endif
+#ifdef CONFIG_PINCTRL_EXYNOS_ARM64
+ { .compatible = "samsung,exynos5433-pinctrl",
+ .data = exynos5433_pin_ctrl },
{ .compatible = "samsung,exynos7-pinctrl",
- .data = (void *)exynos7_pin_ctrl },
+ .data = exynos7_pin_ctrl },
#endif
#ifdef CONFIG_PINCTRL_S3C64XX
{ .compatible = "samsung,s3c64xx-pinctrl",
@@ -1221,7 +1223,6 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
#endif
{},
};
-MODULE_DEVICE_TABLE(of, samsung_pinctrl_dt_match);
static const struct dev_pm_ops samsung_pinctrl_pm_ops = {
SET_LATE_SYSTEM_SLEEP_PM_OPS(samsung_pinctrl_suspend,
@@ -1243,13 +1244,3 @@ static int __init samsung_pinctrl_drv_register(void)
return platform_driver_register(&samsung_pinctrl_driver);
}
postcore_initcall(samsung_pinctrl_drv_register);
-
-static void __exit samsung_pinctrl_drv_unregister(void)
-{
- platform_driver_unregister(&samsung_pinctrl_driver);
-}
-module_exit(samsung_pinctrl_drv_unregister);
-
-MODULE_AUTHOR("Thomas Abraham <[email protected]>");
-MODULE_DESCRIPTION("Samsung pinctrl driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig
index 07eca54bdc1c..24f76a05a5a9 100644
--- a/drivers/pinctrl/sh-pfc/Kconfig
+++ b/drivers/pinctrl/sh-pfc/Kconfig
@@ -34,6 +34,16 @@ config PINCTRL_PFC_R8A7740
depends on ARCH_R8A7740
select PINCTRL_SH_PFC_GPIO
+config PINCTRL_PFC_R8A7743
+ def_bool y
+ depends on ARCH_R8A7743
+ select PINCTRL_SH_PFC
+
+config PINCTRL_PFC_R8A7745
+ def_bool y
+ depends on ARCH_R8A7745
+ select PINCTRL_SH_PFC
+
config PINCTRL_PFC_R8A7778
def_bool y
depends on ARCH_R8A7778
diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile
index 8e08684774af..33d28eed9ba3 100644
--- a/drivers/pinctrl/sh-pfc/Makefile
+++ b/drivers/pinctrl/sh-pfc/Makefile
@@ -3,6 +3,8 @@ obj-$(CONFIG_PINCTRL_SH_PFC_GPIO) += gpio.o
obj-$(CONFIG_PINCTRL_PFC_EMEV2) += pfc-emev2.o
obj-$(CONFIG_PINCTRL_PFC_R8A73A4) += pfc-r8a73a4.o
obj-$(CONFIG_PINCTRL_PFC_R8A7740) += pfc-r8a7740.o
+obj-$(CONFIG_PINCTRL_PFC_R8A7743) += pfc-r8a7791.o
+obj-$(CONFIG_PINCTRL_PFC_R8A7745) += pfc-r8a7794.o
obj-$(CONFIG_PINCTRL_PFC_R8A7778) += pfc-r8a7778.o
obj-$(CONFIG_PINCTRL_PFC_R8A7779) += pfc-r8a7779.o
obj-$(CONFIG_PINCTRL_PFC_R8A7790) += pfc-r8a7790.o
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 4a5a0feb931b..e72391d5e57d 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -485,6 +485,18 @@ static const struct of_device_id sh_pfc_of_table[] = {
.data = &r8a7740_pinmux_info,
},
#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A7743
+ {
+ .compatible = "renesas,pfc-r8a7743",
+ .data = &r8a7743_pinmux_info,
+ },
+#endif
+#ifdef CONFIG_PINCTRL_PFC_R8A7745
+ {
+ .compatible = "renesas,pfc-r8a7745",
+ .data = &r8a7745_pinmux_info,
+ },
+#endif
#ifdef CONFIG_PINCTRL_PFC_R8A7778
{
.compatible = "renesas,pfc-r8a7778",
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 2ed7eeb50aac..4c5ffbd75be7 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -1,8 +1,8 @@
/*
- * r8a7791 processor support - PFC hardware block.
+ * r8a7791/r8a7743 processor support - PFC hardware block.
*
* Copyright (C) 2013 Renesas Electronics Corporation
- * Copyright (C) 2014-2015 Cogent Embedded, Inc.
+ * Copyright (C) 2014-2017 Cogent Embedded, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
@@ -119,22 +119,22 @@ enum {
/* IPSR0 */
FN_D0, FN_D1, FN_D2, FN_D3, FN_D4, FN_D5, FN_D6, FN_D7, FN_D8,
FN_D9, FN_D10, FN_D11, FN_D12, FN_D13, FN_D14, FN_D15,
- FN_A0, FN_ATAWR0_N_C, FN_MSIOF0_SCK_B, FN_SCL0_C, FN_PWM2_B,
+ FN_A0, FN_ATAWR0_N_C, FN_MSIOF0_SCK_B, FN_I2C0_SCL_C, FN_PWM2_B,
FN_A1, FN_MSIOF0_SYNC_B, FN_A2, FN_MSIOF0_SS1_B,
FN_A3, FN_MSIOF0_SS2_B, FN_A4, FN_MSIOF0_TXD_B,
FN_A5, FN_MSIOF0_RXD_B, FN_A6, FN_MSIOF1_SCK,
/* IPSR1 */
- FN_A7, FN_MSIOF1_SYNC, FN_A8, FN_MSIOF1_SS1, FN_SCL0,
- FN_A9, FN_MSIOF1_SS2, FN_SDA0,
+ FN_A7, FN_MSIOF1_SYNC, FN_A8, FN_MSIOF1_SS1, FN_I2C0_SCL,
+ FN_A9, FN_MSIOF1_SS2, FN_I2C0_SDA,
FN_A10, FN_MSIOF1_TXD, FN_MSIOF1_TXD_D,
- FN_A11, FN_MSIOF1_RXD, FN_SCL3_D, FN_MSIOF1_RXD_D,
- FN_A12, FN_FMCLK, FN_SDA3_D, FN_MSIOF1_SCK_D,
+ FN_A11, FN_MSIOF1_RXD, FN_I2C3_SCL_D, FN_MSIOF1_RXD_D,
+ FN_A12, FN_FMCLK, FN_I2C3_SDA_D, FN_MSIOF1_SCK_D,
FN_A13, FN_ATAG0_N_C, FN_BPFCLK, FN_MSIOF1_SS1_D,
FN_A14, FN_ATADIR0_N_C, FN_FMIN, FN_FMIN_C, FN_MSIOF1_SYNC_D,
FN_A15, FN_BPFCLK_C,
FN_A16, FN_DREQ2_B, FN_FMCLK_C, FN_SCIFA1_SCK_B,
- FN_A17, FN_DACK2_B, FN_SDA0_C,
+ FN_A17, FN_DACK2_B, FN_I2C0_SDA_C,
FN_A18, FN_DREQ1, FN_SCIFA1_RXD_C, FN_SCIFB1_RXD_C,
/* IPSR2 */
@@ -145,8 +145,8 @@ enum {
FN_A23, FN_IO2, FN_BPFCLK_B, FN_RX0, FN_SCIFA0_RXD,
FN_A24, FN_DREQ2, FN_IO3, FN_TX1, FN_SCIFA1_TXD,
FN_A25, FN_DACK2, FN_SSL, FN_DREQ1_C, FN_RX1, FN_SCIFA1_RXD,
- FN_CS0_N, FN_ATAG0_N_B, FN_SCL1,
- FN_CS1_N_A26, FN_ATADIR0_N_B, FN_SDA1,
+ FN_CS0_N, FN_ATAG0_N_B, FN_I2C1_SCL,
+ FN_CS1_N_A26, FN_ATADIR0_N_B, FN_I2C1_SDA,
FN_EX_CS1_N, FN_MSIOF2_SCK,
FN_EX_CS2_N, FN_ATAWR0_N, FN_MSIOF2_SYNC,
FN_EX_CS3_N, FN_ATADIR0_N, FN_MSIOF2_TXD, FN_ATAG0_N, FN_EX_WAIT1,
@@ -169,12 +169,13 @@ enum {
FN_SSI_WS0129, FN_HTX0_C, FN_HTX2_C, FN_SCIFB0_TXD_C, FN_SCIFB2_TXD_C,
/* IPSR4 */
- FN_SSI_SDATA0, FN_SCL0_B, FN_SCL7_B, FN_MSIOF2_SCK_C,
- FN_SSI_SCK1, FN_SDA0_B, FN_SDA7_B, FN_MSIOF2_SYNC_C, FN_GLO_I0_D,
- FN_SSI_WS1, FN_SCL1_B, FN_SCL8_B, FN_MSIOF2_TXD_C, FN_GLO_I1_D,
- FN_SSI_SDATA1, FN_SDA1_B, FN_SDA8_B, FN_MSIOF2_RXD_C,
- FN_SSI_SCK2, FN_SCL2, FN_GPS_CLK_B, FN_GLO_Q0_D, FN_HSCK1_E,
- FN_SSI_WS2, FN_SDA2, FN_GPS_SIGN_B, FN_RX2_E,
+ FN_SSI_SDATA0, FN_I2C0_SCL_B, FN_IIC0_SCL_B, FN_MSIOF2_SCK_C,
+ FN_SSI_SCK1, FN_I2C0_SDA_B, FN_IIC0_SDA_B, FN_MSIOF2_SYNC_C,
+ FN_GLO_I0_D,
+ FN_SSI_WS1, FN_I2C1_SCL_B, FN_IIC1_SCL_B, FN_MSIOF2_TXD_C, FN_GLO_I1_D,
+ FN_SSI_SDATA1, FN_I2C1_SDA_B, FN_IIC1_SDA_B, FN_MSIOF2_RXD_C,
+ FN_SSI_SCK2, FN_I2C2_SCL, FN_GPS_CLK_B, FN_GLO_Q0_D, FN_HSCK1_E,
+ FN_SSI_WS2, FN_I2C2_SDA, FN_GPS_SIGN_B, FN_RX2_E,
FN_GLO_Q1_D, FN_HCTS1_N_E,
FN_SSI_SDATA2, FN_GPS_MAG_B, FN_TX2_E, FN_HRTS1_N_E,
FN_SSI_SCK34, FN_SSI_WS34, FN_SSI_SDATA3,
@@ -210,10 +211,10 @@ enum {
FN_IRQ0, FN_SCIFB1_RXD_D, FN_INTC_IRQ0_N,
FN_IRQ1, FN_SCIFB1_SCK_C, FN_INTC_IRQ1_N,
FN_IRQ2, FN_SCIFB1_TXD_D, FN_INTC_IRQ2_N,
- FN_IRQ3, FN_SCL4_C, FN_MSIOF2_TXD_E, FN_INTC_IRQ3_N,
- FN_IRQ4, FN_HRX1_C, FN_SDA4_C, FN_MSIOF2_RXD_E, FN_INTC_IRQ4_N,
- FN_IRQ5, FN_HTX1_C, FN_SCL1_E, FN_MSIOF2_SCK_E,
- FN_IRQ6, FN_HSCK1_C, FN_MSIOF1_SS2_B, FN_SDA1_E, FN_MSIOF2_SYNC_E,
+ FN_IRQ3, FN_I2C4_SCL_C, FN_MSIOF2_TXD_E, FN_INTC_IRQ3_N,
+ FN_IRQ4, FN_HRX1_C, FN_I2C4_SDA_C, FN_MSIOF2_RXD_E, FN_INTC_IRQ4_N,
+ FN_IRQ5, FN_HTX1_C, FN_I2C1_SCL_E, FN_MSIOF2_SCK_E,
+ FN_IRQ6, FN_HSCK1_C, FN_MSIOF1_SS2_B, FN_I2C1_SDA_E, FN_MSIOF2_SYNC_E,
FN_IRQ7, FN_HCTS1_N_C, FN_MSIOF1_TXD_B, FN_GPS_CLK_C, FN_GPS_CLK_D,
FN_IRQ8, FN_HRTS1_N_C, FN_MSIOF1_RXD_B, FN_GPS_SIGN_C, FN_GPS_SIGN_D,
@@ -257,16 +258,16 @@ enum {
FN_DU1_DB5, FN_LCDOUT21, FN_TX3, FN_SCIFA3_TXD, FN_CAN1_TX,
/* IPSR9 */
- FN_DU1_DB6, FN_LCDOUT22, FN_SCL3_C, FN_RX3, FN_SCIFA3_RXD,
- FN_DU1_DB7, FN_LCDOUT23, FN_SDA3_C, FN_SCIF3_SCK, FN_SCIFA3_SCK,
+ FN_DU1_DB6, FN_LCDOUT22, FN_I2C3_SCL_C, FN_RX3, FN_SCIFA3_RXD,
+ FN_DU1_DB7, FN_LCDOUT23, FN_I2C3_SDA_C, FN_SCIF3_SCK, FN_SCIFA3_SCK,
FN_DU1_DOTCLKIN, FN_QSTVA_QVS,
FN_DU1_DOTCLKOUT0, FN_QCLK,
FN_DU1_DOTCLKOUT1, FN_QSTVB_QVE, FN_CAN0_TX,
- FN_TX3_B, FN_SCL2_B, FN_PWM4,
+ FN_TX3_B, FN_I2C2_SCL_B, FN_PWM4,
FN_DU1_EXHSYNC_DU1_HSYNC, FN_QSTH_QHS,
FN_DU1_EXVSYNC_DU1_VSYNC, FN_QSTB_QHE,
FN_DU1_EXODDF_DU1_ODDF_DISP_CDE, FN_QCPV_QDE,
- FN_CAN0_RX, FN_RX3_B, FN_SDA2_B,
+ FN_CAN0_RX, FN_RX3_B, FN_I2C2_SDA_B,
FN_DU1_DISP, FN_QPOLA,
FN_DU1_CDE, FN_QPOLB, FN_PWM4_B,
FN_VI0_CLKENB, FN_TX4, FN_SCIFA4_TXD, FN_TS_SDATA0_D,
@@ -274,15 +275,15 @@ enum {
FN_VI0_HSYNC_N, FN_TX5, FN_SCIFA5_TXD, FN_TS_SDEN0_D,
FN_VI0_VSYNC_N, FN_RX5, FN_SCIFA5_RXD, FN_TS_SPSYNC0_D,
FN_VI0_DATA3_VI0_B3, FN_SCIF3_SCK_B, FN_SCIFA3_SCK_B,
- FN_VI0_G0, FN_SCL8, FN_STP_IVCXO27_0_C, FN_SCL4,
+ FN_VI0_G0, FN_IIC1_SCL, FN_STP_IVCXO27_0_C, FN_I2C4_SCL,
FN_HCTS2_N, FN_SCIFB2_CTS_N, FN_ATAWR1_N,
/* IPSR10 */
- FN_VI0_G1, FN_SDA8, FN_STP_ISCLK_0_C, FN_SDA4,
+ FN_VI0_G1, FN_IIC1_SDA, FN_STP_ISCLK_0_C, FN_I2C4_SDA,
FN_HRTS2_N, FN_SCIFB2_RTS_N, FN_ATADIR1_N,
- FN_VI0_G2, FN_VI2_HSYNC_N, FN_STP_ISD_0_C, FN_SCL3_B,
+ FN_VI0_G2, FN_VI2_HSYNC_N, FN_STP_ISD_0_C, FN_I2C3_SCL_B,
FN_HSCK2, FN_SCIFB2_SCK, FN_ATARD1_N,
- FN_VI0_G3, FN_VI2_VSYNC_N, FN_STP_ISEN_0_C, FN_SDA3_B,
+ FN_VI0_G3, FN_VI2_VSYNC_N, FN_STP_ISEN_0_C, FN_I2C3_SDA_B,
FN_HRX2, FN_SCIFB2_RXD, FN_ATACS01_N,
FN_VI0_G4, FN_VI2_CLKENB, FN_STP_ISSYNC_0_C,
FN_HTX2, FN_SCIFB2_TXD, FN_SCIFB0_SCK_D,
@@ -296,13 +297,13 @@ enum {
FN_TS_SCK0_C, FN_ATAG1_N,
FN_VI0_R2, FN_VI2_DATA3, FN_GLO_Q0_B, FN_TS_SDEN0_C,
FN_VI0_R3, FN_VI2_DATA4, FN_GLO_Q1_B, FN_TS_SPSYNC0_C,
- FN_VI0_R4, FN_VI2_DATA5, FN_GLO_SCLK_B, FN_TX0_C, FN_SCL1_D,
+ FN_VI0_R4, FN_VI2_DATA5, FN_GLO_SCLK_B, FN_TX0_C, FN_I2C1_SCL_D,
/* IPSR11 */
- FN_VI0_R5, FN_VI2_DATA6, FN_GLO_SDATA_B, FN_RX0_C, FN_SDA1_D,
- FN_VI0_R6, FN_VI2_DATA7, FN_GLO_SS_B, FN_TX1_C, FN_SCL4_B,
+ FN_VI0_R5, FN_VI2_DATA6, FN_GLO_SDATA_B, FN_RX0_C, FN_I2C1_SDA_D,
+ FN_VI0_R6, FN_VI2_DATA7, FN_GLO_SS_B, FN_TX1_C, FN_I2C4_SCL_B,
FN_VI0_R7, FN_GLO_RFON_B, FN_RX1_C, FN_CAN0_RX_E,
- FN_SDA4_B, FN_HRX1_D, FN_SCIFB0_RXD_D,
+ FN_I2C4_SDA_B, FN_HRX1_D, FN_SCIFB0_RXD_D,
FN_VI1_HSYNC_N, FN_AVB_RXD0, FN_TS_SDATA0_B, FN_TX4_B, FN_SCIFA4_TXD_B,
FN_VI1_VSYNC_N, FN_AVB_RXD1, FN_TS_SCK0_B, FN_RX4_B, FN_SCIFA4_RXD_B,
FN_VI1_CLKENB, FN_AVB_RXD2, FN_TS_SDEN0_B,
@@ -312,15 +313,15 @@ enum {
FN_VI1_DATA3, FN_AVB_RX_ER, FN_VI1_DATA4, FN_AVB_MDIO,
FN_VI1_DATA5, FN_AVB_RX_DV, FN_VI1_DATA6, FN_AVB_MAGIC,
FN_VI1_DATA7, FN_AVB_MDC,
- FN_ETH_MDIO, FN_AVB_RX_CLK, FN_SCL2_C,
- FN_ETH_CRS_DV, FN_AVB_LINK, FN_SDA2_C,
+ FN_ETH_MDIO, FN_AVB_RX_CLK, FN_I2C2_SCL_C,
+ FN_ETH_CRS_DV, FN_AVB_LINK, FN_I2C2_SDA_C,
/* IPSR12 */
- FN_ETH_RX_ER, FN_AVB_CRS, FN_SCL3, FN_SCL7,
- FN_ETH_RXD0, FN_AVB_PHY_INT, FN_SDA3, FN_SDA7,
+ FN_ETH_RX_ER, FN_AVB_CRS, FN_I2C3_SCL, FN_IIC0_SCL,
+ FN_ETH_RXD0, FN_AVB_PHY_INT, FN_I2C3_SDA, FN_IIC0_SDA,
FN_ETH_RXD1, FN_AVB_GTXREFCLK, FN_CAN0_TX_C,
- FN_SCL2_D, FN_MSIOF1_RXD_E,
- FN_ETH_LINK, FN_AVB_TXD0, FN_CAN0_RX_C, FN_SDA2_D, FN_MSIOF1_SCK_E,
+ FN_I2C2_SCL_D, FN_MSIOF1_RXD_E,
+ FN_ETH_LINK, FN_AVB_TXD0, FN_CAN0_RX_C, FN_I2C2_SDA_D, FN_MSIOF1_SCK_E,
FN_ETH_REFCLK, FN_AVB_TXD1, FN_SCIFA3_RXD_B,
FN_CAN1_RX_C, FN_MSIOF1_SYNC_E,
FN_ETH_TXD1, FN_AVB_TXD2, FN_SCIFA3_TXD_B,
@@ -351,23 +352,23 @@ enum {
FN_SD1_CMD, FN_REMOCON_B, FN_SD1_DATA0, FN_SPEEDIN_B,
FN_SD1_DATA1, FN_IETX_B, FN_SD1_DATA2, FN_IECLK_B,
FN_SD1_DATA3, FN_IERX_B,
- FN_SD1_CD, FN_PWM0, FN_TPU_TO0, FN_SCL1_C,
+ FN_SD1_CD, FN_PWM0, FN_TPU_TO0, FN_I2C1_SCL_C,
/* IPSR14 */
- FN_SD1_WP, FN_PWM1_B, FN_SDA1_C,
+ FN_SD1_WP, FN_PWM1_B, FN_I2C1_SDA_C,
FN_SD2_CLK, FN_MMC_CLK, FN_SD2_CMD, FN_MMC_CMD,
FN_SD2_DATA0, FN_MMC_D0, FN_SD2_DATA1, FN_MMC_D1,
FN_SD2_DATA2, FN_MMC_D2, FN_SD2_DATA3, FN_MMC_D3,
- FN_SD2_CD, FN_MMC_D4, FN_SCL8_C, FN_TX5_B, FN_SCIFA5_TXD_C,
- FN_SD2_WP, FN_MMC_D5, FN_SDA8_C, FN_RX5_B, FN_SCIFA5_RXD_C,
+ FN_SD2_CD, FN_MMC_D4, FN_IIC1_SCL_C, FN_TX5_B, FN_SCIFA5_TXD_C,
+ FN_SD2_WP, FN_MMC_D5, FN_IIC1_SDA_C, FN_RX5_B, FN_SCIFA5_RXD_C,
FN_MSIOF0_SCK, FN_RX2_C, FN_ADIDATA, FN_VI1_CLK_C, FN_VI1_G0_B,
FN_MSIOF0_SYNC, FN_TX2_C, FN_ADICS_SAMP, FN_VI1_CLKENB_C, FN_VI1_G1_B,
FN_MSIOF0_TXD, FN_ADICLK, FN_VI1_FIELD_C, FN_VI1_G2_B,
FN_MSIOF0_RXD, FN_ADICHS0, FN_VI1_DATA0_C, FN_VI1_G3_B,
FN_MSIOF0_SS1, FN_MMC_D6, FN_ADICHS1, FN_TX0_E,
- FN_VI1_HSYNC_N_C, FN_SCL7_C, FN_VI1_G4_B,
+ FN_VI1_HSYNC_N_C, FN_IIC0_SCL_C, FN_VI1_G4_B,
FN_MSIOF0_SS2, FN_MMC_D7, FN_ADICHS2, FN_RX0_E,
- FN_VI1_VSYNC_N_C, FN_SDA7_C, FN_VI1_G5_B,
+ FN_VI1_VSYNC_N_C, FN_IIC0_SDA_C, FN_VI1_G5_B,
/* IPSR15 */
FN_SIM0_RST, FN_IETX, FN_CAN1_TX_D,
@@ -432,18 +433,18 @@ enum {
/* MOD_SEL3 */
FN_SEL_HSCIF2_0, FN_SEL_HSCIF2_1, FN_SEL_HSCIF2_2, FN_SEL_HSCIF2_3,
FN_SEL_CANCLK_0, FN_SEL_CANCLK_1, FN_SEL_CANCLK_2, FN_SEL_CANCLK_3,
- FN_SEL_IIC8_0, FN_SEL_IIC8_1, FN_SEL_IIC8_2,
- FN_SEL_IIC7_0, FN_SEL_IIC7_1, FN_SEL_IIC7_2,
- FN_SEL_IIC4_0, FN_SEL_IIC4_1, FN_SEL_IIC4_2,
- FN_SEL_IIC3_0, FN_SEL_IIC3_1, FN_SEL_IIC3_2, FN_SEL_IIC3_3,
+ FN_SEL_IIC1_0, FN_SEL_IIC1_1, FN_SEL_IIC1_2,
+ FN_SEL_IIC0_0, FN_SEL_IIC0_1, FN_SEL_IIC0_2,
+ FN_SEL_I2C4_0, FN_SEL_I2C4_1, FN_SEL_I2C4_2,
+ FN_SEL_I2C3_0, FN_SEL_I2C3_1, FN_SEL_I2C3_2, FN_SEL_I2C3_3,
FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3,
FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2,
FN_SEL_MMC_0, FN_SEL_MMC_1,
FN_SEL_SCIF5_0, FN_SEL_SCIF5_1,
- FN_SEL_IIC2_0, FN_SEL_IIC2_1, FN_SEL_IIC2_2, FN_SEL_IIC2_3,
- FN_SEL_IIC1_0, FN_SEL_IIC1_1, FN_SEL_IIC1_2, FN_SEL_IIC1_3,
- FN_SEL_IIC1_4,
- FN_SEL_IIC0_0, FN_SEL_IIC0_1, FN_SEL_IIC0_2,
+ FN_SEL_I2C2_0, FN_SEL_I2C2_1, FN_SEL_I2C2_2, FN_SEL_I2C2_3,
+ FN_SEL_I2C1_0, FN_SEL_I2C1_1, FN_SEL_I2C1_2, FN_SEL_I2C1_3,
+ FN_SEL_I2C1_4,
+ FN_SEL_I2C0_0, FN_SEL_I2C0_1, FN_SEL_I2C0_2,
/* MOD_SEL4 */
FN_SEL_SOF1_0, FN_SEL_SOF1_1, FN_SEL_SOF1_2, FN_SEL_SOF1_3,
@@ -481,22 +482,23 @@ enum {
D0_MARK, D1_MARK, D2_MARK, D3_MARK, D4_MARK, D5_MARK,
D6_MARK, D7_MARK, D8_MARK,
D9_MARK, D10_MARK, D11_MARK, D12_MARK, D13_MARK, D14_MARK, D15_MARK,
- A0_MARK, ATAWR0_N_C_MARK, MSIOF0_SCK_B_MARK, SCL0_C_MARK, PWM2_B_MARK,
+ A0_MARK, ATAWR0_N_C_MARK, MSIOF0_SCK_B_MARK, I2C0_SCL_C_MARK,
+ PWM2_B_MARK,
A1_MARK, MSIOF0_SYNC_B_MARK, A2_MARK, MSIOF0_SS1_B_MARK,
A3_MARK, MSIOF0_SS2_B_MARK, A4_MARK, MSIOF0_TXD_B_MARK,
A5_MARK, MSIOF0_RXD_B_MARK, A6_MARK, MSIOF1_SCK_MARK,
/* IPSR1 */
- A7_MARK, MSIOF1_SYNC_MARK, A8_MARK, MSIOF1_SS1_MARK, SCL0_MARK,
- A9_MARK, MSIOF1_SS2_MARK, SDA0_MARK,
+ A7_MARK, MSIOF1_SYNC_MARK, A8_MARK, MSIOF1_SS1_MARK, I2C0_SCL_MARK,
+ A9_MARK, MSIOF1_SS2_MARK, I2C0_SDA_MARK,
A10_MARK, MSIOF1_TXD_MARK, MSIOF1_TXD_D_MARK,
- A11_MARK, MSIOF1_RXD_MARK, SCL3_D_MARK, MSIOF1_RXD_D_MARK,
- A12_MARK, FMCLK_MARK, SDA3_D_MARK, MSIOF1_SCK_D_MARK,
+ A11_MARK, MSIOF1_RXD_MARK, I2C3_SCL_D_MARK, MSIOF1_RXD_D_MARK,
+ A12_MARK, FMCLK_MARK, I2C3_SDA_D_MARK, MSIOF1_SCK_D_MARK,
A13_MARK, ATAG0_N_C_MARK, BPFCLK_MARK, MSIOF1_SS1_D_MARK,
A14_MARK, ATADIR0_N_C_MARK, FMIN_MARK, FMIN_C_MARK, MSIOF1_SYNC_D_MARK,
A15_MARK, BPFCLK_C_MARK,
A16_MARK, DREQ2_B_MARK, FMCLK_C_MARK, SCIFA1_SCK_B_MARK,
- A17_MARK, DACK2_B_MARK, SDA0_C_MARK,
+ A17_MARK, DACK2_B_MARK, I2C0_SDA_C_MARK,
A18_MARK, DREQ1_MARK, SCIFA1_RXD_C_MARK, SCIFB1_RXD_C_MARK,
/* IPSR2 */
@@ -509,8 +511,8 @@ enum {
A24_MARK, DREQ2_MARK, IO3_MARK, TX1_MARK, SCIFA1_TXD_MARK,
A25_MARK, DACK2_MARK, SSL_MARK, DREQ1_C_MARK,
RX1_MARK, SCIFA1_RXD_MARK,
- CS0_N_MARK, ATAG0_N_B_MARK, SCL1_MARK,
- CS1_N_A26_MARK, ATADIR0_N_B_MARK, SDA1_MARK,
+ CS0_N_MARK, ATAG0_N_B_MARK, I2C1_SCL_MARK,
+ CS1_N_A26_MARK, ATADIR0_N_B_MARK, I2C1_SDA_MARK,
EX_CS1_N_MARK, MSIOF2_SCK_MARK,
EX_CS2_N_MARK, ATAWR0_N_MARK, MSIOF2_SYNC_MARK,
EX_CS3_N_MARK, ATADIR0_N_MARK, MSIOF2_TXD_MARK,
@@ -537,14 +539,15 @@ enum {
SCIFB0_TXD_C_MARK, SCIFB2_TXD_C_MARK,
/* IPSR4 */
- SSI_SDATA0_MARK, SCL0_B_MARK, SCL7_B_MARK, MSIOF2_SCK_C_MARK,
- SSI_SCK1_MARK, SDA0_B_MARK, SDA7_B_MARK,
+ SSI_SDATA0_MARK, I2C0_SCL_B_MARK, IIC0_SCL_B_MARK, MSIOF2_SCK_C_MARK,
+ SSI_SCK1_MARK, I2C0_SDA_B_MARK, IIC0_SDA_B_MARK,
MSIOF2_SYNC_C_MARK, GLO_I0_D_MARK,
- SSI_WS1_MARK, SCL1_B_MARK, SCL8_B_MARK,
+ SSI_WS1_MARK, I2C1_SCL_B_MARK, IIC1_SCL_B_MARK,
MSIOF2_TXD_C_MARK, GLO_I1_D_MARK,
- SSI_SDATA1_MARK, SDA1_B_MARK, SDA8_B_MARK, MSIOF2_RXD_C_MARK,
- SSI_SCK2_MARK, SCL2_MARK, GPS_CLK_B_MARK, GLO_Q0_D_MARK, HSCK1_E_MARK,
- SSI_WS2_MARK, SDA2_MARK, GPS_SIGN_B_MARK, RX2_E_MARK,
+ SSI_SDATA1_MARK, I2C1_SDA_B_MARK, IIC1_SDA_B_MARK, MSIOF2_RXD_C_MARK,
+ SSI_SCK2_MARK, I2C2_SCL_MARK, GPS_CLK_B_MARK, GLO_Q0_D_MARK,
+ HSCK1_E_MARK,
+ SSI_WS2_MARK, I2C2_SDA_MARK, GPS_SIGN_B_MARK, RX2_E_MARK,
GLO_Q1_D_MARK, HCTS1_N_E_MARK,
SSI_SDATA2_MARK, GPS_MAG_B_MARK, TX2_E_MARK, HRTS1_N_E_MARK,
SSI_SCK34_MARK, SSI_WS34_MARK, SSI_SDATA3_MARK,
@@ -580,12 +583,12 @@ enum {
IRQ0_MARK, SCIFB1_RXD_D_MARK, INTC_IRQ0_N_MARK,
IRQ1_MARK, SCIFB1_SCK_C_MARK, INTC_IRQ1_N_MARK,
IRQ2_MARK, SCIFB1_TXD_D_MARK, INTC_IRQ2_N_MARK,
- IRQ3_MARK, SCL4_C_MARK, MSIOF2_TXD_E_MARK, INTC_IRQ3_N_MARK,
- IRQ4_MARK, HRX1_C_MARK, SDA4_C_MARK,
+ IRQ3_MARK, I2C4_SCL_C_MARK, MSIOF2_TXD_E_MARK, INTC_IRQ3_N_MARK,
+ IRQ4_MARK, HRX1_C_MARK, I2C4_SDA_C_MARK,
MSIOF2_RXD_E_MARK, INTC_IRQ4_N_MARK,
- IRQ5_MARK, HTX1_C_MARK, SCL1_E_MARK, MSIOF2_SCK_E_MARK,
+ IRQ5_MARK, HTX1_C_MARK, I2C1_SCL_E_MARK, MSIOF2_SCK_E_MARK,
IRQ6_MARK, HSCK1_C_MARK, MSIOF1_SS2_B_MARK,
- SDA1_E_MARK, MSIOF2_SYNC_E_MARK,
+ I2C1_SDA_E_MARK, MSIOF2_SYNC_E_MARK,
IRQ7_MARK, HCTS1_N_C_MARK, MSIOF1_TXD_B_MARK,
GPS_CLK_C_MARK, GPS_CLK_D_MARK,
IRQ8_MARK, HRTS1_N_C_MARK, MSIOF1_RXD_B_MARK,
@@ -632,17 +635,17 @@ enum {
DU1_DB5_MARK, LCDOUT21_MARK, TX3_MARK, SCIFA3_TXD_MARK, CAN1_TX_MARK,
/* IPSR9 */
- DU1_DB6_MARK, LCDOUT22_MARK, SCL3_C_MARK, RX3_MARK, SCIFA3_RXD_MARK,
- DU1_DB7_MARK, LCDOUT23_MARK, SDA3_C_MARK,
+ DU1_DB6_MARK, LCDOUT22_MARK, I2C3_SCL_C_MARK, RX3_MARK, SCIFA3_RXD_MARK,
+ DU1_DB7_MARK, LCDOUT23_MARK, I2C3_SDA_C_MARK,
SCIF3_SCK_MARK, SCIFA3_SCK_MARK,
DU1_DOTCLKIN_MARK, QSTVA_QVS_MARK,
DU1_DOTCLKOUT0_MARK, QCLK_MARK,
DU1_DOTCLKOUT1_MARK, QSTVB_QVE_MARK, CAN0_TX_MARK,
- TX3_B_MARK, SCL2_B_MARK, PWM4_MARK,
+ TX3_B_MARK, I2C2_SCL_B_MARK, PWM4_MARK,
DU1_EXHSYNC_DU1_HSYNC_MARK, QSTH_QHS_MARK,
DU1_EXVSYNC_DU1_VSYNC_MARK, QSTB_QHE_MARK,
DU1_EXODDF_DU1_ODDF_DISP_CDE_MARK, QCPV_QDE_MARK,
- CAN0_RX_MARK, RX3_B_MARK, SDA2_B_MARK,
+ CAN0_RX_MARK, RX3_B_MARK, I2C2_SDA_B_MARK,
DU1_DISP_MARK, QPOLA_MARK,
DU1_CDE_MARK, QPOLB_MARK, PWM4_B_MARK,
VI0_CLKENB_MARK, TX4_MARK, SCIFA4_TXD_MARK, TS_SDATA0_D_MARK,
@@ -650,15 +653,15 @@ enum {
VI0_HSYNC_N_MARK, TX5_MARK, SCIFA5_TXD_MARK, TS_SDEN0_D_MARK,
VI0_VSYNC_N_MARK, RX5_MARK, SCIFA5_RXD_MARK, TS_SPSYNC0_D_MARK,
VI0_DATA3_VI0_B3_MARK, SCIF3_SCK_B_MARK, SCIFA3_SCK_B_MARK,
- VI0_G0_MARK, SCL8_MARK, STP_IVCXO27_0_C_MARK, SCL4_MARK,
+ VI0_G0_MARK, IIC1_SCL_MARK, STP_IVCXO27_0_C_MARK, I2C4_SCL_MARK,
HCTS2_N_MARK, SCIFB2_CTS_N_MARK, ATAWR1_N_MARK,
/* IPSR10 */
- VI0_G1_MARK, SDA8_MARK, STP_ISCLK_0_C_MARK, SDA4_MARK,
+ VI0_G1_MARK, IIC1_SDA_MARK, STP_ISCLK_0_C_MARK, I2C4_SDA_MARK,
HRTS2_N_MARK, SCIFB2_RTS_N_MARK, ATADIR1_N_MARK,
- VI0_G2_MARK, VI2_HSYNC_N_MARK, STP_ISD_0_C_MARK, SCL3_B_MARK,
+ VI0_G2_MARK, VI2_HSYNC_N_MARK, STP_ISD_0_C_MARK, I2C3_SCL_B_MARK,
HSCK2_MARK, SCIFB2_SCK_MARK, ATARD1_N_MARK,
- VI0_G3_MARK, VI2_VSYNC_N_MARK, STP_ISEN_0_C_MARK, SDA3_B_MARK,
+ VI0_G3_MARK, VI2_VSYNC_N_MARK, STP_ISEN_0_C_MARK, I2C3_SDA_B_MARK,
HRX2_MARK, SCIFB2_RXD_MARK, ATACS01_N_MARK,
VI0_G4_MARK, VI2_CLKENB_MARK, STP_ISSYNC_0_C_MARK,
HTX2_MARK, SCIFB2_TXD_MARK, SCIFB0_SCK_D_MARK,
@@ -672,13 +675,15 @@ enum {
TS_SCK0_C_MARK, ATAG1_N_MARK,
VI0_R2_MARK, VI2_DATA3_MARK, GLO_Q0_B_MARK, TS_SDEN0_C_MARK,
VI0_R3_MARK, VI2_DATA4_MARK, GLO_Q1_B_MARK, TS_SPSYNC0_C_MARK,
- VI0_R4_MARK, VI2_DATA5_MARK, GLO_SCLK_B_MARK, TX0_C_MARK, SCL1_D_MARK,
+ VI0_R4_MARK, VI2_DATA5_MARK, GLO_SCLK_B_MARK, TX0_C_MARK,
+ I2C1_SCL_D_MARK,
/* IPSR11 */
- VI0_R5_MARK, VI2_DATA6_MARK, GLO_SDATA_B_MARK, RX0_C_MARK, SDA1_D_MARK,
- VI0_R6_MARK, VI2_DATA7_MARK, GLO_SS_B_MARK, TX1_C_MARK, SCL4_B_MARK,
+ VI0_R5_MARK, VI2_DATA6_MARK, GLO_SDATA_B_MARK, RX0_C_MARK,
+ I2C1_SDA_D_MARK,
+ VI0_R6_MARK, VI2_DATA7_MARK, GLO_SS_B_MARK, TX1_C_MARK, I2C4_SCL_B_MARK,
VI0_R7_MARK, GLO_RFON_B_MARK, RX1_C_MARK, CAN0_RX_E_MARK,
- SDA4_B_MARK, HRX1_D_MARK, SCIFB0_RXD_D_MARK,
+ I2C4_SDA_B_MARK, HRX1_D_MARK, SCIFB0_RXD_D_MARK,
VI1_HSYNC_N_MARK, AVB_RXD0_MARK, TS_SDATA0_B_MARK,
TX4_B_MARK, SCIFA4_TXD_B_MARK,
VI1_VSYNC_N_MARK, AVB_RXD1_MARK, TS_SCK0_B_MARK,
@@ -690,16 +695,16 @@ enum {
VI1_DATA3_MARK, AVB_RX_ER_MARK, VI1_DATA4_MARK, AVB_MDIO_MARK,
VI1_DATA5_MARK, AVB_RX_DV_MARK, VI1_DATA6_MARK, AVB_MAGIC_MARK,
VI1_DATA7_MARK, AVB_MDC_MARK,
- ETH_MDIO_MARK, AVB_RX_CLK_MARK, SCL2_C_MARK,
- ETH_CRS_DV_MARK, AVB_LINK_MARK, SDA2_C_MARK,
+ ETH_MDIO_MARK, AVB_RX_CLK_MARK, I2C2_SCL_C_MARK,
+ ETH_CRS_DV_MARK, AVB_LINK_MARK, I2C2_SDA_C_MARK,
/* IPSR12 */
- ETH_RX_ER_MARK, AVB_CRS_MARK, SCL3_MARK, SCL7_MARK,
- ETH_RXD0_MARK, AVB_PHY_INT_MARK, SDA3_MARK, SDA7_MARK,
+ ETH_RX_ER_MARK, AVB_CRS_MARK, I2C3_SCL_MARK, IIC0_SCL_MARK,
+ ETH_RXD0_MARK, AVB_PHY_INT_MARK, I2C3_SDA_MARK, IIC0_SDA_MARK,
ETH_RXD1_MARK, AVB_GTXREFCLK_MARK, CAN0_TX_C_MARK,
- SCL2_D_MARK, MSIOF1_RXD_E_MARK,
+ I2C2_SCL_D_MARK, MSIOF1_RXD_E_MARK,
ETH_LINK_MARK, AVB_TXD0_MARK, CAN0_RX_C_MARK,
- SDA2_D_MARK, MSIOF1_SCK_E_MARK,
+ I2C2_SDA_D_MARK, MSIOF1_SCK_E_MARK,
ETH_REFCLK_MARK, AVB_TXD1_MARK, SCIFA3_RXD_B_MARK,
CAN1_RX_C_MARK, MSIOF1_SYNC_E_MARK,
ETH_TXD1_MARK, AVB_TXD2_MARK, SCIFA3_TXD_B_MARK,
@@ -730,15 +735,17 @@ enum {
SD1_CMD_MARK, REMOCON_B_MARK, SD1_DATA0_MARK, SPEEDIN_B_MARK,
SD1_DATA1_MARK, IETX_B_MARK, SD1_DATA2_MARK, IECLK_B_MARK,
SD1_DATA3_MARK, IERX_B_MARK,
- SD1_CD_MARK, PWM0_MARK, TPU_TO0_MARK, SCL1_C_MARK,
+ SD1_CD_MARK, PWM0_MARK, TPU_TO0_MARK, I2C1_SCL_C_MARK,
/* IPSR14 */
- SD1_WP_MARK, PWM1_B_MARK, SDA1_C_MARK,
+ SD1_WP_MARK, PWM1_B_MARK, I2C1_SDA_C_MARK,
SD2_CLK_MARK, MMC_CLK_MARK, SD2_CMD_MARK, MMC_CMD_MARK,
SD2_DATA0_MARK, MMC_D0_MARK, SD2_DATA1_MARK, MMC_D1_MARK,
SD2_DATA2_MARK, MMC_D2_MARK, SD2_DATA3_MARK, MMC_D3_MARK,
- SD2_CD_MARK, MMC_D4_MARK, SCL8_C_MARK, TX5_B_MARK, SCIFA5_TXD_C_MARK,
- SD2_WP_MARK, MMC_D5_MARK, SDA8_C_MARK, RX5_B_MARK, SCIFA5_RXD_C_MARK,
+ SD2_CD_MARK, MMC_D4_MARK, IIC1_SCL_C_MARK, TX5_B_MARK,
+ SCIFA5_TXD_C_MARK,
+ SD2_WP_MARK, MMC_D5_MARK, IIC1_SDA_C_MARK, RX5_B_MARK,
+ SCIFA5_RXD_C_MARK,
MSIOF0_SCK_MARK, RX2_C_MARK, ADIDATA_MARK,
VI1_CLK_C_MARK, VI1_G0_B_MARK,
MSIOF0_SYNC_MARK, TX2_C_MARK, ADICS_SAMP_MARK,
@@ -746,9 +753,9 @@ enum {
MSIOF0_TXD_MARK, ADICLK_MARK, VI1_FIELD_C_MARK, VI1_G2_B_MARK,
MSIOF0_RXD_MARK, ADICHS0_MARK, VI1_DATA0_C_MARK, VI1_G3_B_MARK,
MSIOF0_SS1_MARK, MMC_D6_MARK, ADICHS1_MARK, TX0_E_MARK,
- VI1_HSYNC_N_C_MARK, SCL7_C_MARK, VI1_G4_B_MARK,
+ VI1_HSYNC_N_C_MARK, IIC0_SCL_C_MARK, VI1_G4_B_MARK,
MSIOF0_SS2_MARK, MMC_D7_MARK, ADICHS2_MARK, RX0_E_MARK,
- VI1_VSYNC_N_C_MARK, SDA7_C_MARK, VI1_G5_B_MARK,
+ VI1_VSYNC_N_C_MARK, IIC0_SDA_C_MARK, VI1_G5_B_MARK,
/* IPSR15 */
SIM0_RST_MARK, IETX_MARK, CAN1_TX_D_MARK,
@@ -822,7 +829,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP0_18_16, A0),
PINMUX_IPSR_MSEL(IP0_18_16, ATAWR0_N_C, SEL_LBS_2),
PINMUX_IPSR_MSEL(IP0_18_16, MSIOF0_SCK_B, SEL_SOF0_1),
- PINMUX_IPSR_MSEL(IP0_18_16, SCL0_C, SEL_IIC0_2),
+ PINMUX_IPSR_MSEL(IP0_18_16, I2C0_SCL_C, SEL_I2C0_2),
PINMUX_IPSR_GPSR(IP0_18_16, PWM2_B),
PINMUX_IPSR_GPSR(IP0_20_19, A1),
PINMUX_IPSR_MSEL(IP0_20_19, MSIOF0_SYNC_B, SEL_SOF0_1),
@@ -842,20 +849,20 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP1_1_0, MSIOF1_SYNC, SEL_SOF1_0),
PINMUX_IPSR_GPSR(IP1_3_2, A8),
PINMUX_IPSR_MSEL(IP1_3_2, MSIOF1_SS1, SEL_SOF1_0),
- PINMUX_IPSR_MSEL(IP1_3_2, SCL0, SEL_IIC0_0),
+ PINMUX_IPSR_MSEL(IP1_3_2, I2C0_SCL, SEL_I2C0_0),
PINMUX_IPSR_GPSR(IP1_5_4, A9),
PINMUX_IPSR_MSEL(IP1_5_4, MSIOF1_SS2, SEL_SOF1_0),
- PINMUX_IPSR_MSEL(IP1_5_4, SDA0, SEL_IIC0_0),
+ PINMUX_IPSR_MSEL(IP1_5_4, I2C0_SDA, SEL_I2C0_0),
PINMUX_IPSR_GPSR(IP1_7_6, A10),
PINMUX_IPSR_MSEL(IP1_7_6, MSIOF1_TXD, SEL_SOF1_0),
PINMUX_IPSR_MSEL(IP1_7_6, MSIOF1_TXD_D, SEL_SOF1_3),
PINMUX_IPSR_GPSR(IP1_10_8, A11),
PINMUX_IPSR_MSEL(IP1_10_8, MSIOF1_RXD, SEL_SOF1_0),
- PINMUX_IPSR_MSEL(IP1_10_8, SCL3_D, SEL_IIC3_3),
+ PINMUX_IPSR_MSEL(IP1_10_8, I2C3_SCL_D, SEL_I2C3_3),
PINMUX_IPSR_MSEL(IP1_10_8, MSIOF1_RXD_D, SEL_SOF1_3),
PINMUX_IPSR_GPSR(IP1_13_11, A12),
PINMUX_IPSR_MSEL(IP1_13_11, FMCLK, SEL_FM_0),
- PINMUX_IPSR_MSEL(IP1_13_11, SDA3_D, SEL_IIC3_3),
+ PINMUX_IPSR_MSEL(IP1_13_11, I2C3_SDA_D, SEL_I2C3_3),
PINMUX_IPSR_MSEL(IP1_13_11, MSIOF1_SCK_D, SEL_SOF1_3),
PINMUX_IPSR_GPSR(IP1_16_14, A13),
PINMUX_IPSR_MSEL(IP1_16_14, ATAG0_N_C, SEL_LBS_2),
@@ -874,7 +881,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP1_25_23, SCIFA1_SCK_B, SEL_SCIFA1_1),
PINMUX_IPSR_GPSR(IP1_28_26, A17),
PINMUX_IPSR_MSEL(IP1_28_26, DACK2_B, SEL_LBS_1),
- PINMUX_IPSR_MSEL(IP1_28_26, SDA0_C, SEL_IIC0_2),
+ PINMUX_IPSR_MSEL(IP1_28_26, I2C0_SDA_C, SEL_I2C0_2),
PINMUX_IPSR_GPSR(IP1_31_29, A18),
PINMUX_IPSR_MSEL(IP1_31_29, DREQ1, SEL_LBS_0),
PINMUX_IPSR_MSEL(IP1_31_29, SCIFA1_RXD_C, SEL_SCIFA1_2),
@@ -914,10 +921,10 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP2_18_16, SCIFA1_RXD, SEL_SCIFA1_0),
PINMUX_IPSR_GPSR(IP2_20_19, CS0_N),
PINMUX_IPSR_MSEL(IP2_20_19, ATAG0_N_B, SEL_LBS_1),
- PINMUX_IPSR_MSEL(IP2_20_19, SCL1, SEL_IIC1_0),
+ PINMUX_IPSR_MSEL(IP2_20_19, I2C1_SCL, SEL_I2C1_0),
PINMUX_IPSR_GPSR(IP2_22_21, CS1_N_A26),
PINMUX_IPSR_MSEL(IP2_22_21, ATADIR0_N_B, SEL_LBS_1),
- PINMUX_IPSR_MSEL(IP2_22_21, SDA1, SEL_IIC1_0),
+ PINMUX_IPSR_MSEL(IP2_22_21, I2C1_SDA, SEL_I2C1_0),
PINMUX_IPSR_GPSR(IP2_24_23, EX_CS1_N),
PINMUX_IPSR_MSEL(IP2_24_23, MSIOF2_SCK, SEL_SOF2_0),
PINMUX_IPSR_GPSR(IP2_26_25, EX_CS2_N),
@@ -989,30 +996,30 @@ static const u16 pinmux_data[] = {
/* IPSR4 */
PINMUX_IPSR_MSEL(IP4_1_0, SSI_SDATA0, SEL_SSI0_0),
- PINMUX_IPSR_MSEL(IP4_1_0, SCL0_B, SEL_IIC0_1),
- PINMUX_IPSR_MSEL(IP4_1_0, SCL7_B, SEL_IIC7_1),
+ PINMUX_IPSR_MSEL(IP4_1_0, I2C0_SCL_B, SEL_I2C0_1),
+ PINMUX_IPSR_MSEL(IP4_1_0, IIC0_SCL_B, SEL_IIC0_1),
PINMUX_IPSR_MSEL(IP4_1_0, MSIOF2_SCK_C, SEL_SOF2_2),
PINMUX_IPSR_MSEL(IP4_4_2, SSI_SCK1, SEL_SSI1_0),
- PINMUX_IPSR_MSEL(IP4_4_2, SDA0_B, SEL_IIC0_1),
- PINMUX_IPSR_MSEL(IP4_4_2, SDA7_B, SEL_IIC7_1),
+ PINMUX_IPSR_MSEL(IP4_4_2, I2C0_SDA_B, SEL_I2C0_1),
+ PINMUX_IPSR_MSEL(IP4_4_2, IIC0_SDA_B, SEL_IIC0_1),
PINMUX_IPSR_MSEL(IP4_4_2, MSIOF2_SYNC_C, SEL_SOF2_2),
PINMUX_IPSR_MSEL(IP4_4_2, GLO_I0_D, SEL_GPS_3),
PINMUX_IPSR_MSEL(IP4_7_5, SSI_WS1, SEL_SSI1_0),
- PINMUX_IPSR_MSEL(IP4_7_5, SCL1_B, SEL_IIC1_1),
- PINMUX_IPSR_MSEL(IP4_7_5, SCL8_B, SEL_IIC8_1),
+ PINMUX_IPSR_MSEL(IP4_7_5, I2C1_SCL_B, SEL_I2C1_1),
+ PINMUX_IPSR_MSEL(IP4_7_5, IIC1_SCL_B, SEL_IIC1_1),
PINMUX_IPSR_MSEL(IP4_7_5, MSIOF2_TXD_C, SEL_SOF2_2),
PINMUX_IPSR_MSEL(IP4_7_5, GLO_I1_D, SEL_GPS_3),
PINMUX_IPSR_MSEL(IP4_9_8, SSI_SDATA1, SEL_SSI1_0),
- PINMUX_IPSR_MSEL(IP4_9_8, SDA1_B, SEL_IIC1_1),
- PINMUX_IPSR_MSEL(IP4_9_8, SDA8_B, SEL_IIC8_1),
+ PINMUX_IPSR_MSEL(IP4_9_8, I2C1_SDA_B, SEL_I2C1_1),
+ PINMUX_IPSR_MSEL(IP4_9_8, IIC1_SDA_B, SEL_IIC1_1),
PINMUX_IPSR_MSEL(IP4_9_8, MSIOF2_RXD_C, SEL_SOF2_2),
PINMUX_IPSR_GPSR(IP4_12_10, SSI_SCK2),
- PINMUX_IPSR_MSEL(IP4_12_10, SCL2, SEL_IIC2_0),
+ PINMUX_IPSR_MSEL(IP4_12_10, I2C2_SCL, SEL_I2C2_0),
PINMUX_IPSR_MSEL(IP4_12_10, GPS_CLK_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP4_12_10, GLO_Q0_D, SEL_GPS_3),
PINMUX_IPSR_MSEL(IP4_12_10, HSCK1_E, SEL_HSCIF1_4),
PINMUX_IPSR_GPSR(IP4_15_13, SSI_WS2),
- PINMUX_IPSR_MSEL(IP4_15_13, SDA2, SEL_IIC2_0),
+ PINMUX_IPSR_MSEL(IP4_15_13, I2C2_SDA, SEL_I2C2_0),
PINMUX_IPSR_MSEL(IP4_15_13, GPS_SIGN_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP4_15_13, RX2_E, SEL_SCIF2_4),
PINMUX_IPSR_MSEL(IP4_15_13, GLO_Q1_D, SEL_GPS_3),
@@ -1115,22 +1122,22 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP6_13_12, SCIFB1_TXD_D, SEL_SCIFB1_3),
PINMUX_IPSR_GPSR(IP6_13_12, INTC_IRQ2_N),
PINMUX_IPSR_GPSR(IP6_15_14, IRQ3),
- PINMUX_IPSR_MSEL(IP6_15_14, SCL4_C, SEL_IIC4_2),
+ PINMUX_IPSR_MSEL(IP6_15_14, I2C4_SCL_C, SEL_I2C4_2),
PINMUX_IPSR_MSEL(IP6_15_14, MSIOF2_TXD_E, SEL_SOF2_4),
PINMUX_IPSR_GPSR(IP6_15_14, INTC_IRQ4_N),
PINMUX_IPSR_GPSR(IP6_18_16, IRQ4),
PINMUX_IPSR_MSEL(IP6_18_16, HRX1_C, SEL_HSCIF1_2),
- PINMUX_IPSR_MSEL(IP6_18_16, SDA4_C, SEL_IIC4_2),
+ PINMUX_IPSR_MSEL(IP6_18_16, I2C4_SDA_C, SEL_I2C4_2),
PINMUX_IPSR_MSEL(IP6_18_16, MSIOF2_RXD_E, SEL_SOF2_4),
PINMUX_IPSR_GPSR(IP6_18_16, INTC_IRQ4_N),
PINMUX_IPSR_GPSR(IP6_20_19, IRQ5),
PINMUX_IPSR_MSEL(IP6_20_19, HTX1_C, SEL_HSCIF1_2),
- PINMUX_IPSR_MSEL(IP6_20_19, SCL1_E, SEL_IIC1_4),
+ PINMUX_IPSR_MSEL(IP6_20_19, I2C1_SCL_E, SEL_I2C1_4),
PINMUX_IPSR_MSEL(IP6_20_19, MSIOF2_SCK_E, SEL_SOF2_4),
PINMUX_IPSR_GPSR(IP6_23_21, IRQ6),
PINMUX_IPSR_MSEL(IP6_23_21, HSCK1_C, SEL_HSCIF1_2),
PINMUX_IPSR_MSEL(IP6_23_21, MSIOF1_SS2_B, SEL_SOF1_1),
- PINMUX_IPSR_MSEL(IP6_23_21, SDA1_E, SEL_IIC1_4),
+ PINMUX_IPSR_MSEL(IP6_23_21, I2C1_SDA_E, SEL_I2C1_4),
PINMUX_IPSR_MSEL(IP6_23_21, MSIOF2_SYNC_E, SEL_SOF2_4),
PINMUX_IPSR_GPSR(IP6_26_24, IRQ7),
PINMUX_IPSR_MSEL(IP6_26_24, HCTS1_N_C, SEL_HSCIF1_2),
@@ -1260,12 +1267,12 @@ static const u16 pinmux_data[] = {
/* IPSR9 */
PINMUX_IPSR_GPSR(IP9_2_0, DU1_DB6),
PINMUX_IPSR_GPSR(IP9_2_0, LCDOUT22),
- PINMUX_IPSR_MSEL(IP9_2_0, SCL3_C, SEL_IIC3_2),
+ PINMUX_IPSR_MSEL(IP9_2_0, I2C3_SCL_C, SEL_I2C3_2),
PINMUX_IPSR_MSEL(IP9_2_0, RX3, SEL_SCIF3_0),
PINMUX_IPSR_MSEL(IP9_2_0, SCIFA3_RXD, SEL_SCIFA3_0),
PINMUX_IPSR_GPSR(IP9_5_3, DU1_DB7),
PINMUX_IPSR_GPSR(IP9_5_3, LCDOUT23),
- PINMUX_IPSR_MSEL(IP9_5_3, SDA3_C, SEL_IIC3_2),
+ PINMUX_IPSR_MSEL(IP9_5_3, I2C3_SDA_C, SEL_I2C3_2),
PINMUX_IPSR_MSEL(IP9_5_3, SCIF3_SCK, SEL_SCIF3_0),
PINMUX_IPSR_MSEL(IP9_5_3, SCIFA3_SCK, SEL_SCIFA3_0),
PINMUX_IPSR_MSEL(IP9_6, DU1_DOTCLKIN, SEL_DIS_0),
@@ -1276,7 +1283,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP9_10_8, QSTVB_QVE),
PINMUX_IPSR_MSEL(IP9_10_8, CAN0_TX, SEL_CAN0_0),
PINMUX_IPSR_MSEL(IP9_10_8, TX3_B, SEL_SCIF3_1),
- PINMUX_IPSR_MSEL(IP9_10_8, SCL2_B, SEL_IIC2_1),
+ PINMUX_IPSR_MSEL(IP9_10_8, I2C2_SCL_B, SEL_I2C2_1),
PINMUX_IPSR_GPSR(IP9_10_8, PWM4),
PINMUX_IPSR_GPSR(IP9_11, DU1_EXHSYNC_DU1_HSYNC),
PINMUX_IPSR_GPSR(IP9_11, QSTH_QHS),
@@ -1286,7 +1293,7 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP9_15_13, QCPV_QDE),
PINMUX_IPSR_MSEL(IP9_15_13, CAN0_RX, SEL_CAN0_0),
PINMUX_IPSR_MSEL(IP9_15_13, RX3_B, SEL_SCIF3_1),
- PINMUX_IPSR_MSEL(IP9_15_13, SDA2_B, SEL_IIC2_1),
+ PINMUX_IPSR_MSEL(IP9_15_13, I2C2_SDA_B, SEL_I2C2_1),
PINMUX_IPSR_GPSR(IP9_16, DU1_DISP),
PINMUX_IPSR_GPSR(IP9_16, QPOLA),
PINMUX_IPSR_GPSR(IP9_18_17, DU1_CDE),
@@ -1312,32 +1319,32 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP9_28_27, SCIF3_SCK_B, SEL_SCIF3_1),
PINMUX_IPSR_MSEL(IP9_28_27, SCIFA3_SCK_B, SEL_SCIFA3_1),
PINMUX_IPSR_GPSR(IP9_31_29, VI0_G0),
- PINMUX_IPSR_MSEL(IP9_31_29, SCL8, SEL_IIC8_0),
+ PINMUX_IPSR_MSEL(IP9_31_29, IIC1_SCL, SEL_IIC1_0),
PINMUX_IPSR_MSEL(IP9_31_29, STP_IVCXO27_0_C, SEL_SSP_2),
- PINMUX_IPSR_MSEL(IP9_31_29, SCL4, SEL_IIC4_0),
+ PINMUX_IPSR_MSEL(IP9_31_29, I2C4_SCL, SEL_I2C4_0),
PINMUX_IPSR_MSEL(IP9_31_29, HCTS2_N, SEL_HSCIF2_0),
PINMUX_IPSR_MSEL(IP9_31_29, SCIFB2_CTS_N, SEL_SCIFB2_0),
PINMUX_IPSR_GPSR(IP9_31_29, ATAWR1_N),
/* IPSR10 */
PINMUX_IPSR_GPSR(IP10_2_0, VI0_G1),
- PINMUX_IPSR_MSEL(IP10_2_0, SDA8, SEL_IIC8_0),
+ PINMUX_IPSR_MSEL(IP10_2_0, IIC1_SDA, SEL_IIC1_0),
PINMUX_IPSR_MSEL(IP10_2_0, STP_ISCLK_0_C, SEL_SSP_2),
- PINMUX_IPSR_MSEL(IP10_2_0, SDA4, SEL_IIC4_0),
+ PINMUX_IPSR_MSEL(IP10_2_0, I2C4_SDA, SEL_I2C4_0),
PINMUX_IPSR_MSEL(IP10_2_0, HRTS2_N, SEL_HSCIF2_0),
PINMUX_IPSR_MSEL(IP10_2_0, SCIFB2_RTS_N, SEL_SCIFB2_0),
PINMUX_IPSR_GPSR(IP10_2_0, ATADIR1_N),
PINMUX_IPSR_GPSR(IP10_5_3, VI0_G2),
PINMUX_IPSR_GPSR(IP10_5_3, VI2_HSYNC_N),
PINMUX_IPSR_MSEL(IP10_5_3, STP_ISD_0_C, SEL_SSP_2),
- PINMUX_IPSR_MSEL(IP10_5_3, SCL3_B, SEL_IIC3_1),
+ PINMUX_IPSR_MSEL(IP10_5_3, I2C3_SCL_B, SEL_I2C3_1),
PINMUX_IPSR_MSEL(IP10_5_3, HSCK2, SEL_HSCIF2_0),
PINMUX_IPSR_MSEL(IP10_5_3, SCIFB2_SCK, SEL_SCIFB2_0),
PINMUX_IPSR_GPSR(IP10_5_3, ATARD1_N),
PINMUX_IPSR_GPSR(IP10_8_6, VI0_G3),
PINMUX_IPSR_GPSR(IP10_8_6, VI2_VSYNC_N),
PINMUX_IPSR_MSEL(IP10_8_6, STP_ISEN_0_C, SEL_SSP_2),
- PINMUX_IPSR_MSEL(IP10_8_6, SDA3_B, SEL_IIC3_1),
+ PINMUX_IPSR_MSEL(IP10_8_6, I2C3_SDA_B, SEL_I2C3_1),
PINMUX_IPSR_MSEL(IP10_8_6, HRX2, SEL_HSCIF2_0),
PINMUX_IPSR_MSEL(IP10_8_6, SCIFB2_RXD, SEL_SCIFB2_0),
PINMUX_IPSR_GPSR(IP10_8_6, ATACS01_N),
@@ -1382,24 +1389,24 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP10_31_29, VI2_DATA5),
PINMUX_IPSR_MSEL(IP10_31_29, GLO_SCLK_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP10_31_29, TX0_C, SEL_SCIF0_2),
- PINMUX_IPSR_MSEL(IP10_31_29, SCL1_D, SEL_IIC1_3),
+ PINMUX_IPSR_MSEL(IP10_31_29, I2C1_SCL_D, SEL_I2C1_3),
/* IPSR11 */
PINMUX_IPSR_GPSR(IP11_2_0, VI0_R5),
PINMUX_IPSR_GPSR(IP11_2_0, VI2_DATA6),
PINMUX_IPSR_MSEL(IP11_2_0, GLO_SDATA_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP11_2_0, RX0_C, SEL_SCIF0_2),
- PINMUX_IPSR_MSEL(IP11_2_0, SDA1_D, SEL_IIC1_3),
+ PINMUX_IPSR_MSEL(IP11_2_0, I2C1_SDA_D, SEL_I2C1_3),
PINMUX_IPSR_GPSR(IP11_5_3, VI0_R6),
PINMUX_IPSR_GPSR(IP11_5_3, VI2_DATA7),
PINMUX_IPSR_MSEL(IP11_5_3, GLO_SS_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP11_5_3, TX1_C, SEL_SCIF1_2),
- PINMUX_IPSR_MSEL(IP11_5_3, SCL4_B, SEL_IIC4_1),
+ PINMUX_IPSR_MSEL(IP11_5_3, I2C4_SCL_B, SEL_I2C4_1),
PINMUX_IPSR_GPSR(IP11_8_6, VI0_R7),
PINMUX_IPSR_MSEL(IP11_8_6, GLO_RFON_B, SEL_GPS_1),
PINMUX_IPSR_MSEL(IP11_8_6, RX1_C, SEL_SCIF1_2),
PINMUX_IPSR_MSEL(IP11_8_6, CAN0_RX_E, SEL_CAN0_4),
- PINMUX_IPSR_MSEL(IP11_8_6, SDA4_B, SEL_IIC4_1),
+ PINMUX_IPSR_MSEL(IP11_8_6, I2C4_SDA_B, SEL_I2C4_1),
PINMUX_IPSR_MSEL(IP11_8_6, HRX1_D, SEL_HSCIF1_3),
PINMUX_IPSR_MSEL(IP11_8_6, SCIFB0_RXD_D, SEL_SCIFB_3),
PINMUX_IPSR_MSEL(IP11_11_9, VI1_HSYNC_N, SEL_VI1_0),
@@ -1438,29 +1445,29 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP11_27, AVB_MDC),
PINMUX_IPSR_GPSR(IP11_29_28, ETH_MDIO),
PINMUX_IPSR_GPSR(IP11_29_28, AVB_RX_CLK),
- PINMUX_IPSR_MSEL(IP11_29_28, SCL2_C, SEL_IIC2_2),
+ PINMUX_IPSR_MSEL(IP11_29_28, I2C2_SCL_C, SEL_I2C2_2),
PINMUX_IPSR_GPSR(IP11_31_30, ETH_CRS_DV),
PINMUX_IPSR_GPSR(IP11_31_30, AVB_LINK),
- PINMUX_IPSR_MSEL(IP11_31_30, SDA2_C, SEL_IIC2_2),
+ PINMUX_IPSR_MSEL(IP11_31_30, I2C2_SDA_C, SEL_I2C2_2),
/* IPSR12 */
PINMUX_IPSR_GPSR(IP12_1_0, ETH_RX_ER),
PINMUX_IPSR_GPSR(IP12_1_0, AVB_CRS),
- PINMUX_IPSR_MSEL(IP12_1_0, SCL3, SEL_IIC3_0),
- PINMUX_IPSR_MSEL(IP12_1_0, SCL7, SEL_IIC7_0),
+ PINMUX_IPSR_MSEL(IP12_1_0, I2C3_SCL, SEL_I2C3_0),
+ PINMUX_IPSR_MSEL(IP12_1_0, IIC0_SCL, SEL_IIC0_0),
PINMUX_IPSR_GPSR(IP12_3_2, ETH_RXD0),
PINMUX_IPSR_GPSR(IP12_3_2, AVB_PHY_INT),
- PINMUX_IPSR_MSEL(IP12_3_2, SDA3, SEL_IIC3_0),
- PINMUX_IPSR_MSEL(IP12_3_2, SDA7, SEL_IIC7_0),
+ PINMUX_IPSR_MSEL(IP12_3_2, I2C3_SDA, SEL_I2C3_0),
+ PINMUX_IPSR_MSEL(IP12_3_2, IIC0_SDA, SEL_IIC0_0),
PINMUX_IPSR_GPSR(IP12_6_4, ETH_RXD1),
PINMUX_IPSR_GPSR(IP12_6_4, AVB_GTXREFCLK),
PINMUX_IPSR_MSEL(IP12_6_4, CAN0_TX_C, SEL_CAN0_2),
- PINMUX_IPSR_MSEL(IP12_6_4, SCL2_D, SEL_IIC2_3),
+ PINMUX_IPSR_MSEL(IP12_6_4, I2C2_SCL_D, SEL_I2C2_3),
PINMUX_IPSR_MSEL(IP12_6_4, MSIOF1_RXD_E, SEL_SOF1_4),
PINMUX_IPSR_GPSR(IP12_9_7, ETH_LINK),
PINMUX_IPSR_GPSR(IP12_9_7, AVB_TXD0),
PINMUX_IPSR_MSEL(IP12_9_7, CAN0_RX_C, SEL_CAN0_2),
- PINMUX_IPSR_MSEL(IP12_9_7, SDA2_D, SEL_IIC2_3),
+ PINMUX_IPSR_MSEL(IP12_9_7, I2C2_SDA_D, SEL_I2C2_3),
PINMUX_IPSR_MSEL(IP12_9_7, MSIOF1_SCK_E, SEL_SOF1_4),
PINMUX_IPSR_GPSR(IP12_12_10, ETH_REFCLK),
PINMUX_IPSR_GPSR(IP12_12_10, AVB_TXD1),
@@ -1552,12 +1559,12 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP13_30_28, SD1_CD),
PINMUX_IPSR_GPSR(IP13_30_28, PWM0),
PINMUX_IPSR_GPSR(IP13_30_28, TPU_TO0),
- PINMUX_IPSR_MSEL(IP13_30_28, SCL1_C, SEL_IIC1_2),
+ PINMUX_IPSR_MSEL(IP13_30_28, I2C1_SCL_C, SEL_I2C1_2),
/* IPSR14 */
PINMUX_IPSR_GPSR(IP14_1_0, SD1_WP),
PINMUX_IPSR_GPSR(IP14_1_0, PWM1_B),
- PINMUX_IPSR_MSEL(IP14_1_0, SDA1_C, SEL_IIC1_2),
+ PINMUX_IPSR_MSEL(IP14_1_0, I2C1_SDA_C, SEL_I2C1_2),
PINMUX_IPSR_GPSR(IP14_2, SD2_CLK),
PINMUX_IPSR_GPSR(IP14_2, MMC_CLK),
PINMUX_IPSR_GPSR(IP14_3, SD2_CMD),
@@ -1572,12 +1579,12 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP14_7, MMC_D3),
PINMUX_IPSR_GPSR(IP14_10_8, SD2_CD),
PINMUX_IPSR_GPSR(IP14_10_8, MMC_D4),
- PINMUX_IPSR_MSEL(IP14_10_8, SCL8_C, SEL_IIC8_2),
+ PINMUX_IPSR_MSEL(IP14_10_8, IIC1_SCL_C, SEL_IIC1_2),
PINMUX_IPSR_MSEL(IP14_10_8, TX5_B, SEL_SCIF5_1),
PINMUX_IPSR_MSEL(IP14_10_8, SCIFA5_TXD_C, SEL_SCIFA5_2),
PINMUX_IPSR_GPSR(IP14_13_11, SD2_WP),
PINMUX_IPSR_GPSR(IP14_13_11, MMC_D5),
- PINMUX_IPSR_MSEL(IP14_13_11, SDA8_C, SEL_IIC8_2),
+ PINMUX_IPSR_MSEL(IP14_13_11, IIC1_SDA_C, SEL_IIC1_2),
PINMUX_IPSR_MSEL(IP14_13_11, RX5_B, SEL_SCIF5_1),
PINMUX_IPSR_MSEL(IP14_13_11, SCIFA5_RXD_C, SEL_SCIFA5_2),
PINMUX_IPSR_MSEL(IP14_16_14, MSIOF0_SCK, SEL_SOF0_0),
@@ -1603,14 +1610,14 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP14_28_26, ADICHS1, SEL_RAD_0),
PINMUX_IPSR_MSEL(IP14_28_26, TX0_E, SEL_SCIF0_4),
PINMUX_IPSR_MSEL(IP14_28_26, VI1_HSYNC_N_C, SEL_VI1_2),
- PINMUX_IPSR_MSEL(IP14_28_26, SCL7_C, SEL_IIC7_2),
+ PINMUX_IPSR_MSEL(IP14_28_26, IIC0_SCL_C, SEL_IIC0_2),
PINMUX_IPSR_GPSR(IP14_28_26, VI1_G4_B),
PINMUX_IPSR_MSEL(IP14_31_29, MSIOF0_SS2, SEL_SOF0_0),
PINMUX_IPSR_MSEL(IP14_31_29, MMC_D7, SEL_MMC_0),
PINMUX_IPSR_MSEL(IP14_31_29, ADICHS2, SEL_RAD_0),
PINMUX_IPSR_MSEL(IP14_31_29, RX0_E, SEL_SCIF0_4),
PINMUX_IPSR_MSEL(IP14_31_29, VI1_VSYNC_N_C, SEL_VI1_2),
- PINMUX_IPSR_MSEL(IP14_31_29, SDA7_C, SEL_IIC7_2),
+ PINMUX_IPSR_MSEL(IP14_31_29, IIC0_SDA_C, SEL_IIC0_2),
PINMUX_IPSR_GPSR(IP14_31_29, VI1_G5_B),
/* IPSR15 */
@@ -2343,21 +2350,21 @@ static const unsigned int i2c0_pins[] = {
RCAR_GP_PIN(0, 24), RCAR_GP_PIN(0, 25),
};
static const unsigned int i2c0_mux[] = {
- SCL0_MARK, SDA0_MARK,
+ I2C0_SCL_MARK, I2C0_SDA_MARK,
};
static const unsigned int i2c0_b_pins[] = {
/* SCL, SDA */
RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
};
static const unsigned int i2c0_b_mux[] = {
- SCL0_B_MARK, SDA0_B_MARK,
+ I2C0_SCL_B_MARK, I2C0_SDA_B_MARK,
};
static const unsigned int i2c0_c_pins[] = {
/* SCL, SDA */
RCAR_GP_PIN(0, 16), RCAR_GP_PIN(1, 1),
};
static const unsigned int i2c0_c_mux[] = {
- SCL0_C_MARK, SDA0_C_MARK,
+ I2C0_SCL_C_MARK, I2C0_SDA_C_MARK,
};
/* - I2C1 ------------------------------------------------------------------- */
static const unsigned int i2c1_pins[] = {
@@ -2365,35 +2372,35 @@ static const unsigned int i2c1_pins[] = {
RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 11),
};
static const unsigned int i2c1_mux[] = {
- SCL1_MARK, SDA1_MARK,
+ I2C1_SCL_MARK, I2C1_SDA_MARK,
};
static const unsigned int i2c1_b_pins[] = {
/* SCL, SDA */
RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5),
};
static const unsigned int i2c1_b_mux[] = {
- SCL1_B_MARK, SDA1_B_MARK,
+ I2C1_SCL_B_MARK, I2C1_SDA_B_MARK,
};
static const unsigned int i2c1_c_pins[] = {
/* SCL, SDA */
RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
};
static const unsigned int i2c1_c_mux[] = {
- SCL1_C_MARK, SDA1_C_MARK,
+ I2C1_SCL_C_MARK, I2C1_SDA_C_MARK,
};
static const unsigned int i2c1_d_pins[] = {
/* SCL, SDA */
RCAR_GP_PIN(4, 25), RCAR_GP_PIN(4, 26),
};
static const unsigned int i2c1_d_mux[] = {
- SCL1_D_MARK, SDA1_D_MARK,
+ I2C1_SCL_D_MARK, I2C1_SDA_D_MARK,
};
static const unsigned int i2c1_e_pins[] = {
/* SCL, SDA */
RCAR_GP_PIN(7, 15), RCAR_GP_PIN(7, 16),
};
static const unsigned int i2c1_e_mux[] = {
- SCL1_E_MARK, SDA1_E_MARK,
+ I2C1_SCL_E_MARK, I2C1_SDA_E_MARK,
};
/* - I2C2 ------------------------------------------------------------------- */
static const unsigned int i2c2_pins[] = {
@@ -2401,28 +2408,28 @@ static const unsigned int i2c2_pins[] = {
RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7),
};
static const unsigned int i2c2_mux[] = {
- SCL2_MARK, SDA2_MARK,
+ I2C2_SCL_MARK, I2C2_SDA_MARK,
};
static const unsigned int i2c2_b_pins[] = {
/* SCL, SDA */
RCAR_GP_PIN(3, 26), RCAR_GP_PIN(3, 29),
};
static const unsigned int i2c2_b_mux[] = {
- SCL2_B_MARK, SDA2_B_MARK,
+ I2C2_SCL_B_MARK, I2C2_SDA_B_MARK,
};
static const unsigned int i2c2_c_pins[] = {
/* SCL, SDA */
RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 14),
};
static const unsigned int i2c2_c_mux[] = {
- SCL2_C_MARK, SDA2_C_MARK,
+ I2C2_SCL_C_MARK, I2C2_SDA_C_MARK,
};
static const unsigned int i2c2_d_pins[] = {
/* SCL, SDA */
RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 18),
};
static const unsigned int i2c2_d_mux[] = {
- SCL2_D_MARK, SDA2_D_MARK,
+ I2C2_SCL_D_MARK, I2C2_SDA_D_MARK,
};
/* - I2C3 ------------------------------------------------------------------- */
static const unsigned int i2c3_pins[] = {
@@ -2430,28 +2437,28 @@ static const unsigned int i2c3_pins[] = {
RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
};
static const unsigned int i2c3_mux[] = {
- SCL3_MARK, SDA3_MARK,
+ I2C3_SCL_MARK, I2C3_SDA_MARK,
};
static const unsigned int i2c3_b_pins[] = {
/* SCL, SDA */
RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 16),
};
static const unsigned int i2c3_b_mux[] = {
- SCL3_B_MARK, SDA3_B_MARK,
+ I2C3_SCL_B_MARK, I2C3_SDA_B_MARK,
};
static const unsigned int i2c3_c_pins[] = {
/* SCL, SDA */
RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 23),
};
static const unsigned int i2c3_c_mux[] = {
- SCL3_C_MARK, SDA3_C_MARK,
+ I2C3_SCL_C_MARK, I2C3_SDA_C_MARK,
};
static const unsigned int i2c3_d_pins[] = {
/* SCL, SDA */
RCAR_GP_PIN(0, 27), RCAR_GP_PIN(0, 28),
};
static const unsigned int i2c3_d_mux[] = {
- SCL3_D_MARK, SDA3_D_MARK,
+ I2C3_SCL_D_MARK, I2C3_SDA_D_MARK,
};
/* - I2C4 ------------------------------------------------------------------- */
static const unsigned int i2c4_pins[] = {
@@ -2459,21 +2466,21 @@ static const unsigned int i2c4_pins[] = {
RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 14),
};
static const unsigned int i2c4_mux[] = {
- SCL4_MARK, SDA4_MARK,
+ I2C4_SCL_MARK, I2C4_SDA_MARK,
};
static const unsigned int i2c4_b_pins[] = {
/* SCL, SDA */
RCAR_GP_PIN(4, 27), RCAR_GP_PIN(4, 28),
};
static const unsigned int i2c4_b_mux[] = {
- SCL4_B_MARK, SDA4_B_MARK,
+ I2C4_SCL_B_MARK, I2C4_SDA_B_MARK,
};
static const unsigned int i2c4_c_pins[] = {
/* SCL, SDA */
RCAR_GP_PIN(7, 13), RCAR_GP_PIN(7, 14),
};
static const unsigned int i2c4_c_mux[] = {
- SCL4_C_MARK, SDA4_C_MARK,
+ I2C4_SCL_C_MARK, I2C4_SDA_C_MARK,
};
/* - I2C7 ------------------------------------------------------------------- */
static const unsigned int i2c7_pins[] = {
@@ -2481,21 +2488,21 @@ static const unsigned int i2c7_pins[] = {
RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
};
static const unsigned int i2c7_mux[] = {
- SCL7_MARK, SDA7_MARK,
+ IIC0_SCL_MARK, IIC0_SDA_MARK,
};
static const unsigned int i2c7_b_pins[] = {
/* SCL, SDA */
RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
};
static const unsigned int i2c7_b_mux[] = {
- SCL7_B_MARK, SDA7_B_MARK,
+ IIC0_SCL_B_MARK, IIC0_SDA_B_MARK,
};
static const unsigned int i2c7_c_pins[] = {
/* SCL, SDA */
RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
};
static const unsigned int i2c7_c_mux[] = {
- SCL7_C_MARK, SDA7_C_MARK,
+ IIC0_SCL_C_MARK, IIC0_SDA_C_MARK,
};
/* - I2C8 ------------------------------------------------------------------- */
static const unsigned int i2c8_pins[] = {
@@ -2503,21 +2510,21 @@ static const unsigned int i2c8_pins[] = {
RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 14),
};
static const unsigned int i2c8_mux[] = {
- SCL8_MARK, SDA8_MARK,
+ IIC1_SCL_MARK, IIC1_SDA_MARK,
};
static const unsigned int i2c8_b_pins[] = {
/* SCL, SDA */
RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5),
};
static const unsigned int i2c8_b_mux[] = {
- SCL8_B_MARK, SDA8_B_MARK,
+ IIC1_SCL_B_MARK, IIC1_SDA_B_MARK,
};
static const unsigned int i2c8_c_pins[] = {
/* SCL, SDA */
RCAR_GP_PIN(6, 22), RCAR_GP_PIN(6, 23),
};
static const unsigned int i2c8_c_mux[] = {
- SCL8_C_MARK, SDA8_C_MARK,
+ IIC1_SCL_C_MARK, IIC1_SDA_C_MARK,
};
/* - INTC ------------------------------------------------------------------- */
static const unsigned int intc_irq0_pins[] = {
@@ -4412,357 +4419,364 @@ static const unsigned int vin2_clk_mux[] = {
VI2_CLK_MARK,
};
-static const struct sh_pfc_pin_group pinmux_groups[] = {
- SH_PFC_PIN_GROUP(adi_common),
- SH_PFC_PIN_GROUP(adi_chsel0),
- SH_PFC_PIN_GROUP(adi_chsel1),
- SH_PFC_PIN_GROUP(adi_chsel2),
- SH_PFC_PIN_GROUP(adi_common_b),
- SH_PFC_PIN_GROUP(adi_chsel0_b),
- SH_PFC_PIN_GROUP(adi_chsel1_b),
- SH_PFC_PIN_GROUP(adi_chsel2_b),
- SH_PFC_PIN_GROUP(audio_clk_a),
- SH_PFC_PIN_GROUP(audio_clk_b),
- SH_PFC_PIN_GROUP(audio_clk_b_b),
- SH_PFC_PIN_GROUP(audio_clk_c),
- SH_PFC_PIN_GROUP(audio_clkout),
- SH_PFC_PIN_GROUP(avb_link),
- SH_PFC_PIN_GROUP(avb_magic),
- SH_PFC_PIN_GROUP(avb_phy_int),
- SH_PFC_PIN_GROUP(avb_mdio),
- SH_PFC_PIN_GROUP(avb_mii),
- SH_PFC_PIN_GROUP(avb_gmii),
- SH_PFC_PIN_GROUP(can0_data),
- SH_PFC_PIN_GROUP(can0_data_b),
- SH_PFC_PIN_GROUP(can0_data_c),
- SH_PFC_PIN_GROUP(can0_data_d),
- SH_PFC_PIN_GROUP(can0_data_e),
- SH_PFC_PIN_GROUP(can0_data_f),
- SH_PFC_PIN_GROUP(can1_data),
- SH_PFC_PIN_GROUP(can1_data_b),
- SH_PFC_PIN_GROUP(can1_data_c),
- SH_PFC_PIN_GROUP(can1_data_d),
- SH_PFC_PIN_GROUP(can_clk),
- SH_PFC_PIN_GROUP(can_clk_b),
- SH_PFC_PIN_GROUP(can_clk_c),
- SH_PFC_PIN_GROUP(can_clk_d),
- SH_PFC_PIN_GROUP(du_rgb666),
- SH_PFC_PIN_GROUP(du_rgb888),
- SH_PFC_PIN_GROUP(du_clk_out_0),
- SH_PFC_PIN_GROUP(du_clk_out_1),
- SH_PFC_PIN_GROUP(du_sync),
- SH_PFC_PIN_GROUP(du_oddf),
- SH_PFC_PIN_GROUP(du_cde),
- SH_PFC_PIN_GROUP(du_disp),
- SH_PFC_PIN_GROUP(du0_clk_in),
- SH_PFC_PIN_GROUP(du1_clk_in),
- SH_PFC_PIN_GROUP(du1_clk_in_b),
- SH_PFC_PIN_GROUP(du1_clk_in_c),
- SH_PFC_PIN_GROUP(eth_link),
- SH_PFC_PIN_GROUP(eth_magic),
- SH_PFC_PIN_GROUP(eth_mdio),
- SH_PFC_PIN_GROUP(eth_rmii),
- SH_PFC_PIN_GROUP(hscif0_data),
- SH_PFC_PIN_GROUP(hscif0_clk),
- SH_PFC_PIN_GROUP(hscif0_ctrl),
- SH_PFC_PIN_GROUP(hscif0_data_b),
- SH_PFC_PIN_GROUP(hscif0_ctrl_b),
- SH_PFC_PIN_GROUP(hscif0_data_c),
- SH_PFC_PIN_GROUP(hscif0_clk_c),
- SH_PFC_PIN_GROUP(hscif1_data),
- SH_PFC_PIN_GROUP(hscif1_clk),
- SH_PFC_PIN_GROUP(hscif1_ctrl),
- SH_PFC_PIN_GROUP(hscif1_data_b),
- SH_PFC_PIN_GROUP(hscif1_data_c),
- SH_PFC_PIN_GROUP(hscif1_clk_c),
- SH_PFC_PIN_GROUP(hscif1_ctrl_c),
- SH_PFC_PIN_GROUP(hscif1_data_d),
- SH_PFC_PIN_GROUP(hscif1_data_e),
- SH_PFC_PIN_GROUP(hscif1_clk_e),
- SH_PFC_PIN_GROUP(hscif1_ctrl_e),
- SH_PFC_PIN_GROUP(hscif2_data),
- SH_PFC_PIN_GROUP(hscif2_clk),
- SH_PFC_PIN_GROUP(hscif2_ctrl),
- SH_PFC_PIN_GROUP(hscif2_data_b),
- SH_PFC_PIN_GROUP(hscif2_ctrl_b),
- SH_PFC_PIN_GROUP(hscif2_data_c),
- SH_PFC_PIN_GROUP(hscif2_clk_c),
- SH_PFC_PIN_GROUP(hscif2_data_d),
- SH_PFC_PIN_GROUP(i2c0),
- SH_PFC_PIN_GROUP(i2c0_b),
- SH_PFC_PIN_GROUP(i2c0_c),
- SH_PFC_PIN_GROUP(i2c1),
- SH_PFC_PIN_GROUP(i2c1_b),
- SH_PFC_PIN_GROUP(i2c1_c),
- SH_PFC_PIN_GROUP(i2c1_d),
- SH_PFC_PIN_GROUP(i2c1_e),
- SH_PFC_PIN_GROUP(i2c2),
- SH_PFC_PIN_GROUP(i2c2_b),
- SH_PFC_PIN_GROUP(i2c2_c),
- SH_PFC_PIN_GROUP(i2c2_d),
- SH_PFC_PIN_GROUP(i2c3),
- SH_PFC_PIN_GROUP(i2c3_b),
- SH_PFC_PIN_GROUP(i2c3_c),
- SH_PFC_PIN_GROUP(i2c3_d),
- SH_PFC_PIN_GROUP(i2c4),
- SH_PFC_PIN_GROUP(i2c4_b),
- SH_PFC_PIN_GROUP(i2c4_c),
- SH_PFC_PIN_GROUP(i2c7),
- SH_PFC_PIN_GROUP(i2c7_b),
- SH_PFC_PIN_GROUP(i2c7_c),
- SH_PFC_PIN_GROUP(i2c8),
- SH_PFC_PIN_GROUP(i2c8_b),
- SH_PFC_PIN_GROUP(i2c8_c),
- SH_PFC_PIN_GROUP(intc_irq0),
- SH_PFC_PIN_GROUP(intc_irq1),
- SH_PFC_PIN_GROUP(intc_irq2),
- SH_PFC_PIN_GROUP(intc_irq3),
- SH_PFC_PIN_GROUP(mlb_3pin),
- SH_PFC_PIN_GROUP(mmc_data1),
- SH_PFC_PIN_GROUP(mmc_data4),
- SH_PFC_PIN_GROUP(mmc_data8),
- SH_PFC_PIN_GROUP(mmc_ctrl),
- SH_PFC_PIN_GROUP(msiof0_clk),
- SH_PFC_PIN_GROUP(msiof0_sync),
- SH_PFC_PIN_GROUP(msiof0_ss1),
- SH_PFC_PIN_GROUP(msiof0_ss2),
- SH_PFC_PIN_GROUP(msiof0_rx),
- SH_PFC_PIN_GROUP(msiof0_tx),
- SH_PFC_PIN_GROUP(msiof0_clk_b),
- SH_PFC_PIN_GROUP(msiof0_sync_b),
- SH_PFC_PIN_GROUP(msiof0_ss1_b),
- SH_PFC_PIN_GROUP(msiof0_ss2_b),
- SH_PFC_PIN_GROUP(msiof0_rx_b),
- SH_PFC_PIN_GROUP(msiof0_tx_b),
- SH_PFC_PIN_GROUP(msiof0_clk_c),
- SH_PFC_PIN_GROUP(msiof0_sync_c),
- SH_PFC_PIN_GROUP(msiof0_ss1_c),
- SH_PFC_PIN_GROUP(msiof0_ss2_c),
- SH_PFC_PIN_GROUP(msiof0_rx_c),
- SH_PFC_PIN_GROUP(msiof0_tx_c),
- SH_PFC_PIN_GROUP(msiof1_clk),
- SH_PFC_PIN_GROUP(msiof1_sync),
- SH_PFC_PIN_GROUP(msiof1_ss1),
- SH_PFC_PIN_GROUP(msiof1_ss2),
- SH_PFC_PIN_GROUP(msiof1_rx),
- SH_PFC_PIN_GROUP(msiof1_tx),
- SH_PFC_PIN_GROUP(msiof1_clk_b),
- SH_PFC_PIN_GROUP(msiof1_sync_b),
- SH_PFC_PIN_GROUP(msiof1_ss1_b),
- SH_PFC_PIN_GROUP(msiof1_ss2_b),
- SH_PFC_PIN_GROUP(msiof1_rx_b),
- SH_PFC_PIN_GROUP(msiof1_tx_b),
- SH_PFC_PIN_GROUP(msiof1_clk_c),
- SH_PFC_PIN_GROUP(msiof1_sync_c),
- SH_PFC_PIN_GROUP(msiof1_rx_c),
- SH_PFC_PIN_GROUP(msiof1_tx_c),
- SH_PFC_PIN_GROUP(msiof1_clk_d),
- SH_PFC_PIN_GROUP(msiof1_sync_d),
- SH_PFC_PIN_GROUP(msiof1_ss1_d),
- SH_PFC_PIN_GROUP(msiof1_rx_d),
- SH_PFC_PIN_GROUP(msiof1_tx_d),
- SH_PFC_PIN_GROUP(msiof1_clk_e),
- SH_PFC_PIN_GROUP(msiof1_sync_e),
- SH_PFC_PIN_GROUP(msiof1_rx_e),
- SH_PFC_PIN_GROUP(msiof1_tx_e),
- SH_PFC_PIN_GROUP(msiof2_clk),
- SH_PFC_PIN_GROUP(msiof2_sync),
- SH_PFC_PIN_GROUP(msiof2_ss1),
- SH_PFC_PIN_GROUP(msiof2_ss2),
- SH_PFC_PIN_GROUP(msiof2_rx),
- SH_PFC_PIN_GROUP(msiof2_tx),
- SH_PFC_PIN_GROUP(msiof2_clk_b),
- SH_PFC_PIN_GROUP(msiof2_sync_b),
- SH_PFC_PIN_GROUP(msiof2_ss1_b),
- SH_PFC_PIN_GROUP(msiof2_ss2_b),
- SH_PFC_PIN_GROUP(msiof2_rx_b),
- SH_PFC_PIN_GROUP(msiof2_tx_b),
- SH_PFC_PIN_GROUP(msiof2_clk_c),
- SH_PFC_PIN_GROUP(msiof2_sync_c),
- SH_PFC_PIN_GROUP(msiof2_rx_c),
- SH_PFC_PIN_GROUP(msiof2_tx_c),
- SH_PFC_PIN_GROUP(msiof2_clk_d),
- SH_PFC_PIN_GROUP(msiof2_sync_d),
- SH_PFC_PIN_GROUP(msiof2_ss1_d),
- SH_PFC_PIN_GROUP(msiof2_ss2_d),
- SH_PFC_PIN_GROUP(msiof2_rx_d),
- SH_PFC_PIN_GROUP(msiof2_tx_d),
- SH_PFC_PIN_GROUP(msiof2_clk_e),
- SH_PFC_PIN_GROUP(msiof2_sync_e),
- SH_PFC_PIN_GROUP(msiof2_rx_e),
- SH_PFC_PIN_GROUP(msiof2_tx_e),
- SH_PFC_PIN_GROUP(pwm0),
- SH_PFC_PIN_GROUP(pwm0_b),
- SH_PFC_PIN_GROUP(pwm1),
- SH_PFC_PIN_GROUP(pwm1_b),
- SH_PFC_PIN_GROUP(pwm2),
- SH_PFC_PIN_GROUP(pwm2_b),
- SH_PFC_PIN_GROUP(pwm3),
- SH_PFC_PIN_GROUP(pwm4),
- SH_PFC_PIN_GROUP(pwm4_b),
- SH_PFC_PIN_GROUP(pwm5),
- SH_PFC_PIN_GROUP(pwm5_b),
- SH_PFC_PIN_GROUP(pwm6),
- SH_PFC_PIN_GROUP(qspi_ctrl),
- SH_PFC_PIN_GROUP(qspi_data2),
- SH_PFC_PIN_GROUP(qspi_data4),
- SH_PFC_PIN_GROUP(qspi_ctrl_b),
- SH_PFC_PIN_GROUP(qspi_data2_b),
- SH_PFC_PIN_GROUP(qspi_data4_b),
- SH_PFC_PIN_GROUP(scif0_data),
- SH_PFC_PIN_GROUP(scif0_data_b),
- SH_PFC_PIN_GROUP(scif0_data_c),
- SH_PFC_PIN_GROUP(scif0_data_d),
- SH_PFC_PIN_GROUP(scif0_data_e),
- SH_PFC_PIN_GROUP(scif1_data),
- SH_PFC_PIN_GROUP(scif1_data_b),
- SH_PFC_PIN_GROUP(scif1_clk_b),
- SH_PFC_PIN_GROUP(scif1_data_c),
- SH_PFC_PIN_GROUP(scif1_data_d),
- SH_PFC_PIN_GROUP(scif2_data),
- SH_PFC_PIN_GROUP(scif2_data_b),
- SH_PFC_PIN_GROUP(scif2_clk_b),
- SH_PFC_PIN_GROUP(scif2_data_c),
- SH_PFC_PIN_GROUP(scif2_data_e),
- SH_PFC_PIN_GROUP(scif3_data),
- SH_PFC_PIN_GROUP(scif3_clk),
- SH_PFC_PIN_GROUP(scif3_data_b),
- SH_PFC_PIN_GROUP(scif3_clk_b),
- SH_PFC_PIN_GROUP(scif3_data_c),
- SH_PFC_PIN_GROUP(scif3_data_d),
- SH_PFC_PIN_GROUP(scif4_data),
- SH_PFC_PIN_GROUP(scif4_data_b),
- SH_PFC_PIN_GROUP(scif4_data_c),
- SH_PFC_PIN_GROUP(scif5_data),
- SH_PFC_PIN_GROUP(scif5_data_b),
- SH_PFC_PIN_GROUP(scifa0_data),
- SH_PFC_PIN_GROUP(scifa0_data_b),
- SH_PFC_PIN_GROUP(scifa1_data),
- SH_PFC_PIN_GROUP(scifa1_clk),
- SH_PFC_PIN_GROUP(scifa1_data_b),
- SH_PFC_PIN_GROUP(scifa1_clk_b),
- SH_PFC_PIN_GROUP(scifa1_data_c),
- SH_PFC_PIN_GROUP(scifa2_data),
- SH_PFC_PIN_GROUP(scifa2_clk),
- SH_PFC_PIN_GROUP(scifa2_data_b),
- SH_PFC_PIN_GROUP(scifa3_data),
- SH_PFC_PIN_GROUP(scifa3_clk),
- SH_PFC_PIN_GROUP(scifa3_data_b),
- SH_PFC_PIN_GROUP(scifa3_clk_b),
- SH_PFC_PIN_GROUP(scifa3_data_c),
- SH_PFC_PIN_GROUP(scifa3_clk_c),
- SH_PFC_PIN_GROUP(scifa4_data),
- SH_PFC_PIN_GROUP(scifa4_data_b),
- SH_PFC_PIN_GROUP(scifa4_data_c),
- SH_PFC_PIN_GROUP(scifa5_data),
- SH_PFC_PIN_GROUP(scifa5_data_b),
- SH_PFC_PIN_GROUP(scifa5_data_c),
- SH_PFC_PIN_GROUP(scifb0_data),
- SH_PFC_PIN_GROUP(scifb0_clk),
- SH_PFC_PIN_GROUP(scifb0_ctrl),
- SH_PFC_PIN_GROUP(scifb0_data_b),
- SH_PFC_PIN_GROUP(scifb0_clk_b),
- SH_PFC_PIN_GROUP(scifb0_ctrl_b),
- SH_PFC_PIN_GROUP(scifb0_data_c),
- SH_PFC_PIN_GROUP(scifb0_clk_c),
- SH_PFC_PIN_GROUP(scifb0_data_d),
- SH_PFC_PIN_GROUP(scifb0_clk_d),
- SH_PFC_PIN_GROUP(scifb1_data),
- SH_PFC_PIN_GROUP(scifb1_clk),
- SH_PFC_PIN_GROUP(scifb1_ctrl),
- SH_PFC_PIN_GROUP(scifb1_data_b),
- SH_PFC_PIN_GROUP(scifb1_clk_b),
- SH_PFC_PIN_GROUP(scifb1_data_c),
- SH_PFC_PIN_GROUP(scifb1_clk_c),
- SH_PFC_PIN_GROUP(scifb1_data_d),
- SH_PFC_PIN_GROUP(scifb2_data),
- SH_PFC_PIN_GROUP(scifb2_clk),
- SH_PFC_PIN_GROUP(scifb2_ctrl),
- SH_PFC_PIN_GROUP(scifb2_data_b),
- SH_PFC_PIN_GROUP(scifb2_clk_b),
- SH_PFC_PIN_GROUP(scifb2_ctrl_b),
- SH_PFC_PIN_GROUP(scifb2_data_c),
- SH_PFC_PIN_GROUP(scifb2_clk_c),
- SH_PFC_PIN_GROUP(scifb2_data_d),
- SH_PFC_PIN_GROUP(scif_clk),
- SH_PFC_PIN_GROUP(scif_clk_b),
- SH_PFC_PIN_GROUP(sdhi0_data1),
- SH_PFC_PIN_GROUP(sdhi0_data4),
- SH_PFC_PIN_GROUP(sdhi0_ctrl),
- SH_PFC_PIN_GROUP(sdhi0_cd),
- SH_PFC_PIN_GROUP(sdhi0_wp),
- SH_PFC_PIN_GROUP(sdhi1_data1),
- SH_PFC_PIN_GROUP(sdhi1_data4),
- SH_PFC_PIN_GROUP(sdhi1_ctrl),
- SH_PFC_PIN_GROUP(sdhi1_cd),
- SH_PFC_PIN_GROUP(sdhi1_wp),
- SH_PFC_PIN_GROUP(sdhi2_data1),
- SH_PFC_PIN_GROUP(sdhi2_data4),
- SH_PFC_PIN_GROUP(sdhi2_ctrl),
- SH_PFC_PIN_GROUP(sdhi2_cd),
- SH_PFC_PIN_GROUP(sdhi2_wp),
- SH_PFC_PIN_GROUP(ssi0_data),
- SH_PFC_PIN_GROUP(ssi0_data_b),
- SH_PFC_PIN_GROUP(ssi0129_ctrl),
- SH_PFC_PIN_GROUP(ssi0129_ctrl_b),
- SH_PFC_PIN_GROUP(ssi1_data),
- SH_PFC_PIN_GROUP(ssi1_data_b),
- SH_PFC_PIN_GROUP(ssi1_ctrl),
- SH_PFC_PIN_GROUP(ssi1_ctrl_b),
- SH_PFC_PIN_GROUP(ssi2_data),
- SH_PFC_PIN_GROUP(ssi2_ctrl),
- SH_PFC_PIN_GROUP(ssi3_data),
- SH_PFC_PIN_GROUP(ssi34_ctrl),
- SH_PFC_PIN_GROUP(ssi4_data),
- SH_PFC_PIN_GROUP(ssi4_ctrl),
- SH_PFC_PIN_GROUP(ssi5_data),
- SH_PFC_PIN_GROUP(ssi5_ctrl),
- SH_PFC_PIN_GROUP(ssi6_data),
- SH_PFC_PIN_GROUP(ssi6_ctrl),
- SH_PFC_PIN_GROUP(ssi7_data),
- SH_PFC_PIN_GROUP(ssi7_data_b),
- SH_PFC_PIN_GROUP(ssi78_ctrl),
- SH_PFC_PIN_GROUP(ssi78_ctrl_b),
- SH_PFC_PIN_GROUP(ssi8_data),
- SH_PFC_PIN_GROUP(ssi8_data_b),
- SH_PFC_PIN_GROUP(ssi9_data),
- SH_PFC_PIN_GROUP(ssi9_data_b),
- SH_PFC_PIN_GROUP(ssi9_ctrl),
- SH_PFC_PIN_GROUP(ssi9_ctrl_b),
- SH_PFC_PIN_GROUP(usb0),
- SH_PFC_PIN_GROUP(usb1),
- VIN_DATA_PIN_GROUP(vin0_data, 24),
- VIN_DATA_PIN_GROUP(vin0_data, 20),
- SH_PFC_PIN_GROUP(vin0_data18),
- VIN_DATA_PIN_GROUP(vin0_data, 16),
- VIN_DATA_PIN_GROUP(vin0_data, 12),
- VIN_DATA_PIN_GROUP(vin0_data, 10),
- VIN_DATA_PIN_GROUP(vin0_data, 8),
- SH_PFC_PIN_GROUP(vin0_sync),
- SH_PFC_PIN_GROUP(vin0_field),
- SH_PFC_PIN_GROUP(vin0_clkenb),
- SH_PFC_PIN_GROUP(vin0_clk),
- SH_PFC_PIN_GROUP(vin1_data8),
- SH_PFC_PIN_GROUP(vin1_sync),
- SH_PFC_PIN_GROUP(vin1_field),
- SH_PFC_PIN_GROUP(vin1_clkenb),
- SH_PFC_PIN_GROUP(vin1_clk),
- VIN_DATA_PIN_GROUP(vin1_b_data, 24),
- VIN_DATA_PIN_GROUP(vin1_b_data, 20),
- SH_PFC_PIN_GROUP(vin1_b_data18),
- VIN_DATA_PIN_GROUP(vin1_b_data, 16),
- VIN_DATA_PIN_GROUP(vin1_b_data, 12),
- VIN_DATA_PIN_GROUP(vin1_b_data, 10),
- VIN_DATA_PIN_GROUP(vin1_b_data, 8),
- SH_PFC_PIN_GROUP(vin1_b_sync),
- SH_PFC_PIN_GROUP(vin1_b_field),
- SH_PFC_PIN_GROUP(vin1_b_clkenb),
- SH_PFC_PIN_GROUP(vin1_b_clk),
- SH_PFC_PIN_GROUP(vin2_data8),
- SH_PFC_PIN_GROUP(vin2_sync),
- SH_PFC_PIN_GROUP(vin2_field),
- SH_PFC_PIN_GROUP(vin2_clkenb),
- SH_PFC_PIN_GROUP(vin2_clk),
+static const struct {
+ struct sh_pfc_pin_group common[341];
+ struct sh_pfc_pin_group r8a779x[9];
+} pinmux_groups = {
+ .common = {
+ SH_PFC_PIN_GROUP(audio_clk_a),
+ SH_PFC_PIN_GROUP(audio_clk_b),
+ SH_PFC_PIN_GROUP(audio_clk_b_b),
+ SH_PFC_PIN_GROUP(audio_clk_c),
+ SH_PFC_PIN_GROUP(audio_clkout),
+ SH_PFC_PIN_GROUP(avb_link),
+ SH_PFC_PIN_GROUP(avb_magic),
+ SH_PFC_PIN_GROUP(avb_phy_int),
+ SH_PFC_PIN_GROUP(avb_mdio),
+ SH_PFC_PIN_GROUP(avb_mii),
+ SH_PFC_PIN_GROUP(avb_gmii),
+ SH_PFC_PIN_GROUP(can0_data),
+ SH_PFC_PIN_GROUP(can0_data_b),
+ SH_PFC_PIN_GROUP(can0_data_c),
+ SH_PFC_PIN_GROUP(can0_data_d),
+ SH_PFC_PIN_GROUP(can0_data_e),
+ SH_PFC_PIN_GROUP(can0_data_f),
+ SH_PFC_PIN_GROUP(can1_data),
+ SH_PFC_PIN_GROUP(can1_data_b),
+ SH_PFC_PIN_GROUP(can1_data_c),
+ SH_PFC_PIN_GROUP(can1_data_d),
+ SH_PFC_PIN_GROUP(can_clk),
+ SH_PFC_PIN_GROUP(can_clk_b),
+ SH_PFC_PIN_GROUP(can_clk_c),
+ SH_PFC_PIN_GROUP(can_clk_d),
+ SH_PFC_PIN_GROUP(du_rgb666),
+ SH_PFC_PIN_GROUP(du_rgb888),
+ SH_PFC_PIN_GROUP(du_clk_out_0),
+ SH_PFC_PIN_GROUP(du_clk_out_1),
+ SH_PFC_PIN_GROUP(du_sync),
+ SH_PFC_PIN_GROUP(du_oddf),
+ SH_PFC_PIN_GROUP(du_cde),
+ SH_PFC_PIN_GROUP(du_disp),
+ SH_PFC_PIN_GROUP(du0_clk_in),
+ SH_PFC_PIN_GROUP(du1_clk_in),
+ SH_PFC_PIN_GROUP(du1_clk_in_b),
+ SH_PFC_PIN_GROUP(du1_clk_in_c),
+ SH_PFC_PIN_GROUP(eth_link),
+ SH_PFC_PIN_GROUP(eth_magic),
+ SH_PFC_PIN_GROUP(eth_mdio),
+ SH_PFC_PIN_GROUP(eth_rmii),
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif0_data_b),
+ SH_PFC_PIN_GROUP(hscif0_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif0_data_c),
+ SH_PFC_PIN_GROUP(hscif0_clk_c),
+ SH_PFC_PIN_GROUP(hscif1_data),
+ SH_PFC_PIN_GROUP(hscif1_clk),
+ SH_PFC_PIN_GROUP(hscif1_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_data_c),
+ SH_PFC_PIN_GROUP(hscif1_clk_c),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_c),
+ SH_PFC_PIN_GROUP(hscif1_data_d),
+ SH_PFC_PIN_GROUP(hscif1_data_e),
+ SH_PFC_PIN_GROUP(hscif1_clk_e),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_e),
+ SH_PFC_PIN_GROUP(hscif2_data),
+ SH_PFC_PIN_GROUP(hscif2_clk),
+ SH_PFC_PIN_GROUP(hscif2_ctrl),
+ SH_PFC_PIN_GROUP(hscif2_data_b),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_c),
+ SH_PFC_PIN_GROUP(hscif2_clk_c),
+ SH_PFC_PIN_GROUP(hscif2_data_d),
+ SH_PFC_PIN_GROUP(i2c0),
+ SH_PFC_PIN_GROUP(i2c0_b),
+ SH_PFC_PIN_GROUP(i2c0_c),
+ SH_PFC_PIN_GROUP(i2c1),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c1_c),
+ SH_PFC_PIN_GROUP(i2c1_d),
+ SH_PFC_PIN_GROUP(i2c1_e),
+ SH_PFC_PIN_GROUP(i2c2),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c2_c),
+ SH_PFC_PIN_GROUP(i2c2_d),
+ SH_PFC_PIN_GROUP(i2c3),
+ SH_PFC_PIN_GROUP(i2c3_b),
+ SH_PFC_PIN_GROUP(i2c3_c),
+ SH_PFC_PIN_GROUP(i2c3_d),
+ SH_PFC_PIN_GROUP(i2c4),
+ SH_PFC_PIN_GROUP(i2c4_b),
+ SH_PFC_PIN_GROUP(i2c4_c),
+ SH_PFC_PIN_GROUP(i2c7),
+ SH_PFC_PIN_GROUP(i2c7_b),
+ SH_PFC_PIN_GROUP(i2c7_c),
+ SH_PFC_PIN_GROUP(i2c8),
+ SH_PFC_PIN_GROUP(i2c8_b),
+ SH_PFC_PIN_GROUP(i2c8_c),
+ SH_PFC_PIN_GROUP(intc_irq0),
+ SH_PFC_PIN_GROUP(intc_irq1),
+ SH_PFC_PIN_GROUP(intc_irq2),
+ SH_PFC_PIN_GROUP(intc_irq3),
+ SH_PFC_PIN_GROUP(mmc_data1),
+ SH_PFC_PIN_GROUP(mmc_data4),
+ SH_PFC_PIN_GROUP(mmc_data8),
+ SH_PFC_PIN_GROUP(mmc_ctrl),
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_rx),
+ SH_PFC_PIN_GROUP(msiof0_tx),
+ SH_PFC_PIN_GROUP(msiof0_clk_b),
+ SH_PFC_PIN_GROUP(msiof0_sync_b),
+ SH_PFC_PIN_GROUP(msiof0_ss1_b),
+ SH_PFC_PIN_GROUP(msiof0_ss2_b),
+ SH_PFC_PIN_GROUP(msiof0_rx_b),
+ SH_PFC_PIN_GROUP(msiof0_tx_b),
+ SH_PFC_PIN_GROUP(msiof0_clk_c),
+ SH_PFC_PIN_GROUP(msiof0_sync_c),
+ SH_PFC_PIN_GROUP(msiof0_ss1_c),
+ SH_PFC_PIN_GROUP(msiof0_ss2_c),
+ SH_PFC_PIN_GROUP(msiof0_rx_c),
+ SH_PFC_PIN_GROUP(msiof0_tx_c),
+ SH_PFC_PIN_GROUP(msiof1_clk),
+ SH_PFC_PIN_GROUP(msiof1_sync),
+ SH_PFC_PIN_GROUP(msiof1_ss1),
+ SH_PFC_PIN_GROUP(msiof1_ss2),
+ SH_PFC_PIN_GROUP(msiof1_rx),
+ SH_PFC_PIN_GROUP(msiof1_tx),
+ SH_PFC_PIN_GROUP(msiof1_clk_b),
+ SH_PFC_PIN_GROUP(msiof1_sync_b),
+ SH_PFC_PIN_GROUP(msiof1_ss1_b),
+ SH_PFC_PIN_GROUP(msiof1_ss2_b),
+ SH_PFC_PIN_GROUP(msiof1_rx_b),
+ SH_PFC_PIN_GROUP(msiof1_tx_b),
+ SH_PFC_PIN_GROUP(msiof1_clk_c),
+ SH_PFC_PIN_GROUP(msiof1_sync_c),
+ SH_PFC_PIN_GROUP(msiof1_rx_c),
+ SH_PFC_PIN_GROUP(msiof1_tx_c),
+ SH_PFC_PIN_GROUP(msiof1_clk_d),
+ SH_PFC_PIN_GROUP(msiof1_sync_d),
+ SH_PFC_PIN_GROUP(msiof1_ss1_d),
+ SH_PFC_PIN_GROUP(msiof1_rx_d),
+ SH_PFC_PIN_GROUP(msiof1_tx_d),
+ SH_PFC_PIN_GROUP(msiof1_clk_e),
+ SH_PFC_PIN_GROUP(msiof1_sync_e),
+ SH_PFC_PIN_GROUP(msiof1_rx_e),
+ SH_PFC_PIN_GROUP(msiof1_tx_e),
+ SH_PFC_PIN_GROUP(msiof2_clk),
+ SH_PFC_PIN_GROUP(msiof2_sync),
+ SH_PFC_PIN_GROUP(msiof2_ss1),
+ SH_PFC_PIN_GROUP(msiof2_ss2),
+ SH_PFC_PIN_GROUP(msiof2_rx),
+ SH_PFC_PIN_GROUP(msiof2_tx),
+ SH_PFC_PIN_GROUP(msiof2_clk_b),
+ SH_PFC_PIN_GROUP(msiof2_sync_b),
+ SH_PFC_PIN_GROUP(msiof2_ss1_b),
+ SH_PFC_PIN_GROUP(msiof2_ss2_b),
+ SH_PFC_PIN_GROUP(msiof2_rx_b),
+ SH_PFC_PIN_GROUP(msiof2_tx_b),
+ SH_PFC_PIN_GROUP(msiof2_clk_c),
+ SH_PFC_PIN_GROUP(msiof2_sync_c),
+ SH_PFC_PIN_GROUP(msiof2_rx_c),
+ SH_PFC_PIN_GROUP(msiof2_tx_c),
+ SH_PFC_PIN_GROUP(msiof2_clk_d),
+ SH_PFC_PIN_GROUP(msiof2_sync_d),
+ SH_PFC_PIN_GROUP(msiof2_ss1_d),
+ SH_PFC_PIN_GROUP(msiof2_ss2_d),
+ SH_PFC_PIN_GROUP(msiof2_rx_d),
+ SH_PFC_PIN_GROUP(msiof2_tx_d),
+ SH_PFC_PIN_GROUP(msiof2_clk_e),
+ SH_PFC_PIN_GROUP(msiof2_sync_e),
+ SH_PFC_PIN_GROUP(msiof2_rx_e),
+ SH_PFC_PIN_GROUP(msiof2_tx_e),
+ SH_PFC_PIN_GROUP(pwm0),
+ SH_PFC_PIN_GROUP(pwm0_b),
+ SH_PFC_PIN_GROUP(pwm1),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm2),
+ SH_PFC_PIN_GROUP(pwm2_b),
+ SH_PFC_PIN_GROUP(pwm3),
+ SH_PFC_PIN_GROUP(pwm4),
+ SH_PFC_PIN_GROUP(pwm4_b),
+ SH_PFC_PIN_GROUP(pwm5),
+ SH_PFC_PIN_GROUP(pwm5_b),
+ SH_PFC_PIN_GROUP(pwm6),
+ SH_PFC_PIN_GROUP(qspi_ctrl),
+ SH_PFC_PIN_GROUP(qspi_data2),
+ SH_PFC_PIN_GROUP(qspi_data4),
+ SH_PFC_PIN_GROUP(qspi_ctrl_b),
+ SH_PFC_PIN_GROUP(qspi_data2_b),
+ SH_PFC_PIN_GROUP(qspi_data4_b),
+ SH_PFC_PIN_GROUP(scif0_data),
+ SH_PFC_PIN_GROUP(scif0_data_b),
+ SH_PFC_PIN_GROUP(scif0_data_c),
+ SH_PFC_PIN_GROUP(scif0_data_d),
+ SH_PFC_PIN_GROUP(scif0_data_e),
+ SH_PFC_PIN_GROUP(scif1_data),
+ SH_PFC_PIN_GROUP(scif1_data_b),
+ SH_PFC_PIN_GROUP(scif1_clk_b),
+ SH_PFC_PIN_GROUP(scif1_data_c),
+ SH_PFC_PIN_GROUP(scif1_data_d),
+ SH_PFC_PIN_GROUP(scif2_data),
+ SH_PFC_PIN_GROUP(scif2_data_b),
+ SH_PFC_PIN_GROUP(scif2_clk_b),
+ SH_PFC_PIN_GROUP(scif2_data_c),
+ SH_PFC_PIN_GROUP(scif2_data_e),
+ SH_PFC_PIN_GROUP(scif3_data),
+ SH_PFC_PIN_GROUP(scif3_clk),
+ SH_PFC_PIN_GROUP(scif3_data_b),
+ SH_PFC_PIN_GROUP(scif3_clk_b),
+ SH_PFC_PIN_GROUP(scif3_data_c),
+ SH_PFC_PIN_GROUP(scif3_data_d),
+ SH_PFC_PIN_GROUP(scif4_data),
+ SH_PFC_PIN_GROUP(scif4_data_b),
+ SH_PFC_PIN_GROUP(scif4_data_c),
+ SH_PFC_PIN_GROUP(scif5_data),
+ SH_PFC_PIN_GROUP(scif5_data_b),
+ SH_PFC_PIN_GROUP(scifa0_data),
+ SH_PFC_PIN_GROUP(scifa0_data_b),
+ SH_PFC_PIN_GROUP(scifa1_data),
+ SH_PFC_PIN_GROUP(scifa1_clk),
+ SH_PFC_PIN_GROUP(scifa1_data_b),
+ SH_PFC_PIN_GROUP(scifa1_clk_b),
+ SH_PFC_PIN_GROUP(scifa1_data_c),
+ SH_PFC_PIN_GROUP(scifa2_data),
+ SH_PFC_PIN_GROUP(scifa2_clk),
+ SH_PFC_PIN_GROUP(scifa2_data_b),
+ SH_PFC_PIN_GROUP(scifa3_data),
+ SH_PFC_PIN_GROUP(scifa3_clk),
+ SH_PFC_PIN_GROUP(scifa3_data_b),
+ SH_PFC_PIN_GROUP(scifa3_clk_b),
+ SH_PFC_PIN_GROUP(scifa3_data_c),
+ SH_PFC_PIN_GROUP(scifa3_clk_c),
+ SH_PFC_PIN_GROUP(scifa4_data),
+ SH_PFC_PIN_GROUP(scifa4_data_b),
+ SH_PFC_PIN_GROUP(scifa4_data_c),
+ SH_PFC_PIN_GROUP(scifa5_data),
+ SH_PFC_PIN_GROUP(scifa5_data_b),
+ SH_PFC_PIN_GROUP(scifa5_data_c),
+ SH_PFC_PIN_GROUP(scifb0_data),
+ SH_PFC_PIN_GROUP(scifb0_clk),
+ SH_PFC_PIN_GROUP(scifb0_ctrl),
+ SH_PFC_PIN_GROUP(scifb0_data_b),
+ SH_PFC_PIN_GROUP(scifb0_clk_b),
+ SH_PFC_PIN_GROUP(scifb0_ctrl_b),
+ SH_PFC_PIN_GROUP(scifb0_data_c),
+ SH_PFC_PIN_GROUP(scifb0_clk_c),
+ SH_PFC_PIN_GROUP(scifb0_data_d),
+ SH_PFC_PIN_GROUP(scifb0_clk_d),
+ SH_PFC_PIN_GROUP(scifb1_data),
+ SH_PFC_PIN_GROUP(scifb1_clk),
+ SH_PFC_PIN_GROUP(scifb1_ctrl),
+ SH_PFC_PIN_GROUP(scifb1_data_b),
+ SH_PFC_PIN_GROUP(scifb1_clk_b),
+ SH_PFC_PIN_GROUP(scifb1_data_c),
+ SH_PFC_PIN_GROUP(scifb1_clk_c),
+ SH_PFC_PIN_GROUP(scifb1_data_d),
+ SH_PFC_PIN_GROUP(scifb2_data),
+ SH_PFC_PIN_GROUP(scifb2_clk),
+ SH_PFC_PIN_GROUP(scifb2_ctrl),
+ SH_PFC_PIN_GROUP(scifb2_data_b),
+ SH_PFC_PIN_GROUP(scifb2_clk_b),
+ SH_PFC_PIN_GROUP(scifb2_ctrl_b),
+ SH_PFC_PIN_GROUP(scifb2_data_c),
+ SH_PFC_PIN_GROUP(scifb2_clk_c),
+ SH_PFC_PIN_GROUP(scifb2_data_d),
+ SH_PFC_PIN_GROUP(scif_clk),
+ SH_PFC_PIN_GROUP(scif_clk_b),
+ SH_PFC_PIN_GROUP(sdhi0_data1),
+ SH_PFC_PIN_GROUP(sdhi0_data4),
+ SH_PFC_PIN_GROUP(sdhi0_ctrl),
+ SH_PFC_PIN_GROUP(sdhi0_cd),
+ SH_PFC_PIN_GROUP(sdhi0_wp),
+ SH_PFC_PIN_GROUP(sdhi1_data1),
+ SH_PFC_PIN_GROUP(sdhi1_data4),
+ SH_PFC_PIN_GROUP(sdhi1_ctrl),
+ SH_PFC_PIN_GROUP(sdhi1_cd),
+ SH_PFC_PIN_GROUP(sdhi1_wp),
+ SH_PFC_PIN_GROUP(sdhi2_data1),
+ SH_PFC_PIN_GROUP(sdhi2_data4),
+ SH_PFC_PIN_GROUP(sdhi2_ctrl),
+ SH_PFC_PIN_GROUP(sdhi2_cd),
+ SH_PFC_PIN_GROUP(sdhi2_wp),
+ SH_PFC_PIN_GROUP(ssi0_data),
+ SH_PFC_PIN_GROUP(ssi0_data_b),
+ SH_PFC_PIN_GROUP(ssi0129_ctrl),
+ SH_PFC_PIN_GROUP(ssi0129_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi1_data),
+ SH_PFC_PIN_GROUP(ssi1_data_b),
+ SH_PFC_PIN_GROUP(ssi1_ctrl),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi2_data),
+ SH_PFC_PIN_GROUP(ssi2_ctrl),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi34_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_data),
+ SH_PFC_PIN_GROUP(ssi4_ctrl),
+ SH_PFC_PIN_GROUP(ssi5_data),
+ SH_PFC_PIN_GROUP(ssi5_ctrl),
+ SH_PFC_PIN_GROUP(ssi6_data),
+ SH_PFC_PIN_GROUP(ssi6_ctrl),
+ SH_PFC_PIN_GROUP(ssi7_data),
+ SH_PFC_PIN_GROUP(ssi7_data_b),
+ SH_PFC_PIN_GROUP(ssi78_ctrl),
+ SH_PFC_PIN_GROUP(ssi78_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi8_data),
+ SH_PFC_PIN_GROUP(ssi8_data_b),
+ SH_PFC_PIN_GROUP(ssi9_data),
+ SH_PFC_PIN_GROUP(ssi9_data_b),
+ SH_PFC_PIN_GROUP(ssi9_ctrl),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_b),
+ SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb1),
+ VIN_DATA_PIN_GROUP(vin0_data, 24),
+ VIN_DATA_PIN_GROUP(vin0_data, 20),
+ SH_PFC_PIN_GROUP(vin0_data18),
+ VIN_DATA_PIN_GROUP(vin0_data, 16),
+ VIN_DATA_PIN_GROUP(vin0_data, 12),
+ VIN_DATA_PIN_GROUP(vin0_data, 10),
+ VIN_DATA_PIN_GROUP(vin0_data, 8),
+ SH_PFC_PIN_GROUP(vin0_sync),
+ SH_PFC_PIN_GROUP(vin0_field),
+ SH_PFC_PIN_GROUP(vin0_clkenb),
+ SH_PFC_PIN_GROUP(vin0_clk),
+ SH_PFC_PIN_GROUP(vin1_data8),
+ SH_PFC_PIN_GROUP(vin1_sync),
+ SH_PFC_PIN_GROUP(vin1_field),
+ SH_PFC_PIN_GROUP(vin1_clkenb),
+ SH_PFC_PIN_GROUP(vin1_clk),
+ VIN_DATA_PIN_GROUP(vin1_b_data, 24),
+ VIN_DATA_PIN_GROUP(vin1_b_data, 20),
+ SH_PFC_PIN_GROUP(vin1_b_data18),
+ VIN_DATA_PIN_GROUP(vin1_b_data, 16),
+ VIN_DATA_PIN_GROUP(vin1_b_data, 12),
+ VIN_DATA_PIN_GROUP(vin1_b_data, 10),
+ VIN_DATA_PIN_GROUP(vin1_b_data, 8),
+ SH_PFC_PIN_GROUP(vin1_b_sync),
+ SH_PFC_PIN_GROUP(vin1_b_field),
+ SH_PFC_PIN_GROUP(vin1_b_clkenb),
+ SH_PFC_PIN_GROUP(vin1_b_clk),
+ SH_PFC_PIN_GROUP(vin2_data8),
+ SH_PFC_PIN_GROUP(vin2_sync),
+ SH_PFC_PIN_GROUP(vin2_field),
+ SH_PFC_PIN_GROUP(vin2_clkenb),
+ SH_PFC_PIN_GROUP(vin2_clk),
+ },
+ .r8a779x = {
+ SH_PFC_PIN_GROUP(adi_common),
+ SH_PFC_PIN_GROUP(adi_chsel0),
+ SH_PFC_PIN_GROUP(adi_chsel1),
+ SH_PFC_PIN_GROUP(adi_chsel2),
+ SH_PFC_PIN_GROUP(adi_common_b),
+ SH_PFC_PIN_GROUP(adi_chsel0_b),
+ SH_PFC_PIN_GROUP(adi_chsel1_b),
+ SH_PFC_PIN_GROUP(adi_chsel2_b),
+ SH_PFC_PIN_GROUP(mlb_3pin),
+ }
};
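
The wrapper struct above keeps the shared pin groups in .common and the R-Car-only ADI/MLB groups in .r8a779x. Because both members are arrays of the same element type, the compiler lays them out back to back, so an SoC that wants everything can point at .common and report the summed element count, while an SoC that only has the shared set reports just the first array. A minimal sketch of how the SoC info blocks later in this patch consume the split (illustrative excerpt of the initializers, not a complete structure):

	/* R8A7791/R8A7793: common groups plus the trailing R-Car-only ones. */
	.groups = pinmux_groups.common,
	.nr_groups = ARRAY_SIZE(pinmux_groups.common) +
		     ARRAY_SIZE(pinmux_groups.r8a779x),

	/* R8A7743: common groups only, ADI/MLB never advertised. */
	.groups = pinmux_groups.common,
	.nr_groups = ARRAY_SIZE(pinmux_groups.common),

The same pattern is applied to pinmux_functions further down, with the adi and mlb functions moved into the r8a779x half.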
static const char * const adi_groups[] = {
@@ -5280,65 +5294,72 @@ static const char * const vin2_groups[] = {
"vin2_clk",
};
-static const struct sh_pfc_function pinmux_functions[] = {
- SH_PFC_FUNCTION(adi),
- SH_PFC_FUNCTION(audio_clk),
- SH_PFC_FUNCTION(avb),
- SH_PFC_FUNCTION(can0),
- SH_PFC_FUNCTION(can1),
- SH_PFC_FUNCTION(du),
- SH_PFC_FUNCTION(du0),
- SH_PFC_FUNCTION(du1),
- SH_PFC_FUNCTION(eth),
- SH_PFC_FUNCTION(hscif0),
- SH_PFC_FUNCTION(hscif1),
- SH_PFC_FUNCTION(hscif2),
- SH_PFC_FUNCTION(i2c0),
- SH_PFC_FUNCTION(i2c1),
- SH_PFC_FUNCTION(i2c2),
- SH_PFC_FUNCTION(i2c3),
- SH_PFC_FUNCTION(i2c4),
- SH_PFC_FUNCTION(i2c7),
- SH_PFC_FUNCTION(i2c8),
- SH_PFC_FUNCTION(intc),
- SH_PFC_FUNCTION(mlb),
- SH_PFC_FUNCTION(mmc),
- SH_PFC_FUNCTION(msiof0),
- SH_PFC_FUNCTION(msiof1),
- SH_PFC_FUNCTION(msiof2),
- SH_PFC_FUNCTION(pwm0),
- SH_PFC_FUNCTION(pwm1),
- SH_PFC_FUNCTION(pwm2),
- SH_PFC_FUNCTION(pwm3),
- SH_PFC_FUNCTION(pwm4),
- SH_PFC_FUNCTION(pwm5),
- SH_PFC_FUNCTION(pwm6),
- SH_PFC_FUNCTION(qspi),
- SH_PFC_FUNCTION(scif0),
- SH_PFC_FUNCTION(scif1),
- SH_PFC_FUNCTION(scif2),
- SH_PFC_FUNCTION(scif3),
- SH_PFC_FUNCTION(scif4),
- SH_PFC_FUNCTION(scif5),
- SH_PFC_FUNCTION(scifa0),
- SH_PFC_FUNCTION(scifa1),
- SH_PFC_FUNCTION(scifa2),
- SH_PFC_FUNCTION(scifa3),
- SH_PFC_FUNCTION(scifa4),
- SH_PFC_FUNCTION(scifa5),
- SH_PFC_FUNCTION(scifb0),
- SH_PFC_FUNCTION(scifb1),
- SH_PFC_FUNCTION(scifb2),
- SH_PFC_FUNCTION(scif_clk),
- SH_PFC_FUNCTION(sdhi0),
- SH_PFC_FUNCTION(sdhi1),
- SH_PFC_FUNCTION(sdhi2),
- SH_PFC_FUNCTION(ssi),
- SH_PFC_FUNCTION(usb0),
- SH_PFC_FUNCTION(usb1),
- SH_PFC_FUNCTION(vin0),
- SH_PFC_FUNCTION(vin1),
- SH_PFC_FUNCTION(vin2),
+static const struct {
+ struct sh_pfc_function common[56];
+ struct sh_pfc_function r8a779x[2];
+} pinmux_functions = {
+ .common = {
+ SH_PFC_FUNCTION(audio_clk),
+ SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(can0),
+ SH_PFC_FUNCTION(can1),
+ SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(du0),
+ SH_PFC_FUNCTION(du1),
+ SH_PFC_FUNCTION(eth),
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
+ SH_PFC_FUNCTION(i2c0),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
+ SH_PFC_FUNCTION(i2c4),
+ SH_PFC_FUNCTION(i2c7),
+ SH_PFC_FUNCTION(i2c8),
+ SH_PFC_FUNCTION(intc),
+ SH_PFC_FUNCTION(mmc),
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+ SH_PFC_FUNCTION(pwm5),
+ SH_PFC_FUNCTION(pwm6),
+ SH_PFC_FUNCTION(qspi),
+ SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif2),
+ SH_PFC_FUNCTION(scif3),
+ SH_PFC_FUNCTION(scif4),
+ SH_PFC_FUNCTION(scif5),
+ SH_PFC_FUNCTION(scifa0),
+ SH_PFC_FUNCTION(scifa1),
+ SH_PFC_FUNCTION(scifa2),
+ SH_PFC_FUNCTION(scifa3),
+ SH_PFC_FUNCTION(scifa4),
+ SH_PFC_FUNCTION(scifa5),
+ SH_PFC_FUNCTION(scifb0),
+ SH_PFC_FUNCTION(scifb1),
+ SH_PFC_FUNCTION(scifb2),
+ SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(sdhi0),
+ SH_PFC_FUNCTION(sdhi1),
+ SH_PFC_FUNCTION(sdhi2),
+ SH_PFC_FUNCTION(ssi),
+ SH_PFC_FUNCTION(usb0),
+ SH_PFC_FUNCTION(usb1),
+ SH_PFC_FUNCTION(vin0),
+ SH_PFC_FUNCTION(vin1),
+ SH_PFC_FUNCTION(vin2),
+ },
+ .r8a779x = {
+ SH_PFC_FUNCTION(adi),
+ SH_PFC_FUNCTION(mlb),
+ }
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -5638,7 +5659,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_A1, FN_MSIOF0_SYNC_B,
0, 0,
/* IP0_18_16 [3] */
- FN_A0, FN_ATAWR0_N_C, FN_MSIOF0_SCK_B, FN_SCL0_C, FN_PWM2_B,
+ FN_A0, FN_ATAWR0_N_C, FN_MSIOF0_SCK_B, FN_I2C0_SCL_C, FN_PWM2_B,
0, 0, 0,
/* IP0_15 [1] */
FN_D15, 0,
@@ -5679,7 +5700,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_A18, FN_DREQ1, FN_SCIFA1_RXD_C, 0, FN_SCIFB1_RXD_C,
0, 0, 0,
/* IP1_28_26 [3] */
- FN_A17, FN_DACK2_B, 0, FN_SDA0_C,
+ FN_A17, FN_DACK2_B, 0, FN_I2C0_SDA_C,
0, 0, 0, 0,
/* IP1_25_23 [3] */
FN_A16, FN_DREQ2_B, FN_FMCLK_C, 0, FN_SCIFA1_SCK_B,
@@ -5694,17 +5715,17 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_A13, FN_ATAG0_N_C, FN_BPFCLK, FN_MSIOF1_SS1_D,
0, 0, 0, 0,
/* IP1_13_11 [3] */
- FN_A12, FN_FMCLK, FN_SDA3_D, FN_MSIOF1_SCK_D,
+ FN_A12, FN_FMCLK, FN_I2C3_SDA_D, FN_MSIOF1_SCK_D,
0, 0, 0, 0,
/* IP1_10_8 [3] */
- FN_A11, FN_MSIOF1_RXD, FN_SCL3_D, FN_MSIOF1_RXD_D,
+ FN_A11, FN_MSIOF1_RXD, FN_I2C3_SCL_D, FN_MSIOF1_RXD_D,
0, 0, 0, 0,
/* IP1_7_6 [2] */
FN_A10, FN_MSIOF1_TXD, 0, FN_MSIOF1_TXD_D,
/* IP1_5_4 [2] */
- FN_A9, FN_MSIOF1_SS2, FN_SDA0, 0,
+ FN_A9, FN_MSIOF1_SS2, FN_I2C0_SDA, 0,
/* IP1_3_2 [2] */
- FN_A8, FN_MSIOF1_SS1, FN_SCL0, 0,
+ FN_A8, FN_MSIOF1_SS1, FN_I2C0_SCL, 0,
/* IP1_1_0 [2] */
FN_A7, FN_MSIOF1_SYNC,
0, 0, }
@@ -5722,9 +5743,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP2_24_23 [2] */
FN_EX_CS1_N, FN_MSIOF2_SCK, 0, 0,
/* IP2_22_21 [2] */
- FN_CS1_N_A26, FN_ATADIR0_N_B, FN_SDA1, 0,
+ FN_CS1_N_A26, FN_ATADIR0_N_B, FN_I2C1_SDA, 0,
/* IP2_20_19 [2] */
- FN_CS0_N, FN_ATAG0_N_B, FN_SCL1, 0,
+ FN_CS0_N, FN_ATAG0_N_B, FN_I2C1_SCL, 0,
/* IP2_18_16 [3] */
FN_A25, FN_DACK2, FN_SSL, FN_DREQ1_C, FN_RX1, FN_SCIFA1_RXD,
0, 0,
@@ -5807,23 +5828,23 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SSI_SDATA2, FN_GPS_MAG_B, FN_TX2_E, FN_HRTS1_N_E,
0, 0, 0, 0,
/* IP4_15_13 [3] */
- FN_SSI_WS2, FN_SDA2, FN_GPS_SIGN_B, FN_RX2_E,
+ FN_SSI_WS2, FN_I2C2_SDA, FN_GPS_SIGN_B, FN_RX2_E,
FN_GLO_Q1_D, FN_HCTS1_N_E,
0, 0,
/* IP4_12_10 [3] */
- FN_SSI_SCK2, FN_SCL2, FN_GPS_CLK_B, FN_GLO_Q0_D, FN_HSCK1_E,
+ FN_SSI_SCK2, FN_I2C2_SCL, FN_GPS_CLK_B, FN_GLO_Q0_D, FN_HSCK1_E,
0, 0, 0,
/* IP4_9_8 [2] */
- FN_SSI_SDATA1, FN_SDA1_B, FN_SDA8_B, FN_MSIOF2_RXD_C,
+ FN_SSI_SDATA1, FN_I2C1_SDA_B, FN_IIC1_SDA_B, FN_MSIOF2_RXD_C,
/* IP4_7_5 [3] */
- FN_SSI_WS1, FN_SCL1_B, FN_SCL8_B, FN_MSIOF2_TXD_C, FN_GLO_I1_D,
- 0, 0, 0,
+ FN_SSI_WS1, FN_I2C1_SCL_B, FN_IIC1_SCL_B, FN_MSIOF2_TXD_C,
+ FN_GLO_I1_D, 0, 0, 0,
/* IP4_4_2 [3] */
- FN_SSI_SCK1, FN_SDA0_B, FN_SDA7_B,
+ FN_SSI_SCK1, FN_I2C0_SDA_B, FN_IIC0_SDA_B,
FN_MSIOF2_SYNC_C, FN_GLO_I0_D,
0, 0, 0,
/* IP4_1_0 [2] */
- FN_SSI_SDATA0, FN_SCL0_B, FN_SCL7_B, FN_MSIOF2_SCK_C, }
+ FN_SSI_SDATA0, FN_I2C0_SCL_B, FN_IIC0_SCL_B, FN_MSIOF2_SCK_C, }
},
{ PINMUX_CFG_REG_VAR("IPSR5", 0xE6060034, 32,
3, 3, 2, 2, 2, 3, 2, 3, 3, 3, 3, 3) {
@@ -5877,15 +5898,15 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0,
/* IP6_23_21 [3] */
FN_IRQ6, FN_HSCK1_C, FN_MSIOF1_SS2_B,
- FN_SDA1_E, FN_MSIOF2_SYNC_E,
+ FN_I2C1_SDA_E, FN_MSIOF2_SYNC_E,
0, 0, 0,
/* IP6_20_19 [2] */
- FN_IRQ5, FN_HTX1_C, FN_SCL1_E, FN_MSIOF2_SCK_E,
+ FN_IRQ5, FN_HTX1_C, FN_I2C1_SCL_E, FN_MSIOF2_SCK_E,
/* IP6_18_16 [3] */
- FN_IRQ4, FN_HRX1_C, FN_SDA4_C, FN_MSIOF2_RXD_E, FN_INTC_IRQ4_N,
- 0, 0, 0,
+ FN_IRQ4, FN_HRX1_C, FN_I2C4_SDA_C, FN_MSIOF2_RXD_E,
+ FN_INTC_IRQ4_N, 0, 0, 0,
/* IP6_15_14 [2] */
- FN_IRQ3, FN_SCL4_C, FN_MSIOF2_TXD_E, FN_INTC_IRQ3_N,
+ FN_IRQ3, FN_I2C4_SCL_C, FN_MSIOF2_TXD_E, FN_INTC_IRQ3_N,
/* IP6_13_12 [2] */
FN_IRQ2, FN_SCIFB1_TXD_D, FN_INTC_IRQ2_N, 0,
/* IP6_11_10 [2] */
@@ -5990,7 +6011,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG_VAR("IPSR9", 0xE6060044, 32,
3, 2, 2, 2, 2, 2, 2, 1, 3, 1, 1, 3, 1, 1, 3, 3) {
/* IP9_31_29 [3] */
- FN_VI0_G0, FN_SCL8, FN_STP_IVCXO27_0_C, FN_SCL4,
+ FN_VI0_G0, FN_IIC1_SCL, FN_STP_IVCXO27_0_C, FN_I2C4_SCL,
FN_HCTS2_N, FN_SCIFB2_CTS_N, FN_ATAWR1_N, 0,
/* IP9_28_27 [2] */
FN_VI0_DATA3_VI0_B3, FN_SCIF3_SCK_B, FN_SCIFA3_SCK_B, 0,
@@ -6008,7 +6029,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DU1_DISP, FN_QPOLA,
/* IP9_15_13 [3] */
FN_DU1_EXODDF_DU1_ODDF_DISP_CDE, FN_QCPV_QDE,
- FN_CAN0_RX, FN_RX3_B, FN_SDA2_B,
+ FN_CAN0_RX, FN_RX3_B, FN_I2C2_SDA_B,
0, 0, 0,
/* IP9_12 [1] */
FN_DU1_EXVSYNC_DU1_VSYNC, FN_QSTB_QHE,
@@ -6016,24 +6037,24 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_DU1_EXHSYNC_DU1_HSYNC, FN_QSTH_QHS,
/* IP9_10_8 [3] */
FN_DU1_DOTCLKOUT1, FN_QSTVB_QVE, FN_CAN0_TX,
- FN_TX3_B, FN_SCL2_B, FN_PWM4,
+ FN_TX3_B, FN_I2C2_SCL_B, FN_PWM4,
0, 0,
/* IP9_7 [1] */
FN_DU1_DOTCLKOUT0, FN_QCLK,
/* IP9_6 [1] */
FN_DU1_DOTCLKIN, FN_QSTVA_QVS,
/* IP9_5_3 [3] */
- FN_DU1_DB7, FN_LCDOUT23, FN_SDA3_C,
+ FN_DU1_DB7, FN_LCDOUT23, FN_I2C3_SDA_C,
FN_SCIF3_SCK, FN_SCIFA3_SCK,
0, 0, 0,
/* IP9_2_0 [3] */
- FN_DU1_DB6, FN_LCDOUT22, FN_SCL3_C, FN_RX3, FN_SCIFA3_RXD,
+ FN_DU1_DB6, FN_LCDOUT22, FN_I2C3_SCL_C, FN_RX3, FN_SCIFA3_RXD,
0, 0, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR10", 0xE6060048, 32,
3, 2, 2, 3, 3, 2, 2, 3, 3, 3, 3, 3) {
/* IP10_31_29 [3] */
- FN_VI0_R4, FN_VI2_DATA5, FN_GLO_SCLK_B, FN_TX0_C, FN_SCL1_D,
+ FN_VI0_R4, FN_VI2_DATA5, FN_GLO_SCLK_B, FN_TX0_C, FN_I2C1_SCL_D,
0, 0, 0,
/* IP10_28_27 [2] */
FN_VI0_R3, FN_VI2_DATA4, FN_GLO_Q1_B, FN_TS_SPSYNC0_C,
@@ -6058,22 +6079,22 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_HTX2, FN_SCIFB2_TXD, FN_SCIFB0_SCK_D,
0, 0,
/* IP10_8_6 [3] */
- FN_VI0_G3, FN_VI2_VSYNC_N, FN_STP_ISEN_0_C, FN_SDA3_B,
+ FN_VI0_G3, FN_VI2_VSYNC_N, FN_STP_ISEN_0_C, FN_I2C3_SDA_B,
FN_HRX2, FN_SCIFB2_RXD, FN_ATACS01_N, 0,
/* IP10_5_3 [3] */
- FN_VI0_G2, FN_VI2_HSYNC_N, FN_STP_ISD_0_C, FN_SCL3_B,
+ FN_VI0_G2, FN_VI2_HSYNC_N, FN_STP_ISD_0_C, FN_I2C3_SCL_B,
FN_HSCK2, FN_SCIFB2_SCK, FN_ATARD1_N, 0,
/* IP10_2_0 [3] */
- FN_VI0_G1, FN_SDA8, FN_STP_ISCLK_0_C, FN_SDA4,
+ FN_VI0_G1, FN_IIC1_SDA, FN_STP_ISCLK_0_C, FN_I2C4_SDA,
FN_HRTS2_N, FN_SCIFB2_RTS_N, FN_ATADIR1_N, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR11", 0xE606004C, 32,
2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
3, 3, 3, 3, 3) {
/* IP11_31_30 [2] */
- FN_ETH_CRS_DV, FN_AVB_LINK, FN_SDA2_C, 0,
+ FN_ETH_CRS_DV, FN_AVB_LINK, FN_I2C2_SDA_C, 0,
/* IP11_29_28 [2] */
- FN_ETH_MDIO, FN_AVB_RX_CLK, FN_SCL2_C, 0,
+ FN_ETH_MDIO, FN_AVB_RX_CLK, FN_I2C2_SCL_C, 0,
/* IP11_27 [1] */
FN_VI1_DATA7, FN_AVB_MDC,
/* IP11_26 [1] */
@@ -6106,13 +6127,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0,
/* IP11_8_6 [3] */
FN_VI0_R7, FN_GLO_RFON_B, FN_RX1_C, FN_CAN0_RX_E,
- FN_SDA4_B, FN_HRX1_D, FN_SCIFB0_RXD_D, 0,
+ FN_I2C4_SDA_B, FN_HRX1_D, FN_SCIFB0_RXD_D, 0,
/* IP11_5_3 [3] */
- FN_VI0_R6, FN_VI2_DATA7, FN_GLO_SS_B, FN_TX1_C, FN_SCL4_B,
+ FN_VI0_R6, FN_VI2_DATA7, FN_GLO_SS_B, FN_TX1_C, FN_I2C4_SCL_B,
0, 0, 0,
/* IP11_2_0 [3] */
- FN_VI0_R5, FN_VI2_DATA6, FN_GLO_SDATA_B, FN_RX0_C, FN_SDA1_D,
- 0, 0, 0, }
+ FN_VI0_R5, FN_VI2_DATA6, FN_GLO_SDATA_B, FN_RX0_C,
+ FN_I2C1_SDA_D, 0, 0, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR12", 0xE6060050, 32,
2, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2) {
@@ -6144,16 +6165,16 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0,
/* IP12_9_7 [3] */
FN_ETH_LINK, FN_AVB_TXD0, FN_CAN0_RX_C,
- FN_SDA2_D, FN_MSIOF1_SCK_E,
+ FN_I2C2_SDA_D, FN_MSIOF1_SCK_E,
0, 0, 0,
/* IP12_6_4 [3] */
FN_ETH_RXD1, FN_AVB_GTXREFCLK, FN_CAN0_TX_C,
- FN_SCL2_D, FN_MSIOF1_RXD_E,
+ FN_I2C2_SCL_D, FN_MSIOF1_RXD_E,
0, 0, 0,
/* IP12_3_2 [2] */
- FN_ETH_RXD0, FN_AVB_PHY_INT, FN_SDA3, FN_SDA7,
+ FN_ETH_RXD0, FN_AVB_PHY_INT, FN_I2C3_SDA, FN_IIC0_SDA,
/* IP12_1_0 [2] */
- FN_ETH_RX_ER, FN_AVB_CRS, FN_SCL3, FN_SCL7, }
+ FN_ETH_RX_ER, FN_AVB_CRS, FN_I2C3_SCL, FN_IIC0_SCL, }
},
{ PINMUX_CFG_REG_VAR("IPSR13", 0xE6060054, 32,
1, 3, 1, 1, 1, 2, 1, 3, 3, 1, 1, 1, 1, 1, 1,
@@ -6161,7 +6182,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP13_31 [1] */
0, 0,
/* IP13_30_28 [3] */
- FN_SD1_CD, FN_PWM0, FN_TPU_TO0, FN_SCL1_C,
+ FN_SD1_CD, FN_PWM0, FN_TPU_TO0, FN_I2C1_SCL_C,
0, 0, 0, 0,
/* IP13_27 [1] */
FN_SD1_DATA3, FN_IERX_B,
@@ -6210,10 +6231,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 2) {
/* IP14_31_29 [3] */
FN_MSIOF0_SS2, FN_MMC_D7, FN_ADICHS2, FN_RX0_E,
- FN_VI1_VSYNC_N_C, FN_SDA7_C, FN_VI1_G5_B, 0,
+ FN_VI1_VSYNC_N_C, FN_IIC0_SDA_C, FN_VI1_G5_B, 0,
/* IP14_28_26 [3] */
FN_MSIOF0_SS1, FN_MMC_D6, FN_ADICHS1, FN_TX0_E,
- FN_VI1_HSYNC_N_C, FN_SCL7_C, FN_VI1_G4_B, 0,
+ FN_VI1_HSYNC_N_C, FN_IIC0_SCL_C, FN_VI1_G4_B, 0,
/* IP14_25_23 [3] */
FN_MSIOF0_RXD, FN_ADICHS0, 0, FN_VI1_DATA0_C, FN_VI1_G3_B,
0, 0, 0,
@@ -6229,10 +6250,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI1_CLK_C, FN_VI1_G0_B,
0, 0,
/* IP14_13_11 [3] */
- FN_SD2_WP, FN_MMC_D5, FN_SDA8_C, FN_RX5_B, FN_SCIFA5_RXD_C,
+ FN_SD2_WP, FN_MMC_D5, FN_IIC1_SDA_C, FN_RX5_B, FN_SCIFA5_RXD_C,
0, 0, 0,
/* IP14_10_8 [3] */
- FN_SD2_CD, FN_MMC_D4, FN_SCL8_C, FN_TX5_B, FN_SCIFA5_TXD_C,
+ FN_SD2_CD, FN_MMC_D4, FN_IIC1_SCL_C, FN_TX5_B, FN_SCIFA5_TXD_C,
0, 0, 0,
/* IP14_7 [1] */
FN_SD2_DATA3, FN_MMC_D3,
@@ -6247,7 +6268,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP14_2 [1] */
FN_SD2_CLK, FN_MMC_CLK,
/* IP14_1_0 [2] */
- FN_SD1_WP, FN_PWM1_B, FN_SDA1_C, 0, }
+ FN_SD1_WP, FN_PWM1_B, FN_I2C1_SDA_C, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR15", 0xE606005C, 32,
2, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2) {
@@ -6424,14 +6445,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_CANCLK [2] */
FN_SEL_CANCLK_0, FN_SEL_CANCLK_1,
FN_SEL_CANCLK_2, FN_SEL_CANCLK_3,
- /* SEL_IIC8 [2] */
- FN_SEL_IIC8_0, FN_SEL_IIC8_1, FN_SEL_IIC8_2, 0,
- /* SEL_IIC7 [2] */
- FN_SEL_IIC7_0, FN_SEL_IIC7_1, FN_SEL_IIC7_2, 0,
- /* SEL_IIC4 [2] */
- FN_SEL_IIC4_0, FN_SEL_IIC4_1, FN_SEL_IIC4_2, 0,
- /* SEL_IIC3 [2] */
- FN_SEL_IIC3_0, FN_SEL_IIC3_1, FN_SEL_IIC3_2, FN_SEL_IIC3_3,
+ /* SEL_IIC1 [2] */
+ FN_SEL_IIC1_0, FN_SEL_IIC1_1, FN_SEL_IIC1_2, 0,
+ /* SEL_IIC0 [2] */
+ FN_SEL_IIC0_0, FN_SEL_IIC0_1, FN_SEL_IIC0_2, 0,
+ /* SEL_I2C4 [2] */
+ FN_SEL_I2C4_0, FN_SEL_I2C4_1, FN_SEL_I2C4_2, 0,
+ /* SEL_I2C3 [2] */
+ FN_SEL_I2C3_0, FN_SEL_I2C3_1, FN_SEL_I2C3_2, FN_SEL_I2C3_3,
/* SEL_SCIF3 [2] */
FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3,
/* SEL_IEB [2] */
@@ -6442,14 +6463,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_SCIF5_0, FN_SEL_SCIF5_1,
/* RESERVED [2] */
0, 0, 0, 0,
- /* SEL_IIC2 [2] */
- FN_SEL_IIC2_0, FN_SEL_IIC2_1, FN_SEL_IIC2_2, FN_SEL_IIC2_3,
- /* SEL_IIC1 [3] */
- FN_SEL_IIC1_0, FN_SEL_IIC1_1, FN_SEL_IIC1_2, FN_SEL_IIC1_3,
- FN_SEL_IIC1_4,
+ /* SEL_I2C2 [2] */
+ FN_SEL_I2C2_0, FN_SEL_I2C2_1, FN_SEL_I2C2_2, FN_SEL_I2C2_3,
+ /* SEL_I2C1 [3] */
+ FN_SEL_I2C1_0, FN_SEL_I2C1_1, FN_SEL_I2C1_2, FN_SEL_I2C1_3,
+ FN_SEL_I2C1_4,
0, 0, 0,
- /* SEL_IIC0 [2] */
- FN_SEL_IIC0_0, FN_SEL_IIC0_1, FN_SEL_IIC0_2, 0,
+ /* SEL_I2C0 [2] */
+ FN_SEL_I2C0_0, FN_SEL_I2C0_1, FN_SEL_I2C0_2, 0,
/* RESERVED [2] */
0, 0, 0, 0,
/* RESERVED [2] */
@@ -6520,6 +6541,28 @@ static const struct sh_pfc_soc_operations r8a7791_pinmux_ops = {
.pin_to_pocctrl = r8a7791_pin_to_pocctrl,
};
+#ifdef CONFIG_PINCTRL_PFC_R8A7743
+const struct sh_pfc_soc_info r8a7743_pinmux_info = {
+ .name = "r8a77430_pfc",
+ .ops = &r8a7791_pinmux_ops,
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common),
+
+ .cfg_regs = pinmux_config_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
+#endif
+
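
The new r8a7743_pinmux_info block is compiled only when CONFIG_PINCTRL_PFC_R8A7743 is set; the shared sh-pfc core is then expected to route the matching compatible string to this structure. A hedged sketch of what that match entry typically looks like in the core driver (it lives outside this hunk, so the exact compatible string and table placement are assumptions here):

	/* In the sh-pfc core's of_match table (assumed shape). */
	#ifdef CONFIG_PINCTRL_PFC_R8A7743
		{
			.compatible = "renesas,pfc-r8a7743",
			.data = &r8a7743_pinmux_info,
		},
	#endif
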
#ifdef CONFIG_PINCTRL_PFC_R8A7791
const struct sh_pfc_soc_info r8a7791_pinmux_info = {
.name = "r8a77910_pfc",
@@ -6530,10 +6573,12 @@ const struct sh_pfc_soc_info r8a7791_pinmux_info = {
.pins = pinmux_pins,
.nr_pins = ARRAY_SIZE(pinmux_pins),
- .groups = pinmux_groups,
- .nr_groups = ARRAY_SIZE(pinmux_groups),
- .functions = pinmux_functions,
- .nr_functions = ARRAY_SIZE(pinmux_functions),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common) +
+ ARRAY_SIZE(pinmux_groups.r8a779x),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common) +
+ ARRAY_SIZE(pinmux_functions.r8a779x),
.cfg_regs = pinmux_config_regs,
@@ -6552,10 +6597,12 @@ const struct sh_pfc_soc_info r8a7793_pinmux_info = {
.pins = pinmux_pins,
.nr_pins = ARRAY_SIZE(pinmux_pins),
- .groups = pinmux_groups,
- .nr_groups = ARRAY_SIZE(pinmux_groups),
- .functions = pinmux_functions,
- .nr_functions = ARRAY_SIZE(pinmux_functions),
+ .groups = pinmux_groups.common,
+ .nr_groups = ARRAY_SIZE(pinmux_groups.common) +
+ ARRAY_SIZE(pinmux_groups.r8a779x),
+ .functions = pinmux_functions.common,
+ .nr_functions = ARRAY_SIZE(pinmux_functions.common) +
+ ARRAY_SIZE(pinmux_functions.r8a779x),
.cfg_regs = pinmux_config_regs,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7792.c b/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
index 21badb6166b9..cc3597f66605 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
@@ -1137,6 +1137,43 @@ static const unsigned int scif0_ctrl_pins[] = {
static const unsigned int scif0_ctrl_mux[] = {
RTS0_N_MARK, CTS0_N_MARK,
};
+/* - SCIF1 ------------------------------------------------------------------ */
+static const unsigned int scif1_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(10, 19), RCAR_GP_PIN(10, 18),
+};
+static const unsigned int scif1_data_mux[] = {
+ RX1_MARK, TX1_MARK,
+};
+static const unsigned int scif1_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(10, 15),
+};
+static const unsigned int scif1_clk_mux[] = {
+ SCK1_MARK,
+};
+static const unsigned int scif1_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(10, 17), RCAR_GP_PIN(10, 16),
+};
+static const unsigned int scif1_ctrl_mux[] = {
+ RTS1_N_MARK, CTS1_N_MARK,
+};
+/* - SCIF2 ------------------------------------------------------------------ */
+static const unsigned int scif2_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(10, 22), RCAR_GP_PIN(10, 21),
+};
+static const unsigned int scif2_data_mux[] = {
+ RX2_MARK, TX2_MARK,
+};
+static const unsigned int scif2_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(10, 20),
+};
+static const unsigned int scif2_clk_mux[] = {
+ SCK2_MARK,
+};
/* - SCIF3 ------------------------------------------------------------------ */
static const unsigned int scif3_data_pins[] = {
/* RX, TX */
@@ -1680,6 +1717,11 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(scif0_data),
SH_PFC_PIN_GROUP(scif0_clk),
SH_PFC_PIN_GROUP(scif0_ctrl),
+ SH_PFC_PIN_GROUP(scif1_data),
+ SH_PFC_PIN_GROUP(scif1_clk),
+ SH_PFC_PIN_GROUP(scif1_ctrl),
+ SH_PFC_PIN_GROUP(scif2_data),
+ SH_PFC_PIN_GROUP(scif2_clk),
SH_PFC_PIN_GROUP(scif3_data),
SH_PFC_PIN_GROUP(scif3_clk),
SH_PFC_PIN_GROUP(sdhi0_data1),
@@ -1826,6 +1868,17 @@ static const char * const scif0_groups[] = {
"scif0_ctrl",
};
+static const char * const scif1_groups[] = {
+ "scif1_data",
+ "scif1_clk",
+ "scif1_ctrl",
+};
+
+static const char * const scif2_groups[] = {
+ "scif2_data",
+ "scif2_clk",
+};
+
static const char * const scif3_groups[] = {
"scif3_data",
"scif3_clk",
@@ -1924,6 +1977,8 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(msiof1),
SH_PFC_FUNCTION(qspi),
SH_PFC_FUNCTION(scif0),
+ SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif2),
SH_PFC_FUNCTION(scif3),
SH_PFC_FUNCTION(sdhi0),
SH_PFC_FUNCTION(vin0),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index ef093ac0cf2f..a0ed220071f5 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -1,9 +1,9 @@
/*
- * r8a7794 processor support - PFC hardware block.
+ * r8a7794/r8a7745 processor support - PFC hardware block.
*
* Copyright (C) 2014-2015 Renesas Electronics Corporation
* Copyright (C) 2015 Renesas Solutions Corp.
- * Copyright (C) 2015-2016 Cogent Embedded, Inc., <source@cogentembedded.com>
+ * Copyright (C) 2015-2017 Cogent Embedded, Inc. <source@cogentembedded.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
@@ -105,235 +105,279 @@ enum {
FN_I2C3_SDA_B, FN_SCIF5_TXD_B, FN_D5, FN_SCIF4_RXD_B, FN_I2C0_SCL_D,
/* IPSR1 */
- FN_D6, FN_SCIF4_TXD_B, FN_I2C0_SDA_D, FN_D7, FN_IRQ3, FN_TCLK1,
- FN_PWM6_B, FN_D8, FN_HSCIF2_HRX, FN_I2C1_SCL_B, FN_D9, FN_HSCIF2_HTX,
- FN_I2C1_SDA_B, FN_D10, FN_HSCIF2_HSCK, FN_SCIF1_SCK_C, FN_IRQ6,
- FN_PWM5_C, FN_D11, FN_HSCIF2_HCTS_N, FN_SCIF1_RXD_C, FN_I2C1_SCL_D,
- FN_D12, FN_HSCIF2_HRTS_N, FN_SCIF1_TXD_C, FN_I2C1_SDA_D, FN_D13,
- FN_SCIFA1_SCK, FN_TANS1, FN_PWM2_C, FN_TCLK2_B, FN_D14, FN_SCIFA1_RXD,
- FN_IIC0_SCL_B, FN_D15, FN_SCIFA1_TXD, FN_IIC0_SDA_B, FN_A0,
- FN_SCIFB1_SCK, FN_PWM3_B, FN_A1, FN_SCIFB1_TXD, FN_A3, FN_SCIFB0_SCK,
- FN_A4, FN_SCIFB0_TXD, FN_A5, FN_SCIFB0_RXD, FN_PWM4_B, FN_TPUTO3_C,
+ FN_D6, FN_SCIF4_TXD_B, FN_I2C0_SDA_D,
+ FN_D7, FN_IRQ3, FN_TCLK1, FN_PWM6_B,
+ FN_D8, FN_HSCIF2_HRX, FN_I2C1_SCL_B,
+ FN_D9, FN_HSCIF2_HTX, FN_I2C1_SDA_B,
+ FN_D10, FN_HSCIF2_HSCK, FN_SCIF1_SCK_C, FN_IRQ6, FN_PWM5_C,
+ FN_D11, FN_HSCIF2_HCTS_N, FN_SCIF1_RXD_C, FN_I2C1_SCL_D,
+ FN_D12, FN_HSCIF2_HRTS_N, FN_SCIF1_TXD_C, FN_I2C1_SDA_D,
+ FN_D13, FN_SCIFA1_SCK, FN_PWM2_C, FN_TCLK2_B,
+ FN_D14, FN_SCIFA1_RXD, FN_I2C5_SCL_B,
+ FN_D15, FN_SCIFA1_TXD, FN_I2C5_SDA_B,
+ FN_A0, FN_SCIFB1_SCK, FN_PWM3_B,
+ FN_A1, FN_SCIFB1_TXD,
+ FN_A3, FN_SCIFB0_SCK,
+ FN_A4, FN_SCIFB0_TXD,
+ FN_A5, FN_SCIFB0_RXD, FN_PWM4_B, FN_TPUTO3_C,
FN_A6, FN_SCIFB0_CTS_N, FN_SCIFA4_RXD_B, FN_TPUTO2_C,
/* IPSR2 */
- FN_A7, FN_SCIFB0_RTS_N, FN_SCIFA4_TXD_B, FN_A8, FN_MSIOF1_RXD,
- FN_SCIFA0_RXD_B, FN_A9, FN_MSIOF1_TXD, FN_SCIFA0_TXD_B, FN_A10,
- FN_MSIOF1_SCK, FN_IIC1_SCL_B, FN_A11, FN_MSIOF1_SYNC, FN_IIC1_SDA_B,
- FN_A12, FN_MSIOF1_SS1, FN_SCIFA5_RXD_B, FN_A13, FN_MSIOF1_SS2,
- FN_SCIFA5_TXD_B, FN_A14, FN_MSIOF2_RXD, FN_HSCIF0_HRX_B, FN_DREQ1_N,
- FN_A15, FN_MSIOF2_TXD, FN_HSCIF0_HTX_B, FN_DACK1, FN_A16,
- FN_MSIOF2_SCK, FN_HSCIF0_HSCK_B, FN_SPEEDIN, FN_VSP, FN_CAN_CLK_C,
- FN_TPUTO2_B, FN_A17, FN_MSIOF2_SYNC, FN_SCIF4_RXD_E, FN_CAN1_RX_B,
- FN_AVB_AVTP_CAPTURE_B, FN_A18, FN_MSIOF2_SS1, FN_SCIF4_TXD_E,
- FN_CAN1_TX_B, FN_AVB_AVTP_MATCH_B, FN_A19, FN_MSIOF2_SS2, FN_PWM4,
- FN_TPUTO2, FN_MOUT0, FN_A20, FN_SPCLK, FN_MOUT1,
+ FN_A7, FN_SCIFB0_RTS_N, FN_SCIFA4_TXD_B,
+ FN_A8, FN_MSIOF1_RXD, FN_SCIFA0_RXD_B,
+ FN_A9, FN_MSIOF1_TXD, FN_SCIFA0_TXD_B,
+ FN_A10, FN_MSIOF1_SCK, FN_IIC0_SCL_B,
+ FN_A11, FN_MSIOF1_SYNC, FN_IIC0_SDA_B,
+ FN_A12, FN_MSIOF1_SS1, FN_SCIFA5_RXD_B,
+ FN_A13, FN_MSIOF1_SS2, FN_SCIFA5_TXD_B,
+ FN_A14, FN_MSIOF2_RXD, FN_HSCIF0_HRX_B, FN_DREQ1_N,
+ FN_A15, FN_MSIOF2_TXD, FN_HSCIF0_HTX_B, FN_DACK1,
+ FN_A16, FN_MSIOF2_SCK, FN_HSCIF0_HSCK_B, FN_SPEEDIN, FN_CAN_CLK_C,
+ FN_TPUTO2_B,
+ FN_A17, FN_MSIOF2_SYNC, FN_SCIF4_RXD_E, FN_CAN1_RX_B,
+ FN_A18, FN_MSIOF2_SS1, FN_SCIF4_TXD_E, FN_CAN1_TX_B,
+ FN_A19, FN_MSIOF2_SS2, FN_PWM4, FN_TPUTO2,
+ FN_A20, FN_SPCLK,
/* IPSR3 */
- FN_A21, FN_MOSI_IO0, FN_MOUT2, FN_A22, FN_MISO_IO1, FN_MOUT5,
- FN_ATADIR1_N, FN_A23, FN_IO2, FN_MOUT6, FN_ATAWR1_N, FN_A24, FN_IO3,
- FN_EX_WAIT2, FN_A25, FN_SSL, FN_ATARD1_N, FN_CS0_N, FN_VI1_DATA8,
- FN_CS1_N_A26, FN_VI1_DATA9, FN_EX_CS0_N, FN_VI1_DATA10, FN_EX_CS1_N,
- FN_TPUTO3_B, FN_SCIFB2_RXD, FN_VI1_DATA11, FN_EX_CS2_N, FN_PWM0,
- FN_SCIF4_RXD_C, FN_TS_SDATA_B, FN_RIF0_SYNC, FN_TPUTO3, FN_SCIFB2_TXD,
- FN_SDATA_B, FN_EX_CS3_N, FN_SCIFA2_SCK, FN_SCIF4_TXD_C, FN_TS_SCK_B,
- FN_RIF0_CLK, FN_BPFCLK, FN_SCIFB2_SCK, FN_MDATA_B, FN_EX_CS4_N,
- FN_SCIFA2_RXD, FN_I2C2_SCL_E, FN_TS_SDEN_B, FN_RIF0_D0, FN_FMCLK,
- FN_SCIFB2_CTS_N, FN_SCKZ_B, FN_EX_CS5_N, FN_SCIFA2_TXD, FN_I2C2_SDA_E,
- FN_TS_SPSYNC_B, FN_RIF0_D1, FN_FMIN, FN_SCIFB2_RTS_N, FN_STM_N_B,
- FN_BS_N, FN_DRACK0, FN_PWM1_C, FN_TPUTO0_C, FN_ATACS01_N, FN_MTS_N_B,
- FN_RD_N, FN_ATACS11_N, FN_RD_WR_N, FN_ATAG1_N,
+ FN_A21, FN_MOSI_IO0,
+ FN_A22, FN_MISO_IO1, FN_ATADIR1_N,
+ FN_A23, FN_IO2, FN_ATAWR1_N,
+ FN_A24, FN_IO3, FN_EX_WAIT2,
+ FN_A25, FN_SSL, FN_ATARD1_N,
+ FN_CS0_N, FN_VI1_DATA8,
+ FN_CS1_N_A26, FN_VI1_DATA9,
+ FN_EX_CS0_N, FN_VI1_DATA10,
+ FN_EX_CS1_N, FN_TPUTO3_B, FN_SCIFB2_RXD, FN_VI1_DATA11,
+ FN_EX_CS2_N, FN_PWM0, FN_SCIF4_RXD_C, FN_TS_SDATA_B, FN_TPUTO3,
+ FN_SCIFB2_TXD,
+ FN_EX_CS3_N, FN_SCIFA2_SCK, FN_SCIF4_TXD_C, FN_TS_SCK_B, FN_BPFCLK,
+ FN_SCIFB2_SCK,
+ FN_EX_CS4_N, FN_SCIFA2_RXD, FN_I2C2_SCL_E, FN_TS_SDEN_B, FN_FMCLK,
+ FN_SCIFB2_CTS_N,
+ FN_EX_CS5_N, FN_SCIFA2_TXD, FN_I2C2_SDA_E, FN_TS_SPSYNC_B, FN_FMIN,
+ FN_SCIFB2_RTS_N,
+ FN_BS_N, FN_DRACK0, FN_PWM1_C, FN_TPUTO0_C, FN_ATACS01_N,
+ FN_RD_N, FN_ATACS11_N,
+ FN_RD_WR_N, FN_ATAG1_N,
/* IPSR4 */
- FN_EX_WAIT0, FN_CAN_CLK_B, FN_SCIF_CLK, FN_PWMFSW0, FN_DU0_DR0,
- FN_LCDOUT16, FN_SCIF5_RXD_C, FN_I2C2_SCL_D, FN_CC50_STATE0,
- FN_DU0_DR1, FN_LCDOUT17, FN_SCIF5_TXD_C, FN_I2C2_SDA_D, FN_CC50_STATE1,
- FN_DU0_DR2, FN_LCDOUT18, FN_CC50_STATE2, FN_DU0_DR3, FN_LCDOUT19,
- FN_CC50_STATE3, FN_DU0_DR4, FN_LCDOUT20, FN_CC50_STATE4, FN_DU0_DR5,
- FN_LCDOUT21, FN_CC50_STATE5, FN_DU0_DR6, FN_LCDOUT22, FN_CC50_STATE6,
- FN_DU0_DR7, FN_LCDOUT23, FN_CC50_STATE7, FN_DU0_DG0, FN_LCDOUT8,
- FN_SCIFA0_RXD_C, FN_I2C3_SCL_D, FN_CC50_STATE8, FN_DU0_DG1, FN_LCDOUT9,
- FN_SCIFA0_TXD_C, FN_I2C3_SDA_D, FN_CC50_STATE9, FN_DU0_DG2, FN_LCDOUT10,
- FN_CC50_STATE10, FN_DU0_DG3, FN_LCDOUT11, FN_CC50_STATE11, FN_DU0_DG4,
- FN_LCDOUT12, FN_CC50_STATE12,
+ FN_EX_WAIT0, FN_CAN_CLK_B, FN_SCIF_CLK,
+ FN_DU0_DR0, FN_LCDOUT16, FN_SCIF5_RXD_C, FN_I2C2_SCL_D,
+ FN_DU0_DR1, FN_LCDOUT17, FN_SCIF5_TXD_C, FN_I2C2_SDA_D,
+ FN_DU0_DR2, FN_LCDOUT18,
+ FN_DU0_DR3, FN_LCDOUT19,
+ FN_DU0_DR4, FN_LCDOUT20,
+ FN_DU0_DR5, FN_LCDOUT21,
+ FN_DU0_DR6, FN_LCDOUT22,
+ FN_DU0_DR7, FN_LCDOUT23,
+ FN_DU0_DG0, FN_LCDOUT8, FN_SCIFA0_RXD_C, FN_I2C3_SCL_D,
+ FN_DU0_DG1, FN_LCDOUT9, FN_SCIFA0_TXD_C, FN_I2C3_SDA_D,
+ FN_DU0_DG2, FN_LCDOUT10,
+ FN_DU0_DG3, FN_LCDOUT11,
+ FN_DU0_DG4, FN_LCDOUT12,
/* IPSR5 */
- FN_DU0_DG5, FN_LCDOUT13, FN_CC50_STATE13, FN_DU0_DG6, FN_LCDOUT14,
- FN_CC50_STATE14, FN_DU0_DG7, FN_LCDOUT15, FN_CC50_STATE15, FN_DU0_DB0,
- FN_LCDOUT0, FN_SCIFA4_RXD_C, FN_I2C4_SCL_D, FN_CAN0_RX_C,
- FN_CC50_STATE16, FN_DU0_DB1, FN_LCDOUT1, FN_SCIFA4_TXD_C, FN_I2C4_SDA_D,
- FN_CAN0_TX_C, FN_CC50_STATE17, FN_DU0_DB2, FN_LCDOUT2, FN_CC50_STATE18,
- FN_DU0_DB3, FN_LCDOUT3, FN_CC50_STATE19, FN_DU0_DB4, FN_LCDOUT4,
- FN_CC50_STATE20, FN_DU0_DB5, FN_LCDOUT5, FN_CC50_STATE21, FN_DU0_DB6,
- FN_LCDOUT6, FN_CC50_STATE22, FN_DU0_DB7, FN_LCDOUT7, FN_CC50_STATE23,
- FN_DU0_DOTCLKIN, FN_QSTVA_QVS, FN_CC50_STATE24, FN_DU0_DOTCLKOUT0,
- FN_QCLK, FN_CC50_STATE25, FN_DU0_DOTCLKOUT1, FN_QSTVB_QVE,
- FN_CC50_STATE26, FN_DU0_EXHSYNC_DU0_HSYNC, FN_QSTH_QHS, FN_CC50_STATE27,
+ FN_DU0_DG5, FN_LCDOUT13,
+ FN_DU0_DG6, FN_LCDOUT14,
+ FN_DU0_DG7, FN_LCDOUT15,
+ FN_DU0_DB0, FN_LCDOUT0, FN_SCIFA4_RXD_C, FN_I2C4_SCL_D, FN_CAN0_RX_C,
+ FN_DU0_DB1, FN_LCDOUT1, FN_SCIFA4_TXD_C, FN_I2C4_SDA_D, FN_CAN0_TX_C,
+ FN_DU0_DB2, FN_LCDOUT2,
+ FN_DU0_DB3, FN_LCDOUT3,
+ FN_DU0_DB4, FN_LCDOUT4,
+ FN_DU0_DB5, FN_LCDOUT5,
+ FN_DU0_DB6, FN_LCDOUT6,
+ FN_DU0_DB7, FN_LCDOUT7,
+ FN_DU0_DOTCLKIN, FN_QSTVA_QVS,
+ FN_DU0_DOTCLKOUT0, FN_QCLK,
+ FN_DU0_DOTCLKOUT1, FN_QSTVB_QVE,
+ FN_DU0_EXHSYNC_DU0_HSYNC, FN_QSTH_QHS,
/* IPSR6 */
- FN_DU0_EXVSYNC_DU0_VSYNC, FN_QSTB_QHE, FN_CC50_STATE28,
- FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, FN_QCPV_QDE, FN_CC50_STATE29,
- FN_DU0_DISP, FN_QPOLA, FN_CC50_STATE30, FN_DU0_CDE, FN_QPOLB,
- FN_CC50_STATE31, FN_VI0_CLK, FN_AVB_RX_CLK, FN_VI0_DATA0_VI0_B0,
- FN_AVB_RX_DV, FN_VI0_DATA1_VI0_B1, FN_AVB_RXD0, FN_VI0_DATA2_VI0_B2,
- FN_AVB_RXD1, FN_VI0_DATA3_VI0_B3, FN_AVB_RXD2, FN_VI0_DATA4_VI0_B4,
- FN_AVB_RXD3, FN_VI0_DATA5_VI0_B5, FN_AVB_RXD4, FN_VI0_DATA6_VI0_B6,
- FN_AVB_RXD5, FN_VI0_DATA7_VI0_B7, FN_AVB_RXD6, FN_VI0_CLKENB,
- FN_I2C3_SCL, FN_SCIFA5_RXD_C, FN_IETX_C, FN_AVB_RXD7, FN_VI0_FIELD,
- FN_I2C3_SDA, FN_SCIFA5_TXD_C, FN_IECLK_C, FN_AVB_RX_ER, FN_VI0_HSYNC_N,
- FN_SCIF0_RXD_B, FN_I2C0_SCL_C, FN_IERX_C, FN_AVB_COL, FN_VI0_VSYNC_N,
- FN_SCIF0_TXD_B, FN_I2C0_SDA_C, FN_AUDIO_CLKOUT_B, FN_AVB_TX_EN,
- FN_ETH_MDIO, FN_VI0_G0, FN_MSIOF2_RXD_B, FN_IIC0_SCL_D, FN_AVB_TX_CLK,
- FN_ADIDATA, FN_AD_DI,
+ FN_DU0_EXVSYNC_DU0_VSYNC, FN_QSTB_QHE,
+ FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, FN_QCPV_QDE,
+ FN_DU0_DISP, FN_QPOLA,
+ FN_DU0_CDE, FN_QPOLB,
+ FN_VI0_CLK, FN_AVB_RX_CLK,
+ FN_VI0_DATA0_VI0_B0, FN_AVB_RX_DV,
+ FN_VI0_DATA1_VI0_B1, FN_AVB_RXD0,
+ FN_VI0_DATA2_VI0_B2, FN_AVB_RXD1,
+ FN_VI0_DATA3_VI0_B3, FN_AVB_RXD2,
+ FN_VI0_DATA4_VI0_B4, FN_AVB_RXD3,
+ FN_VI0_DATA5_VI0_B5, FN_AVB_RXD4,
+ FN_VI0_DATA6_VI0_B6, FN_AVB_RXD5,
+ FN_VI0_DATA7_VI0_B7, FN_AVB_RXD6,
+ FN_VI0_CLKENB, FN_I2C3_SCL, FN_SCIFA5_RXD_C, FN_IETX_C, FN_AVB_RXD7,
+ FN_VI0_FIELD, FN_I2C3_SDA, FN_SCIFA5_TXD_C, FN_IECLK_C, FN_AVB_RX_ER,
+ FN_VI0_HSYNC_N, FN_SCIF0_RXD_B, FN_I2C0_SCL_C, FN_IERX_C, FN_AVB_COL,
+ FN_VI0_VSYNC_N, FN_SCIF0_TXD_B, FN_I2C0_SDA_C, FN_AUDIO_CLKOUT_B,
+ FN_AVB_TX_EN,
+ FN_ETH_MDIO, FN_VI0_G0, FN_MSIOF2_RXD_B, FN_I2C5_SCL_D, FN_AVB_TX_CLK,
+ FN_ADIDATA,
/* IPSR7 */
- FN_ETH_CRS_DV, FN_VI0_G1, FN_MSIOF2_TXD_B, FN_IIC0_SDA_D, FN_AVB_TXD0,
- FN_ADICS_SAMP, FN_AD_DO, FN_ETH_RX_ER, FN_VI0_G2, FN_MSIOF2_SCK_B,
- FN_CAN0_RX_B, FN_AVB_TXD1, FN_ADICLK, FN_AD_CLK, FN_ETH_RXD0, FN_VI0_G3,
- FN_MSIOF2_SYNC_B, FN_CAN0_TX_B, FN_AVB_TXD2, FN_ADICHS0, FN_AD_NCS_N,
+ FN_ETH_CRS_DV, FN_VI0_G1, FN_MSIOF2_TXD_B, FN_I2C5_SDA_D, FN_AVB_TXD0,
+ FN_ADICS_SAMP,
+ FN_ETH_RX_ER, FN_VI0_G2, FN_MSIOF2_SCK_B, FN_CAN0_RX_B, FN_AVB_TXD1,
+ FN_ADICLK,
+ FN_ETH_RXD0, FN_VI0_G3, FN_MSIOF2_SYNC_B, FN_CAN0_TX_B, FN_AVB_TXD2,
+ FN_ADICHS0,
FN_ETH_RXD1, FN_VI0_G4, FN_MSIOF2_SS1_B, FN_SCIF4_RXD_D, FN_AVB_TXD3,
- FN_ADICHS1, FN_ETH_LINK, FN_VI0_G5, FN_MSIOF2_SS2_B, FN_SCIF4_TXD_D,
- FN_AVB_TXD4, FN_ADICHS2, FN_ETH_REFCLK, FN_VI0_G6, FN_SCIF2_SCK_C,
- FN_AVB_TXD5, FN_SSI_SCK5_B, FN_ETH_TXD1, FN_VI0_G7, FN_SCIF2_RXD_C,
- FN_IIC1_SCL_D, FN_AVB_TXD6, FN_SSI_WS5_B, FN_ETH_TX_EN, FN_VI0_R0,
- FN_SCIF2_TXD_C, FN_IIC1_SDA_D, FN_AVB_TXD7, FN_SSI_SDATA5_B,
+ FN_ADICHS1,
+ FN_ETH_LINK, FN_VI0_G5, FN_MSIOF2_SS2_B, FN_SCIF4_TXD_D, FN_AVB_TXD4,
+ FN_ADICHS2,
+ FN_ETH_REFCLK, FN_VI0_G6, FN_SCIF2_SCK_C, FN_AVB_TXD5, FN_SSI_SCK5_B,
+ FN_ETH_TXD1, FN_VI0_G7, FN_SCIF2_RXD_C, FN_IIC0_SCL_D, FN_AVB_TXD6,
+ FN_SSI_WS5_B,
+ FN_ETH_TX_EN, FN_VI0_R0, FN_SCIF2_TXD_C, FN_IIC0_SDA_D, FN_AVB_TXD7,
+ FN_SSI_SDATA5_B,
FN_ETH_MAGIC, FN_VI0_R1, FN_SCIF3_SCK_B, FN_AVB_TX_ER, FN_SSI_SCK6_B,
FN_ETH_TXD0, FN_VI0_R2, FN_SCIF3_RXD_B, FN_I2C4_SCL_E, FN_AVB_GTX_CLK,
- FN_SSI_WS6_B, FN_DREQ0_N, FN_SCIFB1_RXD,
+ FN_SSI_WS6_B,
+ FN_DREQ0_N, FN_SCIFB1_RXD,
/* IPSR8 */
FN_ETH_MDC, FN_VI0_R3, FN_SCIF3_TXD_B, FN_I2C4_SDA_E, FN_AVB_MDC,
- FN_SSI_SDATA6_B, FN_HSCIF0_HRX, FN_VI0_R4, FN_I2C1_SCL_C,
- FN_AUDIO_CLKA_B, FN_AVB_MDIO, FN_SSI_SCK78_B, FN_HSCIF0_HTX,
- FN_VI0_R5, FN_I2C1_SDA_C, FN_AUDIO_CLKB_B, FN_AVB_LINK, FN_SSI_WS78_B,
+ FN_SSI_SDATA6_B,
+ FN_HSCIF0_HRX, FN_VI0_R4, FN_I2C1_SCL_C, FN_AUDIO_CLKA_B, FN_AVB_MDIO,
+ FN_SSI_SCK78_B,
+ FN_HSCIF0_HTX, FN_VI0_R5, FN_I2C1_SDA_C, FN_AUDIO_CLKB_B, FN_AVB_LINK,
+ FN_SSI_WS78_B,
FN_HSCIF0_HCTS_N, FN_VI0_R6, FN_SCIF0_RXD_D, FN_I2C0_SCL_E,
- FN_AVB_MAGIC, FN_SSI_SDATA7_B, FN_HSCIF0_HRTS_N, FN_VI0_R7,
- FN_SCIF0_TXD_D, FN_I2C0_SDA_E, FN_AVB_PHY_INT, FN_SSI_SDATA8_B,
+ FN_AVB_MAGIC, FN_SSI_SDATA7_B,
+ FN_HSCIF0_HRTS_N, FN_VI0_R7, FN_SCIF0_TXD_D, FN_I2C0_SDA_E,
+ FN_AVB_PHY_INT, FN_SSI_SDATA8_B,
FN_HSCIF0_HSCK, FN_SCIF_CLK_B, FN_AVB_CRS, FN_AUDIO_CLKC_B,
FN_I2C0_SCL, FN_SCIF0_RXD_C, FN_PWM5, FN_TCLK1_B, FN_AVB_GTXREFCLK,
- FN_CAN1_RX_D, FN_TPUTO0_B, FN_I2C0_SDA, FN_SCIF0_TXD_C, FN_TPUTO0,
- FN_CAN_CLK, FN_DVC_MUTE, FN_CAN1_TX_D, FN_I2C1_SCL, FN_SCIF4_RXD,
- FN_PWM5_B, FN_DU1_DR0, FN_RIF1_SYNC_B, FN_TS_SDATA_D, FN_TPUTO1_B,
- FN_I2C1_SDA, FN_SCIF4_TXD, FN_IRQ5, FN_DU1_DR1, FN_RIF1_CLK_B,
- FN_TS_SCK_D, FN_BPFCLK_C, FN_MSIOF0_RXD, FN_SCIF5_RXD, FN_I2C2_SCL_C,
- FN_DU1_DR2, FN_RIF1_D0_B, FN_TS_SDEN_D, FN_FMCLK_C, FN_RDS_CLK,
+ FN_CAN1_RX_D, FN_TPUTO0_B,
+ FN_I2C0_SDA, FN_SCIF0_TXD_C, FN_TPUTO0, FN_CAN_CLK, FN_DVC_MUTE,
+ FN_CAN1_TX_D,
+ FN_I2C1_SCL, FN_SCIF4_RXD, FN_PWM5_B, FN_DU1_DR0, FN_TS_SDATA_D,
+ FN_TPUTO1_B,
+ FN_I2C1_SDA, FN_SCIF4_TXD, FN_IRQ5, FN_DU1_DR1, FN_TS_SCK_D,
+ FN_BPFCLK_C,
+ FN_MSIOF0_RXD, FN_SCIF5_RXD, FN_I2C2_SCL_C, FN_DU1_DR2, FN_TS_SDEN_D,
+ FN_FMCLK_C,
/* IPSR9 */
- FN_MSIOF0_TXD, FN_SCIF5_TXD, FN_I2C2_SDA_C, FN_DU1_DR3, FN_RIF1_D1_B,
- FN_TS_SPSYNC_D, FN_FMIN_C, FN_RDS_DATA, FN_MSIOF0_SCK, FN_IRQ0,
- FN_TS_SDATA, FN_DU1_DR4, FN_RIF1_SYNC, FN_TPUTO1_C, FN_MSIOF0_SYNC,
- FN_PWM1, FN_TS_SCK, FN_DU1_DR5, FN_RIF1_CLK, FN_BPFCLK_B, FN_MSIOF0_SS1,
- FN_SCIFA0_RXD, FN_TS_SDEN, FN_DU1_DR6, FN_RIF1_D0, FN_FMCLK_B,
- FN_RDS_CLK_B, FN_MSIOF0_SS2, FN_SCIFA0_TXD, FN_TS_SPSYNC, FN_DU1_DR7,
- FN_RIF1_D1, FN_FMIN_B, FN_RDS_DATA_B, FN_HSCIF1_HRX, FN_I2C4_SCL,
- FN_PWM6, FN_DU1_DG0, FN_HSCIF1_HTX, FN_I2C4_SDA, FN_TPUTO1, FN_DU1_DG1,
+ FN_MSIOF0_TXD, FN_SCIF5_TXD, FN_I2C2_SDA_C, FN_DU1_DR3, FN_TS_SPSYNC_D,
+ FN_FMIN_C,
+ FN_MSIOF0_SCK, FN_IRQ0, FN_TS_SDATA, FN_DU1_DR4, FN_TPUTO1_C,
+ FN_MSIOF0_SYNC, FN_PWM1, FN_TS_SCK, FN_DU1_DR5, FN_BPFCLK_B,
+ FN_MSIOF0_SS1, FN_SCIFA0_RXD, FN_TS_SDEN, FN_DU1_DR6, FN_FMCLK_B,
+ FN_MSIOF0_SS2, FN_SCIFA0_TXD, FN_TS_SPSYNC, FN_DU1_DR7, FN_FMIN_B,
+ FN_HSCIF1_HRX, FN_I2C4_SCL, FN_PWM6, FN_DU1_DG0,
+ FN_HSCIF1_HTX, FN_I2C4_SDA, FN_TPUTO1, FN_DU1_DG1,
FN_HSCIF1_HSCK, FN_PWM2, FN_IETX, FN_DU1_DG2, FN_REMOCON_B,
- FN_SPEEDIN_B, FN_VSP_B, FN_HSCIF1_HCTS_N, FN_SCIFA4_RXD, FN_IECLK,
- FN_DU1_DG3, FN_SSI_SCK1_B, FN_CAN_DEBUG_HW_TRIGGER, FN_CC50_STATE32,
+ FN_SPEEDIN_B,
+ FN_HSCIF1_HCTS_N, FN_SCIFA4_RXD, FN_IECLK, FN_DU1_DG3, FN_SSI_SCK1_B,
FN_HSCIF1_HRTS_N, FN_SCIFA4_TXD, FN_IERX, FN_DU1_DG4, FN_SSI_WS1_B,
- FN_CAN_STEP0, FN_CC50_STATE33, FN_SCIF1_SCK, FN_PWM3, FN_TCLK2,
- FN_DU1_DG5, FN_SSI_SDATA1_B, FN_CAN_TXCLK, FN_CC50_STATE34,
+ FN_SCIF1_SCK, FN_PWM3, FN_TCLK2, FN_DU1_DG5, FN_SSI_SDATA1_B,
/* IPSR10 */
- FN_SCIF1_RXD, FN_IIC0_SCL, FN_DU1_DG6, FN_SSI_SCK2_B, FN_CAN_DEBUGOUT0,
- FN_CC50_STATE35, FN_SCIF1_TXD, FN_IIC0_SDA, FN_DU1_DG7, FN_SSI_WS2_B,
- FN_CAN_DEBUGOUT1, FN_CC50_STATE36, FN_SCIF2_RXD, FN_IIC1_SCL,
- FN_DU1_DB0, FN_SSI_SDATA2_B, FN_USB0_EXTLP, FN_CAN_DEBUGOUT2,
- FN_CC50_STATE37, FN_SCIF2_TXD, FN_IIC1_SDA, FN_DU1_DB1, FN_SSI_SCK9_B,
- FN_USB0_OVC1, FN_CAN_DEBUGOUT3, FN_CC50_STATE38, FN_SCIF2_SCK, FN_IRQ1,
- FN_DU1_DB2, FN_SSI_WS9_B, FN_USB0_IDIN, FN_CAN_DEBUGOUT4,
- FN_CC50_STATE39, FN_SCIF3_SCK, FN_IRQ2, FN_BPFCLK_D, FN_DU1_DB3,
- FN_SSI_SDATA9_B, FN_TANS2, FN_CAN_DEBUGOUT5, FN_CC50_OSCOUT,
+ FN_SCIF1_RXD, FN_I2C5_SCL, FN_DU1_DG6, FN_SSI_SCK2_B,
+ FN_SCIF1_TXD, FN_I2C5_SDA, FN_DU1_DG7, FN_SSI_WS2_B,
+ FN_SCIF2_RXD, FN_IIC0_SCL, FN_DU1_DB0, FN_SSI_SDATA2_B,
+ FN_SCIF2_TXD, FN_IIC0_SDA, FN_DU1_DB1, FN_SSI_SCK9_B,
+ FN_SCIF2_SCK, FN_IRQ1, FN_DU1_DB2, FN_SSI_WS9_B,
+ FN_SCIF3_SCK, FN_IRQ2, FN_BPFCLK_D, FN_DU1_DB3, FN_SSI_SDATA9_B,
FN_SCIF3_RXD, FN_I2C1_SCL_E, FN_FMCLK_D, FN_DU1_DB4, FN_AUDIO_CLKA_C,
- FN_SSI_SCK4_B, FN_CAN_DEBUGOUT6, FN_RDS_CLK_C, FN_SCIF3_TXD,
- FN_I2C1_SDA_E, FN_FMIN_D, FN_DU1_DB5, FN_AUDIO_CLKB_C, FN_SSI_WS4_B,
- FN_CAN_DEBUGOUT7, FN_RDS_DATA_C, FN_I2C2_SCL, FN_SCIFA5_RXD, FN_DU1_DB6,
- FN_AUDIO_CLKC_C, FN_SSI_SDATA4_B, FN_CAN_DEBUGOUT8, FN_I2C2_SDA,
- FN_SCIFA5_TXD, FN_DU1_DB7, FN_AUDIO_CLKOUT_C, FN_CAN_DEBUGOUT9,
- FN_SSI_SCK5, FN_SCIFA3_SCK, FN_DU1_DOTCLKIN, FN_CAN_DEBUGOUT10,
+ FN_SSI_SCK4_B,
+ FN_SCIF3_TXD, FN_I2C1_SDA_E, FN_FMIN_D, FN_DU1_DB5, FN_AUDIO_CLKB_C,
+ FN_SSI_WS4_B,
+ FN_I2C2_SCL, FN_SCIFA5_RXD, FN_DU1_DB6, FN_AUDIO_CLKC_C,
+ FN_SSI_SDATA4_B,
+ FN_I2C2_SDA, FN_SCIFA5_TXD, FN_DU1_DB7, FN_AUDIO_CLKOUT_C,
+ FN_SSI_SCK5, FN_SCIFA3_SCK, FN_DU1_DOTCLKIN,
/* IPSR11 */
FN_SSI_WS5, FN_SCIFA3_RXD, FN_I2C3_SCL_C, FN_DU1_DOTCLKOUT0,
- FN_CAN_DEBUGOUT11, FN_SSI_SDATA5, FN_SCIFA3_TXD, FN_I2C3_SDA_C,
- FN_DU1_DOTCLKOUT1, FN_CAN_DEBUGOUT12, FN_SSI_SCK6, FN_SCIFA1_SCK_B,
- FN_DU1_EXHSYNC_DU1_HSYNC, FN_CAN_DEBUGOUT13, FN_SSI_WS6,
- FN_SCIFA1_RXD_B, FN_I2C4_SCL_C, FN_DU1_EXVSYNC_DU1_VSYNC,
- FN_CAN_DEBUGOUT14, FN_SSI_SDATA6, FN_SCIFA1_TXD_B, FN_I2C4_SDA_C,
- FN_DU1_EXODDF_DU1_ODDF_DISP_CDE, FN_CAN_DEBUGOUT15, FN_SSI_SCK78,
- FN_SCIFA2_SCK_B, FN_IIC0_SDA_C, FN_DU1_DISP, FN_SSI_WS78,
- FN_SCIFA2_RXD_B, FN_IIC0_SCL_C, FN_DU1_CDE, FN_SSI_SDATA7,
- FN_SCIFA2_TXD_B, FN_IRQ8, FN_AUDIO_CLKA_D, FN_CAN_CLK_D, FN_PCMOE_N,
+ FN_SSI_SDATA5, FN_SCIFA3_TXD, FN_I2C3_SDA_C, FN_DU1_DOTCLKOUT1,
+ FN_SSI_SCK6, FN_SCIFA1_SCK_B, FN_DU1_EXHSYNC_DU1_HSYNC,
+ FN_SSI_WS6, FN_SCIFA1_RXD_B, FN_I2C4_SCL_C, FN_DU1_EXVSYNC_DU1_VSYNC,
+ FN_SSI_SDATA6, FN_SCIFA1_TXD_B, FN_I2C4_SDA_C,
+ FN_DU1_EXODDF_DU1_ODDF_DISP_CDE,
+ FN_SSI_SCK78, FN_SCIFA2_SCK_B, FN_I2C5_SDA_C, FN_DU1_DISP,
+ FN_SSI_WS78, FN_SCIFA2_RXD_B, FN_I2C5_SCL_C, FN_DU1_CDE,
+ FN_SSI_SDATA7, FN_SCIFA2_TXD_B, FN_IRQ8, FN_AUDIO_CLKA_D, FN_CAN_CLK_D,
FN_SSI_SCK0129, FN_MSIOF1_RXD_B, FN_SCIF5_RXD_D, FN_ADIDATA_B,
- FN_AD_DI_B, FN_PCMWE_N, FN_SSI_WS0129, FN_MSIOF1_TXD_B, FN_SCIF5_TXD_D,
- FN_ADICS_SAMP_B, FN_AD_DO_B, FN_SSI_SDATA0, FN_MSIOF1_SCK_B, FN_PWM0_B,
- FN_ADICLK_B, FN_AD_CLK_B,
+ FN_SSI_WS0129, FN_MSIOF1_TXD_B, FN_SCIF5_TXD_D, FN_ADICS_SAMP_B,
+ FN_SSI_SDATA0, FN_MSIOF1_SCK_B, FN_PWM0_B, FN_ADICLK_B,
/* IPSR12 */
FN_SSI_SCK34, FN_MSIOF1_SYNC_B, FN_SCIFA1_SCK_C, FN_ADICHS0_B,
- FN_AD_NCS_N_B, FN_DREQ1_N_B, FN_SSI_WS34, FN_MSIOF1_SS1_B,
- FN_SCIFA1_RXD_C, FN_ADICHS1_B, FN_CAN1_RX_C, FN_DACK1_B, FN_SSI_SDATA3,
- FN_MSIOF1_SS2_B, FN_SCIFA1_TXD_C, FN_ADICHS2_B, FN_CAN1_TX_C,
- FN_DREQ2_N, FN_SSI_SCK4, FN_MLB_CLK, FN_IETX_B, FN_IRD_TX, FN_SSI_WS4,
- FN_MLB_SIG, FN_IECLK_B, FN_IRD_RX, FN_SSI_SDATA4, FN_MLB_DAT,
- FN_IERX_B, FN_IRD_SCK, FN_SSI_SDATA8, FN_SCIF1_SCK_B,
- FN_PWM1_B, FN_IRQ9, FN_REMOCON, FN_DACK2, FN_ETH_MDIO_B, FN_SSI_SCK1,
- FN_SCIF1_RXD_B, FN_IIC1_SCL_C, FN_VI1_CLK, FN_CAN0_RX_D,
- FN_AVB_AVTP_CAPTURE, FN_ETH_CRS_DV_B, FN_SSI_WS1, FN_SCIF1_TXD_B,
- FN_IIC1_SDA_C, FN_VI1_DATA0, FN_CAN0_TX_D, FN_AVB_AVTP_MATCH,
- FN_ETH_RX_ER_B, FN_SSI_SDATA1, FN_HSCIF1_HRX_B, FN_SDATA, FN_VI1_DATA1,
- FN_ATAWR0_N, FN_ETH_RXD0_B, FN_SSI_SCK2, FN_HSCIF1_HTX_B, FN_VI1_DATA2,
- FN_MDATA, FN_ATAG0_N, FN_ETH_RXD1_B,
+ FN_DREQ1_N_B,
+ FN_SSI_WS34, FN_MSIOF1_SS1_B, FN_SCIFA1_RXD_C, FN_ADICHS1_B,
+ FN_CAN1_RX_C, FN_DACK1_B,
+ FN_SSI_SDATA3, FN_MSIOF1_SS2_B, FN_SCIFA1_TXD_C, FN_ADICHS2_B,
+ FN_CAN1_TX_C, FN_DREQ2_N,
+ FN_SSI_SCK4, FN_MLB_CLK, FN_IETX_B, FN_SSI_WS4, FN_MLB_SIG, FN_IECLK_B,
+ FN_SSI_SDATA4, FN_MLB_DAT, FN_IERX_B,
+ FN_SSI_SDATA8, FN_SCIF1_SCK_B, FN_PWM1_B, FN_IRQ9, FN_REMOCON,
+ FN_DACK2, FN_ETH_MDIO_B,
+ FN_SSI_SCK1, FN_SCIF1_RXD_B, FN_IIC0_SCL_C, FN_VI1_CLK, FN_CAN0_RX_D,
+ FN_ETH_CRS_DV_B,
+ FN_SSI_WS1, FN_SCIF1_TXD_B, FN_IIC0_SDA_C, FN_VI1_DATA0, FN_CAN0_TX_D,
+ FN_ETH_RX_ER_B,
+ FN_SSI_SDATA1, FN_HSCIF1_HRX_B, FN_VI1_DATA1, FN_ATAWR0_N,
+ FN_ETH_RXD0_B,
+ FN_SSI_SCK2, FN_HSCIF1_HTX_B, FN_VI1_DATA2, FN_ATAG0_N, FN_ETH_RXD1_B,
/* IPSR13 */
- FN_SSI_WS2, FN_HSCIF1_HCTS_N_B, FN_SCIFA0_RXD_D, FN_VI1_DATA3, FN_SCKZ,
- FN_ATACS00_N, FN_ETH_LINK_B, FN_SSI_SDATA2, FN_HSCIF1_HRTS_N_B,
- FN_SCIFA0_TXD_D, FN_VI1_DATA4, FN_STM_N, FN_ATACS10_N, FN_ETH_REFCLK_B,
- FN_SSI_SCK9, FN_SCIF2_SCK_B, FN_PWM2_B, FN_VI1_DATA5, FN_MTS_N,
- FN_EX_WAIT1, FN_ETH_TXD1_B, FN_SSI_WS9, FN_SCIF2_RXD_B, FN_I2C3_SCL_E,
- FN_VI1_DATA6, FN_ATARD0_N, FN_ETH_TX_EN_B, FN_SSI_SDATA9,
- FN_SCIF2_TXD_B, FN_I2C3_SDA_E, FN_VI1_DATA7, FN_ATADIR0_N,
- FN_ETH_MAGIC_B, FN_AUDIO_CLKA, FN_I2C0_SCL_B, FN_SCIFA4_RXD_D,
- FN_VI1_CLKENB, FN_TS_SDATA_C, FN_RIF0_SYNC_B, FN_ETH_TXD0_B,
+ FN_SSI_WS2, FN_HSCIF1_HCTS_N_B, FN_SCIFA0_RXD_D, FN_VI1_DATA3,
+ FN_ATACS00_N, FN_ETH_LINK_B,
+ FN_SSI_SDATA2, FN_HSCIF1_HRTS_N_B, FN_SCIFA0_TXD_D, FN_VI1_DATA4,
+ FN_ATACS10_N, FN_ETH_REFCLK_B,
+ FN_SSI_SCK9, FN_SCIF2_SCK_B, FN_PWM2_B, FN_VI1_DATA5, FN_EX_WAIT1,
+ FN_ETH_TXD1_B,
+ FN_SSI_WS9, FN_SCIF2_RXD_B, FN_I2C3_SCL_E, FN_VI1_DATA6, FN_ATARD0_N,
+ FN_ETH_TX_EN_B,
+ FN_SSI_SDATA9, FN_SCIF2_TXD_B, FN_I2C3_SDA_E, FN_VI1_DATA7,
+ FN_ATADIR0_N, FN_ETH_MAGIC_B,
+ FN_AUDIO_CLKA, FN_I2C0_SCL_B, FN_SCIFA4_RXD_D, FN_VI1_CLKENB,
+ FN_TS_SDATA_C, FN_ETH_TXD0_B,
FN_AUDIO_CLKB, FN_I2C0_SDA_B, FN_SCIFA4_TXD_D, FN_VI1_FIELD,
- FN_TS_SCK_C, FN_RIF0_CLK_B, FN_BPFCLK_E, FN_ETH_MDC_B, FN_AUDIO_CLKC,
- FN_I2C4_SCL_B, FN_SCIFA5_RXD_D, FN_VI1_HSYNC_N, FN_TS_SDEN_C,
- FN_RIF0_D0_B, FN_FMCLK_E, FN_RDS_CLK_D, FN_AUDIO_CLKOUT, FN_I2C4_SDA_B,
- FN_SCIFA5_TXD_D, FN_VI1_VSYNC_N, FN_TS_SPSYNC_C, FN_RIF0_D1_B,
- FN_FMIN_E, FN_RDS_DATA_D,
+ FN_TS_SCK_C, FN_BPFCLK_E, FN_ETH_MDC_B,
+ FN_AUDIO_CLKC, FN_I2C4_SCL_B, FN_SCIFA5_RXD_D, FN_VI1_HSYNC_N,
+ FN_TS_SDEN_C, FN_FMCLK_E,
+ FN_AUDIO_CLKOUT, FN_I2C4_SDA_B, FN_SCIFA5_TXD_D, FN_VI1_VSYNC_N,
+ FN_TS_SPSYNC_C, FN_FMIN_E,
/* MOD_SEL */
FN_SEL_ADG_0, FN_SEL_ADG_1, FN_SEL_ADG_2, FN_SEL_ADG_3,
- FN_SEL_ADI_0, FN_SEL_ADI_1, FN_SEL_CAN_0, FN_SEL_CAN_1,
- FN_SEL_CAN_2, FN_SEL_CAN_3, FN_SEL_DARC_0, FN_SEL_DARC_1,
- FN_SEL_DARC_2, FN_SEL_DARC_3, FN_SEL_DARC_4, FN_SEL_DR0_0,
- FN_SEL_DR0_1, FN_SEL_DR1_0, FN_SEL_DR1_1, FN_SEL_DR2_0, FN_SEL_DR2_1,
- FN_SEL_DR3_0, FN_SEL_DR3_1, FN_SEL_ETH_0, FN_SEL_ETH_1, FN_SEL_FSN_0,
- FN_SEL_FSN_1, FN_SEL_I2C00_0, FN_SEL_I2C00_1, FN_SEL_I2C00_2,
- FN_SEL_I2C00_3, FN_SEL_I2C00_4, FN_SEL_I2C01_0, FN_SEL_I2C01_1,
- FN_SEL_I2C01_2, FN_SEL_I2C01_3, FN_SEL_I2C01_4, FN_SEL_I2C02_0,
- FN_SEL_I2C02_1, FN_SEL_I2C02_2, FN_SEL_I2C02_3, FN_SEL_I2C02_4,
+ FN_SEL_CAN_0, FN_SEL_CAN_1, FN_SEL_CAN_2, FN_SEL_CAN_3,
+ FN_SEL_DARC_0, FN_SEL_DARC_1, FN_SEL_DARC_2, FN_SEL_DARC_3,
+ FN_SEL_DARC_4,
+ FN_SEL_ETH_0, FN_SEL_ETH_1,
+ FN_SEL_I2C00_0, FN_SEL_I2C00_1, FN_SEL_I2C00_2, FN_SEL_I2C00_3,
+ FN_SEL_I2C00_4,
+ FN_SEL_I2C01_0, FN_SEL_I2C01_1, FN_SEL_I2C01_2, FN_SEL_I2C01_3,
+ FN_SEL_I2C01_4,
+ FN_SEL_I2C02_0, FN_SEL_I2C02_1, FN_SEL_I2C02_2, FN_SEL_I2C02_3,
+ FN_SEL_I2C02_4,
FN_SEL_I2C03_0, FN_SEL_I2C03_1, FN_SEL_I2C03_2, FN_SEL_I2C03_3,
- FN_SEL_I2C03_4, FN_SEL_I2C04_0, FN_SEL_I2C04_1, FN_SEL_I2C04_2,
- FN_SEL_I2C04_3, FN_SEL_I2C04_4, FN_SEL_IIC00_0, FN_SEL_IIC00_1,
- FN_SEL_IIC00_2, FN_SEL_IIC00_3, FN_SEL_AVB_0, FN_SEL_AVB_1,
+ FN_SEL_I2C03_4,
+ FN_SEL_I2C04_0, FN_SEL_I2C04_1, FN_SEL_I2C04_2, FN_SEL_I2C04_3,
+ FN_SEL_I2C04_4,
+ FN_SEL_I2C05_0, FN_SEL_I2C05_1, FN_SEL_I2C05_2, FN_SEL_I2C05_3,
/* MOD_SEL2 */
- FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, FN_SEL_IIC01_0,
- FN_SEL_IIC01_1, FN_SEL_IIC01_2, FN_SEL_IIC01_3, FN_SEL_LBS_0,
- FN_SEL_LBS_1, FN_SEL_MSI1_0, FN_SEL_MSI1_1, FN_SEL_MSI2_0,
- FN_SEL_MSI2_1, FN_SEL_RAD_0, FN_SEL_RAD_1, FN_SEL_RCN_0,
- FN_SEL_RCN_1, FN_SEL_RSP_0, FN_SEL_RSP_1, FN_SEL_SCIFA0_0,
- FN_SEL_SCIFA0_1, FN_SEL_SCIFA0_2, FN_SEL_SCIFA0_3, FN_SEL_SCIFA1_0,
- FN_SEL_SCIFA1_1, FN_SEL_SCIFA1_2, FN_SEL_SCIFA2_0, FN_SEL_SCIFA2_1,
- FN_SEL_SCIFA3_0, FN_SEL_SCIFA3_1, FN_SEL_SCIFA4_0, FN_SEL_SCIFA4_1,
- FN_SEL_SCIFA4_2, FN_SEL_SCIFA4_3, FN_SEL_SCIFA5_0, FN_SEL_SCIFA5_1,
- FN_SEL_SCIFA5_2, FN_SEL_SCIFA5_3, FN_SEL_SPDM_0, FN_SEL_SPDM_1,
- FN_SEL_TMU_0, FN_SEL_TMU_1, FN_SEL_TSIF0_0, FN_SEL_TSIF0_1,
- FN_SEL_TSIF0_2, FN_SEL_TSIF0_3, FN_SEL_CAN0_0, FN_SEL_CAN0_1,
- FN_SEL_CAN0_2, FN_SEL_CAN0_3, FN_SEL_CAN1_0, FN_SEL_CAN1_1,
- FN_SEL_CAN1_2, FN_SEL_CAN1_3, FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1,
- FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1, FN_SEL_RDS_0, FN_SEL_RDS_1,
- FN_SEL_RDS_2, FN_SEL_RDS_3,
+ FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2,
+ FN_SEL_IIC0_0, FN_SEL_IIC0_1, FN_SEL_IIC0_2, FN_SEL_IIC0_3,
+ FN_SEL_LBS_0, FN_SEL_LBS_1, FN_SEL_MSI1_0, FN_SEL_MSI1_1,
+ FN_SEL_MSI2_0, FN_SEL_MSI2_1, FN_SEL_RAD_0, FN_SEL_RAD_1,
+ FN_SEL_RCN_0, FN_SEL_RCN_1, FN_SEL_RSP_0, FN_SEL_RSP_1,
+ FN_SEL_SCIFA0_0, FN_SEL_SCIFA0_1, FN_SEL_SCIFA0_2, FN_SEL_SCIFA0_3,
+ FN_SEL_SCIFA1_0, FN_SEL_SCIFA1_1, FN_SEL_SCIFA1_2,
+ FN_SEL_SCIFA2_0, FN_SEL_SCIFA2_1, FN_SEL_SCIFA3_0, FN_SEL_SCIFA3_1,
+ FN_SEL_SCIFA4_0, FN_SEL_SCIFA4_1, FN_SEL_SCIFA4_2, FN_SEL_SCIFA4_3,
+ FN_SEL_SCIFA5_0, FN_SEL_SCIFA5_1, FN_SEL_SCIFA5_2, FN_SEL_SCIFA5_3,
+ FN_SEL_TMU_0, FN_SEL_TMU_1,
+ FN_SEL_TSIF0_0, FN_SEL_TSIF0_1, FN_SEL_TSIF0_2, FN_SEL_TSIF0_3,
+ FN_SEL_CAN0_0, FN_SEL_CAN0_1, FN_SEL_CAN0_2, FN_SEL_CAN0_3,
+ FN_SEL_CAN1_0, FN_SEL_CAN1_1, FN_SEL_CAN1_2, FN_SEL_CAN1_3,
+ FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1, FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1,
/* MOD_SEL3 */
FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2, FN_SEL_SCIF0_3,
@@ -372,117 +416,141 @@ enum {
SCIF4_RXD_B_MARK, I2C0_SCL_D_MARK,
/* IPSR1 */
- D6_MARK, SCIF4_TXD_B_MARK, I2C0_SDA_D_MARK, D7_MARK, IRQ3_MARK,
- TCLK1_MARK, PWM6_B_MARK, D8_MARK, HSCIF2_HRX_MARK, I2C1_SCL_B_MARK,
- D9_MARK, HSCIF2_HTX_MARK, I2C1_SDA_B_MARK, D10_MARK,
- HSCIF2_HSCK_MARK, SCIF1_SCK_C_MARK, IRQ6_MARK, PWM5_C_MARK,
+ D6_MARK, SCIF4_TXD_B_MARK, I2C0_SDA_D_MARK,
+ D7_MARK, IRQ3_MARK, TCLK1_MARK, PWM6_B_MARK,
+ D8_MARK, HSCIF2_HRX_MARK, I2C1_SCL_B_MARK,
+ D9_MARK, HSCIF2_HTX_MARK, I2C1_SDA_B_MARK,
+ D10_MARK, HSCIF2_HSCK_MARK, SCIF1_SCK_C_MARK, IRQ6_MARK, PWM5_C_MARK,
D11_MARK, HSCIF2_HCTS_N_MARK, SCIF1_RXD_C_MARK, I2C1_SCL_D_MARK,
D12_MARK, HSCIF2_HRTS_N_MARK, SCIF1_TXD_C_MARK, I2C1_SDA_D_MARK,
- D13_MARK, SCIFA1_SCK_MARK, TANS1_MARK, PWM2_C_MARK, TCLK2_B_MARK,
- D14_MARK, SCIFA1_RXD_MARK, IIC0_SCL_B_MARK, D15_MARK, SCIFA1_TXD_MARK,
- IIC0_SDA_B_MARK, A0_MARK, SCIFB1_SCK_MARK, PWM3_B_MARK, A1_MARK,
- SCIFB1_TXD_MARK, A3_MARK, SCIFB0_SCK_MARK, A4_MARK, SCIFB0_TXD_MARK,
- A5_MARK, SCIFB0_RXD_MARK, PWM4_B_MARK, TPUTO3_C_MARK, A6_MARK,
- SCIFB0_CTS_N_MARK, SCIFA4_RXD_B_MARK, TPUTO2_C_MARK,
+ D13_MARK, SCIFA1_SCK_MARK, PWM2_C_MARK, TCLK2_B_MARK,
+ D14_MARK, SCIFA1_RXD_MARK, I2C5_SCL_B_MARK,
+ D15_MARK, SCIFA1_TXD_MARK, I2C5_SDA_B_MARK,
+ A0_MARK, SCIFB1_SCK_MARK, PWM3_B_MARK,
+ A1_MARK, SCIFB1_TXD_MARK,
+ A3_MARK, SCIFB0_SCK_MARK,
+ A4_MARK, SCIFB0_TXD_MARK,
+ A5_MARK, SCIFB0_RXD_MARK, PWM4_B_MARK, TPUTO3_C_MARK,
+ A6_MARK, SCIFB0_CTS_N_MARK, SCIFA4_RXD_B_MARK, TPUTO2_C_MARK,
/* IPSR2 */
- A7_MARK, SCIFB0_RTS_N_MARK, SCIFA4_TXD_B_MARK, A8_MARK, MSIOF1_RXD_MARK,
- SCIFA0_RXD_B_MARK, A9_MARK, MSIOF1_TXD_MARK, SCIFA0_TXD_B_MARK,
- A10_MARK, MSIOF1_SCK_MARK, IIC1_SCL_B_MARK, A11_MARK, MSIOF1_SYNC_MARK,
- IIC1_SDA_B_MARK, A12_MARK, MSIOF1_SS1_MARK, SCIFA5_RXD_B_MARK,
- A13_MARK, MSIOF1_SS2_MARK, SCIFA5_TXD_B_MARK, A14_MARK, MSIOF2_RXD_MARK,
- HSCIF0_HRX_B_MARK, DREQ1_N_MARK, A15_MARK, MSIOF2_TXD_MARK,
- HSCIF0_HTX_B_MARK, DACK1_MARK, A16_MARK, MSIOF2_SCK_MARK,
- HSCIF0_HSCK_B_MARK, SPEEDIN_MARK, VSP_MARK, CAN_CLK_C_MARK,
- TPUTO2_B_MARK, A17_MARK, MSIOF2_SYNC_MARK, SCIF4_RXD_E_MARK,
- CAN1_RX_B_MARK, AVB_AVTP_CAPTURE_B_MARK, A18_MARK, MSIOF2_SS1_MARK,
- SCIF4_TXD_E_MARK, CAN1_TX_B_MARK, AVB_AVTP_MATCH_B_MARK, A19_MARK,
- MSIOF2_SS2_MARK, PWM4_MARK, TPUTO2_MARK, MOUT0_MARK, A20_MARK,
- SPCLK_MARK, MOUT1_MARK,
+ A7_MARK, SCIFB0_RTS_N_MARK, SCIFA4_TXD_B_MARK,
+ A8_MARK, MSIOF1_RXD_MARK, SCIFA0_RXD_B_MARK,
+ A9_MARK, MSIOF1_TXD_MARK, SCIFA0_TXD_B_MARK,
+ A10_MARK, MSIOF1_SCK_MARK, IIC0_SCL_B_MARK,
+ A11_MARK, MSIOF1_SYNC_MARK, IIC0_SDA_B_MARK,
+ A12_MARK, MSIOF1_SS1_MARK, SCIFA5_RXD_B_MARK,
+ A13_MARK, MSIOF1_SS2_MARK, SCIFA5_TXD_B_MARK,
+ A14_MARK, MSIOF2_RXD_MARK, HSCIF0_HRX_B_MARK, DREQ1_N_MARK,
+ A15_MARK, MSIOF2_TXD_MARK, HSCIF0_HTX_B_MARK, DACK1_MARK,
+ A16_MARK, MSIOF2_SCK_MARK, HSCIF0_HSCK_B_MARK, SPEEDIN_MARK,
+ CAN_CLK_C_MARK, TPUTO2_B_MARK,
+ A17_MARK, MSIOF2_SYNC_MARK, SCIF4_RXD_E_MARK, CAN1_RX_B_MARK,
+ A18_MARK, MSIOF2_SS1_MARK, SCIF4_TXD_E_MARK, CAN1_TX_B_MARK,
+ A19_MARK, MSIOF2_SS2_MARK, PWM4_MARK, TPUTO2_MARK,
+ A20_MARK, SPCLK_MARK,
/* IPSR3 */
- A21_MARK, MOSI_IO0_MARK, MOUT2_MARK, A22_MARK, MISO_IO1_MARK,
- MOUT5_MARK, ATADIR1_N_MARK, A23_MARK, IO2_MARK, MOUT6_MARK,
- ATAWR1_N_MARK, A24_MARK, IO3_MARK, EX_WAIT2_MARK, A25_MARK, SSL_MARK,
- ATARD1_N_MARK, CS0_N_MARK, VI1_DATA8_MARK, CS1_N_A26_MARK,
- VI1_DATA9_MARK, EX_CS0_N_MARK, VI1_DATA10_MARK, EX_CS1_N_MARK,
- TPUTO3_B_MARK, SCIFB2_RXD_MARK, VI1_DATA11_MARK, EX_CS2_N_MARK,
- PWM0_MARK, SCIF4_RXD_C_MARK, TS_SDATA_B_MARK, RIF0_SYNC_MARK,
- TPUTO3_MARK, SCIFB2_TXD_MARK, SDATA_B_MARK, EX_CS3_N_MARK,
- SCIFA2_SCK_MARK, SCIF4_TXD_C_MARK, TS_SCK_B_MARK, RIF0_CLK_MARK,
- BPFCLK_MARK, SCIFB2_SCK_MARK, MDATA_B_MARK, EX_CS4_N_MARK,
- SCIFA2_RXD_MARK, I2C2_SCL_E_MARK, TS_SDEN_B_MARK, RIF0_D0_MARK,
- FMCLK_MARK, SCIFB2_CTS_N_MARK, SCKZ_B_MARK, EX_CS5_N_MARK,
- SCIFA2_TXD_MARK, I2C2_SDA_E_MARK, TS_SPSYNC_B_MARK, RIF0_D1_MARK,
- FMIN_MARK, SCIFB2_RTS_N_MARK, STM_N_B_MARK, BS_N_MARK, DRACK0_MARK,
- PWM1_C_MARK, TPUTO0_C_MARK, ATACS01_N_MARK, MTS_N_B_MARK, RD_N_MARK,
- ATACS11_N_MARK, RD_WR_N_MARK, ATAG1_N_MARK,
+ A21_MARK, MOSI_IO0_MARK,
+ A22_MARK, MISO_IO1_MARK, ATADIR1_N_MARK,
+ A23_MARK, IO2_MARK, ATAWR1_N_MARK,
+ A24_MARK, IO3_MARK, EX_WAIT2_MARK,
+ A25_MARK, SSL_MARK, ATARD1_N_MARK,
+ CS0_N_MARK, VI1_DATA8_MARK,
+ CS1_N_A26_MARK, VI1_DATA9_MARK,
+ EX_CS0_N_MARK, VI1_DATA10_MARK,
+ EX_CS1_N_MARK, TPUTO3_B_MARK, SCIFB2_RXD_MARK, VI1_DATA11_MARK,
+ EX_CS2_N_MARK, PWM0_MARK, SCIF4_RXD_C_MARK, TS_SDATA_B_MARK,
+ TPUTO3_MARK, SCIFB2_TXD_MARK,
+ EX_CS3_N_MARK, SCIFA2_SCK_MARK, SCIF4_TXD_C_MARK, TS_SCK_B_MARK,
+ BPFCLK_MARK, SCIFB2_SCK_MARK,
+ EX_CS4_N_MARK, SCIFA2_RXD_MARK, I2C2_SCL_E_MARK, TS_SDEN_B_MARK,
+ FMCLK_MARK, SCIFB2_CTS_N_MARK,
+ EX_CS5_N_MARK, SCIFA2_TXD_MARK, I2C2_SDA_E_MARK, TS_SPSYNC_B_MARK,
+ FMIN_MARK, SCIFB2_RTS_N_MARK,
+ BS_N_MARK, DRACK0_MARK, PWM1_C_MARK, TPUTO0_C_MARK, ATACS01_N_MARK,
+ RD_N_MARK, ATACS11_N_MARK,
+ RD_WR_N_MARK, ATAG1_N_MARK,
/* IPSR4 */
- EX_WAIT0_MARK, CAN_CLK_B_MARK, SCIF_CLK_MARK, PWMFSW0_MARK,
+ EX_WAIT0_MARK, CAN_CLK_B_MARK, SCIF_CLK_MARK,
DU0_DR0_MARK, LCDOUT16_MARK, SCIF5_RXD_C_MARK, I2C2_SCL_D_MARK,
- CC50_STATE0_MARK, DU0_DR1_MARK, LCDOUT17_MARK, SCIF5_TXD_C_MARK,
- I2C2_SDA_D_MARK, CC50_STATE1_MARK, DU0_DR2_MARK, LCDOUT18_MARK,
- CC50_STATE2_MARK, DU0_DR3_MARK, LCDOUT19_MARK, CC50_STATE3_MARK,
- DU0_DR4_MARK, LCDOUT20_MARK, CC50_STATE4_MARK, DU0_DR5_MARK,
- LCDOUT21_MARK, CC50_STATE5_MARK, DU0_DR6_MARK, LCDOUT22_MARK,
- CC50_STATE6_MARK, DU0_DR7_MARK, LCDOUT23_MARK, CC50_STATE7_MARK,
+ DU0_DR1_MARK, LCDOUT17_MARK, SCIF5_TXD_C_MARK, I2C2_SDA_D_MARK,
+ DU0_DR2_MARK, LCDOUT18_MARK,
+ DU0_DR3_MARK, LCDOUT19_MARK,
+ DU0_DR4_MARK, LCDOUT20_MARK,
+ DU0_DR5_MARK, LCDOUT21_MARK,
+ DU0_DR6_MARK, LCDOUT22_MARK,
+ DU0_DR7_MARK, LCDOUT23_MARK,
DU0_DG0_MARK, LCDOUT8_MARK, SCIFA0_RXD_C_MARK, I2C3_SCL_D_MARK,
- CC50_STATE8_MARK, DU0_DG1_MARK, LCDOUT9_MARK, SCIFA0_TXD_C_MARK,
- I2C3_SDA_D_MARK, CC50_STATE9_MARK, DU0_DG2_MARK, LCDOUT10_MARK,
- CC50_STATE10_MARK, DU0_DG3_MARK, LCDOUT11_MARK, CC50_STATE11_MARK,
- DU0_DG4_MARK, LCDOUT12_MARK, CC50_STATE12_MARK,
+ DU0_DG1_MARK, LCDOUT9_MARK, SCIFA0_TXD_C_MARK, I2C3_SDA_D_MARK,
+ DU0_DG2_MARK, LCDOUT10_MARK,
+ DU0_DG3_MARK, LCDOUT11_MARK,
+ DU0_DG4_MARK, LCDOUT12_MARK,
/* IPSR5 */
- DU0_DG5_MARK, LCDOUT13_MARK, CC50_STATE13_MARK, DU0_DG6_MARK,
- LCDOUT14_MARK, CC50_STATE14_MARK, DU0_DG7_MARK, LCDOUT15_MARK,
- CC50_STATE15_MARK, DU0_DB0_MARK, LCDOUT0_MARK, SCIFA4_RXD_C_MARK,
- I2C4_SCL_D_MARK, CAN0_RX_C_MARK, CC50_STATE16_MARK, DU0_DB1_MARK,
- LCDOUT1_MARK, SCIFA4_TXD_C_MARK, I2C4_SDA_D_MARK, CAN0_TX_C_MARK,
- CC50_STATE17_MARK, DU0_DB2_MARK, LCDOUT2_MARK, CC50_STATE18_MARK,
- DU0_DB3_MARK, LCDOUT3_MARK, CC50_STATE19_MARK, DU0_DB4_MARK,
- LCDOUT4_MARK, CC50_STATE20_MARK, DU0_DB5_MARK, LCDOUT5_MARK,
- CC50_STATE21_MARK, DU0_DB6_MARK, LCDOUT6_MARK, CC50_STATE22_MARK,
- DU0_DB7_MARK, LCDOUT7_MARK, CC50_STATE23_MARK, DU0_DOTCLKIN_MARK,
- QSTVA_QVS_MARK, CC50_STATE24_MARK, DU0_DOTCLKOUT0_MARK,
- QCLK_MARK, CC50_STATE25_MARK, DU0_DOTCLKOUT1_MARK, QSTVB_QVE_MARK,
- CC50_STATE26_MARK, DU0_EXHSYNC_DU0_HSYNC_MARK, QSTH_QHS_MARK,
- CC50_STATE27_MARK,
+ DU0_DG5_MARK, LCDOUT13_MARK,
+ DU0_DG6_MARK, LCDOUT14_MARK,
+ DU0_DG7_MARK, LCDOUT15_MARK,
+ DU0_DB0_MARK, LCDOUT0_MARK, SCIFA4_RXD_C_MARK, I2C4_SCL_D_MARK,
+ CAN0_RX_C_MARK,
+ DU0_DB1_MARK, LCDOUT1_MARK, SCIFA4_TXD_C_MARK, I2C4_SDA_D_MARK,
+ CAN0_TX_C_MARK,
+ DU0_DB2_MARK, LCDOUT2_MARK,
+ DU0_DB3_MARK, LCDOUT3_MARK,
+ DU0_DB4_MARK, LCDOUT4_MARK,
+ DU0_DB5_MARK, LCDOUT5_MARK,
+ DU0_DB6_MARK, LCDOUT6_MARK,
+ DU0_DB7_MARK, LCDOUT7_MARK,
+ DU0_DOTCLKIN_MARK, QSTVA_QVS_MARK,
+ DU0_DOTCLKOUT0_MARK, QCLK_MARK,
+ DU0_DOTCLKOUT1_MARK, QSTVB_QVE_MARK,
+ DU0_EXHSYNC_DU0_HSYNC_MARK, QSTH_QHS_MARK,
/* IPSR6 */
- DU0_EXVSYNC_DU0_VSYNC_MARK, QSTB_QHE_MARK, CC50_STATE28_MARK,
- DU0_EXODDF_DU0_ODDF_DISP_CDE_MARK, QCPV_QDE_MARK, CC50_STATE29_MARK,
- DU0_DISP_MARK, QPOLA_MARK, CC50_STATE30_MARK, DU0_CDE_MARK, QPOLB_MARK,
- CC50_STATE31_MARK, VI0_CLK_MARK, AVB_RX_CLK_MARK, VI0_DATA0_VI0_B0_MARK,
- AVB_RX_DV_MARK, VI0_DATA1_VI0_B1_MARK, AVB_RXD0_MARK,
- VI0_DATA2_VI0_B2_MARK, AVB_RXD1_MARK, VI0_DATA3_VI0_B3_MARK,
- AVB_RXD2_MARK, VI0_DATA4_VI0_B4_MARK, AVB_RXD3_MARK,
- VI0_DATA5_VI0_B5_MARK, AVB_RXD4_MARK, VI0_DATA6_VI0_B6_MARK,
- AVB_RXD5_MARK, VI0_DATA7_VI0_B7_MARK, AVB_RXD6_MARK, VI0_CLKENB_MARK,
- I2C3_SCL_MARK, SCIFA5_RXD_C_MARK, IETX_C_MARK, AVB_RXD7_MARK,
+ DU0_EXVSYNC_DU0_VSYNC_MARK, QSTB_QHE_MARK,
+ DU0_EXODDF_DU0_ODDF_DISP_CDE_MARK, QCPV_QDE_MARK,
+ DU0_DISP_MARK, QPOLA_MARK, DU0_CDE_MARK, QPOLB_MARK,
+ VI0_CLK_MARK, AVB_RX_CLK_MARK, VI0_DATA0_VI0_B0_MARK, AVB_RX_DV_MARK,
+ VI0_DATA1_VI0_B1_MARK, AVB_RXD0_MARK,
+ VI0_DATA2_VI0_B2_MARK, AVB_RXD1_MARK,
+ VI0_DATA3_VI0_B3_MARK, AVB_RXD2_MARK,
+ VI0_DATA4_VI0_B4_MARK, AVB_RXD3_MARK,
+ VI0_DATA5_VI0_B5_MARK, AVB_RXD4_MARK,
+ VI0_DATA6_VI0_B6_MARK, AVB_RXD5_MARK,
+ VI0_DATA7_VI0_B7_MARK, AVB_RXD6_MARK,
+ VI0_CLKENB_MARK, I2C3_SCL_MARK, SCIFA5_RXD_C_MARK, IETX_C_MARK,
+ AVB_RXD7_MARK,
VI0_FIELD_MARK, I2C3_SDA_MARK, SCIFA5_TXD_C_MARK, IECLK_C_MARK,
- AVB_RX_ER_MARK, VI0_HSYNC_N_MARK, SCIF0_RXD_B_MARK, I2C0_SCL_C_MARK,
- IERX_C_MARK, AVB_COL_MARK, VI0_VSYNC_N_MARK, SCIF0_TXD_B_MARK,
- I2C0_SDA_C_MARK, AUDIO_CLKOUT_B_MARK, AVB_TX_EN_MARK, ETH_MDIO_MARK,
- VI0_G0_MARK, MSIOF2_RXD_B_MARK, IIC0_SCL_D_MARK, AVB_TX_CLK_MARK,
- ADIDATA_MARK, AD_DI_MARK,
+ AVB_RX_ER_MARK,
+ VI0_HSYNC_N_MARK, SCIF0_RXD_B_MARK, I2C0_SCL_C_MARK, IERX_C_MARK,
+ AVB_COL_MARK,
+ VI0_VSYNC_N_MARK, SCIF0_TXD_B_MARK, I2C0_SDA_C_MARK,
+ AUDIO_CLKOUT_B_MARK, AVB_TX_EN_MARK,
+ ETH_MDIO_MARK, VI0_G0_MARK, MSIOF2_RXD_B_MARK, I2C5_SCL_D_MARK,
+ AVB_TX_CLK_MARK, ADIDATA_MARK,
/* IPSR7 */
- ETH_CRS_DV_MARK, VI0_G1_MARK, MSIOF2_TXD_B_MARK, IIC0_SDA_D_MARK,
- AVB_TXD0_MARK, ADICS_SAMP_MARK, AD_DO_MARK, ETH_RX_ER_MARK, VI0_G2_MARK,
- MSIOF2_SCK_B_MARK, CAN0_RX_B_MARK, AVB_TXD1_MARK, ADICLK_MARK,
- AD_CLK_MARK, ETH_RXD0_MARK, VI0_G3_MARK, MSIOF2_SYNC_B_MARK,
- CAN0_TX_B_MARK, AVB_TXD2_MARK, ADICHS0_MARK, AD_NCS_N_MARK,
+ ETH_CRS_DV_MARK, VI0_G1_MARK, MSIOF2_TXD_B_MARK, I2C5_SDA_D_MARK,
+ AVB_TXD0_MARK, ADICS_SAMP_MARK,
+ ETH_RX_ER_MARK, VI0_G2_MARK, MSIOF2_SCK_B_MARK, CAN0_RX_B_MARK,
+ AVB_TXD1_MARK, ADICLK_MARK,
+ ETH_RXD0_MARK, VI0_G3_MARK, MSIOF2_SYNC_B_MARK, CAN0_TX_B_MARK,
+ AVB_TXD2_MARK, ADICHS0_MARK,
ETH_RXD1_MARK, VI0_G4_MARK, MSIOF2_SS1_B_MARK, SCIF4_RXD_D_MARK,
- AVB_TXD3_MARK, ADICHS1_MARK, ETH_LINK_MARK, VI0_G5_MARK,
- MSIOF2_SS2_B_MARK, SCIF4_TXD_D_MARK, AVB_TXD4_MARK, ADICHS2_MARK,
+ AVB_TXD3_MARK, ADICHS1_MARK,
+ ETH_LINK_MARK, VI0_G5_MARK, MSIOF2_SS2_B_MARK, SCIF4_TXD_D_MARK,
+ AVB_TXD4_MARK, ADICHS2_MARK,
ETH_REFCLK_MARK, VI0_G6_MARK, SCIF2_SCK_C_MARK, AVB_TXD5_MARK,
- SSI_SCK5_B_MARK, ETH_TXD1_MARK, VI0_G7_MARK, SCIF2_RXD_C_MARK,
- IIC1_SCL_D_MARK, AVB_TXD6_MARK, SSI_WS5_B_MARK, ETH_TX_EN_MARK,
- VI0_R0_MARK, SCIF2_TXD_C_MARK, IIC1_SDA_D_MARK, AVB_TXD7_MARK,
- SSI_SDATA5_B_MARK, ETH_MAGIC_MARK, VI0_R1_MARK, SCIF3_SCK_B_MARK,
- AVB_TX_ER_MARK, SSI_SCK6_B_MARK, ETH_TXD0_MARK, VI0_R2_MARK,
- SCIF3_RXD_B_MARK, I2C4_SCL_E_MARK, AVB_GTX_CLK_MARK, SSI_WS6_B_MARK,
+ SSI_SCK5_B_MARK,
+ ETH_TXD1_MARK, VI0_G7_MARK, SCIF2_RXD_C_MARK, IIC0_SCL_D_MARK,
+ AVB_TXD6_MARK, SSI_WS5_B_MARK,
+ ETH_TX_EN_MARK, VI0_R0_MARK, SCIF2_TXD_C_MARK, IIC0_SDA_D_MARK,
+ AVB_TXD7_MARK, SSI_SDATA5_B_MARK,
+ ETH_MAGIC_MARK, VI0_R1_MARK, SCIF3_SCK_B_MARK, AVB_TX_ER_MARK,
+ SSI_SCK6_B_MARK,
+ ETH_TXD0_MARK, VI0_R2_MARK, SCIF3_RXD_B_MARK, I2C4_SCL_E_MARK,
+ AVB_GTX_CLK_MARK, SSI_WS6_B_MARK,
DREQ0_N_MARK, SCIFB1_RXD_MARK,
/* IPSR8 */
@@ -498,103 +566,107 @@ enum {
I2C0_SCL_MARK, SCIF0_RXD_C_MARK, PWM5_MARK, TCLK1_B_MARK,
AVB_GTXREFCLK_MARK, CAN1_RX_D_MARK, TPUTO0_B_MARK, I2C0_SDA_MARK,
SCIF0_TXD_C_MARK, TPUTO0_MARK, CAN_CLK_MARK, DVC_MUTE_MARK,
- CAN1_TX_D_MARK, I2C1_SCL_MARK, SCIF4_RXD_MARK, PWM5_B_MARK,
- DU1_DR0_MARK, RIF1_SYNC_B_MARK, TS_SDATA_D_MARK, TPUTO1_B_MARK,
- I2C1_SDA_MARK, SCIF4_TXD_MARK, IRQ5_MARK, DU1_DR1_MARK, RIF1_CLK_B_MARK,
- TS_SCK_D_MARK, BPFCLK_C_MARK, MSIOF0_RXD_MARK, SCIF5_RXD_MARK,
- I2C2_SCL_C_MARK, DU1_DR2_MARK, RIF1_D0_B_MARK, TS_SDEN_D_MARK,
- FMCLK_C_MARK, RDS_CLK_MARK,
+ CAN1_TX_D_MARK,
+ I2C1_SCL_MARK, SCIF4_RXD_MARK, PWM5_B_MARK, DU1_DR0_MARK,
+ TS_SDATA_D_MARK, TPUTO1_B_MARK,
+ I2C1_SDA_MARK, SCIF4_TXD_MARK, IRQ5_MARK, DU1_DR1_MARK, TS_SCK_D_MARK,
+ BPFCLK_C_MARK,
+ MSIOF0_RXD_MARK, SCIF5_RXD_MARK, I2C2_SCL_C_MARK, DU1_DR2_MARK,
+ TS_SDEN_D_MARK, FMCLK_C_MARK,
/* IPSR9 */
MSIOF0_TXD_MARK, SCIF5_TXD_MARK, I2C2_SDA_C_MARK, DU1_DR3_MARK,
- RIF1_D1_B_MARK, TS_SPSYNC_D_MARK, FMIN_C_MARK, RDS_DATA_MARK,
- MSIOF0_SCK_MARK, IRQ0_MARK, TS_SDATA_MARK, DU1_DR4_MARK, RIF1_SYNC_MARK,
- TPUTO1_C_MARK, MSIOF0_SYNC_MARK, PWM1_MARK, TS_SCK_MARK, DU1_DR5_MARK,
- RIF1_CLK_MARK, BPFCLK_B_MARK, MSIOF0_SS1_MARK, SCIFA0_RXD_MARK,
- TS_SDEN_MARK, DU1_DR6_MARK, RIF1_D0_MARK, FMCLK_B_MARK, RDS_CLK_B_MARK,
+ TS_SPSYNC_D_MARK, FMIN_C_MARK,
+ MSIOF0_SCK_MARK, IRQ0_MARK, TS_SDATA_MARK, DU1_DR4_MARK, TPUTO1_C_MARK,
+ MSIOF0_SYNC_MARK, PWM1_MARK, TS_SCK_MARK, DU1_DR5_MARK, BPFCLK_B_MARK,
+ MSIOF0_SS1_MARK, SCIFA0_RXD_MARK, TS_SDEN_MARK, DU1_DR6_MARK,
+ FMCLK_B_MARK,
MSIOF0_SS2_MARK, SCIFA0_TXD_MARK, TS_SPSYNC_MARK, DU1_DR7_MARK,
- RIF1_D1_MARK, FMIN_B_MARK, RDS_DATA_B_MARK, HSCIF1_HRX_MARK,
- I2C4_SCL_MARK, PWM6_MARK, DU1_DG0_MARK, HSCIF1_HTX_MARK,
- I2C4_SDA_MARK, TPUTO1_MARK, DU1_DG1_MARK, HSCIF1_HSCK_MARK,
- PWM2_MARK, IETX_MARK, DU1_DG2_MARK, REMOCON_B_MARK, SPEEDIN_B_MARK,
- VSP_B_MARK, HSCIF1_HCTS_N_MARK, SCIFA4_RXD_MARK, IECLK_MARK,
- DU1_DG3_MARK, SSI_SCK1_B_MARK, CAN_DEBUG_HW_TRIGGER_MARK,
- CC50_STATE32_MARK, HSCIF1_HRTS_N_MARK, SCIFA4_TXD_MARK, IERX_MARK,
- DU1_DG4_MARK, SSI_WS1_B_MARK, CAN_STEP0_MARK, CC50_STATE33_MARK,
+ FMIN_B_MARK,
+ HSCIF1_HRX_MARK, I2C4_SCL_MARK, PWM6_MARK, DU1_DG0_MARK,
+ HSCIF1_HTX_MARK, I2C4_SDA_MARK, TPUTO1_MARK, DU1_DG1_MARK,
+ HSCIF1_HSCK_MARK, PWM2_MARK, IETX_MARK, DU1_DG2_MARK, REMOCON_B_MARK,
+ SPEEDIN_B_MARK,
+ HSCIF1_HCTS_N_MARK, SCIFA4_RXD_MARK, IECLK_MARK, DU1_DG3_MARK,
+ SSI_SCK1_B_MARK,
+ HSCIF1_HRTS_N_MARK, SCIFA4_TXD_MARK, IERX_MARK, DU1_DG4_MARK,
+ SSI_WS1_B_MARK,
SCIF1_SCK_MARK, PWM3_MARK, TCLK2_MARK, DU1_DG5_MARK, SSI_SDATA1_B_MARK,
- CAN_TXCLK_MARK, CC50_STATE34_MARK,
+ CAN_TXCLK_MARK,
/* IPSR10 */
- SCIF1_RXD_MARK, IIC0_SCL_MARK, DU1_DG6_MARK, SSI_SCK2_B_MARK,
- CAN_DEBUGOUT0_MARK, CC50_STATE35_MARK, SCIF1_TXD_MARK, IIC0_SDA_MARK,
- DU1_DG7_MARK, SSI_WS2_B_MARK, CAN_DEBUGOUT1_MARK, CC50_STATE36_MARK,
- SCIF2_RXD_MARK, IIC1_SCL_MARK, DU1_DB0_MARK, SSI_SDATA2_B_MARK,
- USB0_EXTLP_MARK, CAN_DEBUGOUT2_MARK, CC50_STATE37_MARK, SCIF2_TXD_MARK,
- IIC1_SDA_MARK, DU1_DB1_MARK, SSI_SCK9_B_MARK, USB0_OVC1_MARK,
- CAN_DEBUGOUT3_MARK, CC50_STATE38_MARK, SCIF2_SCK_MARK, IRQ1_MARK,
- DU1_DB2_MARK, SSI_WS9_B_MARK, USB0_IDIN_MARK, CAN_DEBUGOUT4_MARK,
- CC50_STATE39_MARK, SCIF3_SCK_MARK, IRQ2_MARK, BPFCLK_D_MARK,
- DU1_DB3_MARK, SSI_SDATA9_B_MARK, TANS2_MARK, CAN_DEBUGOUT5_MARK,
- CC50_OSCOUT_MARK, SCIF3_RXD_MARK, I2C1_SCL_E_MARK, FMCLK_D_MARK,
- DU1_DB4_MARK, AUDIO_CLKA_C_MARK, SSI_SCK4_B_MARK, CAN_DEBUGOUT6_MARK,
- RDS_CLK_C_MARK, SCIF3_TXD_MARK, I2C1_SDA_E_MARK, FMIN_D_MARK,
- DU1_DB5_MARK, AUDIO_CLKB_C_MARK, SSI_WS4_B_MARK, CAN_DEBUGOUT7_MARK,
- RDS_DATA_C_MARK, I2C2_SCL_MARK, SCIFA5_RXD_MARK, DU1_DB6_MARK,
- AUDIO_CLKC_C_MARK, SSI_SDATA4_B_MARK, CAN_DEBUGOUT8_MARK, I2C2_SDA_MARK,
- SCIFA5_TXD_MARK, DU1_DB7_MARK, AUDIO_CLKOUT_C_MARK, CAN_DEBUGOUT9_MARK,
- SSI_SCK5_MARK, SCIFA3_SCK_MARK, DU1_DOTCLKIN_MARK, CAN_DEBUGOUT10_MARK,
+ SCIF1_RXD_MARK, I2C5_SCL_MARK, DU1_DG6_MARK, SSI_SCK2_B_MARK,
+ SCIF1_TXD_MARK, I2C5_SDA_MARK, DU1_DG7_MARK, SSI_WS2_B_MARK,
+ SCIF2_RXD_MARK, IIC0_SCL_MARK, DU1_DB0_MARK, SSI_SDATA2_B_MARK,
+ SCIF2_TXD_MARK, IIC0_SDA_MARK, DU1_DB1_MARK, SSI_SCK9_B_MARK,
+ SCIF2_SCK_MARK, IRQ1_MARK, DU1_DB2_MARK, SSI_WS9_B_MARK,
+ SCIF3_SCK_MARK, IRQ2_MARK, BPFCLK_D_MARK, DU1_DB3_MARK,
+ SSI_SDATA9_B_MARK,
+ SCIF3_RXD_MARK, I2C1_SCL_E_MARK, FMCLK_D_MARK, DU1_DB4_MARK,
+ AUDIO_CLKA_C_MARK, SSI_SCK4_B_MARK,
+ SCIF3_TXD_MARK, I2C1_SDA_E_MARK, FMIN_D_MARK, DU1_DB5_MARK,
+ AUDIO_CLKB_C_MARK, SSI_WS4_B_MARK,
+ I2C2_SCL_MARK, SCIFA5_RXD_MARK, DU1_DB6_MARK, AUDIO_CLKC_C_MARK,
+ SSI_SDATA4_B_MARK,
+ I2C2_SDA_MARK, SCIFA5_TXD_MARK, DU1_DB7_MARK, AUDIO_CLKOUT_C_MARK,
+ SSI_SCK5_MARK, SCIFA3_SCK_MARK, DU1_DOTCLKIN_MARK,
/* IPSR11 */
SSI_WS5_MARK, SCIFA3_RXD_MARK, I2C3_SCL_C_MARK, DU1_DOTCLKOUT0_MARK,
- CAN_DEBUGOUT11_MARK, SSI_SDATA5_MARK, SCIFA3_TXD_MARK, I2C3_SDA_C_MARK,
- DU1_DOTCLKOUT1_MARK, CAN_DEBUGOUT12_MARK, SSI_SCK6_MARK,
- SCIFA1_SCK_B_MARK, DU1_EXHSYNC_DU1_HSYNC_MARK, CAN_DEBUGOUT13_MARK,
+ SSI_SDATA5_MARK, SCIFA3_TXD_MARK, I2C3_SDA_C_MARK, DU1_DOTCLKOUT1_MARK,
+ SSI_SCK6_MARK, SCIFA1_SCK_B_MARK, DU1_EXHSYNC_DU1_HSYNC_MARK,
SSI_WS6_MARK, SCIFA1_RXD_B_MARK, I2C4_SCL_C_MARK,
- DU1_EXVSYNC_DU1_VSYNC_MARK, CAN_DEBUGOUT14_MARK, SSI_SDATA6_MARK,
- SCIFA1_TXD_B_MARK, I2C4_SDA_C_MARK, DU1_EXODDF_DU1_ODDF_DISP_CDE_MARK,
- CAN_DEBUGOUT15_MARK, SSI_SCK78_MARK, SCIFA2_SCK_B_MARK, IIC0_SDA_C_MARK,
- DU1_DISP_MARK, SSI_WS78_MARK, SCIFA2_RXD_B_MARK, IIC0_SCL_C_MARK,
- DU1_CDE_MARK, SSI_SDATA7_MARK, SCIFA2_TXD_B_MARK, IRQ8_MARK,
- AUDIO_CLKA_D_MARK, CAN_CLK_D_MARK, PCMOE_N_MARK, SSI_SCK0129_MARK,
- MSIOF1_RXD_B_MARK, SCIF5_RXD_D_MARK, ADIDATA_B_MARK, AD_DI_B_MARK,
- PCMWE_N_MARK, SSI_WS0129_MARK, MSIOF1_TXD_B_MARK, SCIF5_TXD_D_MARK,
- ADICS_SAMP_B_MARK, AD_DO_B_MARK, SSI_SDATA0_MARK, MSIOF1_SCK_B_MARK,
- PWM0_B_MARK, ADICLK_B_MARK, AD_CLK_B_MARK,
+ DU1_EXVSYNC_DU1_VSYNC_MARK,
+ SSI_SDATA6_MARK, SCIFA1_TXD_B_MARK, I2C4_SDA_C_MARK,
+ DU1_EXODDF_DU1_ODDF_DISP_CDE_MARK,
+ SSI_SCK78_MARK, SCIFA2_SCK_B_MARK, I2C5_SDA_C_MARK, DU1_DISP_MARK,
+ SSI_WS78_MARK, SCIFA2_RXD_B_MARK, I2C5_SCL_C_MARK, DU1_CDE_MARK,
+ SSI_SDATA7_MARK, SCIFA2_TXD_B_MARK, IRQ8_MARK, AUDIO_CLKA_D_MARK,
+ CAN_CLK_D_MARK,
+ SSI_SCK0129_MARK, MSIOF1_RXD_B_MARK, SCIF5_RXD_D_MARK, ADIDATA_B_MARK,
+ SSI_WS0129_MARK, MSIOF1_TXD_B_MARK, SCIF5_TXD_D_MARK, ADICS_SAMP_B_MARK,
+ SSI_SDATA0_MARK, MSIOF1_SCK_B_MARK, PWM0_B_MARK, ADICLK_B_MARK,
/* IPSR12 */
SSI_SCK34_MARK, MSIOF1_SYNC_B_MARK, SCIFA1_SCK_C_MARK, ADICHS0_B_MARK,
- AD_NCS_N_B_MARK, DREQ1_N_B_MARK, SSI_WS34_MARK, MSIOF1_SS1_B_MARK,
- SCIFA1_RXD_C_MARK, ADICHS1_B_MARK, CAN1_RX_C_MARK, DACK1_B_MARK,
+ DREQ1_N_B_MARK,
+ SSI_WS34_MARK, MSIOF1_SS1_B_MARK, SCIFA1_RXD_C_MARK, ADICHS1_B_MARK,
+ CAN1_RX_C_MARK, DACK1_B_MARK,
SSI_SDATA3_MARK, MSIOF1_SS2_B_MARK, SCIFA1_TXD_C_MARK, ADICHS2_B_MARK,
- CAN1_TX_C_MARK, DREQ2_N_MARK, SSI_SCK4_MARK, MLB_CLK_MARK, IETX_B_MARK,
- IRD_TX_MARK, SSI_WS4_MARK, MLB_SIG_MARK, IECLK_B_MARK, IRD_RX_MARK,
- SSI_SDATA4_MARK, MLB_DAT_MARK, IERX_B_MARK, IRD_SCK_MARK,
+ CAN1_TX_C_MARK, DREQ2_N_MARK,
+ SSI_SCK4_MARK, MLB_CLK_MARK, IETX_B_MARK,
+ SSI_WS4_MARK, MLB_SIG_MARK, IECLK_B_MARK,
+ SSI_SDATA4_MARK, MLB_DAT_MARK, IERX_B_MARK,
SSI_SDATA8_MARK, SCIF1_SCK_B_MARK, PWM1_B_MARK, IRQ9_MARK, REMOCON_MARK,
- DACK2_MARK, ETH_MDIO_B_MARK, SSI_SCK1_MARK, SCIF1_RXD_B_MARK,
- IIC1_SCL_C_MARK, VI1_CLK_MARK, CAN0_RX_D_MARK, AVB_AVTP_CAPTURE_MARK,
- ETH_CRS_DV_B_MARK, SSI_WS1_MARK, SCIF1_TXD_B_MARK, IIC1_SDA_C_MARK,
- VI1_DATA0_MARK, CAN0_TX_D_MARK, AVB_AVTP_MATCH_MARK, ETH_RX_ER_B_MARK,
- SSI_SDATA1_MARK, HSCIF1_HRX_B_MARK, VI1_DATA1_MARK, SDATA_MARK,
- ATAWR0_N_MARK, ETH_RXD0_B_MARK, SSI_SCK2_MARK, HSCIF1_HTX_B_MARK,
- VI1_DATA2_MARK, MDATA_MARK, ATAG0_N_MARK, ETH_RXD1_B_MARK,
+ DACK2_MARK, ETH_MDIO_B_MARK,
+ SSI_SCK1_MARK, SCIF1_RXD_B_MARK, IIC0_SCL_C_MARK, VI1_CLK_MARK,
+ CAN0_RX_D_MARK, ETH_CRS_DV_B_MARK,
+ SSI_WS1_MARK, SCIF1_TXD_B_MARK, IIC0_SDA_C_MARK, VI1_DATA0_MARK,
+ CAN0_TX_D_MARK, ETH_RX_ER_B_MARK,
+ SSI_SDATA1_MARK, HSCIF1_HRX_B_MARK, VI1_DATA1_MARK, ATAWR0_N_MARK,
+ ETH_RXD0_B_MARK,
+ SSI_SCK2_MARK, HSCIF1_HTX_B_MARK, VI1_DATA2_MARK, ATAG0_N_MARK,
+ ETH_RXD1_B_MARK,
/* IPSR13 */
SSI_WS2_MARK, HSCIF1_HCTS_N_B_MARK, SCIFA0_RXD_D_MARK, VI1_DATA3_MARK,
- SCKZ_MARK, ATACS00_N_MARK, ETH_LINK_B_MARK, SSI_SDATA2_MARK,
- HSCIF1_HRTS_N_B_MARK, SCIFA0_TXD_D_MARK, VI1_DATA4_MARK, STM_N_MARK,
- ATACS10_N_MARK, ETH_REFCLK_B_MARK, SSI_SCK9_MARK, SCIF2_SCK_B_MARK,
- PWM2_B_MARK, VI1_DATA5_MARK, MTS_N_MARK, EX_WAIT1_MARK,
- ETH_TXD1_B_MARK, SSI_WS9_MARK, SCIF2_RXD_B_MARK, I2C3_SCL_E_MARK,
- VI1_DATA6_MARK, ATARD0_N_MARK, ETH_TX_EN_B_MARK, SSI_SDATA9_MARK,
- SCIF2_TXD_B_MARK, I2C3_SDA_E_MARK, VI1_DATA7_MARK, ATADIR0_N_MARK,
- ETH_MAGIC_B_MARK, AUDIO_CLKA_MARK, I2C0_SCL_B_MARK, SCIFA4_RXD_D_MARK,
- VI1_CLKENB_MARK, TS_SDATA_C_MARK, RIF0_SYNC_B_MARK, ETH_TXD0_B_MARK,
+ ATACS00_N_MARK, ETH_LINK_B_MARK,
+ SSI_SDATA2_MARK, HSCIF1_HRTS_N_B_MARK, SCIFA0_TXD_D_MARK,
+ VI1_DATA4_MARK, ATACS10_N_MARK, ETH_REFCLK_B_MARK,
+ SSI_SCK9_MARK, SCIF2_SCK_B_MARK, PWM2_B_MARK, VI1_DATA5_MARK,
+ EX_WAIT1_MARK, ETH_TXD1_B_MARK,
+ SSI_WS9_MARK, SCIF2_RXD_B_MARK, I2C3_SCL_E_MARK, VI1_DATA6_MARK,
+ ATARD0_N_MARK, ETH_TX_EN_B_MARK,
+ SSI_SDATA9_MARK, SCIF2_TXD_B_MARK, I2C3_SDA_E_MARK, VI1_DATA7_MARK,
+ ATADIR0_N_MARK, ETH_MAGIC_B_MARK,
+ AUDIO_CLKA_MARK, I2C0_SCL_B_MARK, SCIFA4_RXD_D_MARK, VI1_CLKENB_MARK,
+ TS_SDATA_C_MARK, ETH_TXD0_B_MARK,
AUDIO_CLKB_MARK, I2C0_SDA_B_MARK, SCIFA4_TXD_D_MARK, VI1_FIELD_MARK,
- TS_SCK_C_MARK, RIF0_CLK_B_MARK, BPFCLK_E_MARK, ETH_MDC_B_MARK,
+ TS_SCK_C_MARK, BPFCLK_E_MARK, ETH_MDC_B_MARK,
AUDIO_CLKC_MARK, I2C4_SCL_B_MARK, SCIFA5_RXD_D_MARK, VI1_HSYNC_N_MARK,
- TS_SDEN_C_MARK, RIF0_D0_B_MARK, FMCLK_E_MARK, RDS_CLK_D_MARK,
+ TS_SDEN_C_MARK, FMCLK_E_MARK,
AUDIO_CLKOUT_MARK, I2C4_SDA_B_MARK, SCIFA5_TXD_D_MARK, VI1_VSYNC_N_MARK,
- TS_SPSYNC_C_MARK, RIF0_D1_B_MARK, FMIN_E_MARK, RDS_DATA_D_MARK,
+ TS_SPSYNC_C_MARK, FMIN_E_MARK,
PINMUX_MARK_END,
};
@@ -700,15 +772,14 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP1_14_13, I2C1_SDA_D, SEL_I2C01_3),
PINMUX_IPSR_GPSR(IP1_17_15, D13),
PINMUX_IPSR_MSEL(IP1_17_15, SCIFA1_SCK, SEL_SCIFA1_0),
- PINMUX_IPSR_GPSR(IP1_17_15, TANS1),
PINMUX_IPSR_GPSR(IP1_17_15, PWM2_C),
PINMUX_IPSR_MSEL(IP1_17_15, TCLK2_B, SEL_TMU_1),
PINMUX_IPSR_GPSR(IP1_19_18, D14),
PINMUX_IPSR_MSEL(IP1_19_18, SCIFA1_RXD, SEL_SCIFA1_0),
- PINMUX_IPSR_MSEL(IP1_19_18, IIC0_SCL_B, SEL_IIC00_1),
+ PINMUX_IPSR_MSEL(IP1_19_18, I2C5_SCL_B, SEL_I2C05_1),
PINMUX_IPSR_GPSR(IP1_21_20, D15),
PINMUX_IPSR_MSEL(IP1_21_20, SCIFA1_TXD, SEL_SCIFA1_0),
- PINMUX_IPSR_MSEL(IP1_21_20, IIC0_SDA_B, SEL_IIC00_1),
+ PINMUX_IPSR_MSEL(IP1_21_20, I2C5_SDA_B, SEL_I2C05_1),
PINMUX_IPSR_GPSR(IP1_23_22, A0),
PINMUX_IPSR_GPSR(IP1_23_22, SCIFB1_SCK),
PINMUX_IPSR_GPSR(IP1_23_22, PWM3_B),
@@ -739,10 +810,10 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP2_5_4, SCIFA0_TXD_B, SEL_SCIFA0_1),
PINMUX_IPSR_GPSR(IP2_7_6, A10),
PINMUX_IPSR_MSEL(IP2_7_6, MSIOF1_SCK, SEL_MSI1_0),
- PINMUX_IPSR_MSEL(IP2_7_6, IIC1_SCL_B, SEL_IIC01_1),
+ PINMUX_IPSR_MSEL(IP2_7_6, IIC0_SCL_B, SEL_IIC0_1),
PINMUX_IPSR_GPSR(IP2_9_8, A11),
PINMUX_IPSR_MSEL(IP2_9_8, MSIOF1_SYNC, SEL_MSI1_0),
- PINMUX_IPSR_MSEL(IP2_9_8, IIC1_SDA_B, SEL_IIC01_1),
+ PINMUX_IPSR_MSEL(IP2_9_8, IIC0_SDA_B, SEL_IIC0_1),
PINMUX_IPSR_GPSR(IP2_11_10, A12),
PINMUX_IPSR_MSEL(IP2_11_10, MSIOF1_SS1, SEL_MSI1_0),
PINMUX_IPSR_MSEL(IP2_11_10, SCIFA5_RXD_B, SEL_SCIFA5_1),
@@ -761,39 +832,31 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP2_20_18, MSIOF2_SCK, SEL_MSI2_0),
PINMUX_IPSR_MSEL(IP2_20_18, HSCIF0_HSCK_B, SEL_HSCIF0_1),
PINMUX_IPSR_MSEL(IP2_20_18, SPEEDIN, SEL_RSP_0),
- PINMUX_IPSR_MSEL(IP2_20_18, VSP, SEL_SPDM_0),
PINMUX_IPSR_MSEL(IP2_20_18, CAN_CLK_C, SEL_CAN_2),
PINMUX_IPSR_GPSR(IP2_20_18, TPUTO2_B),
PINMUX_IPSR_GPSR(IP2_23_21, A17),
PINMUX_IPSR_MSEL(IP2_23_21, MSIOF2_SYNC, SEL_MSI2_0),
PINMUX_IPSR_MSEL(IP2_23_21, SCIF4_RXD_E, SEL_SCIF4_4),
PINMUX_IPSR_MSEL(IP2_23_21, CAN1_RX_B, SEL_CAN1_1),
- PINMUX_IPSR_MSEL(IP2_23_21, AVB_AVTP_CAPTURE_B, SEL_AVB_1),
PINMUX_IPSR_GPSR(IP2_26_24, A18),
PINMUX_IPSR_MSEL(IP2_26_24, MSIOF2_SS1, SEL_MSI2_0),
PINMUX_IPSR_MSEL(IP2_26_24, SCIF4_TXD_E, SEL_SCIF4_4),
PINMUX_IPSR_MSEL(IP2_26_24, CAN1_TX_B, SEL_CAN1_1),
- PINMUX_IPSR_MSEL(IP2_26_24, AVB_AVTP_MATCH_B, SEL_AVB_1),
PINMUX_IPSR_GPSR(IP2_29_27, A19),
PINMUX_IPSR_MSEL(IP2_29_27, MSIOF2_SS2, SEL_MSI2_0),
PINMUX_IPSR_GPSR(IP2_29_27, PWM4),
PINMUX_IPSR_GPSR(IP2_29_27, TPUTO2),
- PINMUX_IPSR_GPSR(IP2_29_27, MOUT0),
PINMUX_IPSR_GPSR(IP2_31_30, A20),
PINMUX_IPSR_GPSR(IP2_31_30, SPCLK),
- PINMUX_IPSR_GPSR(IP2_29_27, MOUT1),
/* IPSR3 */
PINMUX_IPSR_GPSR(IP3_1_0, A21),
PINMUX_IPSR_GPSR(IP3_1_0, MOSI_IO0),
- PINMUX_IPSR_GPSR(IP3_1_0, MOUT2),
PINMUX_IPSR_GPSR(IP3_3_2, A22),
PINMUX_IPSR_GPSR(IP3_3_2, MISO_IO1),
- PINMUX_IPSR_GPSR(IP3_3_2, MOUT5),
PINMUX_IPSR_GPSR(IP3_3_2, ATADIR1_N),
PINMUX_IPSR_GPSR(IP3_5_4, A23),
PINMUX_IPSR_GPSR(IP3_5_4, IO2),
- PINMUX_IPSR_GPSR(IP3_5_4, MOUT6),
PINMUX_IPSR_GPSR(IP3_5_4, ATAWR1_N),
PINMUX_IPSR_GPSR(IP3_7_6, A24),
PINMUX_IPSR_GPSR(IP3_7_6, IO3),
@@ -815,40 +878,31 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP3_17_15, PWM0),
PINMUX_IPSR_MSEL(IP3_17_15, SCIF4_RXD_C, SEL_SCIF4_2),
PINMUX_IPSR_MSEL(IP3_17_15, TS_SDATA_B, SEL_TSIF0_1),
- PINMUX_IPSR_MSEL(IP3_17_15, RIF0_SYNC, SEL_DR0_0),
PINMUX_IPSR_GPSR(IP3_17_15, TPUTO3),
PINMUX_IPSR_GPSR(IP3_17_15, SCIFB2_TXD),
- PINMUX_IPSR_MSEL(IP3_17_15, SDATA_B, SEL_FSN_1),
PINMUX_IPSR_GPSR(IP3_20_18, EX_CS3_N),
PINMUX_IPSR_MSEL(IP3_20_18, SCIFA2_SCK, SEL_SCIFA2_0),
PINMUX_IPSR_MSEL(IP3_20_18, SCIF4_TXD_C, SEL_SCIF4_2),
PINMUX_IPSR_MSEL(IP3_20_18, TS_SCK_B, SEL_TSIF0_1),
- PINMUX_IPSR_MSEL(IP3_20_18, RIF0_CLK, SEL_DR0_0),
PINMUX_IPSR_MSEL(IP3_20_18, BPFCLK, SEL_DARC_0),
PINMUX_IPSR_GPSR(IP3_20_18, SCIFB2_SCK),
- PINMUX_IPSR_MSEL(IP3_20_18, MDATA_B, SEL_FSN_1),
PINMUX_IPSR_GPSR(IP3_23_21, EX_CS4_N),
PINMUX_IPSR_MSEL(IP3_23_21, SCIFA2_RXD, SEL_SCIFA2_0),
PINMUX_IPSR_MSEL(IP3_23_21, I2C2_SCL_E, SEL_I2C02_4),
PINMUX_IPSR_MSEL(IP3_23_21, TS_SDEN_B, SEL_TSIF0_1),
- PINMUX_IPSR_MSEL(IP3_23_21, RIF0_D0, SEL_DR0_0),
PINMUX_IPSR_MSEL(IP3_23_21, FMCLK, SEL_DARC_0),
PINMUX_IPSR_GPSR(IP3_23_21, SCIFB2_CTS_N),
- PINMUX_IPSR_MSEL(IP3_23_21, SCKZ_B, SEL_FSN_1),
PINMUX_IPSR_GPSR(IP3_26_24, EX_CS5_N),
PINMUX_IPSR_MSEL(IP3_26_24, SCIFA2_TXD, SEL_SCIFA2_0),
PINMUX_IPSR_MSEL(IP3_26_24, I2C2_SDA_E, SEL_I2C02_4),
PINMUX_IPSR_MSEL(IP3_26_24, TS_SPSYNC_B, SEL_TSIF0_1),
- PINMUX_IPSR_MSEL(IP3_26_24, RIF0_D1, SEL_DR1_0),
PINMUX_IPSR_MSEL(IP3_26_24, FMIN, SEL_DARC_0),
PINMUX_IPSR_GPSR(IP3_26_24, SCIFB2_RTS_N),
- PINMUX_IPSR_MSEL(IP3_26_24, STM_N_B, SEL_FSN_1),
PINMUX_IPSR_GPSR(IP3_29_27, BS_N),
PINMUX_IPSR_GPSR(IP3_29_27, DRACK0),
PINMUX_IPSR_GPSR(IP3_29_27, PWM1_C),
PINMUX_IPSR_GPSR(IP3_29_27, TPUTO0_C),
PINMUX_IPSR_GPSR(IP3_29_27, ATACS01_N),
- PINMUX_IPSR_MSEL(IP3_29_27, MTS_N_B, SEL_FSN_1),
PINMUX_IPSR_GPSR(IP3_30, RD_N),
PINMUX_IPSR_GPSR(IP3_30, ATACS11_N),
PINMUX_IPSR_GPSR(IP3_31, RD_WR_N),
@@ -858,121 +912,88 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP4_1_0, EX_WAIT0),
PINMUX_IPSR_MSEL(IP4_1_0, CAN_CLK_B, SEL_CAN_1),
PINMUX_IPSR_MSEL(IP4_1_0, SCIF_CLK, SEL_SCIF0_0),
- PINMUX_IPSR_GPSR(IP4_1_0, PWMFSW0),
PINMUX_IPSR_GPSR(IP4_4_2, DU0_DR0),
PINMUX_IPSR_GPSR(IP4_4_2, LCDOUT16),
PINMUX_IPSR_MSEL(IP4_4_2, SCIF5_RXD_C, SEL_SCIF5_2),
PINMUX_IPSR_MSEL(IP4_4_2, I2C2_SCL_D, SEL_I2C02_3),
- PINMUX_IPSR_GPSR(IP4_4_2, CC50_STATE0),
PINMUX_IPSR_GPSR(IP4_7_5, DU0_DR1),
PINMUX_IPSR_GPSR(IP4_7_5, LCDOUT17),
PINMUX_IPSR_MSEL(IP4_7_5, SCIF5_TXD_C, SEL_SCIF5_2),
PINMUX_IPSR_MSEL(IP4_7_5, I2C2_SDA_D, SEL_I2C02_3),
- PINMUX_IPSR_GPSR(IP4_9_8, CC50_STATE1),
PINMUX_IPSR_GPSR(IP4_9_8, DU0_DR2),
PINMUX_IPSR_GPSR(IP4_9_8, LCDOUT18),
- PINMUX_IPSR_GPSR(IP4_9_8, CC50_STATE2),
PINMUX_IPSR_GPSR(IP4_11_10, DU0_DR3),
PINMUX_IPSR_GPSR(IP4_11_10, LCDOUT19),
- PINMUX_IPSR_GPSR(IP4_11_10, CC50_STATE3),
PINMUX_IPSR_GPSR(IP4_13_12, DU0_DR4),
PINMUX_IPSR_GPSR(IP4_13_12, LCDOUT20),
- PINMUX_IPSR_GPSR(IP4_13_12, CC50_STATE4),
PINMUX_IPSR_GPSR(IP4_15_14, DU0_DR5),
PINMUX_IPSR_GPSR(IP4_15_14, LCDOUT21),
- PINMUX_IPSR_GPSR(IP4_15_14, CC50_STATE5),
PINMUX_IPSR_GPSR(IP4_17_16, DU0_DR6),
PINMUX_IPSR_GPSR(IP4_17_16, LCDOUT22),
- PINMUX_IPSR_GPSR(IP4_17_16, CC50_STATE6),
PINMUX_IPSR_GPSR(IP4_19_18, DU0_DR7),
PINMUX_IPSR_GPSR(IP4_19_18, LCDOUT23),
- PINMUX_IPSR_GPSR(IP4_19_18, CC50_STATE7),
PINMUX_IPSR_GPSR(IP4_22_20, DU0_DG0),
PINMUX_IPSR_GPSR(IP4_22_20, LCDOUT8),
PINMUX_IPSR_MSEL(IP4_22_20, SCIFA0_RXD_C, SEL_SCIFA0_2),
PINMUX_IPSR_MSEL(IP4_22_20, I2C3_SCL_D, SEL_I2C03_3),
- PINMUX_IPSR_GPSR(IP4_22_20, CC50_STATE8),
PINMUX_IPSR_GPSR(IP4_25_23, DU0_DG1),
PINMUX_IPSR_GPSR(IP4_25_23, LCDOUT9),
PINMUX_IPSR_MSEL(IP4_25_23, SCIFA0_TXD_C, SEL_SCIFA0_2),
PINMUX_IPSR_MSEL(IP4_25_23, I2C3_SDA_D, SEL_I2C03_3),
- PINMUX_IPSR_GPSR(IP4_25_23, CC50_STATE9),
PINMUX_IPSR_GPSR(IP4_27_26, DU0_DG2),
PINMUX_IPSR_GPSR(IP4_27_26, LCDOUT10),
- PINMUX_IPSR_GPSR(IP4_27_26, CC50_STATE10),
PINMUX_IPSR_GPSR(IP4_29_28, DU0_DG3),
PINMUX_IPSR_GPSR(IP4_29_28, LCDOUT11),
- PINMUX_IPSR_GPSR(IP4_29_28, CC50_STATE11),
PINMUX_IPSR_GPSR(IP4_31_30, DU0_DG4),
PINMUX_IPSR_GPSR(IP4_31_30, LCDOUT12),
- PINMUX_IPSR_GPSR(IP4_31_30, CC50_STATE12),
/* IPSR5 */
PINMUX_IPSR_GPSR(IP5_1_0, DU0_DG5),
PINMUX_IPSR_GPSR(IP5_1_0, LCDOUT13),
- PINMUX_IPSR_GPSR(IP5_1_0, CC50_STATE13),
PINMUX_IPSR_GPSR(IP5_3_2, DU0_DG6),
PINMUX_IPSR_GPSR(IP5_3_2, LCDOUT14),
- PINMUX_IPSR_GPSR(IP5_3_2, CC50_STATE14),
PINMUX_IPSR_GPSR(IP5_5_4, DU0_DG7),
PINMUX_IPSR_GPSR(IP5_5_4, LCDOUT15),
- PINMUX_IPSR_GPSR(IP5_5_4, CC50_STATE15),
PINMUX_IPSR_GPSR(IP5_8_6, DU0_DB0),
PINMUX_IPSR_GPSR(IP5_8_6, LCDOUT0),
PINMUX_IPSR_MSEL(IP5_8_6, SCIFA4_RXD_C, SEL_SCIFA4_2),
PINMUX_IPSR_MSEL(IP5_8_6, I2C4_SCL_D, SEL_I2C04_3),
PINMUX_IPSR_MSEL(IP7_8_6, CAN0_RX_C, SEL_CAN0_2),
- PINMUX_IPSR_GPSR(IP5_8_6, CC50_STATE16),
PINMUX_IPSR_GPSR(IP5_11_9, DU0_DB1),
PINMUX_IPSR_GPSR(IP5_11_9, LCDOUT1),
PINMUX_IPSR_MSEL(IP5_11_9, SCIFA4_TXD_C, SEL_SCIFA4_2),
PINMUX_IPSR_MSEL(IP5_11_9, I2C4_SDA_D, SEL_I2C04_3),
PINMUX_IPSR_MSEL(IP5_11_9, CAN0_TX_C, SEL_CAN0_2),
- PINMUX_IPSR_GPSR(IP5_11_9, CC50_STATE17),
PINMUX_IPSR_GPSR(IP5_13_12, DU0_DB2),
PINMUX_IPSR_GPSR(IP5_13_12, LCDOUT2),
- PINMUX_IPSR_GPSR(IP5_13_12, CC50_STATE18),
PINMUX_IPSR_GPSR(IP5_15_14, DU0_DB3),
PINMUX_IPSR_GPSR(IP5_15_14, LCDOUT3),
- PINMUX_IPSR_GPSR(IP5_15_14, CC50_STATE19),
PINMUX_IPSR_GPSR(IP5_17_16, DU0_DB4),
PINMUX_IPSR_GPSR(IP5_17_16, LCDOUT4),
- PINMUX_IPSR_GPSR(IP5_17_16, CC50_STATE20),
PINMUX_IPSR_GPSR(IP5_19_18, DU0_DB5),
PINMUX_IPSR_GPSR(IP5_19_18, LCDOUT5),
- PINMUX_IPSR_GPSR(IP5_19_18, CC50_STATE21),
PINMUX_IPSR_GPSR(IP5_21_20, DU0_DB6),
PINMUX_IPSR_GPSR(IP5_21_20, LCDOUT6),
- PINMUX_IPSR_GPSR(IP5_21_20, CC50_STATE22),
PINMUX_IPSR_GPSR(IP5_23_22, DU0_DB7),
PINMUX_IPSR_GPSR(IP5_23_22, LCDOUT7),
- PINMUX_IPSR_GPSR(IP5_23_22, CC50_STATE23),
PINMUX_IPSR_GPSR(IP5_25_24, DU0_DOTCLKIN),
PINMUX_IPSR_GPSR(IP5_25_24, QSTVA_QVS),
- PINMUX_IPSR_GPSR(IP5_25_24, CC50_STATE24),
PINMUX_IPSR_GPSR(IP5_27_26, DU0_DOTCLKOUT0),
PINMUX_IPSR_GPSR(IP5_27_26, QCLK),
- PINMUX_IPSR_GPSR(IP5_27_26, CC50_STATE25),
PINMUX_IPSR_GPSR(IP5_29_28, DU0_DOTCLKOUT1),
PINMUX_IPSR_GPSR(IP5_29_28, QSTVB_QVE),
- PINMUX_IPSR_GPSR(IP5_29_28, CC50_STATE26),
PINMUX_IPSR_GPSR(IP5_31_30, DU0_EXHSYNC_DU0_HSYNC),
PINMUX_IPSR_GPSR(IP5_31_30, QSTH_QHS),
- PINMUX_IPSR_GPSR(IP5_31_30, CC50_STATE27),
/* IPSR6 */
PINMUX_IPSR_GPSR(IP6_1_0, DU0_EXVSYNC_DU0_VSYNC),
PINMUX_IPSR_GPSR(IP6_1_0, QSTB_QHE),
- PINMUX_IPSR_GPSR(IP6_1_0, CC50_STATE28),
PINMUX_IPSR_GPSR(IP6_3_2, DU0_EXODDF_DU0_ODDF_DISP_CDE),
PINMUX_IPSR_GPSR(IP6_3_2, QCPV_QDE),
- PINMUX_IPSR_GPSR(IP6_3_2, CC50_STATE29),
PINMUX_IPSR_GPSR(IP6_5_4, DU0_DISP),
PINMUX_IPSR_GPSR(IP6_5_4, QPOLA),
- PINMUX_IPSR_GPSR(IP6_5_4, CC50_STATE30),
PINMUX_IPSR_GPSR(IP6_7_6, DU0_CDE),
PINMUX_IPSR_GPSR(IP6_7_6, QPOLB),
- PINMUX_IPSR_GPSR(IP6_7_6, CC50_STATE31),
PINMUX_IPSR_GPSR(IP6_8, VI0_CLK),
PINMUX_IPSR_GPSR(IP6_8, AVB_RX_CLK),
PINMUX_IPSR_GPSR(IP6_9, VI0_DATA0_VI0_B0),
@@ -1014,33 +1035,29 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP6_31_29, ETH_MDIO, SEL_ETH_0),
PINMUX_IPSR_GPSR(IP6_31_29, VI0_G0),
PINMUX_IPSR_MSEL(IP6_31_29, MSIOF2_RXD_B, SEL_MSI2_1),
- PINMUX_IPSR_MSEL(IP6_31_29, IIC0_SCL_D, SEL_IIC00_3),
+ PINMUX_IPSR_MSEL(IP6_31_29, I2C5_SCL_D, SEL_I2C05_3),
PINMUX_IPSR_GPSR(IP6_31_29, AVB_TX_CLK),
PINMUX_IPSR_MSEL(IP6_31_29, ADIDATA, SEL_RAD_0),
- PINMUX_IPSR_MSEL(IP6_31_29, AD_DI, SEL_ADI_0),
/* IPSR7 */
PINMUX_IPSR_MSEL(IP7_2_0, ETH_CRS_DV, SEL_ETH_0),
PINMUX_IPSR_GPSR(IP7_2_0, VI0_G1),
PINMUX_IPSR_MSEL(IP7_2_0, MSIOF2_TXD_B, SEL_MSI2_1),
- PINMUX_IPSR_MSEL(IP7_2_0, IIC0_SDA_D, SEL_IIC00_3),
+ PINMUX_IPSR_MSEL(IP7_2_0, I2C5_SDA_D, SEL_I2C05_3),
PINMUX_IPSR_GPSR(IP7_2_0, AVB_TXD0),
PINMUX_IPSR_MSEL(IP7_2_0, ADICS_SAMP, SEL_RAD_0),
- PINMUX_IPSR_MSEL(IP7_2_0, AD_DO, SEL_ADI_0),
PINMUX_IPSR_MSEL(IP7_5_3, ETH_RX_ER, SEL_ETH_0),
PINMUX_IPSR_GPSR(IP7_5_3, VI0_G2),
PINMUX_IPSR_MSEL(IP7_5_3, MSIOF2_SCK_B, SEL_MSI2_1),
PINMUX_IPSR_MSEL(IP7_5_3, CAN0_RX_B, SEL_CAN0_1),
PINMUX_IPSR_GPSR(IP7_5_3, AVB_TXD1),
PINMUX_IPSR_MSEL(IP7_5_3, ADICLK, SEL_RAD_0),
- PINMUX_IPSR_MSEL(IP7_5_3, AD_CLK, SEL_ADI_0),
PINMUX_IPSR_MSEL(IP7_8_6, ETH_RXD0, SEL_ETH_0),
PINMUX_IPSR_GPSR(IP7_8_6, VI0_G3),
PINMUX_IPSR_MSEL(IP7_8_6, MSIOF2_SYNC_B, SEL_MSI2_1),
PINMUX_IPSR_MSEL(IP7_8_6, CAN0_TX_B, SEL_CAN0_1),
PINMUX_IPSR_GPSR(IP7_8_6, AVB_TXD2),
PINMUX_IPSR_MSEL(IP7_8_6, ADICHS0, SEL_RAD_0),
- PINMUX_IPSR_MSEL(IP7_8_6, AD_NCS_N, SEL_ADI_0),
PINMUX_IPSR_MSEL(IP7_11_9, ETH_RXD1, SEL_ETH_0),
PINMUX_IPSR_GPSR(IP7_11_9, VI0_G4),
PINMUX_IPSR_MSEL(IP7_11_9, MSIOF2_SS1_B, SEL_MSI2_1),
@@ -1061,13 +1078,13 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP7_20_18, ETH_TXD1, SEL_ETH_0),
PINMUX_IPSR_GPSR(IP7_20_18, VI0_G7),
PINMUX_IPSR_MSEL(IP7_20_18, SCIF2_RXD_C, SEL_SCIF2_2),
- PINMUX_IPSR_MSEL(IP7_20_18, IIC1_SCL_D, SEL_IIC01_3),
+ PINMUX_IPSR_MSEL(IP7_20_18, IIC0_SCL_D, SEL_IIC0_3),
PINMUX_IPSR_GPSR(IP7_20_18, AVB_TXD6),
PINMUX_IPSR_MSEL(IP7_20_18, SSI_WS5_B, SEL_SSI5_1),
PINMUX_IPSR_MSEL(IP7_23_21, ETH_TX_EN, SEL_ETH_0),
PINMUX_IPSR_GPSR(IP7_23_21, VI0_R0),
PINMUX_IPSR_MSEL(IP7_23_21, SCIF2_TXD_C, SEL_SCIF2_2),
- PINMUX_IPSR_MSEL(IP7_23_21, IIC1_SDA_D, SEL_IIC01_3),
+ PINMUX_IPSR_MSEL(IP7_23_21, IIC0_SDA_D, SEL_IIC0_3),
PINMUX_IPSR_GPSR(IP7_23_21, AVB_TXD7),
PINMUX_IPSR_MSEL(IP7_23_21, SSI_SDATA5_B, SEL_SSI5_1),
PINMUX_IPSR_MSEL(IP7_26_24, ETH_MAGIC, SEL_ETH_0),
@@ -1136,60 +1153,48 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP8_25_23, SCIF4_RXD, SEL_SCIF4_0),
PINMUX_IPSR_GPSR(IP8_25_23, PWM5_B),
PINMUX_IPSR_GPSR(IP8_25_23, DU1_DR0),
- PINMUX_IPSR_MSEL(IP8_25_23, RIF1_SYNC_B, SEL_DR2_1),
PINMUX_IPSR_MSEL(IP8_25_23, TS_SDATA_D, SEL_TSIF0_3),
PINMUX_IPSR_GPSR(IP8_25_23, TPUTO1_B),
PINMUX_IPSR_MSEL(IP8_28_26, I2C1_SDA, SEL_I2C01_0),
PINMUX_IPSR_MSEL(IP8_28_26, SCIF4_TXD, SEL_SCIF4_0),
PINMUX_IPSR_GPSR(IP8_28_26, IRQ5),
PINMUX_IPSR_GPSR(IP8_28_26, DU1_DR1),
- PINMUX_IPSR_MSEL(IP8_28_26, RIF1_CLK_B, SEL_DR2_1),
PINMUX_IPSR_MSEL(IP8_28_26, TS_SCK_D, SEL_TSIF0_3),
PINMUX_IPSR_MSEL(IP8_28_26, BPFCLK_C, SEL_DARC_2),
PINMUX_IPSR_GPSR(IP8_31_29, MSIOF0_RXD),
PINMUX_IPSR_MSEL(IP8_31_29, SCIF5_RXD, SEL_SCIF5_0),
PINMUX_IPSR_MSEL(IP8_31_29, I2C2_SCL_C, SEL_I2C02_2),
PINMUX_IPSR_GPSR(IP8_31_29, DU1_DR2),
- PINMUX_IPSR_MSEL(IP8_31_29, RIF1_D0_B, SEL_DR2_1),
PINMUX_IPSR_MSEL(IP8_31_29, TS_SDEN_D, SEL_TSIF0_3),
PINMUX_IPSR_MSEL(IP8_31_29, FMCLK_C, SEL_DARC_2),
- PINMUX_IPSR_MSEL(IP8_31_29, RDS_CLK, SEL_RDS_0),
/* IPSR9 */
PINMUX_IPSR_GPSR(IP9_2_0, MSIOF0_TXD),
PINMUX_IPSR_MSEL(IP9_2_0, SCIF5_TXD, SEL_SCIF5_0),
PINMUX_IPSR_MSEL(IP9_2_0, I2C2_SDA_C, SEL_I2C02_2),
PINMUX_IPSR_GPSR(IP9_2_0, DU1_DR3),
- PINMUX_IPSR_MSEL(IP9_2_0, RIF1_D1_B, SEL_DR3_1),
PINMUX_IPSR_MSEL(IP9_2_0, TS_SPSYNC_D, SEL_TSIF0_3),
PINMUX_IPSR_MSEL(IP9_2_0, FMIN_C, SEL_DARC_2),
- PINMUX_IPSR_MSEL(IP9_2_0, RDS_DATA, SEL_RDS_0),
PINMUX_IPSR_GPSR(IP9_5_3, MSIOF0_SCK),
PINMUX_IPSR_GPSR(IP9_5_3, IRQ0),
PINMUX_IPSR_MSEL(IP9_5_3, TS_SDATA, SEL_TSIF0_0),
PINMUX_IPSR_GPSR(IP9_5_3, DU1_DR4),
- PINMUX_IPSR_MSEL(IP9_5_3, RIF1_SYNC, SEL_DR2_0),
PINMUX_IPSR_GPSR(IP9_5_3, TPUTO1_C),
PINMUX_IPSR_GPSR(IP9_8_6, MSIOF0_SYNC),
PINMUX_IPSR_GPSR(IP9_8_6, PWM1),
PINMUX_IPSR_MSEL(IP9_8_6, TS_SCK, SEL_TSIF0_0),
PINMUX_IPSR_GPSR(IP9_8_6, DU1_DR5),
- PINMUX_IPSR_MSEL(IP9_8_6, RIF1_CLK, SEL_DR2_0),
PINMUX_IPSR_MSEL(IP9_8_6, BPFCLK_B, SEL_DARC_1),
PINMUX_IPSR_GPSR(IP9_11_9, MSIOF0_SS1),
PINMUX_IPSR_MSEL(IP9_11_9, SCIFA0_RXD, SEL_SCIFA0_0),
PINMUX_IPSR_MSEL(IP9_11_9, TS_SDEN, SEL_TSIF0_0),
PINMUX_IPSR_GPSR(IP9_11_9, DU1_DR6),
- PINMUX_IPSR_MSEL(IP9_11_9, RIF1_D0, SEL_DR2_0),
PINMUX_IPSR_MSEL(IP9_11_9, FMCLK_B, SEL_DARC_1),
- PINMUX_IPSR_MSEL(IP9_11_9, RDS_CLK_B, SEL_RDS_1),
PINMUX_IPSR_GPSR(IP9_14_12, MSIOF0_SS2),
PINMUX_IPSR_MSEL(IP9_14_12, SCIFA0_TXD, SEL_SCIFA0_0),
PINMUX_IPSR_MSEL(IP9_14_12, TS_SPSYNC, SEL_TSIF0_0),
PINMUX_IPSR_GPSR(IP9_14_12, DU1_DR7),
- PINMUX_IPSR_MSEL(IP9_14_12, RIF1_D1, SEL_DR3_0),
PINMUX_IPSR_MSEL(IP9_14_12, FMIN_B, SEL_DARC_1),
- PINMUX_IPSR_MSEL(IP9_14_12, RDS_DATA_B, SEL_RDS_1),
PINMUX_IPSR_MSEL(IP9_16_15, HSCIF1_HRX, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP9_16_15, I2C4_SCL, SEL_I2C04_0),
PINMUX_IPSR_GPSR(IP9_16_15, PWM6),
@@ -1204,165 +1209,124 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP9_21_19, DU1_DG2),
PINMUX_IPSR_MSEL(IP9_21_19, REMOCON_B, SEL_RCN_1),
PINMUX_IPSR_MSEL(IP9_21_19, SPEEDIN_B, SEL_RSP_1),
- PINMUX_IPSR_MSEL(IP9_21_19, VSP_B, SEL_SPDM_1),
PINMUX_IPSR_MSEL(IP9_24_22, HSCIF1_HCTS_N, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP9_24_22, SCIFA4_RXD, SEL_SCIFA4_0),
PINMUX_IPSR_MSEL(IP9_24_22, IECLK, SEL_IEB_0),
PINMUX_IPSR_GPSR(IP9_24_22, DU1_DG3),
PINMUX_IPSR_MSEL(IP9_24_22, SSI_SCK1_B, SEL_SSI1_1),
- PINMUX_IPSR_GPSR(IP9_24_22, CAN_DEBUG_HW_TRIGGER),
- PINMUX_IPSR_GPSR(IP9_24_22, CC50_STATE32),
PINMUX_IPSR_MSEL(IP9_27_25, HSCIF1_HRTS_N, SEL_HSCIF1_0),
PINMUX_IPSR_MSEL(IP9_27_25, SCIFA4_TXD, SEL_SCIFA4_0),
PINMUX_IPSR_MSEL(IP9_27_25, IERX, SEL_IEB_0),
PINMUX_IPSR_GPSR(IP9_27_25, DU1_DG4),
PINMUX_IPSR_MSEL(IP9_27_25, SSI_WS1_B, SEL_SSI1_1),
- PINMUX_IPSR_GPSR(IP9_27_25, CAN_STEP0),
- PINMUX_IPSR_GPSR(IP9_27_25, CC50_STATE33),
PINMUX_IPSR_MSEL(IP9_30_28, SCIF1_SCK, SEL_SCIF1_0),
PINMUX_IPSR_GPSR(IP9_30_28, PWM3),
PINMUX_IPSR_MSEL(IP9_30_28, TCLK2, SEL_TMU_0),
PINMUX_IPSR_GPSR(IP9_30_28, DU1_DG5),
PINMUX_IPSR_MSEL(IP9_30_28, SSI_SDATA1_B, SEL_SSI1_1),
- PINMUX_IPSR_GPSR(IP9_30_28, CAN_TXCLK),
- PINMUX_IPSR_GPSR(IP9_30_28, CC50_STATE34),
/* IPSR10 */
PINMUX_IPSR_MSEL(IP10_2_0, SCIF1_RXD, SEL_SCIF1_0),
- PINMUX_IPSR_MSEL(IP10_2_0, IIC0_SCL, SEL_IIC00_0),
+ PINMUX_IPSR_MSEL(IP10_2_0, I2C5_SCL, SEL_I2C05_0),
PINMUX_IPSR_GPSR(IP10_2_0, DU1_DG6),
PINMUX_IPSR_MSEL(IP10_2_0, SSI_SCK2_B, SEL_SSI2_1),
- PINMUX_IPSR_GPSR(IP10_2_0, CAN_DEBUGOUT0),
- PINMUX_IPSR_GPSR(IP10_2_0, CC50_STATE35),
PINMUX_IPSR_MSEL(IP10_5_3, SCIF1_TXD, SEL_SCIF1_0),
- PINMUX_IPSR_MSEL(IP10_5_3, IIC0_SDA, SEL_IIC00_0),
+ PINMUX_IPSR_MSEL(IP10_5_3, I2C5_SDA, SEL_I2C05_0),
PINMUX_IPSR_GPSR(IP10_5_3, DU1_DG7),
PINMUX_IPSR_MSEL(IP10_5_3, SSI_WS2_B, SEL_SSI2_1),
- PINMUX_IPSR_GPSR(IP10_5_3, CAN_DEBUGOUT1),
- PINMUX_IPSR_GPSR(IP10_5_3, CC50_STATE36),
PINMUX_IPSR_MSEL(IP10_8_6, SCIF2_RXD, SEL_SCIF2_0),
- PINMUX_IPSR_MSEL(IP10_8_6, IIC1_SCL, SEL_IIC01_0),
+ PINMUX_IPSR_MSEL(IP10_8_6, IIC0_SCL, SEL_IIC0_0),
PINMUX_IPSR_GPSR(IP10_8_6, DU1_DB0),
PINMUX_IPSR_MSEL(IP10_8_6, SSI_SDATA2_B, SEL_SSI2_1),
- PINMUX_IPSR_GPSR(IP10_8_6, USB0_EXTLP),
- PINMUX_IPSR_GPSR(IP10_8_6, CAN_DEBUGOUT2),
- PINMUX_IPSR_GPSR(IP10_8_6, CC50_STATE37),
PINMUX_IPSR_MSEL(IP10_11_9, SCIF2_TXD, SEL_SCIF2_0),
- PINMUX_IPSR_MSEL(IP10_11_9, IIC1_SDA, SEL_IIC01_0),
+ PINMUX_IPSR_MSEL(IP10_11_9, IIC0_SDA, SEL_IIC0_0),
PINMUX_IPSR_GPSR(IP10_11_9, DU1_DB1),
PINMUX_IPSR_MSEL(IP10_11_9, SSI_SCK9_B, SEL_SSI9_1),
- PINMUX_IPSR_GPSR(IP10_11_9, USB0_OVC1),
- PINMUX_IPSR_GPSR(IP10_11_9, CAN_DEBUGOUT3),
- PINMUX_IPSR_GPSR(IP10_11_9, CC50_STATE38),
PINMUX_IPSR_MSEL(IP10_14_12, SCIF2_SCK, SEL_SCIF2_0),
PINMUX_IPSR_GPSR(IP10_14_12, IRQ1),
PINMUX_IPSR_GPSR(IP10_14_12, DU1_DB2),
PINMUX_IPSR_MSEL(IP10_14_12, SSI_WS9_B, SEL_SSI9_1),
- PINMUX_IPSR_GPSR(IP10_14_12, USB0_IDIN),
- PINMUX_IPSR_GPSR(IP10_14_12, CAN_DEBUGOUT4),
- PINMUX_IPSR_GPSR(IP10_14_12, CC50_STATE39),
PINMUX_IPSR_MSEL(IP10_17_15, SCIF3_SCK, SEL_SCIF3_0),
PINMUX_IPSR_GPSR(IP10_17_15, IRQ2),
PINMUX_IPSR_MSEL(IP10_17_15, BPFCLK_D, SEL_DARC_3),
PINMUX_IPSR_GPSR(IP10_17_15, DU1_DB3),
PINMUX_IPSR_MSEL(IP10_17_15, SSI_SDATA9_B, SEL_SSI9_1),
- PINMUX_IPSR_GPSR(IP10_17_15, TANS2),
- PINMUX_IPSR_GPSR(IP10_17_15, CAN_DEBUGOUT5),
- PINMUX_IPSR_GPSR(IP10_17_15, CC50_OSCOUT),
PINMUX_IPSR_MSEL(IP10_20_18, SCIF3_RXD, SEL_SCIF3_0),
PINMUX_IPSR_MSEL(IP10_20_18, I2C1_SCL_E, SEL_I2C01_4),
PINMUX_IPSR_MSEL(IP10_20_18, FMCLK_D, SEL_DARC_3),
PINMUX_IPSR_GPSR(IP10_20_18, DU1_DB4),
PINMUX_IPSR_MSEL(IP10_20_18, AUDIO_CLKA_C, SEL_ADG_2),
PINMUX_IPSR_MSEL(IP10_20_18, SSI_SCK4_B, SEL_SSI4_1),
- PINMUX_IPSR_GPSR(IP10_20_18, CAN_DEBUGOUT6),
- PINMUX_IPSR_MSEL(IP10_20_18, RDS_CLK_C, SEL_RDS_2),
PINMUX_IPSR_MSEL(IP10_23_21, SCIF3_TXD, SEL_SCIF3_0),
PINMUX_IPSR_MSEL(IP10_23_21, I2C1_SDA_E, SEL_I2C01_4),
PINMUX_IPSR_MSEL(IP10_23_21, FMIN_D, SEL_DARC_3),
PINMUX_IPSR_GPSR(IP10_23_21, DU1_DB5),
PINMUX_IPSR_MSEL(IP10_23_21, AUDIO_CLKB_C, SEL_ADG_2),
PINMUX_IPSR_MSEL(IP10_23_21, SSI_WS4_B, SEL_SSI4_1),
- PINMUX_IPSR_GPSR(IP10_23_21, CAN_DEBUGOUT7),
- PINMUX_IPSR_MSEL(IP10_23_21, RDS_DATA_C, SEL_RDS_2),
PINMUX_IPSR_MSEL(IP10_26_24, I2C2_SCL, SEL_I2C02_0),
PINMUX_IPSR_MSEL(IP10_26_24, SCIFA5_RXD, SEL_SCIFA5_0),
PINMUX_IPSR_GPSR(IP10_26_24, DU1_DB6),
PINMUX_IPSR_MSEL(IP10_26_24, AUDIO_CLKC_C, SEL_ADG_2),
PINMUX_IPSR_MSEL(IP10_26_24, SSI_SDATA4_B, SEL_SSI4_1),
- PINMUX_IPSR_GPSR(IP10_26_24, CAN_DEBUGOUT8),
PINMUX_IPSR_MSEL(IP10_29_27, I2C2_SDA, SEL_I2C02_0),
PINMUX_IPSR_MSEL(IP10_29_27, SCIFA5_TXD, SEL_SCIFA5_0),
PINMUX_IPSR_GPSR(IP10_29_27, DU1_DB7),
PINMUX_IPSR_MSEL(IP10_29_27, AUDIO_CLKOUT_C, SEL_ADG_2),
- PINMUX_IPSR_GPSR(IP10_29_27, CAN_DEBUGOUT9),
PINMUX_IPSR_MSEL(IP10_31_30, SSI_SCK5, SEL_SSI5_0),
PINMUX_IPSR_MSEL(IP10_31_30, SCIFA3_SCK, SEL_SCIFA3_0),
PINMUX_IPSR_GPSR(IP10_31_30, DU1_DOTCLKIN),
- PINMUX_IPSR_GPSR(IP10_31_30, CAN_DEBUGOUT10),
/* IPSR11 */
PINMUX_IPSR_MSEL(IP11_2_0, SSI_WS5, SEL_SSI5_0),
PINMUX_IPSR_MSEL(IP11_2_0, SCIFA3_RXD, SEL_SCIFA3_0),
PINMUX_IPSR_MSEL(IP11_2_0, I2C3_SCL_C, SEL_I2C03_2),
PINMUX_IPSR_GPSR(IP11_2_0, DU1_DOTCLKOUT0),
- PINMUX_IPSR_GPSR(IP11_2_0, CAN_DEBUGOUT11),
PINMUX_IPSR_MSEL(IP11_5_3, SSI_SDATA5, SEL_SSI5_0),
PINMUX_IPSR_MSEL(IP11_5_3, SCIFA3_TXD, SEL_SCIFA3_0),
PINMUX_IPSR_MSEL(IP11_5_3, I2C3_SDA_C, SEL_I2C03_2),
PINMUX_IPSR_GPSR(IP11_5_3, DU1_DOTCLKOUT1),
- PINMUX_IPSR_GPSR(IP11_5_3, CAN_DEBUGOUT12),
PINMUX_IPSR_MSEL(IP11_7_6, SSI_SCK6, SEL_SSI6_0),
PINMUX_IPSR_MSEL(IP11_7_6, SCIFA1_SCK_B, SEL_SCIFA1_1),
PINMUX_IPSR_GPSR(IP11_7_6, DU1_EXHSYNC_DU1_HSYNC),
- PINMUX_IPSR_GPSR(IP11_7_6, CAN_DEBUGOUT13),
PINMUX_IPSR_MSEL(IP11_10_8, SSI_WS6, SEL_SSI6_0),
PINMUX_IPSR_MSEL(IP11_10_8, SCIFA1_RXD_B, SEL_SCIFA1_1),
PINMUX_IPSR_MSEL(IP11_10_8, I2C4_SCL_C, SEL_I2C04_2),
PINMUX_IPSR_GPSR(IP11_10_8, DU1_EXVSYNC_DU1_VSYNC),
- PINMUX_IPSR_GPSR(IP11_10_8, CAN_DEBUGOUT14),
PINMUX_IPSR_MSEL(IP11_13_11, SSI_SDATA6, SEL_SSI6_0),
PINMUX_IPSR_MSEL(IP11_13_11, SCIFA1_TXD_B, SEL_SCIFA1_1),
PINMUX_IPSR_MSEL(IP11_13_11, I2C4_SDA_C, SEL_I2C04_2),
PINMUX_IPSR_GPSR(IP11_13_11, DU1_EXODDF_DU1_ODDF_DISP_CDE),
- PINMUX_IPSR_GPSR(IP11_13_11, CAN_DEBUGOUT15),
PINMUX_IPSR_MSEL(IP11_15_14, SSI_SCK78, SEL_SSI7_0),
PINMUX_IPSR_MSEL(IP11_15_14, SCIFA2_SCK_B, SEL_SCIFA2_1),
- PINMUX_IPSR_MSEL(IP11_15_14, IIC0_SDA_C, SEL_IIC00_2),
+ PINMUX_IPSR_MSEL(IP11_15_14, I2C5_SDA_C, SEL_I2C05_2),
PINMUX_IPSR_GPSR(IP11_15_14, DU1_DISP),
PINMUX_IPSR_MSEL(IP11_17_16, SSI_WS78, SEL_SSI7_0),
PINMUX_IPSR_MSEL(IP11_17_16, SCIFA2_RXD_B, SEL_SCIFA2_1),
- PINMUX_IPSR_MSEL(IP11_17_16, IIC0_SCL_C, SEL_IIC00_2),
+ PINMUX_IPSR_MSEL(IP11_17_16, I2C5_SCL_C, SEL_I2C05_2),
PINMUX_IPSR_GPSR(IP11_17_16, DU1_CDE),
PINMUX_IPSR_MSEL(IP11_20_18, SSI_SDATA7, SEL_SSI7_0),
PINMUX_IPSR_MSEL(IP11_20_18, SCIFA2_TXD_B, SEL_SCIFA2_1),
PINMUX_IPSR_GPSR(IP11_20_18, IRQ8),
PINMUX_IPSR_MSEL(IP11_20_18, AUDIO_CLKA_D, SEL_ADG_3),
PINMUX_IPSR_MSEL(IP11_20_18, CAN_CLK_D, SEL_CAN_3),
- PINMUX_IPSR_GPSR(IP11_20_18, PCMOE_N),
PINMUX_IPSR_GPSR(IP11_23_21, SSI_SCK0129),
PINMUX_IPSR_MSEL(IP11_23_21, MSIOF1_RXD_B, SEL_MSI1_1),
PINMUX_IPSR_MSEL(IP11_23_21, SCIF5_RXD_D, SEL_SCIF5_3),
PINMUX_IPSR_MSEL(IP11_23_21, ADIDATA_B, SEL_RAD_1),
- PINMUX_IPSR_MSEL(IP11_23_21, AD_DI_B, SEL_ADI_1),
- PINMUX_IPSR_GPSR(IP11_23_21, PCMWE_N),
PINMUX_IPSR_GPSR(IP11_26_24, SSI_WS0129),
PINMUX_IPSR_MSEL(IP11_26_24, MSIOF1_TXD_B, SEL_MSI1_1),
PINMUX_IPSR_MSEL(IP11_26_24, SCIF5_TXD_D, SEL_SCIF5_3),
PINMUX_IPSR_MSEL(IP11_26_24, ADICS_SAMP_B, SEL_RAD_1),
- PINMUX_IPSR_MSEL(IP11_26_24, AD_DO_B, SEL_ADI_1),
PINMUX_IPSR_GPSR(IP11_29_27, SSI_SDATA0),
PINMUX_IPSR_MSEL(IP11_29_27, MSIOF1_SCK_B, SEL_MSI1_1),
PINMUX_IPSR_GPSR(IP11_29_27, PWM0_B),
PINMUX_IPSR_MSEL(IP11_29_27, ADICLK_B, SEL_RAD_1),
- PINMUX_IPSR_MSEL(IP11_29_27, AD_CLK_B, SEL_ADI_1),
/* IPSR12 */
PINMUX_IPSR_GPSR(IP12_2_0, SSI_SCK34),
PINMUX_IPSR_MSEL(IP12_2_0, MSIOF1_SYNC_B, SEL_MSI1_1),
PINMUX_IPSR_MSEL(IP12_2_0, SCIFA1_SCK_C, SEL_SCIFA1_2),
PINMUX_IPSR_MSEL(IP12_2_0, ADICHS0_B, SEL_RAD_1),
- PINMUX_IPSR_MSEL(IP12_2_0, AD_NCS_N_B, SEL_ADI_1),
PINMUX_IPSR_MSEL(IP12_2_0, DREQ1_N_B, SEL_LBS_1),
PINMUX_IPSR_GPSR(IP12_5_3, SSI_WS34),
PINMUX_IPSR_MSEL(IP12_5_3, MSIOF1_SS1_B, SEL_MSI1_1),
@@ -1379,15 +1343,12 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP12_10_9, SSI_SCK4, SEL_SSI4_0),
PINMUX_IPSR_GPSR(IP12_10_9, MLB_CLK),
PINMUX_IPSR_MSEL(IP12_10_9, IETX_B, SEL_IEB_1),
- PINMUX_IPSR_GPSR(IP12_10_9, IRD_TX),
PINMUX_IPSR_MSEL(IP12_12_11, SSI_WS4, SEL_SSI4_0),
PINMUX_IPSR_GPSR(IP12_12_11, MLB_SIG),
PINMUX_IPSR_MSEL(IP12_12_11, IECLK_B, SEL_IEB_1),
- PINMUX_IPSR_GPSR(IP12_12_11, IRD_RX),
PINMUX_IPSR_MSEL(IP12_14_13, SSI_SDATA4, SEL_SSI4_0),
PINMUX_IPSR_GPSR(IP12_14_13, MLB_DAT),
PINMUX_IPSR_MSEL(IP12_14_13, IERX_B, SEL_IEB_1),
- PINMUX_IPSR_GPSR(IP12_14_13, IRD_SCK),
PINMUX_IPSR_MSEL(IP12_17_15, SSI_SDATA8, SEL_SSI8_0),
PINMUX_IPSR_MSEL(IP12_17_15, SCIF1_SCK_B, SEL_SCIF1_1),
PINMUX_IPSR_GPSR(IP12_17_15, PWM1_B),
@@ -1397,28 +1358,24 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP12_17_15, ETH_MDIO_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP12_20_18, SSI_SCK1, SEL_SSI1_0),
PINMUX_IPSR_MSEL(IP12_20_18, SCIF1_RXD_B, SEL_SCIF1_1),
- PINMUX_IPSR_MSEL(IP12_20_18, IIC1_SCL_C, SEL_IIC01_2),
+ PINMUX_IPSR_MSEL(IP12_20_18, IIC0_SCL_C, SEL_IIC0_2),
PINMUX_IPSR_GPSR(IP12_20_18, VI1_CLK),
PINMUX_IPSR_MSEL(IP12_20_18, CAN0_RX_D, SEL_CAN0_3),
- PINMUX_IPSR_MSEL(IP12_20_18, AVB_AVTP_CAPTURE, SEL_AVB_0),
PINMUX_IPSR_MSEL(IP12_20_18, ETH_CRS_DV_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP12_23_21, SSI_WS1, SEL_SSI1_0),
PINMUX_IPSR_MSEL(IP12_23_21, SCIF1_TXD_B, SEL_SCIF1_1),
- PINMUX_IPSR_MSEL(IP12_23_21, IIC1_SDA_C, SEL_IIC01_2),
+ PINMUX_IPSR_MSEL(IP12_23_21, IIC0_SDA_C, SEL_IIC0_2),
PINMUX_IPSR_GPSR(IP12_23_21, VI1_DATA0),
PINMUX_IPSR_MSEL(IP12_23_21, CAN0_TX_D, SEL_CAN0_3),
- PINMUX_IPSR_MSEL(IP12_23_21, AVB_AVTP_MATCH, SEL_AVB_0),
PINMUX_IPSR_MSEL(IP12_23_21, ETH_RX_ER_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP12_26_24, SSI_SDATA1, SEL_SSI1_0),
PINMUX_IPSR_MSEL(IP12_26_24, HSCIF1_HRX_B, SEL_HSCIF1_1),
PINMUX_IPSR_GPSR(IP12_26_24, VI1_DATA1),
- PINMUX_IPSR_MSEL(IP12_26_24, SDATA, SEL_FSN_0),
PINMUX_IPSR_GPSR(IP12_26_24, ATAWR0_N),
PINMUX_IPSR_MSEL(IP12_26_24, ETH_RXD0_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP12_29_27, SSI_SCK2, SEL_SSI2_0),
PINMUX_IPSR_MSEL(IP12_29_27, HSCIF1_HTX_B, SEL_HSCIF1_1),
PINMUX_IPSR_GPSR(IP12_29_27, VI1_DATA2),
- PINMUX_IPSR_MSEL(IP12_29_27, MDATA, SEL_FSN_0),
PINMUX_IPSR_GPSR(IP12_29_27, ATAG0_N),
PINMUX_IPSR_MSEL(IP12_29_27, ETH_RXD1_B, SEL_ETH_1),
@@ -1427,21 +1384,18 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP13_2_0, HSCIF1_HCTS_N_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP13_2_0, SCIFA0_RXD_D, SEL_SCIFA0_3),
PINMUX_IPSR_GPSR(IP13_2_0, VI1_DATA3),
- PINMUX_IPSR_MSEL(IP13_2_0, SCKZ, SEL_FSN_0),
PINMUX_IPSR_GPSR(IP13_2_0, ATACS00_N),
PINMUX_IPSR_MSEL(IP13_2_0, ETH_LINK_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP13_5_3, SSI_SDATA2, SEL_SSI2_0),
PINMUX_IPSR_MSEL(IP13_5_3, HSCIF1_HRTS_N_B, SEL_HSCIF1_1),
PINMUX_IPSR_MSEL(IP13_5_3, SCIFA0_TXD_D, SEL_SCIFA0_3),
PINMUX_IPSR_GPSR(IP13_5_3, VI1_DATA4),
- PINMUX_IPSR_MSEL(IP13_5_3, STM_N, SEL_FSN_0),
PINMUX_IPSR_GPSR(IP13_5_3, ATACS10_N),
PINMUX_IPSR_MSEL(IP13_5_3, ETH_REFCLK_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP13_8_6, SSI_SCK9, SEL_SSI9_0),
PINMUX_IPSR_MSEL(IP13_8_6, SCIF2_SCK_B, SEL_SCIF2_1),
PINMUX_IPSR_GPSR(IP13_8_6, PWM2_B),
PINMUX_IPSR_GPSR(IP13_8_6, VI1_DATA5),
- PINMUX_IPSR_MSEL(IP13_8_6, MTS_N, SEL_FSN_0),
PINMUX_IPSR_GPSR(IP13_8_6, EX_WAIT1),
PINMUX_IPSR_MSEL(IP13_8_6, ETH_TXD1_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP13_11_9, SSI_WS9, SEL_SSI9_0),
@@ -1461,14 +1415,12 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP13_17_15, SCIFA4_RXD_D, SEL_SCIFA4_3),
PINMUX_IPSR_GPSR(IP13_17_15, VI1_CLKENB),
PINMUX_IPSR_MSEL(IP13_17_15, TS_SDATA_C, SEL_TSIF0_2),
- PINMUX_IPSR_MSEL(IP13_17_15, RIF0_SYNC_B, SEL_DR0_1),
PINMUX_IPSR_MSEL(IP13_17_15, ETH_TXD0_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP13_20_18, AUDIO_CLKB, SEL_ADG_0),
PINMUX_IPSR_MSEL(IP13_20_18, I2C0_SDA_B, SEL_I2C00_1),
PINMUX_IPSR_MSEL(IP13_20_18, SCIFA4_TXD_D, SEL_SCIFA4_3),
PINMUX_IPSR_GPSR(IP13_20_18, VI1_FIELD),
PINMUX_IPSR_MSEL(IP13_20_18, TS_SCK_C, SEL_TSIF0_2),
- PINMUX_IPSR_MSEL(IP13_20_18, RIF0_CLK_B, SEL_DR0_1),
PINMUX_IPSR_MSEL(IP13_20_18, BPFCLK_E, SEL_DARC_4),
PINMUX_IPSR_MSEL(IP13_20_18, ETH_MDC_B, SEL_ETH_1),
PINMUX_IPSR_MSEL(IP13_23_21, AUDIO_CLKC, SEL_ADG_0),
@@ -1476,17 +1428,13 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP13_23_21, SCIFA5_RXD_D, SEL_SCIFA5_3),
PINMUX_IPSR_GPSR(IP13_23_21, VI1_HSYNC_N),
PINMUX_IPSR_MSEL(IP13_23_21, TS_SDEN_C, SEL_TSIF0_2),
- PINMUX_IPSR_MSEL(IP13_23_21, RIF0_D0_B, SEL_DR0_1),
PINMUX_IPSR_MSEL(IP13_23_21, FMCLK_E, SEL_DARC_4),
- PINMUX_IPSR_MSEL(IP13_23_21, RDS_CLK_D, SEL_RDS_3),
PINMUX_IPSR_MSEL(IP13_26_24, AUDIO_CLKOUT, SEL_ADG_0),
PINMUX_IPSR_MSEL(IP13_26_24, I2C4_SDA_B, SEL_I2C04_1),
PINMUX_IPSR_MSEL(IP13_26_24, SCIFA5_TXD_D, SEL_SCIFA5_3),
PINMUX_IPSR_GPSR(IP13_26_24, VI1_VSYNC_N),
PINMUX_IPSR_MSEL(IP13_26_24, TS_SPSYNC_C, SEL_TSIF0_2),
- PINMUX_IPSR_MSEL(IP13_26_24, RIF0_D1_B, SEL_DR1_1),
PINMUX_IPSR_MSEL(IP13_26_24, FMIN_E, SEL_DARC_4),
- PINMUX_IPSR_MSEL(IP13_26_24, RDS_DATA_D, SEL_RDS_3),
};
 
static const struct sh_pfc_pin pinmux_pins[] = {
@@ -1660,30 +1608,6 @@ static const unsigned int avb_gmii_mux[] = {
AVB_TX_EN_MARK, AVB_TX_ER_MARK, AVB_TX_CLK_MARK,
AVB_COL_MARK,
};
-static const unsigned int avb_avtp_capture_pins[] = {
- RCAR_GP_PIN(5, 11),
-};
-static const unsigned int avb_avtp_capture_mux[] = {
- AVB_AVTP_CAPTURE_MARK,
-};
-static const unsigned int avb_avtp_match_pins[] = {
- RCAR_GP_PIN(5, 12),
-};
-static const unsigned int avb_avtp_match_mux[] = {
- AVB_AVTP_MATCH_MARK,
-};
-static const unsigned int avb_avtp_capture_b_pins[] = {
- RCAR_GP_PIN(1, 1),
-};
-static const unsigned int avb_avtp_capture_b_mux[] = {
- AVB_AVTP_CAPTURE_B_MARK,
-};
-static const unsigned int avb_avtp_match_b_pins[] = {
- RCAR_GP_PIN(1, 2),
-};
-static const unsigned int avb_avtp_match_b_mux[] = {
- AVB_AVTP_MATCH_B_MARK,
-};
/* - DU --------------------------------------------------------------------- */
static const unsigned int du0_rgb666_pins[] = {
/* R[7:2], G[7:2], B[7:2] */
@@ -3535,10 +3459,6 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(avb_mdio),
SH_PFC_PIN_GROUP(avb_mii),
SH_PFC_PIN_GROUP(avb_gmii),
- SH_PFC_PIN_GROUP(avb_avtp_capture),
- SH_PFC_PIN_GROUP(avb_avtp_match),
- SH_PFC_PIN_GROUP(avb_avtp_capture_b),
- SH_PFC_PIN_GROUP(avb_avtp_match_b),
SH_PFC_PIN_GROUP(du0_rgb666),
SH_PFC_PIN_GROUP(du0_rgb888),
SH_PFC_PIN_GROUP(du0_clk0_out),
@@ -3809,10 +3729,6 @@ static const char * const avb_groups[] = {
"avb_mdio",
"avb_mii",
"avb_gmii",
- "avb_avtp_capture",
- "avb_avtp_match",
- "avb_avtp_capture_b",
- "avb_avtp_match_b",
};
 
static const char * const du0_groups[] = {
@@ -4540,11 +4456,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP1_23_22 [2] */
FN_A0, FN_SCIFB1_SCK, FN_PWM3_B, 0,
/* IP1_21_20 [2] */
- FN_D15, FN_SCIFA1_TXD, FN_IIC0_SDA_B, 0,
+ FN_D15, FN_SCIFA1_TXD, FN_I2C5_SDA_B, 0,
/* IP1_19_18 [2] */
- FN_D14, FN_SCIFA1_RXD, FN_IIC0_SCL_B, 0,
+ FN_D14, FN_SCIFA1_RXD, FN_I2C5_SCL_B, 0,
/* IP1_17_15 [3] */
- FN_D13, FN_SCIFA1_SCK, FN_TANS1, FN_PWM2_C, FN_TCLK2_B,
+ FN_D13, FN_SCIFA1_SCK, 0, FN_PWM2_C, FN_TCLK2_B,
0, 0, 0,
/* IP1_14_13 [2] */
FN_D12, FN_HSCIF2_HRTS_N, FN_SCIF1_TXD_C, FN_I2C1_SDA_D,
@@ -4565,19 +4481,19 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG_VAR("IPSR2", 0xE6060028, 32,
2, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2) {
/* IP2_31_30 [2] */
- FN_A20, FN_SPCLK, FN_MOUT1, 0,
+ FN_A20, FN_SPCLK, 0, 0,
/* IP2_29_27 [3] */
FN_A19, FN_MSIOF2_SS2, FN_PWM4, FN_TPUTO2,
- FN_MOUT0, 0, 0, 0,
+ 0, 0, 0, 0,
/* IP2_26_24 [3] */
FN_A18, FN_MSIOF2_SS1, FN_SCIF4_TXD_E, FN_CAN1_TX_B,
- FN_AVB_AVTP_MATCH_B, 0, 0, 0,
+ 0, 0, 0, 0,
/* IP2_23_21 [3] */
FN_A17, FN_MSIOF2_SYNC, FN_SCIF4_RXD_E, FN_CAN1_RX_B,
- FN_AVB_AVTP_CAPTURE_B, 0, 0, 0,
+ 0, 0, 0, 0,
/* IP2_20_18 [3] */
FN_A16, FN_MSIOF2_SCK, FN_HSCIF0_HSCK_B, FN_SPEEDIN,
- FN_VSP, FN_CAN_CLK_C, FN_TPUTO2_B, 0,
+ 0, FN_CAN_CLK_C, FN_TPUTO2_B, 0,
/* IP2_17_16 [2] */
FN_A15, FN_MSIOF2_TXD, FN_HSCIF0_HTX_B, FN_DACK1,
/* IP2_15_14 [2] */
@@ -4587,9 +4503,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP2_11_10 [2] */
FN_A12, FN_MSIOF1_SS1, FN_SCIFA5_RXD_B, 0,
/* IP2_9_8 [2] */
- FN_A11, FN_MSIOF1_SYNC, FN_IIC1_SDA_B, 0,
+ FN_A11, FN_MSIOF1_SYNC, FN_IIC0_SDA_B, 0,
/* IP2_7_6 [2] */
- FN_A10, FN_MSIOF1_SCK, FN_IIC1_SCL_B, 0,
+ FN_A10, FN_MSIOF1_SCK, FN_IIC0_SCL_B, 0,
/* IP2_5_4 [2] */
FN_A9, FN_MSIOF1_TXD, FN_SCIFA0_TXD_B, 0,
/* IP2_3_2 [2] */
@@ -4605,19 +4521,19 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_RD_N, FN_ATACS11_N,
/* IP3_29_27 [3] */
FN_BS_N, FN_DRACK0, FN_PWM1_C, FN_TPUTO0_C, FN_ATACS01_N,
- FN_MTS_N_B, 0, 0,
+ 0, 0, 0,
/* IP3_26_24 [3] */
FN_EX_CS5_N, FN_SCIFA2_TXD, FN_I2C2_SDA_E, FN_TS_SPSYNC_B,
- FN_RIF0_D1, FN_FMIN, FN_SCIFB2_RTS_N, FN_STM_N_B,
+ 0, FN_FMIN, FN_SCIFB2_RTS_N, 0,
/* IP3_23_21 [3] */
FN_EX_CS4_N, FN_SCIFA2_RXD, FN_I2C2_SCL_E, FN_TS_SDEN_B,
- FN_RIF0_D0, FN_FMCLK, FN_SCIFB2_CTS_N, FN_SCKZ_B,
+ 0, FN_FMCLK, FN_SCIFB2_CTS_N, 0,
/* IP3_20_18 [3] */
FN_EX_CS3_N, FN_SCIFA2_SCK, FN_SCIF4_TXD_C, FN_TS_SCK_B,
- FN_RIF0_CLK, FN_BPFCLK, FN_SCIFB2_SCK, FN_MDATA_B,
+ 0, FN_BPFCLK, FN_SCIFB2_SCK, 0,
/* IP3_17_15 [3] */
FN_EX_CS2_N, FN_PWM0, FN_SCIF4_RXD_C, FN_TS_SDATA_B,
- FN_RIF0_SYNC, FN_TPUTO3, FN_SCIFB2_TXD, FN_SDATA_B,
+ 0, FN_TPUTO3, FN_SCIFB2_TXD, 0,
/* IP3_14_13 [2] */
FN_EX_CS1_N, FN_TPUTO3_B, FN_SCIFB2_RXD, FN_VI1_DATA11,
/* IP3_12 [1] */
@@ -4631,88 +4547,88 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP3_7_6 [2] */
FN_A24, FN_IO3, FN_EX_WAIT2, 0,
/* IP3_5_4 [2] */
- FN_A23, FN_IO2, FN_MOUT6, FN_ATAWR1_N,
+ FN_A23, FN_IO2, 0, FN_ATAWR1_N,
/* IP3_3_2 [2] */
- FN_A22, FN_MISO_IO1, FN_MOUT5, FN_ATADIR1_N,
+ FN_A22, FN_MISO_IO1, 0, FN_ATADIR1_N,
/* IP3_1_0 [2] */
- FN_A21, FN_MOSI_IO0, FN_MOUT2, 0, }
+ FN_A21, FN_MOSI_IO0, 0, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR4", 0xE6060030, 32,
2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3, 2) {
/* IP4_31_30 [2] */
- FN_DU0_DG4, FN_LCDOUT12, FN_CC50_STATE12, 0,
+ FN_DU0_DG4, FN_LCDOUT12, 0, 0,
/* IP4_29_28 [2] */
- FN_DU0_DG3, FN_LCDOUT11, FN_CC50_STATE11, 0,
+ FN_DU0_DG3, FN_LCDOUT11, 0, 0,
/* IP4_27_26 [2] */
- FN_DU0_DG2, FN_LCDOUT10, FN_CC50_STATE10, 0,
+ FN_DU0_DG2, FN_LCDOUT10, 0, 0,
/* IP4_25_23 [3] */
FN_DU0_DG1, FN_LCDOUT9, FN_SCIFA0_TXD_C, FN_I2C3_SDA_D,
- FN_CC50_STATE9, 0, 0, 0,
+ 0, 0, 0, 0,
/* IP4_22_20 [3] */
FN_DU0_DG0, FN_LCDOUT8, FN_SCIFA0_RXD_C, FN_I2C3_SCL_D,
- FN_CC50_STATE8, 0, 0, 0,
+ 0, 0, 0, 0,
/* IP4_19_18 [2] */
- FN_DU0_DR7, FN_LCDOUT23, FN_CC50_STATE7, 0,
+ FN_DU0_DR7, FN_LCDOUT23, 0, 0,
/* IP4_17_16 [2] */
- FN_DU0_DR6, FN_LCDOUT22, FN_CC50_STATE6, 0,
+ FN_DU0_DR6, FN_LCDOUT22, 0, 0,
/* IP4_15_14 [2] */
- FN_DU0_DR5, FN_LCDOUT21, FN_CC50_STATE5, 0,
+ FN_DU0_DR5, FN_LCDOUT21, 0, 0,
/* IP4_13_12 [2] */
- FN_DU0_DR4, FN_LCDOUT20, FN_CC50_STATE4, 0,
+ FN_DU0_DR4, FN_LCDOUT20, 0, 0,
/* IP4_11_10 [2] */
- FN_DU0_DR3, FN_LCDOUT19, FN_CC50_STATE3, 0,
+ FN_DU0_DR3, FN_LCDOUT19, 0, 0,
/* IP4_9_8 [2] */
- FN_DU0_DR2, FN_LCDOUT18, FN_CC50_STATE2, 0,
+ FN_DU0_DR2, FN_LCDOUT18, 0, 0,
/* IP4_7_5 [3] */
FN_DU0_DR1, FN_LCDOUT17, FN_SCIF5_TXD_C, FN_I2C2_SDA_D,
- FN_CC50_STATE1, 0, 0, 0,
+ 0, 0, 0, 0,
/* IP4_4_2 [3] */
FN_DU0_DR0, FN_LCDOUT16, FN_SCIF5_RXD_C, FN_I2C2_SCL_D,
- FN_CC50_STATE0, 0, 0, 0,
+ 0, 0, 0, 0,
/* IP4_1_0 [2] */
- FN_EX_WAIT0, FN_CAN_CLK_B, FN_SCIF_CLK, FN_PWMFSW0, }
+ FN_EX_WAIT0, FN_CAN_CLK_B, FN_SCIF_CLK, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR5", 0xE6060034, 32,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 2, 2, 2) {
/* IP5_31_30 [2] */
- FN_DU0_EXHSYNC_DU0_HSYNC, FN_QSTH_QHS, FN_CC50_STATE27, 0,
+ FN_DU0_EXHSYNC_DU0_HSYNC, FN_QSTH_QHS, 0, 0,
/* IP5_29_28 [2] */
- FN_DU0_DOTCLKOUT1, FN_QSTVB_QVE, FN_CC50_STATE26, 0,
+ FN_DU0_DOTCLKOUT1, FN_QSTVB_QVE, 0, 0,
/* IP5_27_26 [2] */
- FN_DU0_DOTCLKOUT0, FN_QCLK, FN_CC50_STATE25, 0,
+ FN_DU0_DOTCLKOUT0, FN_QCLK, 0, 0,
/* IP5_25_24 [2] */
- FN_DU0_DOTCLKIN, FN_QSTVA_QVS, FN_CC50_STATE24, 0,
+ FN_DU0_DOTCLKIN, FN_QSTVA_QVS, 0, 0,
/* IP5_23_22 [2] */
- FN_DU0_DB7, FN_LCDOUT7, FN_CC50_STATE23, 0,
+ FN_DU0_DB7, FN_LCDOUT7, 0, 0,
/* IP5_21_20 [2] */
- FN_DU0_DB6, FN_LCDOUT6, FN_CC50_STATE22, 0,
+ FN_DU0_DB6, FN_LCDOUT6, 0, 0,
/* IP5_19_18 [2] */
- FN_DU0_DB5, FN_LCDOUT5, FN_CC50_STATE21, 0,
+ FN_DU0_DB5, FN_LCDOUT5, 0, 0,
/* IP5_17_16 [2] */
- FN_DU0_DB4, FN_LCDOUT4, FN_CC50_STATE20, 0,
+ FN_DU0_DB4, FN_LCDOUT4, 0, 0,
/* IP5_15_14 [2] */
- FN_DU0_DB3, FN_LCDOUT3, FN_CC50_STATE19, 0,
+ FN_DU0_DB3, FN_LCDOUT3, 0, 0,
/* IP5_13_12 [2] */
- FN_DU0_DB2, FN_LCDOUT2, FN_CC50_STATE18, 0,
+ FN_DU0_DB2, FN_LCDOUT2, 0, 0,
/* IP5_11_9 [3] */
FN_DU0_DB1, FN_LCDOUT1, FN_SCIFA4_TXD_C, FN_I2C4_SDA_D,
- FN_CAN0_TX_C, FN_CC50_STATE17, 0, 0,
+ FN_CAN0_TX_C, 0, 0, 0,
/* IP5_8_6 [3] */
FN_DU0_DB0, FN_LCDOUT0, FN_SCIFA4_RXD_C, FN_I2C4_SCL_D,
- FN_CAN0_RX_C, FN_CC50_STATE16, 0, 0,
+ FN_CAN0_RX_C, 0, 0, 0,
/* IP5_5_4 [2] */
- FN_DU0_DG7, FN_LCDOUT15, FN_CC50_STATE15, 0,
+ FN_DU0_DG7, FN_LCDOUT15, 0, 0,
/* IP5_3_2 [2] */
- FN_DU0_DG6, FN_LCDOUT14, FN_CC50_STATE14, 0,
+ FN_DU0_DG6, FN_LCDOUT14, 0, 0,
/* IP5_1_0 [2] */
- FN_DU0_DG5, FN_LCDOUT13, FN_CC50_STATE13, 0, }
+ FN_DU0_DG5, FN_LCDOUT13, 0, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR6", 0xE6060038, 32,
3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
2, 2) {
/* IP6_31_29 [3] */
- FN_ETH_MDIO, FN_VI0_G0, FN_MSIOF2_RXD_B, FN_IIC0_SCL_D,
- FN_AVB_TX_CLK, FN_ADIDATA, FN_AD_DI, 0,
+ FN_ETH_MDIO, FN_VI0_G0, FN_MSIOF2_RXD_B, FN_I2C5_SCL_D,
+ FN_AVB_TX_CLK, FN_ADIDATA, 0, 0,
/* IP6_28_26 [3] */
FN_VI0_VSYNC_N, FN_SCIF0_TXD_B, FN_I2C0_SDA_C,
FN_AUDIO_CLKOUT_B, FN_AVB_TX_EN, 0, 0, 0,
@@ -4744,14 +4660,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP6_8 [1] */
FN_VI0_CLK, FN_AVB_RX_CLK,
/* IP6_7_6 [2] */
- FN_DU0_CDE, FN_QPOLB, FN_CC50_STATE31, 0,
+ FN_DU0_CDE, FN_QPOLB, 0, 0,
/* IP6_5_4 [2] */
- FN_DU0_DISP, FN_QPOLA, FN_CC50_STATE30, 0,
+ FN_DU0_DISP, FN_QPOLA, 0, 0,
/* IP6_3_2 [2] */
- FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, FN_QCPV_QDE, FN_CC50_STATE29,
+ FN_DU0_EXODDF_DU0_ODDF_DISP_CDE, FN_QCPV_QDE, 0,
0,
/* IP6_1_0 [2] */
- FN_DU0_EXVSYNC_DU0_VSYNC, FN_QSTB_QHE, FN_CC50_STATE28, 0, }
+ FN_DU0_EXVSYNC_DU0_VSYNC, FN_QSTB_QHE, 0, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR7", 0xE606003C, 32,
1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3) {
@@ -4766,10 +4682,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_ETH_MAGIC, FN_VI0_R1, FN_SCIF3_SCK_B, FN_AVB_TX_ER,
FN_SSI_SCK6_B, 0, 0, 0,
/* IP7_23_21 [3] */
- FN_ETH_TX_EN, FN_VI0_R0, FN_SCIF2_TXD_C, FN_IIC1_SDA_D,
+ FN_ETH_TX_EN, FN_VI0_R0, FN_SCIF2_TXD_C, FN_IIC0_SDA_D,
FN_AVB_TXD7, FN_SSI_SDATA5_B, 0, 0,
/* IP7_20_18 [3] */
- FN_ETH_TXD1, FN_VI0_G7, FN_SCIF2_RXD_C, FN_IIC1_SCL_D,
+ FN_ETH_TXD1, FN_VI0_G7, FN_SCIF2_RXD_C, FN_IIC0_SCL_D,
FN_AVB_TXD6, FN_SSI_WS5_B, 0, 0,
/* IP7_17_15 [3] */
FN_ETH_REFCLK, FN_VI0_G6, FN_SCIF2_SCK_C, FN_AVB_TXD5,
@@ -4782,25 +4698,25 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_AVB_TXD3, FN_ADICHS1, 0, 0,
/* IP7_8_6 [3] */
FN_ETH_RXD0, FN_VI0_G3, FN_MSIOF2_SYNC_B, FN_CAN0_TX_B,
- FN_AVB_TXD2, FN_ADICHS0, FN_AD_NCS_N, 0,
+ FN_AVB_TXD2, FN_ADICHS0, 0, 0,
/* IP7_5_3 [3] */
FN_ETH_RX_ER, FN_VI0_G2, FN_MSIOF2_SCK_B, FN_CAN0_RX_B,
- FN_AVB_TXD1, FN_ADICLK, FN_AD_CLK, 0,
+ FN_AVB_TXD1, FN_ADICLK, 0, 0,
/* IP7_2_0 [3] */
- FN_ETH_CRS_DV, FN_VI0_G1, FN_MSIOF2_TXD_B, FN_IIC0_SDA_D,
- FN_AVB_TXD0, FN_ADICS_SAMP, FN_AD_DO, 0, }
+ FN_ETH_CRS_DV, FN_VI0_G1, FN_MSIOF2_TXD_B, FN_I2C5_SDA_D,
+ FN_AVB_TXD0, FN_ADICS_SAMP, 0, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR8", 0xE6060040, 32,
3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3) {
/* IP8_31_29 [3] */
FN_MSIOF0_RXD, FN_SCIF5_RXD, FN_I2C2_SCL_C, FN_DU1_DR2,
- FN_RIF1_D0_B, FN_TS_SDEN_D, FN_FMCLK_C, FN_RDS_CLK,
+ 0, FN_TS_SDEN_D, FN_FMCLK_C, 0,
/* IP8_28_26 [3] */
FN_I2C1_SDA, FN_SCIF4_TXD, FN_IRQ5, FN_DU1_DR1,
- FN_RIF1_CLK_B, FN_TS_SCK_D, FN_BPFCLK_C, 0,
+ 0, FN_TS_SCK_D, FN_BPFCLK_C, 0,
/* IP8_25_23 [3] */
FN_I2C1_SCL, FN_SCIF4_RXD, FN_PWM5_B, FN_DU1_DR0,
- FN_RIF1_SYNC_B, FN_TS_SDATA_D, FN_TPUTO1_B, 0,
+ 0, FN_TS_SDATA_D, FN_TPUTO1_B, 0,
/* IP8_22_20 [3] */
FN_I2C0_SDA, FN_SCIF0_TXD_C, FN_TPUTO0, FN_CAN_CLK,
FN_DVC_MUTE, FN_CAN1_TX_D, 0, 0,
@@ -4831,70 +4747,70 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
/* IP9_30_28 [3] */
FN_SCIF1_SCK, FN_PWM3, FN_TCLK2, FN_DU1_DG5,
- FN_SSI_SDATA1_B, FN_CAN_TXCLK, FN_CC50_STATE34, 0,
+ FN_SSI_SDATA1_B, 0, 0, 0,
/* IP9_27_25 [3] */
FN_HSCIF1_HRTS_N, FN_SCIFA4_TXD, FN_IERX, FN_DU1_DG4,
- FN_SSI_WS1_B, FN_CAN_STEP0, FN_CC50_STATE33, 0,
+ FN_SSI_WS1_B, 0, 0, 0,
/* IP9_24_22 [3] */
FN_HSCIF1_HCTS_N, FN_SCIFA4_RXD, FN_IECLK, FN_DU1_DG3,
- FN_SSI_SCK1_B, FN_CAN_DEBUG_HW_TRIGGER, FN_CC50_STATE32, 0,
+ FN_SSI_SCK1_B, 0, 0, 0,
/* IP9_21_19 [3] */
FN_HSCIF1_HSCK, FN_PWM2, FN_IETX, FN_DU1_DG2,
- FN_REMOCON_B, FN_SPEEDIN_B, FN_VSP_B, 0,
+ FN_REMOCON_B, FN_SPEEDIN_B, 0, 0,
/* IP9_18_17 [2] */
FN_HSCIF1_HTX, FN_I2C4_SDA, FN_TPUTO1, FN_DU1_DG1,
/* IP9_16_15 [2] */
FN_HSCIF1_HRX, FN_I2C4_SCL, FN_PWM6, FN_DU1_DG0,
/* IP9_14_12 [3] */
FN_MSIOF0_SS2, FN_SCIFA0_TXD, FN_TS_SPSYNC, FN_DU1_DR7,
- FN_RIF1_D1, FN_FMIN_B, FN_RDS_DATA_B, 0,
+ 0, FN_FMIN_B, 0, 0,
/* IP9_11_9 [3] */
FN_MSIOF0_SS1, FN_SCIFA0_RXD, FN_TS_SDEN, FN_DU1_DR6,
- FN_RIF1_D0, FN_FMCLK_B, FN_RDS_CLK_B, 0,
+ 0, FN_FMCLK_B, 0, 0,
/* IP9_8_6 [3] */
FN_MSIOF0_SYNC, FN_PWM1, FN_TS_SCK, FN_DU1_DR5,
- FN_RIF1_CLK, FN_BPFCLK_B, 0, 0,
+ 0, FN_BPFCLK_B, 0, 0,
/* IP9_5_3 [3] */
FN_MSIOF0_SCK, FN_IRQ0, FN_TS_SDATA, FN_DU1_DR4,
- FN_RIF1_SYNC, FN_TPUTO1_C, 0, 0,
+ 0, FN_TPUTO1_C, 0, 0,
/* IP9_2_0 [3] */
FN_MSIOF0_TXD, FN_SCIF5_TXD, FN_I2C2_SDA_C, FN_DU1_DR3,
- FN_RIF1_D1_B, FN_TS_SPSYNC_D, FN_FMIN_C, FN_RDS_DATA, }
+ 0, FN_TS_SPSYNC_D, FN_FMIN_C, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR10", 0xE6060048, 32,
2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3) {
/* IP10_31_30 [2] */
- FN_SSI_SCK5, FN_SCIFA3_SCK, FN_DU1_DOTCLKIN, FN_CAN_DEBUGOUT10,
+ FN_SSI_SCK5, FN_SCIFA3_SCK, FN_DU1_DOTCLKIN, 0,
/* IP10_29_27 [3] */
FN_I2C2_SDA, FN_SCIFA5_TXD, FN_DU1_DB7, FN_AUDIO_CLKOUT_C,
- FN_CAN_DEBUGOUT9, 0, 0, 0,
+ 0, 0, 0, 0,
/* IP10_26_24 [3] */
FN_I2C2_SCL, FN_SCIFA5_RXD, FN_DU1_DB6, FN_AUDIO_CLKC_C,
- FN_SSI_SDATA4_B, FN_CAN_DEBUGOUT8, 0, 0,
+ FN_SSI_SDATA4_B, 0, 0, 0,
/* IP10_23_21 [3] */
FN_SCIF3_TXD, FN_I2C1_SDA_E, FN_FMIN_D, FN_DU1_DB5,
- FN_AUDIO_CLKB_C, FN_SSI_WS4_B, FN_CAN_DEBUGOUT7, FN_RDS_DATA_C,
+ FN_AUDIO_CLKB_C, FN_SSI_WS4_B, 0, 0,
/* IP10_20_18 [3] */
FN_SCIF3_RXD, FN_I2C1_SCL_E, FN_FMCLK_D, FN_DU1_DB4,
- FN_AUDIO_CLKA_C, FN_SSI_SCK4_B, FN_CAN_DEBUGOUT6, FN_RDS_CLK_C,
+ FN_AUDIO_CLKA_C, FN_SSI_SCK4_B, 0, 0,
/* IP10_17_15 [3] */
FN_SCIF3_SCK, FN_IRQ2, FN_BPFCLK_D, FN_DU1_DB3,
- FN_SSI_SDATA9_B, FN_TANS2, FN_CAN_DEBUGOUT5, FN_CC50_OSCOUT,
+ FN_SSI_SDATA9_B, 0, 0, 0,
/* IP10_14_12 [3] */
FN_SCIF2_SCK, FN_IRQ1, FN_DU1_DB2, FN_SSI_WS9_B,
- FN_USB0_IDIN, FN_CAN_DEBUGOUT4, FN_CC50_STATE39, 0,
+ 0, 0, 0, 0,
/* IP10_11_9 [3] */
- FN_SCIF2_TXD, FN_IIC1_SDA, FN_DU1_DB1, FN_SSI_SCK9_B,
- FN_USB0_OVC1, FN_CAN_DEBUGOUT3, FN_CC50_STATE38, 0,
+ FN_SCIF2_TXD, FN_IIC0_SDA, FN_DU1_DB1, FN_SSI_SCK9_B,
+ 0, 0, 0, 0,
/* IP10_8_6 [3] */
- FN_SCIF2_RXD, FN_IIC1_SCL, FN_DU1_DB0, FN_SSI_SDATA2_B,
- FN_USB0_EXTLP, FN_CAN_DEBUGOUT2, FN_CC50_STATE37, 0,
+ FN_SCIF2_RXD, FN_IIC0_SCL, FN_DU1_DB0, FN_SSI_SDATA2_B,
+ 0, 0, 0, 0,
/* IP10_5_3 [3] */
- FN_SCIF1_TXD, FN_IIC0_SDA, FN_DU1_DG7, FN_SSI_WS2_B,
- FN_CAN_DEBUGOUT1, FN_CC50_STATE36, 0, 0,
+ FN_SCIF1_TXD, FN_I2C5_SDA, FN_DU1_DG7, FN_SSI_WS2_B,
+ 0, 0, 0, 0,
/* IP10_2_0 [3] */
- FN_SCIF1_RXD, FN_IIC0_SCL, FN_DU1_DG6, FN_SSI_SCK2_B,
- FN_CAN_DEBUGOUT0, FN_CC50_STATE35, 0, 0, }
+ FN_SCIF1_RXD, FN_I2C5_SCL, FN_DU1_DG6, FN_SSI_SCK2_B,
+ 0, 0, 0, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR11", 0xE606004C, 32,
2, 3, 3, 3, 3, 2, 2, 3, 3, 2, 3, 3) {
@@ -4902,61 +4818,60 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0,
/* IP11_29_27 [3] */
FN_SSI_SDATA0, FN_MSIOF1_SCK_B, FN_PWM0_B, FN_ADICLK_B,
- FN_AD_CLK_B, 0, 0, 0,
+ 0, 0, 0, 0,
/* IP11_26_24 [3] */
FN_SSI_WS0129, FN_MSIOF1_TXD_B, FN_SCIF5_TXD_D, FN_ADICS_SAMP_B,
- FN_AD_DO_B, 0, 0, 0,
+ 0, 0, 0, 0,
/* IP11_23_21 [3] */
FN_SSI_SCK0129, FN_MSIOF1_RXD_B, FN_SCIF5_RXD_D, FN_ADIDATA_B,
- FN_AD_DI_B, FN_PCMWE_N, 0, 0,
+ 0, 0, 0, 0,
/* IP11_20_18 [3] */
FN_SSI_SDATA7, FN_SCIFA2_TXD_B, FN_IRQ8, FN_AUDIO_CLKA_D,
- FN_CAN_CLK_D, FN_PCMOE_N, 0, 0,
+ FN_CAN_CLK_D, 0, 0, 0,
/* IP11_17_16 [2] */
- FN_SSI_WS78, FN_SCIFA2_RXD_B, FN_IIC0_SCL_C, FN_DU1_CDE,
+ FN_SSI_WS78, FN_SCIFA2_RXD_B, FN_I2C5_SCL_C, FN_DU1_CDE,
/* IP11_15_14 [2] */
- FN_SSI_SCK78, FN_SCIFA2_SCK_B, FN_IIC0_SDA_C, FN_DU1_DISP,
+ FN_SSI_SCK78, FN_SCIFA2_SCK_B, FN_I2C5_SDA_C, FN_DU1_DISP,
/* IP11_13_11 [3] */
FN_SSI_SDATA6, FN_SCIFA1_TXD_B, FN_I2C4_SDA_C,
- FN_DU1_EXODDF_DU1_ODDF_DISP_CDE, FN_CAN_DEBUGOUT15, 0, 0, 0,
+ FN_DU1_EXODDF_DU1_ODDF_DISP_CDE, 0, 0, 0, 0,
/* IP11_10_8 [3] */
FN_SSI_WS6, FN_SCIFA1_RXD_B, FN_I2C4_SCL_C,
- FN_DU1_EXVSYNC_DU1_VSYNC, FN_CAN_DEBUGOUT14, 0, 0, 0,
+ FN_DU1_EXVSYNC_DU1_VSYNC, 0, 0, 0, 0,
/* IP11_7_6 [2] */
- FN_SSI_SCK6, FN_SCIFA1_SCK_B, FN_DU1_EXHSYNC_DU1_HSYNC,
- FN_CAN_DEBUGOUT13,
+ FN_SSI_SCK6, FN_SCIFA1_SCK_B, FN_DU1_EXHSYNC_DU1_HSYNC, 0,
/* IP11_5_3 [3] */
FN_SSI_SDATA5, FN_SCIFA3_TXD, FN_I2C3_SDA_C, FN_DU1_DOTCLKOUT1,
- FN_CAN_DEBUGOUT12, 0, 0, 0,
+ 0, 0, 0, 0,
/* IP11_2_0 [3] */
FN_SSI_WS5, FN_SCIFA3_RXD, FN_I2C3_SCL_C, FN_DU1_DOTCLKOUT0,
- FN_CAN_DEBUGOUT11, 0, 0, 0, }
+ 0, 0, 0, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR12", 0xE6060050, 32,
2, 3, 3, 3, 3, 3, 2, 2, 2, 3, 3, 3) {
/* IP12_31_30 [2] */
0, 0, 0, 0,
/* IP12_29_27 [3] */
- FN_SSI_SCK2, FN_HSCIF1_HTX_B, FN_VI1_DATA2, FN_MDATA,
+ FN_SSI_SCK2, FN_HSCIF1_HTX_B, FN_VI1_DATA2, 0,
FN_ATAG0_N, FN_ETH_RXD1_B, 0, 0,
/* IP12_26_24 [3] */
- FN_SSI_SDATA1, FN_HSCIF1_HRX_B, FN_VI1_DATA1, FN_SDATA,
+ FN_SSI_SDATA1, FN_HSCIF1_HRX_B, FN_VI1_DATA1, 0,
FN_ATAWR0_N, FN_ETH_RXD0_B, 0, 0,
/* IP12_23_21 [3] */
- FN_SSI_WS1, FN_SCIF1_TXD_B, FN_IIC1_SDA_C, FN_VI1_DATA0,
- FN_CAN0_TX_D, FN_AVB_AVTP_MATCH, FN_ETH_RX_ER_B, 0,
+ FN_SSI_WS1, FN_SCIF1_TXD_B, FN_IIC0_SDA_C, FN_VI1_DATA0,
+ FN_CAN0_TX_D, 0, FN_ETH_RX_ER_B, 0,
/* IP12_20_18 [3] */
- FN_SSI_SCK1, FN_SCIF1_RXD_B, FN_IIC1_SCL_C, FN_VI1_CLK,
- FN_CAN0_RX_D, FN_AVB_AVTP_CAPTURE, FN_ETH_CRS_DV_B, 0,
+ FN_SSI_SCK1, FN_SCIF1_RXD_B, FN_IIC0_SCL_C, FN_VI1_CLK,
+ FN_CAN0_RX_D, 0, FN_ETH_CRS_DV_B, 0,
/* IP12_17_15 [3] */
FN_SSI_SDATA8, FN_SCIF1_SCK_B, FN_PWM1_B, FN_IRQ9,
FN_REMOCON, FN_DACK2, FN_ETH_MDIO_B, 0,
/* IP12_14_13 [2] */
- FN_SSI_SDATA4, FN_MLB_DAT, FN_IERX_B, FN_IRD_SCK,
+ FN_SSI_SDATA4, FN_MLB_DAT, FN_IERX_B, 0,
/* IP12_12_11 [2] */
- FN_SSI_WS4, FN_MLB_SIG, FN_IECLK_B, FN_IRD_RX,
+ FN_SSI_WS4, FN_MLB_SIG, FN_IECLK_B, 0,
/* IP12_10_9 [2] */
- FN_SSI_SCK4, FN_MLB_CLK, FN_IETX_B, FN_IRD_TX,
+ FN_SSI_SCK4, FN_MLB_CLK, FN_IETX_B, 0,
/* IP12_8_6 [3] */
FN_SSI_SDATA3, FN_MSIOF1_SS2_B, FN_SCIFA1_TXD_C, FN_ADICHS2_B,
FN_CAN1_TX_C, FN_DREQ2_N, 0, 0,
@@ -4965,7 +4880,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_CAN1_RX_C, FN_DACK1_B, 0, 0,
/* IP12_2_0 [3] */
FN_SSI_SCK34, FN_MSIOF1_SYNC_B, FN_SCIFA1_SCK_C, FN_ADICHS0_B,
- FN_AD_NCS_N_B, FN_DREQ1_N_B, 0, 0, }
+ 0, FN_DREQ1_N_B, 0, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR13", 0xE6060054, 32,
1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3) {
@@ -4981,16 +4896,16 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
/* IP13_26_24 [3] */
FN_AUDIO_CLKOUT, FN_I2C4_SDA_B, FN_SCIFA5_TXD_D, FN_VI1_VSYNC_N,
- FN_TS_SPSYNC_C, FN_RIF0_D1_B, FN_FMIN_E, FN_RDS_DATA_D,
+ FN_TS_SPSYNC_C, 0, FN_FMIN_E, 0,
/* IP13_23_21 [3] */
FN_AUDIO_CLKC, FN_I2C4_SCL_B, FN_SCIFA5_RXD_D, FN_VI1_HSYNC_N,
- FN_TS_SDEN_C, FN_RIF0_D0_B, FN_FMCLK_E, FN_RDS_CLK_D,
+ FN_TS_SDEN_C, 0, FN_FMCLK_E, 0,
/* IP13_20_18 [3] */
FN_AUDIO_CLKB, FN_I2C0_SDA_B, FN_SCIFA4_TXD_D, FN_VI1_FIELD,
- FN_TS_SCK_C, FN_RIF0_CLK_B, FN_BPFCLK_E, FN_ETH_MDC_B,
+ FN_TS_SCK_C, 0, FN_BPFCLK_E, FN_ETH_MDC_B,
/* IP13_17_15 [3] */
FN_AUDIO_CLKA, FN_I2C0_SCL_B, FN_SCIFA4_RXD_D, FN_VI1_CLKENB,
- FN_TS_SDATA_C, FN_RIF0_SYNC_B, FN_ETH_TXD0_B, 0,
+ FN_TS_SDATA_C, 0, FN_ETH_TXD0_B, 0,
/* IP13_14_12 [3] */
FN_SSI_SDATA9, FN_SCIF2_TXD_B, FN_I2C3_SDA_E, FN_VI1_DATA7,
FN_ATADIR0_N, FN_ETH_MAGIC_B, 0, 0,
@@ -4999,38 +4914,32 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_ATARD0_N, FN_ETH_TX_EN_B, 0, 0,
/* IP13_8_6 [3] */
FN_SSI_SCK9, FN_SCIF2_SCK_B, FN_PWM2_B, FN_VI1_DATA5,
- FN_MTS_N, FN_EX_WAIT1, FN_ETH_TXD1_B, 0,
+ 0, FN_EX_WAIT1, FN_ETH_TXD1_B, 0,
/* IP13_5_3 [2] */
FN_SSI_SDATA2, FN_HSCIF1_HRTS_N_B, FN_SCIFA0_TXD_D,
- FN_VI1_DATA4, FN_STM_N, FN_ATACS10_N, FN_ETH_REFCLK_B, 0,
+ FN_VI1_DATA4, 0, FN_ATACS10_N, FN_ETH_REFCLK_B, 0,
/* IP13_2_0 [3] */
FN_SSI_WS2, FN_HSCIF1_HCTS_N_B, FN_SCIFA0_RXD_D, FN_VI1_DATA3,
- FN_SCKZ, FN_ATACS00_N, FN_ETH_LINK_B, 0, }
+ 0, FN_ATACS00_N, FN_ETH_LINK_B, 0, }
},
{ PINMUX_CFG_REG_VAR("MOD_SEL", 0xE6060090, 32,
- 2, 1, 2, 3, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3,
+ 2, 1, 2, 3, 4, 1, 1, 3, 3, 3, 3, 3,
2, 1) {
/* SEL_ADG [2] */
FN_SEL_ADG_0, FN_SEL_ADG_1, FN_SEL_ADG_2, FN_SEL_ADG_3,
- /* SEL_ADI [1] */
- FN_SEL_ADI_0, FN_SEL_ADI_1,
+ /* RESERVED [1] */
+ 0, 0,
/* SEL_CAN [2] */
FN_SEL_CAN_0, FN_SEL_CAN_1, FN_SEL_CAN_2, FN_SEL_CAN_3,
/* SEL_DARC [3] */
FN_SEL_DARC_0, FN_SEL_DARC_1, FN_SEL_DARC_2, FN_SEL_DARC_3,
FN_SEL_DARC_4, 0, 0, 0,
- /* SEL_DR0 [1] */
- FN_SEL_DR0_0, FN_SEL_DR0_1,
- /* SEL_DR1 [1] */
- FN_SEL_DR1_0, FN_SEL_DR1_1,
- /* SEL_DR2 [1] */
- FN_SEL_DR2_0, FN_SEL_DR2_1,
- /* SEL_DR3 [1] */
- FN_SEL_DR3_0, FN_SEL_DR3_1,
+ /* RESERVED [4] */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* SEL_ETH [1] */
FN_SEL_ETH_0, FN_SEL_ETH_1,
- /* SLE_FSN [1] */
- FN_SEL_FSN_0, FN_SEL_FSN_1,
+ /* RESERVED [1] */
+ 0, 0,
/* SEL_IC200 [3] */
FN_SEL_I2C00_0, FN_SEL_I2C00_1, FN_SEL_I2C00_2, FN_SEL_I2C00_3,
FN_SEL_I2C00_4, 0, 0, 0,
@@ -5046,10 +4955,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_I2C04 [3] */
FN_SEL_I2C04_0, FN_SEL_I2C04_1, FN_SEL_I2C04_2, FN_SEL_I2C04_3,
FN_SEL_I2C04_4, 0, 0, 0,
- /* SEL_IIC00 [2] */
- FN_SEL_IIC00_0, FN_SEL_IIC00_1, FN_SEL_IIC00_2, FN_SEL_IIC00_3,
- /* SEL_AVB [1] */
- FN_SEL_AVB_0, FN_SEL_AVB_1, }
+ /* SEL_I2C05 [2] */
+ FN_SEL_I2C05_0, FN_SEL_I2C05_1, FN_SEL_I2C05_2, FN_SEL_I2C05_3,
+ /* RESERVED [1] */
+ 0, 0, }
},
{ PINMUX_CFG_REG_VAR("MOD_SEL2", 0xE6060094, 32,
2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1,
@@ -5057,7 +4966,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_IEB [2] */
FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0,
/* SEL_IIC0 [2] */
- FN_SEL_IIC01_0, FN_SEL_IIC01_1, FN_SEL_IIC01_2, FN_SEL_IIC01_3,
+ FN_SEL_IIC0_0, FN_SEL_IIC0_1, FN_SEL_IIC0_2, FN_SEL_IIC0_3,
/* SEL_LBS [1] */
FN_SEL_LBS_0, FN_SEL_LBS_1,
/* SEL_MSI1 [1] */
@@ -5085,8 +4994,8 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_SCIFA5 [2] */
FN_SEL_SCIFA5_0, FN_SEL_SCIFA5_1, FN_SEL_SCIFA5_2,
FN_SEL_SCIFA5_3,
- /* SEL_SPDM [1] */
- FN_SEL_SPDM_0, FN_SEL_SPDM_1,
+ /* RESERVED [1] */
+ 0, 0,
/* SEL_TMU [1] */
FN_SEL_TMU_0, FN_SEL_TMU_1,
/* SEL_TSIF0 [2] */
@@ -5099,8 +5008,8 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1,
/* SEL_HSCIF1 [1] */
FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1,
- /* SEL_RDS [2] */
- FN_SEL_RDS_0, FN_SEL_RDS_1, FN_SEL_RDS_2, FN_SEL_RDS_3, }
+ /* RESERVED [2] */
+ 0, 0, 0, 0, }
},
{ PINMUX_CFG_REG_VAR("MOD_SEL3", 0xE6060098, 32,
2, 2, 2, 1, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -5185,6 +5094,28 @@ static const struct sh_pfc_soc_operations r8a7794_pinmux_ops = {
.pin_to_pocctrl = r8a7794_pin_to_pocctrl,
};
 
+#ifdef CONFIG_PINCTRL_PFC_R8A7745
+const struct sh_pfc_soc_info r8a7745_pinmux_info = {
+ .name = "r8a77450_pfc",
+ .unlock_reg = 0xe6060000, /* PMMR */
+
+ .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
+
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .groups = pinmux_groups,
+ .nr_groups = ARRAY_SIZE(pinmux_groups),
+ .functions = pinmux_functions,
+ .nr_functions = ARRAY_SIZE(pinmux_functions),
+
+ .cfg_regs = pinmux_config_regs,
+
+ .pinmux_data = pinmux_data,
+ .pinmux_data_size = ARRAY_SIZE(pinmux_data),
+};
+#endif
+
+#ifdef CONFIG_PINCTRL_PFC_R8A7794
const struct sh_pfc_soc_info r8a7794_pinmux_info = {
.name = "r8a77940_pfc",
.ops = &r8a7794_pinmux_ops,
@@ -5204,3 +5135,4 @@ const struct sh_pfc_soc_info r8a7794_pinmux_info = {
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
};
+#endif
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
index 081efda9a280..95fd0994893a 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795-es1.c
@@ -192,8 +192,8 @@
#define GPSR6_9 F_(SSI_WS4, IP14_27_24)
#define GPSR6_8 F_(SSI_SCK4, IP14_23_20)
#define GPSR6_7 F_(SSI_SDATA3, IP14_19_16)
-#define GPSR6_6 F_(SSI_WS34, IP14_15_12)
-#define GPSR6_5 F_(SSI_SCK34, IP14_11_8)
+#define GPSR6_6 F_(SSI_WS349, IP14_15_12)
+#define GPSR6_5 F_(SSI_SCK349, IP14_11_8)
#define GPSR6_4 F_(SSI_SDATA2_A, IP14_7_4)
#define GPSR6_3 F_(SSI_SDATA1_A, IP14_3_0)
#define GPSR6_2 F_(SSI_SDATA0, IP13_31_28)
@@ -328,8 +328,8 @@
#define IP13_31_28 FM(SSI_SDATA0) F_(0, 0) FM(MSIOF1_SS2_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP14_3_0 FM(SSI_SDATA1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP14_7_4 FM(SSI_SDATA2_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(SSI_SCK1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP14_11_8 FM(SSI_SCK34) F_(0, 0) FM(MSIOF1_SS1_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_OPWM_0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP14_15_12 FM(SSI_WS34) FM(HCTS2_N_A) FM(MSIOF1_SS2_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_IVCXO27_0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_11_8 FM(SSI_SCK349) F_(0, 0) FM(MSIOF1_SS1_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_OPWM_0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_15_12 FM(SSI_WS349) FM(HCTS2_N_A) FM(MSIOF1_SS2_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_IVCXO27_0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP14_19_16 FM(SSI_SDATA3) FM(HRTS2_N_A) FM(MSIOF1_TXD_A) F_(0, 0) F_(0, 0) FM(TS_SCK0_A) FM(STP_ISCLK_0_A) FM(RIF0_D1_A) FM(RIF2_D0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP14_23_20 FM(SSI_SCK4) FM(HRX2_A) FM(MSIOF1_SCK_A) F_(0, 0) F_(0, 0) FM(TS_SDAT0_A) FM(STP_ISD_0_A) FM(RIF0_CLK_A) FM(RIF2_CLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP14_27_24 FM(SSI_WS4) FM(HTX2_A) FM(MSIOF1_SYNC_A) F_(0, 0) F_(0, 0) FM(TS_SDEN0_A) FM(STP_ISEN_0_A) FM(RIF0_SYNC_A) FM(RIF2_SYNC_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -1256,11 +1256,11 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP14_7_4, SSI_SDATA2_A, SEL_SSI_0),
PINMUX_IPSR_MSEL(IP14_7_4, SSI_SCK1_B, SEL_SSI_1),
- PINMUX_IPSR_GPSR(IP14_11_8, SSI_SCK34),
+ PINMUX_IPSR_GPSR(IP14_11_8, SSI_SCK349),
PINMUX_IPSR_MSEL(IP14_11_8, MSIOF1_SS1_A, SEL_MSIOF1_0),
PINMUX_IPSR_MSEL(IP14_11_8, STP_OPWM_0_A, SEL_SSP1_0_0),
- PINMUX_IPSR_GPSR(IP14_15_12, SSI_WS34),
+ PINMUX_IPSR_GPSR(IP14_15_12, SSI_WS349),
PINMUX_IPSR_MSEL(IP14_15_12, HCTS2_N_A, SEL_HSCIF2_0),
PINMUX_IPSR_MSEL(IP14_15_12, MSIOF1_SS2_A, SEL_MSIOF1_0),
PINMUX_IPSR_MSEL(IP14_15_12, STP_IVCXO27_0_A, SEL_SSP1_0_0),
@@ -3650,12 +3650,12 @@ static const unsigned int ssi3_data_pins[] = {
static const unsigned int ssi3_data_mux[] = {
SSI_SDATA3_MARK,
};
-static const unsigned int ssi34_ctrl_pins[] = {
+static const unsigned int ssi349_ctrl_pins[] = {
/* SCK, WS */
RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 6),
};
-static const unsigned int ssi34_ctrl_mux[] = {
- SSI_SCK34_MARK, SSI_WS34_MARK,
+static const unsigned int ssi349_ctrl_mux[] = {
+ SSI_SCK349_MARK, SSI_WS349_MARK,
};
static const unsigned int ssi4_data_pins[] = {
/* SDATA */
@@ -4063,7 +4063,7 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(ssi2_ctrl_a),
SH_PFC_PIN_GROUP(ssi2_ctrl_b),
SH_PFC_PIN_GROUP(ssi3_data),
- SH_PFC_PIN_GROUP(ssi34_ctrl),
+ SH_PFC_PIN_GROUP(ssi349_ctrl),
SH_PFC_PIN_GROUP(ssi4_data),
SH_PFC_PIN_GROUP(ssi4_ctrl),
SH_PFC_PIN_GROUP(ssi5_data),
@@ -4509,7 +4509,7 @@ static const char * const ssi_groups[] = {
"ssi2_ctrl_a",
"ssi2_ctrl_b",
"ssi3_data",
- "ssi34_ctrl",
+ "ssi349_ctrl",
"ssi4_data",
"ssi4_ctrl",
"ssi5_data",
@@ -5356,8 +5356,8 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ RCAR_GP_PIN(6, 2), 24, 3 }, /* SSI_SDATA0 */
{ RCAR_GP_PIN(6, 3), 20, 3 }, /* SSI_SDATA1 */
{ RCAR_GP_PIN(6, 4), 16, 3 }, /* SSI_SDATA2 */
- { RCAR_GP_PIN(6, 5), 12, 3 }, /* SSI_SCK34 */
- { RCAR_GP_PIN(6, 6), 8, 3 }, /* SSI_WS34 */
+ { RCAR_GP_PIN(6, 5), 12, 3 }, /* SSI_SCK349 */
+ { RCAR_GP_PIN(6, 6), 8, 3 }, /* SSI_WS349 */
{ RCAR_GP_PIN(6, 7), 4, 3 }, /* SSI_SDATA3 */
{ RCAR_GP_PIN(6, 8), 0, 3 }, /* SSI_SCK4 */
} },
@@ -5604,8 +5604,8 @@ static const struct sh_pfc_bias_info bias_info[] = {
{ RCAR_GP_PIN(6, 9), PU5, 16 }, /* SSI_WS4 */
{ RCAR_GP_PIN(6, 8), PU5, 15 }, /* SSI_SCK4 */
{ RCAR_GP_PIN(6, 7), PU5, 14 }, /* SSI_SDATA3 */
- { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS34 */
- { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK34 */
+ { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS349 */
+ { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK349 */
{ RCAR_GP_PIN(6, 4), PU5, 11 }, /* SSI_SDATA2_A */
{ RCAR_GP_PIN(6, 3), PU5, 10 }, /* SSI_SDATA1_A */
{ RCAR_GP_PIN(6, 2), PU5, 9 }, /* SSI_SDATA0 */
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 0454f31c0831..1656295af2b0 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -193,8 +193,8 @@
#define GPSR6_9 F_(SSI_WS4, IP15_27_24)
#define GPSR6_8 F_(SSI_SCK4, IP15_23_20)
#define GPSR6_7 F_(SSI_SDATA3, IP15_19_16)
-#define GPSR6_6 F_(SSI_WS34, IP15_15_12)
-#define GPSR6_5 F_(SSI_SCK34, IP15_11_8)
+#define GPSR6_6 F_(SSI_WS349, IP15_15_12)
+#define GPSR6_5 F_(SSI_SCK349, IP15_11_8)
#define GPSR6_4 F_(SSI_SDATA2_A, IP15_7_4)
#define GPSR6_3 F_(SSI_SDATA1_A, IP15_3_0)
#define GPSR6_2 F_(SSI_SDATA0, IP14_31_28)
@@ -339,8 +339,8 @@
#define IP14_31_28 FM(SSI_SDATA0) F_(0, 0) FM(MSIOF1_SS2_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP15_3_0 FM(SSI_SDATA1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP15_7_4 FM(SSI_SDATA2_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(SSI_SCK1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_11_8 FM(SSI_SCK34) F_(0, 0) FM(MSIOF1_SS1_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_OPWM_0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_15_12 FM(SSI_WS34) FM(HCTS2_N_A) FM(MSIOF1_SS2_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_IVCXO27_0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_11_8 FM(SSI_SCK349) F_(0, 0) FM(MSIOF1_SS1_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_OPWM_0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_15_12 FM(SSI_WS349) FM(HCTS2_N_A) FM(MSIOF1_SS2_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_IVCXO27_0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP15_19_16 FM(SSI_SDATA3) FM(HRTS2_N_A) FM(MSIOF1_TXD_A) F_(0, 0) F_(0, 0) FM(TS_SCK0_A) FM(STP_ISCLK_0_A) FM(RIF0_D1_A) FM(RIF2_D0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP15_23_20 FM(SSI_SCK4) FM(HRX2_A) FM(MSIOF1_SCK_A) F_(0, 0) F_(0, 0) FM(TS_SDAT0_A) FM(STP_ISD_0_A) FM(RIF0_CLK_A) FM(RIF2_CLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP15_27_24 FM(SSI_WS4) FM(HTX2_A) FM(MSIOF1_SYNC_A) F_(0, 0) F_(0, 0) FM(TS_SDEN0_A) FM(STP_ISEN_0_A) FM(RIF0_SYNC_A) FM(RIF2_SYNC_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -1315,11 +1315,11 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP15_7_4, SSI_SDATA2_A, SEL_SSI_0),
PINMUX_IPSR_MSEL(IP15_7_4, SSI_SCK1_B, SEL_SSI_1),
- PINMUX_IPSR_GPSR(IP15_11_8, SSI_SCK34),
+ PINMUX_IPSR_GPSR(IP15_11_8, SSI_SCK349),
PINMUX_IPSR_MSEL(IP15_11_8, MSIOF1_SS1_A, SEL_MSIOF1_0),
PINMUX_IPSR_MSEL(IP15_11_8, STP_OPWM_0_A, SEL_SSP1_0_0),
- PINMUX_IPSR_GPSR(IP15_15_12, SSI_WS34),
+ PINMUX_IPSR_GPSR(IP15_15_12, SSI_WS349),
PINMUX_IPSR_MSEL(IP15_15_12, HCTS2_N_A, SEL_HSCIF2_0),
PINMUX_IPSR_MSEL(IP15_15_12, MSIOF1_SS2_A, SEL_MSIOF1_0),
PINMUX_IPSR_MSEL(IP15_15_12, STP_IVCXO27_0_A, SEL_SSP1_0_0),
@@ -1576,6 +1576,273 @@ static const struct sh_pfc_pin pinmux_pins[] = {
SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('T'), 30, ASEBRK, CFG_FLAGS),
};
 
+/* - EtherAVB --------------------------------------------------------------- */
+static const unsigned int avb_link_pins[] = {
+ /* AVB_LINK */
+ RCAR_GP_PIN(2, 12),
+};
+static const unsigned int avb_link_mux[] = {
+ AVB_LINK_MARK,
+};
+static const unsigned int avb_magic_pins[] = {
+ /* AVB_MAGIC_ */
+ RCAR_GP_PIN(2, 10),
+};
+static const unsigned int avb_magic_mux[] = {
+ AVB_MAGIC_MARK,
+};
+static const unsigned int avb_phy_int_pins[] = {
+ /* AVB_PHY_INT */
+ RCAR_GP_PIN(2, 11),
+};
+static const unsigned int avb_phy_int_mux[] = {
+ AVB_PHY_INT_MARK,
+};
+static const unsigned int avb_mdc_pins[] = {
+ /* AVB_MDC, AVB_MDIO */
+ RCAR_GP_PIN(2, 9), PIN_NUMBER('A', 9),
+};
+static const unsigned int avb_mdc_mux[] = {
+ AVB_MDC_MARK, AVB_MDIO_MARK,
+};
+static const unsigned int avb_mii_pins[] = {
+ /*
+ * AVB_TX_CTL, AVB_TXC, AVB_TD0,
+ * AVB_TD1, AVB_TD2, AVB_TD3,
+ * AVB_RX_CTL, AVB_RXC, AVB_RD0,
+ * AVB_RD1, AVB_RD2, AVB_RD3,
+ * AVB_TXCREFCLK
+ */
+ PIN_NUMBER('A', 8), PIN_NUMBER('A', 19), PIN_NUMBER('A', 18),
+ PIN_NUMBER('B', 18), PIN_NUMBER('A', 17), PIN_NUMBER('B', 17),
+ PIN_NUMBER('A', 16), PIN_NUMBER('B', 19), PIN_NUMBER('A', 13),
+ PIN_NUMBER('B', 13), PIN_NUMBER('A', 14), PIN_NUMBER('B', 14),
+ PIN_NUMBER('A', 12),
+
+};
+static const unsigned int avb_mii_mux[] = {
+ AVB_TX_CTL_MARK, AVB_TXC_MARK, AVB_TD0_MARK,
+ AVB_TD1_MARK, AVB_TD2_MARK, AVB_TD3_MARK,
+ AVB_RX_CTL_MARK, AVB_RXC_MARK, AVB_RD0_MARK,
+ AVB_RD1_MARK, AVB_RD2_MARK, AVB_RD3_MARK,
+ AVB_TXCREFCLK_MARK,
+};
+static const unsigned int avb_avtp_pps_pins[] = {
+ /* AVB_AVTP_PPS */
+ RCAR_GP_PIN(2, 6),
+};
+static const unsigned int avb_avtp_pps_mux[] = {
+ AVB_AVTP_PPS_MARK,
+};
+static const unsigned int avb_avtp_match_a_pins[] = {
+ /* AVB_AVTP_MATCH_A */
+ RCAR_GP_PIN(2, 13),
+};
+static const unsigned int avb_avtp_match_a_mux[] = {
+ AVB_AVTP_MATCH_A_MARK,
+};
+static const unsigned int avb_avtp_capture_a_pins[] = {
+ /* AVB_AVTP_CAPTURE_A */
+ RCAR_GP_PIN(2, 14),
+};
+static const unsigned int avb_avtp_capture_a_mux[] = {
+ AVB_AVTP_CAPTURE_A_MARK,
+};
+static const unsigned int avb_avtp_match_b_pins[] = {
+ /* AVB_AVTP_MATCH_B */
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int avb_avtp_match_b_mux[] = {
+ AVB_AVTP_MATCH_B_MARK,
+};
+static const unsigned int avb_avtp_capture_b_pins[] = {
+ /* AVB_AVTP_CAPTURE_B */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int avb_avtp_capture_b_mux[] = {
+ AVB_AVTP_CAPTURE_B_MARK,
+};
+
+/* - DU --------------------------------------------------------------------- */
+static const unsigned int du_rgb666_pins[] = {
+ /* R[7:2], G[7:2], B[7:2] */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 3), RCAR_GP_PIN(1, 2),
+};
+static const unsigned int du_rgb666_mux[] = {
+ DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
+ DU_DR3_MARK, DU_DR2_MARK,
+ DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
+ DU_DG3_MARK, DU_DG2_MARK,
+ DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
+ DU_DB3_MARK, DU_DB2_MARK,
+};
+static const unsigned int du_rgb888_pins[] = {
+ /* R[7:0], G[7:0], B[7:0] */
+ RCAR_GP_PIN(0, 15), RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 10),
+ RCAR_GP_PIN(0, 9), RCAR_GP_PIN(0, 8),
+ RCAR_GP_PIN(1, 15), RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 19), RCAR_GP_PIN(1, 18),
+ RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 16),
+ RCAR_GP_PIN(1, 7), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 3), RCAR_GP_PIN(1, 2),
+ RCAR_GP_PIN(1, 1), RCAR_GP_PIN(1, 0),
+};
+static const unsigned int du_rgb888_mux[] = {
+ DU_DR7_MARK, DU_DR6_MARK, DU_DR5_MARK, DU_DR4_MARK,
+ DU_DR3_MARK, DU_DR2_MARK, DU_DR1_MARK, DU_DR0_MARK,
+ DU_DG7_MARK, DU_DG6_MARK, DU_DG5_MARK, DU_DG4_MARK,
+ DU_DG3_MARK, DU_DG2_MARK, DU_DG1_MARK, DU_DG0_MARK,
+ DU_DB7_MARK, DU_DB6_MARK, DU_DB5_MARK, DU_DB4_MARK,
+ DU_DB3_MARK, DU_DB2_MARK, DU_DB1_MARK, DU_DB0_MARK,
+};
+static const unsigned int du_clk_out_0_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(1, 27),
+};
+static const unsigned int du_clk_out_0_mux[] = {
+ DU_DOTCLKOUT0_MARK
+};
+static const unsigned int du_clk_out_1_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int du_clk_out_1_mux[] = {
+ DU_DOTCLKOUT1_MARK
+};
+static const unsigned int du_sync_pins[] = {
+ /* EXVSYNC/VSYNC, EXHSYNC/HSYNC */
+ RCAR_GP_PIN(2, 5), RCAR_GP_PIN(2, 4),
+};
+static const unsigned int du_sync_mux[] = {
+ DU_EXVSYNC_DU_VSYNC_MARK, DU_EXHSYNC_DU_HSYNC_MARK
+};
+static const unsigned int du_oddf_pins[] = {
+ /* EXDISP/EXODDF/EXCDE */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int du_oddf_mux[] = {
+ DU_EXODDF_DU_ODDF_DISP_CDE_MARK,
+};
+static const unsigned int du_cde_pins[] = {
+ /* CDE */
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int du_cde_mux[] = {
+ DU_CDE_MARK,
+};
+static const unsigned int du_disp_pins[] = {
+ /* DISP */
+ RCAR_GP_PIN(2, 1),
+};
+static const unsigned int du_disp_mux[] = {
+ DU_DISP_MARK,
+};
+
+/* - PWM0 --------------------------------------------------------------------*/
+static const unsigned int pwm0_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 6),
+};
+static const unsigned int pwm0_mux[] = {
+ PWM0_MARK,
+};
+/* - PWM1 --------------------------------------------------------------------*/
+static const unsigned int pwm1_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 7),
+};
+static const unsigned int pwm1_a_mux[] = {
+ PWM1_A_MARK,
+};
+static const unsigned int pwm1_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int pwm1_b_mux[] = {
+ PWM1_B_MARK,
+};
+/* - PWM2 --------------------------------------------------------------------*/
+static const unsigned int pwm2_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 8),
+};
+static const unsigned int pwm2_a_mux[] = {
+ PWM2_A_MARK,
+};
+static const unsigned int pwm2_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int pwm2_b_mux[] = {
+ PWM2_B_MARK,
+};
+/* - PWM3 --------------------------------------------------------------------*/
+static const unsigned int pwm3_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 0),
+};
+static const unsigned int pwm3_a_mux[] = {
+ PWM3_A_MARK,
+};
+static const unsigned int pwm3_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int pwm3_b_mux[] = {
+ PWM3_B_MARK,
+};
+/* - PWM4 --------------------------------------------------------------------*/
+static const unsigned int pwm4_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 1),
+};
+static const unsigned int pwm4_a_mux[] = {
+ PWM4_A_MARK,
+};
+static const unsigned int pwm4_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int pwm4_b_mux[] = {
+ PWM4_B_MARK,
+};
+/* - PWM5 --------------------------------------------------------------------*/
+static const unsigned int pwm5_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int pwm5_a_mux[] = {
+ PWM5_A_MARK,
+};
+static const unsigned int pwm5_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 4),
+};
+static const unsigned int pwm5_b_mux[] = {
+ PWM5_B_MARK,
+};
+/* - PWM6 --------------------------------------------------------------------*/
+static const unsigned int pwm6_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int pwm6_a_mux[] = {
+ PWM6_A_MARK,
+};
+static const unsigned int pwm6_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int pwm6_b_mux[] = {
+ PWM6_B_MARK,
+};
+
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_pins[] = {
/* RX, TX */
@@ -1790,6 +2057,37 @@ static const unsigned int scif_clk_b_mux[] = {
};
static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(avb_link),
+ SH_PFC_PIN_GROUP(avb_magic),
+ SH_PFC_PIN_GROUP(avb_phy_int),
+ SH_PFC_PIN_GROUP(avb_mdc),
+ SH_PFC_PIN_GROUP(avb_mii),
+ SH_PFC_PIN_GROUP(avb_avtp_pps),
+ SH_PFC_PIN_GROUP(avb_avtp_match_a),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_a),
+ SH_PFC_PIN_GROUP(avb_avtp_match_b),
+ SH_PFC_PIN_GROUP(avb_avtp_capture_b),
+ SH_PFC_PIN_GROUP(du_rgb666),
+ SH_PFC_PIN_GROUP(du_rgb888),
+ SH_PFC_PIN_GROUP(du_clk_out_0),
+ SH_PFC_PIN_GROUP(du_clk_out_1),
+ SH_PFC_PIN_GROUP(du_sync),
+ SH_PFC_PIN_GROUP(du_oddf),
+ SH_PFC_PIN_GROUP(du_cde),
+ SH_PFC_PIN_GROUP(du_disp),
+ SH_PFC_PIN_GROUP(pwm0),
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm2_a),
+ SH_PFC_PIN_GROUP(pwm2_b),
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm4_a),
+ SH_PFC_PIN_GROUP(pwm4_b),
+ SH_PFC_PIN_GROUP(pwm5_a),
+ SH_PFC_PIN_GROUP(pwm5_b),
+ SH_PFC_PIN_GROUP(pwm6_a),
+ SH_PFC_PIN_GROUP(pwm6_b),
SH_PFC_PIN_GROUP(scif0_data),
SH_PFC_PIN_GROUP(scif0_clk),
SH_PFC_PIN_GROUP(scif0_ctrl),
@@ -1821,6 +2119,64 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(scif_clk_b),
};
+static const char * const avb_groups[] = {
+ "avb_link",
+ "avb_magic",
+ "avb_phy_int",
+ "avb_mdc",
+ "avb_mii",
+ "avb_avtp_pps",
+ "avb_avtp_match_a",
+ "avb_avtp_capture_a",
+ "avb_avtp_match_b",
+ "avb_avtp_capture_b",
+};
+
+static const char * const du_groups[] = {
+ "du_rgb666",
+ "du_rgb888",
+ "du_clk_out_0",
+ "du_clk_out_1",
+ "du_sync",
+ "du_oddf",
+ "du_cde",
+ "du_disp",
+};
+
+static const char * const pwm0_groups[] = {
+ "pwm0",
+};
+
+static const char * const pwm1_groups[] = {
+ "pwm1_a",
+ "pwm1_b",
+};
+
+static const char * const pwm2_groups[] = {
+ "pwm2_a",
+ "pwm2_b",
+};
+
+static const char * const pwm3_groups[] = {
+ "pwm3_a",
+ "pwm3_b",
+};
+
+static const char * const pwm4_groups[] = {
+ "pwm4_a",
+ "pwm4_b",
+};
+
+static const char * const pwm5_groups[] = {
+ "pwm5_a",
+ "pwm5_b",
+};
+
+static const char * const pwm6_groups[] = {
+ "pwm6_a",
+ "pwm6_b",
+};
+
static const char * const scif0_groups[] = {
"scif0_data",
"scif0_clk",
@@ -1872,6 +2228,15 @@ static const char * const scif_clk_groups[] = {
};
static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+ SH_PFC_FUNCTION(pwm5),
+ SH_PFC_FUNCTION(pwm6),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
@@ -2653,8 +3018,8 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ RCAR_GP_PIN(6, 2), 24, 3 }, /* SSI_SDATA0 */
{ RCAR_GP_PIN(6, 3), 20, 3 }, /* SSI_SDATA1 */
{ RCAR_GP_PIN(6, 4), 16, 3 }, /* SSI_SDATA2 */
- { RCAR_GP_PIN(6, 5), 12, 3 }, /* SSI_SCK34 */
- { RCAR_GP_PIN(6, 6), 8, 3 }, /* SSI_WS34 */
+ { RCAR_GP_PIN(6, 5), 12, 3 }, /* SSI_SCK349 */
+ { RCAR_GP_PIN(6, 6), 8, 3 }, /* SSI_WS349 */
{ RCAR_GP_PIN(6, 7), 4, 3 }, /* SSI_SDATA3 */
{ RCAR_GP_PIN(6, 8), 0, 3 }, /* SSI_SCK4 */
} },
@@ -2900,8 +3265,8 @@ static const struct sh_pfc_bias_info bias_info[] = {
{ RCAR_GP_PIN(6, 9), PU5, 16 }, /* SSI_WS4 */
{ RCAR_GP_PIN(6, 8), PU5, 15 }, /* SSI_SCK4 */
{ RCAR_GP_PIN(6, 7), PU5, 14 }, /* SSI_SDATA3 */
- { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS34 */
- { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK34 */
+ { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS349 */
+ { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK349 */
{ RCAR_GP_PIN(6, 4), PU5, 11 }, /* SSI_SDATA2_A */
{ RCAR_GP_PIN(6, 3), PU5, 10 }, /* SSI_SDATA1_A */
{ RCAR_GP_PIN(6, 2), PU5, 9 }, /* SSI_SDATA0 */
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
index b0362ae707e2..98bf5d0e078e 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c
@@ -199,13 +199,13 @@
#define GPSR6_9 F_(SSI_WS4, IP15_27_24)
#define GPSR6_8 F_(SSI_SCK4, IP15_23_20)
#define GPSR6_7 F_(SSI_SDATA3, IP15_19_16)
-#define GPSR6_6 F_(SSI_WS34, IP15_15_12)
-#define GPSR6_5 F_(SSI_SCK34, IP15_11_8)
+#define GPSR6_6 F_(SSI_WS349, IP15_15_12)
+#define GPSR6_5 F_(SSI_SCK349, IP15_11_8)
#define GPSR6_4 F_(SSI_SDATA2_A, IP15_7_4)
#define GPSR6_3 F_(SSI_SDATA1_A, IP15_3_0)
#define GPSR6_2 F_(SSI_SDATA0, IP14_31_28)
-#define GPSR6_1 F_(SSI_WS0129, IP14_27_24)
-#define GPSR6_0 F_(SSI_SCK0129, IP14_23_20)
+#define GPSR6_1 F_(SSI_WS01239, IP14_27_24)
+#define GPSR6_0 F_(SSI_SCK01239, IP14_23_20)
/* GPSR7 */
#define GPSR7_3 FM(GP7_03)
@@ -338,15 +338,15 @@
#define IP14_11_8 FM(MLB_CLK) F_(0, 0) FM(MSIOF1_SCK_F) F_(0, 0) FM(SCL1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP14_15_12 FM(MLB_SIG) FM(RX1_B) FM(MSIOF1_SYNC_F) F_(0, 0) FM(SDA1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP14_19_16 FM(MLB_DAT) FM(TX1_B) FM(MSIOF1_RXD_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP14_23_20 FM(SSI_SCK0129) F_(0, 0) FM(MSIOF1_TXD_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP14_27_24 FM(SSI_WS0129) F_(0, 0) FM(MSIOF1_SS1_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_23_20 FM(SSI_SCK01239) F_(0, 0) FM(MSIOF1_TXD_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP14_27_24 FM(SSI_WS01239) F_(0, 0) FM(MSIOF1_SS1_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IPSRx */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ /* 8 */ /* 9 */ /* A */ /* B */ /* C - F */
#define IP14_31_28 FM(SSI_SDATA0) F_(0, 0) FM(MSIOF1_SS2_F) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP15_3_0 FM(SSI_SDATA1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP15_7_4 FM(SSI_SDATA2_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(SSI_SCK1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_11_8 FM(SSI_SCK34) F_(0, 0) FM(MSIOF1_SS1_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_OPWM_0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP15_15_12 FM(SSI_WS34) FM(HCTS2_N_A) FM(MSIOF1_SS2_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_IVCXO27_0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_11_8 FM(SSI_SCK349) F_(0, 0) FM(MSIOF1_SS1_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_OPWM_0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP15_15_12 FM(SSI_WS349) FM(HCTS2_N_A) FM(MSIOF1_SS2_A) F_(0, 0) F_(0, 0) F_(0, 0) FM(STP_IVCXO27_0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP15_19_16 FM(SSI_SDATA3) FM(HRTS2_N_A) FM(MSIOF1_TXD_A) F_(0, 0) F_(0, 0) FM(TS_SCK0_A) FM(STP_ISCLK_0_A) FM(RIF0_D1_A) FM(RIF2_D0_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP15_23_20 FM(SSI_SCK4) FM(HRX2_A) FM(MSIOF1_SCK_A) F_(0, 0) F_(0, 0) FM(TS_SDAT0_A) FM(STP_ISD_0_A) FM(RIF0_CLK_A) FM(RIF2_CLK_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP15_27_24 FM(SSI_WS4) FM(HTX2_A) FM(MSIOF1_SYNC_A) F_(0, 0) F_(0, 0) FM(TS_SDEN0_A) FM(STP_ISEN_0_A) FM(RIF0_SYNC_A) FM(RIF2_SYNC_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
@@ -1304,10 +1304,10 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP14_19_16, TX1_B, SEL_SCIF1_1),
PINMUX_IPSR_MSEL(IP14_19_16, MSIOF1_RXD_F, SEL_MSIOF1_5),
- PINMUX_IPSR_GPSR(IP14_23_20, SSI_SCK0129),
+ PINMUX_IPSR_GPSR(IP14_23_20, SSI_SCK01239),
PINMUX_IPSR_MSEL(IP14_23_20, MSIOF1_TXD_F, SEL_MSIOF1_5),
- PINMUX_IPSR_GPSR(IP14_27_24, SSI_WS0129),
+ PINMUX_IPSR_GPSR(IP14_27_24, SSI_WS01239),
PINMUX_IPSR_MSEL(IP14_27_24, MSIOF1_SS1_F, SEL_MSIOF1_5),
PINMUX_IPSR_GPSR(IP14_31_28, SSI_SDATA0),
@@ -1319,11 +1319,11 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_MSEL(IP15_7_4, SSI_SDATA2_A, SEL_SSI_0),
PINMUX_IPSR_MSEL(IP15_7_4, SSI_SCK1_B, SEL_SSI_1),
- PINMUX_IPSR_GPSR(IP15_11_8, SSI_SCK34),
+ PINMUX_IPSR_GPSR(IP15_11_8, SSI_SCK349),
PINMUX_IPSR_MSEL(IP15_11_8, MSIOF1_SS1_A, SEL_MSIOF1_0),
PINMUX_IPSR_MSEL(IP15_11_8, STP_OPWM_0_A, SEL_SSP1_0_0),
- PINMUX_IPSR_GPSR(IP15_15_12, SSI_WS34),
+ PINMUX_IPSR_GPSR(IP15_15_12, SSI_WS349),
PINMUX_IPSR_MSEL(IP15_15_12, HCTS2_N_A, SEL_HSCIF2_0),
PINMUX_IPSR_MSEL(IP15_15_12, MSIOF1_SS2_A, SEL_MSIOF1_0),
PINMUX_IPSR_MSEL(IP15_15_12, STP_IVCXO27_0_A, SEL_SSP1_0_0),
@@ -1582,6 +1582,128 @@ static const struct sh_pfc_pin pinmux_pins[] = {
SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('T'), 30, ASEBRK, CFG_FLAGS),
};
+/* - AUDIO CLOCK ------------------------------------------------------------ */
+static const unsigned int audio_clk_a_a_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(6, 22),
+};
+static const unsigned int audio_clk_a_a_mux[] = {
+ AUDIO_CLKA_A_MARK,
+};
+static const unsigned int audio_clk_a_b_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(5, 4),
+};
+static const unsigned int audio_clk_a_b_mux[] = {
+ AUDIO_CLKA_B_MARK,
+};
+static const unsigned int audio_clk_a_c_pins[] = {
+ /* CLK A */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int audio_clk_a_c_mux[] = {
+ AUDIO_CLKA_C_MARK,
+};
+static const unsigned int audio_clk_b_a_pins[] = {
+ /* CLK B */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int audio_clk_b_a_mux[] = {
+ AUDIO_CLKB_A_MARK,
+};
+static const unsigned int audio_clk_b_b_pins[] = {
+ /* CLK B */
+ RCAR_GP_PIN(6, 23),
+};
+static const unsigned int audio_clk_b_b_mux[] = {
+ AUDIO_CLKB_B_MARK,
+};
+static const unsigned int audio_clk_c_a_pins[] = {
+ /* CLK C */
+ RCAR_GP_PIN(5, 21),
+};
+static const unsigned int audio_clk_c_a_mux[] = {
+ AUDIO_CLKC_A_MARK,
+};
+static const unsigned int audio_clk_c_b_pins[] = {
+ /* CLK C */
+ RCAR_GP_PIN(5, 0),
+};
+static const unsigned int audio_clk_c_b_mux[] = {
+ AUDIO_CLKC_B_MARK,
+};
+static const unsigned int audio_clkout_a_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(5, 18),
+};
+static const unsigned int audio_clkout_a_mux[] = {
+ AUDIO_CLKOUT_A_MARK,
+};
+static const unsigned int audio_clkout_b_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(6, 28),
+};
+static const unsigned int audio_clkout_b_mux[] = {
+ AUDIO_CLKOUT_B_MARK,
+};
+static const unsigned int audio_clkout_c_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(5, 3),
+};
+static const unsigned int audio_clkout_c_mux[] = {
+ AUDIO_CLKOUT_C_MARK,
+};
+static const unsigned int audio_clkout_d_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(5, 21),
+};
+static const unsigned int audio_clkout_d_mux[] = {
+ AUDIO_CLKOUT_D_MARK,
+};
+static const unsigned int audio_clkout1_a_pins[] = {
+ /* CLKOUT1 */
+ RCAR_GP_PIN(5, 15),
+};
+static const unsigned int audio_clkout1_a_mux[] = {
+ AUDIO_CLKOUT1_A_MARK,
+};
+static const unsigned int audio_clkout1_b_pins[] = {
+ /* CLKOUT1 */
+ RCAR_GP_PIN(6, 29),
+};
+static const unsigned int audio_clkout1_b_mux[] = {
+ AUDIO_CLKOUT1_B_MARK,
+};
+static const unsigned int audio_clkout2_a_pins[] = {
+ /* CLKOUT2 */
+ RCAR_GP_PIN(5, 16),
+};
+static const unsigned int audio_clkout2_a_mux[] = {
+ AUDIO_CLKOUT2_A_MARK,
+};
+static const unsigned int audio_clkout2_b_pins[] = {
+ /* CLKOUT2 */
+ RCAR_GP_PIN(6, 30),
+};
+static const unsigned int audio_clkout2_b_mux[] = {
+ AUDIO_CLKOUT2_B_MARK,
+};
+
+static const unsigned int audio_clkout3_a_pins[] = {
+ /* CLKOUT3 */
+ RCAR_GP_PIN(5, 19),
+};
+static const unsigned int audio_clkout3_a_mux[] = {
+ AUDIO_CLKOUT3_A_MARK,
+};
+static const unsigned int audio_clkout3_b_pins[] = {
+ /* CLKOUT3 */
+ RCAR_GP_PIN(6, 31),
+};
+static const unsigned int audio_clkout3_b_mux[] = {
+ AUDIO_CLKOUT3_B_MARK,
+};
+
/* - EtherAVB --------------------------------------------------------------- */
static const unsigned int avb_link_pins[] = {
/* AVB_LINK */
@@ -1605,11 +1727,33 @@ static const unsigned int avb_phy_int_mux[] = {
AVB_PHY_INT_MARK,
};
static const unsigned int avb_mdc_pins[] = {
- /* AVB_MDC */
- RCAR_GP_PIN(2, 9),
+ /* AVB_MDC, AVB_MDIO */
+ RCAR_GP_PIN(2, 9), PIN_NUMBER('A', 9),
};
static const unsigned int avb_mdc_mux[] = {
- AVB_MDC_MARK,
+ AVB_MDC_MARK, AVB_MDIO_MARK,
+};
+static const unsigned int avb_mii_pins[] = {
+ /*
+ * AVB_TX_CTL, AVB_TXC, AVB_TD0,
+ * AVB_TD1, AVB_TD2, AVB_TD3,
+ * AVB_RX_CTL, AVB_RXC, AVB_RD0,
+ * AVB_RD1, AVB_RD2, AVB_RD3,
+ * AVB_TXCREFCLK
+ */
+ PIN_NUMBER('A', 8), PIN_NUMBER('A', 19), PIN_NUMBER('A', 18),
+ PIN_NUMBER('B', 18), PIN_NUMBER('A', 17), PIN_NUMBER('B', 17),
+ PIN_NUMBER('A', 16), PIN_NUMBER('B', 19), PIN_NUMBER('A', 13),
+ PIN_NUMBER('B', 13), PIN_NUMBER('A', 14), PIN_NUMBER('B', 14),
+ PIN_NUMBER('A', 12),
+};
+static const unsigned int avb_mii_mux[] = {
+ AVB_TX_CTL_MARK, AVB_TXC_MARK, AVB_TD0_MARK,
+ AVB_TD1_MARK, AVB_TD2_MARK, AVB_TD3_MARK,
+ AVB_RX_CTL_MARK, AVB_RXC_MARK, AVB_RD0_MARK,
+ AVB_RD1_MARK, AVB_RD2_MARK, AVB_RD3_MARK,
+ AVB_TXCREFCLK_MARK,
};
static const unsigned int avb_avtp_pps_pins[] = {
/* AVB_AVTP_PPS */
@@ -2955,6 +3099,105 @@ static const unsigned int msiof3_rxd_e_mux[] = {
MSIOF3_RXD_E_MARK,
};
+/* - PWM0 --------------------------------------------------------------------*/
+static const unsigned int pwm0_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 6),
+};
+static const unsigned int pwm0_mux[] = {
+ PWM0_MARK,
+};
+/* - PWM1 --------------------------------------------------------------------*/
+static const unsigned int pwm1_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 7),
+};
+static const unsigned int pwm1_a_mux[] = {
+ PWM1_A_MARK,
+};
+static const unsigned int pwm1_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 8),
+};
+static const unsigned int pwm1_b_mux[] = {
+ PWM1_B_MARK,
+};
+/* - PWM2 --------------------------------------------------------------------*/
+static const unsigned int pwm2_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 8),
+};
+static const unsigned int pwm2_a_mux[] = {
+ PWM2_A_MARK,
+};
+static const unsigned int pwm2_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int pwm2_b_mux[] = {
+ PWM2_B_MARK,
+};
+/* - PWM3 --------------------------------------------------------------------*/
+static const unsigned int pwm3_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 0),
+};
+static const unsigned int pwm3_a_mux[] = {
+ PWM3_A_MARK,
+};
+static const unsigned int pwm3_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 2),
+};
+static const unsigned int pwm3_b_mux[] = {
+ PWM3_B_MARK,
+};
+/* - PWM4 --------------------------------------------------------------------*/
+static const unsigned int pwm4_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 1),
+};
+static const unsigned int pwm4_a_mux[] = {
+ PWM4_A_MARK,
+};
+static const unsigned int pwm4_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 3),
+};
+static const unsigned int pwm4_b_mux[] = {
+ PWM4_B_MARK,
+};
+/* - PWM5 --------------------------------------------------------------------*/
+static const unsigned int pwm5_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 2),
+};
+static const unsigned int pwm5_a_mux[] = {
+ PWM5_A_MARK,
+};
+static const unsigned int pwm5_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 4),
+};
+static const unsigned int pwm5_b_mux[] = {
+ PWM5_B_MARK,
+};
+/* - PWM6 --------------------------------------------------------------------*/
+static const unsigned int pwm6_a_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(1, 3),
+};
+static const unsigned int pwm6_a_mux[] = {
+ PWM6_A_MARK,
+};
+static const unsigned int pwm6_b_pins[] = {
+ /* PWM */
+ RCAR_GP_PIN(2, 5),
+};
+static const unsigned int pwm6_b_mux[] = {
+ PWM6_B_MARK,
+};
+
/* - SCIF0 ------------------------------------------------------------------ */
static const unsigned int scif0_data_pins[] = {
/* RX, TX */
@@ -3376,11 +3619,206 @@ static const unsigned int sdhi3_ds_mux[] = {
SD3_DS_MARK,
};
+/* - SSI -------------------------------------------------------------------- */
+static const unsigned int ssi0_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 2),
+};
+static const unsigned int ssi0_data_mux[] = {
+ SSI_SDATA0_MARK,
+};
+static const unsigned int ssi01239_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 1),
+};
+static const unsigned int ssi01239_ctrl_mux[] = {
+ SSI_SCK01239_MARK, SSI_WS01239_MARK,
+};
+static const unsigned int ssi1_data_a_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 3),
+};
+static const unsigned int ssi1_data_a_mux[] = {
+ SSI_SDATA1_A_MARK,
+};
+static const unsigned int ssi1_data_b_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int ssi1_data_b_mux[] = {
+ SSI_SDATA1_B_MARK,
+};
+static const unsigned int ssi1_ctrl_a_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27),
+};
+static const unsigned int ssi1_ctrl_a_mux[] = {
+ SSI_SCK1_A_MARK, SSI_WS1_A_MARK,
+};
+static const unsigned int ssi1_ctrl_b_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 4), RCAR_GP_PIN(6, 21),
+};
+static const unsigned int ssi1_ctrl_b_mux[] = {
+ SSI_SCK1_B_MARK, SSI_WS1_B_MARK,
+};
+static const unsigned int ssi2_data_a_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 4),
+};
+static const unsigned int ssi2_data_a_mux[] = {
+ SSI_SDATA2_A_MARK,
+};
+static const unsigned int ssi2_data_b_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(5, 13),
+};
+static const unsigned int ssi2_data_b_mux[] = {
+ SSI_SDATA2_B_MARK,
+};
+static const unsigned int ssi2_ctrl_a_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(5, 19), RCAR_GP_PIN(5, 21),
+};
+static const unsigned int ssi2_ctrl_a_mux[] = {
+ SSI_SCK2_A_MARK, SSI_WS2_A_MARK,
+};
+static const unsigned int ssi2_ctrl_b_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int ssi2_ctrl_b_mux[] = {
+ SSI_SCK2_B_MARK, SSI_WS2_B_MARK,
+};
+static const unsigned int ssi3_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 7),
+};
+static const unsigned int ssi3_data_mux[] = {
+ SSI_SDATA3_MARK,
+};
+static const unsigned int ssi349_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 6),
+};
+static const unsigned int ssi349_ctrl_mux[] = {
+ SSI_SCK349_MARK, SSI_WS349_MARK,
+};
+static const unsigned int ssi4_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 10),
+};
+static const unsigned int ssi4_data_mux[] = {
+ SSI_SDATA4_MARK,
+};
+static const unsigned int ssi4_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9),
+};
+static const unsigned int ssi4_ctrl_mux[] = {
+ SSI_SCK4_MARK, SSI_WS4_MARK,
+};
+static const unsigned int ssi5_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 13),
+};
+static const unsigned int ssi5_data_mux[] = {
+ SSI_SDATA5_MARK,
+};
+static const unsigned int ssi5_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 11), RCAR_GP_PIN(6, 12),
+};
+static const unsigned int ssi5_ctrl_mux[] = {
+ SSI_SCK5_MARK, SSI_WS5_MARK,
+};
+static const unsigned int ssi6_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 16),
+};
+static const unsigned int ssi6_data_mux[] = {
+ SSI_SDATA6_MARK,
+};
+static const unsigned int ssi6_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 14), RCAR_GP_PIN(6, 15),
+};
+static const unsigned int ssi6_ctrl_mux[] = {
+ SSI_SCK6_MARK, SSI_WS6_MARK,
+};
+static const unsigned int ssi7_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 19),
+};
+static const unsigned int ssi7_data_mux[] = {
+ SSI_SDATA7_MARK,
+};
+static const unsigned int ssi78_ctrl_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18),
+};
+static const unsigned int ssi78_ctrl_mux[] = {
+ SSI_SCK78_MARK, SSI_WS78_MARK,
+};
+static const unsigned int ssi8_data_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 20),
+};
+static const unsigned int ssi8_data_mux[] = {
+ SSI_SDATA8_MARK,
+};
+static const unsigned int ssi9_data_a_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(6, 21),
+};
+static const unsigned int ssi9_data_a_mux[] = {
+ SSI_SDATA9_A_MARK,
+};
+static const unsigned int ssi9_data_b_pins[] = {
+ /* SDATA */
+ RCAR_GP_PIN(5, 14),
+};
+static const unsigned int ssi9_data_b_mux[] = {
+ SSI_SDATA9_B_MARK,
+};
+static const unsigned int ssi9_ctrl_a_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int ssi9_ctrl_a_mux[] = {
+ SSI_SCK9_A_MARK, SSI_WS9_A_MARK,
+};
+static const unsigned int ssi9_ctrl_b_pins[] = {
+ /* SCK, WS */
+ RCAR_GP_PIN(6, 30), RCAR_GP_PIN(6, 31),
+};
+static const unsigned int ssi9_ctrl_b_mux[] = {
+ SSI_SCK9_B_MARK, SSI_WS9_B_MARK,
+};
+
static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(audio_clk_a_a),
+ SH_PFC_PIN_GROUP(audio_clk_a_b),
+ SH_PFC_PIN_GROUP(audio_clk_a_c),
+ SH_PFC_PIN_GROUP(audio_clk_b_a),
+ SH_PFC_PIN_GROUP(audio_clk_b_b),
+ SH_PFC_PIN_GROUP(audio_clk_c_a),
+ SH_PFC_PIN_GROUP(audio_clk_c_b),
+ SH_PFC_PIN_GROUP(audio_clkout_a),
+ SH_PFC_PIN_GROUP(audio_clkout_b),
+ SH_PFC_PIN_GROUP(audio_clkout_c),
+ SH_PFC_PIN_GROUP(audio_clkout_d),
+ SH_PFC_PIN_GROUP(audio_clkout1_a),
+ SH_PFC_PIN_GROUP(audio_clkout1_b),
+ SH_PFC_PIN_GROUP(audio_clkout2_a),
+ SH_PFC_PIN_GROUP(audio_clkout2_b),
+ SH_PFC_PIN_GROUP(audio_clkout3_a),
+ SH_PFC_PIN_GROUP(audio_clkout3_b),
SH_PFC_PIN_GROUP(avb_link),
SH_PFC_PIN_GROUP(avb_magic),
SH_PFC_PIN_GROUP(avb_phy_int),
SH_PFC_PIN_GROUP(avb_mdc),
+ SH_PFC_PIN_GROUP(avb_mii),
SH_PFC_PIN_GROUP(avb_avtp_pps),
SH_PFC_PIN_GROUP(avb_avtp_match_a),
SH_PFC_PIN_GROUP(avb_avtp_capture_a),
@@ -3565,6 +4003,19 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(msiof3_ss2_e),
SH_PFC_PIN_GROUP(msiof3_txd_e),
SH_PFC_PIN_GROUP(msiof3_rxd_e),
+ SH_PFC_PIN_GROUP(pwm0),
+ SH_PFC_PIN_GROUP(pwm1_a),
+ SH_PFC_PIN_GROUP(pwm1_b),
+ SH_PFC_PIN_GROUP(pwm2_a),
+ SH_PFC_PIN_GROUP(pwm2_b),
+ SH_PFC_PIN_GROUP(pwm3_a),
+ SH_PFC_PIN_GROUP(pwm3_b),
+ SH_PFC_PIN_GROUP(pwm4_a),
+ SH_PFC_PIN_GROUP(pwm4_b),
+ SH_PFC_PIN_GROUP(pwm5_a),
+ SH_PFC_PIN_GROUP(pwm5_b),
+ SH_PFC_PIN_GROUP(pwm6_a),
+ SH_PFC_PIN_GROUP(pwm6_b),
SH_PFC_PIN_GROUP(scif0_data),
SH_PFC_PIN_GROUP(scif0_clk),
SH_PFC_PIN_GROUP(scif0_ctrl),
@@ -3620,6 +4071,51 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(sdhi3_cd),
SH_PFC_PIN_GROUP(sdhi3_wp),
SH_PFC_PIN_GROUP(sdhi3_ds),
+ SH_PFC_PIN_GROUP(ssi0_data),
+ SH_PFC_PIN_GROUP(ssi01239_ctrl),
+ SH_PFC_PIN_GROUP(ssi1_data_a),
+ SH_PFC_PIN_GROUP(ssi1_data_b),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi1_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi2_data_a),
+ SH_PFC_PIN_GROUP(ssi2_data_b),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi2_ctrl_b),
+ SH_PFC_PIN_GROUP(ssi3_data),
+ SH_PFC_PIN_GROUP(ssi349_ctrl),
+ SH_PFC_PIN_GROUP(ssi4_data),
+ SH_PFC_PIN_GROUP(ssi4_ctrl),
+ SH_PFC_PIN_GROUP(ssi5_data),
+ SH_PFC_PIN_GROUP(ssi5_ctrl),
+ SH_PFC_PIN_GROUP(ssi6_data),
+ SH_PFC_PIN_GROUP(ssi6_ctrl),
+ SH_PFC_PIN_GROUP(ssi7_data),
+ SH_PFC_PIN_GROUP(ssi78_ctrl),
+ SH_PFC_PIN_GROUP(ssi8_data),
+ SH_PFC_PIN_GROUP(ssi9_data_a),
+ SH_PFC_PIN_GROUP(ssi9_data_b),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_a),
+ SH_PFC_PIN_GROUP(ssi9_ctrl_b),
+};
+
+static const char * const audio_clk_groups[] = {
+ "audio_clk_a_a",
+ "audio_clk_a_b",
+ "audio_clk_a_c",
+ "audio_clk_b_a",
+ "audio_clk_b_b",
+ "audio_clk_c_a",
+ "audio_clk_c_b",
+ "audio_clkout_a",
+ "audio_clkout_b",
+ "audio_clkout_c",
+ "audio_clkout_d",
+ "audio_clkout1_a",
+ "audio_clkout1_b",
+ "audio_clkout2_a",
+ "audio_clkout2_b",
+ "audio_clkout3_a",
+ "audio_clkout3_b",
};
static const char * const avb_groups[] = {
@@ -3627,6 +4123,7 @@ static const char * const avb_groups[] = {
"avb_magic",
"avb_phy_int",
"avb_mdc",
+ "avb_mii",
"avb_avtp_pps",
"avb_avtp_match_a",
"avb_avtp_capture_a",
@@ -3879,6 +4376,40 @@ static const char * const msiof3_groups[] = {
"msiof3_rxd_e",
};
+static const char * const pwm0_groups[] = {
+ "pwm0",
+};
+
+static const char * const pwm1_groups[] = {
+ "pwm1_a",
+ "pwm1_b",
+};
+
+static const char * const pwm2_groups[] = {
+ "pwm2_a",
+ "pwm2_b",
+};
+
+static const char * const pwm3_groups[] = {
+ "pwm3_a",
+ "pwm3_b",
+};
+
+static const char * const pwm4_groups[] = {
+ "pwm4_a",
+ "pwm4_b",
+};
+
+static const char * const pwm5_groups[] = {
+ "pwm5_a",
+ "pwm5_b",
+};
+
+static const char * const pwm6_groups[] = {
+ "pwm6_a",
+ "pwm6_b",
+};
+
static const char * const scif0_groups[] = {
"scif0_data",
"scif0_clk",
@@ -3967,7 +4498,36 @@ static const char * const sdhi3_groups[] = {
"sdhi3_ds",
};
+static const char * const ssi_groups[] = {
+ "ssi0_data",
+ "ssi01239_ctrl",
+ "ssi1_data_a",
+ "ssi1_data_b",
+ "ssi1_ctrl_a",
+ "ssi1_ctrl_b",
+ "ssi2_data_a",
+ "ssi2_data_b",
+ "ssi2_ctrl_a",
+ "ssi2_ctrl_b",
+ "ssi3_data",
+ "ssi349_ctrl",
+ "ssi4_data",
+ "ssi4_ctrl",
+ "ssi5_data",
+ "ssi5_ctrl",
+ "ssi6_data",
+ "ssi6_ctrl",
+ "ssi7_data",
+ "ssi78_ctrl",
+ "ssi8_data",
+ "ssi9_data_a",
+ "ssi9_data_b",
+ "ssi9_ctrl_a",
+ "ssi9_ctrl_b",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(audio_clk),
SH_PFC_FUNCTION(avb),
SH_PFC_FUNCTION(can0),
SH_PFC_FUNCTION(can1),
@@ -3991,6 +4551,13 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(msiof1),
SH_PFC_FUNCTION(msiof2),
SH_PFC_FUNCTION(msiof3),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
+ SH_PFC_FUNCTION(pwm2),
+ SH_PFC_FUNCTION(pwm3),
+ SH_PFC_FUNCTION(pwm4),
+ SH_PFC_FUNCTION(pwm5),
+ SH_PFC_FUNCTION(pwm6),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
SH_PFC_FUNCTION(scif2),
@@ -4002,6 +4569,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(sdhi1),
SH_PFC_FUNCTION(sdhi2),
SH_PFC_FUNCTION(sdhi3),
+ SH_PFC_FUNCTION(ssi),
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -4775,8 +5343,8 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ RCAR_GP_PIN(6, 2), 24, 3 }, /* SSI_SDATA0 */
{ RCAR_GP_PIN(6, 3), 20, 3 }, /* SSI_SDATA1 */
{ RCAR_GP_PIN(6, 4), 16, 3 }, /* SSI_SDATA2 */
- { RCAR_GP_PIN(6, 5), 12, 3 }, /* SSI_SCK34 */
- { RCAR_GP_PIN(6, 6), 8, 3 }, /* SSI_WS34 */
+ { RCAR_GP_PIN(6, 5), 12, 3 }, /* SSI_SCK349 */
+ { RCAR_GP_PIN(6, 6), 8, 3 }, /* SSI_WS349 */
{ RCAR_GP_PIN(6, 7), 4, 3 }, /* SSI_SDATA3 */
{ RCAR_GP_PIN(6, 8), 0, 3 }, /* SSI_SCK4 */
} },
@@ -5022,8 +5590,8 @@ static const struct sh_pfc_bias_info bias_info[] = {
{ RCAR_GP_PIN(6, 9), PU5, 16 }, /* SSI_WS4 */
{ RCAR_GP_PIN(6, 8), PU5, 15 }, /* SSI_SCK4 */
{ RCAR_GP_PIN(6, 7), PU5, 14 }, /* SSI_SDATA3 */
- { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS34 */
- { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK34 */
+ { RCAR_GP_PIN(6, 6), PU5, 13 }, /* SSI_WS349 */
+ { RCAR_GP_PIN(6, 5), PU5, 12 }, /* SSI_SCK349 */
{ RCAR_GP_PIN(6, 4), PU5, 11 }, /* SSI_SDATA2_A */
{ RCAR_GP_PIN(6, 3), PU5, 10 }, /* SSI_SDATA1_A */
{ RCAR_GP_PIN(6, 2), PU5, 9 }, /* SSI_SDATA0 */
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index f31eb6c1e87d..4376397123de 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -259,6 +259,8 @@ struct sh_pfc_soc_info {
extern const struct sh_pfc_soc_info emev2_pinmux_info;
extern const struct sh_pfc_soc_info r8a73a4_pinmux_info;
extern const struct sh_pfc_soc_info r8a7740_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7743_pinmux_info;
+extern const struct sh_pfc_soc_info r8a7745_pinmux_info;
extern const struct sh_pfc_soc_info r8a7778_pinmux_info;
extern const struct sh_pfc_soc_info r8a7779_pinmux_info;
extern const struct sh_pfc_soc_info r8a7790_pinmux_info;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 222b6685b09f..06431ff49ffb 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -209,6 +209,24 @@ static int stm32_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
return irq_create_fwspec_mapping(&fwspec);
}
+static int stm32_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
+ int pin = stm32_gpio_pin(offset);
+ int ret;
+ u32 mode, alt;
+
+ stm32_pmx_get_mode(bank, pin, &mode, &alt);
+ if ((alt == 0) && (mode == 0))
+ ret = 1;
+ else if ((alt == 0) && (mode == 1))
+ ret = 0;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
static const struct gpio_chip stm32_gpio_template = {
.request = stm32_gpio_request,
.free = stm32_gpio_free,
@@ -217,14 +235,44 @@ static const struct gpio_chip stm32_gpio_template = {
.direction_input = stm32_gpio_direction_input,
.direction_output = stm32_gpio_direction_output,
.to_irq = stm32_gpio_to_irq,
+ .get_direction = stm32_gpio_get_direction,
};
+static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
+{
+ struct stm32_gpio_bank *bank = irq_data->domain->host_data;
+ struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
+ int ret;
+
+ ret = stm32_gpio_direction_input(&bank->gpio_chip, irq_data->hwirq);
+ if (ret)
+ return ret;
+
+ ret = gpiochip_lock_as_irq(&bank->gpio_chip, irq_data->hwirq);
+ if (ret) {
+ dev_err(pctl->dev, "unable to lock HW IRQ %lu for IRQ\n",
+ irq_data->hwirq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void stm32_gpio_irq_release_resources(struct irq_data *irq_data)
+{
+ struct stm32_gpio_bank *bank = irq_data->domain->host_data;
+
+ gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq);
+}
+
static struct irq_chip stm32_gpio_irq_chip = {
.name = "stm32gpio",
.irq_eoi = irq_chip_eoi_parent,
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_set_type = irq_chip_set_type_parent,
+ .irq_request_resources = stm32_gpio_irq_request_resources,
+ .irq_release_resources = stm32_gpio_irq_release_resources,
};
static int stm32_gpio_domain_translate(struct irq_domain *d,
@@ -248,15 +296,6 @@ static void stm32_gpio_domain_activate(struct irq_domain *d,
struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
regmap_field_write(pctl->irqmux[irq_data->hwirq], bank->bank_nr);
- gpiochip_lock_as_irq(&bank->gpio_chip, irq_data->hwirq);
-}
-
-static void stm32_gpio_domain_deactivate(struct irq_domain *d,
- struct irq_data *irq_data)
-{
- struct stm32_gpio_bank *bank = d->host_data;
-
- gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq);
}
static int stm32_gpio_domain_alloc(struct irq_domain *d,
@@ -285,7 +324,6 @@ static const struct irq_domain_ops stm32_gpio_domain_ops = {
.alloc = stm32_gpio_domain_alloc,
.free = irq_domain_free_irqs_common,
.activate = stm32_gpio_domain_activate,
- .deactivate = stm32_gpio_domain_deactivate,
};
/* Pinctrl functions */
@@ -411,11 +449,6 @@ static int stm32_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
pin = STM32_GET_PIN_NO(pinfunc);
func = STM32_GET_PIN_FUNC(pinfunc);
- if (pin >= pctl->match_data->npins) {
- dev_err(pctl->dev, "invalid pin number.\n");
- return -EINVAL;
- }
-
if (!stm32_pctrl_is_function_valid(pctl, pin, func)) {
dev_err(pctl->dev, "invalid function.\n");
return -EINVAL;
@@ -558,8 +591,8 @@ static void stm32_pmx_set_mode(struct stm32_gpio_bank *bank,
clk_disable(bank->clk);
}
-static void stm32_pmx_get_mode(struct stm32_gpio_bank *bank,
- int pin, u32 *mode, u32 *alt)
+void stm32_pmx_get_mode(struct stm32_gpio_bank *bank, int pin, u32 *mode,
+ u32 *alt)
{
u32 val;
int alt_shift = (pin % 8) * 4;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.h b/drivers/pinctrl/stm32/pinctrl-stm32.h
index 35ebc94c01e4..8702a9992ce5 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.h
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.h
@@ -45,7 +45,10 @@ struct stm32_pinctrl_match_data {
const unsigned int npins;
};
-int stm32_pctl_probe(struct platform_device *pdev);
+struct stm32_gpio_bank;
+int stm32_pctl_probe(struct platform_device *pdev);
+void stm32_pmx_get_mode(struct stm32_gpio_bank *bank,
+ int pin, u32 *mode, u32 *alt);
#endif /* __PINCTRL_STM32_H */
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index 793e6f94fa0b..31f85ca92669 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -7,7 +7,7 @@ config PINCTRL_SUNXI
select GPIOLIB
config PINCTRL_SUN4I_A10
- def_bool MACH_SUN4I
+ def_bool MACH_SUN4I || MACH_SUN7I
select PINCTRL_SUNXI
config PINCTRL_SUN5I
@@ -23,10 +23,6 @@ config PINCTRL_SUN6I_A31_R
depends on RESET_CONTROLLER
select PINCTRL_SUNXI
-config PINCTRL_SUN7I_A20
- def_bool MACH_SUN7I
- select PINCTRL_SUNXI
-
config PINCTRL_SUN8I_A23
def_bool MACH_SUN8I
select PINCTRL_SUNXI
@@ -39,6 +35,10 @@ config PINCTRL_SUN8I_A83T
def_bool MACH_SUN8I
select PINCTRL_SUNXI
+config PINCTRL_SUN8I_A83T_R
+ def_bool MACH_SUN8I
+ select PINCTRL_SUNXI
+
config PINCTRL_SUN8I_A23_R
def_bool MACH_SUN8I
depends on RESET_CONTROLLER
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index df4ccd6cd44c..dc6c9619e41c 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -6,13 +6,13 @@ obj-$(CONFIG_PINCTRL_SUN4I_A10) += pinctrl-sun4i-a10.o
obj-$(CONFIG_PINCTRL_SUN5I) += pinctrl-sun5i.o
obj-$(CONFIG_PINCTRL_SUN6I_A31) += pinctrl-sun6i-a31.o
obj-$(CONFIG_PINCTRL_SUN6I_A31_R) += pinctrl-sun6i-a31-r.o
-obj-$(CONFIG_PINCTRL_SUN7I_A20) += pinctrl-sun7i-a20.o
obj-$(CONFIG_PINCTRL_SUN8I_A23) += pinctrl-sun8i-a23.o
obj-$(CONFIG_PINCTRL_SUN8I_A23_R) += pinctrl-sun8i-a23-r.o
obj-$(CONFIG_PINCTRL_SUN8I_A33) += pinctrl-sun8i-a33.o
obj-$(CONFIG_PINCTRL_SUN50I_A64) += pinctrl-sun50i-a64.o
obj-$(CONFIG_PINCTRL_SUN50I_A64_R) += pinctrl-sun50i-a64-r.o
obj-$(CONFIG_PINCTRL_SUN8I_A83T) += pinctrl-sun8i-a83t.o
+obj-$(CONFIG_PINCTRL_SUN8I_A83T_R) += pinctrl-sun8i-a83t-r.o
obj-$(CONFIG_PINCTRL_SUN8I_H3) += pinctrl-sun8i-h3.o
obj-$(CONFIG_PINCTRL_SUN8I_H3_R) += pinctrl-sun8i-h3-r.o
obj-$(CONFIG_PINCTRL_SUN8I_V3S) += pinctrl-sun8i-v3s.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index fb30b86a97ee..159580c04b14 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -24,101 +24,147 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXD3 */
SUNXI_FUNCTION(0x3, "spi1"), /* CS0 */
- SUNXI_FUNCTION(0x4, "uart2")), /* RTS */
+ SUNXI_FUNCTION(0x4, "uart2"), /* RTS */
+ SUNXI_FUNCTION_VARIANT(0x5, "gmac", /* GRXD3 */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 1),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXD2 */
SUNXI_FUNCTION(0x3, "spi1"), /* CLK */
- SUNXI_FUNCTION(0x4, "uart2")), /* CTS */
+ SUNXI_FUNCTION(0x4, "uart2"), /* CTS */
+ SUNXI_FUNCTION_VARIANT(0x5, "gmac", /* GRXD2 */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 2),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXD1 */
SUNXI_FUNCTION(0x3, "spi1"), /* MOSI */
- SUNXI_FUNCTION(0x4, "uart2")), /* TX */
+ SUNXI_FUNCTION(0x4, "uart2"), /* TX */
+ SUNXI_FUNCTION_VARIANT(0x5, "gmac", /* GRXD1 */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXD0 */
SUNXI_FUNCTION(0x3, "spi1"), /* MISO */
- SUNXI_FUNCTION(0x4, "uart2")), /* RX */
+ SUNXI_FUNCTION(0x4, "uart2"), /* RX */
+ SUNXI_FUNCTION_VARIANT(0x5, "gmac", /* GRXD0 */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 4),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXD3 */
- SUNXI_FUNCTION(0x3, "spi1")), /* CS1 */
+ SUNXI_FUNCTION(0x3, "spi1"), /* CS1 */
+ SUNXI_FUNCTION_VARIANT(0x5, "gmac", /* GTXD3 */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 5),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXD2 */
- SUNXI_FUNCTION(0x3, "spi3")), /* CS0 */
+ SUNXI_FUNCTION(0x3, "spi3"), /* CS0 */
+ SUNXI_FUNCTION_VARIANT(0x5, "gmac", /* GTXD2 */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 6),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXD1 */
- SUNXI_FUNCTION(0x3, "spi3")), /* CLK */
+ SUNXI_FUNCTION(0x3, "spi3"), /* CLK */
+ SUNXI_FUNCTION_VARIANT(0x5, "gmac", /* GTXD1 */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 7),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXD0 */
- SUNXI_FUNCTION(0x3, "spi3")), /* MOSI */
+ SUNXI_FUNCTION(0x3, "spi3"), /* MOSI */
+ SUNXI_FUNCTION_VARIANT(0x5, "gmac", /* GTXD0 */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 8),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXCK */
- SUNXI_FUNCTION(0x3, "spi3")), /* MISO */
+ SUNXI_FUNCTION(0x3, "spi3"), /* MISO */
+ SUNXI_FUNCTION_VARIANT(0x5, "gmac", /* GRXCK */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 9),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXERR */
- SUNXI_FUNCTION(0x3, "spi3")), /* CS1 */
+ SUNXI_FUNCTION(0x3, "spi3"), /* CS1 */
+ SUNXI_FUNCTION_VARIANT(0x5, "gmac", /* GNULL / ERXERR */
+ PINCTRL_SUN7I_A20),
+ SUNXI_FUNCTION_VARIANT(0x6, "i2s1", /* MCLK */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 10),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ERXDV */
- SUNXI_FUNCTION(0x4, "uart1")), /* TX */
+ SUNXI_FUNCTION(0x4, "uart1"), /* TX */
+ SUNXI_FUNCTION_VARIANT(0x5, "gmac", /* GRXDV */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 11),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* EMDC */
- SUNXI_FUNCTION(0x4, "uart1")), /* RX */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RX */
+ SUNXI_FUNCTION_VARIANT(0x5, "gmac", /* EMDC */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 12),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* EMDIO */
SUNXI_FUNCTION(0x3, "uart6"), /* TX */
- SUNXI_FUNCTION(0x4, "uart1")), /* RTS */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RTS */
+ SUNXI_FUNCTION_VARIANT(0x5, "gmac", /* EMDIO */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 13),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXEN */
SUNXI_FUNCTION(0x3, "uart6"), /* RX */
- SUNXI_FUNCTION(0x4, "uart1")), /* CTS */
+ SUNXI_FUNCTION(0x4, "uart1"), /* CTS */
+ SUNXI_FUNCTION_VARIANT(0x5, "gmac", /* GTXCTL / ETXEN */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 14),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXCK */
SUNXI_FUNCTION(0x3, "uart7"), /* TX */
- SUNXI_FUNCTION(0x4, "uart1")), /* DTR */
+ SUNXI_FUNCTION(0x4, "uart1"), /* DTR */
+ SUNXI_FUNCTION_VARIANT(0x5, "gmac", /* GNULL / ETXCK */
+ PINCTRL_SUN7I_A20),
+ SUNXI_FUNCTION_VARIANT(0x6, "i2s1", /* BCLK */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ECRS */
SUNXI_FUNCTION(0x3, "uart7"), /* RX */
- SUNXI_FUNCTION(0x4, "uart1")), /* DSR */
+ SUNXI_FUNCTION(0x4, "uart1"), /* DSR */
+ SUNXI_FUNCTION_VARIANT(0x5, "gmac", /* GTXCK / ECRS */
+ PINCTRL_SUN7I_A20),
+ SUNXI_FUNCTION_VARIANT(0x6, "i2s1", /* LRCK */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 16),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ECOL */
SUNXI_FUNCTION(0x3, "can"), /* TX */
- SUNXI_FUNCTION(0x4, "uart1")), /* DCD */
+ SUNXI_FUNCTION(0x4, "uart1"), /* DCD */
+ SUNXI_FUNCTION_VARIANT(0x5, "gmac", /* GCLKIN / ECOL */
+ PINCTRL_SUN7I_A20),
+ SUNXI_FUNCTION_VARIANT(0x6, "i2s1", /* DO */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 17),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "emac"), /* ETXERR */
SUNXI_FUNCTION(0x3, "can"), /* RX */
- SUNXI_FUNCTION(0x4, "uart1")), /* RING */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RING */
+ SUNXI_FUNCTION_VARIANT(0x5, "gmac", /* GNULL / ETXERR */
+ PINCTRL_SUN7I_A20),
+ SUNXI_FUNCTION_VARIANT(0x6, "i2s1", /* DI */
+ PINCTRL_SUN7I_A20)),
/* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -150,47 +196,77 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s"), /* MCLK */
+ /*
+ * On A10 there's only one I2S controller and the pin group
+ * is simply named "i2s". On the A20 there are two and thus it's
+ * renamed to "i2s0". Deal with these names here, in order
+ * to satisfy existing device trees.
+ */
+ SUNXI_FUNCTION_VARIANT(0x2, "i2s", /* MCLK */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x2, "i2s0", /* MCLK */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x3, "ac97")), /* MCLK */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s"), /* BCLK */
+ SUNXI_FUNCTION_VARIANT(0x2, "i2s", /* BCLK */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x2, "i2s0", /* BCLK */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x3, "ac97")), /* BCLK */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s"), /* LRCK */
+ SUNXI_FUNCTION_VARIANT(0x2, "i2s", /* LRCK */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x2, "i2s0", /* LRCK */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x3, "ac97")), /* SYNC */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s"), /* DO0 */
+ SUNXI_FUNCTION_VARIANT(0x2, "i2s", /* DO0 */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x2, "i2s0", /* DO0 */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x3, "ac97")), /* DO */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s")), /* DO1 */
+ SUNXI_FUNCTION_VARIANT(0x2, "i2s", /* DO1 */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x2, "i2s0", /* DO1 */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 10),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s")), /* DO2 */
+ SUNXI_FUNCTION_VARIANT(0x2, "i2s", /* DO2 */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x2, "i2s0", /* DO2 */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 11),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s")), /* DO3 */
+ SUNXI_FUNCTION_VARIANT(0x2, "i2s", /* DO3 */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x2, "i2s0", /* DO3 */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 12),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s"), /* DI */
+ SUNXI_FUNCTION_VARIANT(0x2, "i2s", /* DI */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x2, "i2s0", /* DI */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x3, "ac97"), /* DI */
- /* Undocumented mux function - See SPDIF MCLK above */
+ /* Undocumented mux function on A10 - See SPDIF MCLK above */
SUNXI_FUNCTION(0x4, "spdif")), /* SPDIF IN */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 13),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi2"), /* CS1 */
- /* Undocumented mux function - See SPDIF MCLK above */
+ /* Undocumented mux function on A10 - See SPDIF MCLK above */
SUNXI_FUNCTION(0x4, "spdif")), /* SPDIF OUT */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 14),
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -672,7 +748,8 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D0 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAA0 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAA0 */
+ PINCTRL_SUN4I_A10),
SUNXI_FUNCTION(0x4, "uart3"), /* TX */
SUNXI_FUNCTION_IRQ(0x6, 0), /* EINT0 */
SUNXI_FUNCTION(0x7, "csi1")), /* D0 */
@@ -680,7 +757,8 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D1 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAA1 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAA1 */
+ PINCTRL_SUN4I_A10),
SUNXI_FUNCTION(0x4, "uart3"), /* RX */
SUNXI_FUNCTION_IRQ(0x6, 1), /* EINT1 */
SUNXI_FUNCTION(0x7, "csi1")), /* D1 */
@@ -688,7 +766,8 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D2 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAA2 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAA2 */
+ PINCTRL_SUN4I_A10),
SUNXI_FUNCTION(0x4, "uart3"), /* RTS */
SUNXI_FUNCTION_IRQ(0x6, 2), /* EINT2 */
SUNXI_FUNCTION(0x7, "csi1")), /* D2 */
@@ -696,7 +775,8 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D3 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAIRQ */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAIRQ */
+ PINCTRL_SUN4I_A10),
SUNXI_FUNCTION(0x4, "uart3"), /* CTS */
SUNXI_FUNCTION_IRQ(0x6, 3), /* EINT3 */
SUNXI_FUNCTION(0x7, "csi1")), /* D3 */
@@ -704,7 +784,8 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D4 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAD0 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAD0 */
+ PINCTRL_SUN4I_A10),
SUNXI_FUNCTION(0x4, "uart4"), /* TX */
SUNXI_FUNCTION_IRQ(0x6, 4), /* EINT4 */
SUNXI_FUNCTION(0x7, "csi1")), /* D4 */
@@ -712,7 +793,8 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D5 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAD1 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAD1 */
+ PINCTRL_SUN4I_A10),
SUNXI_FUNCTION(0x4, "uart4"), /* RX */
SUNXI_FUNCTION_IRQ(0x6, 5), /* EINT5 */
SUNXI_FUNCTION(0x7, "csi1")), /* D5 */
@@ -720,7 +802,8 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D6 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAD2 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAD2 */
+ PINCTRL_SUN4I_A10),
SUNXI_FUNCTION(0x4, "uart5"), /* TX */
SUNXI_FUNCTION(0x5, "ms"), /* BS */
SUNXI_FUNCTION_IRQ(0x6, 6), /* EINT6 */
@@ -729,7 +812,8 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D7 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAD3 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAD3 */
+ PINCTRL_SUN4I_A10),
SUNXI_FUNCTION(0x4, "uart5"), /* RX */
SUNXI_FUNCTION(0x5, "ms"), /* CLK */
SUNXI_FUNCTION_IRQ(0x6, 7), /* EINT7 */
@@ -738,7 +822,10 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D8 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAD4 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAD4 */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ERXD3 */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "keypad"), /* IN0 */
SUNXI_FUNCTION(0x5, "ms"), /* D0 */
SUNXI_FUNCTION_IRQ(0x6, 8), /* EINT8 */
@@ -747,7 +834,10 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D9 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAD5 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAD5 */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ERXD2 */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "keypad"), /* IN1 */
SUNXI_FUNCTION(0x5, "ms"), /* D1 */
SUNXI_FUNCTION_IRQ(0x6, 9), /* EINT9 */
@@ -756,7 +846,10 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D10 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAD6 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAD6 */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ERXD1 */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "keypad"), /* IN2 */
SUNXI_FUNCTION(0x5, "ms"), /* D2 */
SUNXI_FUNCTION_IRQ(0x6, 10), /* EINT10 */
@@ -765,7 +858,10 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D11 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAD7 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAD7 */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ERXD0 */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "keypad"), /* IN3 */
SUNXI_FUNCTION(0x5, "ms"), /* D3 */
SUNXI_FUNCTION_IRQ(0x6, 11), /* EINT11 */
@@ -774,7 +870,8 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D12 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAD8 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAD8 */
+ PINCTRL_SUN4I_A10),
SUNXI_FUNCTION(0x4, "ps2"), /* SCK1 */
SUNXI_FUNCTION_IRQ(0x6, 12), /* EINT12 */
SUNXI_FUNCTION(0x7, "csi1")), /* D12 */
@@ -782,7 +879,8 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D13 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAD9 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAD9 */
+ PINCTRL_SUN4I_A10),
SUNXI_FUNCTION(0x4, "ps2"), /* SDA1 */
SUNXI_FUNCTION(0x5, "sim"), /* RST */
SUNXI_FUNCTION_IRQ(0x6, 13), /* EINT13 */
@@ -791,7 +889,10 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D14 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAD10 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAD10 */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ETXD3 */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "keypad"), /* IN4 */
SUNXI_FUNCTION(0x5, "sim"), /* VPPEN */
SUNXI_FUNCTION_IRQ(0x6, 14), /* EINT14 */
@@ -800,7 +901,10 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D15 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAD11 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAD11 */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ETXD2 */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "keypad"), /* IN5 */
SUNXI_FUNCTION(0x5, "sim"), /* VPPPP */
SUNXI_FUNCTION_IRQ(0x6, 15), /* EINT15 */
@@ -809,7 +913,10 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D16 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAD12 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAD12 */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ETXD1 */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "keypad"), /* IN6 */
SUNXI_FUNCTION_IRQ(0x6, 16), /* EINT16 */
SUNXI_FUNCTION(0x7, "csi1")), /* D16 */
@@ -817,7 +924,10 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D17 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAD13 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAD13 */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ETXD0 */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "keypad"), /* IN7 */
SUNXI_FUNCTION(0x5, "sim"), /* VCCEN */
SUNXI_FUNCTION_IRQ(0x6, 17), /* EINT17 */
@@ -826,7 +936,10 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D18 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAD14 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAD14 */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ERXCK */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "keypad"), /* OUT0 */
SUNXI_FUNCTION(0x5, "sim"), /* SCK */
SUNXI_FUNCTION_IRQ(0x6, 18), /* EINT18 */
@@ -835,7 +948,10 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D19 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAD15 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAD15 */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ERXERR */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "keypad"), /* OUT1 */
SUNXI_FUNCTION(0x5, "sim"), /* SDA */
SUNXI_FUNCTION_IRQ(0x6, 19), /* EINT19 */
@@ -844,7 +960,10 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D20 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAOE */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAOE */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ERXDV */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "can"), /* TX */
SUNXI_FUNCTION_IRQ(0x6, 20), /* EINT20 */
SUNXI_FUNCTION(0x7, "csi1")), /* D20 */
@@ -852,7 +971,10 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D21 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATADREQ */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATADREQ */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x3, "emac", /* EMDC */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "can"), /* RX */
SUNXI_FUNCTION_IRQ(0x6, 21), /* EINT21 */
SUNXI_FUNCTION(0x7, "csi1")), /* D21 */
@@ -860,7 +982,10 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D22 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATADACK */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATADACK */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x3, "emac", /* EMDIO */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "keypad"), /* OUT2 */
SUNXI_FUNCTION(0x5, "mmc1"), /* CMD */
SUNXI_FUNCTION(0x7, "csi1")), /* D22 */
@@ -868,7 +993,10 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* D23 */
- SUNXI_FUNCTION(0x3, "pata"), /* ATACS0 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATACS0 */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ETXEN */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "keypad"), /* OUT3 */
SUNXI_FUNCTION(0x5, "mmc1"), /* CLK */
SUNXI_FUNCTION(0x7, "csi1")), /* D23 */
@@ -876,7 +1004,10 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* CLK */
- SUNXI_FUNCTION(0x3, "pata"), /* ATACS1 */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATACS1 */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ETXCK */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "keypad"), /* OUT4 */
SUNXI_FUNCTION(0x5, "mmc1"), /* D0 */
SUNXI_FUNCTION(0x7, "csi1")), /* PCLK */
@@ -884,7 +1015,10 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* DE */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAIORDY */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAIORDY */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ECRS */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "keypad"), /* OUT5 */
SUNXI_FUNCTION(0x5, "mmc1"), /* D1 */
SUNXI_FUNCTION(0x7, "csi1")), /* FIELD */
@@ -892,7 +1026,10 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* HSYNC */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAIOR */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAIOR */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ECOL */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "keypad"), /* OUT6 */
SUNXI_FUNCTION(0x5, "mmc1"), /* D2 */
SUNXI_FUNCTION(0x7, "csi1")), /* HSYNC */
@@ -900,24 +1037,35 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "lcd1"), /* VSYNC */
- SUNXI_FUNCTION(0x3, "pata"), /* ATAIOW */
+ SUNXI_FUNCTION_VARIANT(0x3, "pata", /* ATAIOW */
+ PINCTRL_SUN4I_A10),
+ SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ETXERR */
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION(0x4, "keypad"), /* OUT7 */
SUNXI_FUNCTION(0x5, "mmc1"), /* D3 */
SUNXI_FUNCTION(0x7, "csi1")), /* VSYNC */
/* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 0),
SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_VARIANT(0x3, "i2c3", /* SCK */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 1),
SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_VARIANT(0x3, "i2c3", /* SDA */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 2),
SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_VARIANT(0x3, "i2c4", /* SCK */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 3),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "pwm")), /* PWM1 */
+ SUNXI_FUNCTION(0x2, "pwm"), /* PWM1 */
+ SUNXI_FUNCTION_VARIANT(0x3, "i2c3", /* SDA */
+ PINCTRL_SUN7I_A20)),
SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 4),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
@@ -959,12 +1107,16 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi0"), /* MOSI */
SUNXI_FUNCTION(0x3, "uart6"), /* TX */
+ SUNXI_FUNCTION_VARIANT(0x4, "clk_out_a",
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION_IRQ(0x6, 24)), /* EINT24 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 13),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi0"), /* MISO */
SUNXI_FUNCTION(0x3, "uart6"), /* RX */
+ SUNXI_FUNCTION_VARIANT(0x4, "clk_out_b",
+ PINCTRL_SUN7I_A20),
SUNXI_FUNCTION_IRQ(0x6, 25)), /* EINT25 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 14),
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -1027,12 +1179,21 @@ static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = {
static int sun4i_a10_pinctrl_probe(struct platform_device *pdev)
{
- return sunxi_pinctrl_init(pdev,
- &sun4i_a10_pinctrl_data);
+ unsigned long variant = (unsigned long)of_device_get_match_data(&pdev->dev);
+
+ return sunxi_pinctrl_init_with_variant(pdev, &sun4i_a10_pinctrl_data,
+ variant);
}
static const struct of_device_id sun4i_a10_pinctrl_match[] = {
- { .compatible = "allwinner,sun4i-a10-pinctrl", },
+ {
+ .compatible = "allwinner,sun4i-a10-pinctrl",
+ .data = (void *)PINCTRL_SUN4I_A10
+ },
+ {
+ .compatible = "allwinner,sun7i-a20-pinctrl",
+ .data = (void *)PINCTRL_SUN7I_A20
+ },
{}
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c b/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
deleted file mode 100644
index b6f4c68ffb39..000000000000
--- a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
+++ /dev/null
@@ -1,1056 +0,0 @@
-/*
- * Allwinner A20 SoCs pinctrl driver.
- *
- * Copyright (C) 2014 Maxime Ripard
- *
- * Maxime Ripard <[email protected]>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/pinctrl/pinctrl.h>
-
-#include "pinctrl-sunxi.h"
-
-static const struct sunxi_desc_pin sun7i_a20_pins[] = {
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "emac"), /* ERXD3 */
- SUNXI_FUNCTION(0x3, "spi1"), /* CS0 */
- SUNXI_FUNCTION(0x4, "uart2"), /* RTS */
- SUNXI_FUNCTION(0x5, "gmac")), /* GRXD3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "emac"), /* ERXD2 */
- SUNXI_FUNCTION(0x3, "spi1"), /* CLK */
- SUNXI_FUNCTION(0x4, "uart2"), /* CTS */
- SUNXI_FUNCTION(0x5, "gmac")), /* GRXD2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "emac"), /* ERXD1 */
- SUNXI_FUNCTION(0x3, "spi1"), /* MOSI */
- SUNXI_FUNCTION(0x4, "uart2"), /* TX */
- SUNXI_FUNCTION(0x5, "gmac")), /* GRXD1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "emac"), /* ERXD0 */
- SUNXI_FUNCTION(0x3, "spi1"), /* MISO */
- SUNXI_FUNCTION(0x4, "uart2"), /* RX */
- SUNXI_FUNCTION(0x5, "gmac")), /* GRXD0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "emac"), /* ETXD3 */
- SUNXI_FUNCTION(0x3, "spi1"), /* CS1 */
- SUNXI_FUNCTION(0x5, "gmac")), /* GTXD3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "emac"), /* ETXD2 */
- SUNXI_FUNCTION(0x3, "spi3"), /* CS0 */
- SUNXI_FUNCTION(0x5, "gmac")), /* GTXD2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "emac"), /* ETXD1 */
- SUNXI_FUNCTION(0x3, "spi3"), /* CLK */
- SUNXI_FUNCTION(0x5, "gmac")), /* GTXD1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "emac"), /* ETXD0 */
- SUNXI_FUNCTION(0x3, "spi3"), /* MOSI */
- SUNXI_FUNCTION(0x5, "gmac")), /* GTXD0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "emac"), /* ERXCK */
- SUNXI_FUNCTION(0x3, "spi3"), /* MISO */
- SUNXI_FUNCTION(0x5, "gmac")), /* GRXCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "emac"), /* ERXERR */
- SUNXI_FUNCTION(0x3, "spi3"), /* CS1 */
- SUNXI_FUNCTION(0x5, "gmac"), /* GNULL / ERXERR */
- SUNXI_FUNCTION(0x6, "i2s1")), /* MCLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "emac"), /* ERXDV */
- SUNXI_FUNCTION(0x4, "uart1"), /* TX */
- SUNXI_FUNCTION(0x5, "gmac")), /* GRXCTL / ERXDV */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "emac"), /* EMDC */
- SUNXI_FUNCTION(0x4, "uart1"), /* RX */
- SUNXI_FUNCTION(0x5, "gmac")), /* EMDC */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "emac"), /* EMDIO */
- SUNXI_FUNCTION(0x3, "uart6"), /* TX */
- SUNXI_FUNCTION(0x4, "uart1"), /* RTS */
- SUNXI_FUNCTION(0x5, "gmac")), /* EMDIO */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "emac"), /* ETXEN */
- SUNXI_FUNCTION(0x3, "uart6"), /* RX */
- SUNXI_FUNCTION(0x4, "uart1"), /* CTS */
- SUNXI_FUNCTION(0x5, "gmac")), /* GTXCTL / ETXEN */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "emac"), /* ETXCK */
- SUNXI_FUNCTION(0x3, "uart7"), /* TX */
- SUNXI_FUNCTION(0x4, "uart1"), /* DTR */
- SUNXI_FUNCTION(0x5, "gmac"), /* GNULL / ETXCK */
- SUNXI_FUNCTION(0x6, "i2s1")), /* BCLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "emac"), /* ECRS */
- SUNXI_FUNCTION(0x3, "uart7"), /* RX */
- SUNXI_FUNCTION(0x4, "uart1"), /* DSR */
- SUNXI_FUNCTION(0x5, "gmac"), /* GTXCK / ECRS */
- SUNXI_FUNCTION(0x6, "i2s1")), /* LRCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 16),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "emac"), /* ECOL */
- SUNXI_FUNCTION(0x3, "can"), /* TX */
- SUNXI_FUNCTION(0x4, "uart1"), /* DCD */
- SUNXI_FUNCTION(0x5, "gmac"), /* GCLKIN / ECOL */
- SUNXI_FUNCTION(0x6, "i2s1")), /* DO */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 17),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "emac"), /* ETXERR */
- SUNXI_FUNCTION(0x3, "can"), /* RX */
- SUNXI_FUNCTION(0x4, "uart1"), /* RING */
- SUNXI_FUNCTION(0x5, "gmac"), /* GNULL / ETXERR */
- SUNXI_FUNCTION(0x6, "i2s1")), /* LRCK */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c0")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c0")), /* SDA */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "pwm")), /* PWM0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ir0"), /* TX */
- SUNXI_FUNCTION(0x4, "spdif")), /* MCLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ir0")), /* RX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* MCLK */
- SUNXI_FUNCTION(0x3, "ac97")), /* MCLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* BCLK */
- SUNXI_FUNCTION(0x3, "ac97")), /* BCLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* LRCK */
- SUNXI_FUNCTION(0x3, "ac97")), /* SYNC */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* DO0 */
- SUNXI_FUNCTION(0x3, "ac97")), /* DO */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0")), /* DO1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0")), /* DO2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0")), /* DO3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0"), /* DI */
- SUNXI_FUNCTION(0x3, "ac97"), /* DI */
- SUNXI_FUNCTION(0x4, "spdif")), /* DI */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* CS1 */
- SUNXI_FUNCTION(0x4, "spdif")), /* DO */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* CS0 */
- SUNXI_FUNCTION(0x3, "jtag")), /* MS0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* CLK */
- SUNXI_FUNCTION(0x3, "jtag")), /* CK0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* MOSI */
- SUNXI_FUNCTION(0x3, "jtag")), /* DO0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 17),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi2"), /* MISO */
- SUNXI_FUNCTION(0x3, "jtag")), /* DI0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 18),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c1")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 19),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c1")), /* SDA */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 20),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c2")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 21),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2c2")), /* SDA */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 22),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart0"), /* TX */
- SUNXI_FUNCTION(0x3, "ir1")), /* TX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 23),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart0"), /* RX */
- SUNXI_FUNCTION(0x3, "ir1")), /* RX */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NWE */
- SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NALE */
- SUNXI_FUNCTION(0x3, "spi0")), /* MISO */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NCLE */
- SUNXI_FUNCTION(0x3, "spi0")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NCE1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NCE0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NRE# */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NRB0 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* CMD */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NRB1 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ0 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ1 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ2 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NDQ3 */
- SUNXI_FUNCTION(0x3, "mmc2")), /* D3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NDQ4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NDQ5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NDQ6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NDQ7 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NWP */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 17),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NCE2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 18),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NCE3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 19),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NCE4 */
- SUNXI_FUNCTION(0x3, "spi2")), /* CS0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 20),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NCE5 */
- SUNXI_FUNCTION(0x3, "spi2")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 21),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NCE6 */
- SUNXI_FUNCTION(0x3, "spi2")), /* MOSI */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 22),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0"), /* NCE7 */
- SUNXI_FUNCTION(0x3, "spi2")), /* MISO */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 23),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "spi0")), /* CS0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 24),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "nand0")), /* NDQS */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D0 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VP0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D1 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VN0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VP1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VN1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VP2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VN2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VPC */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VNC */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D8 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VP3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D9 */
- SUNXI_FUNCTION(0x3, "lvds0")), /* VM3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VP0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VN0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VP1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VN1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VP2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VN2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D16 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VPC */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D17 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VNC */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VP3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
- SUNXI_FUNCTION(0x3, "lvds1")), /* VN3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */
- SUNXI_FUNCTION(0x3, "csi1")), /* MCLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */
- SUNXI_FUNCTION(0x3, "sim")), /* VPPEN */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */
- SUNXI_FUNCTION(0x3, "sim")), /* VPPPP */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 23),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */
- SUNXI_FUNCTION(0x3, "sim")), /* DET */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 24),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */
- SUNXI_FUNCTION(0x3, "sim")), /* VCCEN */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 25),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* DE */
- SUNXI_FUNCTION(0x3, "sim")), /* RST */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 26),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* HSYNC */
- SUNXI_FUNCTION(0x3, "sim")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 27),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd0"), /* VSYNC */
- SUNXI_FUNCTION(0x3, "sim")), /* SDA */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* CLK */
- SUNXI_FUNCTION(0x3, "csi0")), /* PCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* ERR */
- SUNXI_FUNCTION(0x3, "csi0")), /* CK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* SYNC */
- SUNXI_FUNCTION(0x3, "csi0")), /* HSYNC */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* DVLD */
- SUNXI_FUNCTION(0x3, "csi0")), /* VSYNC */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D0 */
- SUNXI_FUNCTION(0x3, "csi0")), /* D0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D1 */
- SUNXI_FUNCTION(0x3, "csi0"), /* D1 */
- SUNXI_FUNCTION(0x4, "sim")), /* VPPEN */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D2 */
- SUNXI_FUNCTION(0x3, "csi0")), /* D2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D3 */
- SUNXI_FUNCTION(0x3, "csi0")), /* D3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D4 */
- SUNXI_FUNCTION(0x3, "csi0")), /* D4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D5 */
- SUNXI_FUNCTION(0x3, "csi0")), /* D5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D6 */
- SUNXI_FUNCTION(0x3, "csi0")), /* D6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts0"), /* D7 */
- SUNXI_FUNCTION(0x3, "csi0")), /* D7 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
- SUNXI_FUNCTION(0x4, "jtag")), /* MSI */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
- SUNXI_FUNCTION(0x4, "jtag")), /* DI1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
- SUNXI_FUNCTION(0x4, "uart0")), /* TX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
- SUNXI_FUNCTION(0x4, "jtag")), /* DO1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
- SUNXI_FUNCTION(0x4, "uart0")), /* RX */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
- SUNXI_FUNCTION(0x4, "jtag")), /* CK1 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts1"), /* CLK */
- SUNXI_FUNCTION(0x3, "csi1"), /* PCK */
- SUNXI_FUNCTION(0x4, "mmc1")), /* CMD */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts1"), /* ERR */
- SUNXI_FUNCTION(0x3, "csi1"), /* CK */
- SUNXI_FUNCTION(0x4, "mmc1")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts1"), /* SYNC */
- SUNXI_FUNCTION(0x3, "csi1"), /* HSYNC */
- SUNXI_FUNCTION(0x4, "mmc1")), /* D0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts1"), /* DVLD */
- SUNXI_FUNCTION(0x3, "csi1"), /* VSYNC */
- SUNXI_FUNCTION(0x4, "mmc1")), /* D1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts1"), /* D0 */
- SUNXI_FUNCTION(0x3, "csi1"), /* D0 */
- SUNXI_FUNCTION(0x4, "mmc1"), /* D2 */
- SUNXI_FUNCTION(0x5, "csi0")), /* D8 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts1"), /* D1 */
- SUNXI_FUNCTION(0x3, "csi1"), /* D1 */
- SUNXI_FUNCTION(0x4, "mmc1"), /* D3 */
- SUNXI_FUNCTION(0x5, "csi0")), /* D9 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts1"), /* D2 */
- SUNXI_FUNCTION(0x3, "csi1"), /* D2 */
- SUNXI_FUNCTION(0x4, "uart3"), /* TX */
- SUNXI_FUNCTION(0x5, "csi0")), /* D10 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts1"), /* D3 */
- SUNXI_FUNCTION(0x3, "csi1"), /* D3 */
- SUNXI_FUNCTION(0x4, "uart3"), /* RX */
- SUNXI_FUNCTION(0x5, "csi0")), /* D11 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts1"), /* D4 */
- SUNXI_FUNCTION(0x3, "csi1"), /* D4 */
- SUNXI_FUNCTION(0x4, "uart3"), /* RTS */
- SUNXI_FUNCTION(0x5, "csi0")), /* D12 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts1"), /* D5 */
- SUNXI_FUNCTION(0x3, "csi1"), /* D5 */
- SUNXI_FUNCTION(0x4, "uart3"), /* CTS */
- SUNXI_FUNCTION(0x5, "csi0")), /* D13 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts1"), /* D6 */
- SUNXI_FUNCTION(0x3, "csi1"), /* D6 */
- SUNXI_FUNCTION(0x4, "uart4"), /* TX */
- SUNXI_FUNCTION(0x5, "csi0")), /* D14 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ts1"), /* D7 */
- SUNXI_FUNCTION(0x3, "csi1"), /* D7 */
- SUNXI_FUNCTION(0x4, "uart4"), /* RX */
- SUNXI_FUNCTION(0x5, "csi0")), /* D15 */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D0 */
- SUNXI_FUNCTION(0x4, "uart3"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 0), /* EINT0 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D1 */
- SUNXI_FUNCTION(0x4, "uart3"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 1), /* EINT1 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D2 */
- SUNXI_FUNCTION(0x4, "uart3"), /* RTS */
- SUNXI_FUNCTION_IRQ(0x6, 2), /* EINT2 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D3 */
- SUNXI_FUNCTION(0x4, "uart3"), /* CTS */
- SUNXI_FUNCTION_IRQ(0x6, 3), /* EINT3 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D4 */
- SUNXI_FUNCTION(0x4, "uart4"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 4), /* EINT4 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D4 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D5 */
- SUNXI_FUNCTION(0x4, "uart4"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 5), /* EINT5 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D5 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D6 */
- SUNXI_FUNCTION(0x4, "uart5"), /* TX */
- SUNXI_FUNCTION(0x5, "ms"), /* BS */
- SUNXI_FUNCTION_IRQ(0x6, 6), /* EINT6 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D6 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D7 */
- SUNXI_FUNCTION(0x4, "uart5"), /* RX */
- SUNXI_FUNCTION(0x5, "ms"), /* CLK */
- SUNXI_FUNCTION_IRQ(0x6, 7), /* EINT7 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D7 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D8 */
- SUNXI_FUNCTION(0x3, "emac"), /* ERXD3 */
- SUNXI_FUNCTION(0x4, "keypad"), /* IN0 */
- SUNXI_FUNCTION(0x5, "ms"), /* D0 */
- SUNXI_FUNCTION_IRQ(0x6, 8), /* EINT8 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D8 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D9 */
- SUNXI_FUNCTION(0x3, "emac"), /* ERXD2 */
- SUNXI_FUNCTION(0x4, "keypad"), /* IN1 */
- SUNXI_FUNCTION(0x5, "ms"), /* D1 */
- SUNXI_FUNCTION_IRQ(0x6, 9), /* EINT9 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D9 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D10 */
- SUNXI_FUNCTION(0x3, "emac"), /* ERXD1 */
- SUNXI_FUNCTION(0x4, "keypad"), /* IN2 */
- SUNXI_FUNCTION(0x5, "ms"), /* D2 */
- SUNXI_FUNCTION_IRQ(0x6, 10), /* EINT10 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D10 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D11 */
- SUNXI_FUNCTION(0x3, "emac"), /* ERXD0 */
- SUNXI_FUNCTION(0x4, "keypad"), /* IN3 */
- SUNXI_FUNCTION(0x5, "ms"), /* D3 */
- SUNXI_FUNCTION_IRQ(0x6, 11), /* EINT11 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D11 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D12 */
- SUNXI_FUNCTION(0x4, "ps2"), /* SCK1 */
- SUNXI_FUNCTION_IRQ(0x6, 12), /* EINT12 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D12 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D13 */
- SUNXI_FUNCTION(0x4, "ps2"), /* SDA1 */
- SUNXI_FUNCTION(0x5, "sim"), /* RST */
- SUNXI_FUNCTION_IRQ(0x6, 13), /* EINT13 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D13 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D14 */
- SUNXI_FUNCTION(0x3, "emac"), /* ETXD3 */
- SUNXI_FUNCTION(0x4, "keypad"), /* IN4 */
- SUNXI_FUNCTION(0x5, "sim"), /* VPPEN */
- SUNXI_FUNCTION_IRQ(0x6, 14), /* EINT14 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D14 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D15 */
- SUNXI_FUNCTION(0x3, "emac"), /* ETXD3 */
- SUNXI_FUNCTION(0x4, "keypad"), /* IN5 */
- SUNXI_FUNCTION(0x5, "sim"), /* VPPPP */
- SUNXI_FUNCTION_IRQ(0x6, 15), /* EINT15 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D15 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 16),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D16 */
- SUNXI_FUNCTION(0x3, "emac"), /* ETXD2 */
- SUNXI_FUNCTION(0x4, "keypad"), /* IN6 */
- SUNXI_FUNCTION_IRQ(0x6, 16), /* EINT16 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D16 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D17 */
- SUNXI_FUNCTION(0x3, "emac"), /* ETXD1 */
- SUNXI_FUNCTION(0x4, "keypad"), /* IN7 */
- SUNXI_FUNCTION(0x5, "sim"), /* VCCEN */
- SUNXI_FUNCTION_IRQ(0x6, 17), /* EINT17 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D17 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 18),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D18 */
- SUNXI_FUNCTION(0x3, "emac"), /* ETXD0 */
- SUNXI_FUNCTION(0x4, "keypad"), /* OUT0 */
- SUNXI_FUNCTION(0x5, "sim"), /* SCK */
- SUNXI_FUNCTION_IRQ(0x6, 18), /* EINT18 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D18 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 19),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D19 */
- SUNXI_FUNCTION(0x3, "emac"), /* ERXERR */
- SUNXI_FUNCTION(0x4, "keypad"), /* OUT1 */
- SUNXI_FUNCTION(0x5, "sim"), /* SDA */
- SUNXI_FUNCTION_IRQ(0x6, 19), /* EINT19 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D19 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 20),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D20 */
- SUNXI_FUNCTION(0x3, "emac"), /* ERXDV */
- SUNXI_FUNCTION(0x4, "can"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 20), /* EINT20 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D20 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 21),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D21 */
- SUNXI_FUNCTION(0x3, "emac"), /* EMDC */
- SUNXI_FUNCTION(0x4, "can"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 21), /* EINT21 */
- SUNXI_FUNCTION(0x7, "csi1")), /* D21 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 22),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D22 */
- SUNXI_FUNCTION(0x3, "emac"), /* EMDIO */
- SUNXI_FUNCTION(0x4, "keypad"), /* OUT2 */
- SUNXI_FUNCTION(0x5, "mmc1"), /* CMD */
- SUNXI_FUNCTION(0x7, "csi1")), /* D22 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 23),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* D23 */
- SUNXI_FUNCTION(0x3, "emac"), /* ETXEN */
- SUNXI_FUNCTION(0x4, "keypad"), /* OUT3 */
- SUNXI_FUNCTION(0x5, "mmc1"), /* CLK */
- SUNXI_FUNCTION(0x7, "csi1")), /* D23 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 24),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* CLK */
- SUNXI_FUNCTION(0x3, "emac"), /* ETXCK */
- SUNXI_FUNCTION(0x4, "keypad"), /* OUT4 */
- SUNXI_FUNCTION(0x5, "mmc1"), /* D0 */
- SUNXI_FUNCTION(0x7, "csi1")), /* PCLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 25),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* DE */
- SUNXI_FUNCTION(0x3, "emac"), /* ECRS */
- SUNXI_FUNCTION(0x4, "keypad"), /* OUT5 */
- SUNXI_FUNCTION(0x5, "mmc1"), /* D1 */
- SUNXI_FUNCTION(0x7, "csi1")), /* FIELD */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 26),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* HSYNC */
- SUNXI_FUNCTION(0x3, "emac"), /* ECOL */
- SUNXI_FUNCTION(0x4, "keypad"), /* OUT6 */
- SUNXI_FUNCTION(0x5, "mmc1"), /* D2 */
- SUNXI_FUNCTION(0x7, "csi1")), /* HSYNC */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 27),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "lcd1"), /* VSYNC */
- SUNXI_FUNCTION(0x3, "emac"), /* ETXERR */
- SUNXI_FUNCTION(0x4, "keypad"), /* OUT7 */
- SUNXI_FUNCTION(0x5, "mmc1"), /* D3 */
- SUNXI_FUNCTION(0x7, "csi1")), /* VSYNC */
- /* Hole */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 0),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "i2c3")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 1),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "i2c3")), /* SDA */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 2),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "i2c4")), /* SCK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 3),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "pwm"), /* PWM1 */
- SUNXI_FUNCTION(0x3, "i2c4")), /* SDA */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 4),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc3")), /* CMD */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 5),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc3")), /* CLK */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 6),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc3")), /* D0 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 7),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc3")), /* D1 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 8),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc3")), /* D2 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 9),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc3")), /* D3 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 10),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi0"), /* CS0 */
- SUNXI_FUNCTION(0x3, "uart5"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 22)), /* EINT22 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 11),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi0"), /* CLK */
- SUNXI_FUNCTION(0x3, "uart5"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 23)), /* EINT23 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 12),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi0"), /* MOSI */
- SUNXI_FUNCTION(0x3, "uart6"), /* TX */
- SUNXI_FUNCTION(0x4, "clk_out_a"), /* CLK_OUT_A */
- SUNXI_FUNCTION_IRQ(0x6, 24)), /* EINT24 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 13),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi0"), /* MISO */
- SUNXI_FUNCTION(0x3, "uart6"), /* RX */
- SUNXI_FUNCTION(0x4, "clk_out_b"), /* CLK_OUT_B */
- SUNXI_FUNCTION_IRQ(0x6, 25)), /* EINT25 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 14),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi0"), /* CS1 */
- SUNXI_FUNCTION(0x3, "ps2"), /* SCK1 */
- SUNXI_FUNCTION(0x4, "timer4"), /* TCLKIN0 */
- SUNXI_FUNCTION_IRQ(0x6, 26)), /* EINT26 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 15),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* CS1 */
- SUNXI_FUNCTION(0x3, "ps2"), /* SDA1 */
- SUNXI_FUNCTION(0x4, "timer5"), /* TCLKIN1 */
- SUNXI_FUNCTION_IRQ(0x6, 27)), /* EINT27 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 16),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* CS0 */
- SUNXI_FUNCTION(0x3, "uart2"), /* RTS */
- SUNXI_FUNCTION_IRQ(0x6, 28)), /* EINT28 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 17),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* CLK */
- SUNXI_FUNCTION(0x3, "uart2"), /* CTS */
- SUNXI_FUNCTION_IRQ(0x6, 29)), /* EINT29 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 18),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* MOSI */
- SUNXI_FUNCTION(0x3, "uart2"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 30)), /* EINT30 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 19),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "spi1"), /* MISO */
- SUNXI_FUNCTION(0x3, "uart2"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 31)), /* EINT31 */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 20),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ps2"), /* SCK0 */
- SUNXI_FUNCTION(0x3, "uart7"), /* TX */
- SUNXI_FUNCTION(0x4, "hdmi")), /* HSCL */
- SUNXI_PIN(SUNXI_PINCTRL_PIN(I, 21),
- SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "ps2"), /* SDA0 */
- SUNXI_FUNCTION(0x3, "uart7"), /* RX */
- SUNXI_FUNCTION(0x4, "hdmi")), /* HSDA */
-};
-
-static const struct sunxi_pinctrl_desc sun7i_a20_pinctrl_data = {
- .pins = sun7i_a20_pins,
- .npins = ARRAY_SIZE(sun7i_a20_pins),
- .irq_banks = 1,
-};
-
-static int sun7i_a20_pinctrl_probe(struct platform_device *pdev)
-{
- return sunxi_pinctrl_init(pdev,
- &sun7i_a20_pinctrl_data);
-}
-
-static const struct of_device_id sun7i_a20_pinctrl_match[] = {
- { .compatible = "allwinner,sun7i-a20-pinctrl", },
- {}
-};
-
-static struct platform_driver sun7i_a20_pinctrl_driver = {
- .probe = sun7i_a20_pinctrl_probe,
- .driver = {
- .name = "sun7i-a20-pinctrl",
- .of_match_table = sun7i_a20_pinctrl_match,
- },
-};
-builtin_platform_driver(sun7i_a20_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c
new file mode 100644
index 000000000000..6531cf67958e
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t-r.c
@@ -0,0 +1,128 @@
+/*
+ * Allwinner A83T SoCs special pins pinctrl driver.
+ *
+ * Copyright (C) 2017 Chen-Yu Tsai
+ * Chen-Yu Tsai <[email protected]>
+ *
+ * Based on pinctrl-sun50i-a64-r.c
+ *
+ * Copyright (C) 2016 Icenowy Zheng
+ * Icenowy Zheng <[email protected]>
+ *
+ * Copyright (C) 2014 Chen-Yu Tsai
+ * Chen-Yu Tsai <[email protected]>
+ *
+ * Copyright (C) 2014 Boris Brezillon
+ * Boris Brezillon <[email protected]>
+ *
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <[email protected]>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin sun8i_a83t_r_pins[] = {
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_rsb"), /* SCK */
+ SUNXI_FUNCTION(0x3, "s_i2c"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PL_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_rsb"), /* SDA */
+ SUNXI_FUNCTION(0x3, "s_i2c"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PL_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_uart"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* PL_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_uart"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PL_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* MS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* PL_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* CK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* PL_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* DO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PL_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_jtag"), /* DI */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* PL_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_i2c"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* PL_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_i2c"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* PL_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_pwm"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)), /* PL_EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 11)), /* PL_EINT11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_cir_rx"),
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 12)), /* PL_EINT12 */
+};
+
+static const struct sunxi_pinctrl_desc sun8i_a83t_r_pinctrl_data = {
+ .pins = sun8i_a83t_r_pins,
+ .npins = ARRAY_SIZE(sun8i_a83t_r_pins),
+ .pin_base = PL_BASE,
+ .irq_banks = 1,
+};
+
+static int sun8i_a83t_r_pinctrl_probe(struct platform_device *pdev)
+{
+ return sunxi_pinctrl_init(pdev,
+ &sun8i_a83t_r_pinctrl_data);
+}
+
+static const struct of_device_id sun8i_a83t_r_pinctrl_match[] = {
+ { .compatible = "allwinner,sun8i-a83t-r-pinctrl", },
+ {}
+};
+
+static struct platform_driver sun8i_a83t_r_pinctrl_driver = {
+ .probe = sun8i_a83t_r_pinctrl_probe,
+ .driver = {
+ .name = "sun8i-a83t-r-pinctrl",
+ .of_match_table = sun8i_a83t_r_pinctrl_match,
+ },
+};
+builtin_platform_driver(sun8i_a83t_r_pinctrl_driver);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 58774acfc814..0dfd7fa66c48 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -979,7 +979,7 @@ static int sunxi_pinctrl_irq_of_xlate(struct irq_domain *d,
return 0;
}
-static struct irq_domain_ops sunxi_pinctrl_irq_domain_ops = {
+static const struct irq_domain_ops sunxi_pinctrl_irq_domain_ops = {
.xlate = sunxi_pinctrl_irq_of_xlate,
};
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index a9d315a1256c..1bfc0d8a55df 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -87,6 +87,9 @@
#define PINCTRL_SUN5I_GR8 BIT(3)
#define PINCTRL_SUN6I_A31 BIT(4)
#define PINCTRL_SUN6I_A31S BIT(5)
+#define PINCTRL_SUN4I_A10 BIT(6)
+#define PINCTRL_SUN7I_A20 BIT(7)
+#define PINCTRL_SUN8I_R40 BIT(8)
struct sunxi_desc_function {
unsigned long variant;
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 277622b4b6fb..51716819129d 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -21,7 +21,6 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/machine.h>
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra114.c b/drivers/pinctrl/tegra/pinctrl-tegra114.c
index 952132ce5ea0..56b33fca1bfc 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra114.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra114.c
@@ -1,6 +1,8 @@
/*
* Pinctrl data for the NVIDIA Tegra114 pinmux
*
+ * Author: Pritesh Raithatha <[email protected]>
+ *
* Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
@@ -13,7 +15,7 @@
* more details.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -1857,7 +1859,6 @@ static const struct of_device_id tegra114_pinctrl_of_match[] = {
{ .compatible = "nvidia,tegra114-pinmux", },
{ },
};
-MODULE_DEVICE_TABLE(of, tegra114_pinctrl_of_match);
static struct platform_driver tegra114_pinctrl_driver = {
.driver = {
@@ -1866,8 +1867,4 @@ static struct platform_driver tegra114_pinctrl_driver = {
},
.probe = tegra114_pinctrl_probe,
};
-module_platform_driver(tegra114_pinctrl_driver);
-
-MODULE_AUTHOR("Pritesh Raithatha <[email protected]>");
-MODULE_DESCRIPTION("NVIDIA Tegra114 pinctrl driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(tegra114_pinctrl_driver);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra124.c b/drivers/pinctrl/tegra/pinctrl-tegra124.c
index bca239e3ae50..7bc998ace0d5 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra124.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra124.c
@@ -1,6 +1,8 @@
/*
* Pinctrl data for the NVIDIA Tegra124 pinmux
*
+ * Author: Ashwini Ghuge <[email protected]>
+ *
* Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
@@ -13,7 +15,7 @@
* more details.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -2069,7 +2071,6 @@ static const struct of_device_id tegra124_pinctrl_of_match[] = {
{ .compatible = "nvidia,tegra124-pinmux", },
{ },
};
-MODULE_DEVICE_TABLE(of, tegra124_pinctrl_of_match);
static struct platform_driver tegra124_pinctrl_driver = {
.driver = {
@@ -2078,8 +2079,4 @@ static struct platform_driver tegra124_pinctrl_driver = {
},
.probe = tegra124_pinctrl_probe,
};
-module_platform_driver(tegra124_pinctrl_driver);
-
-MODULE_AUTHOR("Ashwini Ghuge <[email protected]>");
-MODULE_DESCRIPTION("NVIDIA Tegra124 pinctrl driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(tegra124_pinctrl_driver);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra20.c b/drivers/pinctrl/tegra/pinctrl-tegra20.c
index ad62451a5a9b..7e38ee9bae78 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra20.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra20.c
@@ -1,6 +1,8 @@
/*
* Pinctrl data for the NVIDIA Tegra20 pinmux
*
+ * Author: Stephen Warren <[email protected]>
+ *
* Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved.
*
* Derived from code:
@@ -17,7 +19,7 @@
* more details.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -2246,9 +2248,4 @@ static struct platform_driver tegra20_pinctrl_driver = {
},
.probe = tegra20_pinctrl_probe,
};
-module_platform_driver(tegra20_pinctrl_driver);
-
-MODULE_AUTHOR("Stephen Warren <[email protected]>");
-MODULE_DESCRIPTION("NVIDIA Tegra20 pinctrl driver");
-MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, tegra20_pinctrl_of_match);
+builtin_platform_driver(tegra20_pinctrl_driver);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra210.c b/drivers/pinctrl/tegra/pinctrl-tegra210.c
index 2b70e93da9db..c244e5b17bd6 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra210.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra210.c
@@ -13,7 +13,7 @@
* more details.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -1573,7 +1573,6 @@ static const struct of_device_id tegra210_pinctrl_of_match[] = {
{ .compatible = "nvidia,tegra210-pinmux", },
{ },
};
-MODULE_DEVICE_TABLE(of, tegra210_pinctrl_of_match);
static struct platform_driver tegra210_pinctrl_driver = {
.driver = {
@@ -1582,8 +1581,4 @@ static struct platform_driver tegra210_pinctrl_driver = {
},
.probe = tegra210_pinctrl_probe,
};
-module_platform_driver(tegra210_pinctrl_driver);
-
-MODULE_AUTHOR("NVIDIA");
-MODULE_DESCRIPTION("NVIDIA Tegra210 pinctrl driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(tegra210_pinctrl_driver);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra30.c b/drivers/pinctrl/tegra/pinctrl-tegra30.c
index 474ac6daf513..1f180a20f2ab 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra30.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra30.c
@@ -1,6 +1,8 @@
/*
* Pinctrl data for the NVIDIA Tegra30 pinmux
*
+ * Author: Stephen Warren <[email protected]>
+ *
* Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
@@ -13,7 +15,7 @@
* more details.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>
@@ -2492,7 +2494,6 @@ static const struct of_device_id tegra30_pinctrl_of_match[] = {
{ .compatible = "nvidia,tegra30-pinmux", },
{ },
};
-MODULE_DEVICE_TABLE(of, tegra30_pinctrl_of_match);
static struct platform_driver tegra30_pinctrl_driver = {
.driver = {
@@ -2501,8 +2502,4 @@ static struct platform_driver tegra30_pinctrl_driver = {
},
.probe = tegra30_pinctrl_probe,
};
-module_platform_driver(tegra30_pinctrl_driver);
-
-MODULE_AUTHOR("Stephen Warren <[email protected]>");
-MODULE_DESCRIPTION("NVIDIA Tegra30 pinctrl driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(tegra30_pinctrl_driver);
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
index 706effe0a492..ad73db8d067b 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
@@ -508,57 +508,71 @@ static const unsigned usb1_pins[] = {48, 49};
static const int usb1_muxvals[] = {0, 0};
static const unsigned usb2_pins[] = {50, 51};
static const int usb2_muxvals[] = {0, 0};
-static const unsigned port_range_pins[] = {
+static const unsigned port_range0_pins[] = {
159, 160, 161, 162, 163, 164, 165, 166, /* PORT0x */
0, 1, 2, 3, 4, 5, 6, 7, /* PORT1x */
8, 9, 10, 11, 12, 13, 14, 15, /* PORT2x */
- 16, 17, 18, -1, -1, -1, -1, -1, /* PORT3x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT4x */
- -1, -1, -1, 46, 47, 48, 49, 50, /* PORT5x */
- 51, -1, -1, 54, 55, 56, 57, 58, /* PORT6x */
+ 16, 17, 18, /* PORT30-32 */
+};
+static const int port_range0_muxvals[] = {
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */
+ 15, 15, 15, /* PORT30-32 */
+};
+static const unsigned port_range1_pins[] = {
+ 46, 47, 48, 49, 50, /* PORT53-57 */
+ 51, /* PORT60 */
+};
+static const int port_range1_muxvals[] = {
+ 15, 15, 15, 15, 15, /* PORT53-57 */
+ 15, /* PORT60 */
+};
+static const unsigned port_range2_pins[] = {
+ 54, 55, 56, 57, 58, /* PORT63-67 */
59, 60, 69, 70, 71, 72, 73, 74, /* PORT7x */
75, 76, 77, 78, 79, 80, 81, 82, /* PORT8x */
83, 84, 85, 86, 87, 88, 89, 90, /* PORT9x */
91, 92, 93, 94, 95, 96, 97, 98, /* PORT10x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */
- 99, 100, 101, 102, 103, 104, 105, 106, /* PORT12x */
- 107, 108, 109, 110, 111, 112, 113, 114, /* PORT13x */
- 115, 116, 117, 118, 119, 120, 121, 122, /* PORT14x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */
- 61, 62, 63, 64, 65, 66, 67, 68, /* PORT18x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */
- 123, 124, 125, 126, 127, 128, 129, 130, /* PORT20x */
- 131, 132, 133, 134, 135, 136, 137, 138, /* PORT21x */
- 139, 140, 141, 142, -1, -1, -1, -1, /* PORT22x */
- 147, 148, 149, 150, 151, 152, 153, 154, /* PORT23x */
- 155, 156, 157, 143, 144, 145, 146, 158, /* PORT24x */
};
-static const int port_range_muxvals[] = {
- 15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */
- 15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */
- 15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */
- 15, 15, 15, -1, -1, -1, -1, -1, /* PORT3x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT4x */
- -1, -1, -1, 15, 15, 15, 15, 15, /* PORT5x */
- 15, -1, -1, 15, 15, 15, 15, 15, /* PORT6x */
+static const int port_range2_muxvals[] = {
+ 15, 15, 15, 15, 15, /* PORT63-67 */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT7x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT8x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT9x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT10x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */
+};
+static const unsigned port_range3_pins[] = {
+ 99, 100, 101, 102, 103, 104, 105, 106, /* PORT12x */
+ 107, 108, 109, 110, 111, 112, 113, 114, /* PORT13x */
+ 115, 116, 117, 118, 119, 120, 121, 122, /* PORT14x */
+};
+static const int port_range3_muxvals[] = {
15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */
+};
+static const unsigned port_range4_pins[] = {
+ 61, 62, 63, 64, 65, 66, 67, 68, /* PORT18x */
+};
+static const int port_range4_muxvals[] = {
15, 15, 15, 15, 15, 15, 15, 15, /* PORT18x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */
+};
+static const unsigned port_range5_pins[] = {
+ 123, 124, 125, 126, 127, 128, 129, 130, /* PORT20x */
+ 131, 132, 133, 134, 135, 136, 137, 138, /* PORT21x */
+ 139, 140, 141, 142, /* PORT220-223 */
+};
+static const int port_range5_muxvals[] = {
15, 15, 15, 15, 15, 15, 15, 15, /* PORT20x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT21x */
- 15, 15, 15, 15, -1, -1, -1, -1, /* PORT22x */
+ 15, 15, 15, 15, /* PORT220-223 */
+};
+static const unsigned port_range6_pins[] = {
+ 147, 148, 149, 150, 151, 152, 153, 154, /* PORT23x */
+ 155, 156, 157, 143, 144, 145, 146, 158, /* PORT24x */
+};
+static const int port_range6_muxvals[] = {
15, 15, 15, 15, 15, 15, 15, 15, /* PORT23x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT24x */
};
@@ -607,147 +621,153 @@ static const struct uniphier_pinctrl_group uniphier_ld11_groups[] = {
UNIPHIER_PINCTRL_GROUP(usb0),
UNIPHIER_PINCTRL_GROUP(usb1),
UNIPHIER_PINCTRL_GROUP(usb2),
- UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range2),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range3),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range4),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range5),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range6),
UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range, 0),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range, 1),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range, 2),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range, 3),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range, 4),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range, 5),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range, 6),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range, 7),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range, 8),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range, 9),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range, 10),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range, 11),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range, 12),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range, 13),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range, 14),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range, 15),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range, 16),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range, 17),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range, 18),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range, 19),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range, 20),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range, 21),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range, 22),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range, 23),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range, 24),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range, 25),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range, 26),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range, 43),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range, 44),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range, 45),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range, 46),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range, 47),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range, 48),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range, 51),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range, 52),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range, 53),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range, 54),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range, 55),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range, 56),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range, 57),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range, 58),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range, 59),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range, 60),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range, 61),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range, 62),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range, 63),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range, 64),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range, 65),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range, 66),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range, 67),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range, 68),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range, 69),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range, 70),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range, 71),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range, 72),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range, 73),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range, 74),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range, 75),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range, 76),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range, 77),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range, 78),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range, 79),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range, 80),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range, 81),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range, 82),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range, 83),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range, 84),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range, 85),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range, 86),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range, 87),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range, 96),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range, 97),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range, 98),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range, 99),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range, 100),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range, 101),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range, 102),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range, 103),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range, 104),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range, 105),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range, 106),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range, 107),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range, 108),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range, 109),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range, 110),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range, 111),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range, 112),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range, 113),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range, 114),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range, 115),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range, 116),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range, 117),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range, 118),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range, 119),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range, 144),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range, 145),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range, 146),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range, 147),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range, 148),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range, 149),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range, 150),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range, 151),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range, 160),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range, 161),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range, 162),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range, 163),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range, 164),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range, 165),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range, 166),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range, 167),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range, 168),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range, 169),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range, 170),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range, 171),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range, 172),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range, 173),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range, 174),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range, 175),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range, 176),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range, 177),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range, 178),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range, 179),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range, 184),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range, 185),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range, 186),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range, 187),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range, 188),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range, 189),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range, 190),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range, 191),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range, 192),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range, 193),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range, 194),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range, 195),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range, 196),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range, 197),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range, 198),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range, 199),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range1, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range1, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range1, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range1, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range1, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range1, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range2, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range2, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range2, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range2, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range2, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range2, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range2, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range2, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range2, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range2, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range2, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range2, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range2, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range2, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range2, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range2, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range2, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range2, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range2, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range2, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range2, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range2, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range2, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range2, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range2, 24),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range2, 25),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range2, 26),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range2, 27),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range2, 28),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range2, 29),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range2, 30),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range2, 31),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range2, 32),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range2, 33),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range2, 34),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range2, 35),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range2, 36),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range3, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range3, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range3, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range3, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range3, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range3, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range3, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range3, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range3, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range3, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range3, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range3, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range3, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range3, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range3, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range3, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range3, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range3, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range3, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range3, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range3, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range3, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range3, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range3, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range4, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range4, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range4, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range4, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range4, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range4, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range4, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range4, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range5, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range5, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range5, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range5, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range5, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range5, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range5, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range5, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range5, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range5, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range5, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range5, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range5, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range5, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range5, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range5, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range5, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range5, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range5, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range5, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range6, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range6, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range6, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range6, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range6, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range6, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range6, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range6, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range6, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range6, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range6, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range6, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range6, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range6, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range6, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range6, 15),
UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
index c8d18a2d3a88..93006626028d 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
@@ -597,7 +597,7 @@ static const unsigned usb2_pins[] = {50, 51};
static const int usb2_muxvals[] = {0, 0};
static const unsigned usb3_pins[] = {52, 53};
static const int usb3_muxvals[] = {0, 0};
-static const unsigned port_range_pins[] = {
+static const unsigned port_range0_pins[] = {
168, 169, 170, 171, 172, 173, 174, 175, /* PORT0x */
0, 1, 2, 3, 4, 5, 6, 7, /* PORT1x */
8, 9, 10, 11, 12, 13, 14, 15, /* PORT2x */
@@ -609,23 +609,8 @@ static const unsigned port_range_pins[] = {
75, 76, 77, 78, 79, 80, 81, 82, /* PORT8x */
83, 84, 85, 86, 87, 88, 89, 90, /* PORT9x */
91, 92, 93, 94, 95, 96, 97, 98, /* PORT10x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */
- 99, 100, 101, 102, 103, 104, 105, 106, /* PORT12x */
- 107, 108, 109, 110, 111, 112, 113, 114, /* PORT13x */
- 115, 116, 117, 118, 119, 120, 121, 122, /* PORT14x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */
- 61, 62, 63, 64, 65, 66, 67, 68, /* PORT18x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */
- 123, 124, 125, 126, 127, 128, 129, 130, /* PORT20x */
- 131, 132, 133, 134, 135, 136, 137, 138, /* PORT21x */
- 139, 140, 141, 142, 143, 144, 145, 146, /* PORT22x */
- 147, 148, 149, 150, 151, 152, 153, 154, /* PORT23x */
- 155, 156, 157, 158, 159, 160, 161, 162, /* PORT24x */
- 163, 164, 165, 166, 167, /* PORT25x */
};
-static const int port_range_muxvals[] = {
+static const int port_range0_muxvals[] = {
15, 15, 15, 15, 15, 15, 15, 15, /* PORT0x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT1x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT2x */
@@ -637,21 +622,38 @@ static const int port_range_muxvals[] = {
15, 15, 15, 15, 15, 15, 15, 15, /* PORT8x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT9x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT10x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT11x */
+};
+static const unsigned port_range1_pins[] = {
+ 99, 100, 101, 102, 103, 104, 105, 106, /* PORT12x */
+ 107, 108, 109, 110, 111, 112, 113, 114, /* PORT13x */
+ 115, 116, 117, 118, 119, 120, 121, 122, /* PORT14x */
+};
+static const int port_range1_muxvals[] = {
15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT15x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT16x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT17x */
+};
+static const unsigned port_range2_pins[] = {
+ 61, 62, 63, 64, 65, 66, 67, 68, /* PORT18x */
+};
+static const int port_range2_muxvals[] = {
15, 15, 15, 15, 15, 15, 15, 15, /* PORT18x */
- -1, -1, -1, -1, -1, -1, -1, -1, /* PORT19x */
+};
+static const unsigned port_range3_pins[] = {
+ 123, 124, 125, 126, 127, 128, 129, 130, /* PORT20x */
+ 131, 132, 133, 134, 135, 136, 137, 138, /* PORT21x */
+ 139, 140, 141, 142, 143, 144, 145, 146, /* PORT22x */
+ 147, 148, 149, 150, 151, 152, 153, 154, /* PORT23x */
+ 155, 156, 157, 158, 159, 160, 161, 162, /* PORT24x */
+ 163, 164, 165, 166, 167, /* PORT250-254 */
+};
+static const int port_range3_muxvals[] = {
15, 15, 15, 15, 15, 15, 15, 15, /* PORT20x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT21x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT22x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT23x */
15, 15, 15, 15, 15, 15, 15, 15, /* PORT24x */
- 15, 15, 15, 15, 15, /* PORT25x */
+ 15, 15, 15, 15, 15, /* PORT250-254 */
};
static const unsigned xirq_pins[] = {
149, 150, 151, 152, 153, 154, 155, 156, /* XIRQ0-7 */
@@ -695,174 +697,177 @@ static const struct uniphier_pinctrl_group uniphier_ld20_groups[] = {
UNIPHIER_PINCTRL_GROUP(usb1),
UNIPHIER_PINCTRL_GROUP(usb2),
UNIPHIER_PINCTRL_GROUP(usb3),
- UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range2),
+ UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range3),
UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range, 0),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range, 1),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range, 2),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range, 3),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range, 4),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range, 5),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range, 6),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range, 7),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range, 8),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range, 9),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range, 10),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range, 11),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range, 12),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range, 13),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range, 14),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range, 15),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range, 16),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range, 17),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range, 18),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range, 19),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range, 20),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range, 21),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range, 22),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range, 23),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range, 24),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range, 25),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range, 26),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range, 27),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range, 28),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range, 29),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range, 30),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range, 31),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range, 32),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range, 33),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range, 34),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range, 35),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range, 36),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range, 37),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range, 38),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range, 39),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range, 40),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range, 41),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range, 42),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range, 43),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range, 44),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range, 45),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range, 46),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range, 47),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range, 48),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range, 49),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range, 50),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range, 51),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range, 52),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range, 53),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range, 54),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range, 55),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range, 56),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range, 57),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range, 58),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range, 59),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range, 60),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range, 61),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range, 62),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range, 63),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range, 64),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range, 65),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range, 66),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range, 67),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range, 68),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range, 69),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range, 70),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range, 71),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range, 72),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range, 73),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range, 74),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range, 75),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range, 76),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range, 77),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range, 78),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range, 79),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range, 80),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range, 81),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range, 82),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range, 83),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range, 84),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range, 85),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range, 86),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range, 87),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range, 96),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range, 97),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range, 98),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range, 99),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range, 100),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range, 101),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range, 102),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range, 103),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range, 104),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range, 105),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range, 106),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range, 107),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range, 108),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range, 109),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range, 110),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range, 111),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range, 112),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range, 113),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range, 114),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range, 115),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range, 116),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range, 117),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range, 118),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range, 119),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range, 144),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range, 145),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range, 146),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range, 147),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range, 148),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range, 149),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range, 150),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range, 151),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range, 160),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range, 161),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range, 162),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range, 163),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range, 164),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range, 165),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range, 166),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range, 167),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range, 168),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range, 169),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range, 170),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range, 171),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range, 172),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range, 173),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range, 174),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range, 175),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range, 176),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range, 177),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range, 178),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range, 179),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range, 180),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range, 181),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range, 182),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range, 183),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range, 184),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range, 185),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range, 186),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range, 187),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range, 188),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range, 189),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range, 190),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range, 191),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range, 192),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range, 193),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range, 194),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range, 195),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range, 196),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range, 197),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range, 198),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range, 199),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range, 200),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range, 201),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range, 202),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range, 203),
- UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range, 204),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range1, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range1, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range1, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range1, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range1, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range1, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range1, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range1, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range1, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range1, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range1, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range1, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range1, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range1, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range1, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range1, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range1, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range1, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range1, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range1, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range1, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range1, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range1, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range1, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range2, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range2, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range2, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range2, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range2, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range2, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range2, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range2, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range3, 0),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range3, 1),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range3, 2),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range3, 3),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range3, 4),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range3, 5),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range3, 6),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range3, 7),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range3, 8),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range3, 9),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range3, 10),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range3, 11),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range3, 12),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range3, 13),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range3, 14),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range3, 15),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range3, 16),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range3, 17),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range3, 18),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range3, 19),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range3, 20),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range3, 21),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range3, 22),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range3, 23),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range3, 24),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range3, 25),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range3, 26),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range3, 27),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range3, 28),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range3, 29),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range3, 30),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range3, 31),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range3, 32),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range3, 33),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range3, 34),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range3, 35),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range3, 36),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range3, 37),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range3, 38),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range3, 39),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range3, 40),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range3, 41),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range3, 42),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range3, 43),
+ UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range3, 44),
UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
diff --git a/drivers/pinctrl/zte/Kconfig b/drivers/pinctrl/zte/Kconfig
new file mode 100644
index 000000000000..0d97352a24ec
--- /dev/null
+++ b/drivers/pinctrl/zte/Kconfig
@@ -0,0 +1,13 @@
+config PINCTRL_ZX
+ bool
+ select PINMUX
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+
+config PINCTRL_ZX296718
+ bool "ZTE ZX296718 pinctrl driver"
+ depends on OF && ARCH_ZX
+ select PINCTRL_ZX
+ help
+	  Say Y here to enable the ZX296718 pinctrl driver.
diff --git a/drivers/pinctrl/zte/Makefile b/drivers/pinctrl/zte/Makefile
new file mode 100644
index 000000000000..c42e651d7a73
--- /dev/null
+++ b/drivers/pinctrl/zte/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_PINCTRL_ZX) += pinctrl-zx.o
+obj-$(CONFIG_PINCTRL_ZX296718) += pinctrl-zx296718.o
diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c
new file mode 100644
index 000000000000..787e3967bd5c
--- /dev/null
+++ b/drivers/pinctrl/zte/pinctrl-zx.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) 2017 Sanechips Technology Co., Ltd.
+ * Copyright 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+#include "../pinmux.h"
+#include "pinctrl-zx.h"
+
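+/*
+ * Per-pin pinconf field layout, as encoded by the bit definitions below.
+ * The whole field is shifted by the pin's cbitpos within its AON register:
+ *	bit 0:    pull-down enable
+ *	bit 1:    pull-up enable
+ *	bit 3:    input enable
+ *	bits 4-6: drive strength
+ *	bit 8:    slew rate
+ */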
+#define ZX_PULL_DOWN BIT(0)
+#define ZX_PULL_UP BIT(1)
+#define ZX_INPUT_ENABLE BIT(3)
+#define ZX_DS_SHIFT 4
+#define ZX_DS_MASK (0x7 << ZX_DS_SHIFT)
+#define ZX_DS_VALUE(x) (((x) << ZX_DS_SHIFT) & ZX_DS_MASK)
+#define ZX_SLEW BIT(8)
+
+struct zx_pinctrl {
+ struct pinctrl_dev *pctldev;
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *aux_base;
+ spinlock_t lock;
+ struct zx_pinctrl_soc_info *info;
+};
+
+static int zx_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np_config,
+ struct pinctrl_map **map, u32 *num_maps)
+{
+ return pinconf_generic_dt_node_to_map(pctldev, np_config, map,
+ num_maps, PIN_MAP_TYPE_INVALID);
+}
+
+static const struct pinctrl_ops zx_pinctrl_ops = {
+ .dt_node_to_map = zx_dt_node_to_map,
+ .dt_free_map = pinctrl_utils_free_map,
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+};
+
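+/*
+ * AON pinmux register value that routes an AON-capable pin to the TOP
+ * (non-AON) pinmux controller; see the AON branch of zx_set_mux() below.
+ */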
+#define NONAON_MVAL 2
+
+static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
+ unsigned int group_selector)
+{
+	struct zx_pinctrl *zpctl = pinctrl_dev_get_drvdata(pctldev);
+	struct zx_pinctrl_soc_info *info = zpctl->info;
+	const struct pinctrl_pin_desc *pindesc = info->pins + group_selector;
+	struct zx_pin_data *data = pindesc->drv_data;
+	struct zx_mux_desc *mux;
+	struct function_desc *func;
+	unsigned long flags;
+	u32 mask, offset, bitpos;
+	u32 val, mval;
+
+	/* Skip reserved pins, which have no drv_data */
+	if (!data)
+		return -EINVAL;
+
+	mux = data->muxes;
+	mask = (1 << data->width) - 1;
+	offset = data->offset;
+	bitpos = data->bitpos;
+
+ func = pinmux_generic_get_function(pctldev, func_selector);
+ if (!func)
+ return -EINVAL;
+
+ while (mux->name) {
+ if (strcmp(mux->name, func->name) == 0)
+ break;
+ mux++;
+ }
+
+ /* Found mux value to be written */
+ mval = mux->muxval;
+
+ spin_lock_irqsave(&zpctl->lock, flags);
+
+ if (data->aon_pin) {
+ /*
+ * It's an AON pin, whose mux register offset and bit position
+		 * can be calculated from the pin number. Each register covers 16
+ * pins, and each pin occupies 2 bits.
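+		 * For example, a hypothetical pin number 35 maps to AON
+		 * register offset (35 / 16) * 4 = 8 and bit position
+		 * (35 % 16) * 2 = 6.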
+ */
+ u16 aoffset = pindesc->number / 16 * 4;
+ u16 abitpos = (pindesc->number % 16) * 2;
+
+ if (mval & AON_MUX_FLAG) {
+ /*
+ * This is a mux value that needs to be written into
+ * AON pinmux register. Write it and then we're done.
+ */
+ val = readl(zpctl->aux_base + aoffset);
+ val &= ~(0x3 << abitpos);
+ val |= (mval & 0x3) << abitpos;
+ writel(val, zpctl->aux_base + aoffset);
+ } else {
+ /*
+ * It's a mux value that needs to be written into TOP
+ * pinmux register.
+ */
+ val = readl(zpctl->base + offset);
+ val &= ~(mask << bitpos);
+ val |= (mval & mask) << bitpos;
+ writel(val, zpctl->base + offset);
+
+ /*
+ * In this case, the AON pinmux register needs to be
+ * set up to select non-AON function.
+ */
+ val = readl(zpctl->aux_base + aoffset);
+ val &= ~(0x3 << abitpos);
+ val |= NONAON_MVAL << abitpos;
+ writel(val, zpctl->aux_base + aoffset);
+ }
+
+ } else {
+ /*
+ * This is a TOP pin, and we only need to set up TOP pinmux
+ * register and then we're done with it.
+ */
+ val = readl(zpctl->base + offset);
+ val &= ~(mask << bitpos);
+ val |= (mval & mask) << bitpos;
+ writel(val, zpctl->base + offset);
+ }
+
+ spin_unlock_irqrestore(&zpctl->lock, flags);
+
+ return 0;
+}
+
+static const struct pinmux_ops zx_pinmux_ops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = zx_set_mux,
+};
+
+static int zx_pin_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct zx_pinctrl *zpctl = pinctrl_dev_get_drvdata(pctldev);
+ struct zx_pinctrl_soc_info *info = zpctl->info;
+ const struct pinctrl_pin_desc *pindesc = info->pins + pin;
+ struct zx_pin_data *data = pindesc->drv_data;
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ u32 val;
+
+ /* Skip reserved pin */
+ if (!data)
+ return -EINVAL;
+
+ val = readl(zpctl->aux_base + data->coffset);
+ val = val >> data->cbitpos;
+
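+	/*
+	 * For the boolean parameters below, returning -EINVAL tells the
+	 * generic pinconf core that the option is simply not set on this
+	 * pin, rather than reporting a hard error.
+	 */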
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ val &= ZX_PULL_DOWN;
+ val = !!val;
+ if (val == 0)
+ return -EINVAL;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ val &= ZX_PULL_UP;
+ val = !!val;
+ if (val == 0)
+ return -EINVAL;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ val &= ZX_INPUT_ENABLE;
+ val = !!val;
+ if (val == 0)
+ return -EINVAL;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ val &= ZX_DS_MASK;
+ val = val >> ZX_DS_SHIFT;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ val &= ZX_SLEW;
+ val = !!val;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, val);
+
+ return 0;
+}
+
+static int zx_pin_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct zx_pinctrl *zpctl = pinctrl_dev_get_drvdata(pctldev);
+ struct zx_pinctrl_soc_info *info = zpctl->info;
+ const struct pinctrl_pin_desc *pindesc = info->pins + pin;
+ struct zx_pin_data *data = pindesc->drv_data;
+ enum pin_config_param param;
+ u32 val, arg;
+ int i;
+
+ /* Skip reserved pin */
+ if (!data)
+ return -EINVAL;
+
+ val = readl(zpctl->aux_base + data->coffset);
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ val |= ZX_PULL_DOWN << data->cbitpos;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ val |= ZX_PULL_UP << data->cbitpos;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ val |= ZX_INPUT_ENABLE << data->cbitpos;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ val &= ~(ZX_DS_MASK << data->cbitpos);
+ val |= ZX_DS_VALUE(arg) << data->cbitpos;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ if (arg)
+ val |= ZX_SLEW << data->cbitpos;
+ else
+				val &= ~(ZX_SLEW << data->cbitpos);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+
+ writel(val, zpctl->aux_base + data->coffset);
+ return 0;
+}
+
+static const struct pinconf_ops zx_pinconf_ops = {
+ .pin_config_set = zx_pin_config_set,
+ .pin_config_get = zx_pin_config_get,
+ .is_generic = true,
+};
+
+static int zx_pinctrl_build_state(struct platform_device *pdev)
+{
+ struct zx_pinctrl *zpctl = platform_get_drvdata(pdev);
+ struct zx_pinctrl_soc_info *info = zpctl->info;
+ struct pinctrl_dev *pctldev = zpctl->pctldev;
+ struct function_desc *functions;
+ int nfunctions;
+ struct group_desc *groups;
+ int ngroups;
+ int i;
+
+ /* Every single pin composes a group */
+ ngroups = info->npins;
+ groups = devm_kzalloc(&pdev->dev, ngroups * sizeof(*groups),
+ GFP_KERNEL);
+ if (!groups)
+ return -ENOMEM;
+
+ for (i = 0; i < ngroups; i++) {
+ const struct pinctrl_pin_desc *pindesc = info->pins + i;
+ struct group_desc *group = groups + i;
+
+ group->name = pindesc->name;
+ group->pins = (int *) &pindesc->number;
+ group->num_pins = 1;
+ radix_tree_insert(&pctldev->pin_group_tree, i, group);
+ }
+
+ pctldev->num_groups = ngroups;
+
+ /* Build function list from pin mux functions */
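+	/*
+	 * First pass: discover the distinct function names and count how
+	 * many groups (pins) each of them spans; the second pass further
+	 * below allocates and fills the per-function group_names arrays.
+	 */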
+ functions = devm_kzalloc(&pdev->dev, info->npins * sizeof(*functions),
+ GFP_KERNEL);
+ if (!functions)
+ return -ENOMEM;
+
+ nfunctions = 0;
+ for (i = 0; i < info->npins; i++) {
+ const struct pinctrl_pin_desc *pindesc = info->pins + i;
+ struct zx_pin_data *data = pindesc->drv_data;
+ struct zx_mux_desc *mux;
+
+ /* Reserved pins do not have a drv_data at all */
+ if (!data)
+ continue;
+
+ /* Loop over all muxes for the pin */
+ mux = data->muxes;
+ while (mux->name) {
+ struct function_desc *func = functions;
+
+ /* Search function list for given mux */
+ while (func->name) {
+ if (strcmp(mux->name, func->name) == 0) {
+ /* Function exists */
+ func->num_group_names++;
+ break;
+ }
+ func++;
+ }
+
+ if (!func->name) {
+ /* New function */
+ func->name = mux->name;
+ func->num_group_names = 1;
+ radix_tree_insert(&pctldev->pin_function_tree,
+ nfunctions++, func);
+ }
+
+ mux++;
+ }
+ }
+
+ pctldev->num_functions = nfunctions;
+ functions = krealloc(functions, nfunctions * sizeof(*functions),
+ GFP_KERNEL);
+
+ /* Find pin groups for every single function */
+ for (i = 0; i < info->npins; i++) {
+ const struct pinctrl_pin_desc *pindesc = info->pins + i;
+ struct zx_pin_data *data = pindesc->drv_data;
+ struct zx_mux_desc *mux;
+
+ if (!data)
+ continue;
+
+ mux = data->muxes;
+ while (mux->name) {
+ struct function_desc *func;
+ const char **group;
+ int j;
+
+ /* Find function for given mux */
+ for (j = 0; j < nfunctions; j++)
+ if (strcmp(functions[j].name, mux->name) == 0)
+ break;
+
+ func = functions + j;
+ if (!func->group_names) {
+ func->group_names = devm_kzalloc(&pdev->dev,
+ func->num_group_names *
+ sizeof(*func->group_names),
+ GFP_KERNEL);
+ if (!func->group_names)
+ return -ENOMEM;
+ }
+
+ group = func->group_names;
+ while (*group)
+ group++;
+ *group = pindesc->name;
+
+ mux++;
+ }
+ }
+
+ return 0;
+}
+
+int zx_pinctrl_init(struct platform_device *pdev,
+ struct zx_pinctrl_soc_info *info)
+{
+ struct pinctrl_desc *pctldesc;
+ struct zx_pinctrl *zpctl;
+ struct device_node *np;
+ struct resource *res;
+ int ret;
+
+ zpctl = devm_kzalloc(&pdev->dev, sizeof(*zpctl), GFP_KERNEL);
+ if (!zpctl)
+ return -ENOMEM;
+
+ spin_lock_init(&zpctl->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ zpctl->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(zpctl->base))
+ return PTR_ERR(zpctl->base);
+
+ np = of_parse_phandle(pdev->dev.of_node, "zte,auxiliary-controller", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "failed to find auxiliary controller\n");
+ return -ENODEV;
+ }
+
+ zpctl->aux_base = of_iomap(np, 0);
+ if (!zpctl->aux_base)
+ return -ENOMEM;
+
+ zpctl->dev = &pdev->dev;
+ zpctl->info = info;
+
+ pctldesc = devm_kzalloc(&pdev->dev, sizeof(*pctldesc), GFP_KERNEL);
+ if (!pctldesc)
+ return -ENOMEM;
+
+ pctldesc->name = dev_name(&pdev->dev);
+ pctldesc->owner = THIS_MODULE;
+ pctldesc->pins = info->pins;
+ pctldesc->npins = info->npins;
+ pctldesc->pctlops = &zx_pinctrl_ops;
+ pctldesc->pmxops = &zx_pinmux_ops;
+ pctldesc->confops = &zx_pinconf_ops;
+
+ zpctl->pctldev = devm_pinctrl_register(&pdev->dev, pctldesc, zpctl);
+ if (IS_ERR(zpctl->pctldev)) {
+ ret = PTR_ERR(zpctl->pctldev);
+ dev_err(&pdev->dev, "failed to register pinctrl: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, zpctl);
+
+ ret = zx_pinctrl_build_state(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to build state: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "initialized pinctrl driver\n");
+ return 0;
+}
diff --git a/drivers/pinctrl/zte/pinctrl-zx.h b/drivers/pinctrl/zte/pinctrl-zx.h
new file mode 100644
index 000000000000..bc67e2be0503
--- /dev/null
+++ b/drivers/pinctrl/zte/pinctrl-zx.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2017 Sanechips Technology Co., Ltd.
+ * Copyright 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __PINCTRL_ZX_H
+#define __PINCTRL_ZX_H
+
+/**
+ * struct zx_mux_desc - hardware mux descriptor
+ * @name: mux function name
+ * @muxval: mux register bit value
+ */
+struct zx_mux_desc {
+ const char *name;
+ u8 muxval;
+};
+
+/**
+ * struct zx_pin_data - hardware per-pin data
+ * @aon_pin: whether it's an AON pin
+ * @offset: register offset within TOP pinmux controller
+ * @bitpos: bit position within TOP pinmux register
+ * @width: bit width within TOP pinmux register
+ * @coffset: pinconf register offset within AON controller
+ * @cbitpos: pinconf bit position within AON register
+ * @muxes: available mux function names and corresponding register values
+ *
+ * Unlike the TOP pinmux and AON pinconf registers, which are arranged fairly
+ * arbitrarily, the AON pinmux register bits are organized per pin id, with
+ * each pin occupying two bits, so the AON register offset and bit position
+ * can be calculated from the pin id. Thus, only the TOP pinmux and AON
+ * pinconf register data need to be defined for each pin.
+ */
+struct zx_pin_data {
+ bool aon_pin;
+ u16 offset;
+ u16 bitpos;
+ u16 width;
+ u16 coffset;
+ u16 cbitpos;
+ struct zx_mux_desc *muxes;
+};
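+
+/*
+ * Illustrative sketch only, not part of this patch: because the AON pinmux
+ * bits are laid out consecutively by pin id, two bits per pin, the AON
+ * pinmux register offset and bit position can be derived from the pin
+ * number alone.  The 16-pins-per-32-bit-register layout and the zero base
+ * offset used below are assumptions made purely for illustration.
+ */
+static inline void zx_aon_pinmux_pos_sketch(unsigned int pin, u16 *offset,
+ u16 *bitpos)
+{
+ *offset = pin / 16 * 4; /* 16 two-bit fields per 32-bit register */
+ *bitpos = pin % 16 * 2; /* each pin occupies two bits */
+}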
+
+struct zx_pinctrl_soc_info {
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+};
+
+#define TOP_PIN(pin, off, bp, wd, coff, cbp, ...) { \
+ .number = pin, \
+ .name = #pin, \
+ .drv_data = &(struct zx_pin_data) { \
+ .aon_pin = false, \
+ .offset = off, \
+ .bitpos = bp, \
+ .width = wd, \
+ .coffset = coff, \
+ .cbitpos = cbp, \
+ .muxes = (struct zx_mux_desc[]) { \
+ __VA_ARGS__, { } }, \
+ }, \
+}
+
+#define AON_PIN(pin, off, bp, wd, coff, cbp, ...) { \
+ .number = pin, \
+ .name = #pin, \
+ .drv_data = &(struct zx_pin_data) { \
+ .aon_pin = true, \
+ .offset = off, \
+ .bitpos = bp, \
+ .width = wd, \
+ .coffset = coff, \
+ .cbitpos = cbp, \
+ .muxes = (struct zx_mux_desc[]) { \
+ __VA_ARGS__, { } }, \
+ }, \
+}
+
+#define ZX_RESERVED(pin) PINCTRL_PIN(pin, #pin)
+
+#define TOP_MUX(_val, _name) { \
+ .name = _name, \
+ .muxval = _val, \
+}
+
+/*
+ * When the flag is set, the mux configuration for the AON pin sits in the
+ * AON register. Otherwise, it is a configuration for an AON pin that sits
+ * in the TOP register.
+ */
+#define AON_MUX_FLAG BIT(7)
+
+#define AON_MUX(_val, _name) { \
+ .name = _name, \
+ .muxval = _val | AON_MUX_FLAG, \
+}
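+
+/*
+ * Illustrative sketch only, not part of this patch: helpers like the ones
+ * below (the names are hypothetical) could be used to tell whether a mux
+ * value must be programmed through the AON register rather than the TOP
+ * register, and to recover the raw register value.
+ */
+static inline bool zx_mux_needs_aon_reg_sketch(u8 muxval)
+{
+ return muxval & AON_MUX_FLAG;
+}
+
+static inline u8 zx_mux_regval_sketch(u8 muxval)
+{
+ return muxval & ~AON_MUX_FLAG;
+}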
+
+int zx_pinctrl_init(struct platform_device *pdev,
+ struct zx_pinctrl_soc_info *info);
+
+#endif /* __PINCTRL_ZX_H */
diff --git a/drivers/pinctrl/zte/pinctrl-zx296718.c b/drivers/pinctrl/zte/pinctrl-zx296718.c
new file mode 100644
index 000000000000..71efec17ee7e
--- /dev/null
+++ b/drivers/pinctrl/zte/pinctrl-zx296718.c
@@ -0,0 +1,1027 @@
+/*
+ * Copyright (C) 2017 Sanechips Technology Co., Ltd.
+ * Copyright 2017 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-zx.h"
+
+#define TOP_REG0 0x00
+#define TOP_REG1 0x04
+#define TOP_REG2 0x08
+#define TOP_REG3 0x0c
+#define TOP_REG4 0x10
+#define TOP_REG5 0x14
+#define TOP_REG6 0x18
+#define TOP_REG7 0x1c
+#define TOP_REG8 0x20
+
+/*
+ * The pin numbering starts from AON pins with reserved ones included,
+ * so that register data like offset and bit position for AON pins can
+ * be calculated from pin number.
+ */
+enum zx296718_pin {
+ /* aon_pmm_reg_0 */
+ I2C3_SCL = 0,
+ I2C3_SDA = 1,
+ AON_RESERVED0 = 2,
+ AON_RESERVED1 = 3,
+ SEC_EN = 4,
+ UART0_RXD = 5,
+ UART0_TXD = 6,
+ IR_IN = 7,
+ SPI0_CLK = 8,
+ SPI0_CS = 9,
+ SPI0_TXD = 10,
+ SPI0_RXD = 11,
+ KEY_COL0 = 12,
+ KEY_COL1 = 13,
+ KEY_COL2 = 14,
+ KEY_ROW0 = 15,
+
+ /* aon_pmm_reg_1 */
+ KEY_ROW1 = 16,
+ KEY_ROW2 = 17,
+ HDMI_SCL = 18,
+ HDMI_SDA = 19,
+ JTAG_TCK = 20,
+ JTAG_TRSTN = 21,
+ JTAG_TMS = 22,
+ JTAG_TDI = 23,
+ JTAG_TDO = 24,
+ I2C0_SCL = 25,
+ I2C0_SDA = 26,
+ I2C1_SCL = 27,
+ I2C1_SDA = 28,
+ AON_RESERVED2 = 29,
+ AON_RESERVED3 = 30,
+ AON_RESERVED4 = 31,
+
+ /* aon_pmm_reg_2 */
+ SPI1_CLK = 32,
+ SPI1_CS = 33,
+ SPI1_TXD = 34,
+ SPI1_RXD = 35,
+ AON_RESERVED5 = 36,
+ AON_RESERVED6 = 37,
+ AUDIO_DET = 38,
+ SPDIF_OUT = 39,
+ HDMI_CEC = 40,
+ HDMI_HPD = 41,
+ GMAC_25M_OUT = 42,
+ BOOT_SEL0 = 43,
+ BOOT_SEL1 = 44,
+ BOOT_SEL2 = 45,
+ DEEP_SLEEP_OUT_N = 46,
+ AON_RESERVED7 = 47,
+
+ /* top_pmm_reg_0 */
+ GMII_GTX_CLK = 48,
+ GMII_TX_CLK = 49,
+ GMII_TXD0 = 50,
+ GMII_TXD1 = 51,
+ GMII_TXD2 = 52,
+ GMII_TXD3 = 53,
+ GMII_TXD4 = 54,
+ GMII_TXD5 = 55,
+ GMII_TXD6 = 56,
+ GMII_TXD7 = 57,
+ GMII_TX_ER = 58,
+ GMII_TX_EN = 59,
+ GMII_RX_CLK = 60,
+ GMII_RXD0 = 61,
+ GMII_RXD1 = 62,
+ GMII_RXD2 = 63,
+
+ /* top_pmm_reg_1 */
+ GMII_RXD3 = 64,
+ GMII_RXD4 = 65,
+ GMII_RXD5 = 66,
+ GMII_RXD6 = 67,
+ GMII_RXD7 = 68,
+ GMII_RX_ER = 69,
+ GMII_RX_DV = 70,
+ GMII_COL = 71,
+ GMII_CRS = 72,
+ GMII_MDC = 73,
+ GMII_MDIO = 74,
+ SDIO1_CLK = 75,
+ SDIO1_CMD = 76,
+ SDIO1_DATA0 = 77,
+ SDIO1_DATA1 = 78,
+ SDIO1_DATA2 = 79,
+
+ /* top_pmm_reg_2 */
+ SDIO1_DATA3 = 80,
+ SDIO1_CD = 81,
+ SDIO1_WP = 82,
+ USIM1_CD = 83,
+ USIM1_CLK = 84,
+ USIM1_RST = 85,
+
+ /* top_pmm_reg_3 */
+ USIM1_DATA = 86,
+ SDIO0_CLK = 87,
+ SDIO0_CMD = 88,
+ SDIO0_DATA0 = 89,
+ SDIO0_DATA1 = 90,
+ SDIO0_DATA2 = 91,
+ SDIO0_DATA3 = 92,
+ SDIO0_CD = 93,
+ SDIO0_WP = 94,
+
+ /* top_pmm_reg_4 */
+ TSI0_DATA0 = 95,
+ SPINOR_CLK = 96,
+ TSI2_DATA = 97,
+ TSI2_CLK = 98,
+ TSI2_SYNC = 99,
+ TSI2_VALID = 100,
+ SPINOR_CS = 101,
+ SPINOR_DQ0 = 102,
+ SPINOR_DQ1 = 103,
+ SPINOR_DQ2 = 104,
+ SPINOR_DQ3 = 105,
+ VGA_HS = 106,
+ VGA_VS = 107,
+ TSI3_DATA = 108,
+
+ /* top_pmm_reg_5 */
+ TSI3_CLK = 109,
+ TSI3_SYNC = 110,
+ TSI3_VALID = 111,
+ I2S1_WS = 112,
+ I2S1_BCLK = 113,
+ I2S1_MCLK = 114,
+ I2S1_DIN0 = 115,
+ I2S1_DOUT0 = 116,
+ SPI3_CLK = 117,
+ SPI3_CS = 118,
+ SPI3_TXD = 119,
+ NAND_LDO_MS18_SEL = 120,
+
+ /* top_pmm_reg_6 */
+ SPI3_RXD = 121,
+ I2S0_MCLK = 122,
+ I2S0_BCLK = 123,
+ I2S0_WS = 124,
+ I2S0_DIN0 = 125,
+ I2S0_DOUT0 = 126,
+ I2C5_SCL = 127,
+ I2C5_SDA = 128,
+ SPI2_CLK = 129,
+ SPI2_CS = 130,
+ SPI2_TXD = 131,
+
+ /* top_pmm_reg_7 */
+ SPI2_RXD = 132,
+ NAND_WP_N = 133,
+ NAND_PAGE_SIZE0 = 134,
+ NAND_PAGE_SIZE1 = 135,
+ NAND_ADDR_CYCLE = 136,
+ NAND_RB0 = 137,
+ NAND_RB1 = 138,
+ NAND_RB2 = 139,
+ NAND_RB3 = 140,
+
+ /* top_pmm_reg_8 */
+ GMAC_125M_IN = 141,
+ GMAC_50M_OUT = 142,
+ SPINOR_SSCLK_LOOPBACK = 143,
+ SPINOR_SDIO1CLK_LOOPBACK = 144,
+};
+
+static const struct pinctrl_pin_desc zx296718_pins[] = {
+ /* aon_pmm_reg_0 */
+ AON_PIN(I2C3_SCL, TOP_REG2, 18, 2, 0x48, 0,
+ AON_MUX(0x0, "ANMI"), /* anmi */
+ AON_MUX(0x1, "AGPIO"), /* agpio29 */
+ AON_MUX(0x2, "nonAON"), /* pin0 */
+ AON_MUX(0x3, "EXT_INT"), /* int4 */
+ TOP_MUX(0x0, "I2C3"), /* scl */
+ TOP_MUX(0x1, "SPI2"), /* txd */
+ TOP_MUX(0x2, "I2S1")), /* din0 */
+ AON_PIN(I2C3_SDA, TOP_REG2, 20, 2, 0x48, 9,
+ AON_MUX(0x0, "WD"), /* rst_b */
+ AON_MUX(0x1, "AGPIO"), /* agpio30 */
+ AON_MUX(0x2, "nonAON"), /* pin1 */
+ AON_MUX(0x3, "EXT_INT"), /* int5 */
+ TOP_MUX(0x0, "I2C3"), /* sda */
+ TOP_MUX(0x1, "SPI2"), /* rxd */
+ TOP_MUX(0x2, "I2S0")), /* mclk */
+ ZX_RESERVED(AON_RESERVED0),
+ ZX_RESERVED(AON_RESERVED1),
+ AON_PIN(SEC_EN, TOP_REG3, 5, 1, 0x50, 0,
+ AON_MUX(0x0, "SEC"), /* en */
+ AON_MUX(0x1, "AGPIO"), /* agpio28 */
+ AON_MUX(0x2, "nonAON"), /* pin3 */
+ AON_MUX(0x3, "EXT_INT"), /* int7 */
+ TOP_MUX(0x0, "I2C2"), /* sda */
+ TOP_MUX(0x1, "SPI2")), /* cs */
+ AON_PIN(UART0_RXD, 0, 0, 0, 0x50, 9,
+ AON_MUX(0x0, "UART0"), /* rxd */
+ AON_MUX(0x1, "AGPIO"), /* agpio20 */
+ AON_MUX(0x2, "nonAON")), /* pin34 */
+ AON_PIN(UART0_TXD, 0, 0, 0, 0x50, 18,
+ AON_MUX(0x0, "UART0"), /* txd */
+ AON_MUX(0x1, "AGPIO"), /* agpio21 */
+ AON_MUX(0x2, "nonAON")), /* pin32 */
+ AON_PIN(IR_IN, 0, 0, 0, 0x64, 0,
+ AON_MUX(0x0, "IR"), /* in */
+ AON_MUX(0x1, "AGPIO"), /* agpio0 */
+ AON_MUX(0x2, "nonAON")), /* pin27 */
+ AON_PIN(SPI0_CLK, TOP_REG3, 16, 1, 0x64, 9,
+ AON_MUX(0x0, "EXT_INT"), /* int0 */
+ AON_MUX(0x1, "AGPIO"), /* agpio23 */
+ AON_MUX(0x2, "nonAON"), /* pin5 */
+ AON_MUX(0x3, "PCU"), /* test6 */
+ TOP_MUX(0x0, "SPI0"), /* clk */
+ TOP_MUX(0x1, "ISP")), /* flash_trig */
+ AON_PIN(SPI0_CS, TOP_REG3, 17, 1, 0x64, 18,
+ AON_MUX(0x0, "EXT_INT"), /* int1 */
+ AON_MUX(0x1, "AGPIO"), /* agpio24 */
+ AON_MUX(0x2, "nonAON"), /* pin6 */
+ AON_MUX(0x3, "PCU"), /* test0 */
+ TOP_MUX(0x0, "SPI0"), /* cs */
+ TOP_MUX(0x1, "ISP")), /* prelight_trig */
+ AON_PIN(SPI0_TXD, TOP_REG3, 18, 1, 0x68, 0,
+ AON_MUX(0x0, "EXT_INT"), /* int2 */
+ AON_MUX(0x1, "AGPIO"), /* agpio25 */
+ AON_MUX(0x2, "nonAON"), /* pin7 */
+ AON_MUX(0x3, "PCU"), /* test1 */
+ TOP_MUX(0x0, "SPI0"), /* txd */
+ TOP_MUX(0x1, "ISP")), /* shutter_trig */
+ AON_PIN(SPI0_RXD, TOP_REG3, 19, 1, 0x68, 9,
+ AON_MUX(0x0, "EXT_INT"), /* int3 */
+ AON_MUX(0x1, "AGPIO"), /* agpio26 */
+ AON_MUX(0x2, "nonAON"), /* pin8 */
+ AON_MUX(0x3, "PCU"), /* test2 */
+ TOP_MUX(0x0, "SPI0"), /* rxd */
+ TOP_MUX(0x1, "ISP")), /* shutter_open */
+ AON_PIN(KEY_COL0, TOP_REG3, 20, 1, 0x68, 18,
+ AON_MUX(0x0, "KEY"), /* col0 */
+ AON_MUX(0x1, "AGPIO"), /* agpio5 */
+ AON_MUX(0x2, "nonAON"), /* pin9 */
+ AON_MUX(0x3, "PCU"), /* test3 */
+ TOP_MUX(0x0, "UART3"), /* rxd */
+ TOP_MUX(0x1, "I2S0")), /* din1 */
+ AON_PIN(KEY_COL1, TOP_REG3, 21, 2, 0x6c, 0,
+ AON_MUX(0x0, "KEY"), /* col1 */
+ AON_MUX(0x1, "AGPIO"), /* agpio6 */
+ AON_MUX(0x2, "nonAON"), /* pin10 */
+ TOP_MUX(0x0, "UART3"), /* txd */
+ TOP_MUX(0x1, "I2S0"), /* din2 */
+ TOP_MUX(0x2, "VGA")), /* scl */
+ AON_PIN(KEY_COL2, TOP_REG3, 23, 2, 0x6c, 9,
+ AON_MUX(0x0, "KEY"), /* col2 */
+ AON_MUX(0x1, "AGPIO"), /* agpio7 */
+ AON_MUX(0x2, "nonAON"), /* pin11 */
+ TOP_MUX(0x0, "PWM"), /* out1 */
+ TOP_MUX(0x1, "I2S0"), /* din3 */
+ TOP_MUX(0x2, "VGA")), /* sda */
+ AON_PIN(KEY_ROW0, 0, 0, 0, 0x6c, 18,
+ AON_MUX(0x0, "KEY"), /* row0 */
+ AON_MUX(0x1, "AGPIO"), /* agpio8 */
+ AON_MUX(0x2, "nonAON"), /* pin33 */
+ AON_MUX(0x3, "WD")), /* rst_b */
+
+ /* aon_pmm_reg_1 */
+ AON_PIN(KEY_ROW1, TOP_REG3, 25, 2, 0x70, 0,
+ AON_MUX(0x0, "KEY"), /* row1 */
+ AON_MUX(0x1, "AGPIO"), /* agpio9 */
+ AON_MUX(0x2, "nonAON"), /* pin12 */
+ TOP_MUX(0x0, "LCD"), /* port0 lcd_te */
+ TOP_MUX(0x1, "I2S0"), /* dout2 */
+ TOP_MUX(0x2, "PWM"), /* out2 */
+ TOP_MUX(0x3, "VGA")), /* hs1 */
+ AON_PIN(KEY_ROW2, TOP_REG3, 27, 2, 0x70, 9,
+ AON_MUX(0x0, "KEY"), /* row2 */
+ AON_MUX(0x1, "AGPIO"), /* agpio10 */
+ AON_MUX(0x2, "nonAON"), /* pin13 */
+ TOP_MUX(0x0, "LCD"), /* port1 lcd_te */
+ TOP_MUX(0x1, "I2S0"), /* dout3 */
+ TOP_MUX(0x2, "PWM"), /* out3 */
+ TOP_MUX(0x3, "VGA")), /* vs1 */
+ AON_PIN(HDMI_SCL, TOP_REG3, 29, 1, 0x70, 18,
+ AON_MUX(0x0, "PCU"), /* test7 */
+ AON_MUX(0x1, "AGPIO"), /* agpio3 */
+ AON_MUX(0x2, "nonAON"), /* pin14 */
+ TOP_MUX(0x0, "HDMI"), /* scl */
+ TOP_MUX(0x1, "UART3")), /* rxd */
+ AON_PIN(HDMI_SDA, TOP_REG3, 30, 1, 0x74, 0,
+ AON_MUX(0x0, "PCU"), /* test8 */
+ AON_MUX(0x1, "AGPIO"), /* agpio4 */
+ AON_MUX(0x2, "nonAON"), /* pin15 */
+ TOP_MUX(0x0, "HDMI"), /* sda */
+ TOP_MUX(0x1, "UART3")), /* txd */
+ AON_PIN(JTAG_TCK, TOP_REG7, 3, 1, 0x78, 18,
+ AON_MUX(0x0, "JTAG"), /* tck */
+ AON_MUX(0x1, "AGPIO"), /* agpio11 */
+ AON_MUX(0x2, "nonAON"), /* pin22 */
+ AON_MUX(0x3, "EXT_INT"), /* int4 */
+ TOP_MUX(0x0, "SPI4"), /* clk */
+ TOP_MUX(0x1, "UART1")), /* rxd */
+ AON_PIN(JTAG_TRSTN, TOP_REG7, 4, 1, 0xac, 0,
+ AON_MUX(0x0, "JTAG"), /* trstn */
+ AON_MUX(0x1, "AGPIO"), /* agpio12 */
+ AON_MUX(0x2, "nonAON"), /* pin23 */
+ AON_MUX(0x3, "EXT_INT"), /* int5 */
+ TOP_MUX(0x0, "SPI4"), /* cs */
+ TOP_MUX(0x1, "UART1")), /* txd */
+ AON_PIN(JTAG_TMS, TOP_REG7, 5, 1, 0xac, 9,
+ AON_MUX(0x0, "JTAG"), /* tms */
+ AON_MUX(0x1, "AGPIO"), /* agpio13 */
+ AON_MUX(0x2, "nonAON"), /* pin24 */
+ AON_MUX(0x3, "EXT_INT"), /* int6 */
+ TOP_MUX(0x0, "SPI4"), /* txd */
+ TOP_MUX(0x1, "UART2")), /* rxd */
+ AON_PIN(JTAG_TDI, TOP_REG7, 6, 1, 0xac, 18,
+ AON_MUX(0x0, "JTAG"), /* tdi */
+ AON_MUX(0x1, "AGPIO"), /* agpio14 */
+ AON_MUX(0x2, "nonAON"), /* pin25 */
+ AON_MUX(0x3, "EXT_INT"), /* int7 */
+ TOP_MUX(0x0, "SPI4"), /* rxd */
+ TOP_MUX(0x1, "UART2")), /* txd */
+ AON_PIN(JTAG_TDO, 0, 0, 0, 0xb0, 0,
+ AON_MUX(0x0, "JTAG"), /* tdo */
+ AON_MUX(0x1, "AGPIO"), /* agpio15 */
+ AON_MUX(0x2, "nonAON")), /* pin26 */
+ AON_PIN(I2C0_SCL, 0, 0, 0, 0xb0, 9,
+ AON_MUX(0x0, "I2C0"), /* scl */
+ AON_MUX(0x1, "AGPIO"), /* agpio16 */
+ AON_MUX(0x2, "nonAON")), /* pin28 */
+ AON_PIN(I2C0_SDA, 0, 0, 0, 0xb0, 18,
+ AON_MUX(0x0, "I2C0"), /* sda */
+ AON_MUX(0x1, "AGPIO"), /* agpio17 */
+ AON_MUX(0x2, "nonAON")), /* pin29 */
+ AON_PIN(I2C1_SCL, TOP_REG8, 4, 1, 0xb4, 0,
+ AON_MUX(0x0, "I2C1"), /* scl */
+ AON_MUX(0x1, "AGPIO"), /* agpio18 */
+ AON_MUX(0x2, "nonAON"), /* pin30 */
+ TOP_MUX(0x0, "LCD")), /* port0 lcd_te */
+ AON_PIN(I2C1_SDA, TOP_REG8, 5, 1, 0xb4, 9,
+ AON_MUX(0x0, "I2C1"), /* sda */
+ AON_MUX(0x1, "AGPIO"), /* agpio19 */
+ AON_MUX(0x2, "nonAON"), /* pin31 */
+ TOP_MUX(0x0, "LCD")), /* port1 lcd_te */
+ ZX_RESERVED(AON_RESERVED2),
+ ZX_RESERVED(AON_RESERVED3),
+ ZX_RESERVED(AON_RESERVED4),
+
+ /* aon_pmm_reg_2 */
+ AON_PIN(SPI1_CLK, TOP_REG2, 6, 3, 0x40, 9,
+ AON_MUX(0x0, "EXT_INT"), /* int0 */
+ AON_MUX(0x1, "PCU"), /* test12 */
+ AON_MUX(0x2, "nonAON"), /* pin39 */
+ TOP_MUX(0x0, "SPI1"), /* clk */
+ TOP_MUX(0x1, "PCM"), /* clk */
+ TOP_MUX(0x2, "BGPIO"), /* gpio35 */
+ TOP_MUX(0x3, "I2C4"), /* scl */
+ TOP_MUX(0x4, "I2S1"), /* mclk */
+ TOP_MUX(0x5, "ISP")), /* flash_trig */
+ AON_PIN(SPI1_CS, TOP_REG2, 9, 3, 0x40, 18,
+ AON_MUX(0x0, "EXT_INT"), /* int1 */
+ AON_MUX(0x1, "PCU"), /* test13 */
+ AON_MUX(0x2, "nonAON"), /* pin40 */
+ TOP_MUX(0x0, "SPI1"), /* cs */
+ TOP_MUX(0x1, "PCM"), /* fs */
+ TOP_MUX(0x2, "BGPIO"), /* gpio36 */
+ TOP_MUX(0x3, "I2C4"), /* sda */
+ TOP_MUX(0x4, "I2S1"), /* bclk */
+ TOP_MUX(0x5, "ISP")), /* prelight_trig */
+ AON_PIN(SPI1_TXD, TOP_REG2, 12, 3, 0x44, 0,
+ AON_MUX(0x0, "EXT_INT"), /* int2 */
+ AON_MUX(0x1, "PCU"), /* test14 */
+ AON_MUX(0x2, "nonAON"), /* pin41 */
+ TOP_MUX(0x0, "SPI1"), /* txd */
+ TOP_MUX(0x1, "PCM"), /* txd */
+ TOP_MUX(0x2, "BGPIO"), /* gpio37 */
+ TOP_MUX(0x3, "UART5"), /* rxd */
+ TOP_MUX(0x4, "I2S1"), /* ws */
+ TOP_MUX(0x5, "ISP")), /* shutter_trig */
+ AON_PIN(SPI1_RXD, TOP_REG2, 15, 3, 0x44, 9,
+ AON_MUX(0x0, "EXT_INT"), /* int3 */
+ AON_MUX(0x1, "PCU"), /* test15 */
+ AON_MUX(0x2, "nonAON"), /* pin42 */
+ TOP_MUX(0x0, "SPI1"), /* rxd */
+ TOP_MUX(0x1, "PCM"), /* rxd */
+ TOP_MUX(0x2, "BGPIO"), /* gpio38 */
+ TOP_MUX(0x3, "UART5"), /* txd */
+ TOP_MUX(0x4, "I2S1"), /* dout0 */
+ TOP_MUX(0x5, "ISP")), /* shutter_open */
+ ZX_RESERVED(AON_RESERVED5),
+ ZX_RESERVED(AON_RESERVED6),
+ AON_PIN(AUDIO_DET, TOP_REG3, 3, 2, 0x48, 18,
+ AON_MUX(0x0, "PCU"), /* test4 */
+ AON_MUX(0x1, "AGPIO"), /* agpio27 */
+ AON_MUX(0x2, "nonAON"), /* pin2 */
+ AON_MUX(0x3, "EXT_INT"), /* int16 */
+ TOP_MUX(0x0, "AUDIO"), /* detect */
+ TOP_MUX(0x1, "I2C2"), /* scl */
+ TOP_MUX(0x2, "SPI2")), /* clk */
+ AON_PIN(SPDIF_OUT, TOP_REG3, 14, 2, 0x78, 9,
+ AON_MUX(0x0, "PCU"), /* test5 */
+ AON_MUX(0x1, "AGPIO"), /* agpio22 */
+ AON_MUX(0x2, "nonAON"), /* pin4 */
+ TOP_MUX(0x0, "SPDIF"), /* out */
+ TOP_MUX(0x1, "PWM"), /* out0 */
+ TOP_MUX(0x2, "ISP")), /* fl_trig */
+ AON_PIN(HDMI_CEC, 0, 0, 0, 0x74, 9,
+ AON_MUX(0x0, "PCU"), /* test9 */
+ AON_MUX(0x1, "AGPIO"), /* agpio1 */
+ AON_MUX(0x2, "nonAON")), /* pin16 */
+ AON_PIN(HDMI_HPD, 0, 0, 0, 0x74, 18,
+ AON_MUX(0x0, "PCU"), /* test10 */
+ AON_MUX(0x1, "AGPIO"), /* agpio2 */
+ AON_MUX(0x2, "nonAON")), /* pin17 */
+ AON_PIN(GMAC_25M_OUT, 0, 0, 0, 0x78, 0,
+ AON_MUX(0x0, "PCU"), /* test11 */
+ AON_MUX(0x1, "AGPIO"), /* agpio31 */
+ AON_MUX(0x2, "nonAON")), /* pin43 */
+ AON_PIN(BOOT_SEL0, 0, 0, 0, 0xc0, 9,
+ AON_MUX(0x0, "BOOT"), /* sel0 */
+ AON_MUX(0x1, "AGPIO"), /* agpio18 */
+ AON_MUX(0x2, "nonAON")), /* pin18 */
+ AON_PIN(BOOT_SEL1, 0, 0, 0, 0xc0, 18,
+ AON_MUX(0x0, "BOOT"), /* sel1 */
+ AON_MUX(0x1, "AGPIO"), /* agpio19 */
+ AON_MUX(0x2, "nonAON")), /* pin19 */
+ AON_PIN(BOOT_SEL2, 0, 0, 0, 0xc4, 0,
+ AON_MUX(0x0, "BOOT"), /* sel2 */
+ AON_MUX(0x1, "AGPIO"), /* agpio20 */
+ AON_MUX(0x2, "nonAON")), /* pin20 */
+ AON_PIN(DEEP_SLEEP_OUT_N, 0, 0, 0, 0xc4, 9,
+ AON_MUX(0x0, "DEEPSLP"), /* deep sleep out_n */
+ AON_MUX(0x1, "AGPIO"), /* agpio21 */
+ AON_MUX(0x2, "nonAON")), /* pin21 */
+ ZX_RESERVED(AON_RESERVED7),
+
+ /* top_pmm_reg_0 */
+ TOP_PIN(GMII_GTX_CLK, TOP_REG0, 0, 2, 0x10, 0,
+ TOP_MUX(0x0, "GMII"), /* gtx_clk */
+ TOP_MUX(0x1, "DVI0"), /* clk */
+ TOP_MUX(0x2, "BGPIO")), /* gpio0 */
+ TOP_PIN(GMII_TX_CLK, TOP_REG0, 2, 2, 0x10, 9,
+ TOP_MUX(0x0, "GMII"), /* tx_clk */
+ TOP_MUX(0x1, "DVI0"), /* vs */
+ TOP_MUX(0x2, "BGPIO")), /* gpio1 */
+ TOP_PIN(GMII_TXD0, TOP_REG0, 4, 2, 0x10, 18,
+ TOP_MUX(0x0, "GMII"), /* txd0 */
+ TOP_MUX(0x1, "DVI0"), /* hs */
+ TOP_MUX(0x2, "BGPIO")), /* gpio2 */
+ TOP_PIN(GMII_TXD1, TOP_REG0, 6, 2, 0x14, 0,
+ TOP_MUX(0x0, "GMII"), /* txd1 */
+ TOP_MUX(0x1, "DVI0"), /* d0 */
+ TOP_MUX(0x2, "BGPIO")), /* gpio3 */
+ TOP_PIN(GMII_TXD2, TOP_REG0, 8, 2, 0x14, 9,
+ TOP_MUX(0x0, "GMII"), /* txd2 */
+ TOP_MUX(0x1, "DVI0"), /* d1 */
+ TOP_MUX(0x2, "BGPIO")), /* gpio4 */
+ TOP_PIN(GMII_TXD3, TOP_REG0, 10, 2, 0x14, 18,
+ TOP_MUX(0x0, "GMII"), /* txd3 */
+ TOP_MUX(0x1, "DVI0"), /* d2 */
+ TOP_MUX(0x2, "BGPIO")), /* gpio5 */
+ TOP_PIN(GMII_TXD4, TOP_REG0, 12, 2, 0x18, 0,
+ TOP_MUX(0x0, "GMII"), /* txd4 */
+ TOP_MUX(0x1, "DVI0"), /* d3 */
+ TOP_MUX(0x2, "BGPIO")), /* gpio6 */
+ TOP_PIN(GMII_TXD5, TOP_REG0, 14, 2, 0x18, 9,
+ TOP_MUX(0x0, "GMII"), /* txd5 */
+ TOP_MUX(0x1, "DVI0"), /* d4 */
+ TOP_MUX(0x2, "BGPIO")), /* gpio7 */
+ TOP_PIN(GMII_TXD6, TOP_REG0, 16, 2, 0x18, 18,
+ TOP_MUX(0x0, "GMII"), /* txd6 */
+ TOP_MUX(0x1, "DVI0"), /* d5 */
+ TOP_MUX(0x2, "BGPIO")), /* gpio8 */
+ TOP_PIN(GMII_TXD7, TOP_REG0, 18, 2, 0x1c, 0,
+ TOP_MUX(0x0, "GMII"), /* txd7 */
+ TOP_MUX(0x1, "DVI0"), /* d6 */
+ TOP_MUX(0x2, "BGPIO")), /* gpio9 */
+ TOP_PIN(GMII_TX_ER, TOP_REG0, 20, 2, 0x1c, 9,
+ TOP_MUX(0x0, "GMII"), /* tx_er */
+ TOP_MUX(0x1, "DVI0"), /* d7 */
+ TOP_MUX(0x2, "BGPIO")), /* gpio10 */
+ TOP_PIN(GMII_TX_EN, TOP_REG0, 22, 2, 0x1c, 18,
+ TOP_MUX(0x0, "GMII"), /* tx_en */
+ TOP_MUX(0x1, "DVI0"), /* d8 */
+ TOP_MUX(0x3, "BGPIO")), /* gpio11 */
+ TOP_PIN(GMII_RX_CLK, TOP_REG0, 24, 2, 0x20, 0,
+ TOP_MUX(0x0, "GMII"), /* rx_clk */
+ TOP_MUX(0x1, "DVI0"), /* d9 */
+ TOP_MUX(0x3, "BGPIO")), /* gpio12 */
+ TOP_PIN(GMII_RXD0, TOP_REG0, 26, 2, 0x20, 9,
+ TOP_MUX(0x0, "GMII"), /* rxd0 */
+ TOP_MUX(0x1, "DVI0"), /* d10 */
+ TOP_MUX(0x3, "BGPIO")), /* gpio13 */
+ TOP_PIN(GMII_RXD1, TOP_REG0, 28, 2, 0x20, 18,
+ TOP_MUX(0x0, "GMII"), /* rxd1 */
+ TOP_MUX(0x1, "DVI0"), /* d11 */
+ TOP_MUX(0x2, "BGPIO")), /* gpio14 */
+ TOP_PIN(GMII_RXD2, TOP_REG0, 30, 2, 0x24, 0,
+ TOP_MUX(0x0, "GMII"), /* rxd2 */
+ TOP_MUX(0x1, "DVI1"), /* clk */
+ TOP_MUX(0x2, "BGPIO")), /* gpio15 */
+
+ /* top_pmm_reg_1 */
+ TOP_PIN(GMII_RXD3, TOP_REG1, 0, 2, 0x24, 9,
+ TOP_MUX(0x0, "GMII"), /* rxd3 */
+ TOP_MUX(0x1, "DVI1"), /* hs */
+ TOP_MUX(0x2, "BGPIO")), /* gpio16 */
+ TOP_PIN(GMII_RXD4, TOP_REG1, 2, 2, 0x24, 18,
+ TOP_MUX(0x0, "GMII"), /* rxd4 */
+ TOP_MUX(0x1, "DVI1"), /* vs */
+ TOP_MUX(0x2, "BGPIO")), /* gpio17 */
+ TOP_PIN(GMII_RXD5, TOP_REG1, 4, 2, 0x28, 0,
+ TOP_MUX(0x0, "GMII"), /* rxd5 */
+ TOP_MUX(0x1, "DVI1"), /* d0 */
+ TOP_MUX(0x2, "BGPIO"), /* gpio18 */
+ TOP_MUX(0x3, "TSI0")), /* dat0 */
+ TOP_PIN(GMII_RXD6, TOP_REG1, 6, 2, 0x28, 9,
+ TOP_MUX(0x0, "GMII"), /* rxd6 */
+ TOP_MUX(0x1, "DVI1"), /* d1 */
+ TOP_MUX(0x2, "BGPIO"), /* gpio19 */
+ TOP_MUX(0x3, "TSI0")), /* clk */
+ TOP_PIN(GMII_RXD7, TOP_REG1, 8, 2, 0x28, 18,
+ TOP_MUX(0x0, "GMII"), /* rxd7 */
+ TOP_MUX(0x1, "DVI1"), /* d2 */
+ TOP_MUX(0x2, "BGPIO"), /* gpio20 */
+ TOP_MUX(0x3, "TSI0")), /* sync */
+ TOP_PIN(GMII_RX_ER, TOP_REG1, 10, 2, 0x2c, 0,
+ TOP_MUX(0x0, "GMII"), /* rx_er */
+ TOP_MUX(0x1, "DVI1"), /* d3 */
+ TOP_MUX(0x2, "BGPIO"), /* gpio21 */
+ TOP_MUX(0x3, "TSI0")), /* valid */
+ TOP_PIN(GMII_RX_DV, TOP_REG1, 12, 2, 0x2c, 9,
+ TOP_MUX(0x0, "GMII"), /* rx_dv */
+ TOP_MUX(0x1, "DVI1"), /* d4 */
+ TOP_MUX(0x2, "BGPIO"), /* gpio22 */
+ TOP_MUX(0x3, "TSI1")), /* dat0 */
+ TOP_PIN(GMII_COL, TOP_REG1, 14, 2, 0x2c, 18,
+ TOP_MUX(0x0, "GMII"), /* col */
+ TOP_MUX(0x1, "DVI1"), /* d5 */
+ TOP_MUX(0x2, "BGPIO"), /* gpio23 */
+ TOP_MUX(0x3, "TSI1")), /* clk */
+ TOP_PIN(GMII_CRS, TOP_REG1, 16, 2, 0x30, 0,
+ TOP_MUX(0x0, "GMII"), /* crs */
+ TOP_MUX(0x1, "DVI1"), /* d6 */
+ TOP_MUX(0x2, "BGPIO"), /* gpio24 */
+ TOP_MUX(0x3, "TSI1")), /* sync */
+ TOP_PIN(GMII_MDC, TOP_REG1, 18, 2, 0x30, 9,
+ TOP_MUX(0x0, "GMII"), /* mdc */
+ TOP_MUX(0x1, "DVI1"), /* d7 */
+ TOP_MUX(0x2, "BGPIO"), /* gpio25 */
+ TOP_MUX(0x3, "TSI1")), /* valid */
+ TOP_PIN(GMII_MDIO, TOP_REG1, 20, 1, 0x30, 18,
+ TOP_MUX(0x0, "GMII"), /* mdio */
+ TOP_MUX(0x2, "BGPIO")), /* gpio26 */
+ TOP_PIN(SDIO1_CLK, TOP_REG1, 21, 2, 0x34, 18,
+ TOP_MUX(0x0, "SDIO1"), /* clk */
+ TOP_MUX(0x1, "USIM0"), /* clk */
+ TOP_MUX(0x2, "BGPIO"), /* gpio27 */
+ TOP_MUX(0x3, "SPINOR")), /* clk */
+ TOP_PIN(SDIO1_CMD, TOP_REG1, 23, 2, 0x38, 0,
+ TOP_MUX(0x0, "SDIO1"), /* cmd */
+ TOP_MUX(0x1, "USIM0"), /* cd */
+ TOP_MUX(0x2, "BGPIO"), /* gpio28 */
+ TOP_MUX(0x3, "SPINOR")), /* cs */
+ TOP_PIN(SDIO1_DATA0, TOP_REG1, 25, 2, 0x38, 9,
+ TOP_MUX(0x0, "SDIO1"), /* dat0 */
+ TOP_MUX(0x1, "USIM0"), /* rst */
+ TOP_MUX(0x2, "BGPIO"), /* gpio29 */
+ TOP_MUX(0x3, "SPINOR")), /* dq0 */
+ TOP_PIN(SDIO1_DATA1, TOP_REG1, 27, 2, 0x38, 18,
+ TOP_MUX(0x0, "SDIO1"), /* dat1 */
+ TOP_MUX(0x1, "USIM0"), /* data */
+ TOP_MUX(0x2, "BGPIO"), /* gpio30 */
+ TOP_MUX(0x3, "SPINOR")), /* dq1 */
+ TOP_PIN(SDIO1_DATA2, TOP_REG1, 29, 2, 0x3c, 0,
+ TOP_MUX(0x0, "SDIO1"), /* dat2 */
+ TOP_MUX(0x1, "BGPIO"), /* gpio31 */
+ TOP_MUX(0x2, "SPINOR")), /* dq2 */
+
+ /* top_pmm_reg_2 */
+ TOP_PIN(SDIO1_DATA3, TOP_REG2, 0, 2, 0x3c, 9,
+ TOP_MUX(0x0, "SDIO1"), /* dat3 */
+ TOP_MUX(0x1, "BGPIO"), /* gpio32 */
+ TOP_MUX(0x2, "SPINOR")), /* dq3 */
+ TOP_PIN(SDIO1_CD, TOP_REG2, 2, 2, 0x3c, 18,
+ TOP_MUX(0x0, "SDIO1"), /* cd */
+ TOP_MUX(0x1, "BGPIO"), /* gpio33 */
+ TOP_MUX(0x2, "ISP")), /* fl_trig */
+ TOP_PIN(SDIO1_WP, TOP_REG2, 4, 2, 0x40, 0,
+ TOP_MUX(0x0, "SDIO1"), /* wp */
+ TOP_MUX(0x1, "BGPIO"), /* gpio34 */
+ TOP_MUX(0x2, "ISP")), /* ref_clk */
+ TOP_PIN(USIM1_CD, TOP_REG2, 22, 3, 0x44, 18,
+ TOP_MUX(0x0, "USIM1"), /* cd */
+ TOP_MUX(0x1, "UART4"), /* rxd */
+ TOP_MUX(0x2, "BGPIO"), /* gpio39 */
+ TOP_MUX(0x3, "SPI3"), /* clk */
+ TOP_MUX(0x4, "I2S0"), /* bclk */
+ TOP_MUX(0x5, "B_DVI0")), /* d8 */
+ TOP_PIN(USIM1_CLK, TOP_REG2, 25, 3, 0x4c, 18,
+ TOP_MUX(0x0, "USIM1"), /* clk */
+ TOP_MUX(0x1, "UART4"), /* txd */
+ TOP_MUX(0x2, "BGPIO"), /* gpio40 */
+ TOP_MUX(0x3, "SPI3"), /* cs */
+ TOP_MUX(0x4, "I2S0"), /* ws */
+ TOP_MUX(0x5, "B_DVI0")), /* d9 */
+ TOP_PIN(USIM1_RST, TOP_REG2, 28, 3, 0x4c, 0,
+ TOP_MUX(0x0, "USIM1"), /* rst */
+ TOP_MUX(0x1, "UART4"), /* cts */
+ TOP_MUX(0x2, "BGPIO"), /* gpio41 */
+ TOP_MUX(0x3, "SPI3"), /* txd */
+ TOP_MUX(0x4, "I2S0"), /* dout0 */
+ TOP_MUX(0x5, "B_DVI0")), /* d10 */
+
+ /* top_pmm_reg_3 */
+ TOP_PIN(USIM1_DATA, TOP_REG3, 0, 3, 0x4c, 9,
+ TOP_MUX(0x0, "USIM1"), /* dat */
+ TOP_MUX(0x1, "UART4"), /* rst */
+ TOP_MUX(0x2, "BGPIO"), /* gpio42 */
+ TOP_MUX(0x3, "SPI3"), /* rxd */
+ TOP_MUX(0x4, "I2S0"), /* din0 */
+ TOP_MUX(0x5, "B_DVI0")), /* d11 */
+ TOP_PIN(SDIO0_CLK, TOP_REG3, 6, 1, 0x58, 0,
+ TOP_MUX(0x0, "SDIO0"), /* clk */
+ TOP_MUX(0x1, "GPIO")), /* gpio43 */
+ TOP_PIN(SDIO0_CMD, TOP_REG3, 7, 1, 0x58, 9,
+ TOP_MUX(0x0, "SDIO0"), /* cmd */
+ TOP_MUX(0x1, "GPIO")), /* gpio44 */
+ TOP_PIN(SDIO0_DATA0, TOP_REG3, 8, 1, 0x58, 18,
+ TOP_MUX(0x0, "SDIO0"), /* dat0 */
+ TOP_MUX(0x1, "GPIO")), /* gpio45 */
+ TOP_PIN(SDIO0_DATA1, TOP_REG3, 9, 1, 0x5c, 0,
+ TOP_MUX(0x0, "SDIO0"), /* dat1 */
+ TOP_MUX(0x1, "GPIO")), /* gpio46 */
+ TOP_PIN(SDIO0_DATA2, TOP_REG3, 10, 1, 0x5c, 9,
+ TOP_MUX(0x0, "SDIO0"), /* dat2 */
+ TOP_MUX(0x1, "GPIO")), /* gpio47 */
+ TOP_PIN(SDIO0_DATA3, TOP_REG3, 11, 1, 0x5c, 18,
+ TOP_MUX(0x0, "SDIO0"), /* dat3 */
+ TOP_MUX(0x1, "GPIO")), /* gpio48 */
+ TOP_PIN(SDIO0_CD, TOP_REG3, 12, 1, 0x60, 0,
+ TOP_MUX(0x0, "SDIO0"), /* cd */
+ TOP_MUX(0x1, "GPIO")), /* gpio49 */
+ TOP_PIN(SDIO0_WP, TOP_REG3, 13, 1, 0x60, 9,
+ TOP_MUX(0x0, "SDIO0"), /* wp */
+ TOP_MUX(0x1, "GPIO")), /* gpio50 */
+
+ /* top_pmm_reg_4 */
+ TOP_PIN(TSI0_DATA0, TOP_REG4, 0, 2, 0x60, 18,
+ TOP_MUX(0x0, "TSI0"), /* dat0 */
+ TOP_MUX(0x1, "LCD"), /* clk */
+ TOP_MUX(0x2, "BGPIO")), /* gpio51 */
+ TOP_PIN(SPINOR_CLK, TOP_REG4, 2, 2, 0xa8, 18,
+ TOP_MUX(0x0, "SPINOR"), /* clk */
+ TOP_MUX(0x1, "TSI0"), /* dat1 */
+ TOP_MUX(0x2, "LCD"), /* dat0 */
+ TOP_MUX(0x3, "BGPIO")), /* gpio52 */
+ TOP_PIN(TSI2_DATA, TOP_REG4, 4, 2, 0x7c, 0,
+ TOP_MUX(0x0, "TSI2"), /* dat */
+ TOP_MUX(0x1, "TSI0"), /* dat2 */
+ TOP_MUX(0x2, "LCD"), /* dat1 */
+ TOP_MUX(0x3, "BGPIO")), /* gpio53 */
+ TOP_PIN(TSI2_CLK, TOP_REG4, 6, 2, 0x7c, 9,
+ TOP_MUX(0x0, "TSI2"), /* clk */
+ TOP_MUX(0x1, "TSI0"), /* dat3 */
+ TOP_MUX(0x2, "LCD"), /* dat2 */
+ TOP_MUX(0x3, "BGPIO")), /* gpio54 */
+ TOP_PIN(TSI2_SYNC, TOP_REG4, 8, 2, 0x7c, 18,
+ TOP_MUX(0x0, "TSI2"), /* sync */
+ TOP_MUX(0x1, "TSI0"), /* dat4 */
+ TOP_MUX(0x2, "LCD"), /* dat3 */
+ TOP_MUX(0x3, "BGPIO")), /* gpio55 */
+ TOP_PIN(TSI2_VALID, TOP_REG4, 10, 2, 0x80, 0,
+ TOP_MUX(0x0, "TSI2"), /* valid */
+ TOP_MUX(0x1, "TSI0"), /* dat5 */
+ TOP_MUX(0x2, "LCD"), /* dat4 */
+ TOP_MUX(0x3, "BGPIO")), /* gpio56 */
+ TOP_PIN(SPINOR_CS, TOP_REG4, 12, 2, 0x80, 9,
+ TOP_MUX(0x0, "SPINOR"), /* cs */
+ TOP_MUX(0x1, "TSI0"), /* dat6 */
+ TOP_MUX(0x2, "LCD"), /* dat5 */
+ TOP_MUX(0x3, "BGPIO")), /* gpio57 */
+ TOP_PIN(SPINOR_DQ0, TOP_REG4, 14, 2, 0x80, 18,
+ TOP_MUX(0x0, "SPINOR"), /* dq0 */
+ TOP_MUX(0x1, "TSI0"), /* dat7 */
+ TOP_MUX(0x2, "LCD"), /* dat6 */
+ TOP_MUX(0x3, "BGPIO")), /* gpio58 */
+ TOP_PIN(SPINOR_DQ1, TOP_REG4, 16, 2, 0x84, 0,
+ TOP_MUX(0x0, "SPINOR"), /* dq1 */
+ TOP_MUX(0x1, "TSI0"), /* clk */
+ TOP_MUX(0x2, "LCD"), /* dat7 */
+ TOP_MUX(0x3, "BGPIO")), /* gpio59 */
+ TOP_PIN(SPINOR_DQ2, TOP_REG4, 18, 2, 0x84, 9,
+ TOP_MUX(0x0, "SPINOR"), /* dq2 */
+ TOP_MUX(0x1, "TSI0"), /* sync */
+ TOP_MUX(0x2, "LCD"), /* dat8 */
+ TOP_MUX(0x3, "BGPIO")), /* gpio60 */
+ TOP_PIN(SPINOR_DQ3, TOP_REG4, 20, 2, 0x84, 18,
+ TOP_MUX(0x0, "SPINOR"), /* dq3 */
+ TOP_MUX(0x1, "TSI0"), /* valid */
+ TOP_MUX(0x2, "LCD"), /* dat9 */
+ TOP_MUX(0x3, "BGPIO")), /* gpio61 */
+ TOP_PIN(VGA_HS, TOP_REG4, 22, 3, 0x88, 0,
+ TOP_MUX(0x0, "VGA"), /* hs */
+ TOP_MUX(0x1, "TSI1"), /* dat0 */
+ TOP_MUX(0x2, "LCD"), /* dat10 */
+ TOP_MUX(0x3, "BGPIO"), /* gpio62 */
+ TOP_MUX(0x4, "I2S1"), /* din1 */
+ TOP_MUX(0x5, "B_DVI0")), /* clk */
+ TOP_PIN(VGA_VS, TOP_REG4, 25, 3, 0x88, 9,
+ TOP_MUX(0x0, "VGA"), /* vs0 */
+ TOP_MUX(0x1, "TSI1"), /* dat1 */
+ TOP_MUX(0x2, "LCD"), /* dat11 */
+ TOP_MUX(0x3, "BGPIO"), /* gpio63 */
+ TOP_MUX(0x4, "I2S1"), /* din2 */
+ TOP_MUX(0x5, "B_DVI0")), /* vs */
+ TOP_PIN(TSI3_DATA, TOP_REG4, 28, 3, 0x88, 18,
+ TOP_MUX(0x0, "TSI3"), /* dat */
+ TOP_MUX(0x1, "TSI1"), /* dat2 */
+ TOP_MUX(0x2, "LCD"), /* dat12 */
+ TOP_MUX(0x3, "BGPIO"), /* gpio64 */
+ TOP_MUX(0x4, "I2S1"), /* din3 */
+ TOP_MUX(0x5, "B_DVI0")), /* hs */
+
+ /* top_pmm_reg_5 */
+ TOP_PIN(TSI3_CLK, TOP_REG5, 0, 3, 0x8c, 0,
+ TOP_MUX(0x0, "TSI3"), /* clk */
+ TOP_MUX(0x1, "TSI1"), /* dat3 */
+ TOP_MUX(0x2, "LCD"), /* dat13 */
+ TOP_MUX(0x3, "BGPIO"), /* gpio65 */
+ TOP_MUX(0x4, "I2S1"), /* dout1 */
+ TOP_MUX(0x5, "B_DVI0")), /* d0 */
+ TOP_PIN(TSI3_SYNC, TOP_REG5, 3, 3, 0x8c, 9,
+ TOP_MUX(0x0, "TSI3"), /* sync */
+ TOP_MUX(0x1, "TSI1"), /* dat4 */
+ TOP_MUX(0x2, "LCD"), /* dat14 */
+ TOP_MUX(0x3, "BGPIO"), /* gpio66 */
+ TOP_MUX(0x4, "I2S1"), /* dout2 */
+ TOP_MUX(0x5, "B_DVI0")), /* d1 */
+ TOP_PIN(TSI3_VALID, TOP_REG5, 6, 3, 0x8c, 18,
+ TOP_MUX(0x0, "TSI3"), /* valid */
+ TOP_MUX(0x1, "TSI1"), /* dat5 */
+ TOP_MUX(0x2, "LCD"), /* dat15 */
+ TOP_MUX(0x3, "BGPIO"), /* gpio67 */
+ TOP_MUX(0x4, "I2S1"), /* dout3 */
+ TOP_MUX(0x5, "B_DVI0")), /* d2 */
+ TOP_PIN(I2S1_WS, TOP_REG5, 9, 3, 0x90, 0,
+ TOP_MUX(0x0, "I2S1"), /* ws */
+ TOP_MUX(0x1, "TSI1"), /* dat6 */
+ TOP_MUX(0x2, "LCD"), /* dat16 */
+ TOP_MUX(0x3, "BGPIO"), /* gpio68 */
+ TOP_MUX(0x4, "VGA"), /* scl */
+ TOP_MUX(0x5, "B_DVI0")), /* d3 */
+ TOP_PIN(I2S1_BCLK, TOP_REG5, 12, 3, 0x90, 9,
+ TOP_MUX(0x0, "I2S1"), /* bclk */
+ TOP_MUX(0x1, "TSI1"), /* dat7 */
+ TOP_MUX(0x2, "LCD"), /* dat17 */
+ TOP_MUX(0x3, "BGPIO"), /* gpio69 */
+ TOP_MUX(0x4, "VGA"), /* sda */
+ TOP_MUX(0x5, "B_DVI0")), /* d4 */
+ TOP_PIN(I2S1_MCLK, TOP_REG5, 15, 2, 0x90, 18,
+ TOP_MUX(0x0, "I2S1"), /* mclk */
+ TOP_MUX(0x1, "TSI1"), /* clk */
+ TOP_MUX(0x2, "LCD"), /* dat18 */
+ TOP_MUX(0x3, "BGPIO")), /* gpio70 */
+ TOP_PIN(I2S1_DIN0, TOP_REG5, 17, 2, 0x94, 0,
+ TOP_MUX(0x0, "I2S1"), /* din0 */
+ TOP_MUX(0x1, "TSI1"), /* sync */
+ TOP_MUX(0x2, "LCD"), /* dat19 */
+ TOP_MUX(0x3, "BGPIO")), /* gpio71 */
+ TOP_PIN(I2S1_DOUT0, TOP_REG5, 19, 2, 0x94, 9,
+ TOP_MUX(0x0, "I2S1"), /* dout0 */
+ TOP_MUX(0x1, "TSI1"), /* valid */
+ TOP_MUX(0x2, "LCD"), /* dat20 */
+ TOP_MUX(0x3, "BGPIO")), /* gpio72 */
+ TOP_PIN(SPI3_CLK, TOP_REG5, 21, 3, 0x94, 18,
+ TOP_MUX(0x0, "SPI3"), /* clk */
+ TOP_MUX(0x1, "TSO1"), /* clk */
+ TOP_MUX(0x2, "LCD"), /* dat21 */
+ TOP_MUX(0x3, "BGPIO"), /* gpio73 */
+ TOP_MUX(0x4, "UART5"), /* rxd */
+ TOP_MUX(0x5, "PCM"), /* fs */
+ TOP_MUX(0x6, "I2S0"), /* din1 */
+ TOP_MUX(0x7, "B_DVI0")), /* d5 */
+ TOP_PIN(SPI3_CS, TOP_REG5, 24, 3, 0x98, 0,
+ TOP_MUX(0x0, "SPI3"), /* cs */
+ TOP_MUX(0x1, "TSO1"), /* dat0 */
+ TOP_MUX(0x2, "LCD"), /* dat22 */
+ TOP_MUX(0x3, "BGPIO"), /* gpio74 */
+ TOP_MUX(0x4, "UART5"), /* txd */
+ TOP_MUX(0x5, "PCM"), /* clk */
+ TOP_MUX(0x6, "I2S0"), /* din2 */
+ TOP_MUX(0x7, "B_DVI0")), /* d6 */
+ TOP_PIN(SPI3_TXD, TOP_REG5, 27, 3, 0x98, 9,
+ TOP_MUX(0x0, "SPI3"), /* txd */
+ TOP_MUX(0x1, "TSO1"), /* dat1 */
+ TOP_MUX(0x2, "LCD"), /* dat23 */
+ TOP_MUX(0x3, "BGPIO"), /* gpio75 */
+ TOP_MUX(0x4, "UART5"), /* cts */
+ TOP_MUX(0x5, "PCM"), /* txd */
+ TOP_MUX(0x6, "I2S0"), /* din3 */
+ TOP_MUX(0x7, "B_DVI0")), /* d7 */
+ TOP_PIN(NAND_LDO_MS18_SEL, TOP_REG5, 30, 1, 0xe4, 0,
+ TOP_MUX(0x0, "NAND"), /* ldo_ms18_sel */
+ TOP_MUX(0x1, "BGPIO")), /* gpio99 */
+
+ /* top_pmm_reg_6 */
+ TOP_PIN(SPI3_RXD, TOP_REG6, 0, 3, 0x98, 18,
+ TOP_MUX(0x0, "SPI3"), /* rxd */
+ TOP_MUX(0x1, "TSO1"), /* dat2 */
+ TOP_MUX(0x2, "LCD"), /* stvu_vsync */
+ TOP_MUX(0x3, "BGPIO"), /* gpio76 */
+ TOP_MUX(0x4, "UART5"), /* rts */
+ TOP_MUX(0x5, "PCM"), /* rxd */
+ TOP_MUX(0x6, "I2S0"), /* dout1 */
+ TOP_MUX(0x7, "B_DVI1")), /* clk */
+ TOP_PIN(I2S0_MCLK, TOP_REG6, 3, 3, 0x9c, 0,
+ TOP_MUX(0x0, "I2S0"), /* mclk */
+ TOP_MUX(0x1, "TSO1"), /* dat3 */
+ TOP_MUX(0x2, "LCD"), /* stvd */
+ TOP_MUX(0x3, "BGPIO"), /* gpio77 */
+ TOP_MUX(0x4, "USIM0"), /* cd */
+ TOP_MUX(0x5, "B_DVI1")), /* vs */
+ TOP_PIN(I2S0_BCLK, TOP_REG6, 6, 3, 0x9c, 9,
+ TOP_MUX(0x0, "I2S0"), /* bclk */
+ TOP_MUX(0x1, "TSO1"), /* dat4 */
+ TOP_MUX(0x2, "LCD"), /* sthl_hsync */
+ TOP_MUX(0x3, "BGPIO"), /* gpio78 */
+ TOP_MUX(0x4, "USIM0"), /* clk */
+ TOP_MUX(0x5, "B_DVI1")), /* hs */
+ TOP_PIN(I2S0_WS, TOP_REG6, 9, 3, 0x9c, 18,
+ TOP_MUX(0x0, "I2S0"), /* ws */
+ TOP_MUX(0x1, "TSO1"), /* dat5 */
+ TOP_MUX(0x2, "LCD"), /* sthr */
+ TOP_MUX(0x3, "BGPIO"), /* gpio79 */
+ TOP_MUX(0x4, "USIM0"), /* rst */
+ TOP_MUX(0x5, "B_DVI1")), /* d0 */
+ TOP_PIN(I2S0_DIN0, TOP_REG6, 12, 3, 0xa0, 0,
+ TOP_MUX(0x0, "I2S0"), /* din0 */
+ TOP_MUX(0x1, "TSO1"), /* dat6 */
+ TOP_MUX(0x2, "LCD"), /* oev_dataen */
+ TOP_MUX(0x3, "BGPIO"), /* gpio80 */
+ TOP_MUX(0x4, "USIM0"), /* dat */
+ TOP_MUX(0x5, "B_DVI1")), /* d1 */
+ TOP_PIN(I2S0_DOUT0, TOP_REG6, 15, 2, 0xa0, 9,
+ TOP_MUX(0x0, "I2S0"), /* dout0 */
+ TOP_MUX(0x1, "TSO1"), /* dat7 */
+ TOP_MUX(0x2, "LCD"), /* ckv */
+ TOP_MUX(0x3, "BGPIO")), /* gpio81 */
+ TOP_PIN(I2C5_SCL, TOP_REG6, 17, 3, 0xa0, 18,
+ TOP_MUX(0x0, "I2C5"), /* scl */
+ TOP_MUX(0x1, "TSO1"), /* sync */
+ TOP_MUX(0x2, "LCD"), /* ld */
+ TOP_MUX(0x3, "BGPIO"), /* gpio82 */
+ TOP_MUX(0x4, "PWM"), /* out2 */
+ TOP_MUX(0x5, "I2S0"), /* dout2 */
+ TOP_MUX(0x6, "B_DVI1")), /* d2 */
+ TOP_PIN(I2C5_SDA, TOP_REG6, 20, 3, 0xa4, 0,
+ TOP_MUX(0x0, "I2C5"), /* sda */
+ TOP_MUX(0x1, "TSO1"), /* vld */
+ TOP_MUX(0x2, "LCD"), /* pol */
+ TOP_MUX(0x3, "BGPIO"), /* gpio83 */
+ TOP_MUX(0x4, "PWM"), /* out3 */
+ TOP_MUX(0x5, "I2S0"), /* dout3 */
+ TOP_MUX(0x6, "B_DVI1")), /* d3 */
+ TOP_PIN(SPI2_CLK, TOP_REG6, 23, 3, 0xa4, 9,
+ TOP_MUX(0x0, "SPI2"), /* clk */
+ TOP_MUX(0x1, "TSO0"), /* clk */
+ TOP_MUX(0x2, "LCD"), /* degsl */
+ TOP_MUX(0x3, "BGPIO"), /* gpio84 */
+ TOP_MUX(0x4, "I2C4"), /* scl */
+ TOP_MUX(0x5, "B_DVI1")), /* d4 */
+ TOP_PIN(SPI2_CS, TOP_REG6, 26, 3, 0xa4, 18,
+ TOP_MUX(0x0, "SPI2"), /* cs */
+ TOP_MUX(0x1, "TSO0"), /* data */
+ TOP_MUX(0x2, "LCD"), /* rev */
+ TOP_MUX(0x3, "BGPIO"), /* gpio85 */
+ TOP_MUX(0x4, "I2C4"), /* sda */
+ TOP_MUX(0x5, "B_DVI1")), /* d5 */
+ TOP_PIN(SPI2_TXD, TOP_REG6, 29, 3, 0xa8, 0,
+ TOP_MUX(0x0, "SPI2"), /* txd */
+ TOP_MUX(0x1, "TSO0"), /* sync */
+ TOP_MUX(0x2, "LCD"), /* u_d */
+ TOP_MUX(0x3, "BGPIO"), /* gpio86 */
+ TOP_MUX(0x4, "I2C4"), /* scl */
+ TOP_MUX(0x5, "B_DVI1")), /* d6 */
+
+ /* top_pmm_reg_7 */
+ TOP_PIN(SPI2_RXD, TOP_REG7, 0, 3, 0xa8, 9,
+ TOP_MUX(0x0, "SPI2"), /* rxd */
+ TOP_MUX(0x1, "TSO0"), /* vld */
+ TOP_MUX(0x2, "LCD"), /* r_l */
+ TOP_MUX(0x3, "BGPIO"), /* gpio87 */
+ TOP_MUX(0x4, "I2C3"), /* sda */
+ TOP_MUX(0x5, "B_DVI1")), /* d7 */
+ TOP_PIN(NAND_WP_N, TOP_REG7, 7, 3, 0x54, 9,
+ TOP_MUX(0x0, "NAND"), /* wp */
+ TOP_MUX(0x1, "PWM"), /* out2 */
+ TOP_MUX(0x2, "SPI2"), /* clk */
+ TOP_MUX(0x3, "BGPIO"), /* gpio88 */
+ TOP_MUX(0x4, "TSI0"), /* dat0 */
+ TOP_MUX(0x5, "I2S1")), /* din1 */
+ TOP_PIN(NAND_PAGE_SIZE0, TOP_REG7, 10, 3, 0xb8, 0,
+ TOP_MUX(0x0, "NAND"), /* boot_pagesize0 */
+ TOP_MUX(0x1, "PWM"), /* out3 */
+ TOP_MUX(0x2, "SPI2"), /* cs */
+ TOP_MUX(0x3, "BGPIO"), /* gpio89 */
+ TOP_MUX(0x4, "TSI0"), /* clk */
+ TOP_MUX(0x5, "I2S1")), /* din2 */
+ TOP_PIN(NAND_PAGE_SIZE1, TOP_REG7, 13, 3, 0xb8, 9,
+ TOP_MUX(0x0, "NAND"), /* boot_pagesize1 */
+ TOP_MUX(0x1, "I2C4"), /* scl */
+ TOP_MUX(0x2, "SPI2"), /* txd */
+ TOP_MUX(0x3, "BGPIO"), /* gpio90 */
+ TOP_MUX(0x4, "TSI0"), /* sync */
+ TOP_MUX(0x5, "I2S1")), /* din3 */
+ TOP_PIN(NAND_ADDR_CYCLE, TOP_REG7, 16, 3, 0xb8, 18,
+ TOP_MUX(0x0, "NAND"), /* boot_addr_cycles */
+ TOP_MUX(0x1, "I2C4"), /* sda */
+ TOP_MUX(0x2, "SPI2"), /* rxd */
+ TOP_MUX(0x3, "BGPIO"), /* gpio91 */
+ TOP_MUX(0x4, "TSI0"), /* valid */
+ TOP_MUX(0x5, "I2S1")), /* dout1 */
+ TOP_PIN(NAND_RB0, TOP_REG7, 19, 3, 0xbc, 0,
+ TOP_MUX(0x0, "NAND"), /* rdy_busy0 */
+ TOP_MUX(0x1, "I2C2"), /* scl */
+ TOP_MUX(0x2, "USIM0"), /* cd */
+ TOP_MUX(0x3, "BGPIO"), /* gpio92 */
+ TOP_MUX(0x4, "TSI1")), /* data0 */
+ TOP_PIN(NAND_RB1, TOP_REG7, 22, 3, 0xbc, 9,
+ TOP_MUX(0x0, "NAND"), /* rdy_busy1 */
+ TOP_MUX(0x1, "I2C2"), /* sda */
+ TOP_MUX(0x2, "USIM0"), /* clk */
+ TOP_MUX(0x3, "BGPIO"), /* gpio93 */
+ TOP_MUX(0x4, "TSI1")), /* clk */
+ TOP_PIN(NAND_RB2, TOP_REG7, 25, 3, 0xbc, 18,
+ TOP_MUX(0x0, "NAND"), /* rdy_busy2 */
+ TOP_MUX(0x1, "UART5"), /* rxd */
+ TOP_MUX(0x2, "USIM0"), /* rst */
+ TOP_MUX(0x3, "BGPIO"), /* gpio94 */
+ TOP_MUX(0x4, "TSI1"), /* sync */
+ TOP_MUX(0x4, "I2S1")), /* dout2 */
+ TOP_PIN(NAND_RB3, TOP_REG7, 28, 3, 0x54, 18,
+ TOP_MUX(0x0, "NAND"), /* rdy_busy3 */
+ TOP_MUX(0x1, "UART5"), /* txd */
+ TOP_MUX(0x2, "USIM0"), /* dat */
+ TOP_MUX(0x3, "BGPIO"), /* gpio95 */
+ TOP_MUX(0x4, "TSI1"), /* valid */
+ TOP_MUX(0x4, "I2S1")), /* dout3 */
+
+ /* top_pmm_reg_8 */
+ TOP_PIN(GMAC_125M_IN, TOP_REG8, 0, 2, 0x34, 0,
+ TOP_MUX(0x0, "GMII"), /* 125m_in */
+ TOP_MUX(0x1, "USB2"), /* 0_drvvbus */
+ TOP_MUX(0x2, "ISP"), /* ref_clk */
+ TOP_MUX(0x3, "BGPIO")), /* gpio96 */
+ TOP_PIN(GMAC_50M_OUT, TOP_REG8, 2, 2, 0x34, 9,
+ TOP_MUX(0x0, "GMII"), /* 50m_out */
+ TOP_MUX(0x1, "USB2"), /* 1_drvvbus */
+ TOP_MUX(0x2, "BGPIO"), /* gpio97 */
+ TOP_MUX(0x3, "USB2")), /* 0_drvvbus */
+ TOP_PIN(SPINOR_SSCLK_LOOPBACK, TOP_REG8, 6, 1, 0xc8, 9,
+ TOP_MUX(0x0, "SPINOR")), /* sdio1_clk_i */
+ TOP_PIN(SPINOR_SDIO1CLK_LOOPBACK, TOP_REG8, 7, 1, 0xc8, 18,
+ TOP_MUX(0x0, "SPINOR")), /* ssclk_i */
+};
+
+static struct zx_pinctrl_soc_info zx296718_pinctrl_info = {
+ .pins = zx296718_pins,
+ .npins = ARRAY_SIZE(zx296718_pins),
+};
+
+static int zx296718_pinctrl_probe(struct platform_device *pdev)
+{
+ return zx_pinctrl_init(pdev, &zx296718_pinctrl_info);
+}
+
+static const struct of_device_id zx296718_pinctrl_match[] = {
+ { .compatible = "zte,zx296718-pmm", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, zx296718_pinctrl_match);
+
+static struct platform_driver zx296718_pinctrl_driver = {
+ .probe = zx296718_pinctrl_probe,
+ .driver = {
+ .name = "zx296718-pinctrl",
+ .of_match_table = zx296718_pinctrl_match,
+ },
+};
+builtin_platform_driver(zx296718_pinctrl_driver);
+
+MODULE_DESCRIPTION("ZTE ZX296718 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index a3ccc3c795a5..b04860703740 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -195,16 +195,6 @@ config FUJITSU_LAPTOP
If you have a Fujitsu laptop, say Y or M here.
-config FUJITSU_LAPTOP_DEBUG
- bool "Verbose debug mode for Fujitsu Laptop Extras"
- depends on FUJITSU_LAPTOP
- default n
- ---help---
- Enables extra debug output from the fujitsu extras driver, at the
- expense of a slight increase in driver size.
-
- If you are not sure, say N here.
-
config FUJITSU_TABLET
tristate "Fujitsu Tablet Extras"
depends on ACPI
@@ -656,6 +646,18 @@ config ACPI_WMI
It is safe to enable this driver even if your DSDT doesn't define
any ACPI-WMI devices.
+config WMI_BMOF
+ tristate "WMI embedded Binary MOF driver"
+ depends on ACPI_WMI
+ default ACPI_WMI
+ ---help---
+ Say Y here if you want to be able to read firmware-embedded WMI
+ Binary MOF (Managed Object Format) data. Using this requires
+ userspace tools and may be rather tedious.
+
+ To compile this driver as a module, choose M here: the module will
+ be called wmi-bmof.
+
config MSI_WMI
tristate "MSI WMI extras"
depends on ACPI_WMI
@@ -669,6 +671,13 @@ config MSI_WMI
To compile this driver as a module, choose M here: the module will
be called msi-wmi.
+config PEAQ_WMI
+ tristate "PEAQ 2-in-1 WMI hotkey driver"
+ depends on ACPI_WMI
+ depends on INPUT
+ help
+ Say Y here if you want to support WMI-based hotkeys on PEAQ 2-in-1s.
+
config TOPSTAR_LAPTOP
tristate "Topstar Laptop Extras"
depends on ACPI
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index ab22ce77fb66..91cec1751461 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -35,8 +35,10 @@ obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o
obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
obj-$(CONFIG_ACPI_WMI) += wmi.o
obj-$(CONFIG_MSI_WMI) += msi-wmi.o
+obj-$(CONFIG_PEAQ_WMI) += peaq-wmi.o
obj-$(CONFIG_SURFACE3_WMI) += surface3-wmi.o
obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
+obj-$(CONFIG_WMI_BMOF) += wmi-bmof.o
# toshiba_acpi must link after wmi to ensure that wmi devices are found
# before toshiba_acpi initializes
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 79fa5ab3fd00..1be71f956d5c 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -149,6 +149,8 @@ struct event_return_value {
#define ACER_WMID3_GDS_THREEG (1<<6) /* 3G */
#define ACER_WMID3_GDS_WIMAX (1<<7) /* WiMAX */
#define ACER_WMID3_GDS_BLUETOOTH (1<<11) /* BT */
+#define ACER_WMID3_GDS_RFBTN (1<<14) /* RF Button */
+
#define ACER_WMID3_GDS_TOUCHPAD (1<<1) /* Touchpad */
/* Hotkey Customized Setting and Acer Application Status.
@@ -221,6 +223,7 @@ struct hotkey_function_type_aa {
#define ACER_CAP_BRIGHTNESS (1<<3)
#define ACER_CAP_THREEG (1<<4)
#define ACER_CAP_ACCEL (1<<5)
+#define ACER_CAP_RFBTN (1<<6)
#define ACER_CAP_ANY (0xFFFFFFFF)
/*
@@ -700,7 +703,7 @@ struct acpi_buffer *result)
input.length = sizeof(struct wmab_args);
input.pointer = (u8 *)regbuf;
- status = wmi_evaluate_method(AMW0_GUID1, 1, 1, &input, result);
+ status = wmi_evaluate_method(AMW0_GUID1, 0, 1, &input, result);
return status;
}
@@ -965,7 +968,7 @@ WMI_execute_u32(u32 method_id, u32 in, u32 *out)
u32 tmp = 0;
acpi_status status;
- status = wmi_evaluate_method(WMID_GUID1, 1, method_id, &input, &result);
+ status = wmi_evaluate_method(WMID_GUID1, 0, method_id, &input, &result);
if (ACPI_FAILURE(status))
return status;
@@ -1264,6 +1267,10 @@ static void __init type_aa_dmi_decode(const struct dmi_header *header, void *d)
interface->capability |= ACER_CAP_THREEG;
if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_BLUETOOTH)
interface->capability |= ACER_CAP_BLUETOOTH;
+ if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_RFBTN) {
+ interface->capability |= ACER_CAP_RFBTN;
+ commun_func_bitmap &= ~ACER_WMID3_GDS_RFBTN;
+ }
commun_fn_key_number = type_aa->commun_fn_key_number;
}
@@ -1275,7 +1282,7 @@ static acpi_status __init WMID_set_capabilities(void)
acpi_status status;
u32 devices;
- status = wmi_query_block(WMID_GUID2, 1, &out);
+ status = wmi_query_block(WMID_GUID2, 0, &out);
if (ACPI_FAILURE(status))
return status;
@@ -2018,7 +2025,7 @@ static u32 get_wmid_devices(void)
acpi_status status;
u32 devices = 0;
- status = wmi_query_block(WMID_GUID2, 1, &out);
+ status = wmi_query_block(WMID_GUID2, 0, &out);
if (ACPI_FAILURE(status))
return 0;
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 2acdb0d6ea89..ea22591ee66f 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -557,7 +557,7 @@ err_out:
}
/* bind fan callbacks to fan device */
-static struct thermal_cooling_device_ops acerhdf_cooling_ops = {
+static const struct thermal_cooling_device_ops acerhdf_cooling_ops = {
.get_max_state = acerhdf_get_max_state,
.get_cur_state = acerhdf_get_cur_state,
.set_cur_state = acerhdf_set_cur_state,
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index d6b34923fb4e..9866fec78c1c 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -303,7 +303,7 @@ static int alienware_update_led(struct platform_zone *zone)
}
pr_debug("alienware-wmi: guid %s method %d\n", guid, method_id);
- status = wmi_evaluate_method(guid, 1, method_id, &input, NULL);
+ status = wmi_evaluate_method(guid, 0, method_id, &input, NULL);
if (ACPI_FAILURE(status))
pr_err("alienware-wmi: zone set failure: %u\n", status);
return ACPI_FAILURE(status);
@@ -352,7 +352,7 @@ static int wmax_brightness(int brightness)
};
input.length = (acpi_size) sizeof(args);
input.pointer = &args;
- status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
+ status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
WMAX_METHOD_BRIGHTNESS, &input, NULL);
if (ACPI_FAILURE(status))
pr_err("alienware-wmi: brightness set failure: %u\n", status);
@@ -506,10 +506,10 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
if (out_data != NULL) {
output.length = ACPI_ALLOCATE_BUFFER;
output.pointer = NULL;
- status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
+ status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
command, &input, &output);
} else
- status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
+ status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
command, &input, NULL);
if (ACPI_SUCCESS(status) && out_data != NULL) {
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index ec202094bd50..f42159fd2031 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -1510,7 +1510,11 @@ static void kbd_init(void)
ret = kbd_init_info();
kbd_init_tokens();
- if (kbd_token_bits != 0 || ret == 0)
+ /*
+ * Only report the keyboard backlight as present when it has at least
+ * two modes.
+ */
+ if ((ret == 0 && (kbd_info.levels != 0 || kbd_mode_levels_count >= 2))
+ || kbd_get_valid_token_counts() >= 2)
kbd_led_present = true;
}
diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c
index dcd9f40a4b18..f3afe778001e 100644
--- a/drivers/platform/x86/dell-rbtn.c
+++ b/drivers/platform/x86/dell-rbtn.c
@@ -110,7 +110,7 @@ static int rbtn_rfkill_set_block(void *data, bool blocked)
return -EINVAL;
}
-static struct rfkill_ops rbtn_ops = {
+static const struct rfkill_ops rbtn_ops = {
.query = rbtn_rfkill_query,
.set_block = rbtn_rfkill_set_block,
};
@@ -221,16 +221,27 @@ static const struct acpi_device_id rbtn_ids[] = {
/*
* This driver can also handle the "DELLABC6" device that
- * appears on the XPS 13 9350, but that device is disabled
- * by the DSDT unless booted with acpi_osi="!Windows 2012"
- * acpi_osi="!Windows 2013". Even if we boot that and bind
- * the driver, we seem to have inconsistent behavior in
- * which NetworkManager can get out of sync with the rfkill
- * state.
+ * appears on the XPS 13 9350, but that device is disabled by
+ * the DSDT unless booted with acpi_osi="!Windows 2012"
+ * acpi_osi="!Windows 2013".
*
- * On the XPS 13 9350 and similar laptops, we're not supposed to
- * use DELLABC6 at all. Instead, we handle the rfkill button
- * via the intel-hid driver.
+ * According to Mario at Dell:
+ *
+ * DELLABC6 is a custom interface that was created solely to
+ * have airplane mode support for Windows 7. For Windows 10
+ * the proper interface is the one handled by intel-hid. An OEM
+ * airplane mode driver is not used.
+ *
+ * Since the kernel doesn't identify itself as Windows 7, it would
+ * be incorrect to attempt to use that interface.
+ *
+ * Even if we override _OSI and bind to DELLABC6, we end up with
+ * inconsistent behavior in which userspace can get out of sync
+ * with the rfkill state as it conflicts with events from
+ * intel-hid.
+ *
+ * The upshot is that it is better to just ignore DELLABC6
+ * devices.
*/
{ "", 0 },
diff --git a/drivers/platform/x86/dell-wmi-led.c b/drivers/platform/x86/dell-wmi-led.c
index a0c7e99530ef..5bedaf7f0633 100644
--- a/drivers/platform/x86/dell-wmi-led.c
+++ b/drivers/platform/x86/dell-wmi-led.c
@@ -68,7 +68,7 @@ static int dell_led_perform_fn(u8 length, u8 result_code, u8 device_id,
input.length = sizeof(struct bios_args);
input.pointer = &args;
- status = wmi_evaluate_method(DELL_LED_BIOS_GUID, 1, 1, &input, &output);
+ status = wmi_evaluate_method(DELL_LED_BIOS_GUID, 0, 1, &input, &output);
if (ACPI_FAILURE(status))
return status;
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 8a64c7967753..f8978464df31 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -36,6 +36,7 @@
#include <linux/acpi.h>
#include <linux/string.h>
#include <linux/dmi.h>
+#include <linux/wmi.h>
#include <acpi/video.h>
#include "dell-smbios.h"
@@ -53,6 +54,10 @@ static bool wmi_requires_smbios_request;
MODULE_ALIAS("wmi:"DELL_EVENT_GUID);
MODULE_ALIAS("wmi:"DELL_DESCRIPTOR_GUID);
+struct dell_wmi_priv {
+ struct input_dev *input_dev;
+};
+
static int __init dmi_matched(const struct dmi_system_id *dmi)
{
wmi_requires_smbios_request = 1;
@@ -86,7 +91,7 @@ static const struct dmi_system_id dell_wmi_smbios_list[] __initconst = {
* notifications (rather than requests for change) or are also sent
* via the keyboard controller so should not be sent again.
*/
-static const struct key_entry dell_wmi_keymap_type_0000[] __initconst = {
+static const struct key_entry dell_wmi_keymap_type_0000[] = {
{ KE_IGNORE, 0x003a, { KEY_CAPSLOCK } },
/* Key code is followed by brightness level */
@@ -207,7 +212,7 @@ struct dell_dmi_results {
};
/* Uninitialized entries here are KEY_RESERVED == 0. */
-static const u16 bios_to_linux_keycode[256] __initconst = {
+static const u16 bios_to_linux_keycode[256] = {
[0] = KEY_MEDIA,
[1] = KEY_NEXTSONG,
[2] = KEY_PLAYPAUSE,
@@ -256,7 +261,7 @@ static const u16 bios_to_linux_keycode[256] __initconst = {
* These are applied if the 0xB2 DMI hotkey table is present and doesn't
* override them.
*/
-static const struct key_entry dell_wmi_keymap_type_0010[] __initconst = {
+static const struct key_entry dell_wmi_keymap_type_0010[] = {
/* Fn-lock */
{ KE_IGNORE, 0x151, { KEY_RESERVED } },
@@ -272,7 +277,12 @@ static const struct key_entry dell_wmi_keymap_type_0010[] __initconst = {
/* RGB keyboard backlight control */
{ KE_IGNORE, 0x154, { KEY_RESERVED } },
- /* Stealth mode toggle */
+ /*
+ * Stealth mode toggle. This will "disable all lights and sounds".
+ * The action is performed by the BIOS and EC; the WMI event is just
+ * a notification. On the XPS 13 9350, this is Fn+F7, and there's
+ * a BIOS setting to enable and disable the hotkey.
+ */
{ KE_IGNORE, 0x155, { KEY_RESERVED } },
/* Rugged magnetic dock attach/detach events */
@@ -289,7 +299,7 @@ static const struct key_entry dell_wmi_keymap_type_0010[] __initconst = {
/*
* Keymap for WMI events of type 0x0011
*/
-static const struct key_entry dell_wmi_keymap_type_0011[] __initconst = {
+static const struct key_entry dell_wmi_keymap_type_0011[] = {
/* Battery unplugged */
{ KE_IGNORE, 0xfff0, { KEY_RESERVED } },
@@ -304,13 +314,12 @@ static const struct key_entry dell_wmi_keymap_type_0011[] __initconst = {
{ KE_IGNORE, 0x02f6, { KEY_RESERVED } },
};
-static struct input_dev *dell_wmi_input_dev;
-
-static void dell_wmi_process_key(int type, int code)
+static void dell_wmi_process_key(struct wmi_device *wdev, int type, int code)
{
+ struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
const struct key_entry *key;
- key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
+ key = sparse_keymap_entry_from_scancode(priv->input_dev,
(type << 16) | code);
if (!key) {
pr_info("Unknown key with type 0x%04x and code 0x%04x pressed\n",
@@ -333,33 +342,18 @@ static void dell_wmi_process_key(int type, int code)
dell_laptop_call_notifier(
DELL_LAPTOP_KBD_BACKLIGHT_BRIGHTNESS_CHANGED, NULL);
- sparse_keymap_report_entry(dell_wmi_input_dev, key, 1, true);
+ sparse_keymap_report_entry(priv->input_dev, key, 1, true);
}
-static void dell_wmi_notify(u32 value, void *context)
+static void dell_wmi_notify(struct wmi_device *wdev,
+ union acpi_object *obj)
{
- struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- acpi_status status;
- acpi_size buffer_size;
u16 *buffer_entry, *buffer_end;
+ acpi_size buffer_size;
int len, i;
- status = wmi_get_event_data(value, &response);
- if (status != AE_OK) {
- pr_warn("bad event status 0x%x\n", status);
- return;
- }
-
- obj = (union acpi_object *)response.pointer;
- if (!obj) {
- pr_warn("no response\n");
- return;
- }
-
if (obj->type != ACPI_TYPE_BUFFER) {
pr_warn("bad response type %x\n", obj->type);
- kfree(obj);
return;
}
@@ -404,13 +398,14 @@ static void dell_wmi_notify(u32 value, void *context)
switch (buffer_entry[1]) {
case 0x0000: /* One key pressed or event occurred */
if (len > 2)
- dell_wmi_process_key(0x0000, buffer_entry[2]);
+ dell_wmi_process_key(wdev, 0x0000,
+ buffer_entry[2]);
/* Other entries could contain additional information */
break;
case 0x0010: /* Sequence of keys pressed */
case 0x0011: /* Sequence of events occurred */
for (i = 2; i < len; ++i)
- dell_wmi_process_key(buffer_entry[1],
+ dell_wmi_process_key(wdev, buffer_entry[1],
buffer_entry[i]);
break;
default: /* Unknown event */
@@ -423,7 +418,6 @@ static void dell_wmi_notify(u32 value, void *context)
}
- kfree(obj);
}
static bool have_scancode(u32 scancode, const struct key_entry *keymap, int len)
@@ -437,9 +431,7 @@ static bool have_scancode(u32 scancode, const struct key_entry *keymap, int len)
return false;
}
-static void __init handle_dmi_entry(const struct dmi_header *dm,
- void *opaque)
-
+static void handle_dmi_entry(const struct dmi_header *dm, void *opaque)
{
struct dell_dmi_results *results = opaque;
struct dell_bios_hotkey_table *table;
@@ -449,6 +441,7 @@ static void __init handle_dmi_entry(const struct dmi_header *dm,
if (results->err || results->keymap)
return; /* We already found the hotkey table. */
+ /* The Dell hotkey table is type 0xB2. Scan until we find it. */
if (dm->type != 0xb2)
return;
@@ -509,19 +502,20 @@ static void __init handle_dmi_entry(const struct dmi_header *dm,
results->keymap_size = pos;
}
-static int __init dell_wmi_input_setup(void)
+static int dell_wmi_input_setup(struct wmi_device *wdev)
{
+ struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
struct dell_dmi_results dmi_results = {};
struct key_entry *keymap;
int err, i, pos = 0;
- dell_wmi_input_dev = input_allocate_device();
- if (!dell_wmi_input_dev)
+ priv->input_dev = input_allocate_device();
+ if (!priv->input_dev)
return -ENOMEM;
- dell_wmi_input_dev->name = "Dell WMI hotkeys";
- dell_wmi_input_dev->phys = "wmi/input0";
- dell_wmi_input_dev->id.bustype = BUS_HOST;
+ priv->input_dev->name = "Dell WMI hotkeys";
+ priv->input_dev->id.bustype = BUS_HOST;
+ priv->input_dev->dev.parent = &wdev->dev;
if (dmi_walk(handle_dmi_entry, &dmi_results)) {
/*
@@ -596,7 +590,7 @@ static int __init dell_wmi_input_setup(void)
keymap[pos].type = KE_END;
- err = sparse_keymap_setup(dell_wmi_input_dev, keymap, NULL);
+ err = sparse_keymap_setup(priv->input_dev, keymap, NULL);
/*
* Sparse keymap library makes a copy of keymap so we don't need the
* original one that was allocated.
@@ -605,17 +599,24 @@ static int __init dell_wmi_input_setup(void)
if (err)
goto err_free_dev;
- err = input_register_device(dell_wmi_input_dev);
+ err = input_register_device(priv->input_dev);
if (err)
goto err_free_dev;
return 0;
err_free_dev:
- input_free_device(dell_wmi_input_dev);
+ input_free_device(priv->input_dev);
return err;
}
+static void dell_wmi_input_destroy(struct wmi_device *wdev)
+{
+ struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
+
+ input_unregister_device(priv->input_dev);
+}
+
/*
* Descriptor buffer is 128 bytes long and contains:
*
@@ -714,46 +715,55 @@ static int dell_wmi_events_set_enabled(bool enable)
return dell_smbios_error(ret);
}
+static int dell_wmi_probe(struct wmi_device *wdev)
+{
+ struct dell_wmi_priv *priv = devm_kzalloc(
+ &wdev->dev, sizeof(struct dell_wmi_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(&wdev->dev, priv);
+
+ return dell_wmi_input_setup(wdev);
+}
+
+static int dell_wmi_remove(struct wmi_device *wdev)
+{
+ dell_wmi_input_destroy(wdev);
+ return 0;
+}
+static const struct wmi_device_id dell_wmi_id_table[] = {
+ { .guid_string = DELL_EVENT_GUID },
+ { },
+};
+
+static struct wmi_driver dell_wmi_driver = {
+ .driver = {
+ .name = "dell-wmi",
+ },
+ .id_table = dell_wmi_id_table,
+ .probe = dell_wmi_probe,
+ .remove = dell_wmi_remove,
+ .notify = dell_wmi_notify,
+};
+
static int __init dell_wmi_init(void)
{
int err;
- acpi_status status;
-
- if (!wmi_has_guid(DELL_EVENT_GUID) ||
- !wmi_has_guid(DELL_DESCRIPTOR_GUID)) {
- pr_warn("Dell WMI GUID were not found\n");
- return -ENODEV;
- }
err = dell_wmi_check_descriptor_buffer();
if (err)
return err;
- err = dell_wmi_input_setup();
- if (err)
- return err;
-
- status = wmi_install_notify_handler(DELL_EVENT_GUID,
- dell_wmi_notify, NULL);
- if (ACPI_FAILURE(status)) {
- input_unregister_device(dell_wmi_input_dev);
- pr_err("Unable to register notify handler - %d\n", status);
- return -ENODEV;
- }
-
dmi_check_system(dell_wmi_smbios_list);
if (wmi_requires_smbios_request) {
err = dell_wmi_events_set_enabled(true);
if (err) {
pr_err("Failed to enable WMI events\n");
- wmi_remove_notify_handler(DELL_EVENT_GUID);
- input_unregister_device(dell_wmi_input_dev);
return err;
}
}
- return 0;
+ return wmi_driver_register(&dell_wmi_driver);
}
module_init(dell_wmi_init);
@@ -761,7 +771,7 @@ static void __exit dell_wmi_exit(void)
{
if (wmi_requires_smbios_request)
dell_wmi_events_set_enabled(false);
- wmi_remove_notify_handler(DELL_EVENT_GUID);
- input_unregister_device(dell_wmi_input_dev);
+
+ wmi_driver_unregister(&dell_wmi_driver);
}
module_exit(dell_wmi_exit);
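The hunks above convert dell-wmi from the legacy wmi_install_notify_handler() interface to a driver registered on the WMI bus, with per-device state held in driver data instead of file-scope globals. The skeleton below is a minimal sketch of that shape, not the Dell driver itself: the GUID, the example_* names and the callback bodies are placeholders, the notify prototype is assumed to be the object-passing form used by bus drivers of this kind, and the probe/remove signatures plus the wmi_driver_register()/wmi_driver_unregister() calls follow what is visible in the diff.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wmi.h>

#define EXAMPLE_EVENT_GUID "01234567-89AB-CDEF-0123-456789ABCDEF"	/* placeholder */

struct example_priv {
	int dummy;		/* per-device state instead of file-scope globals */
};

static int example_probe(struct wmi_device *wdev)
{
	struct example_priv *priv;

	priv = devm_kzalloc(&wdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(&wdev->dev, priv);
	return 0;
}

static int example_remove(struct wmi_device *wdev)
{
	return 0;		/* devm allocations are released automatically */
}

static void example_notify(struct wmi_device *wdev, union acpi_object *obj)
{
	struct example_priv *priv = dev_get_drvdata(&wdev->dev);

	(void)priv;		/* decode *obj and report the event here */
}

static const struct wmi_device_id example_id_table[] = {
	{ .guid_string = EXAMPLE_EVENT_GUID },
	{ },
};

static struct wmi_driver example_wmi_driver = {
	.driver = {
		.name = "example-wmi",
	},
	.id_table = example_id_table,
	.probe = example_probe,
	.remove = example_remove,
	.notify = example_notify,
};

static int __init example_init(void)
{
	return wmi_driver_register(&example_wmi_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	wmi_driver_unregister(&example_wmi_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");

Compared with the notify-handler API, probe()/remove() give the driver a natural place to allocate and tear down per-device resources with devm helpers, which is exactly what the Dell conversion uses them for.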
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 2426399e1e04..5a681962899c 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -445,7 +445,7 @@ static struct attribute *platform_attributes[] = {
NULL
};
-static struct attribute_group platform_attribute_group = {
+static const struct attribute_group platform_attribute_group = {
.attrs = platform_attributes
};
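This one-word constification works because the sysfs helpers take a const struct attribute_group pointer, so a group that is never modified at run time can live in read-only data. A generic sketch of the pattern, with a hypothetical example_mode attribute that is not taken from eeepc-laptop:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t example_mode_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 0);	/* report the current mode here */
}
static DEVICE_ATTR_RO(example_mode);

static struct attribute *example_attributes[] = {
	&dev_attr_example_mode.attr,
	NULL
};

/* const is fine: sysfs_create_group() only reads the group */
static const struct attribute_group example_attribute_group = {
	.attrs = example_attributes
};

/* typically registered from probe() and removed on the error/remove path:
 *	sysfs_create_group(&dev->kobj, &example_attribute_group);
 *	sysfs_remove_group(&dev->kobj, &example_attribute_group);
 */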
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 7f49d92914c9..c1a852847d02 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -112,25 +112,8 @@
#define MAX_HOTKEY_RINGBUFFER_SIZE 100
#define RINGBUFFERSIZE 40
-/* Debugging */
-#define FUJLAPTOP_DBG_ERROR 0x0001
-#define FUJLAPTOP_DBG_WARN 0x0002
-#define FUJLAPTOP_DBG_INFO 0x0004
-#define FUJLAPTOP_DBG_TRACE 0x0008
-
-#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
-#define vdbg_printk(a_dbg_level, format, arg...) \
- do { if (dbg_level & a_dbg_level) \
- printk(KERN_DEBUG pr_fmt("%s: " format), __func__, ## arg); \
- } while (0)
-#else
-#define vdbg_printk(a_dbg_level, format, arg...) \
- do { } while (0)
-#endif
-
/* Device controlling the backlight and associated keys */
struct fujitsu_bl {
- acpi_handle acpi_handle;
struct input_dev *input;
char phys[32];
struct backlight_device *bl_device;
@@ -144,8 +127,6 @@ static bool disable_brightness_adjust;
/* Device used to access hotkeys and other features on the laptop */
struct fujitsu_laptop {
- acpi_handle acpi_handle;
- struct acpi_device *dev;
struct input_dev *input;
char phys[32];
struct platform_device *pf_device;
@@ -155,15 +136,12 @@ struct fujitsu_laptop {
int flags_state;
};
-static struct fujitsu_laptop *fujitsu_laptop;
-
-#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
-static u32 dbg_level = 0x03;
-#endif
+static struct acpi_device *fext;
/* Fujitsu ACPI interface function */
-static int call_fext_func(int func, int op, int feature, int state)
+static int call_fext_func(struct acpi_device *device,
+ int func, int op, int feature, int state)
{
union acpi_object params[4] = {
{ .integer.type = ACPI_TYPE_INTEGER, .integer.value = func },
@@ -175,28 +153,30 @@ static int call_fext_func(int func, int op, int feature, int state)
unsigned long long value;
acpi_status status;
- status = acpi_evaluate_integer(fujitsu_laptop->acpi_handle, "FUNC",
- &arg_list, &value);
+ status = acpi_evaluate_integer(device->handle, "FUNC", &arg_list,
+ &value);
if (ACPI_FAILURE(status)) {
- vdbg_printk(FUJLAPTOP_DBG_ERROR, "Failed to evaluate FUNC\n");
+ acpi_handle_err(device->handle, "Failed to evaluate FUNC\n");
return -ENODEV;
}
- vdbg_printk(FUJLAPTOP_DBG_TRACE, "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) returned 0x%x\n",
- func, op, feature, state, (int)value);
+ acpi_handle_debug(device->handle,
+ "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) returned 0x%x\n",
+ func, op, feature, state, (int)value);
return value;
}
/* Hardware access for LCD brightness control */
-static int set_lcd_level(int level)
+static int set_lcd_level(struct acpi_device *device, int level)
{
+ struct fujitsu_bl *priv = acpi_driver_data(device);
acpi_status status;
char *method;
switch (use_alt_lcd_levels) {
case -1:
- if (acpi_has_method(fujitsu_bl->acpi_handle, "SBL2"))
+ if (acpi_has_method(device->handle, "SBL2"))
method = "SBL2";
else
method = "SBLL";
@@ -209,74 +189,77 @@ static int set_lcd_level(int level)
break;
}
- vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via %s [%d]\n",
- method, level);
+ acpi_handle_debug(device->handle, "set lcd level via %s [%d]\n", method,
+ level);
- if (level < 0 || level >= fujitsu_bl->max_brightness)
+ if (level < 0 || level >= priv->max_brightness)
return -EINVAL;
- status = acpi_execute_simple_method(fujitsu_bl->acpi_handle, method,
- level);
+ status = acpi_execute_simple_method(device->handle, method, level);
if (ACPI_FAILURE(status)) {
- vdbg_printk(FUJLAPTOP_DBG_ERROR, "Failed to evaluate %s\n",
- method);
+ acpi_handle_err(device->handle, "Failed to evaluate %s\n",
+ method);
return -ENODEV;
}
- fujitsu_bl->brightness_level = level;
+ priv->brightness_level = level;
return 0;
}
-static int get_lcd_level(void)
+static int get_lcd_level(struct acpi_device *device)
{
+ struct fujitsu_bl *priv = acpi_driver_data(device);
unsigned long long state = 0;
acpi_status status = AE_OK;
- vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n");
+ acpi_handle_debug(device->handle, "get lcd level via GBLL\n");
- status = acpi_evaluate_integer(fujitsu_bl->acpi_handle, "GBLL", NULL,
- &state);
+ status = acpi_evaluate_integer(device->handle, "GBLL", NULL, &state);
if (ACPI_FAILURE(status))
return 0;
- fujitsu_bl->brightness_level = state & 0x0fffffff;
+ priv->brightness_level = state & 0x0fffffff;
- return fujitsu_bl->brightness_level;
+ return priv->brightness_level;
}
-static int get_max_brightness(void)
+static int get_max_brightness(struct acpi_device *device)
{
+ struct fujitsu_bl *priv = acpi_driver_data(device);
unsigned long long state = 0;
acpi_status status = AE_OK;
- vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n");
+ acpi_handle_debug(device->handle, "get max lcd level via RBLL\n");
- status = acpi_evaluate_integer(fujitsu_bl->acpi_handle, "RBLL", NULL,
- &state);
+ status = acpi_evaluate_integer(device->handle, "RBLL", NULL, &state);
if (ACPI_FAILURE(status))
return -1;
- fujitsu_bl->max_brightness = state;
+ priv->max_brightness = state;
- return fujitsu_bl->max_brightness;
+ return priv->max_brightness;
}
/* Backlight device stuff */
static int bl_get_brightness(struct backlight_device *b)
{
- return b->props.power == FB_BLANK_POWERDOWN ? 0 : get_lcd_level();
+ struct acpi_device *device = bl_get_data(b);
+
+ return b->props.power == FB_BLANK_POWERDOWN ? 0 : get_lcd_level(device);
}
static int bl_update_status(struct backlight_device *b)
{
+ struct acpi_device *device = bl_get_data(b);
+
if (b->props.power == FB_BLANK_POWERDOWN)
- call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
+ call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
else
- call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
+ call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
- return set_lcd_level(b->props.brightness);
+ return set_lcd_level(device, b->props.brightness);
}
static const struct backlight_ops fujitsu_bl_ops = {
@@ -287,9 +270,11 @@ static const struct backlight_ops fujitsu_bl_ops = {
static ssize_t lid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- if (!(fujitsu_laptop->flags_supported & FLAG_LID))
+ struct fujitsu_laptop *priv = dev_get_drvdata(dev);
+
+ if (!(priv->flags_supported & FLAG_LID))
return sprintf(buf, "unknown\n");
- if (fujitsu_laptop->flags_state & FLAG_LID)
+ if (priv->flags_state & FLAG_LID)
return sprintf(buf, "open\n");
else
return sprintf(buf, "closed\n");
@@ -298,9 +283,11 @@ static ssize_t lid_show(struct device *dev, struct device_attribute *attr,
static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- if (!(fujitsu_laptop->flags_supported & FLAG_DOCK))
+ struct fujitsu_laptop *priv = dev_get_drvdata(dev);
+
+ if (!(priv->flags_supported & FLAG_DOCK))
return sprintf(buf, "unknown\n");
- if (fujitsu_laptop->flags_state & FLAG_DOCK)
+ if (priv->flags_state & FLAG_DOCK)
return sprintf(buf, "docked\n");
else
return sprintf(buf, "undocked\n");
@@ -309,9 +296,11 @@ static ssize_t dock_show(struct device *dev, struct device_attribute *attr,
static ssize_t radios_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- if (!(fujitsu_laptop->flags_supported & FLAG_RFKILL))
+ struct fujitsu_laptop *priv = dev_get_drvdata(dev);
+
+ if (!(priv->flags_supported & FLAG_RFKILL))
return sprintf(buf, "unknown\n");
- if (fujitsu_laptop->flags_state & FLAG_RFKILL)
+ if (priv->flags_state & FLAG_RFKILL)
return sprintf(buf, "on\n");
else
return sprintf(buf, "killed\n");
@@ -348,89 +337,76 @@ static const struct key_entry keymap_backlight[] = {
static int acpi_fujitsu_bl_input_setup(struct acpi_device *device)
{
- struct fujitsu_bl *fujitsu_bl = acpi_driver_data(device);
+ struct fujitsu_bl *priv = acpi_driver_data(device);
int ret;
- fujitsu_bl->input = devm_input_allocate_device(&device->dev);
- if (!fujitsu_bl->input)
+ priv->input = devm_input_allocate_device(&device->dev);
+ if (!priv->input)
return -ENOMEM;
- snprintf(fujitsu_bl->phys, sizeof(fujitsu_bl->phys),
- "%s/video/input0", acpi_device_hid(device));
+ snprintf(priv->phys, sizeof(priv->phys), "%s/video/input0",
+ acpi_device_hid(device));
- fujitsu_bl->input->name = acpi_device_name(device);
- fujitsu_bl->input->phys = fujitsu_bl->phys;
- fujitsu_bl->input->id.bustype = BUS_HOST;
- fujitsu_bl->input->id.product = 0x06;
+ priv->input->name = acpi_device_name(device);
+ priv->input->phys = priv->phys;
+ priv->input->id.bustype = BUS_HOST;
+ priv->input->id.product = 0x06;
- ret = sparse_keymap_setup(fujitsu_bl->input, keymap_backlight, NULL);
+ ret = sparse_keymap_setup(priv->input, keymap_backlight, NULL);
if (ret)
return ret;
- return input_register_device(fujitsu_bl->input);
+ return input_register_device(priv->input);
}
static int fujitsu_backlight_register(struct acpi_device *device)
{
+ struct fujitsu_bl *priv = acpi_driver_data(device);
const struct backlight_properties props = {
- .brightness = fujitsu_bl->brightness_level,
- .max_brightness = fujitsu_bl->max_brightness - 1,
+ .brightness = priv->brightness_level,
+ .max_brightness = priv->max_brightness - 1,
.type = BACKLIGHT_PLATFORM
};
struct backlight_device *bd;
bd = devm_backlight_device_register(&device->dev, "fujitsu-laptop",
- &device->dev, NULL,
+ &device->dev, device,
&fujitsu_bl_ops, &props);
if (IS_ERR(bd))
return PTR_ERR(bd);
- fujitsu_bl->bl_device = bd;
+ priv->bl_device = bd;
return 0;
}
static int acpi_fujitsu_bl_add(struct acpi_device *device)
{
- int state = 0;
+ struct fujitsu_bl *priv;
int error;
if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
return -ENODEV;
- if (!device)
- return -EINVAL;
+ priv = devm_kzalloc(&device->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- fujitsu_bl->acpi_handle = device->handle;
- sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_BL_DEVICE_NAME);
- sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
- device->driver_data = fujitsu_bl;
+ fujitsu_bl = priv;
+ strcpy(acpi_device_name(device), ACPI_FUJITSU_BL_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_FUJITSU_CLASS);
+ device->driver_data = priv;
error = acpi_fujitsu_bl_input_setup(device);
if (error)
return error;
- error = acpi_bus_update_power(fujitsu_bl->acpi_handle, &state);
- if (error) {
- pr_err("Error reading power state\n");
- return error;
- }
-
- pr_info("ACPI: %s [%s] (%s)\n",
- acpi_device_name(device), acpi_device_bid(device),
- !device->power.state ? "on" : "off");
+ pr_info("ACPI: %s [%s]\n",
+ acpi_device_name(device), acpi_device_bid(device));
- if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
- vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
- if (ACPI_FAILURE
- (acpi_evaluate_object
- (device->handle, METHOD_NAME__INI, NULL, NULL)))
- pr_err("_INI Method failed\n");
- }
-
- if (get_max_brightness() <= 0)
- fujitsu_bl->max_brightness = FUJITSU_LCD_N_LEVELS;
- get_lcd_level();
+ if (get_max_brightness(device) <= 0)
+ priv->max_brightness = FUJITSU_LCD_N_LEVELS;
+ get_lcd_level(device);
error = fujitsu_backlight_register(device);
if (error)
@@ -443,32 +419,30 @@ static int acpi_fujitsu_bl_add(struct acpi_device *device)
static void acpi_fujitsu_bl_notify(struct acpi_device *device, u32 event)
{
- struct input_dev *input;
+ struct fujitsu_bl *priv = acpi_driver_data(device);
int oldb, newb;
- input = fujitsu_bl->input;
-
if (event != ACPI_FUJITSU_NOTIFY_CODE1) {
- vdbg_printk(FUJLAPTOP_DBG_WARN,
- "unsupported event [0x%x]\n", event);
- sparse_keymap_report_event(input, -1, 1, true);
+ acpi_handle_info(device->handle, "unsupported event [0x%x]\n",
+ event);
+ sparse_keymap_report_event(priv->input, -1, 1, true);
return;
}
- oldb = fujitsu_bl->brightness_level;
- get_lcd_level();
- newb = fujitsu_bl->brightness_level;
+ oldb = priv->brightness_level;
+ get_lcd_level(device);
+ newb = priv->brightness_level;
- vdbg_printk(FUJLAPTOP_DBG_TRACE, "brightness button event [%i -> %i]\n",
- oldb, newb);
+ acpi_handle_debug(device->handle,
+ "brightness button event [%i -> %i]\n", oldb, newb);
if (oldb == newb)
return;
if (!disable_brightness_adjust)
- set_lcd_level(newb);
+ set_lcd_level(device, newb);
- sparse_keymap_report_event(input, oldb < newb, 1, true);
+ sparse_keymap_report_event(priv->input, oldb < newb, 1, true);
}
/* ACPI device for hotkey handling */
@@ -541,42 +515,44 @@ static const struct dmi_system_id fujitsu_laptop_dmi_table[] = {
static int acpi_fujitsu_laptop_input_setup(struct acpi_device *device)
{
- struct fujitsu_laptop *fujitsu_laptop = acpi_driver_data(device);
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
int ret;
- fujitsu_laptop->input = devm_input_allocate_device(&device->dev);
- if (!fujitsu_laptop->input)
+ priv->input = devm_input_allocate_device(&device->dev);
+ if (!priv->input)
return -ENOMEM;
- snprintf(fujitsu_laptop->phys, sizeof(fujitsu_laptop->phys),
- "%s/video/input0", acpi_device_hid(device));
+ snprintf(priv->phys, sizeof(priv->phys), "%s/input0",
+ acpi_device_hid(device));
- fujitsu_laptop->input->name = acpi_device_name(device);
- fujitsu_laptop->input->phys = fujitsu_laptop->phys;
- fujitsu_laptop->input->id.bustype = BUS_HOST;
- fujitsu_laptop->input->id.product = 0x06;
+ priv->input->name = acpi_device_name(device);
+ priv->input->phys = priv->phys;
+ priv->input->id.bustype = BUS_HOST;
dmi_check_system(fujitsu_laptop_dmi_table);
- ret = sparse_keymap_setup(fujitsu_laptop->input, keymap, NULL);
+ ret = sparse_keymap_setup(priv->input, keymap, NULL);
if (ret)
return ret;
- return input_register_device(fujitsu_laptop->input);
+ return input_register_device(priv->input);
}
-static int fujitsu_laptop_platform_add(void)
+static int fujitsu_laptop_platform_add(struct acpi_device *device)
{
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
int ret;
- fujitsu_laptop->pf_device = platform_device_alloc("fujitsu-laptop", -1);
- if (!fujitsu_laptop->pf_device)
+ priv->pf_device = platform_device_alloc("fujitsu-laptop", -1);
+ if (!priv->pf_device)
return -ENOMEM;
- ret = platform_device_add(fujitsu_laptop->pf_device);
+ platform_set_drvdata(priv->pf_device, priv);
+
+ ret = platform_device_add(priv->pf_device);
if (ret)
goto err_put_platform_device;
- ret = sysfs_create_group(&fujitsu_laptop->pf_device->dev.kobj,
+ ret = sysfs_create_group(&priv->pf_device->dev.kobj,
&fujitsu_pf_attribute_group);
if (ret)
goto err_del_platform_device;
@@ -584,23 +560,26 @@ static int fujitsu_laptop_platform_add(void)
return 0;
err_del_platform_device:
- platform_device_del(fujitsu_laptop->pf_device);
+ platform_device_del(priv->pf_device);
err_put_platform_device:
- platform_device_put(fujitsu_laptop->pf_device);
+ platform_device_put(priv->pf_device);
return ret;
}
-static void fujitsu_laptop_platform_remove(void)
+static void fujitsu_laptop_platform_remove(struct acpi_device *device)
{
- sysfs_remove_group(&fujitsu_laptop->pf_device->dev.kobj,
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
+
+ sysfs_remove_group(&priv->pf_device->dev.kobj,
&fujitsu_pf_attribute_group);
- platform_device_unregister(fujitsu_laptop->pf_device);
+ platform_device_unregister(priv->pf_device);
}
static int logolamp_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
+ struct acpi_device *device = to_acpi_device(cdev->dev->parent);
int poweron = FUNC_LED_ON, always = FUNC_LED_ON;
int ret;
@@ -610,132 +589,128 @@ static int logolamp_set(struct led_classdev *cdev,
if (brightness < LED_FULL)
always = FUNC_LED_OFF;
- ret = call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_POWERON, poweron);
+ ret = call_fext_func(device, FUNC_LEDS, 0x1, LOGOLAMP_POWERON, poweron);
if (ret < 0)
return ret;
- return call_fext_func(FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, always);
+ return call_fext_func(device, FUNC_LEDS, 0x1, LOGOLAMP_ALWAYS, always);
}
static enum led_brightness logolamp_get(struct led_classdev *cdev)
{
+ struct acpi_device *device = to_acpi_device(cdev->dev->parent);
int ret;
- ret = call_fext_func(FUNC_LEDS, 0x2, LOGOLAMP_ALWAYS, 0x0);
+ ret = call_fext_func(device, FUNC_LEDS, 0x2, LOGOLAMP_ALWAYS, 0x0);
if (ret == FUNC_LED_ON)
return LED_FULL;
- ret = call_fext_func(FUNC_LEDS, 0x2, LOGOLAMP_POWERON, 0x0);
+ ret = call_fext_func(device, FUNC_LEDS, 0x2, LOGOLAMP_POWERON, 0x0);
if (ret == FUNC_LED_ON)
return LED_HALF;
return LED_OFF;
}
-static struct led_classdev logolamp_led = {
- .name = "fujitsu::logolamp",
- .brightness_set_blocking = logolamp_set,
- .brightness_get = logolamp_get
-};
-
static int kblamps_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
+ struct acpi_device *device = to_acpi_device(cdev->dev->parent);
+
if (brightness >= LED_FULL)
- return call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS,
+ return call_fext_func(device, FUNC_LEDS, 0x1, KEYBOARD_LAMPS,
FUNC_LED_ON);
else
- return call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS,
+ return call_fext_func(device, FUNC_LEDS, 0x1, KEYBOARD_LAMPS,
FUNC_LED_OFF);
}
static enum led_brightness kblamps_get(struct led_classdev *cdev)
{
+ struct acpi_device *device = to_acpi_device(cdev->dev->parent);
enum led_brightness brightness = LED_OFF;
- if (call_fext_func(FUNC_LEDS, 0x2, KEYBOARD_LAMPS, 0x0) == FUNC_LED_ON)
+ if (call_fext_func(device,
+ FUNC_LEDS, 0x2, KEYBOARD_LAMPS, 0x0) == FUNC_LED_ON)
brightness = LED_FULL;
return brightness;
}
-static struct led_classdev kblamps_led = {
- .name = "fujitsu::kblamps",
- .brightness_set_blocking = kblamps_set,
- .brightness_get = kblamps_get
-};
-
static int radio_led_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
+ struct acpi_device *device = to_acpi_device(cdev->dev->parent);
+
if (brightness >= LED_FULL)
- return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON,
+ return call_fext_func(device, FUNC_FLAGS, 0x5, RADIO_LED_ON,
RADIO_LED_ON);
else
- return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, 0x0);
+ return call_fext_func(device, FUNC_FLAGS, 0x5, RADIO_LED_ON,
+ 0x0);
}
static enum led_brightness radio_led_get(struct led_classdev *cdev)
{
+ struct acpi_device *device = to_acpi_device(cdev->dev->parent);
enum led_brightness brightness = LED_OFF;
- if (call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0) & RADIO_LED_ON)
+ if (call_fext_func(device, FUNC_FLAGS, 0x4, 0x0, 0x0) & RADIO_LED_ON)
brightness = LED_FULL;
return brightness;
}
-static struct led_classdev radio_led = {
- .name = "fujitsu::radio_led",
- .brightness_set_blocking = radio_led_set,
- .brightness_get = radio_led_get,
- .default_trigger = "rfkill-any"
-};
-
static int eco_led_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
+ struct acpi_device *device = to_acpi_device(cdev->dev->parent);
int curr;
- curr = call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0);
+ curr = call_fext_func(device, FUNC_LEDS, 0x2, ECO_LED, 0x0);
if (brightness >= LED_FULL)
- return call_fext_func(FUNC_LEDS, 0x1, ECO_LED,
+ return call_fext_func(device, FUNC_LEDS, 0x1, ECO_LED,
curr | ECO_LED_ON);
else
- return call_fext_func(FUNC_LEDS, 0x1, ECO_LED,
+ return call_fext_func(device, FUNC_LEDS, 0x1, ECO_LED,
curr & ~ECO_LED_ON);
}
static enum led_brightness eco_led_get(struct led_classdev *cdev)
{
+ struct acpi_device *device = to_acpi_device(cdev->dev->parent);
enum led_brightness brightness = LED_OFF;
- if (call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) & ECO_LED_ON)
+ if (call_fext_func(device, FUNC_LEDS, 0x2, ECO_LED, 0x0) & ECO_LED_ON)
brightness = LED_FULL;
return brightness;
}
-static struct led_classdev eco_led = {
- .name = "fujitsu::eco_led",
- .brightness_set_blocking = eco_led_set,
- .brightness_get = eco_led_get
-};
-
static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device)
{
+ struct led_classdev *led;
int result;
- if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
- result = devm_led_classdev_register(&device->dev,
- &logolamp_led);
+ if (call_fext_func(device,
+ FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
+ led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
+ led->name = "fujitsu::logolamp";
+ led->brightness_set_blocking = logolamp_set;
+ led->brightness_get = logolamp_get;
+ result = devm_led_classdev_register(&device->dev, led);
if (result)
return result;
}
- if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) &&
- (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) {
- result = devm_led_classdev_register(&device->dev, &kblamps_led);
+ if ((call_fext_func(device,
+ FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) &&
+ (call_fext_func(device, FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) {
+ led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
+ led->name = "fujitsu::kblamps";
+ led->brightness_set_blocking = kblamps_set;
+ led->brightness_get = kblamps_get;
+ result = devm_led_classdev_register(&device->dev, led);
if (result)
return result;
}
@@ -746,8 +721,13 @@ static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device)
* to also have an RF LED. Therefore use bit 24 as an indicator
* that an RF LED is present.
*/
- if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
- result = devm_led_classdev_register(&device->dev, &radio_led);
+ if (call_fext_func(device, FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
+ led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
+ led->name = "fujitsu::radio_led";
+ led->brightness_set_blocking = radio_led_set;
+ led->brightness_get = radio_led_get;
+ led->default_trigger = "rfkill-any";
+ result = devm_led_classdev_register(&device->dev, led);
if (result)
return result;
}
@@ -757,9 +737,14 @@ static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device)
* bit 14 seems to indicate presence of said led as well.
* Confirm by testing the status.
*/
- if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & BIT(14)) &&
- (call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) {
- result = devm_led_classdev_register(&device->dev, &eco_led);
+ if ((call_fext_func(device, FUNC_LEDS, 0x0, 0x0, 0x0) & BIT(14)) &&
+ (call_fext_func(device,
+ FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) {
+ led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL);
+ led->name = "fujitsu::eco_led";
+ led->brightness_set_blocking = eco_led_set;
+ led->brightness_get = eco_led_get;
+ result = devm_led_classdev_register(&device->dev, led);
if (result)
return result;
}
@@ -769,23 +754,25 @@ static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device)
static int acpi_fujitsu_laptop_add(struct acpi_device *device)
{
- int state = 0;
+ struct fujitsu_laptop *priv;
int error;
int i;
- if (!device)
- return -EINVAL;
+ priv = devm_kzalloc(&device->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ WARN_ONCE(fext, "More than one FUJ02E3 ACPI device was found. Driver may not work as intended.");
+ fext = device;
- fujitsu_laptop->acpi_handle = device->handle;
- sprintf(acpi_device_name(device), "%s",
- ACPI_FUJITSU_LAPTOP_DEVICE_NAME);
- sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
- device->driver_data = fujitsu_laptop;
+ strcpy(acpi_device_name(device), ACPI_FUJITSU_LAPTOP_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ACPI_FUJITSU_CLASS);
+ device->driver_data = priv;
/* kfifo */
- spin_lock_init(&fujitsu_laptop->fifo_lock);
- error = kfifo_alloc(&fujitsu_laptop->fifo, RINGBUFFERSIZE * sizeof(int),
- GFP_KERNEL);
+ spin_lock_init(&priv->fifo_lock);
+ error = kfifo_alloc(&priv->fifo, RINGBUFFERSIZE * sizeof(int),
+ GFP_KERNEL);
if (error) {
pr_err("kfifo_alloc failed\n");
goto err_stop;
@@ -795,51 +782,36 @@ static int acpi_fujitsu_laptop_add(struct acpi_device *device)
if (error)
goto err_free_fifo;
- error = acpi_bus_update_power(fujitsu_laptop->acpi_handle, &state);
- if (error) {
- pr_err("Error reading power state\n");
- goto err_free_fifo;
- }
-
- pr_info("ACPI: %s [%s] (%s)\n",
- acpi_device_name(device), acpi_device_bid(device),
- !device->power.state ? "on" : "off");
-
- fujitsu_laptop->dev = device;
-
- if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
- vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
- if (ACPI_FAILURE
- (acpi_evaluate_object
- (device->handle, METHOD_NAME__INI, NULL, NULL)))
- pr_err("_INI Method failed\n");
- }
+ pr_info("ACPI: %s [%s]\n",
+ acpi_device_name(device), acpi_device_bid(device));
i = 0;
- while (call_fext_func(FUNC_BUTTONS, 0x1, 0x0, 0x0) != 0
+ while (call_fext_func(device, FUNC_BUTTONS, 0x1, 0x0, 0x0) != 0
&& (i++) < MAX_HOTKEY_RINGBUFFER_SIZE)
; /* No action, result is discarded */
- vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i);
+ acpi_handle_debug(device->handle, "Discarded %i ringbuffer entries\n",
+ i);
- fujitsu_laptop->flags_supported =
- call_fext_func(FUNC_FLAGS, 0x0, 0x0, 0x0);
+ priv->flags_supported = call_fext_func(device, FUNC_FLAGS, 0x0, 0x0,
+ 0x0);
/* Make sure our bitmask of supported functions is cleared if the
RFKILL function block is not implemented, like on the S7020. */
- if (fujitsu_laptop->flags_supported == UNSUPPORTED_CMD)
- fujitsu_laptop->flags_supported = 0;
+ if (priv->flags_supported == UNSUPPORTED_CMD)
+ priv->flags_supported = 0;
- if (fujitsu_laptop->flags_supported)
- fujitsu_laptop->flags_state =
- call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0);
+ if (priv->flags_supported)
+ priv->flags_state = call_fext_func(device, FUNC_FLAGS, 0x4, 0x0,
+ 0x0);
/* Suspect this is a keymap of the application panel, print it */
- pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
+ acpi_handle_info(device->handle, "BTNI: [0x%x]\n",
+ call_fext_func(device, FUNC_BUTTONS, 0x0, 0x0, 0x0));
/* Sync backlight power status */
- if (fujitsu_bl->bl_device &&
+ if (fujitsu_bl && fujitsu_bl->bl_device &&
acpi_video_get_backlight_type() == acpi_backlight_vendor) {
- if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3)
+ if (call_fext_func(fext, FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3)
fujitsu_bl->bl_device->props.power = FB_BLANK_POWERDOWN;
else
fujitsu_bl->bl_device->props.power = FB_BLANK_UNBLANK;
@@ -849,103 +821,100 @@ static int acpi_fujitsu_laptop_add(struct acpi_device *device)
if (error)
goto err_free_fifo;
- error = fujitsu_laptop_platform_add();
+ error = fujitsu_laptop_platform_add(device);
if (error)
goto err_free_fifo;
return 0;
err_free_fifo:
- kfifo_free(&fujitsu_laptop->fifo);
+ kfifo_free(&priv->fifo);
err_stop:
return error;
}
static int acpi_fujitsu_laptop_remove(struct acpi_device *device)
{
- struct fujitsu_laptop *fujitsu_laptop = acpi_driver_data(device);
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
- fujitsu_laptop_platform_remove();
+ fujitsu_laptop_platform_remove(device);
- kfifo_free(&fujitsu_laptop->fifo);
+ kfifo_free(&priv->fifo);
return 0;
}
-static void acpi_fujitsu_laptop_press(int scancode)
+static void acpi_fujitsu_laptop_press(struct acpi_device *device, int scancode)
{
- struct input_dev *input = fujitsu_laptop->input;
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
int status;
- status = kfifo_in_locked(&fujitsu_laptop->fifo,
- (unsigned char *)&scancode, sizeof(scancode),
- &fujitsu_laptop->fifo_lock);
+ status = kfifo_in_locked(&priv->fifo, (unsigned char *)&scancode,
+ sizeof(scancode), &priv->fifo_lock);
if (status != sizeof(scancode)) {
- vdbg_printk(FUJLAPTOP_DBG_WARN,
- "Could not push scancode [0x%x]\n", scancode);
+ dev_info(&priv->input->dev, "Could not push scancode [0x%x]\n",
+ scancode);
return;
}
- sparse_keymap_report_event(input, scancode, 1, false);
- vdbg_printk(FUJLAPTOP_DBG_TRACE,
- "Push scancode into ringbuffer [0x%x]\n", scancode);
+ sparse_keymap_report_event(priv->input, scancode, 1, false);
+ dev_dbg(&priv->input->dev, "Push scancode into ringbuffer [0x%x]\n",
+ scancode);
}
-static void acpi_fujitsu_laptop_release(void)
+static void acpi_fujitsu_laptop_release(struct acpi_device *device)
{
- struct input_dev *input = fujitsu_laptop->input;
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
int scancode, status;
while (true) {
- status = kfifo_out_locked(&fujitsu_laptop->fifo,
+ status = kfifo_out_locked(&priv->fifo,
(unsigned char *)&scancode,
- sizeof(scancode),
- &fujitsu_laptop->fifo_lock);
+ sizeof(scancode), &priv->fifo_lock);
if (status != sizeof(scancode))
return;
- sparse_keymap_report_event(input, scancode, 0, false);
- vdbg_printk(FUJLAPTOP_DBG_TRACE,
- "Pop scancode from ringbuffer [0x%x]\n", scancode);
+ sparse_keymap_report_event(priv->input, scancode, 0, false);
+ dev_dbg(&priv->input->dev,
+ "Pop scancode from ringbuffer [0x%x]\n", scancode);
}
}
static void acpi_fujitsu_laptop_notify(struct acpi_device *device, u32 event)
{
- struct input_dev *input;
+ struct fujitsu_laptop *priv = acpi_driver_data(device);
int scancode, i = 0;
unsigned int irb;
- input = fujitsu_laptop->input;
-
if (event != ACPI_FUJITSU_NOTIFY_CODE1) {
- vdbg_printk(FUJLAPTOP_DBG_WARN,
- "Unsupported event [0x%x]\n", event);
- sparse_keymap_report_event(input, -1, 1, true);
+ acpi_handle_info(device->handle, "Unsupported event [0x%x]\n",
+ event);
+ sparse_keymap_report_event(priv->input, -1, 1, true);
return;
}
- if (fujitsu_laptop->flags_supported)
- fujitsu_laptop->flags_state =
- call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0);
+ if (priv->flags_supported)
+ priv->flags_state = call_fext_func(device, FUNC_FLAGS, 0x4, 0x0,
+ 0x0);
- while ((irb = call_fext_func(FUNC_BUTTONS, 0x1, 0x0, 0x0)) != 0 &&
+ while ((irb = call_fext_func(device,
+ FUNC_BUTTONS, 0x1, 0x0, 0x0)) != 0 &&
i++ < MAX_HOTKEY_RINGBUFFER_SIZE) {
scancode = irb & 0x4ff;
- if (sparse_keymap_entry_from_scancode(input, scancode))
- acpi_fujitsu_laptop_press(scancode);
+ if (sparse_keymap_entry_from_scancode(priv->input, scancode))
+ acpi_fujitsu_laptop_press(device, scancode);
else if (scancode == 0)
- acpi_fujitsu_laptop_release();
+ acpi_fujitsu_laptop_release(device);
else
- vdbg_printk(FUJLAPTOP_DBG_WARN,
- "Unknown GIRB result [%x]\n", irb);
+ acpi_handle_info(device->handle,
+ "Unknown GIRB result [%x]\n", irb);
}
/* On some models (first seen on the Skylake-based Lifebook
* E736/E746/E756), the touchpad toggle hotkey (Fn+F4) is
* handled in software; its state is queried using FUNC_FLAGS
*/
- if ((fujitsu_laptop->flags_supported & BIT(26)) &&
- (call_fext_func(FUNC_FLAGS, 0x1, 0x0, 0x0) & BIT(26)))
- sparse_keymap_report_event(input, BIT(26), 1, true);
+ if ((priv->flags_supported & BIT(26)) &&
+ (call_fext_func(device, FUNC_FLAGS, 0x1, 0x0, 0x0) & BIT(26)))
+ sparse_keymap_report_event(priv->input, BIT(26), 1, true);
}
/* Initialization */
@@ -992,16 +961,9 @@ static int __init fujitsu_init(void)
{
int ret;
- if (acpi_disabled)
- return -ENODEV;
-
- fujitsu_bl = kzalloc(sizeof(struct fujitsu_bl), GFP_KERNEL);
- if (!fujitsu_bl)
- return -ENOMEM;
-
ret = acpi_bus_register_driver(&acpi_fujitsu_bl_driver);
if (ret)
- goto err_free_fujitsu_bl;
+ return ret;
/* Register platform stuff */
@@ -1011,28 +973,18 @@ static int __init fujitsu_init(void)
/* Register laptop driver */
- fujitsu_laptop = kzalloc(sizeof(struct fujitsu_laptop), GFP_KERNEL);
- if (!fujitsu_laptop) {
- ret = -ENOMEM;
- goto err_unregister_platform_driver;
- }
-
ret = acpi_bus_register_driver(&acpi_fujitsu_laptop_driver);
if (ret)
- goto err_free_fujitsu_laptop;
+ goto err_unregister_platform_driver;
pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");
return 0;
-err_free_fujitsu_laptop:
- kfree(fujitsu_laptop);
err_unregister_platform_driver:
platform_driver_unregister(&fujitsu_pf_driver);
err_unregister_acpi:
acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
-err_free_fujitsu_bl:
- kfree(fujitsu_bl);
return ret;
}
@@ -1041,14 +993,10 @@ static void __exit fujitsu_cleanup(void)
{
acpi_bus_unregister_driver(&acpi_fujitsu_laptop_driver);
- kfree(fujitsu_laptop);
-
platform_driver_unregister(&fujitsu_pf_driver);
acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
- kfree(fujitsu_bl);
-
pr_info("driver unloaded\n");
}
@@ -1059,10 +1007,6 @@ module_param(use_alt_lcd_levels, int, 0644);
MODULE_PARM_DESC(use_alt_lcd_levels, "Interface used for setting LCD brightness level (-1 = auto, 0 = force SBLL, 1 = force SBL2)");
module_param(disable_brightness_adjust, bool, 0644);
MODULE_PARM_DESC(disable_brightness_adjust, "Disable LCD brightness adjustment");
-#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
-module_param_named(debug, dbg_level, uint, 0644);
-MODULE_PARM_DESC(debug, "Sets debug level bit-mask");
-#endif
MODULE_AUTHOR("Jonathan Woithe, Peter Gruber, Tony Vroon");
MODULE_DESCRIPTION("Fujitsu laptop extras support");
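The fujitsu-laptop rework above replaces the module-global fujitsu_bl/fujitsu_laptop pointers and the driver-private vdbg_printk() macro with per-device private data (devm_kzalloc() plus device->driver_data) and the standard acpi_handle_debug()/acpi_handle_err() helpers. The sketch below condenses that pattern into a minimal ACPI driver; the example_* names and the callback bodies are illustrative only, and only the FUJ02E3 ID is taken from the diff (it appears in the WARN_ONCE() message above).

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>

struct example_priv {
	int flags_state;	/* per-device state, no module-wide globals */
};

static int example_add(struct acpi_device *device)
{
	struct example_priv *priv;

	priv = devm_kzalloc(&device->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	device->driver_data = priv;	/* retrievable via acpi_driver_data() */
	acpi_handle_debug(device->handle, "example device added\n");
	return 0;
}

static void example_notify(struct acpi_device *device, u32 event)
{
	struct example_priv *priv = acpi_driver_data(device);

	priv->flags_state = event;
	acpi_handle_debug(device->handle, "event 0x%x\n", event);
}

static const struct acpi_device_id example_ids[] = {
	{ "FUJ02E3", 0 },	/* ID taken from the WARN_ONCE() message above */
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, example_ids);

static struct acpi_driver example_driver = {
	.name = "example",
	.class = "example",
	.ids = example_ids,
	.ops = {
		.add = example_add,
		.notify = example_notify,
	},
};
module_acpi_driver(example_driver);
MODULE_LICENSE("GPL");

With dynamic debug enabled, acpi_handle_debug() output can be switched on per call site at run time, which is what makes the old compile-time FUJLAPTOP_DBG_* levels and the debug module parameter unnecessary.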
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 24ca9fbe31cc..527e5d9ab9bf 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -423,9 +423,43 @@ static ssize_t store_ideapad_fan(struct device *dev,
static DEVICE_ATTR(fan_mode, 0644, show_ideapad_fan, store_ideapad_fan);
+static ssize_t touchpad_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ unsigned long result;
+
+ if (read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &result))
+ return sprintf(buf, "-1\n");
+ return sprintf(buf, "%lu\n", result);
+}
+
+/* Switch to RO for now: It might be revisited in the future */
+static ssize_t __maybe_unused touchpad_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ideapad_private *priv = dev_get_drvdata(dev);
+ bool state;
+ int ret;
+
+ ret = kstrtobool(buf, &state);
+ if (ret)
+ return ret;
+
+ ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, state);
+ if (ret < 0)
+ return -EIO;
+ return count;
+}
+
+static DEVICE_ATTR_RO(touchpad);
+
static struct attribute *ideapad_attributes[] = {
&dev_attr_camera_power.attr,
&dev_attr_fan_mode.attr,
+ &dev_attr_touchpad.attr,
NULL
};
@@ -478,7 +512,7 @@ static int ideapad_rfk_set(void *data, bool blocked)
return write_ec_cmd(priv->priv->adev->handle, opcode, !blocked);
}
-static struct rfkill_ops ideapad_rfk_ops = {
+static const struct rfkill_ops ideapad_rfk_ops = {
.set_block = ideapad_rfk_set,
};
@@ -810,7 +844,6 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
case 8:
case 7:
case 6:
- case 1:
ideapad_input_report(priv, vpc_bit);
break;
case 5:
@@ -828,6 +861,13 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
case 0:
ideapad_check_special_buttons(priv);
break;
+ case 1:
+ /* Some IdeaPads report event 1 every ~20
+ * seconds while on battery power; some
+ * report this when changing to/from tablet
+ * mode. Squelch this event.
+ */
+ break;
default:
pr_info("Unknown event: %lu\n", vpc_bit);
}
@@ -911,6 +951,20 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo Legion Y520-15IKBN",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKBN"),
+ },
+ },
+ {
+ .ident = "Lenovo Legion Y720-15IKBN",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y720-15IKBN"),
+ },
+ },
+ {
.ident = "Lenovo Yoga 2 11 / 13 / Pro",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
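The two new entries above extend ideapad-laptop's no_hw_rfkill quirk list for recent Legion models. As a reminder of how such a list is consumed — a generic sketch, not ideapad-laptop's actual probe code — the driver matches the running machine with dmi_check_system(), which returns the number of entries that matched:

#include <linux/dmi.h>
#include <linux/kernel.h>

static const struct dmi_system_id example_quirk_list[] = {
	{
		.ident = "Example laptop without a HW rfkill switch",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "Example 15XYZ"),
		},
	},
	{ }
};

static bool example_has_hw_rfkill(void)
{
	/* dmi_check_system() returns the number of matched entries */
	return dmi_check_system(example_quirk_list) == 0;
}

Quirk entries usually match on both the vendor and a product string, as the added Legion entries do, to avoid catching unrelated machines.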
diff --git a/drivers/platform/x86/intel_bxtwc_tmu.c b/drivers/platform/x86/intel_bxtwc_tmu.c
index e202abd5b0df..ea865d4ca220 100644
--- a/drivers/platform/x86/intel_bxtwc_tmu.c
+++ b/drivers/platform/x86/intel_bxtwc_tmu.c
@@ -92,10 +92,6 @@ static int bxt_wcove_tmu_probe(struct platform_device *pdev)
}
wctmu->irq = virq;
- /* Enable TMU interrupts */
- regmap_update_bits(wctmu->regmap, BXTWC_MIRQLVL1,
- BXTWC_MIRQLVL1_MTMU, 0);
-
/* Unmask TMU second level Wake & System alarm */
regmap_update_bits(wctmu->regmap, BXTWC_MTMUIRQ_REG,
BXTWC_TMU_ALRM_MASK, 0);
diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c
index 6a1b2ca5b6fe..da706e2c4232 100644
--- a/drivers/platform/x86/intel_cht_int33fe.c
+++ b/drivers/platform/x86/intel_cht_int33fe.c
@@ -34,6 +34,13 @@ struct cht_int33fe_data {
struct i2c_client *pi3usb30532;
};
+static const char * const max17047_suppliers[] = { "bq24190-charger" };
+
+static const struct property_entry max17047_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", max17047_suppliers),
+ { }
+};
+
static int cht_int33fe_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
@@ -70,6 +77,7 @@ static int cht_int33fe_probe(struct i2c_client *client)
memset(&board_info, 0, sizeof(board_info));
strlcpy(board_info.type, "max17047", I2C_NAME_SIZE);
+ board_info.properties = max17047_props;
data->max17047 = i2c_acpi_new_device(dev, 1, &board_info);
if (!data->max17047)
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index cbe01021c939..ef9b0af8cdd3 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -142,7 +142,7 @@ static int memory_set_cur_bandwidth(struct thermal_cooling_device *cdev,
return 0;
}
-static struct thermal_cooling_device_ops memory_cooling_ops = {
+static const struct thermal_cooling_device_ops memory_cooling_ops = {
.get_max_state = memory_get_max_bandwidth,
.get_cur_state = memory_get_cur_bandwidth,
.set_cur_state = memory_set_cur_bandwidth,
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index e4d4dfe3e1d1..bb792a52248b 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -186,7 +186,7 @@ static inline void ipc_data_writel(u32 data, u32 offset)
writel(data, ipcdev.ipc_base + IPC_WRITE_BUFFER + offset);
}
-static inline u8 ipc_data_readb(u32 offset)
+static inline u8 __maybe_unused ipc_data_readb(u32 offset)
{
return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
}
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 9e90827c176a..61b9014d2610 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -563,11 +563,11 @@ static struct attribute *msipf_old_attributes[] = {
NULL
};
-static struct attribute_group msipf_attribute_group = {
+static const struct attribute_group msipf_attribute_group = {
.attrs = msipf_attributes
};
-static struct attribute_group msipf_old_attribute_group = {
+static const struct attribute_group msipf_old_attribute_group = {
.attrs = msipf_old_attributes
};
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 975f4e100dbd..76b0a58e205b 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -228,10 +228,6 @@ struct pcc_acpi {
struct backlight_device *backlight;
};
-struct pcc_keyinput {
- struct acpi_hotkey *hotkey;
-};
-
/* method access functions */
static int acpi_pcc_write_sset(struct pcc_acpi *pcc, int func, int val)
{
diff --git a/drivers/platform/x86/peaq-wmi.c b/drivers/platform/x86/peaq-wmi.c
new file mode 100644
index 000000000000..ca75b4dc437e
--- /dev/null
+++ b/drivers/platform/x86/peaq-wmi.c
@@ -0,0 +1,100 @@
+/*
+ * PEAQ 2-in-1 WMI hotkey driver
+ * Copyright (C) 2017 Hans de Goede <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/input-polldev.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define PEAQ_DOLBY_BUTTON_GUID "ABBC0F6F-8EA1-11D1-00A0-C90629100000"
+#define PEAQ_DOLBY_BUTTON_METHOD_ID 5
+#define PEAQ_POLL_INTERVAL_MS 250
+#define PEAQ_POLL_IGNORE_MS 500
+#define PEAQ_POLL_MAX_MS 1000
+
+MODULE_ALIAS("wmi:"PEAQ_DOLBY_BUTTON_GUID);
+
+static unsigned int peaq_ignore_events_counter;
+static struct input_polled_dev *peaq_poll_dev;
+
+/*
+ * The Dolby button (yes really a Dolby button) causes an ACPI variable to get
+ * set on both press and release. The WMI method checks and clears that flag.
+ * So for a press + release we will get back 1 from the WMI method either once
+ * (if we poll after the release) or twice (if we poll between press and release).
+ * We ignore events for 0.5s after the first event to avoid reporting 2 presses.
+ */
+static void peaq_wmi_poll(struct input_polled_dev *dev)
+{
+ union acpi_object obj;
+ acpi_status status;
+ u32 dummy = 0;
+
+ struct acpi_buffer input = { sizeof(dummy), &dummy };
+ struct acpi_buffer output = { sizeof(obj), &obj };
+
+ status = wmi_evaluate_method(PEAQ_DOLBY_BUTTON_GUID, 1,
+ PEAQ_DOLBY_BUTTON_METHOD_ID,
+ &input, &output);
+ if (ACPI_FAILURE(status))
+ return;
+
+ if (obj.type != ACPI_TYPE_INTEGER) {
+ dev_err(&peaq_poll_dev->input->dev,
+ "Error WMBC did not return an integer\n");
+ return;
+ }
+
+ if (peaq_ignore_events_counter && --peaq_ignore_events_counter > 0)
+ return;
+
+ if (obj.integer.value) {
+ input_event(peaq_poll_dev->input, EV_KEY, KEY_SOUND, 1);
+ input_sync(peaq_poll_dev->input);
+ input_event(peaq_poll_dev->input, EV_KEY, KEY_SOUND, 0);
+ input_sync(peaq_poll_dev->input);
+ peaq_ignore_events_counter = max(1u,
+ PEAQ_POLL_IGNORE_MS / peaq_poll_dev->poll_interval);
+ }
+}
+
+static int __init peaq_wmi_init(void)
+{
+ if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID))
+ return -ENODEV;
+
+ peaq_poll_dev = input_allocate_polled_device();
+ if (!peaq_poll_dev)
+ return -ENOMEM;
+
+ peaq_poll_dev->poll = peaq_wmi_poll;
+ peaq_poll_dev->poll_interval = PEAQ_POLL_INTERVAL_MS;
+ peaq_poll_dev->poll_interval_max = PEAQ_POLL_MAX_MS;
+ peaq_poll_dev->input->name = "PEAQ WMI hotkeys";
+ peaq_poll_dev->input->phys = "wmi/input0";
+ peaq_poll_dev->input->id.bustype = BUS_HOST;
+ input_set_capability(peaq_poll_dev->input, EV_KEY, KEY_SOUND);
+
+ return input_register_polled_device(peaq_poll_dev);
+}
+
+static void __exit peaq_wmi_exit(void)
+{
+ if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID))
+ return;
+
+ input_unregister_polled_device(peaq_poll_dev);
+}
+
+module_init(peaq_wmi_init);
+module_exit(peaq_wmi_exit);
+
+MODULE_DESCRIPTION("PEAQ 2-in-1 WMI hotkey driver");
+MODULE_AUTHOR("Hans de Goede <[email protected]>");
+MODULE_LICENSE("GPL");
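The new peaq-wmi driver debounces the button by skipping a number of subsequent polls rather than tracking timestamps. The arithmetic is easy to verify in isolation; the snippet below is plain user-space C (not kernel code) that mirrors the max(1u, PEAQ_POLL_IGNORE_MS / poll_interval) computation from peaq_wmi_poll():

#include <stdio.h>

#define PEAQ_POLL_INTERVAL_MS 250
#define PEAQ_POLL_IGNORE_MS   500

/* Same value the driver stores in peaq_ignore_events_counter after a press. */
static unsigned int ignore_counter(unsigned int poll_interval_ms)
{
	unsigned int n = PEAQ_POLL_IGNORE_MS / poll_interval_ms;

	return n > 1 ? n : 1;	/* equivalent of max(1u, ...) */
}

int main(void)
{
	printf("%u\n", ignore_counter(PEAQ_POLL_INTERVAL_MS));	/* 2 at the default 250 ms */
	printf("%u\n", ignore_counter(1000));			/* clamped to 1 at the 1000 ms maximum */
	return 0;
}

At the default 250 ms interval the counter is set to 2, and the clamp to 1 keeps the expression from going to zero if the user raises poll_interval above PEAQ_POLL_IGNORE_MS.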
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index 8c146e2b6727..5c4dfe48f03d 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -591,7 +591,7 @@ static int seclinux_rfkill_set(void *data, bool blocked)
!blocked);
}
-static struct rfkill_ops seclinux_rfkill_ops = {
+static const struct rfkill_ops seclinux_rfkill_ops = {
.set_block = seclinux_rfkill_set,
};
@@ -651,7 +651,7 @@ static void swsmi_rfkill_query(struct rfkill *rfkill, void *priv)
rfkill_set_sw_state(rfkill, !ret);
}
-static struct rfkill_ops swsmi_rfkill_ops = {
+static const struct rfkill_ops swsmi_rfkill_ops = {
.set_block = swsmi_rfkill_set,
.query = swsmi_rfkill_query,
};
@@ -1446,9 +1446,9 @@ static int __init samsung_sabi_init(struct samsung_laptop *samsung)
const struct sabi_config *config = NULL;
const struct sabi_commands *commands;
unsigned int ifaceP;
+ int loca = 0xffff;
int ret = 0;
int i;
- int loca;
samsung->f0000_segment = ioremap_nocache(0xf0000, 0xffff);
if (!samsung->f0000_segment) {
diff --git a/drivers/platform/x86/silead_dmi.c b/drivers/platform/x86/silead_dmi.c
index a3a57d93cf06..3cd3bdfe51df 100644
--- a/drivers/platform/x86/silead_dmi.c
+++ b/drivers/platform/x86/silead_dmi.c
@@ -80,6 +80,48 @@ static const struct silead_ts_dmi_data surftab_wintron70_st70416_6_data = {
.properties = surftab_wintron70_st70416_6_props,
};
+static const struct property_entry gp_electronic_t701_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl1680-gp-electronic-t701.fw"),
+ { }
+};
+
+static const struct silead_ts_dmi_data gp_electronic_t701_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = gp_electronic_t701_props,
+};
+
+static const struct property_entry pipo_w2s_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1660),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 880),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl1680-pipo-w2s.fw"),
+ { }
+};
+
+static const struct silead_ts_dmi_data pipo_w2s_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = pipo_w2s_props,
+};
+
+static const struct property_entry pov_mobii_wintab_p800w_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1800),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1150),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name",
+ "gsl3692-pov-mobii-wintab-p800w.fw"),
+ { }
+};
+
+static const struct silead_ts_dmi_data pov_mobii_wintab_p800w_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = pov_mobii_wintab_p800w_props,
+};
+
static const struct dmi_system_id silead_ts_dmi_table[] = {
{
/* CUBE iwork8 Air */
@@ -117,6 +159,34 @@ static const struct dmi_system_id silead_ts_dmi_table[] = {
DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA04"),
},
},
+ {
+ /* GP-electronic T701 */
+ .driver_data = (void *)&gp_electronic_t701_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T701"),
+ DMI_MATCH(DMI_BIOS_VERSION, "BYT70A.YNCHENG.WIN.007"),
+ },
+ },
+ {
+ /* Pipo W2S */
+ .driver_data = (void *)&pipo_w2s_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PIPO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "W2S"),
+ },
+ },
+ {
+ /* Point of View mobii wintab p800w */
+ .driver_data = (void *)&pov_mobii_wintab_p800w_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ DMI_MATCH(DMI_BIOS_VERSION, "3BAIR1013"),
+ /* Above matches are too generic, add bios-date match */
+ DMI_MATCH(DMI_BIOS_DATE, "08/22/2014"),
+ },
+ },
{ },
};
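The new silead_dmi entries attach generic touchscreen properties to the MSSL1680 ACPI device, and a touchscreen driver is then expected to read them through the unified device-property API. The sketch below shows the consuming side in generic form; it is not the actual GSL1680 driver, the struct and function names are made up, and the fallback firmware name is a placeholder:

#include <linux/device.h>
#include <linux/property.h>

struct example_ts_config {
	u32 size_x;
	u32 size_y;
	bool swapped_x_y;
	const char *firmware_name;
};

static int example_ts_read_props(struct device *dev,
				 struct example_ts_config *cfg)
{
	int error;

	error = device_property_read_u32(dev, "touchscreen-size-x",
					 &cfg->size_x);
	if (error)
		return error;

	error = device_property_read_u32(dev, "touchscreen-size-y",
					 &cfg->size_y);
	if (error)
		return error;

	/* boolean properties: present means true */
	cfg->swapped_x_y = device_property_read_bool(dev,
						     "touchscreen-swapped-x-y");

	/* optional property: fall back to a built-in default name */
	if (device_property_read_string(dev, "firmware-name",
					&cfg->firmware_name))
		cfg->firmware_name = "gsl_default.fw";

	return 0;
}

Keeping the per-board differences in DMI-keyed property tables like the ones added here lets the touchscreen driver itself stay board-agnostic.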
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index aa2ee51d3547..bfae79534f44 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -222,7 +222,7 @@ struct sony_laptop_keypress {
/* Correspondence table between sonypi events
* and input layer indexes in the keymap
*/
-static int sony_laptop_input_index[] = {
+static const int sony_laptop_input_index[] = {
-1, /* 0 no event */
-1, /* 1 SONYPI_EVENT_JOGDIAL_DOWN */
-1, /* 2 SONYPI_EVENT_JOGDIAL_UP */
@@ -4032,7 +4032,7 @@ static struct attribute *spic_attributes[] = {
NULL
};
-static struct attribute_group spic_attribute_group = {
+static const struct attribute_group spic_attribute_group = {
.attrs = spic_attributes
};
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index f6861b551178..b22573131e53 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -590,8 +590,8 @@ static int acpi_evalf(acpi_handle handle,
break;
/* add more types as needed */
default:
- pr_err("acpi_evalf() called "
- "with invalid format character '%c'\n", c);
+ pr_err("acpi_evalf() called with invalid format character '%c'\n",
+ c);
va_end(ap);
return 0;
}
@@ -619,8 +619,8 @@ static int acpi_evalf(acpi_handle handle,
break;
/* add more types as needed */
default:
- pr_err("acpi_evalf() called "
- "with invalid format character '%c'\n", res_type);
+ pr_err("acpi_evalf() called with invalid format character '%c'\n",
+ res_type);
return 0;
}
@@ -790,8 +790,8 @@ static int __init setup_acpi_notify(struct ibm_struct *ibm)
ibm->acpi->type, dispatch_acpi_notify, ibm);
if (ACPI_FAILURE(status)) {
if (status == AE_ALREADY_EXISTS) {
- pr_notice("another device driver is already "
- "handling %s events\n", ibm->name);
+ pr_notice("another device driver is already handling %s events\n",
+ ibm->name);
} else {
pr_err("acpi_install_notify_handler(%s) failed: %s\n",
ibm->name, acpi_format_exception(status));
@@ -1095,8 +1095,7 @@ static void printk_deprecated_attribute(const char * const what,
const char * const details)
{
tpacpi_log_usertask("deprecated sysfs attribute");
- pr_warn("WARNING: sysfs attribute %s is deprecated and "
- "will be removed. %s\n",
+ pr_warn("WARNING: sysfs attribute %s is deprecated and will be removed. %s\n",
what, details);
}
@@ -1796,8 +1795,7 @@ static void __init tpacpi_check_outdated_fw(void)
* best if the user upgrades the firmware anyway.
*/
pr_warn("WARNING: Outdated ThinkPad BIOS/EC firmware\n");
- pr_warn("WARNING: This firmware may be missing critical bug "
- "fixes and/or important features\n");
+ pr_warn("WARNING: This firmware may be missing critical bug fixes and/or important features\n");
}
}
@@ -2166,8 +2164,7 @@ static int hotkey_mask_set(u32 mask)
* a given event.
*/
if (!hotkey_mask_get() && !rc && (fwmask & ~hotkey_acpi_mask)) {
- pr_notice("asked for hotkey mask 0x%08x, but "
- "firmware forced it to 0x%08x\n",
+ pr_notice("asked for hotkey mask 0x%08x, but firmware forced it to 0x%08x\n",
fwmask, hotkey_acpi_mask);
}
@@ -2192,11 +2189,9 @@ static int hotkey_user_mask_set(const u32 mask)
(mask == 0xffff || mask == 0xffffff ||
mask == 0xffffffff)) {
tp_warned.hotkey_mask_ff = 1;
- pr_notice("setting the hotkey mask to 0x%08x is likely "
- "not the best way to go about it\n", mask);
- pr_notice("please consider using the driver defaults, "
- "and refer to up-to-date thinkpad-acpi "
- "documentation\n");
+ pr_notice("setting the hotkey mask to 0x%08x is likely not the best way to go about it\n",
+ mask);
+ pr_notice("please consider using the driver defaults, and refer to up-to-date thinkpad-acpi documentation\n");
}
/* Try to enable what the user asked for, plus whatever we need.
@@ -2571,17 +2566,14 @@ static void hotkey_poll_setup(const bool may_warn)
NULL, TPACPI_NVRAM_KTHREAD_NAME);
if (IS_ERR(tpacpi_hotkey_task)) {
tpacpi_hotkey_task = NULL;
- pr_err("could not create kernel thread "
- "for hotkey polling\n");
+ pr_err("could not create kernel thread for hotkey polling\n");
}
}
} else {
hotkey_poll_stop_sync();
if (may_warn && (poll_driver_mask || poll_user_mask) &&
hotkey_poll_freq == 0) {
- pr_notice("hot keys 0x%08x and/or events 0x%08x "
- "require polling, which is currently "
- "disabled\n",
+ pr_notice("hot keys 0x%08x and/or events 0x%08x require polling, which is currently disabled\n",
poll_user_mask, poll_driver_mask);
}
}
@@ -2808,12 +2800,10 @@ static ssize_t hotkey_source_mask_store(struct device *dev,
mutex_unlock(&hotkey_mutex);
if (rc < 0)
- pr_err("hotkey_source_mask: "
- "failed to update the firmware event mask!\n");
+ pr_err("hotkey_source_mask: failed to update the firmware event mask!\n");
if (r_ev)
- pr_notice("hotkey_source_mask: "
- "some important events were disabled: 0x%04x\n",
+ pr_notice("hotkey_source_mask: some important events were disabled: 0x%04x\n",
r_ev);
tpacpi_disclose_usertask("hotkey_source_mask", "set to 0x%08lx\n", t);
@@ -3074,8 +3064,7 @@ static void hotkey_exit(void)
if (((tp_features.hotkey_mask &&
hotkey_mask_set(hotkey_orig_mask)) |
hotkey_status_set(false)) != 0)
- pr_err("failed to restore hot key mask "
- "to BIOS defaults\n");
+ pr_err("failed to restore hot key mask to BIOS defaults\n");
}
static void __init hotkey_unmap(const unsigned int scancode)
@@ -3587,11 +3576,8 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
* userspace. tpacpi_detect_brightness_capabilities() must have
* been called before this point */
if (acpi_video_get_backlight_type() != acpi_backlight_vendor) {
- pr_info("This ThinkPad has standard ACPI backlight "
- "brightness control, supported by the ACPI "
- "video driver\n");
- pr_notice("Disabling thinkpad-acpi brightness events "
- "by default...\n");
+ pr_info("This ThinkPad has standard ACPI backlight brightness control, supported by the ACPI video driver\n");
+ pr_notice("Disabling thinkpad-acpi brightness events by default...\n");
/* Disable brightness up/down on Lenovo thinkpads when
* ACPI is handling them, otherwise it is plain impossible
@@ -3760,7 +3746,7 @@ static bool adaptive_keyboard_hotkey_notify_hotkey(unsigned int scancode)
TP_ACPI_HOTKEYSCAN_EXTENDED_START -
TP_ACPI_HOTKEYSCAN_ADAPTIVE_START) {
pr_info("Unhandled adaptive keyboard key: 0x%x\n",
- scancode);
+ scancode);
return false;
}
keycode = hotkey_keycode_map[scancode - FIRST_ADAPTIVE_KEY +
@@ -3957,14 +3943,12 @@ static bool hotkey_notify_6xxx(const u32 hkey,
/* recommended action: immediate sleep/hibernate */
break;
case TP_HKEY_EV_ALARM_SENSOR_HOT:
- pr_crit("THERMAL ALARM: "
- "a sensor reports something is too hot!\n");
+ pr_crit("THERMAL ALARM: a sensor reports something is too hot!\n");
/* recommended action: warn user through gui, that */
/* some internal component is too hot */
break;
case TP_HKEY_EV_ALARM_SENSOR_XHOT:
- pr_alert("THERMAL EMERGENCY: "
- "a sensor reports something is extremely hot!\n");
+ pr_alert("THERMAL EMERGENCY: a sensor reports something is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
break;
case TP_HKEY_EV_AC_CHANGED:
@@ -4089,8 +4073,8 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
}
if (!known_ev) {
pr_notice("unhandled HKEY event 0x%04x\n", hkey);
- pr_notice("please report the conditions when this "
- "event happened to %s\n", TPACPI_MAIL);
+ pr_notice("please report the conditions when this event happened to %s\n",
+ TPACPI_MAIL);
}
/* netlink events */
@@ -4124,8 +4108,7 @@ static void hotkey_resume(void)
if (hotkey_status_set(true) < 0 ||
hotkey_mask_set(hotkey_acpi_mask) < 0)
- pr_err("error while attempting to reset the event "
- "firmware interface\n");
+ pr_err("error while attempting to reset the event firmware interface\n");
tpacpi_send_radiosw_update();
hotkey_tablet_mode_notify_change();
@@ -4177,12 +4160,8 @@ static void hotkey_enabledisable_warn(bool enable)
{
tpacpi_log_usertask("procfs hotkey enable/disable");
if (!WARN((tpacpi_lifecycle == TPACPI_LIFE_RUNNING || !enable),
- pr_fmt("hotkey enable/disable functionality has been "
- "removed from the driver. "
- "Hotkeys are always enabled.\n")))
- pr_err("Please remove the hotkey=enable module "
- "parameter, it is deprecated. "
- "Hotkeys are always enabled.\n");
+ pr_fmt("hotkey enable/disable functionality has been removed from the driver. Hotkeys are always enabled.\n")))
+ pr_err("Please remove the hotkey=enable module parameter, it is deprecated. Hotkeys are always enabled.\n");
}
static int hotkey_write(char *buf)
@@ -4840,8 +4819,7 @@ static void video_exit(void)
dbg_printk(TPACPI_DBG_EXIT,
"restoring original video autoswitch mode\n");
if (video_autosw_set(video_orig_autosw))
- pr_err("error while trying to restore original "
- "video autoswitch mode\n");
+ pr_err("error while trying to restore original video autoswitch mode\n");
}
static int video_outputsw_get(void)
@@ -5931,8 +5909,7 @@ static int __init led_init(struct ibm_init_struct *iibm)
}
#ifdef CONFIG_THINKPAD_ACPI_UNSAFE_LEDS
- pr_notice("warning: userspace override of important "
- "firmware LEDs is enabled\n");
+ pr_notice("warning: userspace override of important firmware LEDs is enabled\n");
#endif
return 0;
}
@@ -5961,8 +5938,7 @@ static int led_read(struct seq_file *m)
}
}
- seq_printf(m, "commands:\t"
- "<led> on, <led> off, <led> blink (<led> is 0-15)\n");
+ seq_printf(m, "commands:\t<led> on, <led> off, <led> blink (<led> is 0-15)\n");
return 0;
}
@@ -6335,13 +6311,10 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
if (ta1 == 0) {
/* This is sheer paranoia, but we handle it anyway */
if (acpi_tmp7) {
- pr_err("ThinkPad ACPI EC access misbehaving, "
- "falling back to ACPI TMPx access "
- "mode\n");
+ pr_err("ThinkPad ACPI EC access misbehaving, falling back to ACPI TMPx access mode\n");
thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;
} else {
- pr_err("ThinkPad ACPI EC access misbehaving, "
- "disabling thermal sensors access\n");
+ pr_err("ThinkPad ACPI EC access misbehaving, disabling thermal sensors access\n");
thermal_read_mode = TPACPI_THERMAL_NONE;
}
} else {
@@ -6820,26 +6793,20 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
if (!brightness_enable) {
dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
- "brightness support disabled by "
- "module parameter\n");
+ "brightness support disabled by module parameter\n");
return 1;
}
if (acpi_video_get_backlight_type() != acpi_backlight_vendor) {
if (brightness_enable > 1) {
- pr_info("Standard ACPI backlight interface "
- "available, not loading native one\n");
+ pr_info("Standard ACPI backlight interface available, not loading native one\n");
return 1;
} else if (brightness_enable == 1) {
- pr_warn("Cannot enable backlight brightness support, "
- "ACPI is already handling it. Refer to the "
- "acpi_backlight kernel parameter.\n");
+ pr_warn("Cannot enable backlight brightness support, ACPI is already handling it. Refer to the acpi_backlight kernel parameter.\n");
return 1;
}
} else if (tp_features.bright_acpimode && brightness_enable > 1) {
- pr_notice("Standard ACPI backlight interface not "
- "available, thinkpad_acpi native "
- "brightness control enabled\n");
+ pr_notice("Standard ACPI backlight interface not available, thinkpad_acpi native brightness control enabled\n");
}
/*
@@ -6890,10 +6857,10 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
"brightness is supported\n");
if (quirks & TPACPI_BRGHT_Q_ASK) {
- pr_notice("brightness: will use unverified default: "
- "brightness_mode=%d\n", brightness_mode);
- pr_notice("brightness: please report to %s whether it works well "
- "or not on your ThinkPad\n", TPACPI_MAIL);
+ pr_notice("brightness: will use unverified default: brightness_mode=%d\n",
+ brightness_mode);
+ pr_notice("brightness: please report to %s whether it works well or not on your ThinkPad\n",
+ TPACPI_MAIL);
}
/* Added by mistake in early 2007. Probably useless, but it could
@@ -6903,8 +6870,7 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
backlight_update_status(ibm_backlight_device);
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
- "brightness: registering brightness hotkeys "
- "as change notification\n");
+ "brightness: registering brightness hotkeys as change notification\n");
tpacpi_hotkey_driver_mask_set(hotkey_driver_mask
| TP_ACPI_HKEY_BRGHTUP_MASK
| TP_ACPI_HKEY_BRGHTDWN_MASK);
@@ -7567,8 +7533,8 @@ static int __init volume_init(struct ibm_init_struct *iibm)
return -EINVAL;
if (volume_mode == TPACPI_VOL_MODE_UCMS_STEP) {
- pr_err("UCMS step volume mode not implemented, "
- "please contact %s\n", TPACPI_MAIL);
+ pr_err("UCMS step volume mode not implemented, please contact %s\n",
+ TPACPI_MAIL);
return 1;
}
@@ -7581,8 +7547,7 @@ static int __init volume_init(struct ibm_init_struct *iibm)
*/
if (!alsa_enable) {
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_MIXER,
- "ALSA mixer disabled by parameter, "
- "not loading volume subdriver...\n");
+ "ALSA mixer disabled by parameter, not loading volume subdriver...\n");
return 1;
}
@@ -7674,12 +7639,9 @@ static int volume_read(struct seq_file *m)
if (volume_control_allowed) {
seq_printf(m, "commands:\tunmute, mute\n");
if (!tp_features.mixer_no_level_control) {
- seq_printf(m,
- "commands:\tup, down\n");
- seq_printf(m,
- "commands:\tlevel <level>"
- " (<level> is 0-%d)\n",
- TP_EC_VOLUME_MAX);
+ seq_printf(m, "commands:\tup, down\n");
+ seq_printf(m, "commands:\tlevel <level> (<level> is 0-%d)\n",
+ TP_EC_VOLUME_MAX);
}
}
}
@@ -7702,10 +7664,8 @@ static int volume_write(char *buf)
if (!volume_control_allowed && tpacpi_lifecycle != TPACPI_LIFE_INIT) {
if (unlikely(!tp_warned.volume_ctrl_forbidden)) {
tp_warned.volume_ctrl_forbidden = 1;
- pr_notice("Console audio control in monitor mode, "
- "changes are not allowed\n");
- pr_notice("Use the volume_control=1 module parameter "
- "to enable volume control\n");
+ pr_notice("Console audio control in monitor mode, changes are not allowed\n");
+ pr_notice("Use the volume_control=1 module parameter to enable volume control\n");
}
return -EPERM;
}
@@ -7987,8 +7947,7 @@ TPACPI_HANDLE(sfan, ec, "SFAN", /* 570 */
static void fan_quirk1_setup(void)
{
if (fan_control_initial_status == 0x07) {
- pr_notice("fan_init: initial fan status is unknown, "
- "assuming it is in auto mode\n");
+ pr_notice("fan_init: initial fan status is unknown, assuming it is in auto mode\n");
tp_features.fan_ctrl_status_undef = 1;
}
}
@@ -8385,8 +8344,8 @@ static void fan_watchdog_fire(struct work_struct *ignored)
pr_notice("fan watchdog: enabling fan\n");
rc = fan_set_enable();
if (rc < 0) {
- pr_err("fan watchdog: error %d while enabling fan, "
- "will try again later...\n", -rc);
+ pr_err("fan watchdog: error %d while enabling fan, will try again later...\n",
+ rc);
/* reschedule for later */
fan_watchdog_reset();
}
@@ -8680,8 +8639,7 @@ static int __init fan_init(struct ibm_init_struct *iibm)
"secondary fan support enabled\n");
}
} else {
- pr_err("ThinkPad ACPI EC access misbehaving, "
- "fan status and control unavailable\n");
+ pr_err("ThinkPad ACPI EC access misbehaving, fan status and control unavailable\n");
return 1;
}
}
@@ -8780,8 +8738,8 @@ static void fan_suspend(void)
fan_control_resume_level = 0;
rc = fan_get_status_safe(&fan_control_resume_level);
if (rc < 0)
- pr_notice("failed to read fan level for later "
- "restore during resume: %d\n", rc);
+ pr_notice("failed to read fan level for later restore during resume: %d\n",
+ rc);
/* if it is undefined, don't attempt to restore it.
* KEEP THIS LAST */
@@ -8900,20 +8858,17 @@ static int fan_read(struct seq_file *m)
break;
default:
- seq_printf(m, " (<level> is 0-7, "
- "auto, disengaged, full-speed)\n");
+ seq_printf(m, " (<level> is 0-7, auto, disengaged, full-speed)\n");
break;
}
}
if (fan_control_commands & TPACPI_FAN_CMD_ENABLE)
seq_printf(m, "commands:\tenable, disable\n"
- "commands:\twatchdog <timeout> (<timeout> "
- "is 0 (off), 1-120 (seconds))\n");
+ "commands:\twatchdog <timeout> (<timeout> is 0 (off), 1-120 (seconds))\n");
if (fan_control_commands & TPACPI_FAN_CMD_SPEED)
- seq_printf(m, "commands:\tspeed <speed>"
- " (<speed> is 0-65535)\n");
+ seq_printf(m, "commands:\tspeed <speed> (<speed> is 0-65535)\n");
return 0;
}
@@ -9439,8 +9394,7 @@ static int __must_check __init get_thinkpad_model_data(
tp->ec_release = (ec_fw_string[4] << 8)
| ec_fw_string[5];
} else {
- pr_notice("ThinkPad firmware release %s "
- "doesn't match the known patterns\n",
+ pr_notice("ThinkPad firmware release %s doesn't match the known patterns\n",
ec_fw_string);
pr_notice("please report this to %s\n",
TPACPI_MAIL);
@@ -9635,8 +9589,7 @@ MODULE_PARM_DESC(debug, "Sets debug level bit-mask");
module_param(force_load, bool, 0444);
MODULE_PARM_DESC(force_load,
- "Attempts to load the driver even on a "
- "mis-identified ThinkPad when true");
+ "Attempts to load the driver even on a mis-identified ThinkPad when true");
module_param_named(fan_control, fan_control_allowed, bool, 0444);
MODULE_PARM_DESC(fan_control,
@@ -9644,8 +9597,7 @@ MODULE_PARM_DESC(fan_control,
module_param_named(brightness_mode, brightness_mode, uint, 0444);
MODULE_PARM_DESC(brightness_mode,
- "Selects brightness control strategy: "
- "0=auto, 1=EC, 2=UCMS, 3=EC+NVRAM");
+ "Selects brightness control strategy: 0=auto, 1=EC, 2=UCMS, 3=EC+NVRAM");
module_param(brightness_enable, uint, 0444);
MODULE_PARM_DESC(brightness_enable,
@@ -9654,18 +9606,15 @@ MODULE_PARM_DESC(brightness_enable,
#ifdef CONFIG_THINKPAD_ACPI_ALSA_SUPPORT
module_param_named(volume_mode, volume_mode, uint, 0444);
MODULE_PARM_DESC(volume_mode,
- "Selects volume control strategy: "
- "0=auto, 1=EC, 2=N/A, 3=EC+NVRAM");
+ "Selects volume control strategy: 0=auto, 1=EC, 2=N/A, 3=EC+NVRAM");
module_param_named(volume_capabilities, volume_capabilities, uint, 0444);
MODULE_PARM_DESC(volume_capabilities,
- "Selects the mixer capabilites: "
- "0=auto, 1=volume and mute, 2=mute only");
+ "Selects the mixer capabilites: 0=auto, 1=volume and mute, 2=mute only");
module_param_named(volume_control, volume_control_allowed, bool, 0444);
MODULE_PARM_DESC(volume_control,
- "Enables software override for the console audio "
- "control when true");
+ "Enables software override for the console audio control when true");
module_param_named(software_mute, software_mute_requested, bool, 0444);
MODULE_PARM_DESC(software_mute,
@@ -9680,10 +9629,10 @@ module_param_named(enable, alsa_enable, bool, 0444);
MODULE_PARM_DESC(enable, "Enable the ALSA interface for the ACPI EC Mixer");
#endif /* CONFIG_THINKPAD_ACPI_ALSA_SUPPORT */
+/* The module parameter can't be read back, that's why 0 is used here */
#define TPACPI_PARAM(feature) \
module_param_call(feature, set_ibm_param, NULL, NULL, 0); \
- MODULE_PARM_DESC(feature, "Simulates thinkpad-acpi procfs command " \
- "at module load, see documentation")
+ MODULE_PARM_DESC(feature, "Simulates thinkpad-acpi procfs command at module load, see documentation")
TPACPI_PARAM(hotkey);
TPACPI_PARAM(bluetooth);
diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c
index 70205d222da9..1032c00b907b 100644
--- a/drivers/platform/x86/topstar-laptop.c
+++ b/drivers/platform/x86/topstar-laptop.c
@@ -162,6 +162,7 @@ static int acpi_topstar_remove(struct acpi_device *device)
}
static const struct acpi_device_id topstar_device_ids[] = {
+ { "TPS0001", 0 },
{ "TPSACPI01", 0 },
{ "", 0 },
};
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index d0daf75cbed1..88f9f79a7cf6 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1502,14 +1502,9 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
int ret;
u32 video_out;
- cmd = kmalloc(count + 1, GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
- if (copy_from_user(cmd, buf, count)) {
- kfree(cmd);
- return -EFAULT;
- }
- cmd[count] = '\0';
+ cmd = memdup_user_nul(buf, count);
+ if (IS_ERR(cmd))
+ return PTR_ERR(cmd);
buffer = cmd;
diff --git a/drivers/platform/x86/toshiba_haps.c b/drivers/platform/x86/toshiba_haps.c
index b3dec521e2b6..fb2736602558 100644
--- a/drivers/platform/x86/toshiba_haps.c
+++ b/drivers/platform/x86/toshiba_haps.c
@@ -132,7 +132,7 @@ static struct attribute *haps_attributes[] = {
NULL,
};
-static struct attribute_group haps_attr_group = {
+static const struct attribute_group haps_attr_group = {
.attrs = haps_attributes,
};
diff --git a/drivers/platform/x86/wmi-bmof.c b/drivers/platform/x86/wmi-bmof.c
new file mode 100644
index 000000000000..c4530ba715e8
--- /dev/null
+++ b/drivers/platform/x86/wmi-bmof.c
@@ -0,0 +1,125 @@
+/*
+ * WMI embedded Binary MOF driver
+ *
+ * Copyright (c) 2015 Andrew Lutomirski
+ * Copyright (C) 2017 VMware, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/wmi.h>
+
+#define WMI_BMOF_GUID "05901221-D566-11D1-B2F0-00A0C9062910"
+
+struct bmof_priv {
+ union acpi_object *bmofdata;
+ struct bin_attribute bmof_bin_attr;
+};
+
+static ssize_t
+read_bmof(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct bmof_priv *priv =
+ container_of(attr, struct bmof_priv, bmof_bin_attr);
+
+ if (off < 0)
+ return -EINVAL;
+
+ if (off >= priv->bmofdata->buffer.length)
+ return 0;
+
+ if (count > priv->bmofdata->buffer.length - off)
+ count = priv->bmofdata->buffer.length - off;
+
+ memcpy(buf, priv->bmofdata->buffer.pointer + off, count);
+ return count;
+}
+
+static int wmi_bmof_probe(struct wmi_device *wdev)
+{
+ struct bmof_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&wdev->dev, sizeof(struct bmof_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(&wdev->dev, priv);
+
+ priv->bmofdata = wmidev_block_query(wdev, 0);
+ if (!priv->bmofdata) {
+ dev_err(&wdev->dev, "failed to read Binary MOF\n");
+ return -EIO;
+ }
+
+ if (priv->bmofdata->type != ACPI_TYPE_BUFFER) {
+ dev_err(&wdev->dev, "Binary MOF is not a buffer\n");
+ ret = -EIO;
+ goto err_free;
+ }
+
+ sysfs_bin_attr_init(&priv->bmof_bin_attr);
+ priv->bmof_bin_attr.attr.name = "bmof";
+ priv->bmof_bin_attr.attr.mode = 0400;
+ priv->bmof_bin_attr.read = read_bmof;
+ priv->bmof_bin_attr.size = priv->bmofdata->buffer.length;
+
+ ret = sysfs_create_bin_file(&wdev->dev.kobj, &priv->bmof_bin_attr);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+ err_free:
+ kfree(priv->bmofdata);
+ return ret;
+}
+
+static int wmi_bmof_remove(struct wmi_device *wdev)
+{
+ struct bmof_priv *priv = dev_get_drvdata(&wdev->dev);
+
+ sysfs_remove_bin_file(&wdev->dev.kobj, &priv->bmof_bin_attr);
+ kfree(priv->bmofdata);
+ return 0;
+}
+
+static const struct wmi_device_id wmi_bmof_id_table[] = {
+ { .guid_string = WMI_BMOF_GUID },
+ { },
+};
+
+static struct wmi_driver wmi_bmof_driver = {
+ .driver = {
+ .name = "wmi-bmof",
+ },
+ .probe = wmi_bmof_probe,
+ .remove = wmi_bmof_remove,
+ .id_table = wmi_bmof_id_table,
+};
+
+module_wmi_driver(wmi_bmof_driver);
+
+MODULE_ALIAS("wmi:" WMI_BMOF_GUID);
+MODULE_AUTHOR("Andrew Lutomirski <[email protected]>");
+MODULE_DESCRIPTION("WMI embedded Binary MOF driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index ceeb8c188ef3..1a764e311e11 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -8,6 +8,10 @@
* Copyright (c) 2001-2007 Anton Altaparmakov
* Copyright (C) 2001,2002 Jakob Kemi <[email protected]>
*
+ * WMI bus infrastructure by Andrew Lutomirski and Darren Hart:
+ * Copyright (C) 2015 Andrew Lutomirski
+ * Copyright (C) 2017 VMware, Inc. All Rights Reserved.
+ *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
@@ -37,6 +41,8 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/wmi.h>
#include <linux/uuid.h>
ACPI_MODULE_NAME("wmi");
@@ -44,8 +50,6 @@ MODULE_AUTHOR("Carlos Corbacho");
MODULE_DESCRIPTION("ACPI-WMI Mapping Driver");
MODULE_LICENSE("GPL");
-#define ACPI_WMI_CLASS "wmi"
-
static LIST_HEAD(wmi_block_list);
struct guid_block {
@@ -62,12 +66,14 @@ struct guid_block {
};
struct wmi_block {
+ struct wmi_device dev;
struct list_head list;
struct guid_block gblock;
- acpi_handle handle;
+ struct acpi_device *acpi_device;
wmi_notify_handler handler;
void *handler_data;
- struct device dev;
+
+ bool read_takes_no_args;
};
@@ -90,9 +96,8 @@ module_param(debug_dump_wdg, bool, 0444);
MODULE_PARM_DESC(debug_dump_wdg,
"Dump available WMI interfaces [0/1]");
-static int acpi_wmi_remove(struct acpi_device *device);
-static int acpi_wmi_add(struct acpi_device *device);
-static void acpi_wmi_notify(struct acpi_device *device, u32 event);
+static int acpi_wmi_remove(struct platform_device *device);
+static int acpi_wmi_probe(struct platform_device *device);
static const struct acpi_device_id wmi_device_ids[] = {
{"PNP0C14", 0},
@@ -101,15 +106,13 @@ static const struct acpi_device_id wmi_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, wmi_device_ids);
-static struct acpi_driver acpi_wmi_driver = {
- .name = "wmi",
- .class = ACPI_WMI_CLASS,
- .ids = wmi_device_ids,
- .ops = {
- .add = acpi_wmi_add,
- .remove = acpi_wmi_remove,
- .notify = acpi_wmi_notify,
+static struct platform_driver acpi_wmi_driver = {
+ .driver = {
+ .name = "acpi-wmi",
+ .acpi_match_table = wmi_device_ids,
},
+ .probe = acpi_wmi_probe,
+ .remove = acpi_wmi_remove,
};
/*
@@ -139,6 +142,30 @@ static bool find_guid(const char *guid_string, struct wmi_block **out)
return false;
}
+static int get_subobj_info(acpi_handle handle, const char *pathname,
+ struct acpi_device_info **info)
+{
+ struct acpi_device_info *dummy_info, **info_ptr;
+ acpi_handle subobj_handle;
+ acpi_status status;
+
+ status = acpi_get_handle(handle, (char *)pathname, &subobj_handle);
+ if (status == AE_NOT_FOUND)
+ return -ENOENT;
+ else if (ACPI_FAILURE(status))
+ return -EIO;
+
+ info_ptr = info ? info : &dummy_info;
+ status = acpi_get_object_info(subobj_handle, info_ptr);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ if (!info)
+ kfree(dummy_info);
+
+ return 0;
+}
+
static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
{
struct guid_block *block = NULL;
@@ -147,7 +174,7 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
acpi_handle handle;
block = &wblock->gblock;
- handle = wblock->handle;
+ handle = wblock->acpi_device->handle;
snprintf(method, 5, "WE%02X", block->notify_id);
status = acpi_execute_simple_method(handle, method, enable);
@@ -186,7 +213,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
return AE_ERROR;
block = &wblock->gblock;
- handle = wblock->handle;
+ handle = wblock->acpi_device->handle;
if (!(block->flags & ACPI_WMI_METHOD))
return AE_BAD_DATA;
@@ -221,19 +248,10 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
}
EXPORT_SYMBOL_GPL(wmi_evaluate_method);
-/**
- * wmi_query_block - Return contents of a WMI block
- * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
- * @instance: Instance index
- * &out: Empty buffer to return the contents of the data block to
- *
- * Return the contents of an ACPI-WMI data block to a buffer
- */
-acpi_status wmi_query_block(const char *guid_string, u8 instance,
-struct acpi_buffer *out)
+static acpi_status __query_block(struct wmi_block *wblock, u8 instance,
+ struct acpi_buffer *out)
{
struct guid_block *block = NULL;
- struct wmi_block *wblock = NULL;
acpi_handle handle;
acpi_status status, wc_status = AE_ERROR;
struct acpi_object_list input;
@@ -241,14 +259,11 @@ struct acpi_buffer *out)
char method[5];
char wc_method[5] = "WC";
- if (!guid_string || !out)
+ if (!out)
return AE_BAD_PARAMETER;
- if (!find_guid(guid_string, &wblock))
- return AE_ERROR;
-
block = &wblock->gblock;
- handle = wblock->handle;
+ handle = wblock->acpi_device->handle;
if (block->instance_count < instance)
return AE_BAD_PARAMETER;
@@ -262,6 +277,9 @@ struct acpi_buffer *out)
wq_params[0].type = ACPI_TYPE_INTEGER;
wq_params[0].integer.value = instance;
+ if (instance == 0 && wblock->read_takes_no_args)
+ input.count = 0;
+
/*
* If ACPI_WMI_EXPENSIVE, call the relevant WCxx method first to
* enable collection.
@@ -294,8 +312,59 @@ struct acpi_buffer *out)
return status;
}
+
+/**
+ * wmi_query_block - Return contents of a WMI block (deprecated)
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @instance: Instance index
+ * &out: Empty buffer to return the contents of the data block to
+ *
+ * Return the contents of an ACPI-WMI data block to a buffer
+ */
+acpi_status wmi_query_block(const char *guid_string, u8 instance,
+ struct acpi_buffer *out)
+{
+ struct wmi_block *wblock;
+
+ if (!guid_string)
+ return AE_BAD_PARAMETER;
+
+ if (!find_guid(guid_string, &wblock))
+ return AE_ERROR;
+
+ return __query_block(wblock, instance, out);
+}
EXPORT_SYMBOL_GPL(wmi_query_block);
+union acpi_object *wmidev_block_query(struct wmi_device *wdev, u8 instance)
+{
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct wmi_block *wblock = container_of(wdev, struct wmi_block, dev);
+
+ if (ACPI_FAILURE(__query_block(wblock, instance, &out)))
+ return NULL;
+
+ return (union acpi_object *)out.pointer;
+}
+EXPORT_SYMBOL_GPL(wmidev_block_query);
+
+struct wmi_device *wmidev_get_other_guid(struct wmi_device *wdev,
+ const char *guid_string)
+{
+ struct wmi_block *this_wb = container_of(wdev, struct wmi_block, dev);
+ struct wmi_block *other_wb;
+
+ if (!find_guid(guid_string, &other_wb))
+ return NULL;
+
+ if (other_wb->acpi_device != this_wb->acpi_device)
+ return NULL;
+
+ get_device(&other_wb->dev.dev);
+ return &other_wb->dev;
+}
+EXPORT_SYMBOL_GPL(wmidev_get_other_guid);
+
/**
* wmi_set_block - Write to a WMI block
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
@@ -305,7 +374,7 @@ EXPORT_SYMBOL_GPL(wmi_query_block);
* Write the contents of the input buffer to an ACPI-WMI data block
*/
acpi_status wmi_set_block(const char *guid_string, u8 instance,
-const struct acpi_buffer *in)
+ const struct acpi_buffer *in)
{
struct guid_block *block = NULL;
struct wmi_block *wblock = NULL;
@@ -321,7 +390,7 @@ const struct acpi_buffer *in)
return AE_ERROR;
block = &wblock->gblock;
- handle = wblock->handle;
+ handle = wblock->acpi_device->handle;
if (block->instance_count < instance)
return AE_BAD_PARAMETER;
@@ -352,9 +421,10 @@ EXPORT_SYMBOL_GPL(wmi_set_block);
static void wmi_dump_wdg(const struct guid_block *g)
{
pr_info("%pUL:\n", g->guid);
- pr_info("\tobject_id: %c%c\n", g->object_id[0], g->object_id[1]);
- pr_info("\tnotify_id: %02X\n", g->notify_id);
- pr_info("\treserved: %02X\n", g->reserved);
+ if (g->flags & ACPI_WMI_EVENT)
+ pr_info("\tnotify_id: 0x%02X\n", g->notify_id);
+ else
+ pr_info("\tobject_id: %2pE\n", g->object_id);
pr_info("\tinstance_count: %d\n", g->instance_count);
pr_info("\tflags: %#x", g->flags);
if (g->flags) {
@@ -525,8 +595,8 @@ acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out)
if ((gblock->flags & ACPI_WMI_EVENT) &&
(gblock->notify_id == event))
- return acpi_evaluate_object(wblock->handle, "_WED",
- &input, out);
+ return acpi_evaluate_object(wblock->acpi_device->handle,
+ "_WED", &input, out);
}
return AE_NOT_FOUND;
@@ -545,99 +615,320 @@ bool wmi_has_guid(const char *guid_string)
}
EXPORT_SYMBOL_GPL(wmi_has_guid);
+static struct wmi_block *dev_to_wblock(struct device *dev)
+{
+ return container_of(dev, struct wmi_block, dev.dev);
+}
+
+static struct wmi_device *dev_to_wdev(struct device *dev)
+{
+ return container_of(dev, struct wmi_device, dev);
+}
+
/*
* sysfs interface
*/
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct wmi_block *wblock;
-
- wblock = dev_get_drvdata(dev);
- if (!wblock) {
- strcat(buf, "\n");
- return strlen(buf);
- }
+ struct wmi_block *wblock = dev_to_wblock(dev);
return sprintf(buf, "wmi:%pUL\n", wblock->gblock.guid);
}
static DEVICE_ATTR_RO(modalias);
+static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+
+ return sprintf(buf, "%pUL\n", wblock->gblock.guid);
+}
+static DEVICE_ATTR_RO(guid);
+
+static ssize_t instance_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+
+ return sprintf(buf, "%d\n", (int)wblock->gblock.instance_count);
+}
+static DEVICE_ATTR_RO(instance_count);
+
+static ssize_t expensive_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+
+ return sprintf(buf, "%d\n",
+ (wblock->gblock.flags & ACPI_WMI_EXPENSIVE) != 0);
+}
+static DEVICE_ATTR_RO(expensive);
+
static struct attribute *wmi_attrs[] = {
&dev_attr_modalias.attr,
+ &dev_attr_guid.attr,
+ &dev_attr_instance_count.attr,
+ &dev_attr_expensive.attr,
NULL,
};
ATTRIBUTE_GROUPS(wmi);
-static int wmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+static ssize_t notify_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- char guid_string[37];
+ struct wmi_block *wblock = dev_to_wblock(dev);
- struct wmi_block *wblock;
+ return sprintf(buf, "%02X\n", (unsigned int)wblock->gblock.notify_id);
+}
+static DEVICE_ATTR_RO(notify_id);
+
+static struct attribute *wmi_event_attrs[] = {
+ &dev_attr_notify_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(wmi_event);
+
+static ssize_t object_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+
+ return sprintf(buf, "%c%c\n", wblock->gblock.object_id[0],
+ wblock->gblock.object_id[1]);
+}
+static DEVICE_ATTR_RO(object_id);
+
+static ssize_t setable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wmi_device *wdev = dev_to_wdev(dev);
+
+ return sprintf(buf, "%d\n", (int)wdev->setable);
+}
+static DEVICE_ATTR_RO(setable);
- if (add_uevent_var(env, "MODALIAS="))
+static struct attribute *wmi_data_attrs[] = {
+ &dev_attr_object_id.attr,
+ &dev_attr_setable.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(wmi_data);
+
+static struct attribute *wmi_method_attrs[] = {
+ &dev_attr_object_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(wmi_method);
+
+static int wmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+
+ if (add_uevent_var(env, "MODALIAS=wmi:%pUL", wblock->gblock.guid))
return -ENOMEM;
- wblock = dev_get_drvdata(dev);
- if (!wblock)
+ if (add_uevent_var(env, "WMI_GUID=%pUL", wblock->gblock.guid))
return -ENOMEM;
- sprintf(guid_string, "%pUL", wblock->gblock.guid);
+ return 0;
+}
- strcpy(&env->buf[env->buflen - 1], "wmi:");
- memcpy(&env->buf[env->buflen - 1 + 4], guid_string, 36);
- env->buflen += 40;
+static void wmi_dev_release(struct device *dev)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+
+ kfree(wblock);
+}
+
+static int wmi_dev_match(struct device *dev, struct device_driver *driver)
+{
+ struct wmi_driver *wmi_driver =
+ container_of(driver, struct wmi_driver, driver);
+ struct wmi_block *wblock = dev_to_wblock(dev);
+ const struct wmi_device_id *id = wmi_driver->id_table;
+
+ while (id->guid_string) {
+ uuid_le driver_guid;
+
+ if (WARN_ON(uuid_le_to_bin(id->guid_string, &driver_guid)))
+ continue;
+ if (!memcmp(&driver_guid, wblock->gblock.guid, 16))
+ return 1;
+
+ id++;
+ }
return 0;
}
-static void wmi_dev_free(struct device *dev)
+static int wmi_dev_probe(struct device *dev)
{
- struct wmi_block *wmi_block = container_of(dev, struct wmi_block, dev);
+ struct wmi_block *wblock = dev_to_wblock(dev);
+ struct wmi_driver *wdriver =
+ container_of(dev->driver, struct wmi_driver, driver);
+ int ret = 0;
+
+ if (ACPI_FAILURE(wmi_method_enable(wblock, 1)))
+ dev_warn(dev, "failed to enable device -- probing anyway\n");
+
+ if (wdriver->probe) {
+ ret = wdriver->probe(dev_to_wdev(dev));
+ if (ret != 0 && ACPI_FAILURE(wmi_method_enable(wblock, 0)))
+ dev_warn(dev, "failed to disable device\n");
+ }
+
+ return ret;
+}
+
+static int wmi_dev_remove(struct device *dev)
+{
+ struct wmi_block *wblock = dev_to_wblock(dev);
+ struct wmi_driver *wdriver =
+ container_of(dev->driver, struct wmi_driver, driver);
+ int ret = 0;
+
+ if (wdriver->remove)
+ ret = wdriver->remove(dev_to_wdev(dev));
+
+ if (ACPI_FAILURE(wmi_method_enable(wblock, 0)))
+ dev_warn(dev, "failed to disable device\n");
- kfree(wmi_block);
+ return ret;
}
-static struct class wmi_class = {
+static struct class wmi_bus_class = {
+ .name = "wmi_bus",
+};
+
+static struct bus_type wmi_bus_type = {
.name = "wmi",
- .dev_release = wmi_dev_free,
- .dev_uevent = wmi_dev_uevent,
.dev_groups = wmi_groups,
+ .match = wmi_dev_match,
+ .uevent = wmi_dev_uevent,
+ .probe = wmi_dev_probe,
+ .remove = wmi_dev_remove,
+};
+
+static struct device_type wmi_type_event = {
+ .name = "event",
+ .groups = wmi_event_groups,
+ .release = wmi_dev_release,
};
-static int wmi_create_device(const struct guid_block *gblock,
- struct wmi_block *wblock, acpi_handle handle)
+static struct device_type wmi_type_method = {
+ .name = "method",
+ .groups = wmi_method_groups,
+ .release = wmi_dev_release,
+};
+
+static struct device_type wmi_type_data = {
+ .name = "data",
+ .groups = wmi_data_groups,
+ .release = wmi_dev_release,
+};
+
+static int wmi_create_device(struct device *wmi_bus_dev,
+ const struct guid_block *gblock,
+ struct wmi_block *wblock,
+ struct acpi_device *device)
{
- wblock->dev.class = &wmi_class;
+ struct acpi_device_info *info;
+ char method[5];
+ int result;
- dev_set_name(&wblock->dev, "%pUL", gblock->guid);
+ if (gblock->flags & ACPI_WMI_EVENT) {
+ wblock->dev.dev.type = &wmi_type_event;
+ goto out_init;
+ }
- dev_set_drvdata(&wblock->dev, wblock);
+ if (gblock->flags & ACPI_WMI_METHOD) {
+ wblock->dev.dev.type = &wmi_type_method;
+ goto out_init;
+ }
- return device_register(&wblock->dev);
+ /*
+ * Data Block Query Control Method (WQxx by convention) is
+ * required per the WMI documentation. If it is not present,
+ * we ignore this data block.
+ */
+ strcpy(method, "WQ");
+ strncat(method, wblock->gblock.object_id, 2);
+ result = get_subobj_info(device->handle, method, &info);
+
+ if (result) {
+ dev_warn(wmi_bus_dev,
+ "%s data block query control method not found",
+ method);
+ return result;
+ }
+
+ wblock->dev.dev.type = &wmi_type_data;
+
+ /*
+ * The Microsoft documentation specifically states:
+ *
+ * Data blocks registered with only a single instance
+ * can ignore the parameter.
+ *
+ * ACPICA will get mad at us if we call the method with the wrong number
+ * of arguments, so check what our method expects. (On some Dell
+ * laptops, WQxx may not be a method at all.)
+ */
+ if (info->type != ACPI_TYPE_METHOD || info->param_count == 0)
+ wblock->read_takes_no_args = true;
+
+ kfree(info);
+
+ strcpy(method, "WS");
+ strncat(method, wblock->gblock.object_id, 2);
+ result = get_subobj_info(device->handle, method, NULL);
+
+ if (result == 0)
+ wblock->dev.setable = true;
+
+ out_init:
+ wblock->dev.dev.bus = &wmi_bus_type;
+ wblock->dev.dev.parent = wmi_bus_dev;
+
+ dev_set_name(&wblock->dev.dev, "%pUL", gblock->guid);
+
+ device_initialize(&wblock->dev.dev);
+
+ return 0;
}
-static void wmi_free_devices(void)
+static void wmi_free_devices(struct acpi_device *device)
{
struct wmi_block *wblock, *next;
/* Delete devices for all the GUIDs */
list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
- list_del(&wblock->list);
- if (wblock->dev.class)
- device_unregister(&wblock->dev);
- else
- kfree(wblock);
+ if (wblock->acpi_device == device) {
+ list_del(&wblock->list);
+ device_unregister(&wblock->dev.dev);
+ }
}
}
-static bool guid_already_parsed(const char *guid_string)
+static bool guid_already_parsed(struct acpi_device *device,
+ const u8 *guid)
{
struct wmi_block *wblock;
- list_for_each_entry(wblock, &wmi_block_list, list)
- if (memcmp(wblock->gblock.guid, guid_string, 16) == 0)
+ list_for_each_entry(wblock, &wmi_block_list, list) {
+ if (memcmp(wblock->gblock.guid, guid, 16) == 0) {
+ /*
+ * Because we historically didn't track the relationship
+ * between GUIDs and ACPI nodes, we don't know whether
+ * we need to suppress GUIDs that are unique on a
+ * given node but duplicated across nodes.
+ */
+ dev_warn(&device->dev, "duplicate WMI GUID %pUL (first instance was on %s)\n",
+ guid, dev_name(&wblock->acpi_device->dev));
return true;
+ }
+ }
return false;
}
@@ -645,17 +936,17 @@ static bool guid_already_parsed(const char *guid_string)
/*
* Parse the _WDG method for the GUID data blocks
*/
-static int parse_wdg(acpi_handle handle)
+static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
{
struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
- union acpi_object *obj;
const struct guid_block *gblock;
- struct wmi_block *wblock;
+ struct wmi_block *wblock, *next;
+ union acpi_object *obj;
acpi_status status;
- int retval;
+ int retval = 0;
u32 i, total;
- status = acpi_evaluate_object(handle, "_WDG", NULL, &out);
+ status = acpi_evaluate_object(device->handle, "_WDG", NULL, &out);
if (ACPI_FAILURE(status))
return -ENXIO;
@@ -675,25 +966,28 @@ static int parse_wdg(acpi_handle handle)
if (debug_dump_wdg)
wmi_dump_wdg(&gblock[i]);
+ /*
+ * Some WMI devices, like those for nVidia hooks, have a
+ * duplicate GUID. It's not clear what we should do in this
+ * case yet, so for now, we'll just ignore the duplicate
+ * for device creation.
+ */
+ if (guid_already_parsed(device, gblock[i].guid))
+ continue;
+
wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
- if (!wblock)
- return -ENOMEM;
+ if (!wblock) {
+ retval = -ENOMEM;
+ break;
+ }
- wblock->handle = handle;
+ wblock->acpi_device = device;
wblock->gblock = gblock[i];
- /*
- Some WMI devices, like those for nVidia hooks, have a
- duplicate GUID. It's not clear what we should do in this
- case yet, so for now, we'll just ignore the duplicate
- for device creation.
- */
- if (!guid_already_parsed(gblock[i].guid)) {
- retval = wmi_create_device(&gblock[i], wblock, handle);
- if (retval) {
- wmi_free_devices();
- goto out_free_pointer;
- }
+ retval = wmi_create_device(wmi_bus_dev, &gblock[i], wblock, device);
+ if (retval) {
+ kfree(wblock);
+ continue;
}
list_add_tail(&wblock->list, &wmi_block_list);
@@ -704,11 +998,27 @@ static int parse_wdg(acpi_handle handle)
}
}
- retval = 0;
+ /*
+ * Now that all of the devices are created, add them to the
+ * device tree and probe subdrivers.
+ */
+ list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
+ if (wblock->acpi_device != device)
+ continue;
+
+ retval = device_add(&wblock->dev.dev);
+ if (retval) {
+ dev_err(wmi_bus_dev, "failed to register %pUL\n",
+ wblock->gblock.guid);
+ if (debug_event)
+ wmi_method_enable(wblock, 0);
+ list_del(&wblock->list);
+ put_device(&wblock->dev.dev);
+ }
+ }
out_free_pointer:
kfree(out.pointer);
-
return retval;
}
@@ -756,67 +1066,168 @@ acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
}
}
-static void acpi_wmi_notify(struct acpi_device *device, u32 event)
+static void acpi_wmi_notify_handler(acpi_handle handle, u32 event,
+ void *context)
{
struct guid_block *block;
struct wmi_block *wblock;
struct list_head *p;
+ bool found_it = false;
list_for_each(p, &wmi_block_list) {
wblock = list_entry(p, struct wmi_block, list);
block = &wblock->gblock;
- if ((block->flags & ACPI_WMI_EVENT) &&
- (block->notify_id == event)) {
- if (wblock->handler)
- wblock->handler(event, wblock->handler_data);
- if (debug_event) {
- pr_info("DEBUG Event GUID: %pUL\n",
- wblock->gblock.guid);
- }
-
- acpi_bus_generate_netlink_event(
- device->pnp.device_class, dev_name(&device->dev),
- event, 0);
+ if (wblock->acpi_device->handle == handle &&
+ (block->flags & ACPI_WMI_EVENT) &&
+ (block->notify_id == event))
+ {
+ found_it = true;
break;
}
}
+
+ if (!found_it)
+ return;
+
+ /* If a driver is bound, then notify the driver. */
+ if (wblock->dev.dev.driver) {
+ struct wmi_driver *driver;
+ struct acpi_object_list input;
+ union acpi_object params[1];
+ struct acpi_buffer evdata = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+
+ driver = container_of(wblock->dev.dev.driver,
+ struct wmi_driver, driver);
+
+ input.count = 1;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = event;
+
+ status = acpi_evaluate_object(wblock->acpi_device->handle,
+ "_WED", &input, &evdata);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(&wblock->dev.dev,
+ "failed to get event data\n");
+ return;
+ }
+
+ if (driver->notify)
+ driver->notify(&wblock->dev,
+ (union acpi_object *)evdata.pointer);
+
+ kfree(evdata.pointer);
+ } else if (wblock->handler) {
+ /* Legacy handler */
+ wblock->handler(event, wblock->handler_data);
+ }
+
+ if (debug_event) {
+ pr_info("DEBUG Event GUID: %pUL\n",
+ wblock->gblock.guid);
+ }
+
+ acpi_bus_generate_netlink_event(
+ wblock->acpi_device->pnp.device_class,
+ dev_name(&wblock->dev.dev),
+ event, 0);
+
}
-static int acpi_wmi_remove(struct acpi_device *device)
+static int acpi_wmi_remove(struct platform_device *device)
{
- acpi_remove_address_space_handler(device->handle,
+ struct acpi_device *acpi_device = ACPI_COMPANION(&device->dev);
+
+ acpi_remove_notify_handler(acpi_device->handle, ACPI_DEVICE_NOTIFY,
+ acpi_wmi_notify_handler);
+ acpi_remove_address_space_handler(acpi_device->handle,
ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler);
- wmi_free_devices();
+ wmi_free_devices(acpi_device);
+ device_unregister((struct device *)dev_get_drvdata(&device->dev));
return 0;
}
-static int acpi_wmi_add(struct acpi_device *device)
+static int acpi_wmi_probe(struct platform_device *device)
{
+ struct acpi_device *acpi_device;
+ struct device *wmi_bus_dev;
acpi_status status;
int error;
- status = acpi_install_address_space_handler(device->handle,
+ acpi_device = ACPI_COMPANION(&device->dev);
+ if (!acpi_device) {
+ dev_err(&device->dev, "ACPI companion is missing\n");
+ return -ENODEV;
+ }
+
+ status = acpi_install_address_space_handler(acpi_device->handle,
ACPI_ADR_SPACE_EC,
&acpi_wmi_ec_space_handler,
NULL, NULL);
if (ACPI_FAILURE(status)) {
- pr_err("Error installing EC region handler\n");
+ dev_err(&device->dev, "Error installing EC region handler\n");
return -ENODEV;
}
- error = parse_wdg(device->handle);
+ status = acpi_install_notify_handler(acpi_device->handle,
+ ACPI_DEVICE_NOTIFY,
+ acpi_wmi_notify_handler,
+ NULL);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&device->dev, "Error installing notify handler\n");
+ error = -ENODEV;
+ goto err_remove_ec_handler;
+ }
+
+ wmi_bus_dev = device_create(&wmi_bus_class, &device->dev, MKDEV(0, 0),
+ NULL, "wmi_bus-%s", dev_name(&device->dev));
+ if (IS_ERR(wmi_bus_dev)) {
+ error = PTR_ERR(wmi_bus_dev);
+ goto err_remove_notify_handler;
+ }
+ dev_set_drvdata(&device->dev, wmi_bus_dev);
+
+ error = parse_wdg(wmi_bus_dev, acpi_device);
if (error) {
- acpi_remove_address_space_handler(device->handle,
- ACPI_ADR_SPACE_EC,
- &acpi_wmi_ec_space_handler);
pr_err("Failed to parse WDG method\n");
- return error;
+ goto err_remove_busdev;
}
return 0;
+
+err_remove_busdev:
+ device_unregister(wmi_bus_dev);
+
+err_remove_notify_handler:
+ acpi_remove_notify_handler(acpi_device->handle, ACPI_DEVICE_NOTIFY,
+ acpi_wmi_notify_handler);
+
+err_remove_ec_handler:
+ acpi_remove_address_space_handler(acpi_device->handle,
+ ACPI_ADR_SPACE_EC,
+ &acpi_wmi_ec_space_handler);
+
+ return error;
+}
+
+int __must_check __wmi_driver_register(struct wmi_driver *driver,
+ struct module *owner)
+{
+ driver->driver.owner = owner;
+ driver->driver.bus = &wmi_bus_type;
+
+ return driver_register(&driver->driver);
}
+EXPORT_SYMBOL(__wmi_driver_register);
+
+void wmi_driver_unregister(struct wmi_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL(wmi_driver_unregister);
static int __init acpi_wmi_init(void)
{
@@ -825,27 +1236,36 @@ static int __init acpi_wmi_init(void)
if (acpi_disabled)
return -ENODEV;
- error = class_register(&wmi_class);
+ error = class_register(&wmi_bus_class);
if (error)
return error;
- error = acpi_bus_register_driver(&acpi_wmi_driver);
+ error = bus_register(&wmi_bus_type);
+ if (error)
+ goto err_unreg_class;
+
+ error = platform_driver_register(&acpi_wmi_driver);
if (error) {
pr_err("Error loading mapper\n");
- class_unregister(&wmi_class);
- return error;
+ goto err_unreg_bus;
}
- pr_info("Mapper loaded\n");
return 0;
+
+err_unreg_class:
+ class_unregister(&wmi_bus_class);
+
+err_unreg_bus:
+ bus_unregister(&wmi_bus_type);
+
+ return error;
}
static void __exit acpi_wmi_exit(void)
{
- acpi_bus_unregister_driver(&acpi_wmi_driver);
- class_unregister(&wmi_class);
-
- pr_info("Mapper unloaded\n");
+ platform_driver_unregister(&acpi_wmi_driver);
+ class_unregister(&wmi_bus_class);
+ bus_unregister(&wmi_bus_type);
}
subsys_initcall(acpi_wmi_init);
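With the bus conversion above, each GUID block becomes a device on the "wmi" bus, and when a subdriver is bound the ACPI notify handler evaluates _WED and hands the result to the driver's ->notify() callback. A minimal event-only subdriver might look like the sketch below; the GUID string and names are placeholders, and only interfaces visible in this diff (struct wmi_driver, struct wmi_device_id, module_wmi_driver()) are assumed.

/* Minimal sketch of an event subdriver on the new WMI bus; the GUID is a
 * placeholder, not a real firmware interface. */
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/wmi.h>

static void example_wmi_notify(struct wmi_device *wdev, union acpi_object *obj)
{
	/* obj is the evaluated _WED result; its type depends on the firmware */
	if (obj && obj->type == ACPI_TYPE_INTEGER)
		dev_info(&wdev->dev, "event data: 0x%llx\n",
			 (unsigned long long)obj->integer.value);
}

static const struct wmi_device_id example_wmi_id_table[] = {
	{ .guid_string = "ABCDEF01-2345-6789-ABCD-EF0123456789" },
	{ },
};

static struct wmi_driver example_wmi_driver = {
	.driver = {
		.name = "example-wmi-event",
	},
	.notify = example_wmi_notify,
	.id_table = example_wmi_id_table,
};

module_wmi_driver(example_wmi_driver);

MODULE_DESCRIPTION("Illustrative WMI event subdriver sketch");
MODULE_LICENSE("GPL");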
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 4b717c699313..43d8ed577e70 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -15,10 +15,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/acpi.h>
@@ -149,8 +145,8 @@ static int vendor_resource_matches(struct pnp_dev *dev,
uuid_len == sizeof(match->data) &&
memcmp(uuid, match->data, uuid_len) == 0) {
if (expected_len && expected_len != actual_len) {
- dev_err(&dev->dev, "wrong vendor descriptor size; "
- "expected %d, found %d bytes\n",
+ dev_err(&dev->dev,
+ "wrong vendor descriptor size; expected %d, found %d bytes\n",
expected_len, actual_len);
return 0;
}
@@ -180,6 +176,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
struct pnp_dev *dev = data;
struct acpi_resource_dma *dma;
struct acpi_resource_vendor_typed *vendor_typed;
+ struct acpi_resource_gpio *gpio;
struct resource_win win = {{0}, 0};
struct resource *r = &win.res;
int i, flags;
@@ -203,13 +200,27 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
* one interrupt, we won't be able to re-encode it.
*/
if (pnp_can_write(dev)) {
- dev_warn(&dev->dev, "multiple interrupts in "
- "_CRS descriptor; configuration can't "
- "be changed\n");
+ dev_warn(&dev->dev,
+ "multiple interrupts in _CRS descriptor; configuration can't be changed\n");
dev->capabilities &= ~PNP_WRITE;
}
}
return AE_OK;
+ } else if (acpi_gpio_get_irq_resource(res, &gpio)) {
+ /*
+ * If the resource is GpioInt() type then extract the IRQ
+ * from GPIO resource and fill it into IRQ resource type.
+ */
+ i = acpi_dev_gpio_irq_get(dev->data, 0);
+ if (i >= 0) {
+ flags = acpi_dev_irq_flags(gpio->triggering,
+ gpio->polarity,
+ gpio->sharable);
+ } else {
+ flags = IORESOURCE_DISABLED;
+ }
+ pnp_add_irq_resource(dev, i, flags);
+ return AE_OK;
} else if (r->flags & IORESOURCE_DISABLED) {
pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED);
return AE_OK;
@@ -331,8 +342,8 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev,
if (p->interrupts[i] < PNP_IRQ_NR)
__set_bit(p->interrupts[i], map.bits);
else
- dev_err(&dev->dev, "ignoring IRQ %d option "
- "(too large for %d entry bitmap)\n",
+ dev_err(&dev->dev,
+ "ignoring IRQ %d option (too large for %d entry bitmap)\n",
p->interrupts[i], PNP_IRQ_NR);
}
}
@@ -933,8 +944,9 @@ int pnpacpi_encode_resources(struct pnp_dev *dev, struct acpi_buffer *buffer)
case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
default: /* other type */
- dev_warn(&dev->dev, "can't encode unknown resource "
- "type %d\n", resource->type);
+ dev_warn(&dev->dev,
+ "can't encode unknown resource type %d\n",
+ resource->type);
return -EINVAL;
}
resource++;
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index 76d13150283f..a75ff3622450 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -21,22 +21,10 @@
#include <linux/platform_device.h>
#include <linux/pwm.h>
-#include <asm/mach-jz4740/gpio.h>
#include <asm/mach-jz4740/timer.h>
#define NUM_PWM 8
-static const unsigned int jz4740_pwm_gpio_list[NUM_PWM] = {
- JZ_GPIO_PWM0,
- JZ_GPIO_PWM1,
- JZ_GPIO_PWM2,
- JZ_GPIO_PWM3,
- JZ_GPIO_PWM4,
- JZ_GPIO_PWM5,
- JZ_GPIO_PWM6,
- JZ_GPIO_PWM7,
-};
-
struct jz4740_pwm_chip {
struct pwm_chip chip;
struct clk *clk;
@@ -49,9 +37,6 @@ static inline struct jz4740_pwm_chip *to_jz4740(struct pwm_chip *chip)
static int jz4740_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
- unsigned int gpio = jz4740_pwm_gpio_list[pwm->hwpwm];
- int ret;
-
/*
* Timers 0 and 1 are used for system tasks, so they are unavailable
* for use as PWMs.
@@ -59,15 +44,6 @@ static int jz4740_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
if (pwm->hwpwm < 2)
return -EBUSY;
- ret = gpio_request(gpio, pwm->label);
- if (ret) {
- dev_err(chip->dev, "Failed to request GPIO#%u for PWM: %d\n",
- gpio, ret);
- return ret;
- }
-
- jz_gpio_set_function(gpio, JZ_GPIO_FUNC_PWM);
-
jz4740_timer_start(pwm->hwpwm);
return 0;
@@ -75,13 +51,8 @@ static int jz4740_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
static void jz4740_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- unsigned int gpio = jz4740_pwm_gpio_list[pwm->hwpwm];
-
jz4740_timer_set_ctrl(pwm->hwpwm, 0);
- jz_gpio_set_function(gpio, JZ_GPIO_FUNC_NONE);
- gpio_free(gpio);
-
jz4740_timer_stop(pwm->hwpwm);
}
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index faad69a1a597..8891a8e50f12 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -6,7 +6,6 @@ config REMOTEPROC
select CRC32
select FW_LOADER
select VIRTIO
- select VIRTUALIZATION
help
Support for remote processors (such as DSP coprocessors). These
are mainly used on embedded systems.
@@ -18,7 +17,6 @@ config OMAP_REMOTEPROC
depends on HAS_DMA
depends on ARCH_OMAP4 || SOC_OMAP5
depends on OMAP_IOMMU
- depends on REMOTEPROC
select MAILBOX
select OMAP2PLUS_MBOX
select RPMSG_VIRTIO
@@ -38,7 +36,6 @@ config OMAP_REMOTEPROC
config WKUP_M3_RPROC
tristate "AMx3xx Wakeup M3 remoteproc support"
depends on SOC_AM33XX || SOC_AM43XX
- depends on REMOTEPROC
help
Say y here to support Wakeup M3 remote processor on TI AM33xx
and AM43xx family of SoCs.
@@ -51,8 +48,7 @@ config WKUP_M3_RPROC
config DA8XX_REMOTEPROC
tristate "DA8xx/OMAP-L13x remoteproc support"
depends on ARCH_DAVINCI_DA8XX
- depends on REMOTEPROC
- select CMA if MMU
+ depends on DMA_CMA
select RPMSG_VIRTIO
help
Say y here to support DA8xx/OMAP-L13x remote processors via the
@@ -71,10 +67,20 @@ config DA8XX_REMOTEPROC
It's safe to say n here if you're not interested in multimedia
offloading.
+config KEYSTONE_REMOTEPROC
+ tristate "Keystone Remoteproc support"
+ depends on ARCH_KEYSTONE
+ select RPMSG_VIRTIO
+ help
+ Say Y here to support Keystone remote processors (DSP)
+ via the remote processor framework.
+
+ It's safe to say N here if you're not interested in the Keystone
+ DSPs or just want to use a bare minimum kernel.
+
config QCOM_ADSP_PIL
tristate "Qualcomm ADSP Peripheral Image Loader"
depends on OF && ARCH_QCOM
- depends on REMOTEPROC
depends on QCOM_SMEM
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
select MFD_SYSCON
@@ -92,7 +98,6 @@ config QCOM_Q6V5_PIL
tristate "Qualcomm Hexagon V5 Peripherial Image Loader"
depends on OF && ARCH_QCOM
depends on QCOM_SMEM
- depends on REMOTEPROC
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
select MFD_SYSCON
select QCOM_RPROC_COMMON
@@ -106,7 +111,6 @@ config QCOM_WCNSS_PIL
depends on OF && ARCH_QCOM
depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
depends on QCOM_SMEM
- depends on REMOTEPROC
select QCOM_MDT_LOADER
select QCOM_RPROC_COMMON
select QCOM_SCM
@@ -117,7 +121,6 @@ config QCOM_WCNSS_PIL
config ST_REMOTEPROC
tristate "ST remoteproc support"
depends on ARCH_STI
- depends on REMOTEPROC
select MAILBOX
select STI_MBOX
select RPMSG_VIRTIO
@@ -128,7 +131,6 @@ config ST_REMOTEPROC
config ST_SLIM_REMOTEPROC
tristate
- depends on REMOTEPROC
endif # REMOTEPROC
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index ffc5e430df27..f1ce5fc8a2f3 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -11,6 +11,7 @@ remoteproc-y += remoteproc_elf_loader.o
obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
+obj-$(CONFIG_KEYSTONE_REMOTEPROC) += keystone_remoteproc.o
obj-$(CONFIG_QCOM_ADSP_PIL) += qcom_adsp_pil.o
obj-$(CONFIG_QCOM_RPROC_COMMON) += qcom_common.o
obj-$(CONFIG_QCOM_Q6V5_PIL) += qcom_q6v5_pil.o
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
index 3814de28599c..99539cec1329 100644
--- a/drivers/remoteproc/da8xx_remoteproc.c
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -137,6 +137,7 @@ static int da8xx_rproc_stop(struct rproc *rproc)
{
struct da8xx_rproc *drproc = rproc->priv;
+ davinci_clk_reset_assert(drproc->dsp_clk);
clk_disable(drproc->dsp_clk);
return 0;
@@ -157,22 +158,6 @@ static const struct rproc_ops da8xx_rproc_ops = {
.kick = da8xx_rproc_kick,
};
-static int reset_assert(struct device *dev)
-{
- struct clk *dsp_clk;
-
- dsp_clk = clk_get(dev, NULL);
- if (IS_ERR(dsp_clk)) {
- dev_err(dev, "clk_get error: %ld\n", PTR_ERR(dsp_clk));
- return PTR_ERR(dsp_clk);
- }
-
- davinci_clk_reset_assert(dsp_clk);
- clk_put(dsp_clk);
-
- return 0;
-}
-
static int da8xx_rproc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -223,6 +208,7 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
drproc = rproc->priv;
drproc->rproc = rproc;
+ drproc->dsp_clk = dsp_clk;
rproc->has_iommu = false;
platform_set_drvdata(pdev, rproc);
@@ -241,7 +227,7 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
* *not* in reset, but da8xx_rproc_start() needs the DSP to be
* held in reset at the time it is called.
*/
- ret = reset_assert(dev);
+ ret = davinci_clk_reset_assert(drproc->dsp_clk);
if (ret)
goto free_rproc;
@@ -250,7 +236,6 @@ static int da8xx_rproc_probe(struct platform_device *pdev)
drproc->ack_fxn = irq_data->chip->irq_ack;
drproc->irq_data = irq_data;
drproc->irq = irq;
- drproc->dsp_clk = dsp_clk;
ret = rproc_add(rproc);
if (ret) {
@@ -268,21 +253,10 @@ free_rproc:
static int da8xx_rproc_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
struct rproc *rproc = platform_get_drvdata(pdev);
struct da8xx_rproc *drproc = (struct da8xx_rproc *)rproc->priv;
/*
- * It's important to place the DSP in reset before going away,
- * since a subsequent insmod of this module may enable the DSP's
- * clock before its program/boot-address has been loaded and
- * before this module's probe has had a chance to reset the DSP.
- * Without the reset, the DSP can lockup permanently when it
- * begins executing garbage.
- */
- reset_assert(dev);
-
- /*
* The devm subsystem might end up releasing things before
* freeing the irq, thus allowing an interrupt to sneak in while
* the device is being removed. This should prevent that.
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
new file mode 100644
index 000000000000..5f776bfd674a
--- /dev/null
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -0,0 +1,525 @@
+/*
+ * TI Keystone DSP remoteproc driver
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+
+#include "remoteproc_internal.h"
+
+#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
+
+/**
+ * struct keystone_rproc_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address of the memory region from DSP view
+ * @size: Size of the memory region
+ */
+struct keystone_rproc_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct keystone_rproc - keystone remote processor driver structure
+ * @dev: cached device pointer
+ * @rproc: remoteproc device handle
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @dev_ctrl: device control regmap handle
+ * @reset: reset control handle
+ * @boot_offset: boot register offset in @dev_ctrl regmap
+ * @irq_ring: irq entry for vring
+ * @irq_fault: irq entry for exception
+ * @kick_gpio: gpio used for virtio kicks
+ * @workqueue: workqueue for processing virtio interrupts
+ */
+struct keystone_rproc {
+ struct device *dev;
+ struct rproc *rproc;
+ struct keystone_rproc_mem *mem;
+ int num_mems;
+ struct regmap *dev_ctrl;
+ struct reset_control *reset;
+ u32 boot_offset;
+ int irq_ring;
+ int irq_fault;
+ int kick_gpio;
+ struct work_struct workqueue;
+};
+
+/* Put the DSP processor into reset */
+static void keystone_rproc_dsp_reset(struct keystone_rproc *ksproc)
+{
+ reset_control_assert(ksproc->reset);
+}
+
+/* Configure the boot address and boot the DSP processor */
+static int keystone_rproc_dsp_boot(struct keystone_rproc *ksproc, u32 boot_addr)
+{
+ int ret;
+
+ if (boot_addr & (SZ_1K - 1)) {
+ dev_err(ksproc->dev, "invalid boot address 0x%x, must be aligned on a 1KB boundary\n",
+ boot_addr);
+ return -EINVAL;
+ }
+
+ ret = regmap_write(ksproc->dev_ctrl, ksproc->boot_offset, boot_addr);
+ if (ret) {
+ dev_err(ksproc->dev, "regmap_write of boot address failed, status = %d\n",
+ ret);
+ return ret;
+ }
+
+ reset_control_deassert(ksproc->reset);
+
+ return 0;
+}
+
+/*
+ * Process the remoteproc exceptions
+ *
+ * The exception reporting on Keystone DSP remote processors is very simple
+ * compared to the equivalent processors on the OMAP family; an exception is
+ * signalled through a dedicated, software-defined interrupt source in the
+ * IPC interrupt generation register.
+ *
+ * This function just invokes rproc_report_crash() to report the exception
+ * to the remoteproc driver core, to trigger a recovery.
+ */
+static irqreturn_t keystone_rproc_exception_interrupt(int irq, void *dev_id)
+{
+ struct keystone_rproc *ksproc = dev_id;
+
+ rproc_report_crash(ksproc->rproc, RPROC_FATAL_ERROR);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Main virtqueue message workqueue function
+ *
+ * This function is executed upon scheduling of the keystone remoteproc
+ * driver's workqueue. The workqueue is scheduled by the vring ISR handler.
+ *
+ * There is no payload message indicating the virtqueue index as is the
+ * case with mailbox-based implementations on the OMAP family. As such, this
+ * handler processes both the Tx and Rx virtqueue indices on every invocation.
+ * The rproc_vq_interrupt function can detect if there are new unprocessed
+ * messages or not (returns IRQ_NONE vs IRQ_HANDLED), but there is no need
+ * to check for these return values. The index 0 triggering will process all
+ * pending Rx buffers, and the index 1 triggering will process all newly
+ * available Tx buffers and will wake up any potentially blocked senders.
+ *
+ * NOTE:
+ * 1. A payload could be added by using some of the source bits in the
+ * IPC interrupt generation registers, but this would need additional
+ * changes to the overall IPC stack, and currently there are no benefits
+ * of adapting that approach.
+ * 2. The current logic is based on an inherent design assumption of supporting
+ * only 2 vrings, but this can be changed if needed.
+ */
+static void handle_event(struct work_struct *work)
+{
+ struct keystone_rproc *ksproc =
+ container_of(work, struct keystone_rproc, workqueue);
+
+ rproc_vq_interrupt(ksproc->rproc, 0);
+ rproc_vq_interrupt(ksproc->rproc, 1);
+}
+
+/*
+ * Interrupt handler for processing vring kicks from remote processor
+ */
+static irqreturn_t keystone_rproc_vring_interrupt(int irq, void *dev_id)
+{
+ struct keystone_rproc *ksproc = dev_id;
+
+ schedule_work(&ksproc->workqueue);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Power up the DSP remote processor.
+ *
+ * This function will be invoked only after the firmware for this rproc
+ * was loaded, parsed successfully, and all of its resource requirements
+ * were met.
+ */
+static int keystone_rproc_start(struct rproc *rproc)
+{
+ struct keystone_rproc *ksproc = rproc->priv;
+ int ret;
+
+ INIT_WORK(&ksproc->workqueue, handle_event);
+
+ ret = request_irq(ksproc->irq_ring, keystone_rproc_vring_interrupt, 0,
+ dev_name(ksproc->dev), ksproc);
+ if (ret) {
+ dev_err(ksproc->dev, "failed to enable vring interrupt, ret = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = request_irq(ksproc->irq_fault, keystone_rproc_exception_interrupt,
+ 0, dev_name(ksproc->dev), ksproc);
+ if (ret) {
+ dev_err(ksproc->dev, "failed to enable exception interrupt, ret = %d\n",
+ ret);
+ goto free_vring_irq;
+ }
+
+ ret = keystone_rproc_dsp_boot(ksproc, rproc->bootaddr);
+ if (ret)
+ goto free_exc_irq;
+
+ return 0;
+
+free_exc_irq:
+ free_irq(ksproc->irq_fault, ksproc);
+free_vring_irq:
+ free_irq(ksproc->irq_ring, ksproc);
+ flush_work(&ksproc->workqueue);
+out:
+ return ret;
+}
+
+/*
+ * Stop the DSP remote processor.
+ *
+ * This function puts the DSP processor into reset, and finishes processing
+ * of any pending messages.
+ */
+static int keystone_rproc_stop(struct rproc *rproc)
+{
+ struct keystone_rproc *ksproc = rproc->priv;
+
+ keystone_rproc_dsp_reset(ksproc);
+ free_irq(ksproc->irq_fault, ksproc);
+ free_irq(ksproc->irq_ring, ksproc);
+ flush_work(&ksproc->workqueue);
+
+ return 0;
+}
+
+/*
+ * Kick the remote processor to notify about pending unprocessed messages.
+ * The vqid argument is not used, as the kick is performed through a simulated
+ * GPIO (a bit in an IPC interrupt-triggering register); the remote processor
+ * is expected to process both its Tx and Rx virtqueues.
+ */
+static void keystone_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct keystone_rproc *ksproc = rproc->priv;
+
+ if (WARN_ON(ksproc->kick_gpio < 0))
+ return;
+
+ gpio_set_value(ksproc->kick_gpio, 1);
+}
+
+/*
+ * Custom function to translate a DSP device address (internal RAMs only) to a
+ * kernel virtual address. The DSPs can access their RAMs at either an internal
+ * address visible only from a DSP, or at the SoC-level bus address. Both these
+ * addresses must be checked when translating. The translated addresses
+ * can be used either by the remoteproc core for loading (when using the kernel
+ * remoteproc loader), or by any rpmsg bus drivers.
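+ *
+ * Device addresses below KEYSTONE_RPROC_LOCAL_ADDRESS_MASK are matched against
+ * each memory region's DSP-internal (dev_addr) range, while higher addresses
+ * are matched against the region's SoC bus address range.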
+ */
+static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+{
+ struct keystone_rproc *ksproc = rproc->priv;
+ void __iomem *va = NULL;
+ phys_addr_t bus_addr;
+ u32 dev_addr, offset;
+ size_t size;
+ int i;
+
+ if (len <= 0)
+ return NULL;
+
+ for (i = 0; i < ksproc->num_mems; i++) {
+ bus_addr = ksproc->mem[i].bus_addr;
+ dev_addr = ksproc->mem[i].dev_addr;
+ size = ksproc->mem[i].size;
+
+ if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) {
+ /* handle DSP-view addresses */
+ if ((da >= dev_addr) &&
+ ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = ksproc->mem[i].cpu_addr + offset;
+ break;
+ }
+ } else {
+ /* handle SoC-view addresses */
+ if ((da >= bus_addr) &&
+ (da + len) <= (bus_addr + size)) {
+ offset = da - bus_addr;
+ va = ksproc->mem[i].cpu_addr + offset;
+ break;
+ }
+ }
+ }
+
+ return (__force void *)va;
+}
+
+static const struct rproc_ops keystone_rproc_ops = {
+ .start = keystone_rproc_start,
+ .stop = keystone_rproc_stop,
+ .kick = keystone_rproc_kick,
+ .da_to_va = keystone_rproc_da_to_va,
+};
+
+static int keystone_rproc_of_get_memories(struct platform_device *pdev,
+ struct keystone_rproc *ksproc)
+{
+ static const char * const mem_names[] = {"l2sram", "l1pram", "l1dram"};
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int num_mems = 0;
+ int i;
+
+ num_mems = ARRAY_SIZE(mem_names);
+ ksproc->mem = devm_kcalloc(ksproc->dev, num_mems,
+ sizeof(*ksproc->mem), GFP_KERNEL);
+ if (!ksproc->mem)
+ return -ENOMEM;
+
+ for (i = 0; i < num_mems; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mem_names[i]);
+ ksproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ksproc->mem[i].cpu_addr)) {
+ dev_err(dev, "failed to parse and map %s memory\n",
+ mem_names[i]);
+ return PTR_ERR(ksproc->mem[i].cpu_addr);
+ }
+ ksproc->mem[i].bus_addr = res->start;
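+ /* DSP-internal view of the same RAM: local-address bits of the bus address */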
+ ksproc->mem[i].dev_addr =
+ res->start & KEYSTONE_RPROC_LOCAL_ADDRESS_MASK;
+ ksproc->mem[i].size = resource_size(res);
+
+ /* zero out memories to start in a pristine state */
+ memset((__force void *)ksproc->mem[i].cpu_addr, 0,
+ ksproc->mem[i].size);
+ }
+ ksproc->num_mems = num_mems;
+
+ return 0;
+}
+
+static int keystone_rproc_of_get_dev_syscon(struct platform_device *pdev,
+ struct keystone_rproc *ksproc)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ if (!of_property_read_bool(np, "ti,syscon-dev")) {
+ dev_err(dev, "ti,syscon-dev property is absent\n");
+ return -EINVAL;
+ }
+
+ ksproc->dev_ctrl =
+ syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev");
+ if (IS_ERR(ksproc->dev_ctrl)) {
+ ret = PTR_ERR(ksproc->dev_ctrl);
+ return ret;
+ }
+
+ if (of_property_read_u32_index(np, "ti,syscon-dev", 1,
+ &ksproc->boot_offset)) {
+ dev_err(dev, "couldn't read the boot register offset\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int keystone_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct keystone_rproc *ksproc;
+ struct rproc *rproc;
+ int dsp_id;
+ char *fw_name = NULL;
+ char *template = "keystone-dsp%d-fw";
+ int name_len = 0;
+ int ret = 0;
+
+ if (!np) {
+ dev_err(dev, "only DT-based devices are supported\n");
+ return -ENODEV;
+ }
+
+ dsp_id = of_alias_get_id(np, "rproc");
+ if (dsp_id < 0) {
+ dev_warn(dev, "device does not have an alias id\n");
+ return dsp_id;
+ }
+
+ /* construct a custom default fw name - subject to change in future */
+ name_len = strlen(template); /* assuming a single digit alias */
+ fw_name = devm_kzalloc(dev, name_len, GFP_KERNEL);
+ if (!fw_name)
+ return -ENOMEM;
+ snprintf(fw_name, name_len, template, dsp_id);
+
+ rproc = rproc_alloc(dev, dev_name(dev), &keystone_rproc_ops, fw_name,
+ sizeof(*ksproc));
+ if (!rproc)
+ return -ENOMEM;
+
+ rproc->has_iommu = false;
+ ksproc = rproc->priv;
+ ksproc->rproc = rproc;
+ ksproc->dev = dev;
+
+ ret = keystone_rproc_of_get_dev_syscon(pdev, ksproc);
+ if (ret)
+ goto free_rproc;
+
+ ksproc->reset = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(ksproc->reset)) {
+ ret = PTR_ERR(ksproc->reset);
+ goto free_rproc;
+ }
+
+ /* enable clock for accessing DSP internal memories */
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable clock, status = %d\n", ret);
+ pm_runtime_put_noidle(dev);
+ goto disable_rpm;
+ }
+
+ ret = keystone_rproc_of_get_memories(pdev, ksproc);
+ if (ret)
+ goto disable_clk;
+
+ ksproc->irq_ring = platform_get_irq_byname(pdev, "vring");
+ if (ksproc->irq_ring < 0) {
+ ret = ksproc->irq_ring;
+ dev_err(dev, "failed to get vring interrupt, status = %d\n",
+ ret);
+ goto disable_clk;
+ }
+
+ ksproc->irq_fault = platform_get_irq_byname(pdev, "exception");
+ if (ksproc->irq_fault < 0) {
+ ret = ksproc->irq_fault;
+ dev_err(dev, "failed to get exception interrupt, status = %d\n",
+ ret);
+ goto disable_clk;
+ }
+
+ ksproc->kick_gpio = of_get_named_gpio_flags(np, "kick-gpios", 0, NULL);
+ if (ksproc->kick_gpio < 0) {
+ ret = ksproc->kick_gpio;
+ dev_err(dev, "failed to get gpio for virtio kicks, status = %d\n",
+ ret);
+ goto disable_clk;
+ }
+
+ if (of_reserved_mem_device_init(dev))
+ dev_warn(dev, "device does not have specific CMA pool\n");
+
+ /* ensure the DSP is in reset before loading firmware */
+ ret = reset_control_status(ksproc->reset);
+ if (ret < 0) {
+ dev_err(dev, "failed to get reset status, status = %d\n", ret);
+ goto release_mem;
+ } else if (ret == 0) {
+ WARN(1, "device is not in reset\n");
+ keystone_rproc_dsp_reset(ksproc);
+ }
+
+ ret = rproc_add(rproc);
+ if (ret) {
+ dev_err(dev, "failed to add register device with remoteproc core, status = %d\n",
+ ret);
+ goto release_mem;
+ }
+
+ platform_set_drvdata(pdev, ksproc);
+
+ return 0;
+
+release_mem:
+ of_reserved_mem_device_release(dev);
+disable_clk:
+ pm_runtime_put_sync(dev);
+disable_rpm:
+ pm_runtime_disable(dev);
+free_rproc:
+ rproc_free(rproc);
+ return ret;
+}
+
+static int keystone_rproc_remove(struct platform_device *pdev)
+{
+ struct keystone_rproc *ksproc = platform_get_drvdata(pdev);
+
+ rproc_del(ksproc->rproc);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ rproc_free(ksproc->rproc);
+ of_reserved_mem_device_release(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id keystone_rproc_of_match[] = {
+ { .compatible = "ti,k2hk-dsp", },
+ { .compatible = "ti,k2l-dsp", },
+ { .compatible = "ti,k2e-dsp", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, keystone_rproc_of_match);
+
+static struct platform_driver keystone_rproc_driver = {
+ .probe = keystone_rproc_probe,
+ .remove = keystone_rproc_remove,
+ .driver = {
+ .name = "keystone-rproc",
+ .of_match_table = keystone_rproc_of_match,
+ },
+};
+
+module_platform_driver(keystone_rproc_driver);
+
+MODULE_AUTHOR("Suman Anna <[email protected]>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI Keystone DSP Remoteproc driver");
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 3dabb20b8d5d..564061dcc019 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -847,6 +847,63 @@ static void rproc_resource_cleanup(struct rproc *rproc)
kref_put(&rvdev->refcount, rproc_vdev_release);
}
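+/*
+ * Boot a remote processor from an already-requested firmware image: load the
+ * ELF segments, publish the cached resource table to device memory and invoke
+ * the driver's ->start() hook. Shared by rproc_fw_boot() and
+ * rproc_trigger_recovery().
+ */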
+static int rproc_start(struct rproc *rproc, const struct firmware *fw)
+{
+ struct resource_table *table, *loaded_table;
+ struct device *dev = &rproc->dev;
+ int ret, tablesz;
+
+ /* look for the resource table */
+ table = rproc_find_rsc_table(rproc, fw, &tablesz);
+ if (!table) {
+ dev_err(dev, "Resource table look up failed\n");
+ return -EINVAL;
+ }
+
+ /* load the ELF segments to memory */
+ ret = rproc_load_segments(rproc, fw);
+ if (ret) {
+ dev_err(dev, "Failed to load program segments: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * The starting device has been given the rproc->cached_table as the
+ * resource table. The address of the vring along with the other
+ * allocated resources (carveouts etc) is stored in cached_table.
+ * In order to pass this information to the remote device we must copy
+ * this information to device memory. We also update the table_ptr so
+ * that any subsequent changes will be applied to the loaded version.
+ */
+ loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
+ if (loaded_table) {
+ memcpy(loaded_table, rproc->cached_table, tablesz);
+ rproc->table_ptr = loaded_table;
+ }
+
+ /* power up the remote processor */
+ ret = rproc->ops->start(rproc);
+ if (ret) {
+ dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
+ return ret;
+ }
+
+ /* probe any subdevices for the remote processor */
+ ret = rproc_probe_subdevices(rproc);
+ if (ret) {
+ dev_err(dev, "failed to probe subdevices for %s: %d\n",
+ rproc->name, ret);
+ rproc->ops->stop(rproc);
+ return ret;
+ }
+
+ rproc->state = RPROC_RUNNING;
+
+ dev_info(dev, "remote processor %s is now up\n", rproc->name);
+
+ return 0;
+}
+
/*
* take a firmware and boot a remote processor with it.
*/
@@ -854,7 +911,7 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
{
struct device *dev = &rproc->dev;
const char *name = rproc->firmware;
- struct resource_table *table, *loaded_table;
+ struct resource_table *table;
int ret, tablesz;
ret = rproc_fw_sanity_check(rproc, fw);
@@ -905,50 +962,12 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
goto clean_up_resources;
}
- /* load the ELF segments to memory */
- ret = rproc_load_segments(rproc, fw);
- if (ret) {
- dev_err(dev, "Failed to load program segments: %d\n", ret);
- goto clean_up_resources;
- }
-
- /*
- * The starting device has been given the rproc->cached_table as the
- * resource table. The address of the vring along with the other
- * allocated resources (carveouts etc) is stored in cached_table.
- * In order to pass this information to the remote device we must copy
- * this information to device memory. We also update the table_ptr so
- * that any subsequent changes will be applied to the loaded version.
- */
- loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
- if (loaded_table) {
- memcpy(loaded_table, rproc->cached_table, tablesz);
- rproc->table_ptr = loaded_table;
- }
-
- /* power up the remote processor */
- ret = rproc->ops->start(rproc);
- if (ret) {
- dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
+ ret = rproc_start(rproc, fw);
+ if (ret)
goto clean_up_resources;
- }
-
- /* probe any subdevices for the remote processor */
- ret = rproc_probe_subdevices(rproc);
- if (ret) {
- dev_err(dev, "failed to probe subdevices for %s: %d\n",
- rproc->name, ret);
- goto stop_rproc;
- }
-
- rproc->state = RPROC_RUNNING;
-
- dev_info(dev, "remote processor %s is now up\n", rproc->name);
return 0;
-stop_rproc:
- rproc->ops->stop(rproc);
clean_up_resources:
rproc_resource_cleanup(rproc);
clean_up:
@@ -994,6 +1013,32 @@ static int rproc_trigger_auto_boot(struct rproc *rproc)
return ret;
}
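+/*
+ * Counterpart of rproc_start(): remove any subdevices, invoke the driver's
+ * ->stop() hook and mark the processor offline. Shared by rproc_shutdown()
+ * and rproc_trigger_recovery().
+ */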
+static int rproc_stop(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ /* remove any subdevices for the remote processor */
+ rproc_remove_subdevices(rproc);
+
+ /* power off the remote processor */
+ ret = rproc->ops->stop(rproc);
+ if (ret) {
+ dev_err(dev, "can't stop rproc: %d\n", ret);
+ return ret;
+ }
+
+ /* if in crash state, unlock crash handler */
+ if (rproc->state == RPROC_CRASHED)
+ complete_all(&rproc->crash_comp);
+
+ rproc->state = RPROC_OFFLINE;
+
+ dev_info(dev, "stopped remote processor %s\n", rproc->name);
+
+ return 0;
+}
+
/**
* rproc_trigger_recovery() - recover a remoteproc
* @rproc: the remote processor
@@ -1006,23 +1051,40 @@ static int rproc_trigger_auto_boot(struct rproc *rproc)
*/
int rproc_trigger_recovery(struct rproc *rproc)
{
- dev_err(&rproc->dev, "recovering %s\n", rproc->name);
+ const struct firmware *firmware_p;
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ dev_err(dev, "recovering %s\n", rproc->name);
init_completion(&rproc->crash_comp);
- /* shut down the remote */
- /* TODO: make sure this works with rproc->power > 1 */
- rproc_shutdown(rproc);
+ ret = mutex_lock_interruptible(&rproc->lock);
+ if (ret)
+ return ret;
+
+ ret = rproc_stop(rproc);
+ if (ret)
+ goto unlock_mutex;
/* wait until there is no more rproc users */
wait_for_completion(&rproc->crash_comp);
- /*
- * boot the remote processor up again
- */
- rproc_boot(rproc);
+ /* load firmware */
+ ret = request_firmware(&firmware_p, rproc->firmware, dev);
+ if (ret < 0) {
+ dev_err(dev, "request_firmware failed: %d\n", ret);
+ goto unlock_mutex;
+ }
- return 0;
+ /* boot the remote processor up again */
+ ret = rproc_start(rproc, firmware_p);
+
+ release_firmware(firmware_p);
+
+unlock_mutex:
+ mutex_unlock(&rproc->lock);
+ return ret;
}
/**
@@ -1163,14 +1225,9 @@ void rproc_shutdown(struct rproc *rproc)
if (!atomic_dec_and_test(&rproc->power))
goto out;
- /* remove any subdevices for the remote processor */
- rproc_remove_subdevices(rproc);
-
- /* power off the remote processor */
- ret = rproc->ops->stop(rproc);
+ ret = rproc_stop(rproc);
if (ret) {
atomic_inc(&rproc->power);
- dev_err(dev, "can't stop rproc: %d\n", ret);
goto out;
}
@@ -1183,15 +1240,6 @@ void rproc_shutdown(struct rproc *rproc)
kfree(rproc->cached_table);
rproc->cached_table = NULL;
rproc->table_ptr = NULL;
-
- /* if in crash state, unlock crash handler */
- if (rproc->state == RPROC_CRASHED)
- complete_all(&rproc->crash_comp);
-
- rproc->state = RPROC_OFFLINE;
-
- dev_info(dev, "stopped remote processor %s\n", rproc->name);
-
out:
mutex_unlock(&rproc->lock);
}
diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig
index edc008f55663..1323a245763b 100644
--- a/drivers/rpmsg/Kconfig
+++ b/drivers/rpmsg/Kconfig
@@ -13,6 +13,16 @@ config RPMSG_CHAR
in /dev. They make it possible for user-space programs to send and
receive rpmsg packets.
+config RPMSG_QCOM_GLINK_RPM
+ tristate "Qualcomm RPM Glink driver"
+ select RPMSG
+ depends on HAS_IOMEM
+ depends on MAILBOX
+ help
+ Say y here to enable support for the GLINK RPM communication driver,
+ which serves as a channel for communication with the RPM in GLINK
+ enabled systems.
+
config RPMSG_QCOM_SMD
tristate "Qualcomm Shared Memory Driver (SMD)"
depends on QCOM_SMEM
@@ -26,6 +36,5 @@ config RPMSG_VIRTIO
tristate
select RPMSG
select VIRTIO
- select VIRTUALIZATION
endmenu
diff --git a/drivers/rpmsg/Makefile b/drivers/rpmsg/Makefile
index fae9a6d548fb..28cc19088cc0 100644
--- a/drivers/rpmsg/Makefile
+++ b/drivers/rpmsg/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_RPMSG) += rpmsg_core.o
obj-$(CONFIG_RPMSG_CHAR) += rpmsg_char.o
+obj-$(CONFIG_RPMSG_QCOM_GLINK_RPM) += qcom_glink_rpm.o
obj-$(CONFIG_RPMSG_QCOM_SMD) += qcom_smd.o
obj-$(CONFIG_RPMSG_VIRTIO) += virtio_rpmsg_bus.o
diff --git a/drivers/rpmsg/qcom_glink_rpm.c b/drivers/rpmsg/qcom_glink_rpm.c
new file mode 100644
index 000000000000..3559a3e84c1e
--- /dev/null
+++ b/drivers/rpmsg/qcom_glink_rpm.c
@@ -0,0 +1,1233 @@
+/*
+ * Copyright (c) 2016-2017, Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/rpmsg.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/mailbox_client.h>
+
+#include "rpmsg_internal.h"
+
+#define RPM_TOC_SIZE 256
+#define RPM_TOC_MAGIC 0x67727430 /* grt0 */
+#define RPM_TOC_MAX_ENTRIES ((RPM_TOC_SIZE - sizeof(struct rpm_toc)) / \
+ sizeof(struct rpm_toc_entry))
+
+#define RPM_TX_FIFO_ID 0x61703272 /* ap2r */
+#define RPM_RX_FIFO_ID 0x72326170 /* r2ap */
+
+#define GLINK_NAME_SIZE 32
+
+#define RPM_GLINK_CID_MIN 1
+#define RPM_GLINK_CID_MAX 65536
+
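+/*
+ * The last RPM_TOC_SIZE bytes of the RPM message RAM hold a table of contents
+ * whose entries describe where the ap2r (TX) and r2ap (RX) FIFOs live within
+ * the same region; see glink_rpm_parse_toc().
+ */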
+struct rpm_toc_entry {
+ __le32 id;
+ __le32 offset;
+ __le32 size;
+} __packed;
+
+struct rpm_toc {
+ __le32 magic;
+ __le32 count;
+
+ struct rpm_toc_entry entries[];
+} __packed;
+
+struct glink_msg {
+ __le16 cmd;
+ __le16 param1;
+ __le32 param2;
+ u8 data[];
+} __packed;
+
+struct glink_rpm_pipe {
+ void __iomem *tail;
+ void __iomem *head;
+
+ void __iomem *fifo;
+
+ size_t length;
+};
+
+/**
+ * struct glink_defer_cmd - deferred incoming control message
+ * @node: list node
+ * @msg: message header
+ * @data: payload of the message
+ *
+ * Copy of a received control message, to be added to @rx_queue and processed
+ * by @rx_work of @glink_rpm.
+ */
+struct glink_defer_cmd {
+ struct list_head node;
+
+ struct glink_msg msg;
+ u8 data[];
+};
+
+/**
+ * struct glink_rpm - driver context, relates to one remote subsystem
+ * @dev: reference to the associated struct device
+ * @mbox_client: mailbox client handle
+ * @mbox_chan: mailbox channel used to ring the "rpm_hlos" ipc doorbell
+ * @rx_pipe: pipe object for receive FIFO
+ * @tx_pipe: pipe object for transmit FIFO
+ * @irq: IRQ for signaling incoming events
+ * @rx_work: worker for handling received control messages
+ * @rx_lock: protects the @rx_queue
+ * @rx_queue: queue of received control messages to be processed in @rx_work
+ * @tx_lock: synchronizes operations on the tx fifo
+ * @idr_lock: synchronizes @lcids and @rcids modifications
+ * @lcids: idr of all channels with a known local channel id
+ * @rcids: idr of all channels with a known remote channel id
+ */
+struct glink_rpm {
+ struct device *dev;
+
+ struct mbox_client mbox_client;
+ struct mbox_chan *mbox_chan;
+
+ struct glink_rpm_pipe rx_pipe;
+ struct glink_rpm_pipe tx_pipe;
+
+ int irq;
+
+ struct work_struct rx_work;
+ spinlock_t rx_lock;
+ struct list_head rx_queue;
+
+ struct mutex tx_lock;
+
+ struct mutex idr_lock;
+ struct idr lcids;
+ struct idr rcids;
+};
+
+enum {
+ GLINK_STATE_CLOSED,
+ GLINK_STATE_OPENING,
+ GLINK_STATE_OPEN,
+ GLINK_STATE_CLOSING,
+};
+
+/**
+ * struct glink_channel - internal representation of a channel
+ * @rpdev: rpdev reference, only used for primary endpoints
+ * @ept: rpmsg endpoint this channel is associated with
+ * @glink: glink_rpm context handle
+ * @refcount: refcount for the channel object
+ * @recv_lock: guard for @ept.cb
+ * @name: unique channel name/identifier
+ * @lcid: channel id, in local space
+ * @rcid: channel id, in remote space
+ * @buf: receive buffer, for gathering fragments
+ * @buf_offset: write offset in @buf
+ * @buf_size: size of current @buf
+ * @open_ack: completed once remote has acked the open-request
+ * @open_req: completed once open-request has been received
+ */
+struct glink_channel {
+ struct rpmsg_endpoint ept;
+
+ struct rpmsg_device *rpdev;
+ struct glink_rpm *glink;
+
+ struct kref refcount;
+
+ spinlock_t recv_lock;
+
+ char *name;
+ unsigned int lcid;
+ unsigned int rcid;
+
+ void *buf;
+ int buf_offset;
+ int buf_size;
+
+ struct completion open_ack;
+ struct completion open_req;
+};
+
+#define to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept)
+
+static const struct rpmsg_endpoint_ops glink_endpoint_ops;
+
+#define RPM_CMD_VERSION 0
+#define RPM_CMD_VERSION_ACK 1
+#define RPM_CMD_OPEN 2
+#define RPM_CMD_CLOSE 3
+#define RPM_CMD_OPEN_ACK 4
+#define RPM_CMD_TX_DATA 9
+#define RPM_CMD_CLOSE_ACK 11
+#define RPM_CMD_TX_DATA_CONT 12
+#define RPM_CMD_READ_NOTIF 13
+
+#define GLINK_FEATURE_INTENTLESS BIT(1)
+
+static struct glink_channel *glink_rpm_alloc_channel(struct glink_rpm *glink,
+ const char *name)
+{
+ struct glink_channel *channel;
+
+ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return ERR_PTR(-ENOMEM);
+
+ /* Set up the glink-internal glink_channel data */
+ spin_lock_init(&channel->recv_lock);
+ channel->glink = glink;
+ channel->name = kstrdup(name, GFP_KERNEL);
+
+ init_completion(&channel->open_req);
+ init_completion(&channel->open_ack);
+
+ kref_init(&channel->refcount);
+
+ return channel;
+}
+
+static void glink_rpm_channel_release(struct kref *ref)
+{
+ struct glink_channel *channel = container_of(ref, struct glink_channel,
+ refcount);
+
+ kfree(channel->name);
+ kfree(channel);
+}
+
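+/*
+ * Each pipe is a circular buffer in message RAM: the head and tail word
+ * offsets live in front of the data area, and the amount of data available
+ * (or space free) is computed modulo the pipe length.
+ */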
+static size_t glink_rpm_rx_avail(struct glink_rpm *glink)
+{
+ struct glink_rpm_pipe *pipe = &glink->rx_pipe;
+ unsigned int head;
+ unsigned int tail;
+
+ head = readl(pipe->head);
+ tail = readl(pipe->tail);
+
+ if (head < tail)
+ return pipe->length - tail + head;
+ else
+ return head - tail;
+}
+
+static void glink_rpm_rx_peak(struct glink_rpm *glink,
+ void *data, size_t count)
+{
+ struct glink_rpm_pipe *pipe = &glink->rx_pipe;
+ unsigned int tail;
+ size_t len;
+
+ tail = readl(pipe->tail);
+
+ len = min_t(size_t, count, pipe->length - tail);
+ if (len) {
+ __ioread32_copy(data, pipe->fifo + tail,
+ len / sizeof(u32));
+ }
+
+ if (len != count) {
+ __ioread32_copy(data + len, pipe->fifo,
+ (count - len) / sizeof(u32));
+ }
+}
+
+static void glink_rpm_rx_advance(struct glink_rpm *glink,
+ size_t count)
+{
+ struct glink_rpm_pipe *pipe = &glink->rx_pipe;
+ unsigned int tail;
+
+ tail = readl(pipe->tail);
+
+ tail += count;
+ if (tail >= pipe->length)
+ tail -= pipe->length;
+
+ writel(tail, pipe->tail);
+}
+
+static size_t glink_rpm_tx_avail(struct glink_rpm *glink)
+{
+ struct glink_rpm_pipe *pipe = &glink->tx_pipe;
+ unsigned int head;
+ unsigned int tail;
+
+ head = readl(pipe->head);
+ tail = readl(pipe->tail);
+
+ if (tail <= head)
+ return pipe->length - head + tail;
+ else
+ return tail - head;
+}
+
+static unsigned int glink_rpm_tx_write(struct glink_rpm *glink,
+ unsigned int head,
+ const void *data, size_t count)
+{
+ struct glink_rpm_pipe *pipe = &glink->tx_pipe;
+ size_t len;
+
+ len = min_t(size_t, count, pipe->length - head);
+ if (len) {
+ __iowrite32_copy(pipe->fifo + head, data,
+ len / sizeof(u32));
+ }
+
+ if (len != count) {
+ __iowrite32_copy(pipe->fifo, data + len,
+ (count - len) / sizeof(u32));
+ }
+
+ head += count;
+ if (head >= pipe->length)
+ head -= pipe->length;
+
+ return head;
+}
+
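+/*
+ * Write a command header and optional payload into the TX FIFO and kick the
+ * remote via the mailbox. The total length must be 8-byte aligned; if @wait
+ * is set, poll for FIFO space, otherwise fail with -ENOMEM when full.
+ */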
+static int glink_rpm_tx(struct glink_rpm *glink,
+ const void *hdr, size_t hlen,
+ const void *data, size_t dlen, bool wait)
+{
+ struct glink_rpm_pipe *pipe = &glink->tx_pipe;
+ unsigned int head;
+ unsigned int tlen = hlen + dlen;
+ int ret;
+
+ /* Reject packets that are too big */
+ if (tlen >= glink->tx_pipe.length)
+ return -EINVAL;
+
+ if (WARN(tlen % 8, "Unaligned TX request"))
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&glink->tx_lock);
+ if (ret)
+ return ret;
+
+ while (glink_rpm_tx_avail(glink) < tlen) {
+ if (!wait) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ msleep(10);
+ }
+
+ head = readl(pipe->head);
+ head = glink_rpm_tx_write(glink, head, hdr, hlen);
+ head = glink_rpm_tx_write(glink, head, data, dlen);
+ writel(head, pipe->head);
+
+ mbox_send_message(glink->mbox_chan, NULL);
+ mbox_client_txdone(glink->mbox_chan, 0);
+
+out:
+ mutex_unlock(&glink->tx_lock);
+
+ return ret;
+}
+
+static int glink_rpm_send_version(struct glink_rpm *glink)
+{
+ struct glink_msg msg;
+
+ msg.cmd = cpu_to_le16(RPM_CMD_VERSION);
+ msg.param1 = cpu_to_le16(1);
+ msg.param2 = cpu_to_le32(GLINK_FEATURE_INTENTLESS);
+
+ return glink_rpm_tx(glink, &msg, sizeof(msg), NULL, 0, true);
+}
+
+static void glink_rpm_send_version_ack(struct glink_rpm *glink)
+{
+ struct glink_msg msg;
+
+ msg.cmd = cpu_to_le16(RPM_CMD_VERSION_ACK);
+ msg.param1 = cpu_to_le16(1);
+ msg.param2 = cpu_to_le32(0);
+
+ glink_rpm_tx(glink, &msg, sizeof(msg), NULL, 0, true);
+}
+
+static void glink_rpm_send_open_ack(struct glink_rpm *glink,
+ struct glink_channel *channel)
+{
+ struct glink_msg msg;
+
+ msg.cmd = cpu_to_le16(RPM_CMD_OPEN_ACK);
+ msg.param1 = cpu_to_le16(channel->rcid);
+ msg.param2 = cpu_to_le32(0);
+
+ glink_rpm_tx(glink, &msg, sizeof(msg), NULL, 0, true);
+}
+
+/**
+ * glink_rpm_send_open_req() - send an RPM_CMD_OPEN request to the remote
+ * @glink: glink_rpm context to send the request on
+ * @channel: channel being opened
+ *
+ * Allocates a local channel id and sends a RPM_CMD_OPEN message to the remote.
+ * Will return with refcount held, regardless of outcome.
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int glink_rpm_send_open_req(struct glink_rpm *glink,
+ struct glink_channel *channel)
+{
+ struct {
+ struct glink_msg msg;
+ u8 name[GLINK_NAME_SIZE];
+ } __packed req;
+ int name_len = strlen(channel->name) + 1;
+ int req_len = ALIGN(sizeof(req.msg) + name_len, 8);
+ int ret;
+
+ kref_get(&channel->refcount);
+
+ mutex_lock(&glink->idr_lock);
+ ret = idr_alloc_cyclic(&glink->lcids, channel,
+ RPM_GLINK_CID_MIN, RPM_GLINK_CID_MAX, GFP_KERNEL);
+ mutex_unlock(&glink->idr_lock);
+ if (ret < 0)
+ return ret;
+
+ channel->lcid = ret;
+
+ req.msg.cmd = cpu_to_le16(RPM_CMD_OPEN);
+ req.msg.param1 = cpu_to_le16(channel->lcid);
+ req.msg.param2 = cpu_to_le32(name_len);
+ strcpy(req.name, channel->name);
+
+ ret = glink_rpm_tx(glink, &req, req_len, NULL, 0, true);
+ if (ret)
+ goto remove_idr;
+
+ return 0;
+
+remove_idr:
+ mutex_lock(&glink->idr_lock);
+ idr_remove(&glink->lcids, channel->lcid);
+ channel->lcid = 0;
+ mutex_unlock(&glink->idr_lock);
+
+ return ret;
+}
+
+static void glink_rpm_send_close_req(struct glink_rpm *glink,
+ struct glink_channel *channel)
+{
+ struct glink_msg req;
+
+ req.cmd = cpu_to_le16(RPM_CMD_CLOSE);
+ req.param1 = cpu_to_le16(channel->lcid);
+ req.param2 = 0;
+
+ glink_rpm_tx(glink, &req, sizeof(req), NULL, 0, true);
+}
+
+static void glink_rpm_send_close_ack(struct glink_rpm *glink, unsigned int rcid)
+{
+ struct glink_msg req;
+
+ req.cmd = cpu_to_le16(RPM_CMD_CLOSE_ACK);
+ req.param1 = cpu_to_le16(rcid);
+ req.param2 = 0;
+
+ glink_rpm_tx(glink, &req, sizeof(req), NULL, 0, true);
+}
+
+static int glink_rpm_rx_defer(struct glink_rpm *glink, size_t extra)
+{
+ struct glink_defer_cmd *dcmd;
+
+ extra = ALIGN(extra, 8);
+
+ if (glink_rpm_rx_avail(glink) < sizeof(struct glink_msg) + extra) {
+ dev_dbg(glink->dev, "Insufficient data in rx fifo");
+ return -ENXIO;
+ }
+
+ dcmd = kzalloc(sizeof(*dcmd) + extra, GFP_ATOMIC);
+ if (!dcmd)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dcmd->node);
+
+ glink_rpm_rx_peak(glink, &dcmd->msg, sizeof(dcmd->msg) + extra);
+
+ spin_lock(&glink->rx_lock);
+ list_add_tail(&dcmd->node, &glink->rx_queue);
+ spin_unlock(&glink->rx_lock);
+
+ schedule_work(&glink->rx_work);
+ glink_rpm_rx_advance(glink, sizeof(dcmd->msg) + extra);
+
+ return 0;
+}
+
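+/*
+ * Handle an incoming TX_DATA/TX_DATA_CONT command. Fragments are gathered in
+ * channel->buf, sized from chunk_size + left_size of the first fragment, and
+ * the complete message is handed to the endpoint callback once left_size
+ * reaches zero.
+ */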
+static int glink_rpm_rx_data(struct glink_rpm *glink, size_t avail)
+{
+ struct glink_channel *channel;
+ struct {
+ struct glink_msg msg;
+ __le32 chunk_size;
+ __le32 left_size;
+ } __packed hdr;
+ unsigned int chunk_size;
+ unsigned int left_size;
+ unsigned int rcid;
+
+ if (avail < sizeof(hdr)) {
+ dev_dbg(glink->dev, "Not enough data in fifo\n");
+ return -EAGAIN;
+ }
+
+ glink_rpm_rx_peak(glink, &hdr, sizeof(hdr));
+ chunk_size = le32_to_cpu(hdr.chunk_size);
+ left_size = le32_to_cpu(hdr.left_size);
+
+ if (avail < sizeof(hdr) + chunk_size) {
+ dev_dbg(glink->dev, "Payload not yet in fifo\n");
+ return -EAGAIN;
+ }
+
+ if (WARN(chunk_size % 4, "Incoming data must be word aligned\n"))
+ return -EINVAL;
+
+ rcid = le16_to_cpu(hdr.msg.param1);
+ channel = idr_find(&glink->rcids, rcid);
+ if (!channel) {
+ dev_dbg(glink->dev, "Data on non-existing channel\n");
+
+ /* Drop the message */
+ glink_rpm_rx_advance(glink, ALIGN(sizeof(hdr) + chunk_size, 8));
+ return 0;
+ }
+
+ /* Might have an ongoing, fragmented, message to append */
+ if (!channel->buf) {
+ channel->buf = kmalloc(chunk_size + left_size, GFP_ATOMIC);
+ if (!channel->buf)
+ return -ENOMEM;
+
+ channel->buf_size = chunk_size + left_size;
+ channel->buf_offset = 0;
+ }
+
+ glink_rpm_rx_advance(glink, sizeof(hdr));
+
+ if (channel->buf_size - channel->buf_offset < chunk_size) {
+ dev_err(glink->dev, "Insufficient space in input buffer\n");
+
+ /* The packet header lied, drop payload */
+ glink_rpm_rx_advance(glink, chunk_size);
+ return -ENOMEM;
+ }
+
+ glink_rpm_rx_peak(glink, channel->buf + channel->buf_offset, chunk_size);
+ channel->buf_offset += chunk_size;
+
+ /* Handle message when no fragments remain to be received */
+ if (!left_size) {
+ spin_lock(&channel->recv_lock);
+ if (channel->ept.cb) {
+ channel->ept.cb(channel->ept.rpdev,
+ channel->buf,
+ channel->buf_offset,
+ channel->ept.priv,
+ RPMSG_ADDR_ANY);
+ }
+ spin_unlock(&channel->recv_lock);
+
+ kfree(channel->buf);
+ channel->buf = NULL;
+ channel->buf_size = 0;
+ }
+
+ /* Each message starts at an 8 byte aligned address */
+ glink_rpm_rx_advance(glink, ALIGN(chunk_size, 8));
+
+ return 0;
+}
+
+static int glink_rpm_rx_open_ack(struct glink_rpm *glink, unsigned int lcid)
+{
+ struct glink_channel *channel;
+
+ channel = idr_find(&glink->lcids, lcid);
+ if (!channel) {
+ dev_err(glink->dev, "Invalid open ack packet\n");
+ return -EINVAL;
+ }
+
+ complete(&channel->open_ack);
+
+ return 0;
+}
+
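+/*
+ * IRQ handler: drain the RX FIFO, handling data, open-ack and read-notif
+ * commands inline and deferring the remaining control commands to rx_work.
+ */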
+static irqreturn_t glink_rpm_intr(int irq, void *data)
+{
+ struct glink_rpm *glink = data;
+ struct glink_msg msg;
+ unsigned int param1;
+ unsigned int param2;
+ unsigned int avail;
+ unsigned int cmd;
+ int ret;
+
+ for (;;) {
+ avail = glink_rpm_rx_avail(glink);
+ if (avail < sizeof(msg))
+ break;
+
+ glink_rpm_rx_peak(glink, &msg, sizeof(msg));
+
+ cmd = le16_to_cpu(msg.cmd);
+ param1 = le16_to_cpu(msg.param1);
+ param2 = le32_to_cpu(msg.param2);
+
+ switch (cmd) {
+ case RPM_CMD_VERSION:
+ case RPM_CMD_VERSION_ACK:
+ case RPM_CMD_CLOSE:
+ case RPM_CMD_CLOSE_ACK:
+ ret = glink_rpm_rx_defer(glink, 0);
+ break;
+ case RPM_CMD_OPEN_ACK:
+ ret = glink_rpm_rx_open_ack(glink, param1);
+ glink_rpm_rx_advance(glink, ALIGN(sizeof(msg), 8));
+ break;
+ case RPM_CMD_OPEN:
+ ret = glink_rpm_rx_defer(glink, param2);
+ break;
+ case RPM_CMD_TX_DATA:
+ case RPM_CMD_TX_DATA_CONT:
+ ret = glink_rpm_rx_data(glink, avail);
+ break;
+ case RPM_CMD_READ_NOTIF:
+ glink_rpm_rx_advance(glink, ALIGN(sizeof(msg), 8));
+
+ mbox_send_message(glink->mbox_chan, NULL);
+ mbox_client_txdone(glink->mbox_chan, 0);
+
+ ret = 0;
+ break;
+ default:
+ dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Locally initiated rpmsg_create_ept */
+static struct glink_channel *glink_rpm_create_local(struct glink_rpm *glink,
+ const char *name)
+{
+ struct glink_channel *channel;
+ int ret;
+
+ channel = glink_rpm_alloc_channel(glink, name);
+ if (IS_ERR(channel))
+ return ERR_CAST(channel);
+
+ ret = glink_rpm_send_open_req(glink, channel);
+ if (ret)
+ goto release_channel;
+
+ ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
+ if (!ret)
+ goto err_timeout;
+
+ ret = wait_for_completion_timeout(&channel->open_req, 5 * HZ);
+ if (!ret)
+ goto err_timeout;
+
+ glink_rpm_send_open_ack(glink, channel);
+
+ return channel;
+
+err_timeout:
+ /* glink_rpm_send_open_req() did register the channel in lcids */
+ mutex_lock(&glink->idr_lock);
+ idr_remove(&glink->lcids, channel->lcid);
+ mutex_unlock(&glink->idr_lock);
+
+release_channel:
+ /* Release glink_rpm_send_open_req() reference */
+ kref_put(&channel->refcount, glink_rpm_channel_release);
+ /* Release glink_rpm_alloc_channel() reference */
+ kref_put(&channel->refcount, glink_rpm_channel_release);
+
+ return ERR_PTR(-ETIMEDOUT);
+}
+
+/* Remote initiated rpmsg_create_ept */
+static int glink_rpm_create_remote(struct glink_rpm *glink,
+ struct glink_channel *channel)
+{
+ int ret;
+
+ glink_rpm_send_open_ack(glink, channel);
+
+ ret = glink_rpm_send_open_req(glink, channel);
+ if (ret)
+ goto close_link;
+
+ ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ goto close_link;
+ }
+
+ return 0;
+
+close_link:
+ /*
+ * Send a close request to "undo" our open-ack. The close-ack will
+ * release the last reference.
+ */
+ glink_rpm_send_close_req(glink, channel);
+
+ /* Release glink_rpm_send_open_req() reference */
+ kref_put(&channel->refcount, glink_rpm_channel_release);
+
+ return ret;
+}
+
+static struct rpmsg_endpoint *glink_rpm_create_ept(struct rpmsg_device *rpdev,
+ rpmsg_rx_cb_t cb, void *priv,
+ struct rpmsg_channel_info chinfo)
+{
+ struct glink_channel *parent = to_glink_channel(rpdev->ept);
+ struct glink_channel *channel;
+ struct glink_rpm *glink = parent->glink;
+ struct rpmsg_endpoint *ept;
+ const char *name = chinfo.name;
+ int cid;
+ int ret;
+
+ idr_for_each_entry(&glink->rcids, channel, cid) {
+ if (!strcmp(channel->name, name))
+ break;
+ }
+
+ if (!channel) {
+ channel = glink_rpm_create_local(glink, name);
+ if (IS_ERR(channel))
+ return NULL;
+ } else {
+ ret = glink_rpm_create_remote(glink, channel);
+ if (ret)
+ return NULL;
+ }
+
+ ept = &channel->ept;
+ ept->rpdev = rpdev;
+ ept->cb = cb;
+ ept->priv = priv;
+ ept->ops = &glink_endpoint_ops;
+
+ return ept;
+}
+
+static void glink_rpm_destroy_ept(struct rpmsg_endpoint *ept)
+{
+ struct glink_channel *channel = to_glink_channel(ept);
+ struct glink_rpm *glink = channel->glink;
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->recv_lock, flags);
+ channel->ept.cb = NULL;
+ spin_unlock_irqrestore(&channel->recv_lock, flags);
+
+ /* Decouple the potential rpdev from the channel */
+ channel->rpdev = NULL;
+
+ glink_rpm_send_close_req(glink, channel);
+}
+
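+/*
+ * Transmit a message on @channel as a single TX_DATA chunk with left_size set
+ * to zero; no fragmentation is performed on the TX side.
+ */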
+static int __glink_rpm_send(struct glink_channel *channel,
+ void *data, int len, bool wait)
+{
+ struct glink_rpm *glink = channel->glink;
+ struct {
+ struct glink_msg msg;
+ __le32 chunk_size;
+ __le32 left_size;
+ } __packed req;
+
+ if (WARN(len % 8, "RPM GLINK expects 8 byte aligned messages\n"))
+ return -EINVAL;
+
+ req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA);
+ req.msg.param1 = cpu_to_le16(channel->lcid);
+ req.msg.param2 = cpu_to_le32(channel->rcid);
+ req.chunk_size = cpu_to_le32(len);
+ req.left_size = cpu_to_le32(0);
+
+ return glink_rpm_tx(glink, &req, sizeof(req), data, len, wait);
+}
+
+static int glink_rpm_send(struct rpmsg_endpoint *ept, void *data, int len)
+{
+ struct glink_channel *channel = to_glink_channel(ept);
+
+ return __glink_rpm_send(channel, data, len, true);
+}
+
+static int glink_rpm_trysend(struct rpmsg_endpoint *ept, void *data, int len)
+{
+ struct glink_channel *channel = to_glink_channel(ept);
+
+ return __glink_rpm_send(channel, data, len, false);
+}
+
+/*
+ * Finds the device_node for the glink child interested in this channel.
+ */
+static struct device_node *glink_rpm_match_channel(struct device_node *node,
+ const char *channel)
+{
+ struct device_node *child;
+ const char *name;
+ const char *key;
+ int ret;
+
+ for_each_available_child_of_node(node, child) {
+ key = "qcom,glink-channels";
+ ret = of_property_read_string(child, key, &name);
+ if (ret)
+ continue;
+
+ if (strcmp(name, channel) == 0)
+ return child;
+ }
+
+ return NULL;
+}
+
+static const struct rpmsg_device_ops glink_device_ops = {
+ .create_ept = glink_rpm_create_ept,
+};
+
+static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
+ .destroy_ept = glink_rpm_destroy_ept,
+ .send = glink_rpm_send,
+ .trysend = glink_rpm_trysend,
+};
+
+static void glink_rpm_rpdev_release(struct device *dev)
+{
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ struct glink_channel *channel = to_glink_channel(rpdev->ept);
+
+ channel->rpdev = NULL;
+ kfree(rpdev);
+}
+
+static int glink_rpm_rx_open(struct glink_rpm *glink, unsigned int rcid,
+ char *name)
+{
+ struct glink_channel *channel;
+ struct rpmsg_device *rpdev;
+ bool create_device = false;
+ int lcid;
+ int ret;
+
+ idr_for_each_entry(&glink->lcids, channel, lcid) {
+ if (!strcmp(channel->name, name))
+ break;
+ }
+
+ if (!channel) {
+ channel = glink_rpm_alloc_channel(glink, name);
+ if (IS_ERR(channel))
+ return PTR_ERR(channel);
+
+ /* The opening dance was initiated by the remote */
+ create_device = true;
+ }
+
+ mutex_lock(&glink->idr_lock);
+ ret = idr_alloc(&glink->rcids, channel, rcid, rcid + 1, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(glink->dev, "Unable to insert channel into rcid list\n");
+ mutex_unlock(&glink->idr_lock);
+ goto free_channel;
+ }
+ channel->rcid = ret;
+ mutex_unlock(&glink->idr_lock);
+
+ complete(&channel->open_req);
+
+ if (create_device) {
+ rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
+ if (!rpdev) {
+ ret = -ENOMEM;
+ goto rcid_remove;
+ }
+
+ rpdev->ept = &channel->ept;
+ strncpy(rpdev->id.name, name, RPMSG_NAME_SIZE);
+ rpdev->src = RPMSG_ADDR_ANY;
+ rpdev->dst = RPMSG_ADDR_ANY;
+ rpdev->ops = &glink_device_ops;
+
+ rpdev->dev.of_node = glink_rpm_match_channel(glink->dev->of_node, name);
+ rpdev->dev.parent = glink->dev;
+ rpdev->dev.release = glink_rpm_rpdev_release;
+
+ ret = rpmsg_register_device(rpdev);
+ if (ret)
+ goto free_rpdev;
+
+ channel->rpdev = rpdev;
+ }
+
+ return 0;
+
+free_rpdev:
+ kfree(rpdev);
+rcid_remove:
+ mutex_lock(&glink->idr_lock);
+ idr_remove(&glink->rcids, channel->rcid);
+ channel->rcid = 0;
+ mutex_unlock(&glink->idr_lock);
+free_channel:
+ /* Release the reference, iff we took it */
+ if (create_device)
+ kref_put(&channel->refcount, glink_rpm_channel_release);
+
+ return ret;
+}
+
+static void glink_rpm_rx_close(struct glink_rpm *glink, unsigned int rcid)
+{
+ struct rpmsg_channel_info chinfo;
+ struct glink_channel *channel;
+
+ channel = idr_find(&glink->rcids, rcid);
+ if (WARN(!channel, "close request on unknown channel\n"))
+ return;
+
+ if (channel->rpdev) {
+ strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
+ chinfo.src = RPMSG_ADDR_ANY;
+ chinfo.dst = RPMSG_ADDR_ANY;
+
+ rpmsg_unregister_device(glink->dev, &chinfo);
+ }
+
+ glink_rpm_send_close_ack(glink, channel->rcid);
+
+ mutex_lock(&glink->idr_lock);
+ idr_remove(&glink->rcids, channel->rcid);
+ channel->rcid = 0;
+ mutex_unlock(&glink->idr_lock);
+
+ kref_put(&channel->refcount, glink_rpm_channel_release);
+}
+
+static void glink_rpm_rx_close_ack(struct glink_rpm *glink, unsigned int lcid)
+{
+ struct glink_channel *channel;
+
+ channel = idr_find(&glink->lcids, lcid);
+ if (WARN(!channel, "close ack on unknown channel\n"))
+ return;
+
+ mutex_lock(&glink->idr_lock);
+ idr_remove(&glink->lcids, channel->lcid);
+ channel->lcid = 0;
+ mutex_unlock(&glink->idr_lock);
+
+ kref_put(&channel->refcount, glink_rpm_channel_release);
+}
+
+static void glink_rpm_work(struct work_struct *work)
+{
+ struct glink_rpm *glink = container_of(work, struct glink_rpm, rx_work);
+ struct glink_defer_cmd *dcmd;
+ struct glink_msg *msg;
+ unsigned long flags;
+ unsigned int param1;
+ unsigned int param2;
+ unsigned int cmd;
+
+ for (;;) {
+ spin_lock_irqsave(&glink->rx_lock, flags);
+ if (list_empty(&glink->rx_queue)) {
+ spin_unlock_irqrestore(&glink->rx_lock, flags);
+ break;
+ }
+ dcmd = list_first_entry(&glink->rx_queue, struct glink_defer_cmd, node);
+ list_del(&dcmd->node);
+ spin_unlock_irqrestore(&glink->rx_lock, flags);
+
+ msg = &dcmd->msg;
+ cmd = le16_to_cpu(msg->cmd);
+ param1 = le16_to_cpu(msg->param1);
+ param2 = le32_to_cpu(msg->param2);
+
+ switch (cmd) {
+ case RPM_CMD_VERSION:
+ glink_rpm_send_version_ack(glink);
+ break;
+ case RPM_CMD_VERSION_ACK:
+ break;
+ case RPM_CMD_OPEN:
+ glink_rpm_rx_open(glink, param1, msg->data);
+ break;
+ case RPM_CMD_CLOSE:
+ glink_rpm_rx_close(glink, param1);
+ break;
+ case RPM_CMD_CLOSE_ACK:
+ glink_rpm_rx_close_ack(glink, param1);
+ break;
+ default:
+ WARN(1, "Unknown defer object %d\n", cmd);
+ break;
+ }
+
+ kfree(dcmd);
+ }
+}
+
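+/*
+ * Locate the RX and TX FIFOs by reading the table of contents from the last
+ * RPM_TOC_SIZE bytes of message RAM. Each FIFO descriptor consists of the
+ * tail index, the head index and then the data area itself.
+ */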
+static int glink_rpm_parse_toc(struct device *dev,
+ void __iomem *msg_ram,
+ size_t msg_ram_size,
+ struct glink_rpm_pipe *rx,
+ struct glink_rpm_pipe *tx)
+{
+ struct rpm_toc *toc;
+ int num_entries;
+ unsigned int id;
+ size_t offset;
+ size_t size;
+ void *buf;
+ int i;
+
+ buf = kzalloc(RPM_TOC_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ __ioread32_copy(buf, msg_ram + msg_ram_size - RPM_TOC_SIZE,
+ RPM_TOC_SIZE / sizeof(u32));
+
+ toc = buf;
+
+ if (le32_to_cpu(toc->magic) != RPM_TOC_MAGIC) {
+ dev_err(dev, "RPM TOC has invalid magic\n");
+ goto err_inval;
+ }
+
+ num_entries = le32_to_cpu(toc->count);
+ if (num_entries > RPM_TOC_MAX_ENTRIES) {
+ dev_err(dev, "Invalid number of toc entries\n");
+ goto err_inval;
+ }
+
+ for (i = 0; i < num_entries; i++) {
+ id = le32_to_cpu(toc->entries[i].id);
+ offset = le32_to_cpu(toc->entries[i].offset);
+ size = le32_to_cpu(toc->entries[i].size);
+
+ if (offset > msg_ram_size || offset + size > msg_ram_size) {
+ dev_err(dev, "TOC entry with invalid size\n");
+ continue;
+ }
+
+ switch (id) {
+ case RPM_RX_FIFO_ID:
+ rx->length = size;
+
+ rx->tail = msg_ram + offset;
+ rx->head = msg_ram + offset + sizeof(u32);
+ rx->fifo = msg_ram + offset + 2 * sizeof(u32);
+ break;
+ case RPM_TX_FIFO_ID:
+ tx->length = size;
+
+ tx->tail = msg_ram + offset;
+ tx->head = msg_ram + offset + sizeof(u32);
+ tx->fifo = msg_ram + offset + 2 * sizeof(u32);
+ break;
+ }
+ }
+
+ if (!rx->fifo || !tx->fifo) {
+ dev_err(dev, "Unable to find rx and tx descriptors\n");
+ goto err_inval;
+ }
+
+ kfree(buf);
+ return 0;
+
+err_inval:
+ kfree(buf);
+ return -EINVAL;
+}
+
+static int glink_rpm_probe(struct platform_device *pdev)
+{
+ struct glink_rpm *glink;
+ struct device_node *np;
+ void __iomem *msg_ram;
+ size_t msg_ram_size;
+ struct device *dev = &pdev->dev;
+ struct resource r;
+ int irq;
+ int ret;
+
+ glink = devm_kzalloc(dev, sizeof(*glink), GFP_KERNEL);
+ if (!glink)
+ return -ENOMEM;
+
+ glink->dev = dev;
+
+ mutex_init(&glink->tx_lock);
+ spin_lock_init(&glink->rx_lock);
+ INIT_LIST_HEAD(&glink->rx_queue);
+ INIT_WORK(&glink->rx_work, glink_rpm_work);
+
+ mutex_init(&glink->idr_lock);
+ idr_init(&glink->lcids);
+ idr_init(&glink->rcids);
+
+ glink->mbox_client.dev = &pdev->dev;
+ glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0);
+ if (IS_ERR(glink->mbox_chan)) {
+ if (PTR_ERR(glink->mbox_chan) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to acquire IPC channel\n");
+ return PTR_ERR(glink->mbox_chan);
+ }
+
+ np = of_parse_phandle(dev->of_node, "qcom,rpm-msg-ram", 0);
+ ret = of_address_to_resource(np, 0, &r);
+ of_node_put(np);
+ if (ret)
+ return ret;
+
+ msg_ram = devm_ioremap(dev, r.start, resource_size(&r));
+ msg_ram_size = resource_size(&r);
+ if (!msg_ram)
+ return -ENOMEM;
+
+ ret = glink_rpm_parse_toc(dev, msg_ram, msg_ram_size,
+ &glink->rx_pipe, &glink->tx_pipe);
+ if (ret)
+ return ret;
+
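+ /* reset the indices we own before the remote starts producing/consuming */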
+ writel(0, glink->tx_pipe.head);
+ writel(0, glink->rx_pipe.tail);
+
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(dev, irq,
+ glink_rpm_intr,
+ IRQF_NO_SUSPEND | IRQF_SHARED,
+ "glink-rpm", glink);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ\n");
+ return ret;
+ }
+
+ glink->irq = irq;
+
+ ret = glink_rpm_send_version(glink);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, glink);
+
+ return 0;
+}
+
+static int glink_rpm_remove_device(struct device *dev, void *data)
+{
+ device_unregister(dev);
+
+ return 0;
+}
+
+static int glink_rpm_remove(struct platform_device *pdev)
+{
+ struct glink_rpm *glink = platform_get_drvdata(pdev);
+ struct glink_channel *channel;
+ int cid;
+ int ret;
+
+ disable_irq(glink->irq);
+ cancel_work_sync(&glink->rx_work);
+
+ ret = device_for_each_child(glink->dev, NULL, glink_rpm_remove_device);
+ if (ret)
+ dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);
+
+ /* Release any defunct local channels, waiting for close-ack */
+ idr_for_each_entry(&glink->lcids, channel, cid)
+ kref_put(&channel->refcount, glink_rpm_channel_release);
+
+ idr_destroy(&glink->lcids);
+ idr_destroy(&glink->rcids);
+
+ return 0;
+}
+
+static const struct of_device_id glink_rpm_of_match[] = {
+ { .compatible = "qcom,glink-rpm" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, glink_rpm_of_match);
+
+static struct platform_driver glink_rpm_driver = {
+ .probe = glink_rpm_probe,
+ .remove = glink_rpm_remove,
+ .driver = {
+ .name = "qcom_glink_rpm",
+ .of_match_table = glink_rpm_of_match,
+ },
+};
+
+static int __init glink_rpm_init(void)
+{
+ return platform_driver_register(&glink_rpm_driver);
+}
+subsys_initcall(glink_rpm_init);
+
+static void __exit glink_rpm_exit(void)
+{
+ platform_driver_unregister(&glink_rpm_driver);
+}
+module_exit(glink_rpm_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <[email protected]>");
+MODULE_DESCRIPTION("Qualcomm GLINK RPM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index beaef5dd973e..a0a39a8821a3 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -969,6 +969,14 @@ static const struct rpmsg_endpoint_ops qcom_smd_endpoint_ops = {
.poll = qcom_smd_poll,
};
+static void qcom_smd_release_device(struct device *dev)
+{
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ struct qcom_smd_device *qsdev = to_smd_device(rpdev);
+
+ kfree(qsdev);
+}
+
/*
* Create a smd client device for channel that is being opened.
*/
@@ -998,6 +1006,7 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
rpdev->dev.of_node = qcom_smd_match_channel(edge->of_node, channel->name);
rpdev->dev.parent = &edge->dev;
+ rpdev->dev.release = qcom_smd_release_device;
return rpmsg_register_device(rpdev);
}
@@ -1013,6 +1022,8 @@ static int qcom_smd_create_chrdev(struct qcom_smd_edge *edge)
qsdev->edge = edge;
qsdev->rpdev.ops = &qcom_smd_device_ops;
qsdev->rpdev.dev.parent = &edge->dev;
+ qsdev->rpdev.dev.release = qcom_smd_release_device;
+
return rpmsg_chrdev_register_device(&qsdev->rpdev);
}
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index 2576284f99a7..e0996fce3963 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -390,7 +390,7 @@ static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev,
ret = device_add(dev);
if (ret) {
- dev_err(dev, "device_register failed: %d\n", ret);
+ dev_err(dev, "device_add failed: %d\n", ret);
put_device(dev);
}
@@ -505,7 +505,7 @@ static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
ret = device_add(dev);
if (ret) {
- dev_err(&rpdev->dev, "device_register failed: %d\n", ret);
+ dev_err(&rpdev->dev, "device_add failed: %d\n", ret);
put_device(dev);
}
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index ad3d2a9df287..dffa3aab7178 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -343,6 +343,11 @@ static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ ssize_t len;
+
+ len = of_device_modalias(dev, buf, PAGE_SIZE);
+ if (len != -ENODEV)
+ return len;
return sprintf(buf, RPMSG_DEVICE_MODALIAS_FMT "\n", rpdev->id.name);
}
@@ -387,6 +392,11 @@ static int rpmsg_dev_match(struct device *dev, struct device_driver *drv)
static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ int ret;
+
+ ret = of_device_uevent_modalias(dev, env);
+ if (ret != -ENODEV)
+ return ret;
return add_uevent_var(env, "MODALIAS=" RPMSG_DEVICE_MODALIAS_FMT,
rpdev->id.name);
@@ -464,13 +474,6 @@ static struct bus_type rpmsg_bus = {
.remove = rpmsg_dev_remove,
};
-static void rpmsg_release_device(struct device *dev)
-{
- struct rpmsg_device *rpdev = to_rpmsg_device(dev);
-
- kfree(rpdev);
-}
-
int rpmsg_register_device(struct rpmsg_device *rpdev)
{
struct device *dev = &rpdev->dev;
@@ -480,7 +483,6 @@ int rpmsg_register_device(struct rpmsg_device *rpdev)
rpdev->id.name, rpdev->src, rpdev->dst);
rpdev->dev.bus = &rpmsg_bus;
- rpdev->dev.release = rpmsg_release_device;
ret = device_register(&rpdev->dev);
if (ret) {
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index f7cade09d38a..eee2a9f77d37 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -314,7 +314,7 @@ static int virtio_rpmsg_announce_create(struct rpmsg_device *rpdev)
int err = 0;
/* need to tell remote processor's name service about this channel ? */
- if (rpdev->announce &&
+ if (rpdev->announce && rpdev->ept &&
virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
struct rpmsg_ns_msg nsm;
@@ -338,12 +338,12 @@ static int virtio_rpmsg_announce_destroy(struct rpmsg_device *rpdev)
int err = 0;
/* tell remote processor's name service we're removing this channel */
- if (rpdev->announce &&
+ if (rpdev->announce && rpdev->ept &&
virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
struct rpmsg_ns_msg nsm;
strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
- nsm.addr = rpdev->src;
+ nsm.addr = rpdev->ept->addr;
nsm.flags = RPMSG_NS_DESTROY;
err = rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
@@ -360,6 +360,14 @@ static const struct rpmsg_device_ops virtio_rpmsg_ops = {
.announce_destroy = virtio_rpmsg_announce_destroy,
};
+static void virtio_rpmsg_release_device(struct device *dev)
+{
+ struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ struct virtio_rpmsg_channel *vch = to_virtio_rpmsg_channel(rpdev);
+
+ kfree(vch);
+}
+
/*
* create an rpmsg channel using its name and address info.
* this function will be used to create both static and dynamic
@@ -390,9 +398,6 @@ static struct rpmsg_device *rpmsg_create_channel(struct virtproc_info *vrp,
/* Link the channel to our vrp */
vch->vrp = vrp;
- /* Assign callbacks for rpmsg_channel */
- vch->rpdev.ops = &virtio_rpmsg_ops;
-
/* Assign public information to the rpmsg_device */
rpdev = &vch->rpdev;
rpdev->src = chinfo->src;
@@ -408,6 +413,7 @@ static struct rpmsg_device *rpmsg_create_channel(struct virtproc_info *vrp,
strncpy(rpdev->id.name, chinfo->name, RPMSG_NAME_SIZE);
rpdev->dev.parent = &vrp->vdev->dev;
+ rpdev->dev.release = virtio_rpmsg_release_device;
ret = rpmsg_register_device(rpdev);
if (ret)
return NULL;
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 0f1fe4ff7f51..670ac0a4ef49 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3921,7 +3921,6 @@ EXPORT_SYMBOL(dasd_schedule_requeue);
int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
struct dasd_device *device = dasd_device_from_cdev(cdev);
- int rc;
if (IS_ERR(device))
return PTR_ERR(device);
@@ -3930,7 +3929,7 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev)
set_bit(DASD_FLAG_SUSPENDED, &device->flags);
if (device->discipline->freeze)
- rc = device->discipline->freeze(device);
+ device->discipline->freeze(device);
/* disallow new I/O */
dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 1e560188dd13..0e0e622eadc3 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -754,7 +754,6 @@ static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
struct alias_pav_group *pavgroup;
struct dasd_device *device, *temp;
struct dasd_eckd_private *private;
- int rc;
unsigned long flags;
LIST_HEAD(active);
@@ -785,7 +784,7 @@ static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
device = list_first_entry(&active, struct dasd_device,
alias_list);
spin_unlock_irqrestore(&lcu->lock, flags);
- rc = dasd_flush_device_queue(device);
+ dasd_flush_device_queue(device);
spin_lock_irqsave(&lcu->lock, flags);
/*
* only move device around if it wasn't moved away while we
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 7c7351276d2e..779dce069cc5 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -150,7 +150,7 @@ static int __init dasd_busid(char *str, int *id0, int *id1, int *devno)
/* Old style 0xXXXX or XXXX */
if (!kstrtouint(str, 16, &val)) {
*id0 = *id1 = 0;
- if (val < 0 || val > 0xffff)
+ if (val > 0xffff)
return -EINVAL;
*devno = val;
return 0;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 122456e4db89..c3e5ad641b0b 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -213,10 +213,8 @@ static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
geo->head |= head;
}
-static int
-check_XRC (struct ccw1 *de_ccw,
- struct DE_eckd_data *data,
- struct dasd_device *device)
+static int check_XRC(struct ccw1 *ccw, struct DE_eckd_data *data,
+ struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int rc;
@@ -224,7 +222,7 @@ check_XRC (struct ccw1 *de_ccw,
if (!private->rdc_data.facilities.XRC_supported)
return 0;
- /* switch on System Time Stamp - needed for XRC Support */
+ /* switch on System Time Stamp - needed for XRC Support */
data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
@@ -233,24 +231,30 @@ check_XRC (struct ccw1 *de_ccw,
if (rc == -EOPNOTSUPP || rc == -EACCES)
rc = 0;
- de_ccw->count = sizeof(struct DE_eckd_data);
- de_ccw->flags |= CCW_FLAG_SLI;
+ if (ccw) {
+ ccw->count = sizeof(struct DE_eckd_data);
+ ccw->flags |= CCW_FLAG_SLI;
+ }
+
return rc;
}
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
- unsigned int totrk, int cmd, struct dasd_device *device)
+ unsigned int totrk, int cmd, struct dasd_device *device,
+ int blksize)
{
struct dasd_eckd_private *private = device->private;
- u32 begcyl, endcyl;
u16 heads, beghead, endhead;
+ u32 begcyl, endcyl;
int rc = 0;
- ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
- ccw->flags = 0;
- ccw->count = 16;
- ccw->cda = (__u32) __pa(data);
+ if (ccw) {
+ ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
+ ccw->flags = 0;
+ ccw->count = 16;
+ ccw->cda = (__u32)__pa(data);
+ }
memset(data, 0, sizeof(struct DE_eckd_data));
switch (cmd) {
@@ -269,18 +273,24 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
data->mask.perm = 0x1;
data->attributes.operation = DASD_BYPASS_CACHE;
break;
+ case DASD_ECKD_CCW_READ_TRACK:
+ case DASD_ECKD_CCW_READ_TRACK_DATA:
+ data->mask.perm = 0x1;
+ data->attributes.operation = private->attrib.operation;
+ data->blk_size = 0;
+ break;
case DASD_ECKD_CCW_WRITE:
case DASD_ECKD_CCW_WRITE_MT:
case DASD_ECKD_CCW_WRITE_KD:
case DASD_ECKD_CCW_WRITE_KD_MT:
data->mask.perm = 0x02;
data->attributes.operation = private->attrib.operation;
- rc = check_XRC (ccw, data, device);
+ rc = check_XRC(ccw, data, device);
break;
case DASD_ECKD_CCW_WRITE_CKD:
case DASD_ECKD_CCW_WRITE_CKD_MT:
data->attributes.operation = DASD_BYPASS_CACHE;
- rc = check_XRC (ccw, data, device);
+ rc = check_XRC(ccw, data, device);
break;
case DASD_ECKD_CCW_ERASE:
case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
@@ -288,7 +298,18 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
data->mask.perm = 0x3;
data->mask.auth = 0x1;
data->attributes.operation = DASD_BYPASS_CACHE;
- rc = check_XRC (ccw, data, device);
+ rc = check_XRC(ccw, data, device);
+ break;
+ case DASD_ECKD_CCW_WRITE_FULL_TRACK:
+ data->mask.perm = 0x03;
+ data->attributes.operation = private->attrib.operation;
+ data->blk_size = 0;
+ break;
+ case DASD_ECKD_CCW_WRITE_TRACK_DATA:
+ data->mask.perm = 0x02;
+ data->attributes.operation = private->attrib.operation;
+ data->blk_size = blksize;
+ rc = check_XRC(ccw, data, device);
break;
default:
dev_err(&device->cdev->dev,
@@ -325,36 +346,26 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
return rc;
}
-static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
- struct dasd_device *device)
-{
- struct dasd_eckd_private *private = device->private;
- int rc;
-
- if (!private->rdc_data.facilities.XRC_supported)
- return 0;
-
- /* switch on System Time Stamp - needed for XRC Support */
- pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid' */
- pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
- pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */
-
- rc = get_phys_clock(&pfxdata->define_extent.ep_sys_time);
- /* Ignore return code if sync clock is switched off. */
- if (rc == -EOPNOTSUPP || rc == -EACCES)
- rc = 0;
- return rc;
-}
-static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
- unsigned int rec_on_trk, int count, int cmd,
- struct dasd_device *device, unsigned int reclen,
- unsigned int tlf)
+static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
+ unsigned int trk, unsigned int rec_on_trk,
+ int count, int cmd, struct dasd_device *device,
+ unsigned int reclen, unsigned int tlf)
{
struct dasd_eckd_private *private = device->private;
int sector;
int dn, d;
+ if (ccw) {
+ ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
+ ccw->flags = 0;
+ if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
+ ccw->count = 22;
+ else
+ ccw->count = 20;
+ ccw->cda = (__u32)__pa(data);
+ }
+
memset(data, 0, sizeof(*data));
sector = 0;
if (rec_on_trk) {
@@ -481,14 +492,12 @@ static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
unsigned int trk, unsigned int totrk, int cmd,
struct dasd_device *basedev, struct dasd_device *startdev,
- unsigned char format, unsigned int rec_on_trk, int count,
+ unsigned int format, unsigned int rec_on_trk, int count,
unsigned int blksize, unsigned int tlf)
{
struct dasd_eckd_private *basepriv, *startpriv;
- struct DE_eckd_data *dedata;
struct LRE_eckd_data *lredata;
- u32 begcyl, endcyl;
- u16 heads, beghead, endhead;
+ struct DE_eckd_data *dedata;
int rc = 0;
basepriv = basedev->private;
@@ -527,98 +536,19 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
pfxdata->validity.hyper_pav = 1;
}
- /* define extend data (mostly)*/
- switch (cmd) {
- case DASD_ECKD_CCW_READ_HOME_ADDRESS:
- case DASD_ECKD_CCW_READ_RECORD_ZERO:
- case DASD_ECKD_CCW_READ:
- case DASD_ECKD_CCW_READ_MT:
- case DASD_ECKD_CCW_READ_CKD:
- case DASD_ECKD_CCW_READ_CKD_MT:
- case DASD_ECKD_CCW_READ_KD:
- case DASD_ECKD_CCW_READ_KD_MT:
- dedata->mask.perm = 0x1;
- dedata->attributes.operation = basepriv->attrib.operation;
- break;
- case DASD_ECKD_CCW_READ_COUNT:
- dedata->mask.perm = 0x1;
- dedata->attributes.operation = DASD_BYPASS_CACHE;
- break;
- case DASD_ECKD_CCW_READ_TRACK:
- case DASD_ECKD_CCW_READ_TRACK_DATA:
- dedata->mask.perm = 0x1;
- dedata->attributes.operation = basepriv->attrib.operation;
- dedata->blk_size = 0;
- break;
- case DASD_ECKD_CCW_WRITE:
- case DASD_ECKD_CCW_WRITE_MT:
- case DASD_ECKD_CCW_WRITE_KD:
- case DASD_ECKD_CCW_WRITE_KD_MT:
- dedata->mask.perm = 0x02;
- dedata->attributes.operation = basepriv->attrib.operation;
- rc = check_XRC_on_prefix(pfxdata, basedev);
- break;
- case DASD_ECKD_CCW_WRITE_CKD:
- case DASD_ECKD_CCW_WRITE_CKD_MT:
- dedata->attributes.operation = DASD_BYPASS_CACHE;
- rc = check_XRC_on_prefix(pfxdata, basedev);
- break;
- case DASD_ECKD_CCW_ERASE:
- case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
- case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
- dedata->mask.perm = 0x3;
- dedata->mask.auth = 0x1;
- dedata->attributes.operation = DASD_BYPASS_CACHE;
- rc = check_XRC_on_prefix(pfxdata, basedev);
- break;
- case DASD_ECKD_CCW_WRITE_FULL_TRACK:
- dedata->mask.perm = 0x03;
- dedata->attributes.operation = basepriv->attrib.operation;
- dedata->blk_size = 0;
- break;
- case DASD_ECKD_CCW_WRITE_TRACK_DATA:
- dedata->mask.perm = 0x02;
- dedata->attributes.operation = basepriv->attrib.operation;
- dedata->blk_size = blksize;
- rc = check_XRC_on_prefix(pfxdata, basedev);
- break;
- default:
- DBF_DEV_EVENT(DBF_ERR, basedev,
- "PFX LRE unknown opcode 0x%x", cmd);
- BUG();
- return -EINVAL;
- }
-
- dedata->attributes.mode = 0x3; /* ECKD */
-
- if ((basepriv->rdc_data.cu_type == 0x2105 ||
- basepriv->rdc_data.cu_type == 0x2107 ||
- basepriv->rdc_data.cu_type == 0x1750)
- && !(basepriv->uses_cdl && trk < 2))
- dedata->ga_extended |= 0x40; /* Regular Data Format Mode */
-
- heads = basepriv->rdc_data.trk_per_cyl;
- begcyl = trk / heads;
- beghead = trk % heads;
- endcyl = totrk / heads;
- endhead = totrk % heads;
-
- /* check for sequential prestage - enhance cylinder range */
- if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
- dedata->attributes.operation == DASD_SEQ_ACCESS) {
-
- if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
- endcyl += basepriv->attrib.nr_cyl;
- else
- endcyl = (basepriv->real_cyl - 1);
- }
+ rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);
- set_ch_t(&dedata->beg_ext, begcyl, beghead);
- set_ch_t(&dedata->end_ext, endcyl, endhead);
+ /*
+ * For some commands the System Time Stamp is set in the define extent
+ * data when XRC is supported. The validity of the time stamp must be
+ * reflected in the prefix data as well.
+ */
+ if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
+ pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */
if (format == 1) {
- fill_LRE_data(lredata, trk, rec_on_trk, count, cmd,
- basedev, blksize, tlf);
+ locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
+ basedev, blksize, tlf);
}
return rc;
@@ -1887,7 +1817,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
ccw = cqr->cpaddr;
/* Define extent for the first 3 tracks. */
define_extent(ccw++, cqr->data, 0, 2,
- DASD_ECKD_CCW_READ_COUNT, device);
+ DASD_ECKD_CCW_READ_COUNT, device, 0);
LO_data = cqr->data + sizeof(struct DE_eckd_data);
/* Locate record for the first 4 records on track 0. */
ccw[-1].flags |= CCW_FLAG_CC;
@@ -2266,7 +2196,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
count, 0, 0);
} else {
define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
- DASD_ECKD_CCW_READ_COUNT, startdev);
+ DASD_ECKD_CCW_READ_COUNT, startdev, 0);
data += sizeof(struct DE_eckd_data);
ccw[-1].flags |= CCW_FLAG_CC;
@@ -2420,7 +2350,7 @@ dasd_eckd_build_format(struct dasd_device *base,
} else {
define_extent(ccw++, (struct DE_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
- DASD_ECKD_CCW_WRITE_CKD, startdev);
+ DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
/* grant subsystem permission to format R0 */
if (r0_perm)
((struct DE_eckd_data *) data)
@@ -2444,7 +2374,7 @@ dasd_eckd_build_format(struct dasd_device *base,
} else {
define_extent(ccw++, (struct DE_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
- DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev);
+ DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
data += sizeof(struct DE_eckd_data);
}
ccw[-1].flags |= CCW_FLAG_CC;
@@ -2463,7 +2393,7 @@ dasd_eckd_build_format(struct dasd_device *base,
} else {
define_extent(ccw++, (struct DE_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
- DASD_ECKD_CCW_WRITE_CKD, startdev);
+ DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
data += sizeof(struct DE_eckd_data);
}
ccw[-1].flags |= CCW_FLAG_CC;
@@ -3187,7 +3117,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
sizeof(struct PFX_eckd_data));
} else {
if (define_extent(ccw++, cqr->data, first_trk,
- last_trk, cmd, basedev) == -EAGAIN) {
+ last_trk, cmd, basedev, 0) == -EAGAIN) {
/* Clock not in sync and XRC is enabled.
* Try again later.
*/
@@ -3509,12 +3439,19 @@ static int prepare_itcw(struct itcw *itcw,
dedata->mask.perm = 0x02;
dedata->attributes.operation = basepriv->attrib.operation;
dedata->blk_size = blksize;
- rc = check_XRC_on_prefix(&pfxdata, basedev);
+ rc = check_XRC(NULL, dedata, basedev);
dedata->ga_extended |= 0x42;
lredata->operation.orientation = 0x0;
lredata->operation.operation = 0x3F;
lredata->extended_operation = 0x23;
lredata->auxiliary.check_bytes = 0x2;
+ /*
+ * If XRC is supported the System Time Stamp is set. The
+ * validity of the time stamp must be reflected in the prefix
+ * data as well.
+ */
+ if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
+ pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
pfx_cmd = DASD_ECKD_CCW_PFX;
break;
case DASD_ECKD_CCW_READ_COUNT_MT:
@@ -3842,25 +3779,28 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
return cqr;
}
-static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
- struct dasd_block *block,
- struct request *req)
+static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
+ struct dasd_block *block,
+ struct request *req)
{
- unsigned long *idaws;
+ sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
+ unsigned int seg_len, len_to_track_end;
+ unsigned int cidaw, cplength, datasize;
+ sector_t first_trk, last_trk, sectors;
+ struct dasd_eckd_private *base_priv;
struct dasd_device *basedev;
- struct dasd_ccw_req *cqr;
- struct ccw1 *ccw;
struct req_iterator iter;
+ struct dasd_ccw_req *cqr;
+ unsigned int first_offs;
+ unsigned int trkcount;
+ unsigned long *idaws;
+ unsigned int size;
+ unsigned char cmd;
struct bio_vec bv;
+ struct ccw1 *ccw;
+ int use_prefix;
+ void *data;
char *dst;
- unsigned char cmd;
- unsigned int trkcount;
- unsigned int seg_len, len_to_track_end;
- unsigned int first_offs;
- unsigned int cidaw, cplength, datasize;
- sector_t first_trk, last_trk, sectors;
- sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
- unsigned int pfx_datasize;
/*
* raw track access needs to be a multiple of 64k and on a 64k boundary
@@ -3878,8 +3818,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
DBF_DEV_EVENT(DBF_ERR, basedev,
"raw write not track aligned (%lu,%lu) req %p",
start_padding_sectors, end_padding_sectors, req);
- cqr = ERR_PTR(-EINVAL);
- goto out;
+ return ERR_PTR(-EINVAL);
}
first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
@@ -3892,10 +3831,8 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
cmd = DASD_ECKD_CCW_READ_TRACK;
else if (rq_data_dir(req) == WRITE)
cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
- else {
- cqr = ERR_PTR(-EINVAL);
- goto out;
- }
+ else
+ return ERR_PTR(-EINVAL);
/*
* Raw track based I/O needs IDAWs for each page,
@@ -3903,38 +3840,46 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
*/
cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
- /* 1x prefix + one read/write ccw per track */
- cplength = 1 + trkcount;
-
/*
- * struct PFX_eckd_data has up to 2 byte as extended parameter
- * this is needed for write full track and has to be mentioned
- * separately
- * add 8 instead of 2 to keep 8 byte boundary
+ * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
+ * of extended parameter. This is needed for write full track.
*/
- pfx_datasize = sizeof(struct PFX_eckd_data) + 8;
+ base_priv = basedev->private;
+ use_prefix = base_priv->features.feature[8] & 0x01;
+ if (use_prefix) {
+ cplength = 1 + trkcount;
+ size = sizeof(struct PFX_eckd_data) + 2;
+ } else {
+ cplength = 2 + trkcount;
+ size = sizeof(struct DE_eckd_data) +
+ sizeof(struct LRE_eckd_data) + 2;
+ }
+ size = ALIGN(size, 8);
- datasize = pfx_datasize + cidaw * sizeof(unsigned long long);
+ datasize = size + cidaw * sizeof(unsigned long long);
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
datasize, startdev);
if (IS_ERR(cqr))
- goto out;
+ return cqr;
+
ccw = cqr->cpaddr;
+ data = cqr->data;
- if (prefix_LRE(ccw++, cqr->data, first_trk, last_trk, cmd,
- basedev, startdev, 1 /* format */, first_offs + 1,
- trkcount, 0, 0) == -EAGAIN) {
- /* Clock not in sync and XRC is enabled.
- * Try again later.
- */
- dasd_sfree_request(cqr, startdev);
- cqr = ERR_PTR(-EAGAIN);
- goto out;
+ if (use_prefix) {
+ prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
+ startdev, 1, first_offs + 1, trkcount, 0, 0);
+ } else {
+ define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
+ ccw[-1].flags |= CCW_FLAG_CC;
+
+ data += sizeof(struct DE_eckd_data);
+ locate_record_ext(ccw++, data, first_trk, first_offs + 1,
+ trkcount, cmd, basedev, 0, 0);
}
- idaws = (unsigned long *)(cqr->data + pfx_datasize);
+ idaws = (unsigned long *)(cqr->data + size);
len_to_track_end = 0;
if (start_padding_sectors) {
ccw[-1].flags |= CCW_FLAG_CC;
@@ -3984,9 +3929,6 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
- if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
- cqr = NULL;
-out:
return cqr;
}
@@ -4096,7 +4038,7 @@ static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
private->count++;
if ((base->features & DASD_FEATURE_USERAW))
- cqr = dasd_raw_build_cp(startdev, block, req);
+ cqr = dasd_eckd_build_cp_raw(startdev, block, req);
else
cqr = dasd_eckd_build_cp(startdev, block, req);
if (IS_ERR(cqr))
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index e2a710c250a5..fb1f537d986a 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -29,6 +29,7 @@
#define DASD_ECKD_CCW_SNID 0x34
#define DASD_ECKD_CCW_RSSD 0x3e
#define DASD_ECKD_CCW_LOCATE_RECORD 0x47
+#define DASD_ECKD_CCW_LOCATE_RECORD_EXT 0x4b
#define DASD_ECKD_CCW_SNSS 0x54
#define DASD_ECKD_CCW_DEFINE_EXTENT 0x63
#define DASD_ECKD_CCW_WRITE_MT 0x85
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 06eb1de52d1c..68bae4f6bd88 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pfn_t.h>
+#include <linux/uio.h>
#include <linux/dax.h>
#include <asm/extmem.h>
#include <asm/io.h>
@@ -43,8 +44,15 @@ static const struct block_device_operations dcssblk_devops = {
.release = dcssblk_release,
};
+static size_t dcssblk_dax_copy_from_iter(struct dax_device *dax_dev,
+ pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
+{
+ return copy_from_iter(addr, bytes, i);
+}
+
static const struct dax_operations dcssblk_dax_ops = {
.direct_access = dcssblk_dax_direct_access,
+ .copy_from_iter = dcssblk_dax_copy_from_iter,
};
struct dcssblk_dev_info {
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 42018a20f2b7..0071febac9e6 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -278,7 +278,7 @@ struct scm_queue {
spinlock_t lock;
};
-static int scm_blk_request(struct blk_mq_hw_ctx *hctx,
+static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *qd)
{
struct scm_device *scmdev = hctx->queue->queuedata;
@@ -290,7 +290,7 @@ static int scm_blk_request(struct blk_mq_hw_ctx *hctx,
spin_lock(&sq->lock);
if (!scm_permit_request(bdev, req)) {
spin_unlock(&sq->lock);
- return BLK_MQ_RQ_QUEUE_BUSY;
+ return BLK_STS_RESOURCE;
}
scmrq = sq->scmrq;
@@ -299,7 +299,7 @@ static int scm_blk_request(struct blk_mq_hw_ctx *hctx,
if (!scmrq) {
SCM_LOG(5, "no request");
spin_unlock(&sq->lock);
- return BLK_MQ_RQ_QUEUE_BUSY;
+ return BLK_STS_RESOURCE;
}
scm_request_init(bdev, scmrq);
sq->scmrq = scmrq;
@@ -315,7 +315,7 @@ static int scm_blk_request(struct blk_mq_hw_ctx *hctx,
sq->scmrq = NULL;
spin_unlock(&sq->lock);
- return BLK_MQ_RQ_QUEUE_BUSY;
+ return BLK_STS_RESOURCE;
}
blk_mq_start_request(req);
@@ -324,7 +324,7 @@ static int scm_blk_request(struct blk_mq_hw_ctx *hctx,
sq->scmrq = NULL;
}
spin_unlock(&sq->lock);
- return BLK_MQ_RQ_QUEUE_OK;
+ return BLK_STS_OK;
}
static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index ba0e4f93503d..186d05e4c767 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -433,12 +433,7 @@ do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs,
case KDSKBSENT:
if (!perm)
return -EPERM;
- len = strnlen_user(u_kbs->kb_string, sizeof(u_kbs->kb_string));
- if (!len)
- return -EFAULT;
- if (len > sizeof(u_kbs->kb_string))
- return -EINVAL;
- p = memdup_user_nul(u_kbs->kb_string, len);
+ p = strndup_user(u_kbs->kb_string, sizeof(u_kbs->kb_string));
if (IS_ERR(p))
return PTR_ERR(p);
kfree(kbd->func_table[kb_func]);
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index a25367ebaa89..82f05c4b8c52 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -69,12 +69,10 @@ out_unlock:
static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
struct vfio_ccw_private *private;
- struct subchannel *sch;
struct irb *irb;
private = container_of(work, struct vfio_ccw_private, io_work);
irb = &private->irb;
- sch = private->sch;
if (scsw_is_solicited(&irb->scsw)) {
cp_update_scsw(&private->cp, &irb->scsw);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index b1c27e28859b..b5f4006198b9 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -94,7 +94,7 @@ static inline int zcrypt_process_rescan(void)
atomic_set(&zcrypt_rescan_req, 0);
atomic_inc(&zcrypt_rescan_count);
ap_bus_force_rescan();
- ZCRYPT_DBF(DBF_INFO, "rescan count=%07d",
+ ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n",
atomic_inc_return(&zcrypt_rescan_count));
return 1;
}
@@ -822,7 +822,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = zcrypt_rsa_modexpo(&mex);
} while (rc == -EAGAIN);
if (rc) {
- ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d", rc);
+ ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
return rc;
}
return put_user(mex.outputdatalength, &umex->outputdatalength);
@@ -841,7 +841,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = zcrypt_rsa_crt(&crt);
} while (rc == -EAGAIN);
if (rc) {
- ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d", rc);
+ ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
return rc;
}
return put_user(crt.outputdatalength, &ucrt->outputdatalength);
@@ -860,7 +860,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = zcrypt_send_cprb(&xcRB);
} while (rc == -EAGAIN);
if (rc)
- ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d", rc);
+ ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d\n", rc);
if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
return -EFAULT;
return rc;
@@ -879,7 +879,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = zcrypt_send_ep11_cprb(&xcrb);
} while (rc == -EAGAIN);
if (rc)
- ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d", rc);
+ ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
return -EFAULT;
return rc;
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 95e32a47face..4b3b08025ef6 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -296,8 +296,8 @@ NCR_700_detect(struct scsi_host_template *tpnt,
if(tpnt->sdev_attrs == NULL)
tpnt->sdev_attrs = NCR_700_dev_attrs;
- memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
- &pScript, GFP_KERNEL);
+ memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript,
+ GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
if(memory == NULL) {
printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
return NULL;
@@ -410,8 +410,8 @@ NCR_700_release(struct Scsi_Host *host)
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)host->hostdata[0];
- dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
- hostdata->script, hostdata->pScript);
+ dma_free_attrs(hostdata->dev, TOTAL_MEM_SIZE, hostdata->script,
+ hostdata->pScript, DMA_ATTR_NON_CONSISTENT);
return 1;
}
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3c52867dfe28..d384f4f86c26 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -47,17 +47,6 @@ config SCSI_NETLINK
default n
depends on NET
-config SCSI_MQ_DEFAULT
- bool "SCSI: use blk-mq I/O path by default"
- depends on SCSI
- ---help---
- This option enables the new blk-mq based I/O path for SCSI
- devices by default. With the option the scsi_mod.use_blk_mq
- module/boot option defaults to Y, without it to N, but it can
- still be overridden either way.
-
- If unsure say N.
-
config SCSI_PROC_FS
bool "legacy /proc/scsi/ support"
depends on SCSI && PROC_FS
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 43d88389e899..707ee2f5954d 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -2071,20 +2071,15 @@ int aac_get_adapter_info(struct aac_dev* dev)
expose_physicals = 0;
}
- if(dev->dac_support != 0) {
- if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
+ if (dev->dac_support) {
+ if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
if (!dev->in_reset)
- printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
- dev->name, dev->id);
- } else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32)) &&
- !pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
- printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
- dev->name, dev->id);
+ dev_info(&dev->pdev->dev, "64 Bit DAC enabled\n");
+ } else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
+ dev_info(&dev->pdev->dev, "DMA mask set failed, 64 Bit DAC disabled\n");
dev->dac_support = 0;
} else {
- printk(KERN_WARNING"%s%d: No suitable DMA available.\n",
- dev->name, dev->id);
+ dev_info(&dev->pdev->dev, "No suitable DMA available\n");
rcode = -ENOMEM;
}
}
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d281492009fb..d31a9bc2ba69 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -97,7 +97,7 @@ enum {
#define PMC_GLOBAL_INT_BIT0 0x00000001
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 50792
+# define AAC_DRIVER_BUILD 50834
# define AAC_DRIVER_BRANCH "-custom"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -415,6 +415,7 @@ struct aac_ciss_identify_pd {
* These macros convert from physical channels to virtual channels
*/
#define CONTAINER_CHANNEL (0)
+#define NATIVE_CHANNEL (1)
#define CONTAINER_TO_CHANNEL(cont) (CONTAINER_CHANNEL)
#define CONTAINER_TO_ID(cont) (cont)
#define CONTAINER_TO_LUN(cont) (0)
@@ -423,7 +424,6 @@ struct aac_ciss_identify_pd {
#define PMC_DEVICE_S6 0x28b
#define PMC_DEVICE_S7 0x28c
#define PMC_DEVICE_S8 0x28d
-#define PMC_DEVICE_S9 0x28f
#define aac_phys_to_logical(x) ((x)+1)
#define aac_logical_to_phys(x) ((x)?(x)-1:0)
@@ -2377,6 +2377,7 @@ struct revision
#define SOFT_RESET_TIME 60
+
struct aac_common
{
/*
@@ -2487,7 +2488,9 @@ struct aac_hba_info {
#define IOP_RESET_FW_FIB_DUMP 0x00000034
#define IOP_RESET 0x00001000
#define IOP_RESET_ALWAYS 0x00001001
-#define RE_INIT_ADAPTER 0x000000ee
+#define RE_INIT_ADAPTER 0x000000ee
+
+#define IOP_SRC_RESET_MASK 0x00000100
/*
* Adapter Status Register
@@ -2512,6 +2515,7 @@ struct aac_hba_info {
#define SELF_TEST_FAILED 0x00000004
#define MONITOR_PANIC 0x00000020
+#define KERNEL_BOOTING 0x00000040
#define KERNEL_UP_AND_RUNNING 0x00000080
#define KERNEL_PANIC 0x00000100
#define FLASH_UPD_PENDING 0x00002000
@@ -2684,6 +2688,18 @@ int aac_probe_container(struct aac_dev *dev, int cid);
int _aac_rx_init(struct aac_dev *dev);
int aac_rx_select_comm(struct aac_dev *dev, int comm);
int aac_rx_deliver_producer(struct fib * fib);
+
+static inline int aac_is_src(struct aac_dev *dev)
+{
+ u16 device = dev->pdev->device;
+
+ if (device == PMC_DEVICE_S6 ||
+ device == PMC_DEVICE_S7 ||
+ device == PMC_DEVICE_S8)
+ return 1;
+ return 0;
+}
+
char * get_container_type(unsigned type);
extern int numacb;
extern char aac_driver_version[];
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index d2f8d5954840..9ab0fa959d83 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -668,7 +668,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
- p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
+ p = kmalloc(sg_count[i], GFP_KERNEL);
if (!p) {
rcode = -ENOMEM;
goto cleanup;
@@ -732,8 +732,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
rcode = -EINVAL;
goto cleanup;
}
- /* Does this really need to be GFP_DMA? */
- p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
+
+ p = kmalloc(sg_count[i], GFP_KERNEL);
if(!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
sg_count[i], i, upsg->count));
@@ -788,8 +788,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
rcode = -EINVAL;
goto cleanup;
}
- /* Does this really need to be GFP_DMA? */
- p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
+
+ p = kmalloc(sg_count[i], GFP_KERNEL);
if(!p) {
dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
sg_count[i], i, usg->count));
@@ -845,8 +845,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
rcode = -EINVAL;
goto cleanup;
}
- /* Does this really need to be GFP_DMA? */
- p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
+ p = kmalloc(sg_count[i], GFP_KERNEL|GFP_DMA32);
if (!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
sg_count[i], i, usg->count));
@@ -887,7 +886,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
rcode = -EINVAL;
goto cleanup;
}
- p = kmalloc(sg_count[i], GFP_KERNEL);
+ p = kmalloc(sg_count[i], GFP_KERNEL|GFP_DMA32);
if (!p) {
dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
sg_count[i], i, upsg->count));
@@ -950,12 +949,15 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
&((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err;
struct aac_srb_reply reply;
+ memset(&reply, 0, sizeof(reply));
reply.status = ST_OK;
if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) {
/* fast response */
reply.srb_status = SRB_STATUS_SUCCESS;
reply.scsi_status = 0;
reply.data_xfer_length = byte_count;
+ reply.sense_data_size = 0;
+ memset(reply.sense_data, 0, AAC_SENSE_BUFFERSIZE);
} else {
reply.srb_status = err->service_response;
reply.scsi_status = err->status;
@@ -1019,6 +1021,7 @@ static int aac_get_hba_info(struct aac_dev *dev, void __user *arg)
{
struct aac_hba_info hbainfo;
+ memset(&hbainfo, 0, sizeof(hbainfo));
hbainfo.adapter_number = (u8) dev->id;
hbainfo.system_io_bus_number = dev->pdev->bus->number;
hbainfo.device_number = (dev->pdev->devfn >> 3);
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 1151505853cf..9ee025b1d0e0 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -53,11 +53,8 @@ static inline int aac_is_msix_mode(struct aac_dev *dev)
{
u32 status = 0;
- if (dev->pdev->device == PMC_DEVICE_S6 ||
- dev->pdev->device == PMC_DEVICE_S7 ||
- dev->pdev->device == PMC_DEVICE_S8) {
+ if (aac_is_src(dev))
status = src_readl(dev, MUnit.OMR);
- }
return (status & AAC_INT_MODE_MSIX);
}
@@ -325,9 +322,7 @@ int aac_send_shutdown(struct aac_dev * dev)
/* FIB should be freed only after getting the response from the F/W */
if (status != -ERESTARTSYS)
aac_fib_free(fibctx);
- if ((dev->pdev->device == PMC_DEVICE_S7 ||
- dev->pdev->device == PMC_DEVICE_S8 ||
- dev->pdev->device == PMC_DEVICE_S9) &&
+ if (aac_is_src(dev) &&
dev->msi_enabled)
aac_set_intx_mode(dev);
return status;
@@ -583,9 +578,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
dev->max_fib_size = status[1] & 0xFFE0;
host->sg_tablesize = status[2] >> 16;
dev->sg_tablesize = status[2] & 0xFFFF;
- if (dev->pdev->device == PMC_DEVICE_S7 ||
- dev->pdev->device == PMC_DEVICE_S8 ||
- dev->pdev->device == PMC_DEVICE_S9) {
+ if (aac_is_src(dev)) {
if (host->can_queue > (status[3] >> 16) -
AAC_NUM_MGT_FIB)
host->can_queue = (status[3] >> 16) -
@@ -604,10 +597,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
pr_warn("numacb=%d ignored\n", numacb);
}
- if (dev->pdev->device == PMC_DEVICE_S6 ||
- dev->pdev->device == PMC_DEVICE_S7 ||
- dev->pdev->device == PMC_DEVICE_S8 ||
- dev->pdev->device == PMC_DEVICE_S9)
+ if (aac_is_src(dev))
aac_define_int_mode(dev);
/*
* Ok now init the communication subsystem
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 7a1b8a2ce658..1c617ccfaf12 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -803,11 +803,11 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
if (aac_check_eeh_failure(dev))
return -EFAULT;
- /* Only set for first known interruptable command */
- if (down_interruptible(&fibptr->event_wait)) {
+ fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
+ if (down_interruptible(&fibptr->event_wait))
fibptr->done = 2;
- up(&fibptr->event_wait);
- }
+ fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT);
+
spin_lock_irqsave(&fibptr->event_lock, flags);
if ((fibptr->done == 0) || (fibptr->done == 2)) {
fibptr->done = 2; /* Tell interrupt we aborted */
@@ -1513,6 +1513,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
struct scsi_cmnd *command_list;
int jafo = 0;
int bled;
+ u64 dmamask;
+ int num_of_fibs = 0;
/*
* Assumptions:
@@ -1546,10 +1548,20 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
/*
* Loop through the fibs, close the synchronous FIBS
*/
- for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
+ retval = 1;
+ num_of_fibs = aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
+ for (index = 0; index < num_of_fibs; index++) {
+
struct fib *fib = &aac->fibs[index];
- if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
- (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
+ __le32 XferState = fib->hw_fib_va->header.XferState;
+ bool is_response_expected = false;
+
+ if (!(XferState & cpu_to_le32(NoResponseExpected | Async)) &&
+ (XferState & cpu_to_le32(ResponseExpected)))
+ is_response_expected = true;
+
+ if (is_response_expected
+ || fib->flags & FIB_CONTEXT_FLAG_WAIT) {
unsigned long flagv;
spin_lock_irqsave(&fib->event_lock, flagv);
up(&fib->event_wait);
@@ -1580,21 +1592,27 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
aac_free_irq(aac);
kfree(aac->fsa_dev);
aac->fsa_dev = NULL;
+
+ dmamask = DMA_BIT_MASK(32);
quirks = aac_get_driver_ident(index)->quirks;
- if (quirks & AAC_QUIRK_31BIT) {
- if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(31)))) ||
- ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(31)))))
- goto out;
- } else {
- if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) ||
- ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(32)))))
- goto out;
+ if (quirks & AAC_QUIRK_31BIT)
+ retval = pci_set_dma_mask(aac->pdev, dmamask);
+ else if (!(quirks & AAC_QUIRK_SRC))
+ retval = pci_set_dma_mask(aac->pdev, dmamask);
+ else
+ retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
+
+ if (quirks & AAC_QUIRK_31BIT && !retval) {
+ dmamask = DMA_BIT_MASK(31);
+ retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
}
+
+ if (retval)
+ goto out;
+
if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
goto out;
- if (quirks & AAC_QUIRK_31BIT)
- if ((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32))))
- goto out;
+
if (jafo) {
aac->thread = kthread_run(aac_command_thread, aac, "%s",
aac->name);
@@ -1768,8 +1786,6 @@ int aac_check_health(struct aac_dev * aac)
int BlinkLED;
unsigned long time_now, flagv = 0;
struct list_head * entry;
- struct Scsi_Host * host;
- int bled;
/* Extending the scope of fib_lock slightly to protect aac->in_reset */
if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
@@ -1881,19 +1897,6 @@ int aac_check_health(struct aac_dev * aac)
printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
- if (!aac_check_reset || ((aac_check_reset == 1) &&
- (aac->supplement_adapter_info.supported_options2 &
- AAC_OPTION_IGNORE_RESET)))
- goto out;
- host = aac->scsi_host_ptr;
- if (aac->thread->pid != current->pid)
- spin_lock_irqsave(host->host_lock, flagv);
- bled = aac_check_reset != 1 ? 1 : 0;
- _aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET);
- if (aac->thread->pid != current->pid)
- spin_unlock_irqrestore(host->host_lock, flagv);
- return BlinkLED;
-
out:
aac->in_reset = 0;
return BlinkLED;
@@ -2483,7 +2486,7 @@ int aac_command_thread(void *data)
if ((time_before(next_check_jiffies,next_jiffies))
&& ((difference = next_check_jiffies - jiffies) <= 0)) {
next_check_jiffies = next_jiffies;
- if (aac_check_health(dev) == 0) {
+ if (aac_adapter_check_health(dev) == 0) {
difference = ((long)(unsigned)check_interval)
* HZ;
next_check_jiffies = jiffies + difference;
@@ -2496,7 +2499,7 @@ int aac_command_thread(void *data)
int ret;
/* Don't even try to talk to adapter if its sick */
- ret = aac_check_health(dev);
+ ret = aac_adapter_check_health(dev);
if (ret || !dev->queues)
break;
next_check_jiffies = jiffies
@@ -2588,10 +2591,7 @@ void aac_free_irq(struct aac_dev *dev)
int cpu;
cpu = cpumask_first(cpu_online_mask);
- if (dev->pdev->device == PMC_DEVICE_S6 ||
- dev->pdev->device == PMC_DEVICE_S7 ||
- dev->pdev->device == PMC_DEVICE_S8 ||
- dev->pdev->device == PMC_DEVICE_S9) {
+ if (aac_is_src(dev)) {
if (dev->max_msix > 1) {
for (i = 0; i < dev->max_msix; i++)
free_irq(pci_irq_vector(dev->pdev, i),
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 372a07533026..0f277df73af0 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -405,17 +405,23 @@ static int aac_slave_configure(struct scsi_device *sdev)
int chn, tid;
unsigned int depth = 0;
unsigned int set_timeout = 0;
+ bool set_qd_dev_type = false;
+ u8 devtype = 0;
chn = aac_logical_to_phys(sdev_channel(sdev));
tid = sdev_id(sdev);
- if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS &&
- aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
- depth = aac->hba_map[chn][tid].qd_limit;
+ if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) {
+ devtype = aac->hba_map[chn][tid].devtype;
+
+ if (devtype == AAC_DEVTYPE_NATIVE_RAW)
+ depth = aac->hba_map[chn][tid].qd_limit;
+ else if (devtype == AAC_DEVTYPE_ARC_RAW)
+ set_qd_dev_type = true;
+
set_timeout = 1;
goto common_config;
}
-
if (aac->jbod && (sdev->type == TYPE_DISK))
sdev->removable = 1;
@@ -466,9 +472,26 @@ static int aac_slave_configure(struct scsi_device *sdev)
++num_lsu;
depth = (host->can_queue - num_one) / num_lsu;
+
+ if (sdev_channel(sdev) != NATIVE_CHANNEL)
+ goto common_config;
+
+ set_qd_dev_type = true;
+
}
common_config:
+
+ /*
+ * Check if SATA drive
+ */
+ if (set_qd_dev_type) {
+ if (strncmp(sdev->vendor, "ATA", 3) == 0)
+ depth = 32;
+ else
+ depth = 64;
+ }
+
/*
* Firmware has an individual device recovery time typically
* of 35 seconds, give us a margin.
@@ -601,6 +624,56 @@ static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
return aac_do_ioctl(dev, cmd, arg);
}
+static int get_num_of_incomplete_fibs(struct aac_dev *aac)
+{
+
+ unsigned long flags;
+ struct scsi_device *sdev = NULL;
+ struct Scsi_Host *shost = aac->scsi_host_ptr;
+ struct scsi_cmnd *scmnd = NULL;
+ struct device *ctrl_dev;
+
+ int mlcnt = 0;
+ int llcnt = 0;
+ int ehcnt = 0;
+ int fwcnt = 0;
+ int krlcnt = 0;
+
+ __shost_for_each_device(sdev, shost) {
+ spin_lock_irqsave(&sdev->list_lock, flags);
+ list_for_each_entry(scmnd, &sdev->cmd_list, list) {
+ switch (scmnd->SCp.phase) {
+ case AAC_OWNER_FIRMWARE:
+ fwcnt++;
+ break;
+ case AAC_OWNER_ERROR_HANDLER:
+ ehcnt++;
+ break;
+ case AAC_OWNER_LOWLEVEL:
+ llcnt++;
+ break;
+ case AAC_OWNER_MIDLEVEL:
+ mlcnt++;
+ break;
+ default:
+ krlcnt++;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&sdev->list_lock, flags);
+ }
+
+ ctrl_dev = &aac->pdev->dev;
+
+ dev_info(ctrl_dev, "outstanding cmd: midlevel-%d\n", mlcnt);
+ dev_info(ctrl_dev, "outstanding cmd: lowlevel-%d\n", llcnt);
+ dev_info(ctrl_dev, "outstanding cmd: error handler-%d\n", ehcnt);
+ dev_info(ctrl_dev, "outstanding cmd: firmware-%d\n", fwcnt);
+ dev_info(ctrl_dev, "outstanding cmd: kernel-%d\n", krlcnt);
+
+ return mlcnt + llcnt + ehcnt + fwcnt;
+}
+
static int aac_eh_abort(struct scsi_cmnd* cmd)
{
struct scsi_device * dev = cmd->device;
@@ -661,8 +734,8 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
(fib_callback) aac_hba_callback,
(void *) cmd);
- /* Wait up to 2 minutes for completion */
- for (count = 0; count < 120; ++count) {
+ /* Wait up to 15 secs for completion */
+ for (count = 0; count < 15; ++count) {
if (cmd->SCp.sent_command) {
ret = SUCCESS;
break;
@@ -754,6 +827,12 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
int count;
u32 bus, cid;
int ret = FAILED;
+ int status = 0;
+ __le32 supported_options2 = 0;
+ bool is_mu_reset;
+ bool is_ignore_reset;
+ bool is_doorbell_reset;
+
bus = aac_logical_to_phys(scmd_channel(cmd));
cid = scmd_id(cmd);
@@ -817,8 +896,8 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
(fib_callback) aac_hba_callback,
(void *) cmd);
- /* Wait up to 2 minutes for completion */
- for (count = 0; count < 120; ++count) {
+ /* Wait up to 15 seconds for completion */
+ for (count = 0; count < 15; ++count) {
if (cmd->SCp.sent_command) {
ret = SUCCESS;
break;
@@ -826,12 +905,10 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
msleep(1000);
}
- if (ret != SUCCESS)
- pr_err("%s: Host adapter reset request timed out\n",
- AAC_DRIVERNAME);
+ if (ret == SUCCESS)
+ goto out;
+
} else {
- struct scsi_cmnd *command;
- unsigned long flags;
/* Mark the assoc. FIB to not complete, eh handler does this */
for (count = 0;
@@ -846,68 +923,42 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
}
}
+ }
- pr_err("%s: Host adapter reset request. SCSI hang ?\n",
- AAC_DRIVERNAME);
-
- count = aac_check_health(aac);
- if (count)
- return count;
- /*
- * Wait for all commands to complete to this specific
- * target (block maximum 60 seconds).
- */
- for (count = 60; count; --count) {
- int active = aac->in_reset;
-
- if (active == 0)
- __shost_for_each_device(dev, host) {
- spin_lock_irqsave(&dev->list_lock, flags);
- list_for_each_entry(command, &dev->cmd_list,
- list) {
- if ((command != cmd) &&
- (command->SCp.phase ==
- AAC_OWNER_FIRMWARE)) {
- active++;
- break;
- }
- }
- spin_unlock_irqrestore(&dev->list_lock, flags);
- if (active)
- break;
+ pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME);
- }
- /*
- * We can exit If all the commands are complete
- */
- if (active == 0)
- return SUCCESS;
- ssleep(1);
- }
- pr_err("%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
+ /*
+ * Check the health of the controller
+ */
+ status = aac_adapter_check_health(aac);
+ if (status)
+ dev_err(&aac->pdev->dev, "Adapter health - %d\n", status);
- /*
- * This adapter needs a blind reset, only do so for
- * Adapters that support a register, instead of a commanded,
- * reset.
- */
- if (((aac->supplement_adapter_info.supported_options2 &
- AAC_OPTION_MU_RESET) ||
- (aac->supplement_adapter_info.supported_options2 &
- AAC_OPTION_DOORBELL_RESET)) &&
- aac_check_reset &&
- ((aac_check_reset != 1) ||
- !(aac->supplement_adapter_info.supported_options2 &
- AAC_OPTION_IGNORE_RESET))) {
- /* Bypass wait for command quiesce */
- aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET);
- }
- ret = SUCCESS;
- }
+ count = get_num_of_incomplete_fibs(aac);
+ if (count == 0)
+ return SUCCESS;
+
+ /*
+ * Check if reset is supported by the firmware
+ */
+ supported_options2 = aac->supplement_adapter_info.supported_options2;
+ is_mu_reset = supported_options2 & AAC_OPTION_MU_RESET;
+ is_doorbell_reset = supported_options2 & AAC_OPTION_DOORBELL_RESET;
+ is_ignore_reset = supported_options2 & AAC_OPTION_IGNORE_RESET;
/*
- * Cause an immediate retry of the command with a ten second delay
- * after successful tur
+ * This adapter needs a blind reset, only do so for
+ * Adapters that support a register, instead of a commanded,
+ * reset.
*/
+ if ((is_mu_reset || is_doorbell_reset)
+ && aac_check_reset
+ && (aac_check_reset != -1 || !is_ignore_reset)) {
+ /* Bypass wait for command quiesce */
+ aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET);
+ }
+ ret = SUCCESS;
+
+out:
return ret;
}
@@ -1365,10 +1416,7 @@ static void __aac_shutdown(struct aac_dev * aac)
kthread_stop(aac->thread);
}
aac_adapter_disable_int(aac);
- if (aac->pdev->device == PMC_DEVICE_S6 ||
- aac->pdev->device == PMC_DEVICE_S7 ||
- aac->pdev->device == PMC_DEVICE_S8 ||
- aac->pdev->device == PMC_DEVICE_S9) {
+ if (aac_is_src(aac)) {
if (aac->max_msix > 1) {
for (i = 0; i < aac->max_msix; i++) {
free_irq(pci_irq_vector(aac->pdev, i),
@@ -1403,6 +1451,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
int error = -ENODEV;
int unique_id = 0;
u64 dmamask;
+ int mask_bits = 0;
extern int aac_sync_mode;
/*
@@ -1426,18 +1475,32 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto out;
error = -ENODEV;
+ if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
+ error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (error) {
+ dev_err(&pdev->dev, "PCI 32 BIT dma mask set failed");
+ goto out_disable_pdev;
+ }
+ }
+
/*
* If the quirk31 bit is set, the adapter needs adapter
* to driver communication memory to be allocated below 2gig
*/
- if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
+ if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) {
dmamask = DMA_BIT_MASK(31);
- else
+ mask_bits = 31;
+ } else {
dmamask = DMA_BIT_MASK(32);
+ mask_bits = 32;
+ }
- if (pci_set_dma_mask(pdev, dmamask) ||
- pci_set_consistent_dma_mask(pdev, dmamask))
+ error = pci_set_consistent_dma_mask(pdev, dmamask);
+ if (error) {
+ dev_err(&pdev->dev, "PCI %d B consistent dma mask set failed\n"
+ , mask_bits);
goto out_disable_pdev;
+ }
pci_set_master(pdev);
@@ -1501,15 +1564,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_deinit;
}
- /*
- * If we had set a smaller DMA mask earlier, set it to 4gig
- * now since the adapter can dma data to at least a 4gig
- * address space.
- */
- if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
- goto out_deinit;
-
aac->maximum_num_channels = aac_drivers[index].channels;
error = aac_get_adapter_info(aac);
if (error < 0)
@@ -1627,9 +1681,7 @@ static int aac_acquire_resources(struct aac_dev *dev)
aac_adapter_enable_int(dev);
- if ((dev->pdev->device == PMC_DEVICE_S7 ||
- dev->pdev->device == PMC_DEVICE_S8 ||
- dev->pdev->device == PMC_DEVICE_S9))
+ if (aac_is_src(dev))
aac_define_int_mode(dev);
if (dev->msi_enabled)
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 7b0410e0f569..48c2b2b34b72 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -694,33 +694,52 @@ static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev)
0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
}
-static void aac_send_iop_reset(struct aac_dev *dev, int bled)
+static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
{
- u32 var, reset_mask;
+ bool ctrl_up = true;
+ unsigned long status, start;
+ bool is_up = false;
- aac_dump_fw_fib_iop_reset(dev);
+ start = jiffies;
+ do {
+ schedule();
+ status = src_readl(dev, MUnit.OMR);
- bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
- 0, 0, 0, 0, 0, 0, &var,
- &reset_mask, NULL, NULL, NULL);
+ if (status == 0xffffffff)
+ status = 0;
- if ((bled || var != 0x00000001) && !dev->doorbell_mask)
- bled = -EINVAL;
- else if (dev->doorbell_mask) {
- reset_mask = dev->doorbell_mask;
- bled = 0;
- var = 0x00000001;
- }
+ if (status & KERNEL_BOOTING) {
+ start = jiffies;
+ continue;
+ }
+
+ if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) {
+ ctrl_up = false;
+ break;
+ }
+
+ is_up = status & KERNEL_UP_AND_RUNNING;
+
+ } while (!is_up);
+
+ return ctrl_up;
+}
+
+static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
+{
+ aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
+ NULL, NULL, NULL, NULL);
+}
+
+static void aac_send_iop_reset(struct aac_dev *dev)
+{
+ aac_dump_fw_fib_iop_reset(dev);
+
+ aac_notify_fw_of_iop_reset(dev);
aac_set_intx_mode(dev);
- if (!bled && (dev->supplement_adapter_info.supported_options2 &
- AAC_OPTION_DOORBELL_RESET)) {
- src_writel(dev, MUnit.IDR, reset_mask);
- } else {
- src_writel(dev, MUnit.IDR, 0x100);
- }
- msleep(30000);
+ src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);
}
static void aac_send_hardware_soft_reset(struct aac_dev *dev)
@@ -735,14 +754,14 @@ static void aac_send_hardware_soft_reset(struct aac_dev *dev)
static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
{
- unsigned long status, start;
+ bool is_ctrl_up;
+ int ret = 0;
if (bled < 0)
goto invalid_out;
if (bled)
- pr_err("%s%d: adapter kernel panic'd %x.\n",
- dev->name, dev->id, bled);
+ dev_err(&dev->pdev->dev, "adapter kernel panic'd %x.\n", bled);
/*
* When there is a BlinkLED, IOP_RESET has no effect
@@ -752,48 +771,55 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
- switch (reset_type) {
- case IOP_HWSOFT_RESET:
- aac_send_iop_reset(dev, bled);
+ dev_err(&dev->pdev->dev, "Controller reset type is %d\n", reset_type);
+
+ if (reset_type & HW_IOP_RESET) {
+ dev_info(&dev->pdev->dev, "Issuing IOP reset\n");
+ aac_send_iop_reset(dev);
+
/*
- * Check to see if KERNEL_UP_AND_RUNNING
- * Wait for the adapter to be up and running.
- * If !KERNEL_UP_AND_RUNNING issue HW Soft Reset
+ * Wait until the 'up and running' status comes through
*/
- status = src_readl(dev, MUnit.OMR);
- if (dev->sa_firmware
- && !(status & KERNEL_UP_AND_RUNNING)) {
- start = jiffies;
- do {
- status = src_readl(dev, MUnit.OMR);
- if (time_after(jiffies,
- start+HZ*SOFT_RESET_TIME)) {
- aac_send_hardware_soft_reset(dev);
- start = jiffies;
- }
- } while (!(status & KERNEL_UP_AND_RUNNING));
+ is_ctrl_up = aac_is_ctrl_up_and_running(dev);
+ if (!is_ctrl_up)
+ dev_err(&dev->pdev->dev, "IOP reset failed\n");
+ else {
+ dev_info(&dev->pdev->dev, "IOP reset succeded\n");
+ goto set_startup;
}
- break;
- case HW_SOFT_RESET:
- if (dev->sa_firmware) {
- aac_send_hardware_soft_reset(dev);
- aac_set_intx_mode(dev);
- }
- break;
- default:
- aac_send_iop_reset(dev, bled);
- break;
}
-invalid_out:
+ if (!dev->sa_firmware) {
+ dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n");
+ ret = -ENODEV;
+ goto out;
+ }
- if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
- return -ENODEV;
+ if (reset_type & HW_SOFT_RESET) {
+ dev_info(&dev->pdev->dev, "Issuing SOFT reset\n");
+ aac_send_hardware_soft_reset(dev);
+ dev->msi_enabled = 0;
+ is_ctrl_up = aac_is_ctrl_up_and_running(dev);
+ if (!is_ctrl_up) {
+ dev_err(&dev->pdev->dev, "SOFT reset failed\n");
+ ret = -ENODEV;
+ goto out;
+ } else
+ dev_info(&dev->pdev->dev, "SOFT reset succeded\n");
+ }
+
+set_startup:
if (startup_timeout < 300)
startup_timeout = 300;
- return 0;
+out:
+ return ret;
+
+invalid_out:
+ if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
+ ret = -ENODEV;
+goto out;
}
/**
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index f792420c533e..a75feebe6ad6 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -776,7 +776,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
* from/to alternative Ram.
*/
if (ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(EXTD_DMA) &&
- m68k_num_memory > 1) {
+ m68k_realnum_memory > 1) {
atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI");
if (!atari_dma_buffer) {
pr_err(PFX "can't allocate ST-RAM double buffer\n");
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
index ac1c0b631aca..e4469df9c469 100644
--- a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
+++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
@@ -3,7 +3,8 @@
* session resources such as connection id and qp resources.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 1f424e40afdf..7e007e142aab 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -1,7 +1,8 @@
/* bnx2fc.h: QLogic Linux FCoE offload driver.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -65,7 +66,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "2.10.3"
+#define BNX2FC_VERSION "2.11.8"
#define PFX "bnx2fc: "
diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h
index 5b20efb661a5..9ed150307a39 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_constants.h
+++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h
@@ -3,7 +3,8 @@
* session resources such as connection id and qp resources.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.c b/drivers/scsi/bnx2fc/bnx2fc_debug.c
index c9e0bc7fad3b..47ba3ba1e03b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_debug.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.c
@@ -3,7 +3,8 @@
* session resources such as connection id and qp resources.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h
index 34fda3e04d27..76717acee3ab 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_debug.h
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h
@@ -3,7 +3,8 @@
* session resources such as connection id and qp resources.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index 68ca518d34b0..76e65a32f38c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -4,7 +4,8 @@
* and responses.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -61,13 +62,20 @@ int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
struct fc_els_rrq rrq;
struct bnx2fc_rport *tgt = aborted_io_req->tgt;
- struct fc_lport *lport = tgt->rdata->local_port;
+ struct fc_lport *lport = NULL;
struct bnx2fc_els_cb_arg *cb_arg = NULL;
- u32 sid = tgt->sid;
- u32 r_a_tov = lport->r_a_tov;
+ u32 sid = 0;
+ u32 r_a_tov = 0;
unsigned long start = jiffies;
int rc;
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))
+ return -EINVAL;
+
+ lport = tgt->rdata->local_port;
+ sid = tgt->sid;
+ r_a_tov = lport->r_a_tov;
+
BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
aborted_io_req->xid);
memset(&rrq, 0, sizeof(rrq));
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index b025ee5da1ba..7dfe709a7138 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -4,7 +4,8 @@
* FIP/FCoE packets, listen to link events etc.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -522,10 +523,12 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
struct fc_lport *vn_port;
- struct fcoe_port *port;
+ struct fcoe_port *port, *phys_port;
u8 *mac = NULL;
u8 *dest_mac = NULL;
struct fcoe_hdr *hp;
+ struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
fr = fcoe_dev_from_skb(skb);
lport = fr->fr_dev;
@@ -561,8 +564,19 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
return;
}
+ phys_port = lport_priv(lport);
+ interface = phys_port->priv;
+ ctlr = bnx2fc_to_ctlr(interface);
+
fh = fc_frame_header_get(fp);
+ if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) {
+ BNX2FC_HBA_DBG(lport, "FC frame d_id mismatch with MAC %pM.\n",
+ dest_mac);
+ kfree_skb(skb);
+ return;
+ }
+
vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
if (vn_port) {
port = lport_priv(vn_port);
@@ -572,6 +586,14 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
return;
}
}
+ if (ctlr->state) {
+ if (!ether_addr_equal(mac, ctlr->dest_addr)) {
+ BNX2FC_HBA_DBG(lport, "Wrong source address: mac:%pM dest_addr:%pM.\n",
+ mac, ctlr->dest_addr);
+ kfree_skb(skb);
+ return;
+ }
+ }
if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
fh->fh_type == FC_TYPE_FCP) {
/* Drop FCP data. We dont this in L2 path */
@@ -597,6 +619,18 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
return;
}
+ /*
+ * If the destination ID from the frame header does not match what we
+ * have on record for lport and the search for a NPIV port came up
+ * empty then this is not addressed to our port so simply drop it.
+ */
+ if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
+ BNX2FC_HBA_DBG(lport, "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n",
+ lport->port_id, ntoh24(fh->fh_d_id));
+ kfree_skb(skb);
+ return;
+ }
+
stats = per_cpu_ptr(lport->stats, smp_processor_id());
stats->RxFrames++;
stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
@@ -2105,6 +2139,9 @@ static uint bnx2fc_npiv_create_vports(struct fc_lport *lport,
{
struct fc_vport_identifiers vpid;
uint i, created = 0;
+ u64 wwnn = 0;
+ char wwpn_str[32];
+ char wwnn_str[32];
if (npiv_tbl->count > MAX_NPIV_ENTRIES) {
BNX2FC_HBA_DBG(lport, "Exceeded count max of npiv table\n");
@@ -2123,11 +2160,23 @@ static uint bnx2fc_npiv_create_vports(struct fc_lport *lport,
vpid.disable = false;
for (i = 0; i < npiv_tbl->count; i++) {
- vpid.node_name = wwn_to_u64(npiv_tbl->wwnn[i]);
+ wwnn = wwn_to_u64(npiv_tbl->wwnn[i]);
+ if (wwnn == 0) {
+ /*
+ * If we get a 0 element for the WWNN then assume
+ * the WWNN should be the same as the physical port.
+ */
+ wwnn = lport->wwnn;
+ }
+ vpid.node_name = wwnn;
vpid.port_name = wwn_to_u64(npiv_tbl->wwpn[i]);
scnprintf(vpid.symbolic_name, sizeof(vpid.symbolic_name),
"NPIV[%u]:%016llx-%016llx",
created, vpid.port_name, vpid.node_name);
+ fcoe_wwn_to_str(vpid.node_name, wwnn_str, sizeof(wwnn_str));
+ fcoe_wwn_to_str(vpid.port_name, wwpn_str, sizeof(wwpn_str));
+ BNX2FC_HBA_DBG(lport, "Creating vport %s:%s.\n", wwnn_str,
+ wwpn_str);
if (fc_vport_create(lport->host, 0, &vpid))
created++;
else
@@ -2524,6 +2573,11 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
bnx2fc_hba_destroy(hba);
}
+static void bnx2fc_rport_terminate_io(struct fc_rport *rport)
+{
+ /* This is a no-op */
+}
+
/**
* bnx2fc_fcoe_reset - Resets the fcoe
*
@@ -2860,7 +2914,7 @@ static struct fc_function_template bnx2fc_transport_function = {
.issue_fc_host_lip = bnx2fc_fcoe_reset,
- .terminate_rport_io = fc_rport_terminate_io,
+ .terminate_rport_io = bnx2fc_rport_terminate_io,
.vport_create = bnx2fc_vport_create,
.vport_delete = bnx2fc_vport_destroy,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 5ff9f89c17c7..913c750205ce 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -3,7 +3,8 @@
* with 57712 FCoE firmware.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 898461b146cc..5b6153f23f01 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -2,7 +2,8 @@
* IO manager and SCSI IO processing.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1166,16 +1167,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
"not on active_q\n", io_req->xid);
/*
- * This condition can happen only due to the FW bug,
- * where we do not receive cleanup response from
- * the FW. Handle this case gracefully by erroring
- * back the IO request to SCSI-ml
+ * The IO is still with the FW.
+ * Return failure and let SCSI-ml retry eh_abort.
*/
- bnx2fc_scsi_done(io_req, DID_ABORT);
-
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_unlock_bh(&tgt->tgt_lock);
- return SUCCESS;
+ return FAILED;
}
/*
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 739bfb62aff6..59a2dfbcbc69 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -3,7 +3,8 @@
* session resources such as connection id and qp resources.
*
* Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index f32a66f89d25..03c104b47f31 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1909,7 +1909,8 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
bnx2i_ep_active_list_add(hba, bnx2i_ep);
- if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
+ rc = bnx2i_map_ep_dbell_regs(bnx2i_ep);
+ if (rc)
goto del_active_ep;
mutex_unlock(&hba->net_dev_lock);
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index dab195f04da7..2029ad225121 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -794,18 +794,24 @@ csio_hw_dev_ready(struct csio_hw *hw)
{
uint32_t reg;
int cnt = 6;
+ int src_pf;
while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
(--cnt != 0))
mdelay(100);
- if ((cnt == 0) && (((int32_t)(SOURCEPF_G(reg)) < 0) ||
- (SOURCEPF_G(reg) >= CSIO_MAX_PFN))) {
+ if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
+ src_pf = SOURCEPF_G(reg);
+ else
+ src_pf = T6_SOURCEPF_G(reg);
+
+ if ((cnt == 0) && (((int32_t)(src_pf) < 0) ||
+ (src_pf >= CSIO_MAX_PFN))) {
csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
return -EIO;
}
- hw->pfn = SOURCEPF_G(reg);
+ hw->pfn = src_pf;
return 0;
}
@@ -1581,10 +1587,16 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
unsigned int mtype = 0, maddr = 0;
uint32_t *cfg_data;
int value_to_add = 0;
+ const char *fw_cfg_file;
+
+ if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
+ fw_cfg_file = FW_CFG_NAME_T5;
+ else
+ fw_cfg_file = FW_CFG_NAME_T6;
- if (request_firmware(&cf, FW_CFG_NAME_T5, dev) < 0) {
+ if (request_firmware(&cf, fw_cfg_file, dev) < 0) {
csio_err(hw, "could not find config file %s, err: %d\n",
- FW_CFG_NAME_T5, ret);
+ fw_cfg_file, ret);
return -ENOENT;
}
@@ -1623,9 +1635,8 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
}
if (ret == 0) {
- csio_info(hw, "config file upgraded to %s\n",
- FW_CFG_NAME_T5);
- snprintf(path, 64, "%s%s", "/lib/firmware/", FW_CFG_NAME_T5);
+ csio_info(hw, "config file upgraded to %s\n", fw_cfg_file);
+ snprintf(path, 64, "%s%s", "/lib/firmware/", fw_cfg_file);
}
leave:
@@ -1886,6 +1897,19 @@ static struct fw_info fw_info_array[] = {
.intfver_iscsi = FW_INTFVER(T5, ISCSI),
.intfver_fcoe = FW_INTFVER(T5, FCOE),
},
+ }, {
+ .chip = CHELSIO_T6,
+ .fs_name = FW_CFG_NAME_T6,
+ .fw_mod_name = FW_FNAME_T6,
+ .fw_hdr = {
+ .chip = FW_HDR_CHIP_T6,
+ .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
+ .intfver_nic = FW_INTFVER(T6, NIC),
+ .intfver_vnic = FW_INTFVER(T6, VNIC),
+ .intfver_ri = FW_INTFVER(T6, RI),
+ .intfver_iscsi = FW_INTFVER(T6, ISCSI),
+ .intfver_fcoe = FW_INTFVER(T6, FCOE),
+ },
}
};
@@ -2002,6 +2026,7 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
struct device *dev = &pci_dev->dev ;
const u8 *fw_data = NULL;
unsigned int fw_size = 0;
+ const char *fw_bin_file;
/* This is the firmware whose headers the driver was compiled
* against
@@ -2014,9 +2039,14 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
return -EINVAL;
}
- if (request_firmware(&fw, FW_FNAME_T5, dev) < 0) {
+ if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
+ fw_bin_file = FW_FNAME_T5;
+ else
+ fw_bin_file = FW_FNAME_T6;
+
+ if (request_firmware(&fw, fw_bin_file, dev) < 0) {
csio_err(hw, "could not find firmware image %s, err: %d\n",
- FW_FNAME_T5, ret);
+ fw_bin_file, ret);
} else {
fw_data = fw->data;
fw_size = fw->size;
@@ -2038,6 +2068,17 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
return ret;
}
+static int csio_hw_check_fwver(struct csio_hw *hw)
+{
+ if (csio_is_t6(hw->pdev->device & CSIO_HW_CHIP_MASK) &&
+ (hw->fwrev < CSIO_MIN_T6_FW)) {
+ csio_hw_print_fw_version(hw, "T6 unsupported fw");
+ return -1;
+ }
+
+ return 0;
+}
+
/*
* csio_hw_configure - Configure HW
* @hw - HW module
@@ -2105,6 +2146,10 @@ csio_hw_configure(struct csio_hw *hw)
if (rv != 0)
goto out;
+ rv = csio_hw_check_fwver(hw);
+ if (rv < 0)
+ goto out;
+
/* If the firmware doesn't support Configuration Files,
* return an error.
*/
@@ -2132,6 +2177,10 @@ csio_hw_configure(struct csio_hw *hw)
}
} else {
+ rv = csio_hw_check_fwver(hw);
+ if (rv < 0)
+ goto out;
+
if (hw->fw_state == CSIO_DEV_STATE_INIT) {
hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
@@ -2241,9 +2290,14 @@ static void
csio_hw_intr_enable(struct csio_hw *hw)
{
uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
- uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+ u32 pf = 0;
uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);
+ if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
+ pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+ else
+ pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+
/*
* Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
* by FW, so do nothing for INTX.
@@ -2293,7 +2347,12 @@ csio_hw_intr_enable(struct csio_hw *hw)
void
csio_hw_intr_disable(struct csio_hw *hw)
{
- uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+ u32 pf = 0;
+
+ if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
+ pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
+ else
+ pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
return;
@@ -2918,6 +2977,8 @@ static void csio_cplsw_intr_handler(struct csio_hw *hw)
*/
static void csio_le_intr_handler(struct csio_hw *hw)
{
+ enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id);
+
static struct intr_info le_intr_info[] = {
{ LIPMISS_F, "LE LIP miss", -1, 0 },
{ LIP0_F, "LE 0 LIP error", -1, 0 },
@@ -2927,7 +2988,18 @@ static void csio_le_intr_handler(struct csio_hw *hw)
{ 0, NULL, 0, 0 }
};
- if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, le_intr_info))
+ static struct intr_info t6_le_intr_info[] = {
+ { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
+ { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
+ { TCAMINTPERR_F, "LE parity error", -1, 1 },
+ { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+ { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A,
+ (chip == CHELSIO_T5) ?
+ le_intr_info : t6_le_intr_info))
csio_hw_fatal_err(hw);
}
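The csio_hw.c hunks above repeat one pattern: mask the PCI device ID with CSIO_HW_CHIP_MASK and branch on T5 versus T6 to pick the register field macro or firmware file. A minimal sketch of that dispatch, assuming simplified mask and ID constants (the firmware names match those defined in csio_hw_chip.h):

/* Illustrative sketch; mask and chip IDs mirror csio_hw_chip.h values. */
#include <stdint.h>

#define CHIP_MASK_SKETCH 0xF000
#define CHIP_T5_SKETCH   0x5000

static const char *fw_name_for_device(uint16_t pci_device)
{
	/* The chip nibble of the PCI device ID selects the firmware image. */
	if ((pci_device & CHIP_MASK_SKETCH) == CHIP_T5_SKETCH)
		return "cxgb4/t5fw.bin";
	return "cxgb4/t6fw.bin";	/* everything else is treated as T6 */
}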
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 62758e830d3b..9acb89538e29 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -71,6 +71,7 @@
#define CSIO_MAX_CMD_PER_LUN 32
#define CSIO_MAX_DDP_BUF_SIZE (1024 * 1024)
#define CSIO_MAX_SECTOR_SIZE 128
+#define CSIO_MIN_T6_FW 0x01102D00 /* FW 1.16.45.0 */
/* Interrupts */
#define CSIO_EXTRA_MSI_IQS 2 /* Extra iqs for INTX/MSI mode
diff --git a/drivers/scsi/csiostor/csio_hw_chip.h b/drivers/scsi/csiostor/csio_hw_chip.h
index b56a11d817be..aaabdbe11d88 100644
--- a/drivers/scsi/csiostor/csio_hw_chip.h
+++ b/drivers/scsi/csiostor/csio_hw_chip.h
@@ -39,11 +39,15 @@
/* Define MACRO values */
#define CSIO_HW_T5 0x5000
#define CSIO_T5_FCOE_ASIC 0x5600
+#define CSIO_HW_T6 0x6000
+#define CSIO_T6_FCOE_ASIC 0x6600
#define CSIO_HW_CHIP_MASK 0xF000
#define T5_REGMAP_SIZE (332 * 1024)
#define FW_FNAME_T5 "cxgb4/t5fw.bin"
#define FW_CFG_NAME_T5 "cxgb4/t5-config.txt"
+#define FW_FNAME_T6 "cxgb4/t6fw.bin"
+#define FW_CFG_NAME_T6 "cxgb4/t6-config.txt"
#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_FPGA 0x100
@@ -51,12 +55,17 @@
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
#define CHELSIO_T5 0x5
+#define CHELSIO_T6 0x6
enum chip_type {
T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
T5_FIRST_REV = T5_A0,
T5_LAST_REV = T5_A1,
+
+ T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0),
+ T6_FIRST_REV = T6_A0,
+ T6_LAST_REV = T6_A0,
};
static inline int csio_is_t5(uint16_t chip)
@@ -64,6 +73,11 @@ static inline int csio_is_t5(uint16_t chip)
return (chip == CSIO_HW_T5);
}
+static inline int csio_is_t6(uint16_t chip)
+{
+ return (chip == CSIO_HW_T6);
+}
+
/* Define MACRO DEFINITIONS */
#define CSIO_DEVICE(devid, idx) \
{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c
index 3267f4f627c9..f24def6c6fd1 100644
--- a/drivers/scsi/csiostor/csio_hw_t5.c
+++ b/drivers/scsi/csiostor/csio_hw_t5.c
@@ -71,27 +71,6 @@ csio_t5_set_mem_win(struct csio_hw *hw, uint32_t win)
static void
csio_t5_pcie_intr_handler(struct csio_hw *hw)
{
- static struct intr_info sysbus_intr_info[] = {
- { RNPP_F, "RXNP array parity error", -1, 1 },
- { RPCP_F, "RXPC array parity error", -1, 1 },
- { RCIP_F, "RXCIF array parity error", -1, 1 },
- { RCCP_F, "Rx completions control array parity error", -1, 1 },
- { RFTP_F, "RXFT array parity error", -1, 1 },
- { 0, NULL, 0, 0 }
- };
- static struct intr_info pcie_port_intr_info[] = {
- { TPCP_F, "TXPC array parity error", -1, 1 },
- { TNPP_F, "TXNP array parity error", -1, 1 },
- { TFTP_F, "TXFT array parity error", -1, 1 },
- { TCAP_F, "TXCA array parity error", -1, 1 },
- { TCIP_F, "TXCIF array parity error", -1, 1 },
- { RCAP_F, "RXCA array parity error", -1, 1 },
- { OTDD_F, "outbound request TLP discarded", -1, 1 },
- { RDPE_F, "Rx data parity error", -1, 1 },
- { TDUE_F, "Tx uncorrectable data error", -1, 1 },
- { 0, NULL, 0, 0 }
- };
-
static struct intr_info pcie_intr_info[] = {
{ MSTGRPPERR_F, "Master Response Read Queue parity error",
-1, 1 },
@@ -133,13 +112,7 @@ csio_t5_pcie_intr_handler(struct csio_hw *hw)
};
int fat;
- fat = csio_handle_intr_status(hw,
- PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
- sysbus_intr_info) +
- csio_handle_intr_status(hw,
- PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
- pcie_port_intr_info) +
- csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info);
+ fat = csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info);
if (fat)
csio_hw_fatal_err(hw);
}
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index dbe416ff46c2..ea0c31086cc6 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -952,8 +952,9 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
struct csio_hw *hw;
struct csio_lnode *ln;
- /* probe only T5 cards */
- if (!csio_is_t5((pdev->device & CSIO_HW_CHIP_MASK)))
+ /* probe only T5 and T6 cards */
+ if (!csio_is_t5((pdev->device & CSIO_HW_CHIP_MASK)) &&
+ !csio_is_t6((pdev->device & CSIO_HW_CHIP_MASK)))
return -ENODEV;
rv = csio_pci_init(pdev, &bars);
@@ -1253,3 +1254,4 @@ MODULE_LICENSE(CSIO_DRV_LICENSE);
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(FW_FNAME_T5);
+MODULE_FIRMWARE(FW_FNAME_T6);
diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h
index 5cc5d317a442..96b31e5af91e 100644
--- a/drivers/scsi/csiostor/csio_init.h
+++ b/drivers/scsi/csiostor/csio_init.h
@@ -50,7 +50,7 @@
#define CSIO_DRV_AUTHOR "Chelsio Communications"
#define CSIO_DRV_LICENSE "Dual BSD/GPL"
#define CSIO_DRV_DESC "Chelsio FCoE driver"
-#define CSIO_DRV_VERSION "1.0.0"
+#define CSIO_DRV_VERSION "1.0.0-ko"
extern struct fc_function_template csio_fc_transport_funcs;
extern struct fc_function_template csio_fc_transport_vport_funcs;
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index c00b2ff72b55..be5ee2d37815 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -238,14 +238,23 @@ csio_osname(uint8_t *buf, size_t buf_len)
}
static inline void
-csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len)
+csio_append_attrib(uint8_t **ptr, uint16_t type, void *val, size_t val_len)
{
+ uint16_t len;
struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr;
+
+ if (WARN_ON(val_len > U16_MAX))
+ return;
+
+ len = val_len;
+
ae->type = htons(type);
len += 4; /* includes attribute type and length */
len = (len + 3) & ~3; /* should be multiple of 4 bytes */
ae->len = htons(len);
- memcpy(ae->value, val, len);
+ memcpy(ae->value, val, val_len);
+ if (len > val_len)
+ memset(ae->value + val_len, 0, len - val_len);
*ptr += len;
}
@@ -335,7 +344,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
numattrs++;
val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
- (uint8_t *)&val,
+ &val,
FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
numattrs++;
@@ -346,23 +355,22 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
else
val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
- (uint8_t *)&val,
- FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
+ &val, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
numattrs++;
mfs = ln->ln_sparm.csp.sp_bb_data;
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
- (uint8_t *)&mfs, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
+ &mfs, sizeof(mfs));
numattrs++;
strcpy(buf, "csiostor");
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf,
- (uint16_t)strlen(buf));
+ strlen(buf));
numattrs++;
if (!csio_hostname(buf, sizeof(buf))) {
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME,
- buf, (uint16_t)strlen(buf));
+ buf, strlen(buf));
numattrs++;
}
attrib_blk->numattrs = htonl(numattrs);
@@ -444,33 +452,32 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
strcpy(buf, "Chelsio Communications");
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
- (uint16_t)strlen(buf));
+ strlen(buf));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
- hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn));
+ hw->vpd.sn, sizeof(hw->vpd.sn));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
- (uint16_t)sizeof(hw->vpd.id));
+ sizeof(hw->vpd.id));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
- hw->model_desc, (uint16_t)strlen(hw->model_desc));
+ hw->model_desc, strlen(hw->model_desc));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
- hw->hw_ver, (uint16_t)sizeof(hw->hw_ver));
+ hw->hw_ver, sizeof(hw->hw_ver));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
- hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str));
+ hw->fwrev_str, strlen(hw->fwrev_str));
numattrs++;
if (!csio_osname(buf, sizeof(buf))) {
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
- buf, (uint16_t)strlen(buf));
+ buf, strlen(buf));
numattrs++;
}
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
- (uint8_t *)&maxpayload,
- FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
+ &maxpayload, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
len = (uint32_t)(pld - (uint8_t *)cmd);
numattrs++;
attrib_blk->numattrs = htonl(numattrs);
@@ -1794,6 +1801,8 @@ csio_ln_mgmt_submit_req(struct csio_ioreq *io_req,
struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
int rv;
+ BUG_ON(pld_len > pld->len);
+
io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */
io_req->fw_handle = (uintptr_t) (io_req);
io_req->eq_idx = mgmtm->eq_idx;
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index e8f18174f2e9..c0a17789752f 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -480,12 +480,14 @@ csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx,
flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
if (flq_idx != -1) {
+ enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id);
struct csio_q *flq = hw->wrm.q_arr[flq_idx];
iqp.fl0paden = 1;
iqp.fl0packen = flq->un.fl.packen ? 1 : 0;
iqp.fl0fbmin = X_FETCHBURSTMIN_64B;
- iqp.fl0fbmax = X_FETCHBURSTMAX_512B;
+ iqp.fl0fbmax = ((chip == CHELSIO_T5) ?
+ X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B);
iqp.fl0size = csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ;
iqp.fl0addr = csio_q_pstart(hw, flq_idx);
}
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 5485d68f286a..a69a9ac836f5 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1608,6 +1608,7 @@ static int init_act_open(struct cxgbi_sock *csk)
struct neighbour *n = NULL;
void *daddr;
unsigned int step;
+ unsigned int rxq_idx;
unsigned int size, size6;
unsigned int linkspeed;
unsigned int rcv_winf, snd_winf;
@@ -1686,7 +1687,9 @@ static int init_act_open(struct cxgbi_sock *csk)
step = lldi->ntxq / lldi->nchan;
csk->txq_idx = cxgb4_port_idx(ndev) * step;
step = lldi->nrxq / lldi->nchan;
- csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
+ rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step);
+ cdev->rxq_idx_cntr++;
+ csk->rss_qid = lldi->rxq_ids[rxq_idx];
linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
csk->snd_win = cxgb4i_snd_win;
csk->rcv_win = cxgb4i_rcv_win;
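The init_act_open() change above spreads new connections over the rx queues owned by a port instead of always using the first one. A standalone sketch of that round-robin selection, with the counter passed in explicitly (names are illustrative, not the libcxgbi API):

/* Illustrative sketch of the per-port round-robin rx-queue selection. */
static unsigned int pick_rss_qid(const unsigned int *rxq_ids,
				 unsigned int port_idx, unsigned int step,
				 unsigned int *rxq_idx_cntr)
{
	/* Each port owns 'step' rx queues; rotate new connections across them. */
	unsigned int rxq_idx = port_idx * step + (*rxq_idx_cntr % step);

	(*rxq_idx_cntr)++;
	return rxq_ids[rxq_idx];
}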
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 37f07aaab1e4..31a5816c2e8d 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -477,6 +477,7 @@ struct cxgbi_device {
unsigned int skb_rx_extra; /* for msg coalesced mode */
unsigned int tx_max_size;
unsigned int rx_max_size;
+ unsigned int rxq_idx_cntr;
struct cxgbi_ports_map pmap;
void (*dev_ddp_cleanup)(struct cxgbi_device *);
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 256af819377d..6d95e8e147e0 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -15,6 +15,8 @@
#ifndef _CXLFLASH_COMMON_H
#define _CXLFLASH_COMMON_H
+#include <linux/async.h>
+#include <linux/cdev.h>
#include <linux/irq_poll.h>
#include <linux/list.h>
#include <linux/rwsem.h>
@@ -85,7 +87,8 @@ enum cxlflash_init_state {
INIT_STATE_NONE,
INIT_STATE_PCI,
INIT_STATE_AFU,
- INIT_STATE_SCSI
+ INIT_STATE_SCSI,
+ INIT_STATE_CDEV
};
enum cxlflash_state {
@@ -115,6 +118,8 @@ struct cxlflash_cfg {
struct pci_device_id *dev_id;
struct Scsi_Host *host;
int num_fc_ports;
+ struct cdev cdev;
+ struct device *chardev;
ulong cxlflash_regs_pci;
@@ -142,8 +147,10 @@ struct cxlflash_cfg {
wait_queue_head_t tmf_waitq;
spinlock_t tmf_slock;
bool tmf_active;
+ bool ws_unmap; /* Write-same unmap supported */
wait_queue_head_t reset_waitq;
enum cxlflash_state state;
+ async_cookie_t async_reset_cookie;
};
struct afu_cmd {
@@ -155,7 +162,10 @@ struct afu_cmd {
struct list_head queue;
u32 hwq_index;
- u8 cmd_tmf:1;
+ u8 cmd_tmf:1,
+ cmd_aborted:1;
+
+ struct list_head list; /* Pending commands link */
/* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned.
* However for performance reasons the IOARCB/IOASA should be
@@ -168,12 +178,20 @@ static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc)
return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
}
+static inline struct afu_cmd *sc_to_afuci(struct scsi_cmnd *sc)
+{
+ struct afu_cmd *afuc = sc_to_afuc(sc);
+
+ INIT_LIST_HEAD(&afuc->queue);
+ return afuc;
+}
+
static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
{
struct afu_cmd *afuc = sc_to_afuc(sc);
memset(afuc, 0, sizeof(*afuc));
- return afuc;
+ return sc_to_afuci(sc);
}
struct hwq {
@@ -191,9 +209,10 @@ struct hwq {
struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
ctx_hndl_t ctx_hndl; /* master's context handle */
u32 index; /* Index of this hwq */
+ struct list_head pending_cmds; /* Commands pending completion */
atomic_t hsq_credits;
- spinlock_t hsq_slock;
+ spinlock_t hsq_slock; /* Hardware send queue lock */
struct sisl_ioarcb *hsq_start;
struct sisl_ioarcb *hsq_end;
struct sisl_ioarcb *hsq_curr;
@@ -204,7 +223,6 @@ struct hwq {
bool toggle;
s64 room;
- spinlock_t rrin_slock; /* Lock to rrin queuing and cmd_room updates */
struct irq_poll irqpoll;
} __aligned(cache_line_size());
@@ -212,7 +230,7 @@ struct hwq {
struct afu {
struct hwq hwqs[CXLFLASH_MAX_HWQS];
int (*send_cmd)(struct afu *, struct afu_cmd *);
- void (*context_reset)(struct afu_cmd *);
+ int (*context_reset)(struct hwq *);
/* AFU HW */
struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */
@@ -245,21 +263,31 @@ static inline bool afu_is_irqpoll_enabled(struct afu *afu)
return !!afu->irqpoll_weight;
}
-static inline bool afu_is_cmd_mode(struct afu *afu, u64 cmd_mode)
+static inline bool afu_has_cap(struct afu *afu, u64 cap)
{
u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT;
- return afu_cap & cmd_mode;
+ return afu_cap & cap;
+}
+
+static inline bool afu_is_afu_debug(struct afu *afu)
+{
+ return afu_has_cap(afu, SISL_INTVER_CAP_AFU_DEBUG);
+}
+
+static inline bool afu_is_lun_provision(struct afu *afu)
+{
+ return afu_has_cap(afu, SISL_INTVER_CAP_LUN_PROVISION);
}
static inline bool afu_is_sq_cmd_mode(struct afu *afu)
{
- return afu_is_cmd_mode(afu, SISL_INTVER_CAP_SQ_CMD_MODE);
+ return afu_has_cap(afu, SISL_INTVER_CAP_SQ_CMD_MODE);
}
static inline bool afu_is_ioarrin_cmd_mode(struct afu *afu)
{
- return afu_is_cmd_mode(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE);
+ return afu_has_cap(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE);
}
static inline u64 lun_to_lunid(u64 lun)
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index a7d57c343492..077f62e208aa 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -34,6 +34,10 @@ MODULE_AUTHOR("Manoj N. Kumar <[email protected]>");
MODULE_AUTHOR("Matthew R. Ochs <[email protected]>");
MODULE_LICENSE("GPL");
+static struct class *cxlflash_class;
+static u32 cxlflash_major;
+static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
+
/**
* process_cmd_err() - command error handler
* @cmd: AFU command that experienced the error.
@@ -151,9 +155,10 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
* cmd_complete() - command completion handler
* @cmd: AFU command that has completed.
*
- * Prepares and submits command that has either completed or timed out to
- * the SCSI stack. Checks AFU command back into command pool for non-internal
- * (cmd->scp populated) commands.
+ * For SCSI commands this routine prepares and submits commands that have
+ * either completed or timed out to the SCSI stack. For internal commands
+ * (TMF or AFU), this routine simply notifies the originator that the
+ * command has completed.
*/
static void cmd_complete(struct afu_cmd *cmd)
{
@@ -162,7 +167,11 @@ static void cmd_complete(struct afu_cmd *cmd)
struct afu *afu = cmd->parent;
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
- bool cmd_is_tmf;
+ struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
+
+ spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
+ list_del(&cmd->list);
+ spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
if (cmd->scp) {
scp = cmd->scp;
@@ -171,73 +180,124 @@ static void cmd_complete(struct afu_cmd *cmd)
else
scp->result = (DID_OK << 16);
- cmd_is_tmf = cmd->cmd_tmf;
-
dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
__func__, scp, scp->result, cmd->sa.ioasc);
-
scp->scsi_done(scp);
-
- if (cmd_is_tmf) {
- spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
- cfg->tmf_active = false;
- wake_up_all_locked(&cfg->tmf_waitq);
- spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
- }
+ } else if (cmd->cmd_tmf) {
+ spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
+ cfg->tmf_active = false;
+ wake_up_all_locked(&cfg->tmf_waitq);
+ spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
} else
complete(&cmd->cevent);
}
/**
- * context_reset() - reset command owner context via specified register
- * @cmd: AFU command that timed out.
+ * flush_pending_cmds() - flush all pending commands on this hardware queue
+ * @hwq: Hardware queue to flush.
+ *
+ * The hardware send queue lock associated with this hardware queue must be
+ * held when calling this routine.
+ */
+static void flush_pending_cmds(struct hwq *hwq)
+{
+ struct cxlflash_cfg *cfg = hwq->afu->parent;
+ struct afu_cmd *cmd, *tmp;
+ struct scsi_cmnd *scp;
+ ulong lock_flags;
+
+ list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
+ /* Bypass command when on a doneq, cmd_complete() will handle */
+ if (!list_empty(&cmd->queue))
+ continue;
+
+ list_del(&cmd->list);
+
+ if (cmd->scp) {
+ scp = cmd->scp;
+ scp->result = (DID_IMM_RETRY << 16);
+ scp->scsi_done(scp);
+ } else {
+ cmd->cmd_aborted = true;
+
+ if (cmd->cmd_tmf) {
+ spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
+ cfg->tmf_active = false;
+ wake_up_all_locked(&cfg->tmf_waitq);
+ spin_unlock_irqrestore(&cfg->tmf_slock,
+ lock_flags);
+ } else
+ complete(&cmd->cevent);
+ }
+ }
+}
+
+/**
+ * context_reset() - reset context via specified register
+ * @hwq: Hardware queue owning the context to be reset.
* @reset_reg: MMIO register to perform reset.
+ *
+ * When the reset is successful, the SISLite specification guarantees that
+ * the AFU has aborted all currently pending I/O. Accordingly, these commands
+ * must be flushed.
+ *
+ * Return: 0 on success, -errno on failure
*/
-static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg)
+static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
- int nretry = 0;
- u64 rrin = 0x1;
- struct afu *afu = cmd->parent;
- struct cxlflash_cfg *cfg = afu->parent;
+ struct cxlflash_cfg *cfg = hwq->afu->parent;
struct device *dev = &cfg->dev->dev;
+ int rc = -ETIMEDOUT;
+ int nretry = 0;
+ u64 val = 0x1;
+ ulong lock_flags;
- dev_dbg(dev, "%s: cmd=%p\n", __func__, cmd);
+ dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);
- writeq_be(rrin, reset_reg);
+ spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
+
+ writeq_be(val, reset_reg);
do {
- rrin = readq_be(reset_reg);
- if (rrin != 0x1)
+ val = readq_be(reset_reg);
+ if ((val & 0x1) == 0x0) {
+ rc = 0;
break;
+ }
+
/* Double delay each time */
udelay(1 << nretry);
} while (nretry++ < MC_ROOM_RETRY_CNT);
- dev_dbg(dev, "%s: returning rrin=%016llx nretry=%d\n",
- __func__, rrin, nretry);
+ if (!rc)
+ flush_pending_cmds(hwq);
+
+ spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
+
+ dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
+ __func__, rc, val, nretry);
+ return rc;
}
/**
- * context_reset_ioarrin() - reset command owner context via IOARRIN register
- * @cmd: AFU command that timed out.
+ * context_reset_ioarrin() - reset context via IOARRIN register
+ * @hwq: Hardware queue owning the context to be reset.
+ *
+ * Return: 0 on success, -errno on failure
*/
-static void context_reset_ioarrin(struct afu_cmd *cmd)
+static int context_reset_ioarrin(struct hwq *hwq)
{
- struct afu *afu = cmd->parent;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
-
- context_reset(cmd, &hwq->host_map->ioarrin);
+ return context_reset(hwq, &hwq->host_map->ioarrin);
}
/**
- * context_reset_sq() - reset command owner context w/ SQ Context Reset register
- * @cmd: AFU command that timed out.
+ * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
+ * @hwq: Hardware queue owning the context to be reset.
+ *
+ * Return: 0 on success, -errno on failure
*/
-static void context_reset_sq(struct afu_cmd *cmd)
+static int context_reset_sq(struct hwq *hwq)
{
- struct afu *afu = cmd->parent;
- struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
-
- context_reset(cmd, &hwq->host_map->sq_ctx_reset);
+ return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}
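context_reset() above writes the reset bit and then polls it with a delay that doubles on every pass until the hardware clears the bit or the retry budget runs out. A userspace-flavoured sketch of that loop, assuming a plain memory-mapped register and usleep() in place of udelay():

/* Illustrative userspace sketch; usleep() stands in for udelay(). */
#include <errno.h>
#include <stdint.h>
#include <unistd.h>

#define RESET_RETRY_CNT_SKETCH 8

static int poll_context_reset(volatile uint64_t *reset_reg)
{
	int nretry = 0;

	*reset_reg = 0x1;				/* request the reset */
	do {
		if ((*reset_reg & 0x1) == 0)
			return 0;			/* hardware cleared the bit */
		usleep(1U << nretry);			/* double the delay each pass */
	} while (nretry++ < RESET_RETRY_CNT_SKETCH);

	return -ETIMEDOUT;
}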
/**
@@ -261,7 +321,7 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
* To avoid the performance penalty of MMIO, spread the update of
* 'room' over multiple commands.
*/
- spin_lock_irqsave(&hwq->rrin_slock, lock_flags);
+ spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
if (--hwq->room < 0) {
room = readq_be(&hwq->host_map->cmd_room);
if (room <= 0) {
@@ -275,9 +335,10 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
hwq->room = room - 1;
}
+ list_add(&cmd->list, &hwq->pending_cmds);
writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
- spin_unlock_irqrestore(&hwq->rrin_slock, lock_flags);
+ spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
return rc;
@@ -315,6 +376,8 @@ static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
hwq->hsq_curr++;
else
hwq->hsq_curr = hwq->hsq_start;
+
+ list_add(&cmd->list, &hwq->pending_cmds);
writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);
spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
@@ -332,8 +395,7 @@ out:
* @afu: AFU associated with the host.
* @cmd: AFU command that was sent.
*
- * Return:
- * 0 on success, -1 on timeout/error
+ * Return: 0 on success, -errno on failure
*/
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
@@ -343,15 +405,16 @@ static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
- if (!timeout) {
- afu->context_reset(cmd);
- rc = -1;
- }
+ if (!timeout)
+ rc = -ETIMEDOUT;
+
+ if (cmd->cmd_aborted)
+ rc = -EAGAIN;
if (unlikely(cmd->sa.ioasc != 0)) {
dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
- rc = -1;
+ rc = -EIO;
}
return rc;
@@ -396,25 +459,35 @@ static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
/**
* send_tmf() - sends a Task Management Function (TMF)
- * @afu: AFU to checkout from.
- * @scp: SCSI command from stack.
+ * @cfg: Internal structure associated with the host.
+ * @sdev: SCSI device destined for TMF.
* @tmfcmd: TMF command to send.
*
* Return:
- * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
+ * 0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
*/
-static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
+static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
+ u64 tmfcmd)
{
- struct Scsi_Host *host = scp->device->host;
- struct cxlflash_cfg *cfg = shost_priv(host);
- struct afu_cmd *cmd = sc_to_afucz(scp);
+ struct afu *afu = cfg->afu;
+ struct afu_cmd *cmd = NULL;
struct device *dev = &cfg->dev->dev;
- int hwq_index = cmd_to_target_hwq(host, scp, afu);
- struct hwq *hwq = get_hwq(afu, hwq_index);
+ struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
+ char *buf = NULL;
ulong lock_flags;
int rc = 0;
ulong to;
+ buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
+ if (unlikely(!buf)) {
+ dev_err(dev, "%s: no memory for command\n", __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
+ INIT_LIST_HEAD(&cmd->queue);
+
/* When Task Management Function is active do not send another */
spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
if (cfg->tmf_active)
@@ -424,15 +497,14 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
cfg->tmf_active = true;
spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
- cmd->scp = scp;
cmd->parent = afu;
cmd->cmd_tmf = true;
- cmd->hwq_index = hwq_index;
+ cmd->hwq_index = hwq->index;
cmd->rcb.ctx_id = hwq->ctx_hndl;
cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
- cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
- cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
+ cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
+ cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
SISL_REQ_FLAGS_SUP_UNDERRUN |
SISL_REQ_FLAGS_TMF_CMD);
@@ -453,12 +525,20 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
cfg->tmf_slock,
to);
if (!to) {
- cfg->tmf_active = false;
dev_err(dev, "%s: TMF timed out\n", __func__);
- rc = -1;
- }
+ rc = -ETIMEDOUT;
+ } else if (cmd->cmd_aborted) {
+ dev_err(dev, "%s: TMF aborted\n", __func__);
+ rc = -EAGAIN;
+ } else if (cmd->sa.ioasc) {
+ dev_err(dev, "%s: TMF failed ioasc=%08x\n",
+ __func__, cmd->sa.ioasc);
+ rc = -EIO;
+ }
+ cfg->tmf_active = false;
spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
+ kfree(buf);
return rc;
}
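send_tmf() above over-allocates its command buffer by (alignment - 1) bytes and rounds the pointer up so the IOARCB lands on the required boundary. A minimal sketch of that pattern, using calloc() and pointer arithmetic in place of kzalloc()/PTR_ALIGN() (align must be a power of two):

/* Illustrative sketch; calloc/uintptr_t replace kzalloc/PTR_ALIGN. */
#include <stdint.h>
#include <stdlib.h>

static void *alloc_aligned_cmd(size_t size, size_t align, void **raw_out)
{
	void *raw = calloc(1, size + align - 1);

	if (!raw)
		return NULL;
	*raw_out = raw;	/* the caller frees the raw pointer, not the aligned one */
	return (void *)(((uintptr_t)raw + align - 1) & ~(uintptr_t)(align - 1));
}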
@@ -485,7 +565,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
struct cxlflash_cfg *cfg = shost_priv(host);
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
- struct afu_cmd *cmd = sc_to_afucz(scp);
+ struct afu_cmd *cmd = sc_to_afuci(scp);
struct scatterlist *sg = scsi_sglist(scp);
int hwq_index = cmd_to_target_hwq(host, scp, afu);
struct hwq *hwq = get_hwq(afu, hwq_index);
@@ -585,6 +665,20 @@ static void free_mem(struct cxlflash_cfg *cfg)
}
/**
+ * cxlflash_reset_sync() - synchronizing point for asynchronous resets
+ * @cfg: Internal structure associated with the host.
+ */
+static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
+{
+ if (cfg->async_reset_cookie == 0)
+ return;
+
+ /* Wait until all async calls prior to this cookie have completed */
+ async_synchronize_cookie(cfg->async_reset_cookie + 1);
+ cfg->async_reset_cookie = 0;
+}
+
+/**
* stop_afu() - stops the AFU command timers and unmaps the MMIO space
* @cfg: Internal structure associated with the host.
*
@@ -600,6 +694,8 @@ static void stop_afu(struct cxlflash_cfg *cfg)
int i;
cancel_work_sync(&cfg->work_q);
+ if (!current_is_async())
+ cxlflash_reset_sync(cfg);
if (likely(afu)) {
while (atomic_read(&afu->cmds_active))
@@ -677,6 +773,7 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)
struct afu *afu = cfg->afu;
struct device *dev = &cfg->dev->dev;
struct hwq *hwq;
+ ulong lock_flags;
if (!afu) {
dev_err(dev, "%s: returning with NULL afu\n", __func__);
@@ -694,6 +791,10 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)
if (index != PRIMARY_HWQ)
WARN_ON(cxl_release_context(hwq->ctx));
hwq->ctx = NULL;
+
+ spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
+ flush_pending_cmds(hwq);
+ spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}
/**
@@ -788,6 +889,46 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
}
/**
+ * cxlflash_get_minor() - gets the first available minor number
+ *
+ * Return: Unique minor number that can be used to create the character device.
+ */
+static int cxlflash_get_minor(void)
+{
+ int minor;
+ long bit;
+
+ bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
+ if (bit >= CXLFLASH_MAX_ADAPTERS)
+ return -1;
+
+ minor = bit & MINORMASK;
+ set_bit(minor, cxlflash_minor);
+ return minor;
+}
+
+/**
+ * cxlflash_put_minor() - releases the minor number
+ * @minor: Minor number that is no longer needed.
+ */
+static void cxlflash_put_minor(int minor)
+{
+ clear_bit(minor, cxlflash_minor);
+}
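cxlflash_get_minor()/cxlflash_put_minor() above hand out character-device minor numbers from a bitmap: find the first clear bit, set it, and clear it again on release. A simplified sketch of the same bookkeeping, with a plain array standing in for the kernel bitmap helpers:

/* Illustrative sketch; a byte array replaces find_first_zero_bit() et al. */
#include <stdbool.h>

#define MAX_ADAPTERS_SKETCH 32

static bool minor_used[MAX_ADAPTERS_SKETCH];

static int get_minor_sketch(void)
{
	for (int i = 0; i < MAX_ADAPTERS_SKETCH; i++) {
		if (!minor_used[i]) {
			minor_used[i] = true;
			return i;		/* first free minor number */
		}
	}
	return -1;				/* all adapter slots exhausted */
}

static void put_minor_sketch(int minor)
{
	minor_used[minor] = false;
}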
+
+/**
+ * cxlflash_release_chrdev() - release the character device for the host
+ * @cfg: Internal structure associated with the host.
+ */
+static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
+{
+ device_unregister(cfg->chardev);
+ cfg->chardev = NULL;
+ cdev_del(&cfg->cdev);
+ cxlflash_put_minor(MINOR(cfg->cdev.dev));
+}
+
+/**
* cxlflash_remove() - PCI entry point to tear down host
* @pdev: PCI device associated with the host.
*
@@ -822,6 +963,8 @@ static void cxlflash_remove(struct pci_dev *pdev)
cxlflash_stop_term_user_contexts(cfg);
switch (cfg->init_state) {
+ case INIT_STATE_CDEV:
+ cxlflash_release_chrdev(cfg);
case INIT_STATE_SCSI:
cxlflash_term_local_luns(cfg);
scsi_remove_host(cfg->host);
@@ -1690,6 +1833,18 @@ static int init_global(struct cxlflash_cfg *cfg)
SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
&hwq->ctrl_map->ctx_cap);
}
+
+ /*
+ * Determine write-same unmap support for host by evaluating the unmap
+ * sector support bit of the context control register associated with
+ * the primary hardware queue. Note that while this status is reflected
+ * in a context register, the outcome can be assumed to be host-wide.
+ */
+ hwq = get_hwq(afu, PRIMARY_HWQ);
+ reg = readq_be(&hwq->host_map->ctx_ctrl);
+ if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
+ cfg->ws_unmap = true;
+
/* Initialize heartbeat */
afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
out:
@@ -1722,7 +1877,10 @@ static int start_afu(struct cxlflash_cfg *cfg)
hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
hwq->hrrq_curr = hwq->hrrq_start;
hwq->toggle = 1;
+
+ /* Initialize spin locks */
spin_lock_init(&hwq->hrrq_slock);
+ spin_lock_init(&hwq->hsq_slock);
/* Initialize SQ */
if (afu_is_sq_cmd_mode(afu)) {
@@ -1731,7 +1889,6 @@ static int start_afu(struct cxlflash_cfg *cfg)
hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
hwq->hsq_curr = hwq->hsq_start;
- spin_lock_init(&hwq->hsq_slock);
atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
}
@@ -1821,6 +1978,7 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
hwq->afu = cfg->afu;
hwq->index = index;
+ INIT_LIST_HEAD(&hwq->pending_cmds);
if (index == PRIMARY_HWQ)
ctx = cxl_get_context(cfg->dev);
@@ -1984,7 +2142,6 @@ static int init_afu(struct cxlflash_cfg *cfg)
for (i = 0; i < afu->num_hwqs; i++) {
hwq = get_hwq(afu, i);
- spin_lock_init(&hwq->rrin_slock);
hwq->room = readq_be(&hwq->host_map->cmd_room);
}
@@ -2003,29 +2160,107 @@ err1:
}
/**
- * cxlflash_afu_sync() - builds and sends an AFU sync command
+ * afu_reset() - resets the AFU
+ * @cfg: Internal structure associated with the host.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int afu_reset(struct cxlflash_cfg *cfg)
+{
+ struct device *dev = &cfg->dev->dev;
+ int rc = 0;
+
+ /* Stop the context before the reset. Since the context is
+ * no longer available, restart it after the reset completes.
+ */
+ term_afu(cfg);
+
+ rc = init_afu(cfg);
+
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/**
+ * drain_ioctls() - wait until all currently executing ioctls have completed
+ * @cfg: Internal structure associated with the host.
+ *
+ * Obtain write access to read/write semaphore that wraps ioctl
+ * handling to 'drain' ioctls currently executing.
+ */
+static void drain_ioctls(struct cxlflash_cfg *cfg)
+{
+ down_write(&cfg->ioctl_rwsem);
+ up_write(&cfg->ioctl_rwsem);
+}
+
+/**
+ * cxlflash_async_reset_host() - asynchronous host reset handler
+ * @data: Private data provided while scheduling reset.
+ * @cookie: Cookie that can be used for checkpointing.
+ */
+static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
+{
+ struct cxlflash_cfg *cfg = data;
+ struct device *dev = &cfg->dev->dev;
+ int rc = 0;
+
+ if (cfg->state != STATE_RESET) {
+ dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
+ __func__, cfg->state);
+ goto out;
+ }
+
+ drain_ioctls(cfg);
+ cxlflash_mark_contexts_error(cfg);
+ rc = afu_reset(cfg);
+ if (rc)
+ cfg->state = STATE_FAILTERM;
+ else
+ cfg->state = STATE_NORMAL;
+ wake_up_all(&cfg->reset_waitq);
+
+out:
+ scsi_unblock_requests(cfg->host);
+}
+
+/**
+ * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
+ * @cfg: Internal structure associated with the host.
+ */
+static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
+{
+ struct device *dev = &cfg->dev->dev;
+
+ if (cfg->state != STATE_NORMAL) {
+ dev_dbg(dev, "%s: Not performing reset state=%d\n",
+ __func__, cfg->state);
+ return;
+ }
+
+ cfg->state = STATE_RESET;
+ scsi_block_requests(cfg->host);
+ cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
+ cfg);
+}
+
+/**
+ * send_afu_cmd() - builds and sends an internal AFU command
* @afu: AFU associated with the host.
- * @ctx_hndl_u: Identifies context requesting sync.
- * @res_hndl_u: Identifies resource requesting sync.
- * @mode: Type of sync to issue (lightweight, heavyweight, global).
+ * @rcb: Pre-populated IOARCB describing command to send.
*
- * The AFU can only take 1 sync command at a time. This routine enforces this
- * limitation by using a mutex to provide exclusive access to the AFU during
- * the sync. This design point requires calling threads to not be on interrupt
- * context due to the possibility of sleeping during concurrent sync operations.
+ * The AFU can only take one internal AFU command at a time. This limitation is
+ * enforced by using a mutex to provide exclusive access to the AFU during the
+ * operation. This design point requires calling threads to not be on interrupt
+ * context due to the possibility of sleeping during concurrent AFU operations.
*
- * AFU sync operations are only necessary and allowed when the device is
- * operating normally. When not operating normally, sync requests can occur as
- * part of cleaning up resources associated with an adapter prior to removal.
- * In this scenario, these requests are simply ignored (safe due to the AFU
- * going away).
+ * The command status is optionally passed back to the caller when the caller
+ * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
*
* Return:
- * 0 on success
- * -1 on failure
+ * 0 on success, -errno on failure
*/
-int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
- res_hndl_t res_hndl_u, u8 mode)
+static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
{
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
@@ -2033,6 +2268,7 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
char *buf = NULL;
int rc = 0;
+ int nretry = 0;
static DEFINE_MUTEX(sync_active);
if (cfg->state != STATE_NORMAL) {
@@ -2043,39 +2279,52 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
mutex_lock(&sync_active);
atomic_inc(&afu->cmds_active);
- buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
+ buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
if (unlikely(!buf)) {
dev_err(dev, "%s: no memory for command\n", __func__);
- rc = -1;
+ rc = -ENOMEM;
goto out;
}
cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
+
+retry:
+ memset(cmd, 0, sizeof(*cmd));
+ memcpy(&cmd->rcb, rcb, sizeof(*rcb));
+ INIT_LIST_HEAD(&cmd->queue);
init_completion(&cmd->cevent);
cmd->parent = afu;
cmd->hwq_index = hwq->index;
-
- dev_dbg(dev, "%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
-
- cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
cmd->rcb.ctx_id = hwq->ctx_hndl;
- cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
- cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
-
- cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */
- cmd->rcb.cdb[1] = mode;
- /* The cdb is aligned, no unaligned accessors required */
- *((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
- *((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
+ dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
+ __func__, afu, cmd, cmd->rcb.cdb[0], nretry);
rc = afu->send_cmd(afu, cmd);
- if (unlikely(rc))
+ if (unlikely(rc)) {
+ rc = -ENOBUFS;
goto out;
+ }
rc = wait_resp(afu, cmd);
- if (unlikely(rc))
- rc = -1;
+ switch (rc) {
+ case -ETIMEDOUT:
+ rc = afu->context_reset(hwq);
+ if (rc) {
+ cxlflash_schedule_async_reset(cfg);
+ break;
+ }
+ /* fall through to retry */
+ case -EAGAIN:
+ if (++nretry < 2)
+ goto retry;
+ /* fall through to exit */
+ default:
+ break;
+ }
+
+ if (rcb->ioasa)
+ *rcb->ioasa = cmd->sa;
out:
atomic_dec(&afu->cmds_active);
mutex_unlock(&sync_active);
@@ -2085,38 +2334,88 @@ out:
}
/**
- * afu_reset() - resets the AFU
- * @cfg: Internal structure associated with the host.
+ * cxlflash_afu_sync() - builds and sends an AFU sync command
+ * @afu: AFU associated with the host.
+ * @ctx: Identifies context requesting sync.
+ * @res: Identifies resource requesting sync.
+ * @mode: Type of sync to issue (lightweight, heavyweight, global).
*
- * Return: 0 on success, -errno on failure
+ * AFU sync operations are only necessary and allowed when the device is
+ * operating normally. When not operating normally, sync requests can occur as
+ * part of cleaning up resources associated with an adapter prior to removal.
+ * In this scenario, these requests are simply ignored (safe due to the AFU
+ * going away).
+ *
+ * Return:
+ * 0 on success, -errno on failure
*/
-static int afu_reset(struct cxlflash_cfg *cfg)
+int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
{
+ struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
- int rc = 0;
+ struct sisl_ioarcb rcb = { 0 };
- /* Stop the context before the reset. Since the context is
- * no longer available restart it after the reset is complete
- */
- term_afu(cfg);
+ dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
+ __func__, afu, ctx, res, mode);
- rc = init_afu(cfg);
+ rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
+ rcb.msi = SISL_MSI_RRQ_UPDATED;
+ rcb.timeout = MC_AFU_SYNC_TIMEOUT;
- dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
- return rc;
+ rcb.cdb[0] = SISL_AFU_CMD_SYNC;
+ rcb.cdb[1] = mode;
+ put_unaligned_be16(ctx, &rcb.cdb[2]);
+ put_unaligned_be32(res, &rcb.cdb[4]);
+
+ return send_afu_cmd(afu, &rcb);
}
/**
- * drain_ioctls() - wait until all currently executing ioctls have completed
- * @cfg: Internal structure associated with the host.
+ * cxlflash_eh_abort_handler() - abort a SCSI command
+ * @scp: SCSI command to abort.
*
- * Obtain write access to read/write semaphore that wraps ioctl
- * handling to 'drain' ioctls currently executing.
+ * CXL Flash devices do not support a single command abort. Reset the context
+ * as per SISLite specification. Flush any pending commands in the hardware
+ * queue before the reset.
+ *
+ * Return: SUCCESS/FAILED as defined in scsi/scsi.h
*/
-static void drain_ioctls(struct cxlflash_cfg *cfg)
+static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
{
- down_write(&cfg->ioctl_rwsem);
- up_write(&cfg->ioctl_rwsem);
+ int rc = FAILED;
+ struct Scsi_Host *host = scp->device->host;
+ struct cxlflash_cfg *cfg = shost_priv(host);
+ struct afu_cmd *cmd = sc_to_afuc(scp);
+ struct device *dev = &cfg->dev->dev;
+ struct afu *afu = cfg->afu;
+ struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
+
+ dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
+ "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
+ scp->device->channel, scp->device->id, scp->device->lun,
+ get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
+ get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+
+ /* When the state is not normal, another reset/reload is in progress.
+ * Return FAILED and let the mid-layer invoke the host reset handler.
+ */
+ if (cfg->state != STATE_NORMAL) {
+ dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
+ __func__, cfg->state);
+ goto out;
+ }
+
+ rc = afu->context_reset(hwq);
+ if (unlikely(rc))
+ goto out;
+
+ rc = SUCCESS;
+
+out:
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ return rc;
}
/**
@@ -2130,24 +2429,18 @@ static void drain_ioctls(struct cxlflash_cfg *cfg)
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
int rc = SUCCESS;
- struct Scsi_Host *host = scp->device->host;
+ struct scsi_device *sdev = scp->device;
+ struct Scsi_Host *host = sdev->host;
struct cxlflash_cfg *cfg = shost_priv(host);
struct device *dev = &cfg->dev->dev;
- struct afu *afu = cfg->afu;
int rcr = 0;
- dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
- "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
- scp->device->channel, scp->device->id, scp->device->lun,
- get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
-
+ dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
+ host->host_no, sdev->channel, sdev->id, sdev->lun);
retry:
switch (cfg->state) {
case STATE_NORMAL:
- rcr = send_tmf(afu, scp, TMF_LUN_RESET);
+ rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
if (unlikely(rcr))
rc = FAILED;
break;
@@ -2184,13 +2477,7 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
struct cxlflash_cfg *cfg = shost_priv(host);
struct device *dev = &cfg->dev->dev;
- dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
- "cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
- scp->device->channel, scp->device->id, scp->device->lun,
- get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
- get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+ dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
switch (cfg->state) {
case STATE_NORMAL:
@@ -2427,7 +2714,14 @@ static ssize_t lun_mode_store(struct device *dev,
static ssize_t ioctl_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
+ ssize_t bytes = 0;
+
+ bytes = scnprintf(buf, PAGE_SIZE,
+ "disk: %u\n", DK_CXLFLASH_VERSION_0);
+ bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+ "host: %u\n", HT_CXLFLASH_VERSION_0);
+
+ return bytes;
}
/**
@@ -2833,6 +3127,7 @@ static struct scsi_host_template driver_template = {
.ioctl = cxlflash_ioctl,
.proc_name = CXLFLASH_NAME,
.queuecommand = cxlflash_queuecommand,
+ .eh_abort_handler = cxlflash_eh_abort_handler,
.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
.change_queue_depth = cxlflash_change_queue_depth,
@@ -2923,6 +3218,397 @@ static void cxlflash_worker_thread(struct work_struct *work)
}
/**
+ * cxlflash_chr_open() - character device open handler
+ * @inode: Device inode associated with this character device.
+ * @file: File pointer for this device.
+ *
+ * Only users with admin privileges are allowed to open the character device.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int cxlflash_chr_open(struct inode *inode, struct file *file)
+{
+ struct cxlflash_cfg *cfg;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
+ file->private_data = cfg;
+
+ return 0;
+}
+
+/**
+ * decode_hioctl() - translates encoded host ioctl to easily identifiable string
+ * @cmd: The host ioctl command to decode.
+ *
+ * Return: A string identifying the decoded host ioctl.
+ */
+static char *decode_hioctl(int cmd)
+{
+ switch (cmd) {
+ case HT_CXLFLASH_LUN_PROVISION:
+ return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
+ }
+
+ return "UNKNOWN";
+}
+
+/**
+ * cxlflash_lun_provision() - host LUN provisioning handler
+ * @cfg: Internal structure associated with the host.
+ * @arg: Kernel copy of userspace ioctl data structure.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
+ struct ht_cxlflash_lun_provision *lunprov)
+{
+ struct afu *afu = cfg->afu;
+ struct device *dev = &cfg->dev->dev;
+ struct sisl_ioarcb rcb;
+ struct sisl_ioasa asa;
+ __be64 __iomem *fc_port_regs;
+ u16 port = lunprov->port;
+ u16 scmd = lunprov->hdr.subcmd;
+ u16 type;
+ u64 reg;
+ u64 size;
+ u64 lun_id;
+ int rc = 0;
+
+ if (!afu_is_lun_provision(afu)) {
+ rc = -ENOTSUPP;
+ goto out;
+ }
+
+ if (port >= cfg->num_fc_ports) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ switch (scmd) {
+ case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
+ type = SISL_AFU_LUN_PROVISION_CREATE;
+ size = lunprov->size;
+ lun_id = 0;
+ break;
+ case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
+ type = SISL_AFU_LUN_PROVISION_DELETE;
+ size = 0;
+ lun_id = lunprov->lun_id;
+ break;
+ case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
+ fc_port_regs = get_fc_port_regs(cfg, port);
+
+ reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
+ lunprov->max_num_luns = reg;
+ reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
+ lunprov->cur_num_luns = reg;
+ reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
+ lunprov->max_cap_port = reg;
+ reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
+ lunprov->cur_cap_port = reg;
+
+ goto out;
+ default:
+ rc = -EINVAL;
+ goto out;
+ }
+
+ memset(&rcb, 0, sizeof(rcb));
+ memset(&asa, 0, sizeof(asa));
+ rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
+ rcb.lun_id = lun_id;
+ rcb.msi = SISL_MSI_RRQ_UPDATED;
+ rcb.timeout = MC_LUN_PROV_TIMEOUT;
+ rcb.ioasa = &asa;
+
+ rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
+ rcb.cdb[1] = type;
+ rcb.cdb[2] = port;
+ put_unaligned_be64(size, &rcb.cdb[8]);
+
+ rc = send_afu_cmd(afu, &rcb);
+ if (rc) {
+ dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
+ __func__, rc, asa.ioasc, asa.afu_extra);
+ goto out;
+ }
+
+ if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
+ lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
+ memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
+ }
+out:
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/**
+ * cxlflash_afu_debug() - host AFU debug handler
+ * @cfg: Internal structure associated with the host.
+ * @arg: Kernel copy of userspace ioctl data structure.
+ *
+ * For debug requests requiring a data buffer, always provide an aligned
+ * (cache line) buffer to the AFU to appease any alignment requirements.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
+ struct ht_cxlflash_afu_debug *afu_dbg)
+{
+ struct afu *afu = cfg->afu;
+ struct device *dev = &cfg->dev->dev;
+ struct sisl_ioarcb rcb;
+ struct sisl_ioasa asa;
+ char *buf = NULL;
+ char *kbuf = NULL;
+ void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
+ u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
+ u32 ulen = afu_dbg->data_len;
+ bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
+ int rc = 0;
+
+ if (!afu_is_afu_debug(afu)) {
+ rc = -ENOTSUPP;
+ goto out;
+ }
+
+ if (ulen) {
+ req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
+
+ if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely(!access_ok(is_write ? VERIFY_READ : VERIFY_WRITE,
+ ubuf, ulen))) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
+ if (unlikely(!buf)) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ kbuf = PTR_ALIGN(buf, cache_line_size());
+
+ if (is_write) {
+ req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
+
+ rc = copy_from_user(kbuf, ubuf, ulen);
+ if (unlikely(rc))
+ goto out;
+ }
+ }
+
+ memset(&rcb, 0, sizeof(rcb));
+ memset(&asa, 0, sizeof(asa));
+
+ rcb.req_flags = req_flags;
+ rcb.msi = SISL_MSI_RRQ_UPDATED;
+ rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
+ rcb.ioasa = &asa;
+
+ if (ulen) {
+ rcb.data_len = ulen;
+ rcb.data_ea = (uintptr_t)kbuf;
+ }
+
+ rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
+ memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
+ HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
+
+ rc = send_afu_cmd(afu, &rcb);
+ if (rc) {
+ dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
+ __func__, rc, asa.ioasc, asa.afu_extra);
+ goto out;
+ }
+
+ if (ulen && !is_write)
+ rc = copy_to_user(ubuf, kbuf, ulen);
+out:
+ kfree(buf);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/**
+ * cxlflash_chr_ioctl() - character device IOCTL handler
+ * @file: File pointer for this device.
+ * @cmd: IOCTL command.
+ * @arg: Userspace ioctl data structure.
+ *
+ * A read/write semaphore is used to implement a 'drain' of currently
+ * running ioctls. The read semaphore is taken at the beginning of each
+ * ioctl thread and released upon concluding execution. Additionally the
+ * semaphore should be released and then reacquired in any ioctl execution
+ * path which will wait for an event to occur that is outside the scope of
+ * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
+ * a thread simply needs to acquire the write semaphore.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ typedef int (*hioctl) (struct cxlflash_cfg *, void *);
+
+ struct cxlflash_cfg *cfg = file->private_data;
+ struct device *dev = &cfg->dev->dev;
+ char buf[sizeof(union cxlflash_ht_ioctls)];
+ void __user *uarg = (void __user *)arg;
+ struct ht_cxlflash_hdr *hdr;
+ size_t size = 0;
+ bool known_ioctl = false;
+ int idx = 0;
+ int rc = 0;
+ hioctl do_ioctl = NULL;
+
+ static const struct {
+ size_t size;
+ hioctl ioctl;
+ } ioctl_tbl[] = { /* NOTE: order matters here */
+ { sizeof(struct ht_cxlflash_lun_provision),
+ (hioctl)cxlflash_lun_provision },
+ { sizeof(struct ht_cxlflash_afu_debug),
+ (hioctl)cxlflash_afu_debug },
+ };
+
+ /* Hold read semaphore so we can drain if needed */
+ down_read(&cfg->ioctl_rwsem);
+
+ dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
+ __func__, cmd, idx, sizeof(ioctl_tbl));
+
+ switch (cmd) {
+ case HT_CXLFLASH_LUN_PROVISION:
+ case HT_CXLFLASH_AFU_DEBUG:
+ known_ioctl = true;
+ idx = _IOC_NR(HT_CXLFLASH_LUN_PROVISION) - _IOC_NR(cmd);
+ size = ioctl_tbl[idx].size;
+ do_ioctl = ioctl_tbl[idx].ioctl;
+
+ if (likely(do_ioctl))
+ break;
+
+ /* fall through */
+ default:
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely(copy_from_user(&buf, uarg, size))) {
+ dev_err(dev, "%s: copy_from_user() fail "
+ "size=%lu cmd=%d (%s) uarg=%p\n",
+ __func__, size, cmd, decode_hioctl(cmd), uarg);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ hdr = (struct ht_cxlflash_hdr *)&buf;
+ if (hdr->version != HT_CXLFLASH_VERSION_0) {
+ dev_dbg(dev, "%s: Version %u not supported for %s\n",
+ __func__, hdr->version, decode_hioctl(cmd));
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
+ dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = do_ioctl(cfg, (void *)&buf);
+ if (likely(!rc))
+ if (unlikely(copy_to_user(uarg, &buf, size))) {
+ dev_err(dev, "%s: copy_to_user() fail "
+ "size=%lu cmd=%d (%s) uarg=%p\n",
+ __func__, size, cmd, decode_hioctl(cmd), uarg);
+ rc = -EFAULT;
+ }
+
+ /* fall through to exit */
+
+out:
+ up_read(&cfg->ioctl_rwsem);
+ if (unlikely(rc && known_ioctl))
+ dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
+ __func__, decode_hioctl(cmd), cmd, rc);
+ else
+ dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
+ __func__, decode_hioctl(cmd), cmd, rc);
+ return rc;
+}
+
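As a companion to the drain description in the comment block above, here is a minimal standalone sketch (illustrative names only, not driver code) of the read/write semaphore pattern: each ioctl holds the semaphore for reading, and a drain is simply a momentary write acquisition that completes once all in-flight readers have finished.

/* Illustrative drain pattern: ioctl paths take the semaphore for reading;
 * draining them is a brief write acquisition.
 */
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_ioctl_rwsem);

static int example_ioctl_body(void)
{
	int rc;

	down_read(&example_ioctl_rwsem);
	rc = 0;			/* actual ioctl work would go here */
	up_read(&example_ioctl_rwsem);
	return rc;
}

static void example_drain_ioctls(void)
{
	down_write(&example_ioctl_rwsem);	/* blocks until readers drop out */
	up_write(&example_ioctl_rwsem);		/* new ioctls may proceed */
}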
+/*
+ * Character device file operations
+ */
+static const struct file_operations cxlflash_chr_fops = {
+ .owner = THIS_MODULE,
+ .open = cxlflash_chr_open,
+ .unlocked_ioctl = cxlflash_chr_ioctl,
+ .compat_ioctl = cxlflash_chr_ioctl,
+};
+
+/**
+ * init_chrdev() - initialize the character device for the host
+ * @cfg: Internal structure associated with the host.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int init_chrdev(struct cxlflash_cfg *cfg)
+{
+ struct device *dev = &cfg->dev->dev;
+ struct device *char_dev;
+ dev_t devno;
+ int minor;
+ int rc = 0;
+
+ minor = cxlflash_get_minor();
+ if (unlikely(minor < 0)) {
+ dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ devno = MKDEV(cxlflash_major, minor);
+ cdev_init(&cfg->cdev, &cxlflash_chr_fops);
+
+ rc = cdev_add(&cfg->cdev, devno, 1);
+ if (rc) {
+ dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
+ goto err1;
+ }
+
+ char_dev = device_create(cxlflash_class, NULL, devno,
+ NULL, "cxlflash%d", minor);
+ if (IS_ERR(char_dev)) {
+ rc = PTR_ERR(char_dev);
+ dev_err(dev, "%s: device_create failed rc=%d\n",
+ __func__, rc);
+ goto err2;
+ }
+
+ cfg->chardev = char_dev;
+out:
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ return rc;
+err2:
+ cdev_del(&cfg->cdev);
+err1:
+ cxlflash_put_minor(minor);
+ goto out;
+}
+
+/**
* cxlflash_probe() - PCI entry point to add host
* @pdev: PCI device associated with the host.
* @dev_id: PCI device id associated with device.
@@ -3032,6 +3718,13 @@ static int cxlflash_probe(struct pci_dev *pdev,
}
cfg->init_state = INIT_STATE_SCSI;
+ rc = init_chrdev(cfg);
+ if (rc) {
+ dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
+ goto out_remove;
+ }
+ cfg->init_state = INIT_STATE_CDEV;
+
if (wq_has_sleeper(&cfg->reset_waitq)) {
cfg->state = STATE_PROBED;
wake_up_all(&cfg->reset_waitq);
@@ -3134,6 +3827,63 @@ static void cxlflash_pci_resume(struct pci_dev *pdev)
scsi_unblock_requests(cfg->host);
}
+/**
+ * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
+ * @dev: Character device.
+ * @mode: Mode that can be used to verify access.
+ *
+ * Return: Allocated string describing the devtmpfs structure.
+ */
+static char *cxlflash_devnode(struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
+}
+
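Because the devnode callback above prefixes names with "cxlflash/" and device_create() names the device "cxlflash%d", devtmpfs exposes the node for minor N as /dev/cxlflash/cxlflashN. A rough userspace sketch of opening such a node follows; the path and the commented ioctl are inferred from the code above, not a documented interface.

/* Userspace sketch only: the node name follows from cxlflash_devnode() and
 * device_create(); the ioctl is left as a comment since its argument
 * structures live in the uapi header.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/cxlflash/cxlflash0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/cxlflash/cxlflash0");
		return 1;
	}

	/* ioctl(fd, HT_CXLFLASH_AFU_DEBUG, &afu_dbg) would be issued here. */

	close(fd);
	return 0;
}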
+/**
+ * cxlflash_class_init() - create character device class
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int cxlflash_class_init(void)
+{
+ dev_t devno;
+ int rc = 0;
+
+ rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
+ if (unlikely(rc)) {
+ pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
+ goto out;
+ }
+
+ cxlflash_major = MAJOR(devno);
+
+ cxlflash_class = class_create(THIS_MODULE, "cxlflash");
+ if (IS_ERR(cxlflash_class)) {
+ rc = PTR_ERR(cxlflash_class);
+ pr_err("%s: class_create failed rc=%d\n", __func__, rc);
+ goto err;
+ }
+
+ cxlflash_class->devnode = cxlflash_devnode;
+out:
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+err:
+ unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
+ goto out;
+}
+
+/**
+ * cxlflash_class_exit() - destroy character device class
+ */
+static void cxlflash_class_exit(void)
+{
+ dev_t devno = MKDEV(cxlflash_major, 0);
+
+ class_destroy(cxlflash_class);
+ unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
+}
+
static const struct pci_error_handlers cxlflash_err_handler = {
.error_detected = cxlflash_pci_error_detected,
.slot_reset = cxlflash_pci_slot_reset,
@@ -3159,10 +3909,23 @@ static struct pci_driver cxlflash_driver = {
*/
static int __init init_cxlflash(void)
{
+ int rc;
+
check_sizes();
cxlflash_list_init();
+ rc = cxlflash_class_init();
+ if (unlikely(rc))
+ goto out;
- return pci_register_driver(&cxlflash_driver);
+ rc = pci_register_driver(&cxlflash_driver);
+ if (unlikely(rc))
+ goto err;
+out:
+ pr_debug("%s: returning rc=%d\n", __func__, rc);
+ return rc;
+err:
+ cxlflash_class_exit();
+ goto out;
}
/**
@@ -3174,6 +3937,7 @@ static void __exit exit_cxlflash(void)
cxlflash_free_errpage();
pci_unregister_driver(&cxlflash_driver);
+ cxlflash_class_exit();
}
module_init(init_cxlflash);
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index 49657f1f409e..880e348ed5c9 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -22,6 +22,7 @@
#define CXLFLASH_NAME "cxlflash"
#define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter"
+#define CXLFLASH_MAX_ADAPTERS 32
#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
#define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600
@@ -40,6 +41,10 @@
/* FC defines */
#define FC_MTIP_CMDCONFIG 0x010
#define FC_MTIP_STATUS 0x018
+#define FC_MAX_NUM_LUNS 0x080 /* Max LUNs host can provision for port */
+#define FC_CUR_NUM_LUNS 0x088 /* Cur number LUNs provisioned for port */
+#define FC_MAX_CAP_PORT 0x090 /* Max capacity all LUNs for port (4K blocks) */
+#define FC_CUR_CAP_PORT 0x098 /* Cur capacity all LUNs for port (4K blocks) */
#define FC_PNAME 0x300
#define FC_CONFIG 0x320
@@ -62,6 +67,8 @@
/* AFU command timeout values */
#define MC_AFU_SYNC_TIMEOUT 5 /* 5 secs */
+#define MC_LUN_PROV_TIMEOUT 5 /* 5 secs */
+#define MC_AFU_DEBUG_TIMEOUT 5 /* 5 secs */
/* AFU command room retry limit */
#define MC_ROOM_RETRY_CNT 10
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
index a768360d2fa6..09daa86670fc 100644
--- a/drivers/scsi/cxlflash/sislite.h
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -72,6 +72,13 @@ struct sisl_ioarcb {
u16 timeout; /* in units specified by req_flags */
u32 rsvd1;
u8 cdb[16]; /* must be in big endian */
+#define SISL_AFU_CMD_SYNC 0xC0 /* AFU sync command */
+#define SISL_AFU_CMD_LUN_PROVISION 0xD0 /* AFU LUN provision command */
+#define SISL_AFU_CMD_DEBUG 0xE0 /* AFU debug command */
+
+#define SISL_AFU_LUN_PROVISION_CREATE 0x00 /* LUN provision create type */
+#define SISL_AFU_LUN_PROVISION_DELETE 0x01 /* LUN provision delete type */
+
union {
u64 reserved; /* Reserved for IOARRIN mode */
struct sisl_ioasa *ioasa; /* IOASA EA for SQ Mode */
@@ -156,6 +163,7 @@ struct sisl_rc {
};
#define SISL_SENSE_DATA_LEN 20 /* Sense data length */
+#define SISL_WWID_DATA_LEN 16 /* WWID data length */
/*
* IOASA: 64 bytes & must follow IOARCB, min 16 byte alignment required,
@@ -167,7 +175,12 @@ struct sisl_ioasa {
u32 ioasc;
#define SISL_IOASC_GOOD_COMPLETION 0x00000000U
};
- u32 resid;
+
+ union {
+ u32 resid;
+ u32 lunid_hi;
+ };
+
u8 port;
u8 afu_extra;
/* when afu_rc=0x04, 0x14, 0x31 (_xxx_DMA_ERR):
@@ -190,7 +203,14 @@ struct sisl_ioasa {
u8 scsi_extra;
u8 fc_extra;
- u8 sense_data[SISL_SENSE_DATA_LEN];
+
+ union {
+ u8 sense_data[SISL_SENSE_DATA_LEN];
+ struct {
+ u32 lunid_lo;
+ u8 wwid[SISL_WWID_DATA_LEN];
+ };
+ };
/* These fields are defined by the SISlite architecture for the
* host to use as they see fit for their implementation.
@@ -263,6 +283,7 @@ struct sisl_host_map {
__be64 rrq_end; /* write sequence: start followed by end */
__be64 cmd_room;
__be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */
+#define SISL_CTX_CTRL_UNMAP_SECTOR 0x8000000000000000ULL /* b0 */
__be64 mbox_w; /* restricted use */
__be64 sq_start; /* Submission Queue (R/W): write sequence and */
__be64 sq_end; /* inclusion semantics are the same as RRQ */
@@ -392,6 +413,8 @@ struct sisl_global_regs {
#define SISL_INTVER_CAP_SQ_CMD_MODE 0x400000000000ULL
#define SISL_INTVER_CAP_RESERVED_CMD_MODE_A 0x200000000000ULL
#define SISL_INTVER_CAP_RESERVED_CMD_MODE_B 0x100000000000ULL
+#define SISL_INTVER_CAP_LUN_PROVISION 0x080000000000ULL
+#define SISL_INTVER_CAP_AFU_DEBUG 0x040000000000ULL
};
#define CXLFLASH_NUM_FC_PORTS_PER_BANK 2 /* fixed # of ports per bank */
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index fe9f17a6268b..ad0f9968ccfb 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -57,6 +57,19 @@ static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
}
/**
+ * marshal_udir_to_rele() - translate udirect to release structure
+ * @udirect: Source structure from which to translate/copy.
+ * @release: Destination structure for the translate/copy.
+ */
+static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect,
+ struct dk_cxlflash_release *release)
+{
+ release->hdr = udirect->hdr;
+ release->context_id = udirect->context_id;
+ release->rsrc_handle = udirect->rsrc_handle;
+}
+
+/**
* cxlflash_free_errpage() - frees resources associated with global error page
*/
void cxlflash_free_errpage(void)
@@ -622,6 +635,7 @@ int _cxlflash_disk_release(struct scsi_device *sdev,
res_hndl_t rhndl = release->rsrc_handle;
int rc = 0;
+ int rcr = 0;
u64 ctxid = DECODE_CTXID(release->context_id),
rctxid = release->context_id;
@@ -686,8 +700,12 @@ int _cxlflash_disk_release(struct scsi_device *sdev,
rhte_f1->dw = 0;
dma_wmb(); /* Make RHT entry bottom-half clearing visible */
- if (!ctxi->err_recovery_active)
- cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
+ if (!ctxi->err_recovery_active) {
+ rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
+ if (unlikely(rcr))
+ dev_dbg(dev, "%s: AFU sync failed rc=%d\n",
+ __func__, rcr);
+ }
break;
default:
WARN(1, "Unsupported LUN mode!");
@@ -1929,6 +1947,7 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
struct afu *afu = cfg->afu;
struct llun_info *lli = sdev->hostdata;
struct glun_info *gli = lli->parent;
+ struct dk_cxlflash_release rel = { { 0 }, 0 };
struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;
@@ -1970,13 +1989,18 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
rsrc_handle = (rhte - ctxi->rht_start);
rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
- cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
last_lba = gli->max_lba;
pphys->hdr.return_flags = 0;
pphys->last_lba = last_lba;
pphys->rsrc_handle = rsrc_handle;
+ rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
+ if (unlikely(rc)) {
+ dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc);
+ goto err2;
+ }
+
out:
if (likely(ctxi))
put_context(ctxi);
@@ -1984,6 +2008,10 @@ out:
__func__, rsrc_handle, rc, last_lba);
return rc;
+err2:
+ marshal_udir_to_rele(pphys, &rel);
+ _cxlflash_disk_release(sdev, ctxi, &rel);
+ goto out;
err1:
cxlflash_lun_detach(gli);
goto out;
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 90b5c19f81f0..bdfb93061460 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -446,6 +446,7 @@ static int write_same16(struct scsi_device *sdev,
while (left > 0) {
scsi_cmd[0] = WRITE_SAME_16;
+ scsi_cmd[1] = cfg->ws_unmap ? 0x8 : 0;
put_unaligned_be64(offset, &scsi_cmd[2]);
put_unaligned_be32(ws_limit < left ? ws_limit : left,
&scsi_cmd[10]);
@@ -594,7 +595,9 @@ static int grow_lxt(struct afu *afu,
rhte->lxt_cnt = my_new_size;
dma_wmb(); /* Make RHT entry's LXT table size update visible */
- cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
+ rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
+ if (unlikely(rc))
+ rc = -EAGAIN;
/* free old lxt if reallocated */
if (lxt != lxt_old)
@@ -673,8 +676,11 @@ static int shrink_lxt(struct afu *afu,
rhte->lxt_start = lxt;
dma_wmb(); /* Make RHT entry's LXT table update visible */
- if (needs_sync)
- cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
+ if (needs_sync) {
+ rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
+ if (unlikely(rc))
+ rc = -EAGAIN;
+ }
if (needs_ws) {
/*
@@ -792,6 +798,21 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size);
else if (new_size < rhte->lxt_cnt)
rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size);
+ else {
+ /*
+ * Rare case where there is already sufficient space, just
+ * need to perform a translation sync with the AFU. This
+ * scenario likely follows a previous sync failure during
+ * a resize operation. Accordingly, perform the heavyweight
+ * form of translation sync as it is unknown which type of
+ * resize failed previously.
+ */
+ rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
+ if (unlikely(rc)) {
+ rc = -EAGAIN;
+ goto out;
+ }
+ }
resize->hdr.return_flags = 0;
resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len);
@@ -1084,10 +1105,13 @@ static int clone_lxt(struct afu *afu,
{
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
- struct sisl_lxt_entry *lxt;
+ struct sisl_lxt_entry *lxt = NULL;
+ bool locked = false;
u32 ngrps;
u64 aun; /* chunk# allocated by block allocator */
- int i, j;
+ int j;
+ int i = 0;
+ int rc = 0;
ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt);
@@ -1095,33 +1119,29 @@ static int clone_lxt(struct afu *afu,
/* allocate new LXTs for clone */
lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
GFP_KERNEL);
- if (unlikely(!lxt))
- return -ENOMEM;
+ if (unlikely(!lxt)) {
+ rc = -ENOMEM;
+ goto out;
+ }
/* copy over */
memcpy(lxt, rhte_src->lxt_start,
(sizeof(*lxt) * rhte_src->lxt_cnt));
- /* clone the LBAs in block allocator via ref_cnt */
+ /* clone the LBAs in block allocator via ref_cnt, note that the
+ * block allocator mutex must be held until it is established
+ * that this routine will complete without the need for a
+ * cleanup.
+ */
mutex_lock(&blka->mutex);
+ locked = true;
for (i = 0; i < rhte_src->lxt_cnt; i++) {
aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT);
if (ba_clone(&blka->ba_lun, aun) == -1ULL) {
- /* free the clones already made */
- for (j = 0; j < i; j++) {
- aun = (lxt[j].rlba_base >>
- MC_CHUNK_SHIFT);
- ba_free(&blka->ba_lun, aun);
- }
-
- mutex_unlock(&blka->mutex);
- kfree(lxt);
- return -EIO;
+ rc = -EIO;
+ goto err;
}
}
- mutex_unlock(&blka->mutex);
- } else {
- lxt = NULL;
}
/*
@@ -1136,10 +1156,31 @@ static int clone_lxt(struct afu *afu,
rhte->lxt_cnt = rhte_src->lxt_cnt;
dma_wmb(); /* Make RHT entry's LXT table size update visible */
- cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
+ rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
+ if (unlikely(rc)) {
+ rc = -EAGAIN;
+ goto err2;
+ }
- dev_dbg(dev, "%s: returning\n", __func__);
- return 0;
+out:
+ if (locked)
+ mutex_unlock(&blka->mutex);
+ dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+ return rc;
+err2:
+ /* Reset the RHTE */
+ rhte->lxt_cnt = 0;
+ dma_wmb();
+ rhte->lxt_start = NULL;
+ dma_wmb();
+err:
+ /* free the clones already made */
+ for (j = 0; j < i; j++) {
+ aun = (lxt[j].rlba_base >> MC_CHUNK_SHIFT);
+ ba_free(&blka->ba_lun, aun);
+ }
+ kfree(lxt);
+ goto out;
}
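The comment in clone_lxt() above describes holding the block allocator mutex until it is known that no cleanup is required. The generic shape of that idiom, sketched below with illustrative names, is a "locked" flag consumed by a single exit path so error paths can goto out whether or not the lock was ever taken.

/* Illustrative only: record whether the lock is held and release it on the
 * common exit path, so early failures (before the lock) and late failures
 * (under the lock) share one unwind sequence.
 */
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(example_lock);

static int example_build_under_lock(void)
{
	bool locked = false;
	void *scratch;
	int rc = 0;

	scratch = kzalloc(64, GFP_KERNEL);
	if (!scratch) {
		rc = -ENOMEM;
		goto out;		/* lock never taken */
	}

	mutex_lock(&example_lock);
	locked = true;

	/* populate shared state; on failure, set rc and goto out */

out:
	if (locked)
		mutex_unlock(&example_lock);
	kfree(scratch);
	return rc;
}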
/**
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index c01b47e5b55a..0962fd544401 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -57,7 +57,6 @@
/* device handler flags */
#define ALUA_OPTIMIZE_STPG 0x01
#define ALUA_RTPG_EXT_HDR_UNSUPP 0x02
-#define ALUA_SYNC_STPG 0x04
/* State machine flags */
#define ALUA_PG_RUN_RTPG 0x10
#define ALUA_PG_RUN_STPG 0x20
@@ -70,7 +69,6 @@ MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than
static LIST_HEAD(port_group_list);
static DEFINE_SPINLOCK(port_group_lock);
static struct workqueue_struct *kaluad_wq;
-static struct workqueue_struct *kaluad_sync_wq;
struct alua_port_group {
struct kref kref;
@@ -380,8 +378,6 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
}
spin_lock_irqsave(&pg->lock, flags);
- if (sdev->synchronous_alua)
- pg->flags |= ALUA_SYNC_STPG;
if (pg_updated)
list_add_rcu(&h->node, &pg->dh_list);
spin_unlock_irqrestore(&pg->lock, flags);
@@ -785,7 +781,6 @@ static void alua_rtpg_work(struct work_struct *work)
int err = SCSI_DH_OK;
struct alua_queue_data *qdata, *tmp;
unsigned long flags;
- struct workqueue_struct *alua_wq = kaluad_wq;
spin_lock_irqsave(&pg->lock, flags);
sdev = pg->rtpg_sdev;
@@ -796,8 +791,6 @@ static void alua_rtpg_work(struct work_struct *work)
kref_put(&pg->kref, release_port_group);
return;
}
- if (pg->flags & ALUA_SYNC_STPG)
- alua_wq = kaluad_sync_wq;
pg->flags |= ALUA_PG_RUNNING;
if (pg->flags & ALUA_PG_RUN_RTPG) {
int state = pg->state;
@@ -810,7 +803,7 @@ static void alua_rtpg_work(struct work_struct *work)
pg->flags &= ~ALUA_PG_RUNNING;
pg->flags |= ALUA_PG_RUN_RTPG;
spin_unlock_irqrestore(&pg->lock, flags);
- queue_delayed_work(alua_wq, &pg->rtpg_work,
+ queue_delayed_work(kaluad_wq, &pg->rtpg_work,
pg->interval * HZ);
return;
}
@@ -822,7 +815,7 @@ static void alua_rtpg_work(struct work_struct *work)
pg->flags &= ~ALUA_PG_RUNNING;
pg->flags |= ALUA_PG_RUN_RTPG;
spin_unlock_irqrestore(&pg->lock, flags);
- queue_delayed_work(alua_wq, &pg->rtpg_work,
+ queue_delayed_work(kaluad_wq, &pg->rtpg_work,
pg->interval * HZ);
return;
}
@@ -839,7 +832,7 @@ static void alua_rtpg_work(struct work_struct *work)
pg->interval = 0;
pg->flags &= ~ALUA_PG_RUNNING;
spin_unlock_irqrestore(&pg->lock, flags);
- queue_delayed_work(alua_wq, &pg->rtpg_work,
+ queue_delayed_work(kaluad_wq, &pg->rtpg_work,
pg->interval * HZ);
return;
}
@@ -874,8 +867,6 @@ static bool alua_rtpg_queue(struct alua_port_group *pg,
{
int start_queue = 0;
unsigned long flags;
- struct workqueue_struct *alua_wq = kaluad_wq;
-
if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
return false;
@@ -900,12 +891,10 @@ static bool alua_rtpg_queue(struct alua_port_group *pg,
}
}
- if (pg->flags & ALUA_SYNC_STPG)
- alua_wq = kaluad_sync_wq;
spin_unlock_irqrestore(&pg->lock, flags);
if (start_queue) {
- if (queue_delayed_work(alua_wq, &pg->rtpg_work,
+ if (queue_delayed_work(kaluad_wq, &pg->rtpg_work,
msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
sdev = NULL;
else
@@ -1166,16 +1155,11 @@ static int __init alua_init(void)
/* Temporary failure, bypass */
return SCSI_DH_DEV_TEMP_BUSY;
}
- kaluad_sync_wq = create_workqueue("kaluad_sync");
- if (!kaluad_sync_wq) {
- destroy_workqueue(kaluad_wq);
- return SCSI_DH_DEV_TEMP_BUSY;
- }
+
r = scsi_register_device_handler(&alua_dh);
if (r != 0) {
printk(KERN_ERR "%s: Failed to register scsi device handler",
ALUA_DH_NAME);
- destroy_workqueue(kaluad_sync_wq);
destroy_workqueue(kaluad_wq);
}
return r;
@@ -1184,7 +1168,6 @@ static int __init alua_init(void)
static void __exit alua_exit(void)
{
scsi_unregister_device_handler(&alua_dh);
- destroy_workqueue(kaluad_sync_wq);
destroy_workqueue(kaluad_wq);
}
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
index b6030e3edd01..1da6407ee142 100644
--- a/drivers/scsi/esas2r/esas2r.h
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -945,8 +945,8 @@ struct esas2r_adapter {
struct list_head vrq_mds_head;
struct esas2r_mem_desc *vrq_mds;
int num_vrqs;
- struct semaphore fm_api_semaphore;
- struct semaphore fs_api_semaphore;
+ struct mutex fm_api_mutex;
+ struct mutex fs_api_mutex;
struct semaphore nvram_semaphore;
struct atto_ioctl *local_atto_ioctl;
u8 fw_coredump_buff[ESAS2R_FWCOREDUMP_SZ];
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index 6432a50b26d8..5b14dd29b764 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -327,8 +327,8 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
esas2r_debug("new adapter %p, name %s", a, a->name);
spin_lock_init(&a->request_lock);
spin_lock_init(&a->fw_event_lock);
- sema_init(&a->fm_api_semaphore, 1);
- sema_init(&a->fs_api_semaphore, 1);
+ mutex_init(&a->fm_api_mutex);
+ mutex_init(&a->fs_api_mutex);
sema_init(&a->nvram_semaphore, 1);
esas2r_fw_event_off(a);
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index 2d4b7f049a68..97623002908f 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -110,7 +110,7 @@ static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
{
struct esas2r_request *rq;
- if (down_interruptible(&a->fm_api_semaphore)) {
+ if (mutex_lock_interruptible(&a->fm_api_mutex)) {
fi->status = FI_STAT_BUSY;
return;
}
@@ -173,7 +173,7 @@ all_done:
free_req:
esas2r_free_request(a, (struct esas2r_request *)rq);
free_sem:
- up(&a->fm_api_semaphore);
+ mutex_unlock(&a->fm_api_mutex);
return;
}
@@ -1962,7 +1962,7 @@ int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
(struct esas2r_ioctl_fs *)a->fs_api_buffer;
/* If another flash request is already in progress, return. */
- if (down_interruptible(&a->fs_api_semaphore)) {
+ if (mutex_lock_interruptible(&a->fs_api_mutex)) {
busy:
fs->status = ATTO_STS_OUT_OF_RSRC;
return -EBUSY;
@@ -1978,7 +1978,7 @@ busy:
rq = esas2r_alloc_request(a);
if (rq == NULL) {
esas2r_debug("esas2r_read_fs: out of requests");
- up(&a->fs_api_semaphore);
+ mutex_unlock(&a->fs_api_mutex);
goto busy;
}
@@ -2006,7 +2006,7 @@ busy:
;
dont_wait:
/* Free the request and keep going */
- up(&a->fs_api_semaphore);
+ mutex_unlock(&a->fs_api_mutex);
esas2r_free_request(a, (struct esas2r_request *)rq);
/* Pick up possible error code from above */
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 539e23ec0e59..85f9a3eba387 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -519,7 +519,7 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
* @skb: The receive skb
* @netdev: The associated net device
* @ptype: The packet_type structure which was used to register this handler
- * @orig_dev: The original net_device the the skb was received on.
+ * @orig_dev: The original net_device the skb was received on.
* (in case dev is a bond)
*
* Returns: 0 for success
@@ -542,7 +542,7 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
* @skb: The receive skb
* @netdev: The associated net device
* @ptype: The packet_type structure which was used to register this handler
- * @orig_dev: The original net_device the the skb was received on.
+ * @orig_dev: The original net_device the skb was received on.
* (in case dev is a bond)
*
* Returns: 0 for success
@@ -2258,7 +2258,7 @@ static int _fcoe_create(struct net_device *netdev, enum fip_mode fip_mode,
fcoe_interface_cleanup(fcoe);
mutex_unlock(&fcoe_config_mutex);
fcoe_ctlr_device_delete(ctlr_dev);
- goto out;
+ return rc;
}
/* Make this the "master" N_Port */
@@ -2299,7 +2299,7 @@ static int _fcoe_create(struct net_device *netdev, enum fip_mode fip_mode,
out_nodev:
rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
-out:
+
return rc;
}
@@ -2590,7 +2590,7 @@ module_exit(fcoe_exit);
* fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler
* @seq: active sequence in the FLOGI or FDISC exchange
* @fp: response frame, or error encoded in a pointer (timeout)
- * @arg: pointer the the fcoe_ctlr structure
+ * @arg: pointer to the fcoe_ctlr structure
*
* This handles MAC address management for FCoE, then passes control on to
* the libfc FLOGI response handler.
@@ -2619,7 +2619,7 @@ done:
* fcoe_logo_resp() - FCoE specific LOGO response handler
* @seq: active sequence in the LOGO exchange
* @fp: response frame, or error encoded in a pointer (timeout)
- * @arg: pointer the the fcoe_ctlr structure
+ * @arg: pointer to the fcoe_ctlr structure
*
* This handles MAC address management for FCoE, then passes control on to
* the libfc LOGO response handler.
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index d6498fabe628..5e3d909cfc53 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -632,6 +632,7 @@ static ssize_t fnic_reset_stats_write(struct file *file,
sizeof(struct io_path_stats) - sizeof(u64));
memset(fw_stats_p+1, 0,
sizeof(struct fw_stats) - sizeof(u64));
+ getnstimeofday(&stats->stats_timestamps.last_reset_time);
}
(*ppos)++;
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index e72becaad8a5..999fc7547560 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -65,6 +65,30 @@ void fnic_handle_link(struct work_struct *work)
fnic->link_status = vnic_dev_link_status(fnic->vdev);
fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
+ switch (vnic_dev_port_speed(fnic->vdev)) {
+ case DCEM_PORTSPEED_10G:
+ fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT;
+ fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
+ break;
+ case DCEM_PORTSPEED_25G:
+ fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT;
+ fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
+ break;
+ case DCEM_PORTSPEED_40G:
+ case DCEM_PORTSPEED_4x10G:
+ fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT;
+ fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
+ break;
+ case DCEM_PORTSPEED_100G:
+ fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT;
+ fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
+ break;
+ default:
+ fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN;
+ fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
+ break;
+ }
+
if (old_link_status == fnic->link_status) {
if (!fnic->link_status) {
/* DOWN -> DOWN */
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
index c35b8f1889ea..e0bc659ed71f 100644
--- a/drivers/scsi/fnic/fnic_io.h
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -66,4 +66,13 @@ struct fnic_io_req {
struct completion *dr_done; /* completion for device reset */
};
+enum fnic_port_speeds {
+ DCEM_PORTSPEED_NONE = 0,
+ DCEM_PORTSPEED_1G = 1000,
+ DCEM_PORTSPEED_10G = 10000,
+ DCEM_PORTSPEED_40G = 40000,
+ DCEM_PORTSPEED_4x10G = 41000,
+ DCEM_PORTSPEED_25G = 25000,
+ DCEM_PORTSPEED_100G = 100000,
+};
#endif /* _FNIC_IO_H_ */
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index ba58b7953263..aacadbf20b69 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -176,11 +176,21 @@ static void fnic_get_host_speed(struct Scsi_Host *shost)
/* Add in other values as they get defined in fw */
switch (port_speed) {
- case 10000:
+ case DCEM_PORTSPEED_10G:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
+ case DCEM_PORTSPEED_25G:
+ fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
+ break;
+ case DCEM_PORTSPEED_40G:
+ case DCEM_PORTSPEED_4x10G:
+ fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
+ break;
+ case DCEM_PORTSPEED_100G:
+ fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
+ break;
default:
- fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
}
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index d048f3b5006f..6c0646d62dfb 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -466,15 +466,27 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
}
rp = rport->dd_data;
- if (!rp || rp->rp_state != RPORT_ST_READY) {
+ if (!rp || rp->rp_state == RPORT_ST_DELETE) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "returning DID_NO_CONNECT for IO as rport is removed\n");
+ "rport 0x%x removed, returning DID_NO_CONNECT\n",
+ rport->port_id);
+
atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
sc->result = DID_NO_CONNECT<<16;
done(sc);
return 0;
}
+ if (rp->rp_state != RPORT_ST_READY) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
+ rport->port_id, rp->rp_state);
+
+ sc->result = DID_IMM_RETRY << 16;
+ done(sc);
+ return 0;
+ }
+
if (lp->state != LPORT_ST_READY || !(lp->link_up))
return SCSI_MLQUEUE_HOST_BUSY;
@@ -633,6 +645,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
+ atomic64_set(&fnic->io_cmpl_skip, 0);
spin_lock_irqsave(&fnic->fnic_lock, flags);
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index 88c73cccb015..e007feedbf72 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -16,6 +16,12 @@
*/
#ifndef _FNIC_STATS_H_
#define _FNIC_STATS_H_
+
+struct stats_timestamps {
+ struct timespec last_reset_time;
+ struct timespec last_read_time;
+};
+
struct io_path_stats {
atomic64_t active_ios;
atomic64_t max_active_ios;
@@ -110,6 +116,7 @@ struct misc_stats {
};
struct fnic_stats {
+ struct stats_timestamps stats_timestamps;
struct io_path_stats io_stats;
struct abort_stats abts_stats;
struct terminate_stats term_stats;
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index b5ac5381a0d7..4826f596cb31 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -219,7 +219,31 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
int buf_size = debug->buf_size;
struct timespec val1, val2;
+ getnstimeofday(&val1);
len = snprintf(debug->debug_buffer + len, buf_size - len,
+ "------------------------------------------\n"
+ "\t\tTime\n"
+ "------------------------------------------\n");
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
+ "Current time : [%ld:%ld]\n"
+ "Last stats reset time: [%ld:%ld]\n"
+ "Last stats read time: [%ld:%ld]\n"
+ "delta since last reset: [%ld:%ld]\n"
+ "delta since last read: [%ld:%ld]\n",
+ val1.tv_sec, val1.tv_nsec,
+ stats->stats_timestamps.last_reset_time.tv_sec,
+ stats->stats_timestamps.last_reset_time.tv_nsec,
+ stats->stats_timestamps.last_read_time.tv_sec,
+ stats->stats_timestamps.last_read_time.tv_nsec,
+ timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
+ timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
+ timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
+ timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);
+
+ stats->stats_timestamps.last_read_time = val1;
+
+ len += snprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tIO Statistics\n"
"------------------------------------------\n");
diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig
index 374a329b91fc..d42f29a5eb65 100644
--- a/drivers/scsi/hisi_sas/Kconfig
+++ b/drivers/scsi/hisi_sas/Kconfig
@@ -6,4 +6,12 @@ config SCSI_HISI_SAS
select BLK_DEV_INTEGRITY
depends on ATA
help
- This driver supports HiSilicon's SAS HBA
+ This driver supports HiSilicon's SAS HBA, including support for
+ platform device based controllers.
+
+config SCSI_HISI_SAS_PCI
+ tristate "HiSilicon SAS on PCI bus"
+ depends on SCSI_HISI_SAS
+ depends on PCI
+ help
+ This driver supports HiSilicon's SAS HBA based on PCI devices.
diff --git a/drivers/scsi/hisi_sas/Makefile b/drivers/scsi/hisi_sas/Makefile
index c6d3a1b5fcb9..24623f228510 100644
--- a/drivers/scsi/hisi_sas/Makefile
+++ b/drivers/scsi/hisi_sas/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas_main.o
obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas_v1_hw.o hisi_sas_v2_hw.o
+obj-$(CONFIG_SCSI_HISI_SAS_PCI) += hisi_sas_v3_hw.o
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 4e28f32e90b0..a722f2bd72ab 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -18,6 +18,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -33,10 +34,24 @@
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
#define HISI_SAS_RESET_BIT 0
-#define HISI_SAS_STATUS_BUF_SZ \
- (sizeof(struct hisi_sas_err_record) + 1024)
-#define HISI_SAS_COMMAND_TABLE_SZ \
- (((sizeof(union hisi_sas_command_table)+3)/4)*4)
+#define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
+#define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
+
+#define hisi_sas_status_buf_addr(buf) \
+ (buf + offsetof(struct hisi_sas_slot_buf_table, status_buffer))
+#define hisi_sas_status_buf_addr_mem(slot) hisi_sas_status_buf_addr(slot->buf)
+#define hisi_sas_status_buf_addr_dma(slot) \
+ hisi_sas_status_buf_addr(slot->buf_dma)
+
+#define hisi_sas_cmd_hdr_addr(buf) \
+ (buf + offsetof(struct hisi_sas_slot_buf_table, command_header))
+#define hisi_sas_cmd_hdr_addr_mem(slot) hisi_sas_cmd_hdr_addr(slot->buf)
+#define hisi_sas_cmd_hdr_addr_dma(slot) hisi_sas_cmd_hdr_addr(slot->buf_dma)
+
+#define hisi_sas_sge_addr(buf) \
+ (buf + offsetof(struct hisi_sas_slot_buf_table, sge_page))
+#define hisi_sas_sge_addr_mem(slot) hisi_sas_sge_addr(slot->buf)
+#define hisi_sas_sge_addr_dma(slot) hisi_sas_sge_addr(slot->buf_dma)
#define HISI_SAS_MAX_SSP_RESP_SZ (sizeof(struct ssp_frame_hdr) + 1024)
#define HISI_SAS_MAX_SMP_RESP_SZ 1028
@@ -46,6 +61,12 @@
((type == SAS_EDGE_EXPANDER_DEVICE) || \
(type == SAS_FANOUT_EXPANDER_DEVICE))
+#define HISI_SAS_SATA_PROTOCOL_NONDATA 0x1
+#define HISI_SAS_SATA_PROTOCOL_PIO 0x2
+#define HISI_SAS_SATA_PROTOCOL_DMA 0x4
+#define HISI_SAS_SATA_PROTOCOL_FPDMA 0x8
+#define HISI_SAS_SATA_PROTOCOL_ATAPI 0x10
+
struct hisi_hba;
enum {
@@ -78,11 +99,11 @@ struct hisi_sas_phy {
struct work_struct phyup_ws;
u64 port_id; /* from hw */
u64 dev_sas_addr;
- u64 phy_type;
u64 frame_rcvd_size;
u8 frame_rcvd[32];
u8 phy_attached;
u8 reserved[3];
+ u32 phy_type;
enum sas_linkrate minimum_linkrate;
enum sas_linkrate maximum_linkrate;
};
@@ -102,20 +123,23 @@ struct hisi_sas_cq {
struct hisi_sas_dq {
struct hisi_hba *hisi_hba;
+ struct hisi_sas_slot *slot_prep;
+ spinlock_t lock;
int wr_point;
int id;
};
struct hisi_sas_device {
- enum sas_device_type dev_type;
struct hisi_hba *hisi_hba;
struct domain_device *sas_device;
+ struct hisi_sas_dq *dq;
+ struct list_head list;
u64 attached_phy;
- u64 device_id;
atomic64_t running_req;
- struct list_head list;
- u8 dev_status;
+ enum sas_device_type dev_type;
+ int device_id;
int sata_idx;
+ u8 dev_status;
};
struct hisi_sas_slot {
@@ -129,14 +153,10 @@ struct hisi_sas_slot {
int cmplt_queue_slot;
int idx;
int abort;
+ void *buf;
+ dma_addr_t buf_dma;
void *cmd_hdr;
dma_addr_t cmd_hdr_dma;
- void *status_buffer;
- dma_addr_t status_buffer_dma;
- void *command_table;
- dma_addr_t command_table_dma;
- struct hisi_sas_sge_page *sge_page;
- dma_addr_t sge_page_dma;
struct work_struct abort_slot;
struct timer_list internal_abort_timer;
};
@@ -154,9 +174,8 @@ struct hisi_sas_hw {
struct domain_device *device);
struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
- int (*get_free_slot)(struct hisi_hba *hisi_hba, u32 dev_id,
- int *q, int *s);
- void (*start_delivery)(struct hisi_hba *hisi_hba);
+ int (*get_free_slot)(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq);
+ void (*start_delivery)(struct hisi_sas_dq *dq);
int (*prep_ssp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot, int is_tmf,
struct hisi_sas_tmf_task *tmf);
@@ -179,6 +198,8 @@ struct hisi_sas_hw {
void (*free_device)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
+ void (*dereg_device)(struct hisi_hba *hisi_hba,
+ struct domain_device *device);
int (*soft_reset)(struct hisi_hba *hisi_hba);
int max_command_entries;
int complete_hdr_size;
@@ -188,7 +209,10 @@ struct hisi_hba {
/* This must be the first element, used by SHOST_TO_SAS_HA */
struct sas_ha_struct *p;
- struct platform_device *pdev;
+ struct platform_device *platform_dev;
+ struct pci_dev *pci_dev;
+ struct device *dev;
+
void __iomem *regs;
struct regmap *ctrl;
u32 ctrl_reset_reg;
@@ -217,12 +241,9 @@ struct hisi_hba {
struct hisi_sas_port port[HISI_SAS_MAX_PHYS];
int queue_count;
- struct hisi_sas_slot *slot_prep;
- struct dma_pool *sge_page_pool;
+ struct dma_pool *buffer_pool;
struct hisi_sas_device devices[HISI_SAS_MAX_DEVICES];
- struct dma_pool *command_table_pool;
- struct dma_pool *status_buffer_pool;
struct hisi_sas_cmd_hdr *cmd_hdr[HISI_SAS_MAX_QUEUES];
dma_addr_t cmd_hdr_dma[HISI_SAS_MAX_QUEUES];
void *complete_hdr[HISI_SAS_MAX_QUEUES];
@@ -334,7 +355,7 @@ struct hisi_sas_command_table_stp {
#define HISI_SAS_SGE_PAGE_CNT SG_CHUNK_SIZE
struct hisi_sas_sge_page {
struct hisi_sas_sge sge[HISI_SAS_SGE_PAGE_CNT];
-};
+} __aligned(16);
struct hisi_sas_command_table_ssp {
struct ssp_frame_hdr hdr;
@@ -353,9 +374,31 @@ union hisi_sas_command_table {
struct hisi_sas_command_table_ssp ssp;
struct hisi_sas_command_table_smp smp;
struct hisi_sas_command_table_stp stp;
+} __aligned(16);
+
+struct hisi_sas_status_buffer {
+ struct hisi_sas_err_record err;
+ u8 iu[1024];
+} __aligned(16);
+
+struct hisi_sas_slot_buf_table {
+ struct hisi_sas_status_buffer status_buffer;
+ union hisi_sas_command_table command_header;
+ struct hisi_sas_sge_page sge_page;
};
+extern struct scsi_transport_template *hisi_sas_stt;
+extern struct scsi_host_template *hisi_sas_sht;
+
+extern void hisi_sas_init_add(struct hisi_hba *hisi_hba);
+extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost);
+extern void hisi_sas_free(struct hisi_hba *hisi_hba);
+extern u8 hisi_sas_get_ata_protocol(u8 cmd, int direction);
extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
+extern void hisi_sas_sata_done(struct sas_task *task,
+ struct hisi_sas_slot *slot);
+extern int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag);
+extern int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba);
extern int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *ops);
extern int hisi_sas_remove(struct platform_device *pdev);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index d622db502ec9..4022c3f8295f 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -23,6 +23,97 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
+u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
+{
+ switch (cmd) {
+ case ATA_CMD_FPDMA_WRITE:
+ case ATA_CMD_FPDMA_READ:
+ case ATA_CMD_FPDMA_RECV:
+ case ATA_CMD_FPDMA_SEND:
+ case ATA_CMD_NCQ_NON_DATA:
+ return HISI_SAS_SATA_PROTOCOL_FPDMA;
+
+ case ATA_CMD_DOWNLOAD_MICRO:
+ case ATA_CMD_ID_ATA:
+ case ATA_CMD_PMP_READ:
+ case ATA_CMD_READ_LOG_EXT:
+ case ATA_CMD_PIO_READ:
+ case ATA_CMD_PIO_READ_EXT:
+ case ATA_CMD_PMP_WRITE:
+ case ATA_CMD_WRITE_LOG_EXT:
+ case ATA_CMD_PIO_WRITE:
+ case ATA_CMD_PIO_WRITE_EXT:
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+
+ case ATA_CMD_DSM:
+ case ATA_CMD_DOWNLOAD_MICRO_DMA:
+ case ATA_CMD_PMP_READ_DMA:
+ case ATA_CMD_PMP_WRITE_DMA:
+ case ATA_CMD_READ:
+ case ATA_CMD_READ_EXT:
+ case ATA_CMD_READ_LOG_DMA_EXT:
+ case ATA_CMD_READ_STREAM_DMA_EXT:
+ case ATA_CMD_TRUSTED_RCV_DMA:
+ case ATA_CMD_TRUSTED_SND_DMA:
+ case ATA_CMD_WRITE:
+ case ATA_CMD_WRITE_EXT:
+ case ATA_CMD_WRITE_FUA_EXT:
+ case ATA_CMD_WRITE_QUEUED:
+ case ATA_CMD_WRITE_LOG_DMA_EXT:
+ case ATA_CMD_WRITE_STREAM_DMA_EXT:
+ return HISI_SAS_SATA_PROTOCOL_DMA;
+
+ case ATA_CMD_CHK_POWER:
+ case ATA_CMD_DEV_RESET:
+ case ATA_CMD_EDD:
+ case ATA_CMD_FLUSH:
+ case ATA_CMD_FLUSH_EXT:
+ case ATA_CMD_VERIFY:
+ case ATA_CMD_VERIFY_EXT:
+ case ATA_CMD_SET_FEATURES:
+ case ATA_CMD_STANDBY:
+ case ATA_CMD_STANDBYNOW1:
+ return HISI_SAS_SATA_PROTOCOL_NONDATA;
+ default:
+ if (direction == DMA_NONE)
+ return HISI_SAS_SATA_PROTOCOL_NONDATA;
+ return HISI_SAS_SATA_PROTOCOL_PIO;
+ }
+}
+EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
+
+void hisi_sas_sata_done(struct sas_task *task,
+ struct hisi_sas_slot *slot)
+{
+ struct task_status_struct *ts = &task->task_status;
+ struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
+ struct hisi_sas_status_buffer *status_buf =
+ hisi_sas_status_buf_addr_mem(slot);
+ u8 *iu = &status_buf->iu[0];
+ struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;
+
+ resp->frame_len = sizeof(struct dev_to_host_fis);
+ memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
+
+ ts->buf_valid_size = sizeof(*resp);
+}
+EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
+
+int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
+{
+ struct ata_queued_cmd *qc = task->uldd_task;
+
+ if (qc) {
+ if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+ qc->tf.command == ATA_CMD_FPDMA_READ) {
+ *tag = qc->tag;
+ return 1;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
+
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
return device->port->ha->lldd_ha;
@@ -79,7 +170,7 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
{
if (task) {
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
@@ -94,17 +185,9 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
atomic64_dec(&sas_dev->running_req);
}
- if (slot->command_table)
- dma_pool_free(hisi_hba->command_table_pool,
- slot->command_table, slot->command_table_dma);
-
- if (slot->status_buffer)
- dma_pool_free(hisi_hba->status_buffer_pool,
- slot->status_buffer, slot->status_buffer_dma);
+ if (slot->buf)
+ dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);
- if (slot->sge_page)
- dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
- slot->sge_page_dma);
list_del_init(&slot->entry);
slot->task = NULL;
@@ -156,7 +239,7 @@ static void hisi_sas_slot_abort(struct work_struct *work)
struct scsi_cmnd *cmnd = task->uldd_task;
struct hisi_sas_tmf_task tmf_task;
struct scsi_lun lun;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int tag = abort_slot->idx;
unsigned long flags;
@@ -179,17 +262,18 @@ out:
task->task_done(task);
}
-static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
- int is_tmf, struct hisi_sas_tmf_task *tmf,
- int *pass)
+static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
+ *dq, int is_tmf, struct hisi_sas_tmf_task *tmf,
+ int *pass)
{
+ struct hisi_hba *hisi_hba = dq->hisi_hba;
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_sas_port *port;
struct hisi_sas_slot *slot;
struct hisi_sas_cmd_hdr *cmd_hdr_base;
struct asd_sas_port *sas_port = device->port;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
unsigned long flags;
@@ -209,7 +293,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
if (DEV_IS_GONE(sas_dev)) {
if (sas_dev)
- dev_info(dev, "task prep: device %llu not ready\n",
+ dev_info(dev, "task prep: device %d not ready\n",
sas_dev->device_id);
else
dev_info(dev, "task prep: device %016llx not ready\n",
@@ -240,18 +324,24 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
} else
n_elem = task->num_scatter;
+ spin_lock_irqsave(&hisi_hba->lock, flags);
if (hisi_hba->hw->slot_index_alloc)
rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
device);
else
rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
- if (rc)
+ if (rc) {
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
goto err_out;
- rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
- &dlvry_queue, &dlvry_queue_slot);
+ }
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+
+ rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
if (rc)
goto err_out_tag;
+ dlvry_queue = dq->id;
+ dlvry_queue_slot = dq->wr_point;
slot = &hisi_hba->slot_info[slot_idx];
memset(slot, 0, sizeof(struct hisi_sas_slot));
@@ -266,24 +356,15 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
task->lldd_task = slot;
INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
- slot->status_buffer = dma_pool_alloc(hisi_hba->status_buffer_pool,
- GFP_ATOMIC,
- &slot->status_buffer_dma);
- if (!slot->status_buffer) {
+ slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
+ GFP_ATOMIC, &slot->buf_dma);
+ if (!slot->buf) {
rc = -ENOMEM;
goto err_out_slot_buf;
}
- memset(slot->status_buffer, 0, HISI_SAS_STATUS_BUF_SZ);
-
- slot->command_table = dma_pool_alloc(hisi_hba->command_table_pool,
- GFP_ATOMIC,
- &slot->command_table_dma);
- if (!slot->command_table) {
- rc = -ENOMEM;
- goto err_out_status_buf;
- }
- memset(slot->command_table, 0, HISI_SAS_COMMAND_TABLE_SZ);
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
+ memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
+ memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);
switch (task->task_proto) {
case SAS_PROTOCOL_SMP:
@@ -306,9 +387,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
if (rc) {
dev_err(dev, "task prep: rc = 0x%x\n", rc);
- if (slot->sge_page)
- goto err_out_sge;
- goto err_out_command_table;
+ goto err_out_buf;
}
list_add_tail(&slot->entry, &sas_dev->list);
@@ -316,26 +395,22 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- hisi_hba->slot_prep = slot;
+ dq->slot_prep = slot;
atomic64_inc(&sas_dev->running_req);
++(*pass);
return 0;
-err_out_sge:
- dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
- slot->sge_page_dma);
-err_out_command_table:
- dma_pool_free(hisi_hba->command_table_pool, slot->command_table,
- slot->command_table_dma);
-err_out_status_buf:
- dma_pool_free(hisi_hba->status_buffer_pool, slot->status_buffer,
- slot->status_buffer_dma);
+err_out_buf:
+ dma_pool_free(hisi_hba->buffer_pool, slot->buf,
+ slot->buf_dma);
err_out_slot_buf:
/* Nothing to be done */
err_out_tag:
+ spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
dev_err(dev, "task prep: failed[%d]!\n", rc);
if (!sas_protocol_ata(task->task_proto))
@@ -353,20 +428,23 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
u32 pass = 0;
unsigned long flags;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
+ struct domain_device *device = task->dev;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct hisi_sas_dq *dq = sas_dev->dq;
if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)))
return -EINVAL;
/* protect task_prep and start_delivery sequence */
- spin_lock_irqsave(&hisi_hba->lock, flags);
- rc = hisi_sas_task_prep(task, hisi_hba, is_tmf, tmf, &pass);
+ spin_lock_irqsave(&dq->lock, flags);
+ rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
if (rc)
dev_err(dev, "task exec: failed[%d]!\n", rc);
if (likely(pass))
- hisi_hba->hw->start_delivery(hisi_hba);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ hisi_hba->hw->start_delivery(dq);
+ spin_unlock_irqrestore(&dq->lock, flags);
return rc;
}
@@ -421,12 +499,16 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
spin_lock(&hisi_hba->lock);
for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
+ int queue = i % hisi_hba->queue_count;
+ struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
+
hisi_hba->devices[i].device_id = i;
sas_dev = &hisi_hba->devices[i];
sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
sas_dev->dev_type = device->dev_type;
sas_dev->hisi_hba = hisi_hba;
sas_dev->sas_device = device;
+ sas_dev->dq = dq;
INIT_LIST_HEAD(&hisi_hba->devices[i].list);
break;
}
@@ -441,7 +523,7 @@ static int hisi_sas_dev_found(struct domain_device *device)
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct domain_device *parent_dev = device->parent;
struct hisi_sas_device *sas_dev;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
if (hisi_hba->hw->alloc_dev)
sas_dev = hisi_hba->hw->alloc_dev(device);
@@ -622,19 +704,28 @@ static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
}
}
+static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
+ struct domain_device *device)
+{
+ if (hisi_hba->hw->dereg_device)
+ hisi_hba->hw->dereg_device(hisi_hba, device);
+}
+
static void hisi_sas_dev_gone(struct domain_device *device)
{
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
- struct device *dev = &hisi_hba->pdev->dev;
- u64 dev_id = sas_dev->device_id;
+ struct device *dev = hisi_hba->dev;
+ int dev_id = sas_dev->device_id;
- dev_info(dev, "found dev[%lld:%x] is gone\n",
+ dev_info(dev, "found dev[%d:%x] is gone\n",
sas_dev->device_id, sas_dev->dev_type);
hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
+ hisi_sas_dereg_device(hisi_hba, device);
+
hisi_hba->hw->free_device(hisi_hba, sas_dev);
device->lldd_dev = NULL;
memset(sas_dev, 0, sizeof(*sas_dev));
@@ -691,8 +782,13 @@ static void hisi_sas_task_done(struct sas_task *task)
static void hisi_sas_tmf_timedout(unsigned long data)
{
struct sas_task *task = (struct sas_task *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
- task->task_state_flags |= SAS_TASK_STATE_ABORTED;
complete(&task->slow_task->completion);
}
@@ -704,7 +800,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
{
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct sas_task *task;
int res, retry;
@@ -821,7 +917,7 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
struct ata_link *link;
int rc = TMF_RESP_FUNC_FAILED;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int s = sizeof(struct host_to_dev_fis);
unsigned long flags;
@@ -879,7 +975,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
return -1;
if (!test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct sas_ha_struct *sas_ha = &hisi_hba->sha;
unsigned long flags;
@@ -912,7 +1008,7 @@ static int hisi_sas_abort_task(struct sas_task *task)
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int rc = TMF_RESP_FUNC_FAILED;
unsigned long flags;
@@ -961,9 +1057,10 @@ static int hisi_sas_abort_task(struct sas_task *task)
if (task->dev->dev_type == SAS_SATA_DEV) {
hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_DEV, 0);
+ hisi_sas_dereg_device(hisi_hba, device);
rc = hisi_sas_softreset_ata_disk(device);
}
- } else if (task->task_proto & SAS_PROTOCOL_SMP) {
+ } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
/* SMP */
struct hisi_sas_slot *slot = task->lldd_task;
u32 tag = slot->idx;
@@ -1027,6 +1124,10 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
return TMF_RESP_FUNC_FAILED;
sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
+ hisi_sas_internal_task_abort(hisi_hba, device,
+ HISI_SAS_INT_ABT_DEV, 0);
+ hisi_sas_dereg_device(hisi_hba, device);
+
rc = hisi_sas_debug_I_T_nexus_reset(device);
if (rc == TMF_RESP_FUNC_COMPLETE) {
@@ -1041,7 +1142,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
unsigned long flags;
int rc = TMF_RESP_FUNC_FAILED;
@@ -1054,6 +1155,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
HISI_SAS_INT_ABT_DEV, 0);
if (rc == TMF_RESP_FUNC_FAILED)
goto out;
+ hisi_sas_dereg_device(hisi_hba, device);
phy = sas_get_local_phy(device);
@@ -1077,7 +1179,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
}
out:
if (rc != TMF_RESP_FUNC_COMPLETE)
- dev_err(dev, "lu_reset: for device[%llx]:rc= %d\n",
+ dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
sas_dev->device_id, rc);
return rc;
}
@@ -1124,19 +1226,20 @@ static int hisi_sas_query_task(struct sas_task *task)
}
static int
-hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
+hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
struct sas_task *task, int abort_flag,
int task_tag)
{
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct hisi_sas_port *port;
struct hisi_sas_slot *slot;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_cmd_hdr *cmd_hdr_base;
+ struct hisi_sas_dq *dq = sas_dev->dq;
int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
- unsigned long flags;
+ unsigned long flags, flags_dq;
if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)))
return -EINVAL;
@@ -1147,14 +1250,22 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
port = to_hisi_sas_port(sas_port);
/* simply get a slot and send abort command */
+ spin_lock_irqsave(&hisi_hba->lock, flags);
rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
- if (rc)
+ if (rc) {
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
goto err_out;
- rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
- &dlvry_queue, &dlvry_queue_slot);
+ }
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+
+ spin_lock_irqsave(&dq->lock, flags_dq);
+ rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
if (rc)
goto err_out_tag;
+ dlvry_queue = dq->id;
+ dlvry_queue_slot = dq->wr_point;
+
slot = &hisi_hba->slot_info[slot_idx];
memset(slot, 0, sizeof(struct hisi_sas_slot));
@@ -1181,17 +1292,21 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- hisi_hba->slot_prep = slot;
+ dq->slot_prep = slot;
atomic64_inc(&sas_dev->running_req);
- /* send abort command to our chip */
- hisi_hba->hw->start_delivery(hisi_hba);
+ /* send abort command to the chip */
+ hisi_hba->hw->start_delivery(dq);
+ spin_unlock_irqrestore(&dq->lock, flags_dq);
return 0;
err_out_tag:
+ spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ spin_unlock_irqrestore(&dq->lock, flags_dq);
err_out:
dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
@@ -1214,9 +1329,8 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
{
struct sas_task *task;
struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int res;
- unsigned long flags;
if (!hisi_hba->hw->prep_abort)
return -EOPNOTSUPP;
@@ -1233,11 +1347,8 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
add_timer(&task->slow_task->timer);
- /* Lock as we are alloc'ing a slot, which cannot be interrupted */
- spin_lock_irqsave(&hisi_hba->lock, flags);
res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
task, abort_flag, tag);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
if (res) {
del_timer(&task->slow_task->timer);
dev_err(dev, "internal task abort: executing internal task failed: %d\n",
@@ -1247,6 +1358,17 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
wait_for_completion(&task->slow_task->completion);
res = TMF_RESP_FUNC_FAILED;
+ /* Internal abort timed out */
+ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ struct hisi_sas_slot *slot = task->lldd_task;
+
+ if (slot)
+ slot->task = NULL;
+ dev_err(dev, "internal task abort: timeout.\n");
+ }
+ }
+
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
res = TMF_RESP_FUNC_COMPLETE;
@@ -1259,13 +1381,6 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
goto exit;
}
- /* Internal abort timed out */
- if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
- dev_err(dev, "internal task abort: timeout.\n");
- }
- }
-
exit:
dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
"resp: 0x%x sts 0x%x\n",
@@ -1353,9 +1468,10 @@ void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
}
EXPORT_SYMBOL_GPL(hisi_sas_rescan_topology);
-static struct scsi_transport_template *hisi_sas_stt;
+struct scsi_transport_template *hisi_sas_stt;
+EXPORT_SYMBOL_GPL(hisi_sas_stt);
-static struct scsi_host_template hisi_sas_sht = {
+static struct scsi_host_template _hisi_sas_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.queuecommand = sas_queuecommand,
@@ -1375,6 +1491,8 @@ static struct scsi_host_template hisi_sas_sht = {
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
};
+struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
+EXPORT_SYMBOL_GPL(hisi_sas_sht);
static struct sas_domain_function_template hisi_sas_transport_ops = {
.lldd_dev_found = hisi_sas_dev_found,
@@ -1422,10 +1540,9 @@ void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
-static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
+int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
- struct platform_device *pdev = hisi_hba->pdev;
- struct device *dev = &pdev->dev;
+ struct device *dev = hisi_hba->dev;
int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
spin_lock_init(&hisi_hba->lock);
@@ -1468,16 +1585,9 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
goto err_out;
}
- s = HISI_SAS_STATUS_BUF_SZ;
- hisi_hba->status_buffer_pool = dma_pool_create("status_buffer",
- dev, s, 16, 0);
- if (!hisi_hba->status_buffer_pool)
- goto err_out;
-
- s = HISI_SAS_COMMAND_TABLE_SZ;
- hisi_hba->command_table_pool = dma_pool_create("command_table",
- dev, s, 16, 0);
- if (!hisi_hba->command_table_pool)
+ s = sizeof(struct hisi_sas_slot_buf_table);
+ hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
+ if (!hisi_hba->buffer_pool)
goto err_out;
s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
@@ -1512,11 +1622,6 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
if (!hisi_hba->slot_index_tags)
goto err_out;
- hisi_hba->sge_page_pool = dma_pool_create("status_sge", dev,
- sizeof(struct hisi_sas_sge_page), 16, 0);
- if (!hisi_hba->sge_page_pool)
- goto err_out;
-
s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
&hisi_hba->initial_fis_dma, GFP_KERNEL);
@@ -1542,10 +1647,11 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
err_out:
return -ENOMEM;
}
+EXPORT_SYMBOL_GPL(hisi_sas_alloc);
-static void hisi_sas_free(struct hisi_hba *hisi_hba)
+void hisi_sas_free(struct hisi_hba *hisi_hba)
{
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -1562,9 +1668,7 @@ static void hisi_sas_free(struct hisi_hba *hisi_hba)
hisi_hba->complete_hdr_dma[i]);
}
- dma_pool_destroy(hisi_hba->status_buffer_pool);
- dma_pool_destroy(hisi_hba->command_table_pool);
- dma_pool_destroy(hisi_hba->sge_page_pool);
+ dma_pool_destroy(hisi_hba->buffer_pool);
s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
if (hisi_hba->itct)
@@ -1598,6 +1702,7 @@ static void hisi_sas_free(struct hisi_hba *hisi_hba)
if (hisi_hba->wq)
destroy_workqueue(hisi_hba->wq);
}
+EXPORT_SYMBOL_GPL(hisi_sas_free);
static void hisi_sas_rst_work_handler(struct work_struct *work)
{
@@ -1607,65 +1712,99 @@ static void hisi_sas_rst_work_handler(struct work_struct *work)
hisi_sas_controller_reset(hisi_hba);
}
-static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
- const struct hisi_sas_hw *hw)
+int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
- struct resource *res;
- struct Scsi_Host *shost;
- struct hisi_hba *hisi_hba;
- struct device *dev = &pdev->dev;
- struct device_node *np = pdev->dev.of_node;
+ struct device *dev = hisi_hba->dev;
+ struct platform_device *pdev = hisi_hba->platform_dev;
+ struct device_node *np = pdev ? pdev->dev.of_node : NULL;
struct clk *refclk;
- shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
- if (!shost) {
- dev_err(dev, "scsi host alloc failed\n");
- return NULL;
- }
- hisi_hba = shost_priv(shost);
-
- INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
- hisi_hba->hw = hw;
- hisi_hba->pdev = pdev;
- hisi_hba->shost = shost;
- SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
-
- init_timer(&hisi_hba->timer);
-
if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
- SAS_ADDR_SIZE))
- goto err_out;
+ SAS_ADDR_SIZE)) {
+ dev_err(dev, "could not get property sas-addr\n");
+ return -ENOENT;
+ }
if (np) {
+ /*
+ * These properties are only required for platform device-based
+ * controllers with DT firmware.
+ */
hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
"hisilicon,sas-syscon");
- if (IS_ERR(hisi_hba->ctrl))
- goto err_out;
+ if (IS_ERR(hisi_hba->ctrl)) {
+ dev_err(dev, "could not get syscon\n");
+ return -ENOENT;
+ }
if (device_property_read_u32(dev, "ctrl-reset-reg",
- &hisi_hba->ctrl_reset_reg))
- goto err_out;
+ &hisi_hba->ctrl_reset_reg)) {
+ dev_err(dev,
+ "could not get property ctrl-reset-reg\n");
+ return -ENOENT;
+ }
if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
- &hisi_hba->ctrl_reset_sts_reg))
- goto err_out;
+ &hisi_hba->ctrl_reset_sts_reg)) {
+ dev_err(dev,
+ "could not get property ctrl-reset-sts-reg\n");
+ return -ENOENT;
+ }
if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
- &hisi_hba->ctrl_clock_ena_reg))
- goto err_out;
+ &hisi_hba->ctrl_clock_ena_reg)) {
+ dev_err(dev,
+ "could not get property ctrl-clock-ena-reg\n");
+ return -ENOENT;
+ }
}
- refclk = devm_clk_get(&pdev->dev, NULL);
+ refclk = devm_clk_get(dev, NULL);
if (IS_ERR(refclk))
dev_dbg(dev, "no ref clk property\n");
else
hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;
- if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy))
- goto err_out;
+ if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
+ dev_err(dev, "could not get property phy-count\n");
+ return -ENOENT;
+ }
if (device_property_read_u32(dev, "queue-count",
- &hisi_hba->queue_count))
+ &hisi_hba->queue_count)) {
+ dev_err(dev, "could not get property queue-count\n");
+ return -ENOENT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
+
+static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
+ const struct hisi_sas_hw *hw)
+{
+ struct resource *res;
+ struct Scsi_Host *shost;
+ struct hisi_hba *hisi_hba;
+ struct device *dev = &pdev->dev;
+
+ shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
+ if (!shost) {
+ dev_err(dev, "scsi host alloc failed\n");
+ return NULL;
+ }
+ hisi_hba = shost_priv(shost);
+
+ INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
+ hisi_hba->hw = hw;
+ hisi_hba->dev = dev;
+ hisi_hba->platform_dev = pdev;
+ hisi_hba->shost = shost;
+ SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
+
+ init_timer(&hisi_hba->timer);
+
+ if (hisi_sas_get_fw_info(hisi_hba) < 0)
goto err_out;
if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
@@ -1691,7 +1830,7 @@ err_out:
return NULL;
}
-static void hisi_sas_init_add(struct hisi_hba *hisi_hba)
+void hisi_sas_init_add(struct hisi_hba *hisi_hba)
{
int i;
@@ -1700,6 +1839,7 @@ static void hisi_sas_init_add(struct hisi_hba *hisi_hba)
hisi_hba->sas_addr,
SAS_ADDR_SIZE);
}
+EXPORT_SYMBOL_GPL(hisi_sas_init_add);
int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *hw)
@@ -1743,7 +1883,7 @@ int hisi_sas_probe(struct platform_device *pdev,
shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
sha->sas_ha_name = DRV_NAME;
- sha->dev = &hisi_hba->pdev->dev;
+ sha->dev = hisi_hba->dev;
sha->lldd_module = THIS_MODULE;
sha->sas_addr = &hisi_hba->sas_addr[0];
sha->num_phys = hisi_hba->n_phy;
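/*
 * Editorial note (not part of the patch): the hisi_sas_main.c hunks above
 * split slot-delivery locking in two.  IPTT/slot-index allocation keeps
 * using the global hisi_hba->lock, while ring-buffer bookkeeping and the
 * doorbell write move under the new per-queue dq->lock.  A minimal sketch
 * of the resulting ordering, using only helpers that appear in this patch
 * (error paths trimmed, the function name is illustrative):
 */
static int hisi_sas_deliver_sketch(struct hisi_hba *hisi_hba,
				   struct hisi_sas_dq *dq)
{
	unsigned long flags, flags_dq;
	int slot_idx, rc;

	/* slot index (IPTT) allocation stays under the global HBA lock */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	if (rc)
		return rc;

	/* queue bookkeeping and delivery happen under the per-queue lock */
	spin_lock_irqsave(&dq->lock, flags_dq);
	rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (!rc) {
		/* ... fill hisi_hba->slot_info[slot_idx], set dq->slot_prep ... */
		hisi_hba->hw->start_delivery(dq);
	}
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	return rc;
}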
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index fc1c1b2c1a19..08eca20b0b81 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -505,7 +505,7 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
struct domain_device *device = sas_dev->sas_device;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u64 qw0, device_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
struct asd_sas_port *sas_port = device->port;
@@ -571,7 +571,7 @@ static int reset_hw_v1_hw(struct hisi_hba *hisi_hba)
int i;
unsigned long end_time;
u32 val;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
for (i = 0; i < hisi_hba->n_phy; i++) {
u32 phy_ctrl = hisi_sas_phy_read32(hisi_hba, i, PHY_CTRL);
@@ -756,7 +756,7 @@ static void init_reg_v1_hw(struct hisi_hba *hisi_hba)
static int hw_init_v1_hw(struct hisi_hba *hisi_hba)
{
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int rc;
rc = reset_hw_v1_hw(hisi_hba);
@@ -900,22 +900,17 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
return bitmap;
}
-/**
- * This function allocates across all queues to load balance.
- * Slots are allocated from queues in a round-robin fashion.
- *
+/*
 * The callpath to this function and up to writing the write
* queue pointer should be safe from interruption.
*/
-static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, u32 dev_id,
- int *q, int *s)
+static int
+get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
{
- struct device *dev = &hisi_hba->pdev->dev;
- struct hisi_sas_dq *dq;
+ struct device *dev = hisi_hba->dev;
+ int queue = dq->id;
u32 r, w;
- int queue = dev_id % hisi_hba->queue_count;
- dq = &hisi_hba->dq[queue];
w = dq->wr_point;
r = hisi_sas_read32_relaxed(hisi_hba,
DLVRY_Q_0_RD_PTR + (queue * 0x14));
@@ -924,16 +919,14 @@ static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, u32 dev_id,
return -EAGAIN;
}
- *q = queue;
- *s = w;
return 0;
}
-static void start_delivery_v1_hw(struct hisi_hba *hisi_hba)
+static void start_delivery_v1_hw(struct hisi_sas_dq *dq)
{
- int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
- int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;
- struct hisi_sas_dq *dq = &hisi_hba->dq[dlvry_queue];
+ struct hisi_hba *hisi_hba = dq->hisi_hba;
+ int dlvry_queue = dq->slot_prep->dlvry_queue;
+ int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
@@ -946,7 +939,8 @@ static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
struct scatterlist *scatter,
int n_elem)
{
- struct device *dev = &hisi_hba->pdev->dev;
+ struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
+ struct device *dev = hisi_hba->dev;
struct scatterlist *sg;
int i;
@@ -956,13 +950,8 @@ static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
return -EINVAL;
}
- slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC,
- &slot->sge_page_dma);
- if (!slot->sge_page)
- return -ENOMEM;
-
for_each_sg(scatter, sg, n_elem, i) {
- struct hisi_sas_sge *entry = &slot->sge_page->sge[i];
+ struct hisi_sas_sge *entry = &sge_page->sge[i];
entry->addr = cpu_to_le64(sg_dma_address(sg));
entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
@@ -970,7 +959,7 @@ static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
entry->data_off = 0;
}
- hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma);
+ hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
@@ -983,7 +972,7 @@ static int prep_smp_v1_hw(struct hisi_hba *hisi_hba,
struct sas_task *task = slot->task;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct domain_device *device = task->dev;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct hisi_sas_port *port = slot->port;
struct scatterlist *sg_req, *sg_resp;
struct hisi_sas_device *sas_dev = device->lldd_dev;
@@ -1033,7 +1022,7 @@ static int prep_smp_v1_hw(struct hisi_hba *hisi_hba,
hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);
hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
- hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+ hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
return 0;
@@ -1114,10 +1103,11 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
}
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
- hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
- hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+ hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
+ hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
- buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr);
+ buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
+ sizeof(struct ssp_frame_hdr);
if (task->ssp_task.enable_first_burst) {
fburst = (1 << 7);
dw2 |= 1 << CMD_HDR_FIRST_BURST_OFF;
@@ -1154,8 +1144,9 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
struct task_status_struct *ts = &task->task_status;
- struct hisi_sas_err_record_v1 *err_record = slot->status_buffer;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct hisi_sas_err_record_v1 *err_record =
+ hisi_sas_status_buf_addr_mem(slot);
+ struct device *dev = hisi_hba->dev;
switch (task->task_proto) {
case SAS_PROTOCOL_SSP:
@@ -1281,7 +1272,7 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
{
struct sas_task *task = slot->task;
struct hisi_sas_device *sas_dev;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct task_status_struct *ts;
struct domain_device *device;
enum exec_status sts;
@@ -1371,8 +1362,11 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
switch (task->task_proto) {
case SAS_PROTOCOL_SSP:
{
- struct ssp_response_iu *iu = slot->status_buffer +
- sizeof(struct hisi_sas_err_record);
+ struct hisi_sas_status_buffer *status_buffer =
+ hisi_sas_status_buf_addr_mem(slot);
+ struct ssp_response_iu *iu = (struct ssp_response_iu *)
+ &status_buffer->iu[0];
+
sas_ssp_task_response(dev, task, iu);
break;
}
@@ -1389,7 +1383,7 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
- slot->status_buffer +
+ hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
sg_dma_len(sg_resp));
kunmap_atomic(to);
@@ -1430,7 +1424,7 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
{
struct hisi_sas_phy *phy = p;
struct hisi_hba *hisi_hba = phy->hisi_hba;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int i, phy_no = sas_phy->id;
u32 irq_value, context, port_id, link_rate;
@@ -1511,7 +1505,7 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct sas_ha_struct *sha = &hisi_hba->sha;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int phy_no = sas_phy->id;
u32 irq_value;
irqreturn_t res = IRQ_HANDLED;
@@ -1538,7 +1532,7 @@ static irqreturn_t int_abnormal_v1_hw(int irq, void *p)
{
struct hisi_sas_phy *phy = p;
struct hisi_hba *hisi_hba = phy->hisi_hba;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
u32 irq_value, irq_mask_old;
int phy_no = sas_phy->id;
@@ -1641,7 +1635,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
static irqreturn_t fatal_ecc_int_v1_hw(int irq, void *p)
{
struct hisi_hba *hisi_hba = p;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u32 ecc_int = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
if (ecc_int & SAS_ECC_INTR_DQ_ECC1B_MSK) {
@@ -1700,7 +1694,7 @@ static irqreturn_t fatal_ecc_int_v1_hw(int irq, void *p)
static irqreturn_t fatal_axi_int_v1_hw(int irq, void *p)
{
struct hisi_hba *hisi_hba = p;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u32 axi_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC2);
u32 axi_info = hisi_sas_read32(hisi_hba, HGC_AXI_FIFO_ERR_INFO);
@@ -1738,7 +1732,7 @@ static irq_handler_t fatal_interrupts[HISI_SAS_MAX_QUEUES] = {
static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
{
- struct platform_device *pdev = hisi_hba->pdev;
+ struct platform_device *pdev = hisi_hba->platform_dev;
struct device *dev = &pdev->dev;
int i, j, irq, rc, idx;
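/*
 * Editorial note (not part of the patch): get_free_slot_v1_hw() and
 * start_delivery_v1_hw() above implement a one-slot-sentinel ring buffer
 * over the delivery queue.  A standalone sketch of the same arithmetic
 * (names and the example depth are illustrative, not from the driver):
 */
static bool dq_ring_full(u32 wr_point, u32 rd_point, u32 depth)
{
	/* full when advancing the writer would land on the reader,
	 * i.e. the r == (w + 1) % HISI_SAS_QUEUE_SLOTS test above */
	return rd_point == (wr_point + 1) % depth;
}

static u32 dq_ring_advance(u32 wr_point, u32 depth)
{
	/* what start_delivery_v1_hw() stores in dq->wr_point before
	 * writing DLVRY_Q_0_WR_PTR; e.g. depth 8, wr_point 7 -> 0 */
	return (wr_point + 1) % depth;
}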
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index e241921bee10..551d103c27f1 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -554,12 +554,6 @@ enum {
#define DIR_TO_DEVICE 2
#define DIR_RESERVED 3
-#define SATA_PROTOCOL_NONDATA 0x1
-#define SATA_PROTOCOL_PIO 0x2
-#define SATA_PROTOCOL_DMA 0x4
-#define SATA_PROTOCOL_FPDMA 0x8
-#define SATA_PROTOCOL_ATAPI 0x10
-
#define ERR_ON_TX_PHASE(err_phase) (err_phase == 0x2 || \
err_phase == 0x4 || err_phase == 0x8 ||\
err_phase == 0x6 || err_phase == 0xa)
@@ -659,7 +653,7 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx)
{
unsigned int index;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
void *bitmap = hisi_hba->sata_dev_bitmap;
index = find_first_zero_bit(bitmap, HISI_MAX_SATA_SUPPORT_V2_HW);
@@ -695,6 +689,9 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
if (sata_dev && (i & 1))
continue;
if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
+ int queue = i % hisi_hba->queue_count;
+ struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
+
hisi_hba->devices[i].device_id = i;
sas_dev = &hisi_hba->devices[i];
sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
@@ -702,6 +699,7 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
sas_dev->hisi_hba = hisi_hba;
sas_dev->sas_device = device;
sas_dev->sata_idx = sata_idx;
+ sas_dev->dq = dq;
INIT_LIST_HEAD(&hisi_hba->devices[i].list);
break;
}
@@ -756,7 +754,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
struct domain_device *device = sas_dev->sas_device;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u64 qw0, device_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
struct domain_device *parent_dev = device->parent;
@@ -809,7 +807,7 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_device *sas_dev)
{
u64 dev_id = sas_dev->device_id;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
int i;
@@ -853,7 +851,7 @@ static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
int i, reset_val;
u32 val;
unsigned long end_time;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
/* The mask needs to be set depending on the number of phys */
if (hisi_hba->n_phy == 9)
@@ -989,7 +987,7 @@ static void phys_try_accept_stp_links_v2_hw(struct hisi_hba *hisi_hba)
static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
{
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int i;
/* Global registers init */
@@ -1170,7 +1168,7 @@ static void set_link_timer_quirk(struct hisi_hba *hisi_hba)
static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
{
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
int rc;
rc = reset_hw_v2_hw(hisi_hba);
@@ -1219,7 +1217,7 @@ static bool tx_fifo_is_empty_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
static bool axi_bus_is_idle_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
int i, max_loop = 1000;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u32 status, axi_status, dfx_val, dfx_tx_val;
for (i = 0; i < max_loop; i++) {
@@ -1245,7 +1243,7 @@ static bool axi_bus_is_idle_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
static bool wait_io_done_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
int i, max_loop = 1000;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u32 status, tx_dfx0;
for (i = 0; i < max_loop; i++) {
@@ -1283,7 +1281,7 @@ static bool allowed_disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
static void disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
u32 cfg, axi_val, dfx0_val, txid_auto;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
/* Close axi bus. */
axi_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
@@ -1454,22 +1452,17 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
return bitmap;
}
-/**
- * This function allocates across all queues to load balance.
- * Slots are allocated from queues in a round-robin fashion.
- *
+/*
 * The callpath to this function and up to writing the write
* queue pointer should be safe from interruption.
*/
-static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, u32 dev_id,
- int *q, int *s)
+static int
+get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
{
- struct device *dev = &hisi_hba->pdev->dev;
- struct hisi_sas_dq *dq;
+ struct device *dev = hisi_hba->dev;
+ int queue = dq->id;
u32 r, w;
- int queue = dev_id % hisi_hba->queue_count;
- dq = &hisi_hba->dq[queue];
w = dq->wr_point;
r = hisi_sas_read32_relaxed(hisi_hba,
DLVRY_Q_0_RD_PTR + (queue * 0x14));
@@ -1479,16 +1472,14 @@ static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, u32 dev_id,
return -EAGAIN;
}
- *q = queue;
- *s = w;
return 0;
}
-static void start_delivery_v2_hw(struct hisi_hba *hisi_hba)
+static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
{
- int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
- int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;
- struct hisi_sas_dq *dq = &hisi_hba->dq[dlvry_queue];
+ struct hisi_hba *hisi_hba = dq->hisi_hba;
+ int dlvry_queue = dq->slot_prep->dlvry_queue;
+ int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
@@ -1501,7 +1492,8 @@ static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
struct scatterlist *scatter,
int n_elem)
{
- struct device *dev = &hisi_hba->pdev->dev;
+ struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
+ struct device *dev = hisi_hba->dev;
struct scatterlist *sg;
int i;
@@ -1511,13 +1503,8 @@ static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
return -EINVAL;
}
- slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC,
- &slot->sge_page_dma);
- if (!slot->sge_page)
- return -ENOMEM;
-
for_each_sg(scatter, sg, n_elem, i) {
- struct hisi_sas_sge *entry = &slot->sge_page->sge[i];
+ struct hisi_sas_sge *entry = &sge_page->sge[i];
entry->addr = cpu_to_le64(sg_dma_address(sg));
entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
@@ -1525,7 +1512,7 @@ static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
entry->data_off = 0;
}
- hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma);
+ hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
@@ -1538,7 +1525,7 @@ static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
struct sas_task *task = slot->task;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct domain_device *device = task->dev;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct hisi_sas_port *port = slot->port;
struct scatterlist *sg_req, *sg_resp;
struct hisi_sas_device *sas_dev = device->lldd_dev;
@@ -1589,7 +1576,7 @@ static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);
hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
- hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+ hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
return 0;
@@ -1663,10 +1650,11 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
}
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
- hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
- hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+ hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
+ hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
- buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr);
+ buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
+ sizeof(struct ssp_frame_hdr);
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
if (!is_tmf) {
@@ -1692,20 +1680,6 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
return 0;
}
-static void sata_done_v2_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
- struct hisi_sas_slot *slot)
-{
- struct task_status_struct *ts = &task->task_status;
- struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
- struct dev_to_host_fis *d2h = slot->status_buffer +
- sizeof(struct hisi_sas_err_record);
-
- resp->frame_len = sizeof(struct dev_to_host_fis);
- memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
-
- ts->buf_valid_size = sizeof(*resp);
-}
-
#define TRANS_TX_ERR 0
#define TRANS_RX_ERR 1
#define DMA_TX_ERR 2
@@ -1907,7 +1881,8 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
int err_phase)
{
struct task_status_struct *ts = &task->task_status;
- struct hisi_sas_err_record_v2 *err_record = slot->status_buffer;
+ struct hisi_sas_err_record_v2 *err_record =
+ hisi_sas_status_buf_addr_mem(slot);
u32 trans_tx_fail_type = cpu_to_le32(err_record->trans_tx_fail_type);
u32 trans_rx_fail_type = cpu_to_le32(err_record->trans_rx_fail_type);
u16 dma_tx_err_type = cpu_to_le16(err_record->dma_tx_err_type);
@@ -2198,7 +2173,7 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
break;
}
}
- sata_done_v2_hw(hisi_hba, task, slot);
+ hisi_sas_sata_done(task, slot);
}
break;
default:
@@ -2211,7 +2186,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
{
struct sas_task *task = slot->task;
struct hisi_sas_device *sas_dev;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct task_status_struct *ts;
struct domain_device *device;
enum exec_status sts;
@@ -2296,8 +2271,10 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
switch (task->task_proto) {
case SAS_PROTOCOL_SSP:
{
- struct ssp_response_iu *iu = slot->status_buffer +
- sizeof(struct hisi_sas_err_record);
+ struct hisi_sas_status_buffer *status_buffer =
+ hisi_sas_status_buf_addr_mem(slot);
+ struct ssp_response_iu *iu = (struct ssp_response_iu *)
+ &status_buffer->iu[0];
sas_ssp_task_response(dev, task, iu);
break;
@@ -2315,7 +2292,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
DMA_TO_DEVICE);
memcpy(to + sg_resp->offset,
- slot->status_buffer +
+ hisi_sas_status_buf_addr_mem(slot) +
sizeof(struct hisi_sas_err_record),
sg_dma_len(sg_resp));
kunmap_atomic(to);
@@ -2326,7 +2303,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
{
ts->stat = SAM_STAT_GOOD;
- sata_done_v2_hw(hisi_hba, task, slot);
+ hisi_sas_sata_done(task, slot);
break;
}
default:
@@ -2344,7 +2321,9 @@ out:
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
+ spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_task_free(hisi_hba, task, slot);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
sts = ts->stat;
if (task->task_done)
@@ -2353,78 +2332,6 @@ out:
return sts;
}
-static u8 get_ata_protocol(u8 cmd, int direction)
-{
- switch (cmd) {
- case ATA_CMD_FPDMA_WRITE:
- case ATA_CMD_FPDMA_READ:
- case ATA_CMD_FPDMA_RECV:
- case ATA_CMD_FPDMA_SEND:
- case ATA_CMD_NCQ_NON_DATA:
- return SATA_PROTOCOL_FPDMA;
-
- case ATA_CMD_DOWNLOAD_MICRO:
- case ATA_CMD_ID_ATA:
- case ATA_CMD_PMP_READ:
- case ATA_CMD_READ_LOG_EXT:
- case ATA_CMD_PIO_READ:
- case ATA_CMD_PIO_READ_EXT:
- case ATA_CMD_PMP_WRITE:
- case ATA_CMD_WRITE_LOG_EXT:
- case ATA_CMD_PIO_WRITE:
- case ATA_CMD_PIO_WRITE_EXT:
- return SATA_PROTOCOL_PIO;
-
- case ATA_CMD_DSM:
- case ATA_CMD_DOWNLOAD_MICRO_DMA:
- case ATA_CMD_PMP_READ_DMA:
- case ATA_CMD_PMP_WRITE_DMA:
- case ATA_CMD_READ:
- case ATA_CMD_READ_EXT:
- case ATA_CMD_READ_LOG_DMA_EXT:
- case ATA_CMD_READ_STREAM_DMA_EXT:
- case ATA_CMD_TRUSTED_RCV_DMA:
- case ATA_CMD_TRUSTED_SND_DMA:
- case ATA_CMD_WRITE:
- case ATA_CMD_WRITE_EXT:
- case ATA_CMD_WRITE_FUA_EXT:
- case ATA_CMD_WRITE_QUEUED:
- case ATA_CMD_WRITE_LOG_DMA_EXT:
- case ATA_CMD_WRITE_STREAM_DMA_EXT:
- return SATA_PROTOCOL_DMA;
-
- case ATA_CMD_CHK_POWER:
- case ATA_CMD_DEV_RESET:
- case ATA_CMD_EDD:
- case ATA_CMD_FLUSH:
- case ATA_CMD_FLUSH_EXT:
- case ATA_CMD_VERIFY:
- case ATA_CMD_VERIFY_EXT:
- case ATA_CMD_SET_FEATURES:
- case ATA_CMD_STANDBY:
- case ATA_CMD_STANDBYNOW1:
- return SATA_PROTOCOL_NONDATA;
- default:
- if (direction == DMA_NONE)
- return SATA_PROTOCOL_NONDATA;
- return SATA_PROTOCOL_PIO;
- }
-}
-
-static int get_ncq_tag_v2_hw(struct sas_task *task, u32 *tag)
-{
- struct ata_queued_cmd *qc = task->uldd_task;
-
- if (qc) {
- if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
- qc->tf.command == ATA_CMD_FPDMA_READ) {
- *tag = qc->tag;
- return 1;
- }
- }
- return 0;
-}
-
static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot)
{
@@ -2465,13 +2372,14 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
(task->ata_task.fis.control & ATA_SRST))
dw1 |= 1 << CMD_HDR_RESET_OFF;
- dw1 |= (get_ata_protocol(task->ata_task.fis.command, task->data_dir))
+ dw1 |= (hisi_sas_get_ata_protocol(
+ task->ata_task.fis.command, task->data_dir))
<< CMD_HDR_FRAME_TYPE_OFF;
dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
hdr->dw1 = cpu_to_le32(dw1);
/* dw2 */
- if (task->ata_task.use_ncq && get_ncq_tag_v2_hw(task, &hdr_tag)) {
+ if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
}
@@ -2490,12 +2398,11 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
return rc;
}
-
hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
- hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
- hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
+ hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
+ hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
- buf_cmd = slot->command_table;
+ buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot);
if (likely(!task->ata_task.device_control_reg_update))
task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
@@ -2578,7 +2485,7 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
u32 port_id, link_rate, hard_phy_linkrate;
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;
@@ -2765,7 +2672,7 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
{
struct hisi_hba *hisi_hba = p;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u32 ent_msk, ent_tmp, irq_msk;
int phy_no = 0;
@@ -2825,7 +2732,7 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
static void
one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value)
{
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u32 reg_val;
if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF)) {
@@ -2914,7 +2821,7 @@ static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
u32 irq_value)
{
u32 reg_val;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) {
reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
@@ -3064,7 +2971,7 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
{
struct hisi_hba *hisi_hba = p;
u32 irq_value, irq_msk, err_value;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe);
@@ -3162,13 +3069,14 @@ static void cq_tasklet_v2_hw(unsigned long val)
struct hisi_sas_complete_v2_hdr *complete_queue;
u32 rd_point = cq->rd_point, wr_point, dev_id;
int queue = cq->id;
+ struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
if (unlikely(hisi_hba->reject_stp_links_msk))
phys_try_accept_stp_links_v2_hw(hisi_hba);
complete_queue = hisi_hba->complete_hdr[queue];
- spin_lock(&hisi_hba->lock);
+ spin_lock(&dq->lock);
wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
(0x14 * queue));
@@ -3218,7 +3126,7 @@ static void cq_tasklet_v2_hw(unsigned long val)
/* update rd_point */
cq->rd_point = rd_point;
hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
- spin_unlock(&hisi_hba->lock);
+ spin_unlock(&dq->lock);
}
static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
@@ -3239,7 +3147,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
struct hisi_sas_phy *phy = p;
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
struct hisi_sas_initial_fis *initial_fis;
struct dev_to_host_fis *fis;
u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
@@ -3341,7 +3249,7 @@ static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
*/
static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
{
- struct platform_device *pdev = hisi_hba->pdev;
+ struct platform_device *pdev = hisi_hba->platform_dev;
struct device *dev = &pdev->dev;
int i, irq, rc, irq_map[128];
@@ -3455,7 +3363,7 @@ static int hisi_sas_v2_init(struct hisi_hba *hisi_hba)
static void interrupt_disable_v2_hw(struct hisi_hba *hisi_hba)
{
- struct platform_device *pdev = hisi_hba->pdev;
+ struct platform_device *pdev = hisi_hba->platform_dev;
int i;
for (i = 0; i < hisi_hba->queue_count; i++)
@@ -3477,7 +3385,7 @@ static void interrupt_disable_v2_hw(struct hisi_hba *hisi_hba)
static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
{
- struct device *dev = &hisi_hba->pdev->dev;
+ struct device *dev = hisi_hba->dev;
u32 old_state, state;
int rc, cnt;
int phy_no;
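/*
 * Editorial note (not part of the patch): the v2-only helpers removed
 * above (sata_done_v2_hw, get_ata_protocol, get_ncq_tag_v2_hw and the
 * SATA_PROTOCOL_* defines) are replaced by common hisi_sas_sata_done(),
 * hisi_sas_get_ata_protocol() and hisi_sas_get_ncq_tag() calls so the
 * new v3 driver below can share them.  A plausible shape of the shared
 * sata_done, derived from the removed v2 body plus the status-buffer
 * helpers used elsewhere in this patch (a sketch, not the actual common
 * implementation):
 */
static void hisi_sas_sata_done_sketch(struct sas_task *task,
				      struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	struct dev_to_host_fis *d2h =
			(struct dev_to_host_fis *)&status_buf->iu[0];

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}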
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
new file mode 100644
index 000000000000..83d2dca1c650
--- /dev/null
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -0,0 +1,1846 @@
+/*
+ * Copyright (c) 2017 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include "hisi_sas.h"
+#define DRV_NAME "hisi_sas_v3_hw"
+
+/* global registers need init */
+#define DLVRY_QUEUE_ENABLE 0x0
+#define IOST_BASE_ADDR_LO 0x8
+#define IOST_BASE_ADDR_HI 0xc
+#define ITCT_BASE_ADDR_LO 0x10
+#define ITCT_BASE_ADDR_HI 0x14
+#define IO_BROKEN_MSG_ADDR_LO 0x18
+#define IO_BROKEN_MSG_ADDR_HI 0x1c
+#define PHY_CONTEXT 0x20
+#define PHY_STATE 0x24
+#define PHY_PORT_NUM_MA 0x28
+#define PHY_CONN_RATE 0x30
+#define AXI_AHB_CLK_CFG 0x3c
+#define ITCT_CLR 0x44
+#define ITCT_CLR_EN_OFF 16
+#define ITCT_CLR_EN_MSK (0x1 << ITCT_CLR_EN_OFF)
+#define ITCT_DEV_OFF 0
+#define ITCT_DEV_MSK (0x7ff << ITCT_DEV_OFF)
+#define AXI_USER1 0x48
+#define AXI_USER2 0x4c
+#define IO_SATA_BROKEN_MSG_ADDR_LO 0x58
+#define IO_SATA_BROKEN_MSG_ADDR_HI 0x5c
+#define SATA_INITI_D2H_STORE_ADDR_LO 0x60
+#define SATA_INITI_D2H_STORE_ADDR_HI 0x64
+#define CFG_MAX_TAG 0x68
+#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84
+#define HGC_SAS_TXFAIL_RETRY_CTRL 0x88
+#define HGC_GET_ITV_TIME 0x90
+#define DEVICE_MSG_WORK_MODE 0x94
+#define OPENA_WT_CONTI_TIME 0x9c
+#define I_T_NEXUS_LOSS_TIME 0xa0
+#define MAX_CON_TIME_LIMIT_TIME 0xa4
+#define BUS_INACTIVE_LIMIT_TIME 0xa8
+#define REJECT_TO_OPEN_LIMIT_TIME 0xac
+#define CFG_AGING_TIME 0xbc
+#define HGC_DFX_CFG2 0xc0
+#define CFG_ABT_SET_QUERY_IPTT 0xd4
+#define CFG_SET_ABORTED_IPTT_OFF 0
+#define CFG_SET_ABORTED_IPTT_MSK (0xfff << CFG_SET_ABORTED_IPTT_OFF)
+#define CFG_SET_ABORTED_EN_OFF 12
+#define CFG_ABT_SET_IPTT_DONE 0xd8
+#define CFG_ABT_SET_IPTT_DONE_OFF 0
+#define HGC_IOMB_PROC1_STATUS 0x104
+#define CFG_1US_TIMER_TRSH 0xcc
+#define CHNL_INT_STATUS 0x148
+#define INT_COAL_EN 0x19c
+#define OQ_INT_COAL_TIME 0x1a0
+#define OQ_INT_COAL_CNT 0x1a4
+#define ENT_INT_COAL_TIME 0x1a8
+#define ENT_INT_COAL_CNT 0x1ac
+#define OQ_INT_SRC 0x1b0
+#define OQ_INT_SRC_MSK 0x1b4
+#define ENT_INT_SRC1 0x1b8
+#define ENT_INT_SRC1_D2H_FIS_CH0_OFF 0
+#define ENT_INT_SRC1_D2H_FIS_CH0_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
+#define ENT_INT_SRC1_D2H_FIS_CH1_OFF 8
+#define ENT_INT_SRC1_D2H_FIS_CH1_MSK (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
+#define ENT_INT_SRC2 0x1bc
+#define ENT_INT_SRC3 0x1c0
+#define ENT_INT_SRC3_WP_DEPTH_OFF 8
+#define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF 9
+#define ENT_INT_SRC3_RP_DEPTH_OFF 10
+#define ENT_INT_SRC3_AXI_OFF 11
+#define ENT_INT_SRC3_FIFO_OFF 12
+#define ENT_INT_SRC3_LM_OFF 14
+#define ENT_INT_SRC3_ITC_INT_OFF 15
+#define ENT_INT_SRC3_ITC_INT_MSK (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
+#define ENT_INT_SRC3_ABT_OFF 16
+#define ENT_INT_SRC_MSK1 0x1c4
+#define ENT_INT_SRC_MSK2 0x1c8
+#define ENT_INT_SRC_MSK3 0x1cc
+#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF 31
+#define CHNL_PHYUPDOWN_INT_MSK 0x1d0
+#define CHNL_ENT_INT_MSK 0x1d4
+#define HGC_COM_INT_MSK 0x1d8
+#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
+#define SAS_ECC_INTR 0x1e8
+#define SAS_ECC_INTR_MSK 0x1ec
+#define HGC_ERR_STAT_EN 0x238
+#define DLVRY_Q_0_BASE_ADDR_LO 0x260
+#define DLVRY_Q_0_BASE_ADDR_HI 0x264
+#define DLVRY_Q_0_DEPTH 0x268
+#define DLVRY_Q_0_WR_PTR 0x26c
+#define DLVRY_Q_0_RD_PTR 0x270
+#define HYPER_STREAM_ID_EN_CFG 0xc80
+#define OQ0_INT_SRC_MSK 0xc90
+#define COMPL_Q_0_BASE_ADDR_LO 0x4e0
+#define COMPL_Q_0_BASE_ADDR_HI 0x4e4
+#define COMPL_Q_0_DEPTH 0x4e8
+#define COMPL_Q_0_WR_PTR 0x4ec
+#define COMPL_Q_0_RD_PTR 0x4f0
+#define AWQOS_AWCACHE_CFG 0xc84
+#define ARQOS_ARCACHE_CFG 0xc88
+
+/* phy registers requiring init */
+#define PORT_BASE (0x2000)
+#define PHY_CFG (PORT_BASE + 0x0)
+#define HARD_PHY_LINKRATE (PORT_BASE + 0x4)
+#define PHY_CFG_ENA_OFF 0
+#define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF)
+#define PHY_CFG_DC_OPT_OFF 2
+#define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF)
+#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
+#define PHY_CTRL (PORT_BASE + 0x14)
+#define PHY_CTRL_RESET_OFF 0
+#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
+#define SL_CFG (PORT_BASE + 0x84)
+#define SL_CONTROL (PORT_BASE + 0x94)
+#define SL_CONTROL_NOTIFY_EN_OFF 0
+#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
+#define SL_CTA_OFF 17
+#define SL_CTA_MSK (0x1 << SL_CTA_OFF)
+#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
+#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
+#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
+#define TX_ID_DWORD3 (PORT_BASE + 0xa8)
+#define TX_ID_DWORD4 (PORT_BASE + 0xaC)
+#define TX_ID_DWORD5 (PORT_BASE + 0xb0)
+#define TX_ID_DWORD6 (PORT_BASE + 0xb4)
+#define TXID_AUTO (PORT_BASE + 0xb8)
+#define CT3_OFF 1
+#define CT3_MSK (0x1 << CT3_OFF)
+#define TX_HARDRST_OFF 2
+#define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF)
+#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
+#define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc)
+#define SAS_SSP_CON_TIMER_CFG (PORT_BASE + 0x134)
+#define SAS_SMP_CON_TIMER_CFG (PORT_BASE + 0x138)
+#define SAS_STP_CON_TIMER_CFG (PORT_BASE + 0x13c)
+#define CHL_INT0 (PORT_BASE + 0x1b4)
+#define CHL_INT0_HOTPLUG_TOUT_OFF 0
+#define CHL_INT0_HOTPLUG_TOUT_MSK (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
+#define CHL_INT0_SL_RX_BCST_ACK_OFF 1
+#define CHL_INT0_SL_RX_BCST_ACK_MSK (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
+#define CHL_INT0_SL_PHY_ENABLE_OFF 2
+#define CHL_INT0_SL_PHY_ENABLE_MSK (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
+#define CHL_INT0_NOT_RDY_OFF 4
+#define CHL_INT0_NOT_RDY_MSK (0x1 << CHL_INT0_NOT_RDY_OFF)
+#define CHL_INT0_PHY_RDY_OFF 5
+#define CHL_INT0_PHY_RDY_MSK (0x1 << CHL_INT0_PHY_RDY_OFF)
+#define CHL_INT1 (PORT_BASE + 0x1b8)
+#define CHL_INT1_DMAC_TX_ECC_ERR_OFF 15
+#define CHL_INT1_DMAC_TX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
+#define CHL_INT1_DMAC_RX_ECC_ERR_OFF 17
+#define CHL_INT1_DMAC_RX_ECC_ERR_MSK (0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
+#define CHL_INT2 (PORT_BASE + 0x1bc)
+#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
+#define CHL_INT1_MSK (PORT_BASE + 0x1c4)
+#define CHL_INT2_MSK (PORT_BASE + 0x1c8)
+#define CHL_INT_COAL_EN (PORT_BASE + 0x1d0)
+#define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0)
+#define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4)
+#define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8)
+#define PHYCTRL_PHY_ENA_MSK (PORT_BASE + 0x2bc)
+#define SL_RX_BCAST_CHK_MSK (PORT_BASE + 0x2c0)
+#define PHYCTRL_OOB_RESTART_MSK (PORT_BASE + 0x2c4)
+
+/* HW dma structures */
+/* Delivery queue header */
+/* dw0 */
+#define CMD_HDR_ABORT_FLAG_OFF 0
+#define CMD_HDR_ABORT_FLAG_MSK (0x3 << CMD_HDR_ABORT_FLAG_OFF)
+#define CMD_HDR_ABORT_DEVICE_TYPE_OFF 2
+#define CMD_HDR_ABORT_DEVICE_TYPE_MSK (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
+#define CMD_HDR_RESP_REPORT_OFF 5
+#define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
+#define CMD_HDR_TLR_CTRL_OFF 6
+#define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF)
+#define CMD_HDR_PORT_OFF 18
+#define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF)
+#define CMD_HDR_PRIORITY_OFF 27
+#define CMD_HDR_PRIORITY_MSK (0x1 << CMD_HDR_PRIORITY_OFF)
+#define CMD_HDR_CMD_OFF 29
+#define CMD_HDR_CMD_MSK (0x7 << CMD_HDR_CMD_OFF)
+/* dw1 */
+#define CMD_HDR_UNCON_CMD_OFF 3
+#define CMD_HDR_DIR_OFF 5
+#define CMD_HDR_DIR_MSK (0x3 << CMD_HDR_DIR_OFF)
+#define CMD_HDR_RESET_OFF 7
+#define CMD_HDR_RESET_MSK (0x1 << CMD_HDR_RESET_OFF)
+#define CMD_HDR_VDTL_OFF 10
+#define CMD_HDR_VDTL_MSK (0x1 << CMD_HDR_VDTL_OFF)
+#define CMD_HDR_FRAME_TYPE_OFF 11
+#define CMD_HDR_FRAME_TYPE_MSK (0x1f << CMD_HDR_FRAME_TYPE_OFF)
+#define CMD_HDR_DEV_ID_OFF 16
+#define CMD_HDR_DEV_ID_MSK (0xffff << CMD_HDR_DEV_ID_OFF)
+/* dw2 */
+#define CMD_HDR_CFL_OFF 0
+#define CMD_HDR_CFL_MSK (0x1ff << CMD_HDR_CFL_OFF)
+#define CMD_HDR_NCQ_TAG_OFF 10
+#define CMD_HDR_NCQ_TAG_MSK (0x1f << CMD_HDR_NCQ_TAG_OFF)
+#define CMD_HDR_MRFL_OFF 15
+#define CMD_HDR_MRFL_MSK (0x1ff << CMD_HDR_MRFL_OFF)
+#define CMD_HDR_SG_MOD_OFF 24
+#define CMD_HDR_SG_MOD_MSK (0x3 << CMD_HDR_SG_MOD_OFF)
+/* dw3 */
+#define CMD_HDR_IPTT_OFF 0
+#define CMD_HDR_IPTT_MSK (0xffff << CMD_HDR_IPTT_OFF)
+/* dw6 */
+#define CMD_HDR_DIF_SGL_LEN_OFF 0
+#define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
+#define CMD_HDR_DATA_SGL_LEN_OFF 16
+#define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
+/* dw7 */
+#define CMD_HDR_ADDR_MODE_SEL_OFF 15
+#define CMD_HDR_ADDR_MODE_SEL_MSK (1 << CMD_HDR_ADDR_MODE_SEL_OFF)
+#define CMD_HDR_ABORT_IPTT_OFF 16
+#define CMD_HDR_ABORT_IPTT_MSK (0xffff << CMD_HDR_ABORT_IPTT_OFF)
+
+/* Completion header */
+/* dw0 */
+#define CMPLT_HDR_CMPLT_OFF 0
+#define CMPLT_HDR_CMPLT_MSK (0x3 << CMPLT_HDR_CMPLT_OFF)
+#define CMPLT_HDR_ERROR_PHASE_OFF 2
+#define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF)
+#define CMPLT_HDR_RSPNS_XFRD_OFF 10
+#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
+#define CMPLT_HDR_ERX_OFF 12
+#define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF)
+#define CMPLT_HDR_ABORT_STAT_OFF 13
+#define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF)
+/* abort_stat */
+#define STAT_IO_NOT_VALID 0x1
+#define STAT_IO_NO_DEVICE 0x2
+#define STAT_IO_COMPLETE 0x3
+#define STAT_IO_ABORTED 0x4
+/* dw1 */
+#define CMPLT_HDR_IPTT_OFF 0
+#define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF)
+#define CMPLT_HDR_DEV_ID_OFF 16
+#define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF)
+/* dw3 */
+#define CMPLT_HDR_IO_IN_TARGET_OFF 17
+#define CMPLT_HDR_IO_IN_TARGET_MSK (0x1 << CMPLT_HDR_IO_IN_TARGET_OFF)
+
+/* ITCT header */
+/* qw0 */
+#define ITCT_HDR_DEV_TYPE_OFF 0
+#define ITCT_HDR_DEV_TYPE_MSK (0x3 << ITCT_HDR_DEV_TYPE_OFF)
+#define ITCT_HDR_VALID_OFF 2
+#define ITCT_HDR_VALID_MSK (0x1 << ITCT_HDR_VALID_OFF)
+#define ITCT_HDR_MCR_OFF 5
+#define ITCT_HDR_MCR_MSK (0xf << ITCT_HDR_MCR_OFF)
+#define ITCT_HDR_VLN_OFF 9
+#define ITCT_HDR_VLN_MSK (0xf << ITCT_HDR_VLN_OFF)
+#define ITCT_HDR_SMP_TIMEOUT_OFF 16
+#define ITCT_HDR_AWT_CONTINUE_OFF 25
+#define ITCT_HDR_PORT_ID_OFF 28
+#define ITCT_HDR_PORT_ID_MSK (0xf << ITCT_HDR_PORT_ID_OFF)
+/* qw2 */
+#define ITCT_HDR_INLT_OFF 0
+#define ITCT_HDR_INLT_MSK (0xffffULL << ITCT_HDR_INLT_OFF)
+#define ITCT_HDR_RTOLT_OFF 48
+#define ITCT_HDR_RTOLT_MSK (0xffffULL << ITCT_HDR_RTOLT_OFF)
+
+struct hisi_sas_complete_v3_hdr {
+ __le32 dw0;
+ __le32 dw1;
+ __le32 act;
+ __le32 dw3;
+};
+
+struct hisi_sas_err_record_v3 {
+ /* dw0 */
+ __le32 trans_tx_fail_type;
+
+ /* dw1 */
+ __le32 trans_rx_fail_type;
+
+ /* dw2 */
+ __le16 dma_tx_err_type;
+ __le16 sipc_rx_err_type;
+
+ /* dw3 */
+ __le32 dma_rx_err_type;
+};
+
+#define RX_DATA_LEN_UNDERFLOW_OFF 6
+#define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF)
+
+#define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
+#define HISI_SAS_MSI_COUNT_V3_HW 32
+
+enum {
+ HISI_SAS_PHY_PHY_UPDOWN,
+ HISI_SAS_PHY_CHNL_INT,
+ HISI_SAS_PHY_INT_NR
+};
+
+#define DIR_NO_DATA 0
+#define DIR_TO_INI 1
+#define DIR_TO_DEVICE 2
+#define DIR_RESERVED 3
+
+#define CMD_IS_UNCONSTRAINT(cmd) \
+ ((cmd == ATA_CMD_READ_LOG_EXT) || \
+ (cmd == ATA_CMD_READ_LOG_DMA_EXT) || \
+ (cmd == ATA_CMD_DEV_RESET))
+
+static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
+{
+ void __iomem *regs = hisi_hba->regs + off;
+
+ return readl(regs);
+}
+
+static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
+{
+ void __iomem *regs = hisi_hba->regs + off;
+
+ return readl_relaxed(regs);
+}
+
+static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
+{
+ void __iomem *regs = hisi_hba->regs + off;
+
+ writel(val, regs);
+}
+
+static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no,
+ u32 off, u32 val)
+{
+ void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
+
+ writel(val, regs);
+}
+
+static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
+ int phy_no, u32 off)
+{
+ void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
+
+ return readl(regs);
+}
+
+static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int i;
+
+ /* Global registers init */
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
+ (u32)((1ULL << hisi_hba->queue_count) - 1));
+ hisi_sas_write32(hisi_hba, AXI_USER1, 0x0);
+ hisi_sas_write32(hisi_hba, AXI_USER2, 0x40000060);
+ hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
+ hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd);
+ hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
+ hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
+ hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
+ hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
+ hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
+ hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
+ hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
+ hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
+ hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
+ hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
+ for (i = 0; i < hisi_hba->queue_count; i++)
+ hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
+
+ hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1);
+ hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
+ hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff07fff);
+
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x801);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
+ hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff);
+ hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x83f801fc);
+ hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x0);
+ hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL, 0x199b4fa);
+ hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG,
+ 0xa0064);
+ hisi_sas_phy_write32(hisi_hba, i, SAS_STP_CON_TIMER_CFG,
+ 0xa0064);
+ }
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ /* Delivery queue */
+ hisi_sas_write32(hisi_hba,
+ DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
+ upper_32_bits(hisi_hba->cmd_hdr_dma[i]));
+
+ hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
+ lower_32_bits(hisi_hba->cmd_hdr_dma[i]));
+
+ hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
+ HISI_SAS_QUEUE_SLOTS);
+
+ /* Completion queue */
+ hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
+ upper_32_bits(hisi_hba->complete_hdr_dma[i]));
+
+ hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
+ lower_32_bits(hisi_hba->complete_hdr_dma[i]));
+
+ hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
+ HISI_SAS_QUEUE_SLOTS);
+ }
+
+ /* itct */
+ hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
+ lower_32_bits(hisi_hba->itct_dma));
+
+ hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
+ upper_32_bits(hisi_hba->itct_dma));
+
+ /* iost */
+ hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
+ lower_32_bits(hisi_hba->iost_dma));
+
+ hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
+ upper_32_bits(hisi_hba->iost_dma));
+
+ /* breakpoint */
+ hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
+ lower_32_bits(hisi_hba->breakpoint_dma));
+
+ hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
+ upper_32_bits(hisi_hba->breakpoint_dma));
+
+ /* SATA broken msg */
+ hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
+ lower_32_bits(hisi_hba->sata_breakpoint_dma));
+
+ hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
+ upper_32_bits(hisi_hba->sata_breakpoint_dma));
+
+ /* SATA initial fis */
+ hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
+ lower_32_bits(hisi_hba->initial_fis_dma));
+
+ hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
+ upper_32_bits(hisi_hba->initial_fis_dma));
+}
+
+static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+
+ cfg &= ~PHY_CFG_DC_OPT_MSK;
+ cfg |= 1 << PHY_CFG_DC_OPT_OFF;
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+}
+
+static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ struct sas_identify_frame identify_frame;
+ u32 *identify_buffer;
+
+ memset(&identify_frame, 0, sizeof(identify_frame));
+ identify_frame.dev_type = SAS_END_DEVICE;
+ identify_frame.frame_type = 0;
+ identify_frame._un1 = 1;
+ identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
+ identify_frame.target_bits = SAS_PROTOCOL_NONE;
+ memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
+ memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
+ identify_frame.phy_id = phy_no;
+ identify_buffer = (u32 *)(&identify_frame);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
+ __swab32(identify_buffer[0]));
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
+ __swab32(identify_buffer[1]));
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
+ __swab32(identify_buffer[2]));
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
+ __swab32(identify_buffer[3]));
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
+ __swab32(identify_buffer[4]));
+ hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
+ __swab32(identify_buffer[5]));
+}
+
+static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
+{
+ struct domain_device *device = sas_dev->sas_device;
+ struct device *dev = hisi_hba->dev;
+ u64 qw0, device_id = sas_dev->device_id;
+ struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
+ struct domain_device *parent_dev = device->parent;
+ struct asd_sas_port *sas_port = device->port;
+ struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+
+ memset(itct, 0, sizeof(*itct));
+
+ /* qw0 */
+ qw0 = 0;
+ switch (sas_dev->dev_type) {
+ case SAS_END_DEVICE:
+ case SAS_EDGE_EXPANDER_DEVICE:
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
+ break;
+ case SAS_SATA_DEV:
+ case SAS_SATA_PENDING:
+ if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+ qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
+ else
+ qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
+ break;
+ default:
+ dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
+ sas_dev->dev_type);
+ }
+
+ qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
+ (device->linkrate << ITCT_HDR_MCR_OFF) |
+ (1 << ITCT_HDR_VLN_OFF) |
+ (0xfa << ITCT_HDR_SMP_TIMEOUT_OFF) |
+ (1 << ITCT_HDR_AWT_CONTINUE_OFF) |
+ (port->id << ITCT_HDR_PORT_ID_OFF));
+ itct->qw0 = cpu_to_le64(qw0);
+
+ /* qw1 */
+ memcpy(&itct->sas_addr, device->sas_addr, SAS_ADDR_SIZE);
+ itct->sas_addr = __swab64(itct->sas_addr);
+
+ /* qw2 */
+ if (!dev_is_sata(device))
+ itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) |
+ (0x1ULL << ITCT_HDR_RTOLT_OFF));
+}
+
+static void free_device_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_device *sas_dev)
+{
+ u64 dev_id = sas_dev->device_id;
+ struct device *dev = hisi_hba->dev;
+ struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
+ u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+
+ /* clear the itct interrupt state */
+ if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ ENT_INT_SRC3_ITC_INT_MSK);
+
+ /* clear the itct table */
+ reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
+ reg_val |= ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
+ hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
+
+ udelay(10);
+ reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
+ if (ENT_INT_SRC3_ITC_INT_MSK & reg_val) {
+ dev_dbg(dev, "got clear ITCT done interrupt\n");
+
+ /* invalidate the itct state */
+ memset(itct, 0, sizeof(struct hisi_sas_itct));
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
+ ENT_INT_SRC3_ITC_INT_MSK);
+ hisi_hba->devices[dev_id].dev_type = SAS_PHY_UNUSED;
+ hisi_hba->devices[dev_id].dev_status = HISI_SAS_DEV_NORMAL;
+
+ /* clear the itct */
+ hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
+ dev_dbg(dev, "clear ITCT ok\n");
+ }
+}
+
+static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
+ struct domain_device *device)
+{
+ struct hisi_sas_slot *slot, *slot2;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ u32 cfg_abt_set_query_iptt;
+
+ cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba,
+ CFG_ABT_SET_QUERY_IPTT);
+ list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) {
+ cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK;
+ cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) |
+ (slot->idx << CFG_SET_ABORTED_IPTT_OFF);
+ hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
+ cfg_abt_set_query_iptt);
+ }
+ cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF);
+ hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
+ cfg_abt_set_query_iptt);
+ hisi_sas_write32(hisi_hba, CFG_ABT_SET_IPTT_DONE,
+ 1 << CFG_ABT_SET_IPTT_DONE_OFF);
+}
+
+static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
+{
+ init_reg_v3_hw(hisi_hba);
+
+ return 0;
+}
+
+static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+
+ cfg |= PHY_CFG_ENA_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+}
+
+static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
+
+ cfg &= ~PHY_CFG_ENA_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
+}
+
+static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ config_id_frame_v3_hw(hisi_hba, phy_no);
+ config_phy_opt_mode_v3_hw(hisi_hba, phy_no);
+ enable_phy_v3_hw(hisi_hba, phy_no);
+}
+
+static void stop_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ disable_phy_v3_hw(hisi_hba, phy_no);
+}
+
+static void start_phys_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int i;
+
+ for (i = 0; i < hisi_hba->n_phy; i++)
+ start_phy_v3_hw(hisi_hba, i);
+}
+
+static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ u32 txid_auto;
+
+ stop_phy_v3_hw(hisi_hba, phy_no);
+ if (phy->identify.device_type == SAS_END_DEVICE) {
+ txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
+ hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
+ txid_auto | TX_HARDRST_MSK);
+ }
+ msleep(100);
+ start_phy_v3_hw(hisi_hba, phy_no);
+}
+
+static enum sas_linkrate phy_get_max_linkrate_v3_hw(void)
+{
+ return SAS_LINK_RATE_12_0_GBPS;
+}
+
+static void phys_init_v3_hw(struct hisi_hba *hisi_hba)
+{
+ start_phys_v3_hw(hisi_hba);
+}
+
+static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
+{
+ u32 sl_control;
+
+ sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
+ sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
+ msleep(1);
+ sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
+ sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
+}
+
+static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
+{
+ int i, bitmap = 0;
+ u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
+
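+ /* PHY_PORT_NUM_MA holds a 4-bit port id per phy; build a bitmap
+ * of the phys attached to port_id
+ */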
+ for (i = 0; i < hisi_hba->n_phy; i++)
+ if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
+ bitmap |= 1 << i;
+
+ return bitmap;
+}
+
+/*
+ * The call path to this function and up to writing the write
+ * queue pointer should be safe from interruption.
+ */
+static int
+get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
+{
+ struct device *dev = hisi_hba->dev;
+ int queue = dq->id;
+ u32 r, w;
+
+ w = dq->wr_point;
+ r = hisi_sas_read32_relaxed(hisi_hba,
+ DLVRY_Q_0_RD_PTR + (queue * 0x14));
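+ /* the delivery queue is full when the write pointer sits one
+ * slot behind the read pointer
+ */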
+ if (r == (w + 1) % HISI_SAS_QUEUE_SLOTS) {
+ dev_warn(dev, "full queue=%d r=%d w=%d\n",
+ queue, r, w);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
+{
+ struct hisi_hba *hisi_hba = dq->hisi_hba;
+ int dlvry_queue = dq->slot_prep->dlvry_queue;
+ int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;
+
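+ /* advance the software write pointer and publish it to the
+ * delivery queue's write-pointer register
+ */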
+ dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
+ hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
+ dq->wr_point);
+}
+
+static int prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot,
+ struct hisi_sas_cmd_hdr *hdr,
+ struct scatterlist *scatter,
+ int n_elem)
+{
+ struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
+ struct device *dev = hisi_hba->dev;
+ struct scatterlist *sg;
+ int i;
+
+ if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
+ dev_err(dev, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
+ n_elem);
+ return -EINVAL;
+ }
+
+ for_each_sg(scatter, sg, n_elem, i) {
+ struct hisi_sas_sge *entry = &sge_page->sge[i];
+
+ entry->addr = cpu_to_le64(sg_dma_address(sg));
+ entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
+ entry->data_len = cpu_to_le32(sg_dma_len(sg));
+ entry->data_off = 0;
+ }
+
+ hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
+
+ hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
+
+ return 0;
+}
+
+static int prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot, int is_tmf,
+ struct hisi_sas_tmf_task *tmf)
+{
+ struct sas_task *task = slot->task;
+ struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+ struct domain_device *device = task->dev;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct hisi_sas_port *port = slot->port;
+ struct sas_ssp_task *ssp_task = &task->ssp_task;
+ struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
+ int has_data = 0, rc, priority = is_tmf;
+ u8 *buf_cmd;
+ u32 dw1 = 0, dw2 = 0;
+
+ hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
+ (2 << CMD_HDR_TLR_CTRL_OFF) |
+ (port->id << CMD_HDR_PORT_OFF) |
+ (priority << CMD_HDR_PRIORITY_OFF) |
+ (1 << CMD_HDR_CMD_OFF)); /* ssp */
+
+ dw1 = 1 << CMD_HDR_VDTL_OFF;
+ if (is_tmf) {
+ dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
+ dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
+ } else {
+ dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
+ switch (scsi_cmnd->sc_data_direction) {
+ case DMA_TO_DEVICE:
+ has_data = 1;
+ dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
+ break;
+ case DMA_FROM_DEVICE:
+ has_data = 1;
+ dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
+ break;
+ default:
+ dw1 &= ~CMD_HDR_DIR_MSK;
+ }
+ }
+
+ /* map itct entry */
+ dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
+ hdr->dw1 = cpu_to_le32(dw1);
+
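+ /* dw2: command frame length (CFL) and max response frame length
+ * (MRFL), both in dwords, plus the SG mode
+ */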
+ dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
+ + 3) / 4) << CMD_HDR_CFL_OFF) |
+ ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
+ (2 << CMD_HDR_SG_MOD_OFF);
+ hdr->dw2 = cpu_to_le32(dw2);
+ hdr->transfer_tags = cpu_to_le32(slot->idx);
+
+ if (has_data) {
+ rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
+ slot->n_elem);
+ if (rc)
+ return rc;
+ }
+
+ hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
+ hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
+ hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
+
+ buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
+ sizeof(struct ssp_frame_hdr);
+
+ memcpy(buf_cmd, &task->ssp_task.LUN, 8);
+ if (!is_tmf) {
+ buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
+ memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
+ } else {
+ buf_cmd[10] = tmf->tmf;
+ switch (tmf->tmf) {
+ case TMF_ABORT_TASK:
+ case TMF_QUERY_TASK:
+ buf_cmd[12] =
+ (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
+ buf_cmd[13] =
+ tmf->tag_of_task_to_be_managed & 0xff;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int prep_smp_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
+{
+ struct sas_task *task = slot->task;
+ struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+ struct domain_device *device = task->dev;
+ struct device *dev = hisi_hba->dev;
+ struct hisi_sas_port *port = slot->port;
+ struct scatterlist *sg_req, *sg_resp;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ dma_addr_t req_dma_addr;
+ unsigned int req_len, resp_len;
+ int elem, rc;
+
+ /*
+ * DMA-map SMP request, response buffers
+ */
+ /* req */
+ sg_req = &task->smp_task.smp_req;
+ elem = dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE);
+ if (!elem)
+ return -ENOMEM;
+ req_len = sg_dma_len(sg_req);
+ req_dma_addr = sg_dma_address(sg_req);
+
+ /* resp */
+ sg_resp = &task->smp_task.smp_resp;
+ elem = dma_map_sg(dev, sg_resp, 1, DMA_FROM_DEVICE);
+ if (!elem) {
+ rc = -ENOMEM;
+ goto err_out_req;
+ }
+ resp_len = sg_dma_len(sg_resp);
+ if ((req_len & 0x3) || (resp_len & 0x3)) {
+ rc = -EINVAL;
+ goto err_out_resp;
+ }
+
+ /* create header */
+ /* dw0 */
+ hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
+ (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
+ (2 << CMD_HDR_CMD_OFF)); /* smp */
+
+ /* map itct entry */
+ hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
+ (1 << CMD_HDR_FRAME_TYPE_OFF) |
+ (DIR_NO_DATA << CMD_HDR_DIR_OFF));
+
+ /* dw2 */
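+ /* CFL: request frame length in dwords, excluding the trailing
+ * 4-byte CRC
+ */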
+ hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
+ (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
+ CMD_HDR_MRFL_OFF));
+
+ hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);
+
+ hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
+ hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
+
+ return 0;
+
+err_out_resp:
+ dma_unmap_sg(dev, &slot->task->smp_task.smp_resp, 1,
+ DMA_FROM_DEVICE);
+err_out_req:
+ dma_unmap_sg(dev, &slot->task->smp_task.smp_req, 1,
+ DMA_TO_DEVICE);
+ return rc;
+}
+
+static int get_ncq_tag_v3_hw(struct sas_task *task, u32 *tag)
+{
+ struct ata_queued_cmd *qc = task->uldd_task;
+
+ if (qc) {
+ if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+ qc->tf.command == ATA_CMD_FPDMA_READ) {
+ *tag = qc->tag;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int prep_ata_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot)
+{
+ struct sas_task *task = slot->task;
+ struct domain_device *device = task->dev;
+ struct domain_device *parent_dev = device->parent;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+ struct asd_sas_port *sas_port = device->port;
+ struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+ u8 *buf_cmd;
+ int has_data = 0, rc = 0, hdr_tag = 0;
+ u32 dw1 = 0, dw2 = 0;
+
+ hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
+ if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
+ hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
+ else
+ hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);
+
+ switch (task->data_dir) {
+ case DMA_TO_DEVICE:
+ has_data = 1;
+ dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
+ break;
+ case DMA_FROM_DEVICE:
+ has_data = 1;
+ dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
+ break;
+ default:
+ dw1 &= ~CMD_HDR_DIR_MSK;
+ }
+
+ if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) &&
+ (task->ata_task.fis.control & ATA_SRST))
+ dw1 |= 1 << CMD_HDR_RESET_OFF;
+
+ dw1 |= (hisi_sas_get_ata_protocol(
+ task->ata_task.fis.command, task->data_dir))
+ << CMD_HDR_FRAME_TYPE_OFF;
+ dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
+
+ if (CMD_IS_UNCONSTRAINT(task->ata_task.fis.command))
+ dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF;
+
+ hdr->dw1 = cpu_to_le32(dw1);
+
+ /* dw2 */
+ if (task->ata_task.use_ncq && get_ncq_tag_v3_hw(task, &hdr_tag)) {
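+ /* the NCQ tag is carried in bits 7:3 of the FIS sector count */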
+ task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+ dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
+ }
+
+ dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
+ 2 << CMD_HDR_SG_MOD_OFF;
+ hdr->dw2 = cpu_to_le32(dw2);
+
+ /* dw3 */
+ hdr->transfer_tags = cpu_to_le32(slot->idx);
+
+ if (has_data) {
+ rc = prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
+ slot->n_elem);
+ if (rc)
+ return rc;
+ }
+
+ hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
+ hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
+ hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
+
+ buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot);
+
+ if (likely(!task->ata_task.device_control_reg_update))
+ task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
+ /* fill in command FIS */
+ memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
+
+ return 0;
+}
+
+static int prep_abort_v3_hw(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot,
+ int device_id, int abort_flag, int tag_to_abort)
+{
+ struct sas_task *task = slot->task;
+ struct domain_device *dev = task->dev;
+ struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+ struct hisi_sas_port *port = slot->port;
+
+ /* dw0 */
+ hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/
+ (port->id << CMD_HDR_PORT_OFF) |
+ ((dev_is_sata(dev) ? 1 : 0)
+ << CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
+ (abort_flag
+ << CMD_HDR_ABORT_FLAG_OFF));
+
+ /* dw1 */
+ hdr->dw1 = cpu_to_le32(device_id
+ << CMD_HDR_DEV_ID_OFF);
+
+ /* dw7 */
+ hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
+ hdr->transfer_tags = cpu_to_le32(slot->idx);
+
+ return 0;
+}
+
+static int phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
+{
+ int i, res = 0;
+ u32 context, port_id, link_rate, hard_phy_linkrate;
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct device *dev = hisi_hba->dev;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
+
+ port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
+ port_id = (port_id >> (4 * phy_no)) & 0xf;
+ link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
+ link_rate = (link_rate >> (phy_no * 4)) & 0xf;
+
+ if (port_id == 0xf) {
+ dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
+ res = IRQ_NONE;
+ goto end;
+ }
+ sas_phy->linkrate = link_rate;
+ hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
+ HARD_PHY_LINKRATE);
+ phy->maximum_linkrate = hard_phy_linkrate & 0xf;
+ phy->minimum_linkrate = (hard_phy_linkrate >> 4) & 0xf;
+ phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+
+ /* Check for SATA dev */
+ context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
+ if (context & (1 << phy_no)) {
+ struct hisi_sas_initial_fis *initial_fis;
+ struct dev_to_host_fis *fis;
+ u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
+
+ dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
+ initial_fis = &hisi_hba->initial_fis[phy_no];
+ fis = &initial_fis->fis;
+ sas_phy->oob_mode = SATA_OOB_MODE;
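+ /* a directly attached SATA device has no SAS address,
+ * so fabricate one from the phy number
+ */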
+ attached_sas_addr[0] = 0x50;
+ attached_sas_addr[7] = phy_no;
+ memcpy(sas_phy->attached_sas_addr,
+ attached_sas_addr,
+ SAS_ADDR_SIZE);
+ memcpy(sas_phy->frame_rcvd, fis,
+ sizeof(struct dev_to_host_fis));
+ phy->phy_type |= PORT_TYPE_SATA;
+ phy->identify.device_type = SAS_SATA_DEV;
+ phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
+ phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
+ } else {
+ u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
+ struct sas_identify_frame *id =
+ (struct sas_identify_frame *)frame_rcvd;
+
+ dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
+ for (i = 0; i < 6; i++) {
+ u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
+ RX_IDAF_DWORD0 + (i * 4));
+ frame_rcvd[i] = __swab32(idaf);
+ }
+ sas_phy->oob_mode = SAS_OOB_MODE;
+ memcpy(sas_phy->attached_sas_addr,
+ &id->sas_addr,
+ SAS_ADDR_SIZE);
+ phy->phy_type |= PORT_TYPE_SAS;
+ phy->identify.device_type = id->dev_type;
+ phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
+ if (phy->identify.device_type == SAS_END_DEVICE)
+ phy->identify.target_port_protocols =
+ SAS_PROTOCOL_SSP;
+ else if (phy->identify.device_type != SAS_PHY_UNUSED)
+ phy->identify.target_port_protocols =
+ SAS_PROTOCOL_SMP;
+ }
+
+ phy->port_id = port_id;
+ phy->phy_attached = 1;
+ queue_work(hisi_hba->wq, &phy->phyup_ws);
+
+end:
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
+ CHL_INT0_SL_PHY_ENABLE_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);
+
+ return res;
+}
+
+static int phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
+{
+ int res = 0;
+ u32 phy_state, sl_ctrl, txid_auto;
+ struct device *dev = hisi_hba->dev;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
+
+ phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
+ dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
+ hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
+
+ sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
+ sl_ctrl & ~SL_CTA_MSK);
+
+ txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
+ hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
+ txid_auto | CT3_MSK);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);
+
+ return res;
+}
+
+static void phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
+{
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct asd_sas_phy *sas_phy = &phy->sas_phy;
+ struct sas_ha_struct *sas_ha = &hisi_hba->sha;
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
+ sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
+ CHL_INT0_SL_RX_BCST_ACK_MSK);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
+}
+
+static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
+ u32 irq_msk;
+ int phy_no = 0;
+ irqreturn_t res = IRQ_NONE;
+
+ irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
+ & 0x11111111;
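+ /* bit 0 of each phy's 4-bit field flags a CHL_INT0 event
+ * (phy up/down/broadcast)
+ */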
+ while (irq_msk) {
+ if (irq_msk & 1) {
+ u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT0);
+ u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
+ int rdy = phy_state & (1 << phy_no);
+
+ if (rdy) {
+ if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
+ /* phy up */
+ if (phy_up_v3_hw(phy_no, hisi_hba)
+ == IRQ_HANDLED)
+ res = IRQ_HANDLED;
+ if (irq_value & CHL_INT0_SL_RX_BCST_ACK_MSK)
+ /* phy bcast */
+ phy_bcast_v3_hw(phy_no, hisi_hba);
+ } else {
+ if (irq_value & CHL_INT0_NOT_RDY_MSK)
+ /* phy down */
+ if (phy_down_v3_hw(phy_no, hisi_hba)
+ == IRQ_HANDLED)
+ res = IRQ_HANDLED;
+ }
+ }
+ irq_msk >>= 4;
+ phy_no++;
+ }
+
+ return res;
+}
+
+static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
+{
+ struct hisi_hba *hisi_hba = p;
+ struct device *dev = hisi_hba->dev;
+ u32 ent_msk, ent_tmp, irq_msk;
+ int phy_no = 0;
+
+ ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
+ ent_tmp = ent_msk;
+ ent_msk |= ENT_INT_SRC_MSK3_ENT95_MSK_MSK;
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_msk);
+
+ irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
+ & 0xeeeeeeee;
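+ /* bits 1, 2 and 3 of each phy's nibble correspond to CHL_INT0,
+ * CHL_INT1 and CHL_INT2 respectively
+ */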
+
+ while (irq_msk) {
+ u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT0);
+ u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT1);
+ u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
+ CHL_INT2);
+
+ if ((irq_msk & (4 << (phy_no * 4))) &&
+ irq_value1) {
+ if (irq_value1 & (CHL_INT1_DMAC_RX_ECC_ERR_MSK |
+ CHL_INT1_DMAC_TX_ECC_ERR_MSK))
+ panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
+ dev_name(dev), irq_value1);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT1, irq_value1);
+ }
+
+ if (irq_msk & (8 << (phy_no * 4)) && irq_value2)
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT2, irq_value2);
+
+
+ if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ CHL_INT0, irq_value0
+ & (~CHL_INT0_HOTPLUG_TOUT_MSK)
+ & (~CHL_INT0_SL_PHY_ENABLE_MSK)
+ & (~CHL_INT0_NOT_RDY_MSK));
+ }
+ irq_msk &= ~(0xe << (phy_no * 4));
+ phy_no++;
+ }
+
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, ent_tmp);
+
+ return IRQ_HANDLED;
+}
+
+static void
+slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
+ struct hisi_sas_slot *slot)
+{
+ struct task_status_struct *ts = &task->task_status;
+ struct hisi_sas_complete_v3_hdr *complete_queue =
+ hisi_hba->complete_hdr[slot->cmplt_queue];
+ struct hisi_sas_complete_v3_hdr *complete_hdr =
+ &complete_queue[slot->cmplt_queue_slot];
+ struct hisi_sas_err_record_v3 *record =
+ hisi_sas_status_buf_addr_mem(slot);
+ u32 dma_rx_err_type = record->dma_rx_err_type;
+ u32 trans_tx_fail_type = record->trans_tx_fail_type;
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP:
+ if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
+ ts->residual = trans_tx_fail_type;
+ ts->stat = SAS_DATA_UNDERRUN;
+ } else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
+ ts->stat = SAS_QUEUE_FULL;
+ slot->abort = 1;
+ } else {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ }
+ break;
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
+ ts->residual = trans_tx_fail_type;
+ ts->stat = SAS_DATA_UNDERRUN;
+ } else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
+ ts->stat = SAS_PHY_DOWN;
+ slot->abort = 1;
+ } else {
+ ts->stat = SAS_OPEN_REJECT;
+ ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+ }
+ hisi_sas_sata_done(task, slot);
+ break;
+ case SAS_PROTOCOL_SMP:
+ ts->stat = SAM_STAT_CHECK_CONDITION;
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
+{
+ struct sas_task *task = slot->task;
+ struct hisi_sas_device *sas_dev;
+ struct device *dev = hisi_hba->dev;
+ struct task_status_struct *ts;
+ struct domain_device *device;
+ enum exec_status sts;
+ struct hisi_sas_complete_v3_hdr *complete_queue =
+ hisi_hba->complete_hdr[slot->cmplt_queue];
+ struct hisi_sas_complete_v3_hdr *complete_hdr =
+ &complete_queue[slot->cmplt_queue_slot];
+ int aborted;
+ unsigned long flags;
+
+ if (unlikely(!task || !task->lldd_task || !task->dev))
+ return -EINVAL;
+
+ ts = &task->task_status;
+ device = task->dev;
+ sas_dev = device->lldd_dev;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
+ task->task_state_flags &=
+ ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ memset(ts, 0, sizeof(*ts));
+ ts->resp = SAS_TASK_COMPLETE;
+ if (unlikely(aborted)) {
+ ts->stat = SAS_ABORTED_TASK;
+ hisi_sas_slot_task_free(hisi_hba, task, slot);
+ return -1;
+ }
+
+ if (unlikely(!sas_dev)) {
+ dev_dbg(dev, "slot complete: port has not device\n");
+ ts->stat = SAS_PHY_DOWN;
+ goto out;
+ }
+
+ /*
+ * Use SAS+TMF status codes
+ */
+ switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
+ >> CMPLT_HDR_ABORT_STAT_OFF) {
+ case STAT_IO_ABORTED:
+ /* this IO has been aborted by abort command */
+ ts->stat = SAS_ABORTED_TASK;
+ goto out;
+ case STAT_IO_COMPLETE:
+ /* internal abort command complete */
+ ts->stat = TMF_RESP_FUNC_SUCC;
+ goto out;
+ case STAT_IO_NO_DEVICE:
+ ts->stat = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ case STAT_IO_NOT_VALID:
+ /*
+ * abort single IO, the controller can't find the IO
+ */
+ ts->stat = TMF_RESP_FUNC_FAILED;
+ goto out;
+ default:
+ break;
+ }
+
+ /* check for erroneous completion */
+ if ((complete_hdr->dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
+ slot_err_v3_hw(hisi_hba, task, slot);
+ if (unlikely(slot->abort))
+ return ts->stat;
+ goto out;
+ }
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP: {
+ struct ssp_response_iu *iu =
+ hisi_sas_status_buf_addr_mem(slot) +
+ sizeof(struct hisi_sas_err_record);
+
+ sas_ssp_task_response(dev, task, iu);
+ break;
+ }
+ case SAS_PROTOCOL_SMP: {
+ struct scatterlist *sg_resp = &task->smp_task.smp_resp;
+ void *to;
+
+ ts->stat = SAM_STAT_GOOD;
+ to = kmap_atomic(sg_page(sg_resp));
+
+ dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
+ DMA_TO_DEVICE);
+ memcpy(to + sg_resp->offset,
+ hisi_sas_status_buf_addr_mem(slot) +
+ sizeof(struct hisi_sas_err_record),
+ sg_dma_len(sg_resp));
+ kunmap_atomic(to);
+ break;
+ }
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ ts->stat = SAM_STAT_GOOD;
+ hisi_sas_sata_done(task, slot);
+ break;
+ default:
+ ts->stat = SAM_STAT_CHECK_CONDITION;
+ break;
+ }
+
+ if (!slot->port->port_attached) {
+ dev_err(dev, "slot complete: port %d has removed\n",
+ slot->port->sas_port.id);
+ ts->stat = SAS_PHY_DOWN;
+ }
+
+out:
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_slot_task_free(hisi_hba, task, slot);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ sts = ts->stat;
+
+ if (task->task_done)
+ task->task_done(task);
+
+ return sts;
+}
+
+static void cq_tasklet_v3_hw(unsigned long val)
+{
+ struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
+ struct hisi_hba *hisi_hba = cq->hisi_hba;
+ struct hisi_sas_slot *slot;
+ struct hisi_sas_itct *itct;
+ struct hisi_sas_complete_v3_hdr *complete_queue;
+ u32 rd_point = cq->rd_point, wr_point, dev_id;
+ int queue = cq->id;
+ struct hisi_sas_dq *dq = &hisi_hba->dq[queue];
+
+ complete_queue = hisi_hba->complete_hdr[queue];
+
+ spin_lock(&dq->lock);
+ wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
+ (0x14 * queue));
+
+ while (rd_point != wr_point) {
+ struct hisi_sas_complete_v3_hdr *complete_hdr;
+ int iptt;
+
+ complete_hdr = &complete_queue[rd_point];
+
+ /* Check for NCQ completion */
+ if (complete_hdr->act) {
+ u32 act_tmp = complete_hdr->act;
+ int ncq_tag_count = ffs(act_tmp);
+
+ dev_id = (complete_hdr->dw1 & CMPLT_HDR_DEV_ID_MSK) >>
+ CMPLT_HDR_DEV_ID_OFF;
+ itct = &hisi_hba->itct[dev_id];
+
+ /* The NCQ tags are held in the itct header */
+ while (ncq_tag_count) {
+ __le64 *ncq_tag = &itct->qw4_15[0];
+
+ ncq_tag_count -= 1;
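+ /* each 64-bit qword of qw4_15 packs five 12-bit IPTT values */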
+ iptt = (ncq_tag[ncq_tag_count / 5]
+ >> (ncq_tag_count % 5) * 12) & 0xfff;
+
+ slot = &hisi_hba->slot_info[iptt];
+ slot->cmplt_queue_slot = rd_point;
+ slot->cmplt_queue = queue;
+ slot_complete_v3_hw(hisi_hba, slot);
+
+ act_tmp &= ~(1 << ncq_tag_count);
+ ncq_tag_count = ffs(act_tmp);
+ }
+ } else {
+ iptt = (complete_hdr->dw1) & CMPLT_HDR_IPTT_MSK;
+ slot = &hisi_hba->slot_info[iptt];
+ slot->cmplt_queue_slot = rd_point;
+ slot->cmplt_queue = queue;
+ slot_complete_v3_hw(hisi_hba, slot);
+ }
+
+ if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
+ rd_point = 0;
+ }
+
+ /* update rd_point */
+ cq->rd_point = rd_point;
+ hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
+ spin_unlock(&dq->lock);
+}
+
+static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
+{
+ struct hisi_sas_cq *cq = p;
+ struct hisi_hba *hisi_hba = cq->hisi_hba;
+ int queue = cq->id;
+
+ hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
+
+ tasklet_schedule(&cq->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
+{
+ struct device *dev = hisi_hba->dev;
+ struct pci_dev *pdev = hisi_hba->pci_dev;
+ int vectors, rc;
+ int i, k;
+ int max_msi = HISI_SAS_MSI_COUNT_V3_HW;
+
+ vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, 1,
+ max_msi, PCI_IRQ_MSI);
+ if (vectors < max_msi) {
+ dev_err(dev, "could not allocate all msi (%d)\n", vectors);
+ return -ENOENT;
+ }
+
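+ /* vector 1: phy up/down/bcast, vector 2: channel interrupts,
+ * vectors 16 onwards: one per completion queue
+ */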
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
+ int_phy_up_down_bcast_v3_hw, 0,
+ DRV_NAME " phy", hisi_hba);
+ if (rc) {
+ dev_err(dev, "could not request phy interrupt, rc=%d\n", rc);
+ rc = -ENOENT;
+ goto free_irq_vectors;
+ }
+
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, 2),
+ int_chnl_int_v3_hw, 0,
+ DRV_NAME " channel", hisi_hba);
+ if (rc) {
+ dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc);
+ rc = -ENOENT;
+ goto free_phy_irq;
+ }
+
+ /* Init tasklets for cq only */
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+ struct tasklet_struct *t = &cq->tasklet;
+
+ rc = devm_request_irq(dev, pci_irq_vector(pdev, i+16),
+ cq_interrupt_v3_hw, 0,
+ DRV_NAME " cq", cq);
+ if (rc) {
+ dev_err(dev,
+ "could not request cq%d interrupt, rc=%d\n",
+ i, rc);
+ rc = -ENOENT;
+ goto free_cq_irqs;
+ }
+
+ tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq);
+ }
+
+ return 0;
+
+free_cq_irqs:
+ for (k = 0; k < i; k++) {
+ struct hisi_sas_cq *cq = &hisi_hba->cq[k];
+
+ free_irq(pci_irq_vector(pdev, k+16), cq);
+ }
+ free_irq(pci_irq_vector(pdev, 2), hisi_hba);
+free_phy_irq:
+ free_irq(pci_irq_vector(pdev, 1), hisi_hba);
+free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+ return rc;
+}
+
+static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
+{
+ int rc;
+
+ rc = hw_init_v3_hw(hisi_hba);
+ if (rc)
+ return rc;
+
+ rc = interrupt_init_v3_hw(hisi_hba);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static const struct hisi_sas_hw hisi_sas_v3_hw = {
+ .hw_init = hisi_sas_v3_init,
+ .setup_itct = setup_itct_v3_hw,
+ .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
+ .get_wideport_bitmap = get_wideport_bitmap_v3_hw,
+ .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
+ .free_device = free_device_v3_hw,
+ .sl_notify = sl_notify_v3_hw,
+ .prep_ssp = prep_ssp_v3_hw,
+ .prep_smp = prep_smp_v3_hw,
+ .prep_stp = prep_ata_v3_hw,
+ .prep_abort = prep_abort_v3_hw,
+ .get_free_slot = get_free_slot_v3_hw,
+ .start_delivery = start_delivery_v3_hw,
+ .slot_complete = slot_complete_v3_hw,
+ .phys_init = phys_init_v3_hw,
+ .phy_enable = enable_phy_v3_hw,
+ .phy_disable = disable_phy_v3_hw,
+ .phy_hard_reset = phy_hard_reset_v3_hw,
+ .phy_get_max_linkrate = phy_get_max_linkrate_v3_hw,
+ .dereg_device = dereg_device_v3_hw,
+};
+
+static struct Scsi_Host *
+hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost;
+ struct hisi_hba *hisi_hba;
+ struct device *dev = &pdev->dev;
+
+ shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
+ if (!shost)
+ goto err_out;
+ hisi_hba = shost_priv(shost);
+
+ hisi_hba->hw = &hisi_sas_v3_hw;
+ hisi_hba->pci_dev = pdev;
+ hisi_hba->dev = dev;
+ hisi_hba->shost = shost;
+ SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
+
+ init_timer(&hisi_hba->timer);
+
+ if (hisi_sas_get_fw_info(hisi_hba) < 0)
+ goto err_out;
+
+ if (hisi_sas_alloc(hisi_hba, shost)) {
+ hisi_sas_free(hisi_hba);
+ goto err_out;
+ }
+
+ return shost;
+err_out:
+ dev_err(dev, "shost alloc failed\n");
+ return NULL;
+}
+
+static int
+hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct Scsi_Host *shost;
+ struct hisi_hba *hisi_hba;
+ struct device *dev = &pdev->dev;
+ struct asd_sas_phy **arr_phy;
+ struct asd_sas_port **arr_port;
+ struct sas_ha_struct *sha;
+ int rc, phy_nr, port_nr, i;
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err_out;
+
+ pci_set_master(pdev);
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out_disable_device;
+
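+ /* prefer 64-bit DMA masks, fall back to 32-bit if unavailable */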
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
+ (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
+ if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
+ (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
+ dev_err(dev, "No usable DMA addressing method\n");
+ rc = -EIO;
+ goto err_out_regions;
+ }
+ }
+
+ shost = hisi_sas_shost_alloc_pci(pdev);
+ if (!shost) {
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+
+ sha = SHOST_TO_SAS_HA(shost);
+ hisi_hba = shost_priv(shost);
+ dev_set_drvdata(dev, sha);
+
+ hisi_hba->regs = pcim_iomap(pdev, 5, 0);
+ if (!hisi_hba->regs) {
+ dev_err(dev, "cannot map register.\n");
+ rc = -ENOMEM;
+ goto err_out_ha;
+ }
+
+ phy_nr = port_nr = hisi_hba->n_phy;
+
+ arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
+ arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
+ if (!arr_phy || !arr_port) {
+ rc = -ENOMEM;
+ goto err_out_ha;
+ }
+
+ sha->sas_phy = arr_phy;
+ sha->sas_port = arr_port;
+ sha->core.shost = shost;
+ sha->lldd_ha = hisi_hba;
+
+ shost->transportt = hisi_sas_stt;
+ shost->max_id = HISI_SAS_MAX_DEVICES;
+ shost->max_lun = ~0;
+ shost->max_channel = 1;
+ shost->max_cmd_len = 16;
+ shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
+ shost->can_queue = hisi_hba->hw->max_command_entries;
+ shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+
+ sha->sas_ha_name = DRV_NAME;
+ sha->dev = dev;
+ sha->lldd_module = THIS_MODULE;
+ sha->sas_addr = &hisi_hba->sas_addr[0];
+ sha->num_phys = hisi_hba->n_phy;
+ sha->core.shost = hisi_hba->shost;
+
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
+ sha->sas_port[i] = &hisi_hba->port[i].sas_port;
+ }
+
+ hisi_sas_init_add(hisi_hba);
+
+ rc = scsi_add_host(shost, dev);
+ if (rc)
+ goto err_out_ha;
+
+ rc = sas_register_ha(sha);
+ if (rc)
+ goto err_out_register_ha;
+
+ rc = hisi_hba->hw->hw_init(hisi_hba);
+ if (rc)
+ goto err_out_register_ha;
+
+ scsi_scan_host(shost);
+
+ return 0;
+
+err_out_register_ha:
+ scsi_remove_host(shost);
+err_out_ha:
+ scsi_host_put(shost);
+err_out_regions:
+ pci_release_regions(pdev);
+err_out_disable_device:
+ pci_disable_device(pdev);
+err_out:
+ return rc;
+}
+
+static void
+hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
+{
+ int i;
+
+ free_irq(pci_irq_vector(pdev, 1), hisi_hba);
+ free_irq(pci_irq_vector(pdev, 2), hisi_hba);
+ for (i = 0; i < hisi_hba->queue_count; i++) {
+ struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+
+ free_irq(pci_irq_vector(pdev, i+16), cq);
+ }
+ pci_free_irq_vectors(pdev);
+}
+
+static void hisi_sas_v3_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sas_ha_struct *sha = dev_get_drvdata(dev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+
+ sas_unregister_ha(sha);
+ sas_remove_host(sha->core.shost);
+
+ hisi_sas_free(hisi_hba);
+ hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+enum {
+ /* instances of the controller */
+ hip08,
+};
+
+static const struct pci_device_id sas_v3_pci_table[] = {
+ { PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
+ {}
+};
+
+static struct pci_driver sas_v3_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = sas_v3_pci_table,
+ .probe = hisi_sas_v3_probe,
+ .remove = hisi_sas_v3_remove,
+};
+
+module_pci_driver(sas_v3_pci_driver);
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Garry <[email protected]>");
+MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 73daace478cb..8914eab84337 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -60,7 +60,7 @@
* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
* with an optional trailing '-' followed by a byte value (0-255).
*/
-#define HPSA_DRIVER_VERSION "3.4.18-0"
+#define HPSA_DRIVER_VERSION "3.4.20-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
@@ -258,7 +258,6 @@ static int hpsa_scan_finished(struct Scsi_Host *sh,
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
-static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);
@@ -326,7 +325,7 @@ static inline bool hpsa_is_cmd_idle(struct CommandList *c)
static inline bool hpsa_is_pending_event(struct CommandList *c)
{
- return c->abort_pending || c->reset_pending;
+ return c->reset_pending;
}
/* extract sense key, asc, and ascq from sense data. -1 means invalid. */
@@ -581,12 +580,6 @@ static u32 soft_unresettable_controller[] = {
0x409D0E11, /* Smart Array 6400 EM */
};
-static u32 needs_abort_tags_swizzled[] = {
- 0x323D103C, /* Smart Array P700m */
- 0x324a103C, /* Smart Array P712m */
- 0x324b103C, /* SmartArray P711m */
-};
-
static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
int i;
@@ -615,12 +608,6 @@ static int ctlr_is_resettable(u32 board_id)
ctlr_is_soft_resettable(board_id);
}
-static int ctlr_needs_abort_tags_swizzled(u32 board_id)
-{
- return board_id_in_array(needs_abort_tags_swizzled,
- ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
-}
-
static ssize_t host_show_resettable(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -928,8 +915,8 @@ static struct device_attribute *hpsa_shost_attrs[] = {
NULL,
};
-#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_ABORTS + \
- HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)
+#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\
+ HPSA_MAX_CONCURRENT_PASSTHRUS)
static struct scsi_host_template hpsa_driver_template = {
.module = THIS_MODULE,
@@ -941,7 +928,6 @@ static struct scsi_host_template hpsa_driver_template = {
.change_queue_depth = hpsa_change_queue_depth,
.this_id = -1,
.use_clustering = ENABLE_CLUSTERING,
- .eh_abort_handler = hpsa_eh_abort_handler,
.eh_device_reset_handler = hpsa_eh_device_reset_handler,
.ioctl = hpsa_ioctl,
.slave_alloc = hpsa_slave_alloc,
@@ -1110,6 +1096,7 @@ static int is_firmware_flash_cmd(u8 *cdb)
*/
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
+#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
struct CommandList *c)
{
@@ -1859,10 +1846,13 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h,
* A reset can cause a device status to change
* re-schedule the scan to see what happened.
*/
+ spin_lock_irqsave(&h->reset_lock, flags);
if (h->reset_in_progress) {
h->drv_req_rescan = 1;
+ spin_unlock_irqrestore(&h->reset_lock, flags);
return;
}
+ spin_unlock_irqrestore(&h->reset_lock, flags);
added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
@@ -2066,10 +2056,13 @@ static int hpsa_slave_configure(struct scsi_device *sdev)
sd = sdev->hostdata;
sdev->no_uld_attach = !sd || !sd->expose_device;
- if (sd)
- queue_depth = sd->queue_depth != 0 ?
- sd->queue_depth : sdev->host->can_queue;
- else
+ if (sd) {
+ if (sd->external)
+ queue_depth = EXTERNAL_QD;
+ else
+ queue_depth = sd->queue_depth != 0 ?
+ sd->queue_depth : sdev->host->can_queue;
+ } else
queue_depth = sdev->host->can_queue;
scsi_change_queue_depth(sdev, queue_depth);
@@ -2354,26 +2347,12 @@ static void hpsa_cmd_resolve_events(struct ctlr_info *h,
bool do_wake = false;
/*
- * Prevent the following race in the abort handler:
- *
- * 1. LLD is requested to abort a SCSI command
- * 2. The SCSI command completes
- * 3. The struct CommandList associated with step 2 is made available
- * 4. New I/O request to LLD to another LUN re-uses struct CommandList
- * 5. Abort handler follows scsi_cmnd->host_scribble and
- * finds struct CommandList and tries to aborts it
- * Now we have aborted the wrong command.
- *
- * Reset c->scsi_cmd here so that the abort or reset handler will know
+ * Reset c->scsi_cmd here so that the reset handler will know
* this command has completed. Then, check to see if the handler is
* waiting for this command, and, if so, wake it.
*/
c->scsi_cmd = SCSI_CMD_IDLE;
mb(); /* Declare command idle before checking for pending events. */
- if (c->abort_pending) {
- do_wake = true;
- c->abort_pending = false;
- }
if (c->reset_pending) {
unsigned long flags;
struct hpsa_scsi_dev_t *dev;
@@ -2416,20 +2395,6 @@ static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
}
-static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
-{
- cmd->result = DID_ABORT << 16;
-}
-
-static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
- struct scsi_cmnd *cmd)
-{
- hpsa_set_scsi_cmd_aborted(cmd);
- dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
- c->Request.CDB, c->err_info->ScsiStatus);
- hpsa_cmd_resolve_and_free(h, c);
-}
-
static void process_ioaccel2_completion(struct ctlr_info *h,
struct CommandList *c, struct scsi_cmnd *cmd,
struct hpsa_scsi_dev_t *dev)
@@ -2554,12 +2519,9 @@ static void complete_scsi_command(struct CommandList *cp)
return hpsa_cmd_free_and_done(h, cp, cmd);
}
- if ((unlikely(hpsa_is_pending_event(cp)))) {
+ if ((unlikely(hpsa_is_pending_event(cp))))
if (cp->reset_pending)
return hpsa_cmd_free_and_done(h, cp, cmd);
- if (cp->abort_pending)
- return hpsa_cmd_abort_and_free(h, cp, cmd);
- }
if (cp->cmd_type == CMD_IOACCEL2)
return process_ioaccel2_completion(h, cp, cmd, dev);
@@ -2679,8 +2641,8 @@ static void complete_scsi_command(struct CommandList *cp)
cp->Request.CDB);
break;
case CMD_ABORTED:
- /* Return now to avoid calling scsi_done(). */
- return hpsa_cmd_abort_and_free(h, cp, cmd);
+ cmd->result = DID_ABORT << 16;
+ break;
case CMD_ABORT_FAILED:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
@@ -3090,7 +3052,7 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
if (unlikely(rc))
atomic_set(&dev->reset_cmds_out, 0);
else
- wait_for_device_to_become_ready(h, scsi3addr, 0);
+ rc = wait_for_device_to_become_ready(h, scsi3addr, 0);
mutex_unlock(&h->reset_mutex);
return rc;
@@ -3165,7 +3127,7 @@ static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
le16_to_cpu(map_buff->layout_map_count));
dev_info(&h->pdev->dev, "flags = 0x%x\n",
le16_to_cpu(map_buff->flags));
- dev_info(&h->pdev->dev, "encrypytion = %s\n",
+ dev_info(&h->pdev->dev, "encryption = %s\n",
le16_to_cpu(map_buff->flags) &
RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
dev_info(&h->pdev->dev, "dekindex = %u\n",
@@ -3353,6 +3315,11 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
+ if (encl_dev->target == -1 || encl_dev->lun == -1) {
+ rc = IO_OK;
+ goto out;
+ }
+
if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
rc = IO_OK;
goto out;
@@ -3781,53 +3748,6 @@ static unsigned char hpsa_volume_offline(struct ctlr_info *h,
return HPSA_LV_OK;
}
-/*
- * Find out if a logical device supports aborts by simply trying one.
- * Smart Array may claim not to support aborts on logical drives, but
- * if a MSA2000 * is connected, the drives on that will be presented
- * by the Smart Array as logical drives, and aborts may be sent to
- * those devices successfully. So the simplest way to find out is
- * to simply try an abort and see how the device responds.
- */
-static int hpsa_device_supports_aborts(struct ctlr_info *h,
- unsigned char *scsi3addr)
-{
- struct CommandList *c;
- struct ErrorInfo *ei;
- int rc = 0;
-
- u64 tag = (u64) -1; /* bogus tag */
-
- /* Assume that physical devices support aborts */
- if (!is_logical_dev_addr_mode(scsi3addr))
- return 1;
-
- c = cmd_alloc(h);
-
- (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
- (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
- DEFAULT_TIMEOUT);
- /* no unmap needed here because no data xfer. */
- ei = c->err_info;
- switch (ei->CommandStatus) {
- case CMD_INVALID:
- rc = 0;
- break;
- case CMD_UNABORTABLE:
- case CMD_ABORT_FAILED:
- rc = 1;
- break;
- case CMD_TMF_STATUS:
- rc = hpsa_evaluate_tmf_status(h, c);
- break;
- default:
- rc = 0;
- break;
- }
- cmd_free(h, c);
- return rc;
-}
-
static int hpsa_update_device_info(struct ctlr_info *h,
unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
unsigned char *is_OBDR_device)
@@ -3907,6 +3827,9 @@ static int hpsa_update_device_info(struct ctlr_info *h,
this_device->queue_depth = h->nr_cmds;
}
+ if (this_device->external)
+ this_device->queue_depth = EXTERNAL_QD;
+
if (is_OBDR_device) {
/* See if this is a One-Button-Disaster-Recovery device
* by looking for "$DR-10" at offset 43 in inquiry data.
@@ -3924,31 +3847,6 @@ bail_out:
return rc;
}
-static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
- struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
-{
- unsigned long flags;
- int rc, entry;
- /*
- * See if this device supports aborts. If we already know
- * the device, we already know if it supports aborts, otherwise
- * we have to find out if it supports aborts by trying one.
- */
- spin_lock_irqsave(&h->devlock, flags);
- rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
- if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
- entry >= 0 && entry < h->ndevices) {
- dev->supports_aborts = h->dev[entry]->supports_aborts;
- spin_unlock_irqrestore(&h->devlock, flags);
- } else {
- spin_unlock_irqrestore(&h->devlock, flags);
- dev->supports_aborts =
- hpsa_device_supports_aborts(h, scsi3addr);
- if (dev->supports_aborts < 0)
- dev->supports_aborts = 0;
- }
-}
-
/*
* Helper function to assign bus, target, lun mapping of devices.
* Logical drive target and lun are assigned at this time, but
@@ -3986,35 +3884,6 @@ static void figure_bus_target_lun(struct ctlr_info *h,
0, lunid & 0x3fff);
}
-
-/*
- * Get address of physical disk used for an ioaccel2 mode command:
- * 1. Extract ioaccel2 handle from the command.
- * 2. Find a matching ioaccel2 handle from list of physical disks.
- * 3. Return:
- * 1 and set scsi3addr to address of matching physical
- * 0 if no matching physical disk was found.
- */
-static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
- struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
-{
- struct io_accel2_cmd *c2 =
- &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&h->devlock, flags);
- for (i = 0; i < h->ndevices; i++)
- if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
- memcpy(scsi3addr, h->dev[i]->scsi3addr,
- sizeof(h->dev[i]->scsi3addr));
- spin_unlock_irqrestore(&h->devlock, flags);
- return 1;
- }
- spin_unlock_irqrestore(&h->devlock, flags);
- return 0;
-}
-
static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
int i, int nphysicals, int nlocal_logicals)
{
@@ -4115,14 +3984,6 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
int rc;
struct ext_report_lun_entry *rle;
- /*
- * external targets don't support BMIC
- */
- if (dev->external) {
- dev->queue_depth = 7;
- return;
- }
-
rle = &rlep->LUN[rle_index];
dev->ioaccel_handle = rle->ioaccel_handle;
@@ -4387,7 +4248,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
}
figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
- hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
this_device = currentsd[ncurrent];
/* Turn on discovery_polling if there are ext target devices.
@@ -4584,7 +4444,55 @@ sglist_finished:
return 0;
}
-#define IO_ACCEL_INELIGIBLE (1)
+#define BUFLEN 128
+static inline void warn_zero_length_transfer(struct ctlr_info *h,
+ u8 *cdb, int cdb_len,
+ const char *func)
+{
+ char buf[BUFLEN];
+ int outlen;
+ int i;
+
+ outlen = scnprintf(buf, BUFLEN,
+ "%s: Blocking zero-length request: CDB:", func);
+ for (i = 0; i < cdb_len; i++)
+ outlen += scnprintf(buf+outlen, BUFLEN - outlen,
+ "%02hhx", cdb[i]);
+ dev_warn(&h->pdev->dev, "%s\n", buf);
+}
+
+#define IO_ACCEL_INELIGIBLE 1
+/* zero-length transfers trigger hardware errors. */
+static bool is_zero_length_transfer(u8 *cdb)
+{
+ u32 block_cnt;
+
+ /* Block zero-length transfer sizes on certain commands. */
+ switch (cdb[0]) {
+ case READ_10:
+ case WRITE_10:
+ case VERIFY: /* 0x2F */
+ case WRITE_VERIFY: /* 0x2E */
+ block_cnt = get_unaligned_be16(&cdb[7]);
+ break;
+ case READ_12:
+ case WRITE_12:
+ case VERIFY_12: /* 0xAF */
+ case WRITE_VERIFY_12: /* 0xAE */
+ block_cnt = get_unaligned_be32(&cdb[6]);
+ break;
+ case READ_16:
+ case WRITE_16:
+ case VERIFY_16: /* 0x8F */
+ block_cnt = get_unaligned_be32(&cdb[10]);
+ break;
+ default:
+ return false;
+ }
+
+ return block_cnt == 0;
+}
+
static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
{
int is_write = 0;
@@ -4651,6 +4559,12 @@ static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
+ if (is_zero_length_transfer(cdb)) {
+ warn_zero_length_transfer(h, cdb, cdb_len, __func__);
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
+ return IO_ACCEL_INELIGIBLE;
+ }
+
if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
atomic_dec(&phys_disk->ioaccel_cmds_out);
return IO_ACCEL_INELIGIBLE;
@@ -4815,6 +4729,12 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
+ if (is_zero_length_transfer(cdb)) {
+ warn_zero_length_transfer(h, cdb, cdb_len, __func__);
+ atomic_dec(&phys_disk->ioaccel_cmds_out);
+ return IO_ACCEL_INELIGIBLE;
+ }
+
if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
atomic_dec(&phys_disk->ioaccel_cmds_out);
return IO_ACCEL_INELIGIBLE;
@@ -5460,9 +5380,7 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)
return hpsa_cmd_free_and_done(c->h, c, cmd);
}
if (c->reset_pending)
- return hpsa_cmd_resolve_and_free(c->h, c);
- if (c->abort_pending)
- return hpsa_cmd_abort_and_free(c->h, c, cmd);
+ return hpsa_cmd_free_and_done(c->h, c, cmd);
if (c->cmd_type == CMD_IOACCEL2) {
struct ctlr_info *h = c->h;
struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
@@ -5613,10 +5531,14 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
/*
* Do the scan after a reset completion
*/
+ spin_lock_irqsave(&h->reset_lock, flags);
if (h->reset_in_progress) {
h->drv_req_rescan = 1;
+ spin_unlock_irqrestore(&h->reset_lock, flags);
+ hpsa_scan_complete(h);
return;
}
+ spin_unlock_irqrestore(&h->reset_lock, flags);
hpsa_update_scsi_devices(h);
@@ -5828,24 +5750,37 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,
*/
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
- int rc;
+ int rc = SUCCESS;
struct ctlr_info *h;
struct hpsa_scsi_dev_t *dev;
u8 reset_type;
char msg[48];
+ unsigned long flags;
/* find the controller to which the command to be aborted was sent */
h = sdev_to_hba(scsicmd->device);
if (h == NULL) /* paranoia */
return FAILED;
- if (lockup_detected(h))
- return FAILED;
+ spin_lock_irqsave(&h->reset_lock, flags);
+ h->reset_in_progress = 1;
+ spin_unlock_irqrestore(&h->reset_lock, flags);
+
+ if (lockup_detected(h)) {
+ rc = FAILED;
+ goto return_reset_status;
+ }
dev = scsicmd->device->hostdata;
if (!dev) {
dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
- return FAILED;
+ rc = FAILED;
+ goto return_reset_status;
+ }
+
+ if (dev->devtype == TYPE_ENCLOSURE) {
+ rc = SUCCESS;
+ goto return_reset_status;
}
/* if controller locked up, we can guarantee command won't complete */
@@ -5854,7 +5789,8 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
"cmd %d RESET FAILED, lockup detected",
hpsa_get_cmd_index(scsicmd));
hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
- return FAILED;
+ rc = FAILED;
+ goto return_reset_status;
}
/* this reset request might be the result of a lockup; check */
@@ -5863,12 +5799,15 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
"cmd %d RESET FAILED, new lockup detected",
hpsa_get_cmd_index(scsicmd));
hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
- return FAILED;
+ rc = FAILED;
+ goto return_reset_status;
}
/* Do not attempt on controller */
- if (is_hba_lunid(dev->scsi3addr))
- return SUCCESS;
+ if (is_hba_lunid(dev->scsi3addr)) {
+ rc = SUCCESS;
+ goto return_reset_status;
+ }
if (is_logical_dev_addr_mode(dev->scsi3addr))
reset_type = HPSA_DEVICE_RESET_MSG;
@@ -5879,446 +5818,26 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
- h->reset_in_progress = 1;
-
/* send a reset to the SCSI LUN which the command was sent to */
rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
DEFAULT_REPLY_QUEUE);
+ if (rc == 0)
+ rc = SUCCESS;
+ else
+ rc = FAILED;
+
sprintf(msg, "reset %s %s",
reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
- rc == 0 ? "completed successfully" : "failed");
+ rc == SUCCESS ? "completed successfully" : "failed");
hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
- h->reset_in_progress = 0;
- return rc == 0 ? SUCCESS : FAILED;
-}
-
-static void swizzle_abort_tag(u8 *tag)
-{
- u8 original_tag[8];
-
- memcpy(original_tag, tag, 8);
- tag[0] = original_tag[3];
- tag[1] = original_tag[2];
- tag[2] = original_tag[1];
- tag[3] = original_tag[0];
- tag[4] = original_tag[7];
- tag[5] = original_tag[6];
- tag[6] = original_tag[5];
- tag[7] = original_tag[4];
-}
-
-static void hpsa_get_tag(struct ctlr_info *h,
- struct CommandList *c, __le32 *taglower, __le32 *tagupper)
-{
- u64 tag;
- if (c->cmd_type == CMD_IOACCEL1) {
- struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
- &h->ioaccel_cmd_pool[c->cmdindex];
- tag = le64_to_cpu(cm1->tag);
- *tagupper = cpu_to_le32(tag >> 32);
- *taglower = cpu_to_le32(tag);
- return;
- }
- if (c->cmd_type == CMD_IOACCEL2) {
- struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
- &h->ioaccel2_cmd_pool[c->cmdindex];
- /* upper tag not used in ioaccel2 mode */
- memset(tagupper, 0, sizeof(*tagupper));
- *taglower = cm2->Tag;
- return;
- }
- tag = le64_to_cpu(c->Header.tag);
- *tagupper = cpu_to_le32(tag >> 32);
- *taglower = cpu_to_le32(tag);
-}
-
-static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
- struct CommandList *abort, int reply_queue)
-{
- int rc = IO_OK;
- struct CommandList *c;
- struct ErrorInfo *ei;
- __le32 tagupper, taglower;
-
- c = cmd_alloc(h);
-
- /* fill_cmd can't fail here, no buffer to map */
- (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
- 0, 0, scsi3addr, TYPE_MSG);
- if (h->needs_abort_tags_swizzled)
- swizzle_abort_tag(&c->Request.CDB[4]);
- (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
- hpsa_get_tag(h, abort, &taglower, &tagupper);
- dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
- __func__, tagupper, taglower);
- /* no unmap needed here because no data xfer. */
-
- ei = c->err_info;
- switch (ei->CommandStatus) {
- case CMD_SUCCESS:
- break;
- case CMD_TMF_STATUS:
- rc = hpsa_evaluate_tmf_status(h, c);
- break;
- case CMD_UNABORTABLE: /* Very common, don't make noise. */
- rc = -1;
- break;
- default:
- dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
- __func__, tagupper, taglower);
- hpsa_scsi_interpret_error(h, c);
- rc = -1;
- break;
- }
- cmd_free(h, c);
- dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
- __func__, tagupper, taglower);
- return rc;
-}
-
-static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
- struct CommandList *command_to_abort, int reply_queue)
-{
- struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
- struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
- struct io_accel2_cmd *c2a =
- &h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
- struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
- struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
- if (!dev)
- return;
-
- /*
- * We're overlaying struct hpsa_tmf_struct on top of something which
- * was allocated as a struct io_accel2_cmd, so we better be sure it
- * actually fits, and doesn't overrun the error info space.
- */
- BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
- sizeof(struct io_accel2_cmd));
- BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
- offsetof(struct hpsa_tmf_struct, error_len) +
- sizeof(ac->error_len));
-
- c->cmd_type = IOACCEL2_TMF;
- c->scsi_cmd = SCSI_CMD_BUSY;
-
- /* Adjust the DMA address to point to the accelerated command buffer */
- c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
- (c->cmdindex * sizeof(struct io_accel2_cmd));
- BUG_ON(c->busaddr & 0x0000007F);
-
- memset(ac, 0, sizeof(*c2)); /* yes this is correct */
- ac->iu_type = IOACCEL2_IU_TMF_TYPE;
- ac->reply_queue = reply_queue;
- ac->tmf = IOACCEL2_TMF_ABORT;
- ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
- memset(ac->lun_id, 0, sizeof(ac->lun_id));
- ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
- ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
- ac->error_ptr = cpu_to_le64(c->busaddr +
- offsetof(struct io_accel2_cmd, error_data));
- ac->error_len = cpu_to_le32(sizeof(c2->error_data));
-}
-
-/* ioaccel2 path firmware cannot handle abort task requests.
- * Change abort requests to physical target reset, and send to the
- * address of the physical disk used for the ioaccel 2 command.
- * Return 0 on success (IO_OK)
- * -1 on failure
- */
-
-static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
- unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
-{
- int rc = IO_OK;
- struct scsi_cmnd *scmd; /* scsi command within request being aborted */
- struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
- unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
- unsigned char *psa = &phys_scsi3addr[0];
-
- /* Get a pointer to the hpsa logical device. */
- scmd = abort->scsi_cmd;
- dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
- if (dev == NULL) {
- dev_warn(&h->pdev->dev,
- "Cannot abort: no device pointer for command.\n");
- return -1; /* not abortable */
- }
-
- if (h->raid_offload_debug > 0)
- dev_info(&h->pdev->dev,
- "scsi %d:%d:%d:%d %s scsi3addr 0x%8phN\n",
- h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
- "Reset as abort", scsi3addr);
-
- if (!dev->offload_enabled) {
- dev_warn(&h->pdev->dev,
- "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
- return -1; /* not abortable */
- }
-
- /* Incoming scsi3addr is logical addr. We need physical disk addr. */
- if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
- dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
- return -1; /* not abortable */
- }
-
- /* send the reset */
- if (h->raid_offload_debug > 0)
- dev_info(&h->pdev->dev,
- "Reset as abort: Resetting physical device at scsi3addr 0x%8phN\n",
- psa);
- rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue);
- if (rc != 0) {
- dev_warn(&h->pdev->dev,
- "Reset as abort: Failed on physical device at scsi3addr 0x%8phN\n",
- psa);
- return rc; /* failed to reset */
- }
-
- /* wait for device to recover */
- if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
- dev_warn(&h->pdev->dev,
- "Reset as abort: Failed: Device never recovered from reset: 0x%8phN\n",
- psa);
- return -1; /* failed to recover */
- }
-
- /* device recovered */
- dev_info(&h->pdev->dev,
- "Reset as abort: Device recovered from reset: scsi3addr 0x%8phN\n",
- psa);
-
- return rc; /* success */
-}
-
-static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
- struct CommandList *abort, int reply_queue)
-{
- int rc = IO_OK;
- struct CommandList *c;
- __le32 taglower, tagupper;
- struct hpsa_scsi_dev_t *dev;
- struct io_accel2_cmd *c2;
-
- dev = abort->scsi_cmd->device->hostdata;
- if (!dev)
- return -1;
-
- if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
- return -1;
-
- c = cmd_alloc(h);
- setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
- c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
- (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
- hpsa_get_tag(h, abort, &taglower, &tagupper);
- dev_dbg(&h->pdev->dev,
- "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
- __func__, tagupper, taglower);
- /* no unmap needed here because no data xfer. */
-
- dev_dbg(&h->pdev->dev,
- "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
- __func__, tagupper, taglower, c2->error_data.serv_response);
- switch (c2->error_data.serv_response) {
- case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
- case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
- rc = 0;
- break;
- case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
- case IOACCEL2_SERV_RESPONSE_FAILURE:
- case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
- rc = -1;
- break;
- default:
- dev_warn(&h->pdev->dev,
- "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
- __func__, tagupper, taglower,
- c2->error_data.serv_response);
- rc = -1;
- }
- cmd_free(h, c);
- dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
- tagupper, taglower);
+return_reset_status:
+ spin_lock_irqsave(&h->reset_lock, flags);
+ h->reset_in_progress = 0;
+ spin_unlock_irqrestore(&h->reset_lock, flags);
return rc;
}
-static int hpsa_send_abort_both_ways(struct ctlr_info *h,
- struct hpsa_scsi_dev_t *dev, struct CommandList *abort, int reply_queue)
-{
- /*
- * io accelerator mode 2 commands should be aborted via the
- * accelerated path, since RAID path is unaware of these commands,
- * but not all underlying firmware can handle abort TMF.
- * Change abort to physical device reset when abort TMF is unsupported.
- */
- if (abort->cmd_type == CMD_IOACCEL2) {
- if ((HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) ||
- dev->physical_device)
- return hpsa_send_abort_ioaccel2(h, abort,
- reply_queue);
- else
- return hpsa_send_reset_as_abort_ioaccel2(h,
- dev->scsi3addr,
- abort, reply_queue);
- }
- return hpsa_send_abort(h, dev->scsi3addr, abort, reply_queue);
-}
-
-/* Find out which reply queue a command was meant to return on */
-static int hpsa_extract_reply_queue(struct ctlr_info *h,
- struct CommandList *c)
-{
- if (c->cmd_type == CMD_IOACCEL2)
- return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
- return c->Header.ReplyQueue;
-}
-
-/*
- * Limit concurrency of abort commands to prevent
- * over-subscription of commands
- */
-static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
-{
-#define ABORT_CMD_WAIT_MSECS 5000
- return !wait_event_timeout(h->abort_cmd_wait_queue,
- atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
- msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
-}
-
-/* Send an abort for the specified command.
- * If the device and controller support it,
- * send a task abort request.
- */
-static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
-{
-
- int rc;
- struct ctlr_info *h;
- struct hpsa_scsi_dev_t *dev;
- struct CommandList *abort; /* pointer to command to be aborted */
- struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
- char msg[256]; /* For debug messaging. */
- int ml = 0;
- __le32 tagupper, taglower;
- int refcount, reply_queue;
-
- if (sc == NULL)
- return FAILED;
-
- if (sc->device == NULL)
- return FAILED;
-
- /* Find the controller of the command to be aborted */
- h = sdev_to_hba(sc->device);
- if (h == NULL)
- return FAILED;
-
- /* Find the device of the command to be aborted */
- dev = sc->device->hostdata;
- if (!dev) {
- dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
- msg);
- return FAILED;
- }
-
- /* If controller locked up, we can guarantee command won't complete */
- if (lockup_detected(h)) {
- hpsa_show_dev_msg(KERN_WARNING, h, dev,
- "ABORT FAILED, lockup detected");
- return FAILED;
- }
-
- /* This is a good time to check if controller lockup has occurred */
- if (detect_controller_lockup(h)) {
- hpsa_show_dev_msg(KERN_WARNING, h, dev,
- "ABORT FAILED, new lockup detected");
- return FAILED;
- }
-
- /* Check that controller supports some kind of task abort */
- if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
- !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
- return FAILED;
-
- memset(msg, 0, sizeof(msg));
- ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
- h->scsi_host->host_no, sc->device->channel,
- sc->device->id, sc->device->lun,
- "Aborting command", sc);
-
- /* Get SCSI command to be aborted */
- abort = (struct CommandList *) sc->host_scribble;
- if (abort == NULL) {
- /* This can happen if the command already completed. */
- return SUCCESS;
- }
- refcount = atomic_inc_return(&abort->refcount);
- if (refcount == 1) { /* Command is done already. */
- cmd_free(h, abort);
- return SUCCESS;
- }
-
- /* Don't bother trying the abort if we know it won't work. */
- if (abort->cmd_type != CMD_IOACCEL2 &&
- abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
- cmd_free(h, abort);
- return FAILED;
- }
-
- /*
- * Check that we're aborting the right command.
- * It's possible the CommandList already completed and got re-used.
- */
- if (abort->scsi_cmd != sc) {
- cmd_free(h, abort);
- return SUCCESS;
- }
-
- abort->abort_pending = true;
- hpsa_get_tag(h, abort, &taglower, &tagupper);
- reply_queue = hpsa_extract_reply_queue(h, abort);
- ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
- as = abort->scsi_cmd;
- if (as != NULL)
- ml += sprintf(msg+ml,
- "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
- as->cmd_len, as->cmnd[0], as->cmnd[1],
- as->serial_number);
- dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
- hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
-
- /*
- * Command is in flight, or possibly already completed
- * by the firmware (but not to the scsi mid layer) but we can't
- * distinguish which. Send the abort down.
- */
- if (wait_for_available_abort_cmd(h)) {
- dev_warn(&h->pdev->dev,
- "%s FAILED, timeout waiting for an abort command to become available.\n",
- msg);
- cmd_free(h, abort);
- return FAILED;
- }
- rc = hpsa_send_abort_both_ways(h, dev, abort, reply_queue);
- atomic_inc(&h->abort_cmds_available);
- wake_up_all(&h->abort_cmd_wait_queue);
- if (rc != 0) {
- dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
- hpsa_show_dev_msg(KERN_WARNING, h, dev,
- "FAILED to abort command");
- cmd_free(h, abort);
- return FAILED;
- }
- dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
- wait_event(h->event_sync_wait_queue,
- abort->scsi_cmd != sc || lockup_detected(h));
- cmd_free(h, abort);
- return !lockup_detected(h) ? SUCCESS : FAILED;
-}
-
/*
* For operations with an associated SCSI command, a command block is allocated
* at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
@@ -6364,9 +5883,7 @@ static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
{
/*
* Release our reference to the block. We don't need to do anything
- * else to free it, because it is accessed by index. (There's no point
- * in checking the result of the decrement, since we cannot guarantee
- * that there isn't a concurrent abort which is also accessing it.)
+ * else to free it, because it is accessed by index.
*/
(void)atomic_dec(&c->refcount);
}
@@ -6905,7 +6422,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
int cmd_type)
{
int pci_dir = XFER_NONE;
- u64 tag; /* for commands to be aborted */
c->cmd_type = CMD_IOCTL_PEND;
c->scsi_cmd = SCSI_CMD_BUSY;
@@ -7089,27 +6605,6 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
c->Request.CDB[6] = 0x00;
c->Request.CDB[7] = 0x00;
break;
- case HPSA_ABORT_MSG:
- memcpy(&tag, buff, sizeof(tag));
- dev_dbg(&h->pdev->dev,
- "Abort Tag:0x%016llx using rqst Tag:0x%016llx",
- tag, c->Header.tag);
- c->Request.CDBLen = 16;
- c->Request.type_attr_dir =
- TYPE_ATTR_DIR(cmd_type,
- ATTR_SIMPLE, XFER_WRITE);
- c->Request.Timeout = 0; /* Don't time out */
- c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
- c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
- c->Request.CDB[2] = 0x00; /* reserved */
- c->Request.CDB[3] = 0x00; /* reserved */
- /* Tag to abort goes in CDB[4]-CDB[11] */
- memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
- c->Request.CDB[12] = 0x00; /* reserved */
- c->Request.CDB[13] = 0x00; /* reserved */
- c->Request.CDB[14] = 0x00; /* reserved */
- c->Request.CDB[15] = 0x00; /* reserved */
- break;
default:
dev_warn(&h->pdev->dev, "unknown message type %d\n",
cmd);
@@ -8067,9 +7562,6 @@ static int hpsa_pci_init(struct ctlr_info *h)
h->product_name = products[prod_index].product_name;
h->access = *(products[prod_index].access);
- h->needs_abort_tags_swizzled =
- ctlr_needs_abort_tags_swizzled(h->board_id);
-
pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
@@ -8627,41 +8119,79 @@ out:
return rc;
}
-static void hpsa_rescan_ctlr_worker(struct work_struct *work)
+static void hpsa_perform_rescan(struct ctlr_info *h)
{
+ struct Scsi_Host *sh = NULL;
unsigned long flags;
- struct ctlr_info *h = container_of(to_delayed_work(work),
- struct ctlr_info, rescan_ctlr_work);
-
-
- if (h->remove_in_progress)
- return;
/*
* Do the scan after the reset
*/
+ spin_lock_irqsave(&h->reset_lock, flags);
if (h->reset_in_progress) {
h->drv_req_rescan = 1;
+ spin_unlock_irqrestore(&h->reset_lock, flags);
return;
}
+ spin_unlock_irqrestore(&h->reset_lock, flags);
+
+ sh = scsi_host_get(h->scsi_host);
+ if (sh != NULL) {
+ hpsa_scan_start(sh);
+ scsi_host_put(sh);
+ h->drv_req_rescan = 0;
+ }
+}
+
+/*
+ * watch for controller events
+ */
+static void hpsa_event_monitor_worker(struct work_struct *work)
+{
+ struct ctlr_info *h = container_of(to_delayed_work(work),
+ struct ctlr_info, event_monitor_work);
+ unsigned long flags;
+
+ spin_lock_irqsave(&h->lock, flags);
+ if (h->remove_in_progress) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&h->lock, flags);
- if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
- scsi_host_get(h->scsi_host);
+ if (hpsa_ctlr_needs_rescan(h)) {
hpsa_ack_ctlr_events(h);
- hpsa_scan_start(h->scsi_host);
- scsi_host_put(h->scsi_host);
+ hpsa_perform_rescan(h);
+ }
+
+ spin_lock_irqsave(&h->lock, flags);
+ if (!h->remove_in_progress)
+ schedule_delayed_work(&h->event_monitor_work,
+ HPSA_EVENT_MONITOR_INTERVAL);
+ spin_unlock_irqrestore(&h->lock, flags);
+}
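
A minimal sketch (not part of the patch) of how a reset path is expected to
bracket its work with the new h->reset_lock, so that hpsa_perform_rescan()
can test reset_in_progress and defer the scan; the function name and the
elided reset call below are illustrative only:

	static int example_reset_path(struct ctlr_info *h)
	{
		unsigned long flags;
		int rc = SUCCESS;

		spin_lock_irqsave(&h->reset_lock, flags);
		if (h->reset_in_progress) {	/* another reset already running */
			spin_unlock_irqrestore(&h->reset_lock, flags);
			return FAILED;
		}
		h->reset_in_progress = 1;
		spin_unlock_irqrestore(&h->reset_lock, flags);

		/* ... issue the reset and wait for completion ... */

		spin_lock_irqsave(&h->reset_lock, flags);
		h->reset_in_progress = 0;	/* mirrors return_reset_status: above */
		spin_unlock_irqrestore(&h->reset_lock, flags);
		return rc;
	}
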
+
+static void hpsa_rescan_ctlr_worker(struct work_struct *work)
+{
+ unsigned long flags;
+ struct ctlr_info *h = container_of(to_delayed_work(work),
+ struct ctlr_info, rescan_ctlr_work);
+
+ spin_lock_irqsave(&h->lock, flags);
+ if (h->remove_in_progress) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&h->lock, flags);
+
+ if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
+ hpsa_perform_rescan(h);
} else if (h->discovery_polling) {
hpsa_disable_rld_caching(h);
if (hpsa_luns_changed(h)) {
- struct Scsi_Host *sh = NULL;
-
dev_info(&h->pdev->dev,
"driver discovery polling rescan.\n");
- sh = scsi_host_get(h->scsi_host);
- if (sh != NULL) {
- hpsa_scan_start(sh);
- scsi_host_put(sh);
- }
+ hpsa_perform_rescan(h);
}
}
spin_lock_irqsave(&h->lock, flags);
@@ -8750,8 +8280,8 @@ reinit_after_soft_reset:
spin_lock_init(&h->lock);
spin_lock_init(&h->offline_device_lock);
spin_lock_init(&h->scan_lock);
+ spin_lock_init(&h->reset_lock);
atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
- atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
/* Allocate and clear per-cpu variable lockup_detected */
h->lockup_detected = alloc_percpu(u32);
@@ -8803,7 +8333,6 @@ reinit_after_soft_reset:
if (rc)
goto clean5; /* cmd, irq, shost, pci, lu, aer/h */
init_waitqueue_head(&h->scan_wait_queue);
- init_waitqueue_head(&h->abort_cmd_wait_queue);
init_waitqueue_head(&h->event_sync_wait_queue);
mutex_init(&h->reset_mutex);
h->scan_finished = 1; /* no scan currently in progress */
@@ -8926,6 +8455,9 @@ reinit_after_soft_reset:
INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
h->heartbeat_sample_interval);
+ INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker);
+ schedule_delayed_work(&h->event_monitor_work,
+ HPSA_EVENT_MONITOR_INTERVAL);
return 0;
clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
@@ -9094,6 +8626,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
spin_unlock_irqrestore(&h->lock, flags);
cancel_delayed_work_sync(&h->monitor_ctlr_work);
cancel_delayed_work_sync(&h->rescan_ctlr_work);
+ cancel_delayed_work_sync(&h->event_monitor_work);
destroy_workqueue(h->rescan_ctlr_wq);
destroy_workqueue(h->resubmit_wq);
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 6f04f2ad4125..1c49741bc639 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -57,6 +57,7 @@ struct hpsa_sas_phy {
bool added_to_port;
};
+#define EXTERNAL_QD 7
struct hpsa_scsi_dev_t {
unsigned int devtype;
int bus, target, lun; /* as presented to the OS */
@@ -244,6 +245,7 @@ struct ctlr_info {
u32 __percpu *lockup_detected;
struct delayed_work monitor_ctlr_work;
struct delayed_work rescan_ctlr_work;
+ struct delayed_work event_monitor_work;
int remove_in_progress;
/* Address of h->q[x] is passed to intr handler to know which queue */
u8 q[MAX_REPLY_QUEUES];
@@ -296,11 +298,11 @@ struct ctlr_info {
struct workqueue_struct *resubmit_wq;
struct workqueue_struct *rescan_ctlr_wq;
atomic_t abort_cmds_available;
- wait_queue_head_t abort_cmd_wait_queue;
wait_queue_head_t event_sync_wait_queue;
struct mutex reset_mutex;
u8 reset_in_progress;
struct hpsa_sas_node *sas_host;
+ spinlock_t reset_lock;
};
struct offline_device_entry {
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 5961705eef76..078afe448115 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -809,10 +809,7 @@ struct bmic_identify_physical_device {
u8 max_temperature_degreesC;
u8 logical_blocks_per_phys_block_exp; /* phyblocksize = 512*2^exp */
__le16 current_queue_depth_limit;
- u8 switch_name[10];
- __le16 switch_port;
- u8 alternate_paths_switch_name[40];
- u8 alternate_paths_switch_port[8];
+ u8 reserved_switch_stuff[60];
__le16 power_on_hours; /* valid only if gas gauge supported */
__le16 percent_endurance_used; /* valid only if gas gauge supported. */
#define BMIC_PHYS_DRIVE_SSD_WEAROUT(idphydrv) \
@@ -828,11 +825,22 @@ struct bmic_identify_physical_device {
(idphydrv->smart_carrier_authentication == 0x01)
u8 smart_carrier_app_fw_version;
u8 smart_carrier_bootloader_fw_version;
+ u8 sanitize_support_flags;
+ u8 drive_key_flags;
u8 encryption_key_name[64];
__le32 misc_drive_flags;
__le16 dek_index;
- u8 padding[112];
-};
+ __le16 hba_drive_encryption_flags;
+ __le16 max_overwrite_time;
+ __le16 max_block_erase_time;
+ __le16 max_crypto_erase_time;
+ u8 device_connector_info[5];
+ u8 connector_name[8][8];
+ u8 page_83_id[16];
+ u8 max_link_rate[256];
+ u8 neg_phys_link_rate[256];
+ u8 box_conn_name[8];
+} __attribute((aligned(512)));
struct bmic_sense_subsystem_info {
u8 primary_slot_number;
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index db17ad15b0c1..7226226f7383 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -800,7 +800,7 @@ static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
hptiop_finish_scsi_req(hba, tag, req);
}
-void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
+static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
struct hpt_iop_request_header __iomem *req;
struct hpt_iop_request_ioctl_command __iomem *p;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 26cd3c28186a..cc4e05be8d4a 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4935,7 +4935,7 @@ static struct vio_device_id ibmvfc_device_table[] = {
};
MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
-static struct dev_pm_ops ibmvfc_pm_ops = {
+static const struct dev_pm_ops ibmvfc_pm_ops = {
.resume = ibmvfc_resume
};
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 1deb0a9f14a6..da22b3665cb0 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -2336,7 +2336,7 @@ static struct vio_device_id ibmvscsi_device_table[] = {
};
MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
-static struct dev_pm_ops ibmvscsi_pm_ops = {
+static const struct dev_pm_ops ibmvscsi_pm_ops = {
.resume = ibmvscsi_resume
};
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index dd6828f7f772..42381adf0769 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2556,7 +2556,7 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
* the array. */
if (items)
num_arrays++;
- q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
+ q->pool = kvzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
if (q->pool == NULL)
return -ENOMEM;
@@ -2590,7 +2590,7 @@ void iscsi_pool_free(struct iscsi_pool *q)
for (i = 0; i < q->max; i++)
kfree(q->pool[i]);
- kfree(q->pool);
+ kvfree(q->pool);
}
EXPORT_SYMBOL_GPL(iscsi_pool_free);
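
Background on the kvzalloc()/kvfree() pairing above: kvzalloc() first tries a
zeroed kmalloc() and falls back to vmalloc() when the allocation is too large
for the page allocator, so the matching free must be kvfree(), which handles
either case. A minimal usage sketch, with the element count assumed:

	void **pool = kvzalloc(nr_items * sizeof(void *), GFP_KERNEL);
	if (!pool)
		return -ENOMEM;
	/* ... use the array ... */
	kvfree(pool);
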
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index aadbd5314c5c..c0d0d979b76d 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -27,30 +27,38 @@
#include "sas_internal.h"
#include "sas_dump.h"
-void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
+int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
+ int rc = 0;
+
if (!test_bit(SAS_HA_REGISTERED, &ha->state))
- return;
+ return 0;
if (test_bit(SAS_HA_DRAINING, &ha->state)) {
/* add it to the defer list, if not already pending */
if (list_empty(&sw->drain_node))
list_add(&sw->drain_node, &ha->defer_q);
} else
- scsi_queue_work(ha->core.shost, &sw->work);
+ rc = scsi_queue_work(ha->core.shost, &sw->work);
+
+ return rc;
}
-static void sas_queue_event(int event, unsigned long *pending,
+static int sas_queue_event(int event, unsigned long *pending,
struct sas_work *work,
struct sas_ha_struct *ha)
{
+ int rc = 0;
+
if (!test_and_set_bit(event, pending)) {
unsigned long flags;
spin_lock_irqsave(&ha->lock, flags);
- sas_queue_work(ha, work);
+ rc = sas_queue_work(ha, work);
spin_unlock_irqrestore(&ha->lock, flags);
}
+
+ return rc;
}
@@ -116,32 +124,32 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
mutex_unlock(&ha->disco_mutex);
}
-static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
+static int notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
{
BUG_ON(event >= HA_NUM_EVENTS);
- sas_queue_event(event, &sas_ha->pending,
- &sas_ha->ha_events[event].work, sas_ha);
+ return sas_queue_event(event, &sas_ha->pending,
+ &sas_ha->ha_events[event].work, sas_ha);
}
-static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
+static int notify_port_event(struct asd_sas_phy *phy, enum port_event event)
{
struct sas_ha_struct *ha = phy->ha;
BUG_ON(event >= PORT_NUM_EVENTS);
- sas_queue_event(event, &phy->port_events_pending,
- &phy->port_events[event].work, ha);
+ return sas_queue_event(event, &phy->port_events_pending,
+ &phy->port_events[event].work, ha);
}
-void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
+int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
{
struct sas_ha_struct *ha = phy->ha;
BUG_ON(event >= PHY_NUM_EVENTS);
- sas_queue_event(event, &phy->phy_events_pending,
- &phy->phy_events[event].work, ha);
+ return sas_queue_event(event, &phy->phy_events_pending,
+ &phy->phy_events[event].work, ha);
}
int sas_init_events(struct sas_ha_struct *sas_ha)
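
With sas_notify_phy_event() now returning an int, a caller can see whether the
event was immediately queued (non-zero) or not (zero, e.g. the same event is
already pending, the HA is unregistered, or the work was deferred while
draining). A hypothetical caller-side check, not taken from this patch:

	if (!sas_notify_phy_event(phy, PHYE_LOSS_OF_SIGNAL))
		pr_debug("phy event not queued immediately\n");
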
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index b306b7843d99..a216c957b639 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -76,7 +76,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work);
void sas_porte_link_reset_err(struct work_struct *work);
void sas_porte_timer_event(struct work_struct *work);
void sas_porte_hard_reset(struct work_struct *work);
-void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
+int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
int sas_notify_lldd_dev_found(struct domain_device *);
void sas_notify_lldd_dev_gone(struct domain_device *);
@@ -85,7 +85,7 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
enum phy_func phy_func, struct sas_phy_linkrates *);
int sas_smp_get_phy_events(struct sas_phy *phy);
-void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event);
+int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event);
void sas_device_set_phy(struct domain_device *dev, struct sas_port *port);
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index f2c0ba6ced78..562dc0139735 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -756,6 +756,7 @@ struct lpfc_hba {
uint8_t nvmet_support; /* driver supports NVMET */
#define LPFC_NVMET_MAX_PORTS 32
uint8_t mds_diags_support;
+ uint32_t initial_imax;
/* HBA Config Parameters */
uint32_t cfg_ack0;
@@ -777,6 +778,7 @@ struct lpfc_hba {
uint32_t cfg_poll_tmo;
uint32_t cfg_task_mgmt_tmo;
uint32_t cfg_use_msi;
+ uint32_t cfg_auto_imax;
uint32_t cfg_fcp_imax;
uint32_t cfg_fcp_cpu_map;
uint32_t cfg_fcp_io_channel;
@@ -913,16 +915,16 @@ struct lpfc_hba {
/*
* stat counters
*/
- uint64_t fc4ScsiInputRequests;
- uint64_t fc4ScsiOutputRequests;
- uint64_t fc4ScsiControlRequests;
- uint64_t fc4ScsiIoCmpls;
- uint64_t fc4NvmeInputRequests;
- uint64_t fc4NvmeOutputRequests;
- uint64_t fc4NvmeControlRequests;
- uint64_t fc4NvmeIoCmpls;
- uint64_t fc4NvmeLsRequests;
- uint64_t fc4NvmeLsCmpls;
+ atomic_t fc4ScsiInputRequests;
+ atomic_t fc4ScsiOutputRequests;
+ atomic_t fc4ScsiControlRequests;
+ atomic_t fc4ScsiIoCmpls;
+ atomic_t fc4NvmeInputRequests;
+ atomic_t fc4NvmeOutputRequests;
+ atomic_t fc4NvmeControlRequests;
+ atomic_t fc4NvmeIoCmpls;
+ atomic_t fc4NvmeLsRequests;
+ atomic_t fc4NvmeLsCmpls;
uint64_t bg_guard_err_cnt;
uint64_t bg_apptag_err_cnt;
@@ -1050,6 +1052,7 @@ struct lpfc_hba {
uint8_t temp_sensor_support;
/* Fields used for heart beat. */
+ unsigned long last_eqdelay_time;
unsigned long last_completion_time;
unsigned long skipped_hb;
struct timer_list hb_tmofunc;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index bb2d9e238225..4ed48ed38e79 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -148,9 +148,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
struct nvme_fc_local_port *localport;
- struct lpfc_nvme_lport *lport;
- struct lpfc_nvme_rport *rport;
+ struct lpfc_nodelist *ndlp;
struct nvme_fc_remote_port *nrport;
+ uint64_t data1, data2, data3, tot;
char *statep;
int len = 0;
@@ -171,7 +171,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
else
statep = "INIT";
len += snprintf(buf + len, PAGE_SIZE - len,
- "NVME Target: Enabled State %s\n",
+ "NVME Target Enabled State %s\n",
statep);
len += snprintf(buf + len, PAGE_SIZE - len,
"%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
@@ -245,11 +245,21 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
atomic_read(&tgtp->xmt_abort_rsp),
atomic_read(&tgtp->xmt_abort_rsp_error));
+ spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
+ spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ tot = phba->sli4_hba.nvmet_xri_cnt -
+ (phba->sli4_hba.nvmet_ctx_get_cnt +
+ phba->sli4_hba.nvmet_ctx_put_cnt);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+
len += snprintf(buf + len, PAGE_SIZE - len,
- "IO_CTX: %08x outstanding %08x total %x",
- phba->sli4_hba.nvmet_ctx_cnt,
+ "IO_CTX: %08x WAIT: cur %08x tot %08x\n"
+ "CTX Outstanding %08llx\n",
+ phba->sli4_hba.nvmet_xri_cnt,
phba->sli4_hba.nvmet_io_wait_cnt,
- phba->sli4_hba.nvmet_io_wait_total);
+ phba->sli4_hba.nvmet_io_wait_total,
+ tot);
len += snprintf(buf+len, PAGE_SIZE-len, "\n");
return len;
@@ -265,7 +275,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
spin_lock_irq(shost->host_lock);
- lport = (struct lpfc_nvme_lport *)localport->private;
/* Port state is only one of two values for now. */
if (localport->port_id)
@@ -281,9 +290,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
wwn_to_u64(vport->fc_nodename.u.wwn),
localport->port_id, statep);
- list_for_each_entry(rport, &lport->rport_list, list) {
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!ndlp->nrport)
+ continue;
+
/* local short-hand pointer. */
- nrport = rport->remoteport;
+ nrport = ndlp->nrport->remoteport;
/* Port state is only one of two values for now. */
switch (nrport->port_state) {
@@ -311,25 +323,23 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
len += snprintf(buf + len, PAGE_SIZE - len, "DID x%06x ",
nrport->port_id);
- switch (nrport->port_role) {
- case FC_PORT_ROLE_NVME_INITIATOR:
+ /* An NVME rport can have multiple roles. */
+ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
len += snprintf(buf + len, PAGE_SIZE - len,
"INITIATOR ");
- break;
- case FC_PORT_ROLE_NVME_TARGET:
+ if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
len += snprintf(buf + len, PAGE_SIZE - len,
"TARGET ");
- break;
- case FC_PORT_ROLE_NVME_DISCOVERY:
+ if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
len += snprintf(buf + len, PAGE_SIZE - len,
- "DISCOVERY ");
- break;
- default:
+ "DISCSRVC ");
+ if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
+ FC_PORT_ROLE_NVME_TARGET |
+ FC_PORT_ROLE_NVME_DISCOVERY))
len += snprintf(buf + len, PAGE_SIZE - len,
- "UNKNOWN_ROLE x%x",
+ "UNKNOWN ROLE x%x",
nrport->port_role);
- break;
- }
+
len += snprintf(buf + len, PAGE_SIZE - len, "%s ", statep);
/* Terminate the string. */
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
@@ -338,19 +348,21 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
len += snprintf(buf+len, PAGE_SIZE-len,
- "LS: Xmt %016llx Cmpl %016llx\n",
- phba->fc4NvmeLsRequests,
- phba->fc4NvmeLsCmpls);
-
+ "LS: Xmt %016x Cmpl %016x\n",
+ atomic_read(&phba->fc4NvmeLsRequests),
+ atomic_read(&phba->fc4NvmeLsCmpls));
+
+ tot = atomic_read(&phba->fc4NvmeIoCmpls);
+ data1 = atomic_read(&phba->fc4NvmeInputRequests);
+ data2 = atomic_read(&phba->fc4NvmeOutputRequests);
+ data3 = atomic_read(&phba->fc4NvmeControlRequests);
len += snprintf(buf+len, PAGE_SIZE-len,
"FCP: Rd %016llx Wr %016llx IO %016llx\n",
- phba->fc4NvmeInputRequests,
- phba->fc4NvmeOutputRequests,
- phba->fc4NvmeControlRequests);
+ data1, data2, data3);
len += snprintf(buf+len, PAGE_SIZE-len,
- " Cmpl %016llx\n", phba->fc4NvmeIoCmpls);
-
+ " Cmpl %016llx Outstanding %016llx\n",
+ tot, (data1 + data2 + data3) - tot);
return len;
}
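
A short sketch of how the converted atomic counters are meant to be read: the
number of outstanding NVME I/Os is the sum of submitted requests minus the
completions, as in the snprintf above. The snapshot values are made up for
illustration:

	uint64_t cmpls = atomic_read(&phba->fc4NvmeIoCmpls);         /* e.g. 9900 */
	uint64_t rd    = atomic_read(&phba->fc4NvmeInputRequests);   /* e.g. 6000 */
	uint64_t wr    = atomic_read(&phba->fc4NvmeOutputRequests);  /* e.g. 3900 */
	uint64_t ctl   = atomic_read(&phba->fc4NvmeControlRequests); /* e.g. 100  */
	uint64_t outstanding = (rd + wr + ctl) - cmpls;              /* = 100     */
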
@@ -1342,6 +1354,8 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
goto board_mode_out;
}
wait_for_completion(&online_compl);
+ if (status)
+ status = -EIO;
} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
@@ -3198,9 +3212,12 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
shost = lpfc_shost_from_vport(vport);
spin_lock_irq(shost->host_lock);
- list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
- if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport)
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ if (!NLP_CHK_NODE_ACT(ndlp))
+ continue;
+ if (ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
+ }
spin_unlock_irq(shost->host_lock);
}
@@ -4467,9 +4484,11 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
phba->cfg_fcp_imax = (uint32_t)val;
+ phba->initial_imax = phba->cfg_fcp_imax;
for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
- lpfc_modify_hba_eq_delay(phba, i);
+ lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
+ val);
return strlen(buf);
}
@@ -4524,6 +4543,16 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
lpfc_fcp_imax_show, lpfc_fcp_imax_store);
+/*
+ * lpfc_auto_imax: Controls whether automatic interrupt coalescing is supported.
+ * 0 No auto_imax support
+ * 1 auto imax on
+ * Auto imax will change the value of fcp_imax on a per EQ basis, using
+ * the EQ Delay Multiplier, depending on the activity for that EQ.
+ * Value range [0,1]. Default value is 1.
+ */
+LPFC_ATTR_RW(auto_imax, 1, 0, 1, "Enable Auto imax");
+
/**
* lpfc_state_show - Display current driver CPU affinity
* @dev: class converted to a Scsi_host structure.
@@ -5150,6 +5179,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_task_mgmt_tmo,
&dev_attr_lpfc_use_msi,
&dev_attr_lpfc_nvme_oas,
+ &dev_attr_lpfc_auto_imax,
&dev_attr_lpfc_fcp_imax,
&dev_attr_lpfc_fcp_cpu_map,
&dev_attr_lpfc_fcp_io_channel,
@@ -6168,6 +6198,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
+ lpfc_auto_imax_init(phba, lpfc_auto_imax);
lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
@@ -6212,6 +6243,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
}
+ if (phba->cfg_auto_imax && !phba->cfg_fcp_imax)
+ phba->cfg_auto_imax = 0;
+ phba->initial_imax = phba->cfg_fcp_imax;
+
/* A value of 0 means use the number of CPUs found in the system */
if (phba->cfg_fcp_io_channel == 0)
phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 24ce96dcc94d..9c0c1463057d 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -503,26 +503,23 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
Did, vport->fc_flag, vport->fc_rscn_id_cnt);
/*
- * This NPortID was previously a FCP target,
+ * This NPortID was previously a FCP/NVMe target,
* Don't even bother to send GFF_ID.
*/
ndlp = lpfc_findnode_did(vport, Did);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp))
- ndlp->nlp_fc4_type = fc4_type;
-
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
- ndlp->nlp_fc4_type = fc4_type;
-
- if (ndlp->nlp_type & NLP_FCP_TARGET)
- lpfc_setup_disc_node(vport, Did);
-
- else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
- 0, Did) == 0)
- vport->num_disc_nodes++;
-
- else
- lpfc_setup_disc_node(vport, Did);
- }
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+ (ndlp->nlp_type &
+ (NLP_FCP_TARGET | NLP_NVME_TARGET))) {
+ if (fc4_type == FC_TYPE_FCP)
+ ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+ if (fc4_type == FC_TYPE_NVME)
+ ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+ lpfc_setup_disc_node(vport, Did);
+ } else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
+ 0, Did) == 0)
+ vport->num_disc_nodes++;
+ else
+ lpfc_setup_disc_node(vport, Did);
} else {
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
"Skip2 GID_FTrsp: did:x%x flg:x%x cnt:%d",
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 89afa7813e5a..5cc8b0f7d885 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -323,7 +323,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
raw_index = phba->hbq_get[i];
getidx = le32_to_cpu(raw_index);
len += snprintf(buf+len, size-len,
- "entrys:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
+ "entries:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx,
hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx);
@@ -550,8 +550,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_nodelist *ndlp;
unsigned char *statep;
struct nvme_fc_local_port *localport;
- struct lpfc_nvme_lport *lport;
- struct lpfc_nvme_rport *rport;
struct lpfc_nvmet_tgtport *tgtp;
struct nvme_fc_remote_port *nrport;
@@ -623,6 +621,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
ndlp->nlp_sid);
if (ndlp->nlp_type & NLP_FCP_INITIATOR)
len += snprintf(buf+len, size-len, "FCP_INITIATOR ");
+ if (ndlp->nlp_type & NLP_NVME_TARGET)
+ len += snprintf(buf + len,
+ size - len, "NVME_TGT sid:%d ",
+ NLP_NO_SID);
+ if (ndlp->nlp_type & NLP_NVME_INITIATOR)
+ len += snprintf(buf + len,
+ size - len, "NVME_INITIATOR ");
len += snprintf(buf+len, size-len, "usgmap:%x ",
ndlp->nlp_usg_map);
len += snprintf(buf+len, size-len, "refcnt:%x",
@@ -660,7 +665,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
goto out_exit;
spin_lock_irq(shost->host_lock);
- lport = (struct lpfc_nvme_lport *)localport->private;
/* Port state is only one of two values for now. */
if (localport->port_id)
@@ -673,9 +677,12 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
localport->port_id, statep);
len += snprintf(buf + len, size - len, "\tRport List:\n");
- list_for_each_entry(rport, &lport->rport_list, list) {
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
/* local short-hand pointer. */
- nrport = rport->remoteport;
+ if (!ndlp->nrport)
+ continue;
+
+ nrport = ndlp->nrport->remoteport;
/* Port state is only one of two values for now. */
switch (nrport->port_state) {
@@ -698,26 +705,23 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
nrport->port_name);
len += snprintf(buf + len, size - len, "WWNN x%llx ",
nrport->node_name);
- switch (nrport->port_role) {
- case FC_PORT_ROLE_NVME_INITIATOR:
+
+ /* An NVME rport can have multiple roles. */
+ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
len += snprintf(buf + len, size - len,
- "NVME INITIATOR ");
- break;
- case FC_PORT_ROLE_NVME_TARGET:
+ "INITIATOR ");
+ if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
len += snprintf(buf + len, size - len,
- "NVME TARGET ");
- break;
- case FC_PORT_ROLE_NVME_DISCOVERY:
+ "TARGET ");
+ if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
len += snprintf(buf + len, size - len,
- "NVME DISCOVERY ");
- break;
- default:
+ "DISCSRVC ");
+ if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
+ FC_PORT_ROLE_NVME_TARGET |
+ FC_PORT_ROLE_NVME_DISCOVERY))
len += snprintf(buf + len, size - len,
"UNKNOWN ROLE x%x",
nrport->port_role);
- break;
- }
-
/* Terminate the string. */
len += snprintf(buf + len, size - len, "\n");
}
@@ -746,6 +750,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+ uint64_t tot, data1, data2, data3;
int len = 0;
int cnt;
@@ -843,11 +848,21 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
}
+ spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
+ spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ tot = phba->sli4_hba.nvmet_xri_cnt -
+ (phba->sli4_hba.nvmet_ctx_get_cnt +
+ phba->sli4_hba.nvmet_ctx_put_cnt);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+
len += snprintf(buf + len, size - len,
- "IO_CTX: %08x outstanding %08x total %08x\n",
- phba->sli4_hba.nvmet_ctx_cnt,
+ "IO_CTX: %08x WAIT: cur %08x tot %08x\n"
+ "CTX Outstanding %08llx\n",
+ phba->sli4_hba.nvmet_xri_cnt,
phba->sli4_hba.nvmet_io_wait_cnt,
- phba->sli4_hba.nvmet_io_wait_total);
+ phba->sli4_hba.nvmet_io_wait_total,
+ tot);
} else {
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return len;
@@ -856,18 +871,22 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
"\nNVME Lport Statistics\n");
len += snprintf(buf + len, size - len,
- "LS: Xmt %016llx Cmpl %016llx\n",
- phba->fc4NvmeLsRequests,
- phba->fc4NvmeLsCmpls);
+ "LS: Xmt %016x Cmpl %016x\n",
+ atomic_read(&phba->fc4NvmeLsRequests),
+ atomic_read(&phba->fc4NvmeLsCmpls));
+
+ tot = atomic_read(&phba->fc4NvmeIoCmpls);
+ data1 = atomic_read(&phba->fc4NvmeInputRequests);
+ data2 = atomic_read(&phba->fc4NvmeOutputRequests);
+ data3 = atomic_read(&phba->fc4NvmeControlRequests);
len += snprintf(buf + len, size - len,
"FCP: Rd %016llx Wr %016llx IO %016llx\n",
- phba->fc4NvmeInputRequests,
- phba->fc4NvmeOutputRequests,
- phba->fc4NvmeControlRequests);
+ data1, data2, data3);
len += snprintf(buf + len, size - len,
- " Cmpl %016llx\n", phba->fc4NvmeIoCmpls);
+ " Cmpl %016llx Outstanding %016llx\n",
+ tot, (data1 + data2 + data3) - tot);
}
return len;
@@ -3229,9 +3248,9 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"\n%s EQ info: EQ-STAT[max:x%x noE:x%x "
- "bs:x%x proc:x%llx]\n",
+ "bs:x%x proc:x%llx eqd %d]\n",
eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
- (unsigned long long)qp->q_cnt_4);
+ (unsigned long long)qp->q_cnt_4, qp->q_mode);
len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
"EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 8e532b39ae93..6d1d6f691df4 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2168,6 +2168,19 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fc4_type, ndlp->nlp_DID);
return 1;
}
+
+ /* SLI3 ports don't support NVME. If this rport is a strict NVME
+ * FC4 type, implicitly LOGO.
+ */
+ if (phba->sli_rev == LPFC_SLI_REV3 &&
+ ndlp->nlp_fc4_type == NLP_FC4_NVME) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n",
+ ndlp->nlp_type);
+ lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+ return 1;
+ }
+
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
ndlp->nlp_DID, elscmd);
if (!elsiocb)
@@ -2268,7 +2281,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* The driver supports 2 FC4 types. Make sure
* a PRLI is issued for all types before exiting.
*/
- if (local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
+ if (phba->sli_rev == LPFC_SLI_REV4 &&
+ local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
goto send_next_prli;
return 0;
@@ -3332,6 +3346,19 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
switch (stat.un.b.lsRjtRsnCode) {
case LSRJT_UNABLE_TPC:
+ /* The driver has a VALID PLOGI but the rport has
+ * rejected the PRLI - can't do it now. Delay
+ * for 1 second and try again - don't care about
+ * the explanation.
+ */
+ if (cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) {
+ delay = 1000;
+ maxretry = lpfc_max_els_tries + 1;
+ retry = 1;
+ break;
+ }
+
+ /* Legacy bug fix code for targets with PLOGI delays. */
if (stat.un.b.lsRjtRsnCodeExp ==
LSEXP_CMD_IN_PROGRESS) {
if (cmd == ELS_CMD_PLOGI) {
@@ -3350,9 +3377,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
retry = 1;
break;
}
- if ((cmd == ELS_CMD_PLOGI) ||
- (cmd == ELS_CMD_PRLI) ||
- (cmd == ELS_CMD_NVMEPRLI)) {
+ if (cmd == ELS_CMD_PLOGI) {
delay = 1000;
maxretry = lpfc_max_els_tries + 1;
retry = 1;
@@ -5678,27 +5703,13 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
rjt_err = LSRJT_CMD_UNSUPPORTED;
goto rjt;
}
- if (beacon->lcb_frequency == 0) {
- rjt_err = LSRJT_CMD_UNSUPPORTED;
- goto rjt;
- }
- if ((beacon->lcb_type != LPFC_LCB_GREEN) &&
- (beacon->lcb_type != LPFC_LCB_AMBER)) {
- rjt_err = LSRJT_CMD_UNSUPPORTED;
- goto rjt;
- }
- if ((beacon->lcb_sub_command != LPFC_LCB_ON) &&
- (beacon->lcb_sub_command != LPFC_LCB_OFF)) {
- rjt_err = LSRJT_CMD_UNSUPPORTED;
- goto rjt;
- }
- if ((beacon->lcb_sub_command == LPFC_LCB_ON) &&
- (beacon->lcb_type != LPFC_LCB_GREEN) &&
- (beacon->lcb_type != LPFC_LCB_AMBER)) {
+ if (beacon->lcb_sub_command != LPFC_LCB_ON &&
+ beacon->lcb_sub_command != LPFC_LCB_OFF) {
rjt_err = LSRJT_CMD_UNSUPPORTED;
goto rjt;
}
- if (be16_to_cpu(beacon->lcb_duration) != 0) {
+ if (beacon->lcb_sub_command == LPFC_LCB_ON &&
+ be16_to_cpu(beacon->lcb_duration) != 0) {
rjt_err = LSRJT_CMD_UNSUPPORTED;
goto rjt;
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 3ffcd9215ca8..aa5e5ff56dfb 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4167,14 +4167,14 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_unregister_remote_port(ndlp);
}
- /* Notify the NVME transport of this rport's loss on the
- * Initiator. For NVME Target, should upcall transport
- * in the else clause when API available.
- */
if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
vport->phba->nport_event_cnt++;
if (vport->phba->nvmet_support == 0)
+ /* Start devloss */
lpfc_nvme_unregister_port(vport, ndlp);
+ else
+ /* NVMET has no upcall. */
+ lpfc_nlp_put(ndlp);
}
}
@@ -4182,8 +4182,10 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (new_state == NLP_STE_MAPPED_NODE ||
new_state == NLP_STE_UNMAPPED_NODE) {
- if ((ndlp->nlp_fc4_type & NLP_FC4_FCP) ||
- (ndlp->nlp_DID == Fabric_DID)) {
+ if (ndlp->nlp_fc4_type & NLP_FC4_FCP ||
+ ndlp->nlp_DID == Fabric_DID ||
+ ndlp->nlp_DID == NameServer_DID ||
+ ndlp->nlp_DID == FDMI_DID) {
vport->phba->nport_event_cnt++;
/*
* Tell the fc transport about the port, if we haven't
@@ -4192,7 +4194,8 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_register_remote_port(vport, ndlp);
}
/* Notify the NVME transport of this new rport. */
- if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
+ if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
+ ndlp->nlp_fc4_type & NLP_FC4_NVME) {
if (vport->phba->nvmet_support == 0) {
/* Register this rport with the transport.
* Initiators take the NDLP ref count in
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index e0a5fce416ae..bb4715705fa3 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -197,6 +197,7 @@ struct lpfc_sli_intf {
/* Delay Multiplier constant */
#define LPFC_DMULT_CONST 651042
+#define LPFC_DMULT_MAX 1023
/* Configuration of Interrupts / sec for entire HBA port */
#define LPFC_MIN_IMAX 5000
@@ -657,6 +658,15 @@ struct lpfc_register {
#define LPFC_CTL_PORT_ER1_OFFSET 0x40C
#define LPFC_CTL_PORT_ER2_OFFSET 0x410
+#define LPFC_CTL_PORT_EQ_DELAY_OFFSET 0x418
+#define lpfc_sliport_eqdelay_delay_SHIFT 16
+#define lpfc_sliport_eqdelay_delay_MASK 0xffff
+#define lpfc_sliport_eqdelay_delay_WORD word0
+#define lpfc_sliport_eqdelay_id_SHIFT 0
+#define lpfc_sliport_eqdelay_id_MASK 0xfff
+#define lpfc_sliport_eqdelay_id_WORD word0
+#define LPFC_SEC_TO_USEC 1000000
+
/* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
* reside in BAR 2.
*/
@@ -3258,6 +3268,10 @@ struct lpfc_sli4_parameters {
#define cfg_xib_SHIFT 4
#define cfg_xib_MASK 0x00000001
#define cfg_xib_WORD word19
+#define cfg_eqdr_SHIFT 8
+#define cfg_eqdr_MASK 0x00000001
+#define cfg_eqdr_WORD word19
+#define LPFC_NODELAY_MAX_IO 32
};
#define LPFC_SET_UE_RECOVERY 0x10
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 9add9473cae5..491aa95eb0f6 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1249,6 +1249,12 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
int retval, i;
struct lpfc_sli *psli = &phba->sli;
LIST_HEAD(completions);
+ struct lpfc_queue *qp;
+ unsigned long time_elapsed;
+ uint32_t tick_cqe, max_cqe, val;
+ uint64_t tot, data1, data2, data3;
+ struct lpfc_register reg_data;
+ void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
@@ -1263,6 +1269,98 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
(phba->pport->fc_flag & FC_OFFLINE_MODE))
return;
+ if (phba->cfg_auto_imax) {
+ if (!phba->last_eqdelay_time) {
+ phba->last_eqdelay_time = jiffies;
+ goto skip_eqdelay;
+ }
+ time_elapsed = jiffies - phba->last_eqdelay_time;
+ phba->last_eqdelay_time = jiffies;
+
+ tot = 0xffff;
+ /* Check outstanding IO count */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ if (phba->nvmet_support) {
+ spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
+ spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ tot = phba->sli4_hba.nvmet_xri_cnt -
+ (phba->sli4_hba.nvmet_ctx_get_cnt +
+ phba->sli4_hba.nvmet_ctx_put_cnt);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+ } else {
+ tot = atomic_read(&phba->fc4NvmeIoCmpls);
+ data1 = atomic_read(
+ &phba->fc4NvmeInputRequests);
+ data2 = atomic_read(
+ &phba->fc4NvmeOutputRequests);
+ data3 = atomic_read(
+ &phba->fc4NvmeControlRequests);
+ tot = (data1 + data2 + data3) - tot;
+ }
+ }
+
+ /* Interrupts per sec per EQ */
+ val = phba->cfg_fcp_imax / phba->io_channel_irqs;
+ tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */
+
+ /* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
+ max_cqe = time_elapsed * tick_cqe;
+
+ for (i = 0; i < phba->io_channel_irqs; i++) {
+ /* Fast-path EQ */
+ qp = phba->sli4_hba.hba_eq[i];
+ if (!qp)
+ continue;
+
+ /* Use no EQ delay if we don't have many outstanding
+ * IOs, or if we are only processing 1 CQE/ISR or less.
+ * Otherwise, assume we can process up to lpfc_fcp_imax
+ * interrupts per HBA.
+ */
+ if (tot < LPFC_NODELAY_MAX_IO ||
+ qp->EQ_cqe_cnt <= max_cqe)
+ val = 0;
+ else
+ val = phba->cfg_fcp_imax;
+
+ if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
+ /* Use EQ Delay Register method */
+
+ /* Convert for EQ Delay register */
+ if (val) {
+ /* First, interrupts per sec per EQ */
+ val = phba->cfg_fcp_imax /
+ phba->io_channel_irqs;
+
+ /* us delay between each interrupt */
+ val = LPFC_SEC_TO_USEC / val;
+ }
+ if (val != qp->q_mode) {
+ reg_data.word0 = 0;
+ bf_set(lpfc_sliport_eqdelay_id,
+ &reg_data, qp->queue_id);
+ bf_set(lpfc_sliport_eqdelay_delay,
+ &reg_data, val);
+ writel(reg_data.word0, eqdreg);
+ }
+ } else {
+ /* Use mbox command method */
+ if (val != qp->q_mode)
+ lpfc_modify_hba_eq_delay(phba, i,
+ 1, val);
+ }
+
+ /*
+ * val is cfg_fcp_imax or 0 for mbox delay or us delay
+ * between interrupts for EQDR.
+ */
+ qp->q_mode = val;
+ qp->EQ_cqe_cnt = 0;
+ }
+ }
+
+skip_eqdelay:
spin_lock_irq(&phba->pport->work_port_lock);
if (time_after(phba->last_completion_time +
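
A worked example of the auto-imax arithmetic above, using made-up numbers
(cfg_fcp_imax = 80000 interrupts/sec, io_channel_irqs = 8, CONFIG_HZ = 1000,
and 5000 jiffies elapsed since the last heartbeat):

	val      = 80000 / 8;        /* 10000 interrupts/sec per EQ          */
	tick_cqe = 10000 / 1000;     /* 10 CQEs per timer tick per EQ        */
	max_cqe  = 5000 * 10;        /* 50000 CQEs allowed over the interval */

	/*
	 * If at least LPFC_NODELAY_MAX_IO (32) I/Os are outstanding and the EQ
	 * processed more than 50000 CQEs, coalescing is applied; with the EQ
	 * Delay Register method the programmed delay becomes
	 * LPFC_SEC_TO_USEC / 10000 = 100 microseconds between interrupts.
	 */
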
@@ -2707,13 +2805,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
- if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
- /* Remove the NVME transport reference now and
- * continue to remove the node.
- */
- lpfc_nlp_put(ndlp);
- }
-
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RM);
}
@@ -3392,7 +3483,6 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
-
if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
/* els xri-sgl expanded */
xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
@@ -3596,14 +3686,6 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
LPFC_MBOXQ_t *mboxq;
MAILBOX_t *mb;
- if (phba->sli_rev < LPFC_SLI_REV4) {
- /* Reset the port first */
- lpfc_sli_brdrestart(phba);
- rc = lpfc_sli_chipset_init(phba);
- if (rc)
- return (uint64_t)-1;
- }
-
mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
GFP_KERNEL);
if (!mboxq)
@@ -3757,8 +3839,19 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
int i;
uint64_t wwn;
bool use_no_reset_hba = false;
+ int rc;
- wwn = lpfc_get_wwpn(phba);
+ if (lpfc_no_hba_reset_cnt) {
+ if (phba->sli_rev < LPFC_SLI_REV4 &&
+ dev == &phba->pcidev->dev) {
+ /* Reset the port first */
+ lpfc_sli_brdrestart(phba);
+ rc = lpfc_sli_chipset_init(phba);
+ if (rc)
+ return NULL;
+ }
+ wwn = lpfc_get_wwpn(phba);
+ }
for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
if (wwn == lpfc_no_hba_reset[i]) {
@@ -5837,7 +5930,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
- INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_get_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
/* Fast-path XRI aborted CQ Event work queue list */
@@ -5846,7 +5940,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* This abort list used by worker thread */
spin_lock_init(&phba->sli4_hba.sgl_list_lock);
- spin_lock_init(&phba->sli4_hba.nvmet_io_lock);
+ spin_lock_init(&phba->sli4_hba.nvmet_ctx_get_lock);
+ spin_lock_init(&phba->sli4_hba.nvmet_ctx_put_lock);
spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
/*
@@ -6731,6 +6826,16 @@ lpfc_create_shost(struct lpfc_hba *phba)
phba->fc_arbtov = FF_DEF_ARBTOV;
atomic_set(&phba->sdev_cnt, 0);
+ atomic_set(&phba->fc4ScsiInputRequests, 0);
+ atomic_set(&phba->fc4ScsiOutputRequests, 0);
+ atomic_set(&phba->fc4ScsiControlRequests, 0);
+ atomic_set(&phba->fc4ScsiIoCmpls, 0);
+ atomic_set(&phba->fc4NvmeInputRequests, 0);
+ atomic_set(&phba->fc4NvmeOutputRequests, 0);
+ atomic_set(&phba->fc4NvmeControlRequests, 0);
+ atomic_set(&phba->fc4NvmeIoCmpls, 0);
+ atomic_set(&phba->fc4NvmeLsRequests, 0);
+ atomic_set(&phba->fc4NvmeLsCmpls, 0);
vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
if (!vport)
return -ENODEV;
@@ -7247,6 +7352,9 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
break;
case LPFC_SLI_INTF_IF_TYPE_2:
+ phba->sli4_hba.u.if_type2.EQDregaddr =
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_CTL_PORT_EQ_DELAY_OFFSET;
phba->sli4_hba.u.if_type2.ERR1regaddr =
phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PORT_ER1_OFFSET;
@@ -8773,7 +8881,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
}
for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
- lpfc_modify_hba_eq_delay(phba, qidx);
+ lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
+ phba->cfg_fcp_imax);
return 0;
@@ -9655,6 +9764,7 @@ static int
lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
int vectors, rc, index;
+ char *name;
/* Set up MSI-X multi-message vectors */
vectors = phba->io_channel_irqs;
@@ -9673,9 +9783,9 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
/* Assign MSI-X vectors to interrupt handlers */
for (index = 0; index < vectors; index++) {
- memset(&phba->sli4_hba.handler_name[index], 0, 16);
- snprintf((char *)&phba->sli4_hba.handler_name[index],
- LPFC_SLI4_HANDLER_NAME_SZ,
+ name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
+ memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
+ snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
LPFC_DRIVER_HANDLER_NAME"%d", index);
phba->sli4_hba.hba_eq_hdl[index].idx = index;
@@ -9684,12 +9794,12 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
if (phba->cfg_fof && (index == (vectors - 1)))
rc = request_irq(pci_irq_vector(phba->pcidev, index),
&lpfc_sli4_fof_intr_handler, 0,
- (char *)&phba->sli4_hba.handler_name[index],
+ name,
&phba->sli4_hba.hba_eq_hdl[index]);
else
rc = request_irq(pci_irq_vector(phba->pcidev, index),
&lpfc_sli4_hba_intr_handler, 0,
- (char *)&phba->sli4_hba.handler_name[index],
+ name,
&phba->sli4_hba.hba_eq_hdl[index]);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -10241,6 +10351,9 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp)
phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
+ if (bf_get(cfg_eqdr, mbx_sli4_parameters))
+ phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
+
/* Make sure that sge_supp_len can be handled by the driver */
if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 8008c8205fb6..0a0a1b92d01d 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -186,13 +186,12 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
/* Remove this rport from the lport's list - memory is owned by the
* transport. Remove the ndlp reference for the NVME transport before
- * calling state machine to remove the node, this is devloss = 0
- * semantics.
+ * calling state machine to remove the node.
*/
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6146 remoteport delete complete %p\n",
remoteport);
- list_del(&rport->list);
+ ndlp->nrport = NULL;
lpfc_nlp_put(ndlp);
rport_err:
@@ -212,7 +211,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_dmabuf *buf_ptr;
struct lpfc_nodelist *ndlp;
- vport->phba->fc4NvmeLsCmpls++;
+ atomic_inc(&vport->phba->fc4NvmeLsCmpls);
pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
@@ -479,7 +478,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
&pnvme_lsreq->rspdma);
- vport->phba->fc4NvmeLsRequests++;
+ atomic_inc(&vport->phba->fc4NvmeLsRequests);
/* Hardcode the wait to 30 seconds. Connections are failing otherwise.
* This code allows it all to work.
@@ -774,7 +773,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
wcqe);
return;
}
- phba->fc4NvmeIoCmpls++;
+ atomic_inc(&phba->fc4NvmeIoCmpls);
nCmd = lpfc_ncmd->nvmeCmd;
rport = lpfc_ncmd->nrport;
@@ -999,7 +998,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
NVME_WRITE_CMD);
- phba->fc4NvmeOutputRequests++;
+ atomic_inc(&phba->fc4NvmeOutputRequests);
} else {
/* Word 7 */
bf_set(wqe_cmnd, &wqe->generic.wqe_com,
@@ -1020,7 +1019,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
NVME_READ_CMD);
- phba->fc4NvmeInputRequests++;
+ atomic_inc(&phba->fc4NvmeInputRequests);
}
} else {
/* Word 4 */
@@ -1041,7 +1040,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Word 11 */
bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD);
- phba->fc4NvmeControlRequests++;
+ atomic_inc(&phba->fc4NvmeControlRequests);
}
/*
* Finish initializing those WQE fields that are independent
@@ -1362,6 +1361,13 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
return 0;
out_free_nvme_buf:
+ if (lpfc_ncmd->nvmeCmd->sg_cnt) {
+ if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
+ atomic_dec(&phba->fc4NvmeOutputRequests);
+ else
+ atomic_dec(&phba->fc4NvmeInputRequests);
+ } else
+ atomic_dec(&phba->fc4NvmeControlRequests);
lpfc_release_nvme_buf(phba, lpfc_ncmd);
out_fail:
return ret;
@@ -1421,7 +1427,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
- struct lpfc_nodelist *ndlp;
struct lpfc_nvme_rport *rport;
struct lpfc_nvme_buf *lpfc_nbuf;
struct lpfc_iocbq *abts_buf;
@@ -1443,38 +1448,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
pnvme_rport->port_id,
pnvme_fcreq);
- /*
- * Catch race where our node has transitioned, but the
- * transport is still transitioning.
- */
- ndlp = rport->ndlp;
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS,
- "6054 rport %p, ndlp %p, DID x%06x ndlp "
- " not ready.\n",
- rport, ndlp, pnvme_rport->port_id);
-
- ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
- if (!ndlp) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
- "6055 Could not find node for "
- "DID %x\n",
- pnvme_rport->port_id);
- return;
- }
- }
-
- /* The remote node has to be ready to send an abort. */
- if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
- !(ndlp->nlp_type & NLP_NVME_TARGET)) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
- "6048 rport %p, DID x%06x not ready for "
- "IO. State x%x, Type x%x\n",
- rport, pnvme_rport->port_id,
- ndlp->nlp_state, ndlp->nlp_type);
- return;
- }
-
/* If the hba is getting reset, this flag is set. It is
* cleared when the reset is complete and rings reestablished.
*/
@@ -1535,7 +1508,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
nvmereq_wqe->sli4_xritag,
- nvmereq_wqe->hba_wqidx, ndlp->nlp_DID);
+ nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
/* Outstanding abort is in progress */
if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
@@ -2208,7 +2181,6 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
lport = (struct lpfc_nvme_lport *)localport->private;
vport->localport = localport;
lport->vport = vport;
- INIT_LIST_HEAD(&lport->rport_list);
vport->nvmei_support = 1;
len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
vport->phba->total_nvme_bufs += len;
@@ -2233,7 +2205,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
#if (IS_ENABLED(CONFIG_NVME_FC))
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
- struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
int ret;
if (vport->nvmei_support == 0)
@@ -2246,19 +2217,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6011 Destroying NVME localport %p\n",
localport);
- list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) {
- /* The last node ref has to get released now before the rport
- * private memory area is released by the transport.
- */
- list_del(&rport->list);
-
- init_completion(&rport->rport_unreg_done);
- ret = nvme_fc_unregister_remoteport(rport->remoteport);
- if (ret)
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6008 rport fail destroy %x\n", ret);
- wait_for_completion_timeout(&rport->rport_unreg_done, 5);
- }
/* lport's rport list is clear. Unregister
* lport and release resources.
@@ -2340,99 +2298,68 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
localport = vport->localport;
lport = (struct lpfc_nvme_lport *)localport->private;
- if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
-
- /* The driver isn't expecting the rport wwn to change
- * but it might get a different DID on a different
- * fabric.
+ /* NVME rports are not preserved across devloss.
+ * Just register this instance. Note, rpinfo->dev_loss_tmo
+ * is left 0 to indicate acceptance of transport defaults. The
+ * driver communicates port role capabilities consistent
+ * with the PRLI response data.
+ */
+ memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
+ rpinfo.port_id = ndlp->nlp_DID;
+ if (ndlp->nlp_type & NLP_NVME_TARGET)
+ rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
+ if (ndlp->nlp_type & NLP_NVME_INITIATOR)
+ rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
+
+ if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
+ rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
+
+ rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
+ rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+ ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
+ if (!ret) {
+ /* If the ndlp already has an nrport, this is just
+ * a resume of the existing rport. Else this is a
+ * new rport.
*/
- list_for_each_entry(rport, &lport->rport_list, list) {
- if (rport->remoteport->port_name !=
- wwn_to_u64(ndlp->nlp_portname.u.wwn))
- continue;
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
- "6035 lport %p, found matching rport "
- "at wwpn 0x%llx, Data: x%x x%x x%x "
- "x%06x\n",
- lport,
- rport->remoteport->port_name,
- rport->remoteport->port_id,
- rport->remoteport->port_role,
+ rport = remote_port->private;
+ if (ndlp->nrport == rport) {
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NVME_DISC,
+ "6014 Rebinding lport to "
+ "rport wwpn 0x%llx, "
+ "Data: x%x x%x x%x x%06x\n",
+ remote_port->port_name,
+ remote_port->port_id,
+ remote_port->port_role,
ndlp->nlp_type,
ndlp->nlp_DID);
- remote_port = rport->remoteport;
- if ((remote_port->port_id == 0) &&
- (remote_port->port_role ==
- FC_PORT_ROLE_NVME_DISCOVERY)) {
- remote_port->port_id = ndlp->nlp_DID;
- remote_port->port_role &=
- ~FC_PORT_ROLE_NVME_DISCOVERY;
- if (ndlp->nlp_type & NLP_NVME_TARGET)
- remote_port->port_role |=
- FC_PORT_ROLE_NVME_TARGET;
- if (ndlp->nlp_type & NLP_NVME_INITIATOR)
- remote_port->port_role |=
- FC_PORT_ROLE_NVME_INITIATOR;
-
- lpfc_printf_vlog(ndlp->vport, KERN_INFO,
- LOG_NVME_DISC,
- "6014 Rebinding lport to "
- "rport wwpn 0x%llx, "
- "Data: x%x x%x x%x x%06x\n",
- remote_port->port_name,
- remote_port->port_id,
- remote_port->port_role,
- ndlp->nlp_type,
- ndlp->nlp_DID);
- }
- return 0;
- }
-
- /* NVME rports are not preserved across devloss.
- * Just register this instance.
- */
- rpinfo.port_id = ndlp->nlp_DID;
- rpinfo.port_role = 0;
- if (ndlp->nlp_type & NLP_NVME_TARGET)
- rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
- if (ndlp->nlp_type & NLP_NVME_INITIATOR)
- rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
- rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
- rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
- ret = nvme_fc_register_remoteport(localport, &rpinfo,
- &remote_port);
- if (!ret) {
- rport = remote_port->private;
+ } else {
+ /* New rport. */
rport->remoteport = remote_port;
rport->lport = lport;
rport->ndlp = lpfc_nlp_get(ndlp);
if (!rport->ndlp)
return -1;
ndlp->nrport = rport;
- INIT_LIST_HEAD(&rport->list);
- list_add_tail(&rport->list, &lport->rport_list);
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
- "6022 Binding new rport to lport %p "
- "Rport WWNN 0x%llx, Rport WWPN 0x%llx "
- "DID x%06x Role x%x\n",
+ "6022 Binding new rport to "
+ "lport %p Rport WWNN 0x%llx, "
+ "Rport WWPN 0x%llx DID "
+ "x%06x Role x%x\n",
lport,
rpinfo.node_name, rpinfo.port_name,
rpinfo.port_id, rpinfo.port_role);
- } else {
- lpfc_printf_vlog(vport, KERN_ERR,
- LOG_NVME_DISC | LOG_NODE,
- "6031 RemotePort Registration failed "
- "err: %d, DID x%06x\n",
- ret, ndlp->nlp_DID);
}
} else {
- ret = -EINVAL;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6027 Unknown nlp_type x%x on DID x%06x "
- "ndlp %p. Not Registering nvme rport\n",
- ndlp->nlp_type, ndlp->nlp_DID, ndlp);
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_NVME_DISC | LOG_NODE,
+ "6031 RemotePort Registration failed "
+ "err: %d, DID x%06x\n",
+ ret, ndlp->nlp_DID);
}
+
return ret;
#else
return 0;
@@ -2460,7 +2387,6 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_rport *rport;
struct nvme_fc_remote_port *remoteport;
- unsigned long wait_tmo;
localport = vport->localport;
@@ -2491,6 +2417,10 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
*/
if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
init_completion(&rport->rport_unreg_done);
+
+ /* No concern about the role change on the nvme remoteport.
+ * The transport will update it.
+ */
ret = nvme_fc_unregister_remoteport(remoteport);
if (ret != 0) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
@@ -2499,17 +2429,6 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ret, remoteport->port_state);
}
- /* Wait for the driver's delete completion routine to finish
- * before proceeding. This guarantees the transport and driver
- * have completed the unreg process.
- */
- wait_tmo = msecs_to_jiffies(5000);
- ret = wait_for_completion_timeout(&rport->rport_unreg_done,
- wait_tmo);
- if (ret == 0) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
- "6169 Unreg nvme wait timeout\n");
- }
}
return;
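The lpfc_nvme.c hunks above convert the per-HBA NVME statistics (fc4NvmeLsRequests, fc4NvmeInputRequests, fc4NvmeIoCmpls, and friends) from plain increments to atomic_t and back the increment out on the submit failure path; the SCSI counters in lpfc_scsi.c get the same treatment further down. A minimal sketch of the pattern, using hypothetical structure and helper names that are not the driver's, not part of the patch:

#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical per-adapter statistics block */
struct demo_stats {
        atomic_t output_reqs;   /* writes submitted */
        atomic_t input_reqs;    /* reads submitted */
        atomic_t cmpls;         /* completions seen */
};

int demo_hw_queue_io(bool is_write);    /* assumed low-level submit helper */

static int demo_submit(struct demo_stats *s, bool is_write)
{
        int rc;

        /* Count the request up front; lockless and IRQ-safe. */
        if (is_write)
                atomic_inc(&s->output_reqs);
        else
                atomic_inc(&s->input_reqs);

        rc = demo_hw_queue_io(is_write);
        if (rc) {
                /* Submission failed: back the counter out so the
                 * exported statistics do not drift.
                 */
                if (is_write)
                        atomic_dec(&s->output_reqs);
                else
                        atomic_dec(&s->input_reqs);
        }
        return rc;
}

static void demo_complete(struct demo_stats *s)
{
        atomic_inc(&s->cmpls);
}

atomic_inc()/atomic_dec() keep the counters consistent without taking any ring lock on the I/O fast path.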
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index ec32f45daa66..d192bb268f99 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -35,13 +35,11 @@ struct lpfc_nvme_qhandle {
/* Declare nvme-based local and remote port definitions. */
struct lpfc_nvme_lport {
struct lpfc_vport *vport;
- struct list_head rport_list;
struct completion lport_unreg_done;
/* Add stats counters here */
};
struct lpfc_nvme_rport {
- struct list_head list;
struct lpfc_nvme_lport *lport;
struct nvme_fc_remote_port *remoteport;
struct lpfc_nodelist *ndlp;
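With rport_list and the list linkage removed from these structures, the lpfc_nvme_rport now lives only in the private area the nvme-fc transport allocates alongside each remote port (sized by remoteport_priv_sz in the registration template), and ndlp->nrport replaces the old list walk. A hedged sketch of that ownership pattern, with invented names rather than the driver's:

#include <linux/nvme-fc-driver.h>

struct demo_rport {                     /* lives in remoteport->private */
        struct nvme_fc_remote_port *remoteport;
        void *node;                     /* driver's node back-pointer */
};

static int demo_bind_rport(struct nvme_fc_local_port *localport,
                           struct nvme_fc_port_info *rpinfo, void *node)
{
        struct nvme_fc_remote_port *remoteport;
        struct demo_rport *rport;
        int ret;

        ret = nvme_fc_register_remoteport(localport, rpinfo, &remoteport);
        if (ret)
                return ret;

        /* The transport owns this memory (assuming the template set
         * remoteport_priv_sz >= sizeof(struct demo_rport)); it is
         * released with the remote port, so no driver-side list is needed.
         */
        rport = remoteport->private;
        rport->remoteport = remoteport;
        rport->node = node;
        return 0;
}

Because the transport frees that private area when the remote port goes away, the driver no longer needs its own unregister-and-wait loop in lpfc_nvme_destroy_localport().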
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 518b15e6f222..fbeec344c6cc 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -112,6 +112,15 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
status = bf_get(lpfc_wcqe_c_status, wcqe);
result = wcqe->parameter;
+ ctxp = cmdwqe->context2;
+
+ if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6410 NVMET LS cmpl state mismatch IO x%x: "
+ "%d %d\n",
+ ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+ }
+
if (!phba->targetport)
goto out;
@@ -123,15 +132,14 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
out:
- ctxp = cmdwqe->context2;
rsp = &ctxp->ctx.ls_req;
lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
ctxp->oxid, status, result);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
- "6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__,
- ctxp, status, result);
+ "6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
+ status, result, ctxp->oxid);
lpfc_nlp_put(cmdwqe->context1);
cmdwqe->context2 = NULL;
@@ -162,7 +170,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
struct rqb_dmabuf *nvmebuf;
- struct lpfc_dmabuf *hbufp;
uint32_t *payload;
uint32_t size, oxid, sid, rc;
unsigned long iflag;
@@ -173,11 +180,16 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
ctxp->txrdy = NULL;
ctxp->txrdy_phys = 0;
}
+
+ if (ctxp->state == LPFC_NVMET_STE_FREE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6411 NVMET free, already free IO x%x: %d %d\n",
+ ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+ }
ctxp->state = LPFC_NVMET_STE_FREE;
spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
if (phba->sli4_hba.nvmet_io_wait_cnt) {
- hbufp = &nvmebuf->hbuf;
list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
nvmebuf, struct rqb_dmabuf,
hbuf.list);
@@ -193,7 +205,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
sid = sli4_sid_from_fc_hdr(fc_hdr);
ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
- memset(ctxp, 0, sizeof(ctxp->ctx));
ctxp->wqeq = NULL;
ctxp->txrdy = NULL;
ctxp->offset = 0;
@@ -256,11 +267,11 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
}
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
- spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
+ spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
list_add_tail(&ctx_buf->list,
- &phba->sli4_hba.lpfc_nvmet_ctx_list);
- phba->sli4_hba.nvmet_ctx_cnt++;
- spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
+ &phba->sli4_hba.lpfc_nvmet_ctx_put_list);
+ phba->sli4_hba.nvmet_ctx_put_cnt++;
+ spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
#endif
}
@@ -580,8 +591,17 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
int rc;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
- "6023 %s: Entrypoint ctx %p %p\n", __func__,
- ctxp, tgtport);
+ "6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
+
+ if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
+ (ctxp->entry_cnt != 1)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6412 NVMET LS rsp state mismatch "
+ "oxid x%x: %d %d\n",
+ ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+ }
+ ctxp->state = LPFC_NVMET_STE_LS_RSP;
+ ctxp->entry_cnt++;
nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
rsp->rsplen);
@@ -751,15 +771,14 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
unsigned long flags;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6103 Abort op: oxri x%x flg x%x cnt %d\n",
- ctxp->oxid, ctxp->flag, ctxp->entry_cnt);
+ "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
+ ctxp->oxid, ctxp->flag, ctxp->state);
- lpfc_nvmeio_data(phba, "NVMET FCP ABRT: "
- "xri x%x flg x%x cnt x%x\n",
- ctxp->oxid, ctxp->flag, ctxp->entry_cnt);
+ lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
+ ctxp->oxid, ctxp->flag, ctxp->state);
atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
- ctxp->entry_cnt++;
+
spin_lock_irqsave(&ctxp->ctxlock, flags);
/* Since iaab/iaar are NOT set, we need to check
@@ -770,12 +789,17 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
return;
}
ctxp->flag |= LPFC_NVMET_ABORT_OP;
- if (ctxp->flag & LPFC_NVMET_IO_INP)
- lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
- ctxp->oxid);
- else
+
+ /* A state of LPFC_NVMET_STE_RCV means we have just received
+ * the NVME command and have not yet started processing it
+ * (no IO WQEs have been issued on this exchange).
+ */
+ if (ctxp->state == LPFC_NVMET_STE_RCV)
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
+ else
+ lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+ ctxp->oxid);
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
}
@@ -790,6 +814,13 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
unsigned long flags;
bool aborting = false;
+ if (ctxp->state != LPFC_NVMET_STE_DONE &&
+ ctxp->state != LPFC_NVMET_STE_ABORT) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6413 NVMET release bad state %d %d oxid x%x\n",
+ ctxp->state, ctxp->entry_cnt, ctxp->oxid);
+ }
+
spin_lock_irqsave(&ctxp->ctxlock, flags);
if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
(ctxp->flag & LPFC_NVMET_XBUSY)) {
@@ -828,37 +859,55 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};
-void
+static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
unsigned long flags;
- list_for_each_entry_safe(
- ctx_buf, next_ctx_buf,
- &phba->sli4_hba.lpfc_nvmet_ctx_list, list) {
- spin_lock_irqsave(
- &phba->sli4_hba.abts_nvme_buf_list_lock, flags);
+ spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
+ spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ list_for_each_entry_safe(ctx_buf, next_ctx_buf,
+ &phba->sli4_hba.lpfc_nvmet_ctx_get_list, list) {
+ spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ list_del_init(&ctx_buf->list);
+ spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ __lpfc_clear_active_sglq(phba,
+ ctx_buf->sglq->sli4_lxritag);
+ ctx_buf->sglq->state = SGL_FREED;
+ ctx_buf->sglq->ndlp = NULL;
+
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+ list_add_tail(&ctx_buf->sglq->list,
+ &phba->sli4_hba.lpfc_nvmet_sgl_list);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
+
+ lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
+ kfree(ctx_buf->context);
+ }
+ list_for_each_entry_safe(ctx_buf, next_ctx_buf,
+ &phba->sli4_hba.lpfc_nvmet_ctx_put_list, list) {
+ spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
list_del_init(&ctx_buf->list);
- spin_unlock_irqrestore(
- &phba->sli4_hba.abts_nvme_buf_list_lock, flags);
+ spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
__lpfc_clear_active_sglq(phba,
ctx_buf->sglq->sli4_lxritag);
ctx_buf->sglq->state = SGL_FREED;
ctx_buf->sglq->ndlp = NULL;
- spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
list_add_tail(&ctx_buf->sglq->list,
&phba->sli4_hba.lpfc_nvmet_sgl_list);
- spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
- flags);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
kfree(ctx_buf->context);
}
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
}
-int
+static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
struct lpfc_nvmet_ctxbuf *ctx_buf;
@@ -891,6 +940,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
return -ENOMEM;
}
ctx_buf->context->ctxbuf = ctx_buf;
+ ctx_buf->context->state = LPFC_NVMET_STE_FREE;
ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
if (!ctx_buf->iocbq) {
@@ -926,12 +976,12 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
"6407 Ran out of NVMET XRIs\n");
return -ENOMEM;
}
- spin_lock(&phba->sli4_hba.nvmet_io_lock);
+ spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
list_add_tail(&ctx_buf->list,
- &phba->sli4_hba.lpfc_nvmet_ctx_list);
- spin_unlock(&phba->sli4_hba.nvmet_io_lock);
+ &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
}
- phba->sli4_hba.nvmet_ctx_cnt = phba->sli4_hba.nvmet_xri_cnt;
+ phba->sli4_hba.nvmet_ctx_get_cnt = phba->sli4_hba.nvmet_xri_cnt;
return 0;
}
@@ -1103,7 +1153,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
}
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6318 XB aborted %x flg x%x (%x)\n",
+ "6318 XB aborted oxid %x flg x%x (%x)\n",
ctxp->oxid, ctxp->flag, released);
if (released)
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
@@ -1253,7 +1303,8 @@ dropit:
ctxp->oxid = oxid;
ctxp->sid = sid;
ctxp->wqeq = NULL;
- ctxp->state = LPFC_NVMET_STE_RCV;
+ ctxp->state = LPFC_NVMET_STE_LS_RCV;
+ ctxp->entry_cnt = 1;
ctxp->rqb_buffer = (void *)nvmebuf;
lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
@@ -1268,8 +1319,8 @@ dropit:
payload, size);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
- "6037 %s: ctx %p sz %d rc %d: %08x %08x %08x "
- "%08x %08x %08x\n", __func__, ctxp, size, rc,
+ "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
+ "%08x %08x %08x\n", size, rc,
*payload, *(payload+1), *(payload+2),
*(payload+3), *(payload+4), *(payload+5));
@@ -1337,13 +1388,31 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
goto dropit;
}
- spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
- if (phba->sli4_hba.nvmet_ctx_cnt) {
- list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_list,
+ spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
+ if (phba->sli4_hba.nvmet_ctx_get_cnt) {
+ list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_get_list,
ctx_buf, struct lpfc_nvmet_ctxbuf, list);
- phba->sli4_hba.nvmet_ctx_cnt--;
+ phba->sli4_hba.nvmet_ctx_get_cnt--;
+ } else {
+ spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ if (phba->sli4_hba.nvmet_ctx_put_cnt) {
+ list_splice(&phba->sli4_hba.lpfc_nvmet_ctx_put_list,
+ &phba->sli4_hba.lpfc_nvmet_ctx_get_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
+ phba->sli4_hba.nvmet_ctx_get_cnt =
+ phba->sli4_hba.nvmet_ctx_put_cnt;
+ phba->sli4_hba.nvmet_ctx_put_cnt = 0;
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+
+ list_remove_head(
+ &phba->sli4_hba.lpfc_nvmet_ctx_get_list,
+ ctx_buf, struct lpfc_nvmet_ctxbuf, list);
+ phba->sli4_hba.nvmet_ctx_get_cnt--;
+ } else {
+ spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+ }
}
- spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
+ spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
@@ -1383,7 +1452,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
sid = sli4_sid_from_fc_hdr(fc_hdr);
ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
- memset(ctxp, 0, sizeof(ctxp->ctx));
+ if (ctxp->state != LPFC_NVMET_STE_FREE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6414 NVMET Context corrupt %d %d oxid x%x\n",
+ ctxp->state, ctxp->entry_cnt, ctxp->oxid);
+ }
ctxp->wqeq = NULL;
ctxp->txrdy = NULL;
ctxp->offset = 0;
@@ -1547,9 +1620,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
if (!lpfc_is_link_up(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
- "6104 lpfc_nvmet_prep_ls_wqe: link err: "
- "NPORT x%x oxid:x%x\n",
- ctxp->sid, ctxp->oxid);
+ "6104 NVMET prep LS wqe: link err: "
+ "NPORT x%x oxid:x%x ste %d\n",
+ ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}
@@ -1557,9 +1630,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
nvmewqe = lpfc_sli_get_iocbq(phba);
if (nvmewqe == NULL) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
- "6105 lpfc_nvmet_prep_ls_wqe: No WQE: "
- "NPORT x%x oxid:x%x\n",
- ctxp->sid, ctxp->oxid);
+ "6105 NVMET prep LS wqe: No WQE: "
+ "NPORT x%x oxid x%x ste %d\n",
+ ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}
@@ -1568,9 +1641,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
- "6106 lpfc_nvmet_prep_ls_wqe: No ndlp: "
- "NPORT x%x oxid:x%x\n",
- ctxp->sid, ctxp->oxid);
+ "6106 NVMET prep LS wqe: No ndlp: "
+ "NPORT x%x oxid x%x ste %d\n",
+ ctxp->sid, ctxp->oxid, ctxp->state);
goto nvme_wqe_free_wqeq_exit;
}
ctxp->wqeq = nvmewqe;
@@ -1642,9 +1715,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
- /* Xmit NVME response to remote NPORT <did> */
+ /* Xmit NVMET response to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
- "6039 Xmit NVME LS response to remote "
+ "6039 Xmit NVMET LS response to remote "
"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
rspsize);
@@ -1676,9 +1749,9 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
if (!lpfc_is_link_up(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6107 lpfc_nvmet_prep_fcp_wqe: link err:"
- "NPORT x%x oxid:x%x\n", ctxp->sid,
- ctxp->oxid);
+ "6107 NVMET prep FCP wqe: link err:"
+ "NPORT x%x oxid x%x ste %d\n",
+ ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}
@@ -1687,17 +1760,18 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6108 lpfc_nvmet_prep_fcp_wqe: no ndlp: "
- "NPORT x%x oxid:x%x\n",
- ctxp->sid, ctxp->oxid);
+ "6108 NVMET prep FCP wqe: no ndlp: "
+ "NPORT x%x oxid x%x ste %d\n",
+ ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}
if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: "
- "NPORT x%x oxid:x%x cnt %d\n",
- ctxp->sid, ctxp->oxid, phba->cfg_nvme_seg_cnt);
+ "6109 NVMET prep FCP wqe: seg cnt err: "
+ "NPORT x%x oxid x%x ste %d cnt %d\n",
+ ctxp->sid, ctxp->oxid, ctxp->state,
+ phba->cfg_nvme_seg_cnt);
return NULL;
}
@@ -1708,9 +1782,9 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
nvmewqe = ctxp->ctxbuf->iocbq;
if (nvmewqe == NULL) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6110 lpfc_nvmet_prep_fcp_wqe: No "
- "WQE: NPORT x%x oxid:x%x\n",
- ctxp->sid, ctxp->oxid);
+ "6110 NVMET prep FCP wqe: No "
+ "WQE: NPORT x%x oxid x%x ste %d\n",
+ ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}
ctxp->wqeq = nvmewqe;
@@ -1722,13 +1796,12 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
/* Sanity check */
if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
(ctxp->entry_cnt == 1)) ||
- ((ctxp->state == LPFC_NVMET_STE_DATA) &&
- (ctxp->entry_cnt > 1))) {
+ (ctxp->state == LPFC_NVMET_STE_DATA)) {
wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6111 Wrong state %s: %d cnt %d\n",
- __func__, ctxp->state, ctxp->entry_cnt);
+ "6111 Wrong state NVMET FCP: %d cnt %d\n",
+ ctxp->state, ctxp->entry_cnt);
return NULL;
}
@@ -1832,7 +1905,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
}
- ctxp->state = LPFC_NVMET_STE_DATA;
break;
case NVMET_FCOP_WRITEDATA:
@@ -1923,7 +1995,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = 0;
sgl++;
- ctxp->state = LPFC_NVMET_STE_DATA;
atomic_inc(&tgtp->xmt_fcp_write);
break;
@@ -1980,7 +2051,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
FCP_COMMAND_TRSP);
bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
- ctxp->state = LPFC_NVMET_STE_RSP;
if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
/* Good response - all zero's on wire */
@@ -2029,6 +2099,8 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
sgl++;
ctxp->offset += cnt;
}
+ ctxp->state = LPFC_NVMET_STE_DATA;
+ ctxp->entry_cnt++;
return nvmewqe;
}
@@ -2124,10 +2196,6 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
status = bf_get(lpfc_wcqe_c_status, wcqe);
result = wcqe->parameter;
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
- if (ctxp->flag & LPFC_NVMET_ABORT_OP)
- atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
-
if (!ctxp) {
/* if context is clear, related io already complete */
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
@@ -2137,6 +2205,10 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
return;
}
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ if (ctxp->flag & LPFC_NVMET_ABORT_OP)
+ atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
+
/* Sanity check */
if (ctxp->state != LPFC_NVMET_STE_ABORT) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
@@ -2206,17 +2278,32 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
atomic_inc(&tgtp->xmt_ls_abort_cmpl);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
+ "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
ctxp, wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);
- if (ctxp) {
- cmdwqe->context2 = NULL;
- cmdwqe->context3 = NULL;
- lpfc_sli_release_iocbq(phba, cmdwqe);
- kfree(ctxp);
- } else
+ if (!ctxp) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ "6415 NVMET LS Abort No ctx: WCQE: "
+ "%08x %08x %08x %08x\n",
+ wcqe->word0, wcqe->total_data_placed,
+ result, wcqe->word3);
+
lpfc_sli_release_iocbq(phba, cmdwqe);
+ return;
+ }
+
+ if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6416 NVMET LS abort cmpl state mismatch: "
+ "oxid x%x: %d %d\n",
+ ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+ }
+
+ cmdwqe->context2 = NULL;
+ cmdwqe->context3 = NULL;
+ lpfc_sli_release_iocbq(phba, cmdwqe);
+ kfree(ctxp);
}
static int
@@ -2240,7 +2327,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
- lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6134 Drop ABTS - wrong NDLP state x%x.\n",
(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
@@ -2250,7 +2337,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
abts_wqeq = ctxp->wqeq;
wqe_abts = &abts_wqeq->wqe;
- ctxp->state = LPFC_NVMET_STE_ABORT;
/*
* Since we zero the whole WQE, we need to ensure we set the WQE fields
@@ -2338,7 +2424,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
- lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6160 Drop ABORT - wrong NDLP state x%x.\n",
(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
@@ -2351,7 +2437,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
if (!ctxp->abort_wqeq) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
- lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6161 ABORT failed: No wqeqs: "
"xri: x%x\n", ctxp->oxid);
/* No failure to an ABTS request. */
@@ -2437,6 +2523,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
abts_wqeq->iocb_cmpl = 0;
abts_wqeq->iocb_flag |= LPFC_IO_NVME;
abts_wqeq->context2 = ctxp;
+ abts_wqeq->vport = phba->pport;
rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (rc == WQE_SUCCESS) {
@@ -2471,6 +2558,15 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
ctxp->wqeq->hba_wqidx = 0;
}
+ if (ctxp->state == LPFC_NVMET_STE_FREE) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
+ ctxp->state, ctxp->entry_cnt, ctxp->oxid);
+ rc = WQE_BUSY;
+ goto aerr;
+ }
+ ctxp->state = LPFC_NVMET_STE_ABORT;
+ ctxp->entry_cnt++;
rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
if (rc == 0)
goto aerr;
@@ -2487,10 +2583,9 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
}
aerr:
- atomic_inc(&tgtp->xmt_abort_rsp_error);
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
atomic_inc(&tgtp->xmt_abort_rsp_error);
- lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
ctxp->oxid, rc);
return 1;
@@ -2507,12 +2602,24 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
unsigned long flags;
int rc;
+ if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
+ (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
+ ctxp->state = LPFC_NVMET_STE_LS_ABORT;
+ ctxp->entry_cnt++;
+ } else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6418 NVMET LS abort state mismatch "
+ "IO x%x: %d %d\n",
+ ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+ ctxp->state = LPFC_NVMET_STE_LS_ABORT;
+ }
+
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
if (!ctxp->wqeq) {
/* Issue ABTS for this WQE based on iotag */
ctxp->wqeq = lpfc_sli_get_iocbq(phba);
if (!ctxp->wqeq) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6068 Abort failed: No wqeqs: "
"xri: x%x\n", xri);
/* No failure to an ABTS request. */
@@ -2523,7 +2630,10 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
abts_wqeq = ctxp->wqeq;
wqe_abts = &abts_wqeq->wqe;
- lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
+ if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
+ rc = WQE_BUSY;
+ goto out;
+ }
spin_lock_irqsave(&phba->hbalock, flags);
abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
@@ -2535,13 +2645,13 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
atomic_inc(&tgtp->xmt_abort_unsol);
return 0;
}
-
+out:
atomic_inc(&tgtp->xmt_abort_rsp_error);
abts_wqeq->context2 = NULL;
abts_wqeq->context3 = NULL;
lpfc_sli_release_iocbq(phba, abts_wqeq);
kfree(ctxp);
- lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6056 Failed to Issue ABTS. Status x%x\n", rc);
return 0;
}
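The single nvmet_io_lock/lpfc_nvmet_ctx_list is split above into a get list (drained by the receive path) and a put list (refilled by completion), each with its own lock; only when the get list runs dry are both locks taken and the put list spliced over. A simplified sketch of that split, with made-up names and not the driver's exact locking, not part of the patch:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_ctx {
        struct list_head list;
        /* ... per-IO context ... */
};

struct demo_pool {
        spinlock_t get_lock;
        spinlock_t put_lock;
        struct list_head get_list;      /* consumer side */
        struct list_head put_list;      /* producer side */
};

/* Completion path: return a context under the put lock only. */
static void demo_ctx_put(struct demo_pool *p, struct demo_ctx *ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&p->put_lock, flags);
        list_add_tail(&ctx->list, &p->put_list);
        spin_unlock_irqrestore(&p->put_lock, flags);
}

/* Receive path: take from the get list; refill from the put list
 * only when the get list is empty, holding both locks briefly
 * (get lock outer, put lock inner, as in the hunks above).
 */
static struct demo_ctx *demo_ctx_get(struct demo_pool *p)
{
        struct demo_ctx *ctx = NULL;
        unsigned long flags;

        spin_lock_irqsave(&p->get_lock, flags);
        if (list_empty(&p->get_list)) {
                spin_lock(&p->put_lock);
                list_splice_init(&p->put_list, &p->get_list);
                spin_unlock(&p->put_lock);
        }
        if (!list_empty(&p->get_list)) {
                ctx = list_first_entry(&p->get_list, struct demo_ctx, list);
                list_del(&ctx->list);
        }
        spin_unlock_irqrestore(&p->get_lock, flags);
        return ctx;
}

Keeping producer and consumer on separate locks takes the hot receive and completion paths off each other's lock; the patch uses list_splice() followed by INIT_LIST_HEAD(), which list_splice_init() folds into one call.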
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 6eb2f5d8d4ed..e675ef17be08 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -93,12 +93,14 @@ struct lpfc_nvmet_rcv_ctx {
uint16_t cpu;
uint16_t state;
/* States */
-#define LPFC_NVMET_STE_FREE 0
-#define LPFC_NVMET_STE_RCV 1
-#define LPFC_NVMET_STE_DATA 2
-#define LPFC_NVMET_STE_ABORT 3
-#define LPFC_NVMET_STE_RSP 4
-#define LPFC_NVMET_STE_DONE 5
+#define LPFC_NVMET_STE_LS_RCV 1
+#define LPFC_NVMET_STE_LS_ABORT 2
+#define LPFC_NVMET_STE_LS_RSP 3
+#define LPFC_NVMET_STE_RCV 4
+#define LPFC_NVMET_STE_DATA 5
+#define LPFC_NVMET_STE_ABORT 6
+#define LPFC_NVMET_STE_DONE 7
+#define LPFC_NVMET_STE_FREE 0xff
uint16_t flag;
#define LPFC_NVMET_IO_INP 0x1 /* IO is in progress on exchange */
#define LPFC_NVMET_ABORT_OP 0x2 /* Abort WQE issued on exchange */
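The widened state set gives the LS exchange its own RCV/RSP/ABORT states and turns STE_FREE into an out-of-band value (0xff), while entry_cnt counts how many handlers have touched the context; each handler now checks the (state, entry_cnt) pair it expects and logs a mismatch rather than silently continuing. A toy version of that sanity check, with invented names and not the driver's code:

#include <linux/kernel.h>

enum demo_ste {
        DEMO_STE_LS_RCV = 1,
        DEMO_STE_LS_ABORT,
        DEMO_STE_LS_RSP,
        DEMO_STE_RCV,
        DEMO_STE_DATA,
        DEMO_STE_ABORT,
        DEMO_STE_DONE,
        DEMO_STE_FREE = 0xff,
};

struct demo_ctx {
        u16 oxid;
        u16 state;
        u16 entry_cnt;
};

/* Advance the context only from where the caller expects it to be;
 * otherwise log and carry on, matching the patch's non-fatal style.
 */
static void demo_ctx_transition(struct demo_ctx *ctxp,
                                u16 want_state, u16 want_cnt,
                                u16 next_state)
{
        if (ctxp->state != want_state || ctxp->entry_cnt != want_cnt)
                pr_err("demo: state mismatch oxid x%x: %d %d\n",
                       ctxp->oxid, ctxp->state, ctxp->entry_cnt);
        ctxp->state = next_state;
        ctxp->entry_cnt++;
}

In the LS path above, entry_cnt is 1 after the unsolicited receive (LS_RCV), 2 once the response is queued (LS_RSP), and the abort path accepts either point before moving to LS_ABORT; the 6410/6412/6416/6418 messages are these checks firing.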
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 54fd0c81ceaf..cfe1d01eb73f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3931,7 +3931,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct Scsi_Host *shost;
uint32_t logit = LOG_FCP;
- phba->fc4ScsiIoCmpls++;
+ atomic_inc(&phba->fc4ScsiIoCmpls);
/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
@@ -4250,19 +4250,19 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
vport->cfg_first_burst_size;
}
fcp_cmnd->fcpCntl3 = WRITE_DATA;
- phba->fc4ScsiOutputRequests++;
+ atomic_inc(&phba->fc4ScsiOutputRequests);
} else {
iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
iocb_cmd->ulpPU = PARM_READ_CHECK;
fcp_cmnd->fcpCntl3 = READ_DATA;
- phba->fc4ScsiInputRequests++;
+ atomic_inc(&phba->fc4ScsiInputRequests);
}
} else {
iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
iocb_cmd->un.fcpi.fcpi_parm = 0;
iocb_cmd->ulpPU = 0;
fcp_cmnd->fcpCntl3 = 0;
- phba->fc4ScsiControlRequests++;
+ atomic_inc(&phba->fc4ScsiControlRequests);
}
if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
@@ -4640,7 +4640,16 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
(uint32_t)
(cmnd->request->timeout / 1000));
-
+ switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
+ case WRITE_DATA:
+ atomic_dec(&phba->fc4ScsiOutputRequests);
+ break;
+ case READ_DATA:
+ atomic_dec(&phba->fc4ScsiInputRequests);
+ break;
+ default:
+ atomic_dec(&phba->fc4ScsiControlRequests);
+ }
goto out_host_busy_free_buf;
}
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index d6b184839bc2..e948ea05fd33 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -968,6 +968,7 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
list_remove_head(lpfc_els_sgl_list, sglq,
struct lpfc_sglq, list);
if (sglq == start_sglq) {
+ list_add_tail(&sglq->list, lpfc_els_sgl_list);
sglq = NULL;
break;
} else
@@ -4302,7 +4303,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
/* Perform FCoE PCI function reset before freeing queue memory */
rc = lpfc_pci_function_reset(phba);
- lpfc_sli4_queue_destroy(phba);
/* Restore PCI cmd register */
pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
@@ -4427,6 +4427,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
pci_disable_pcie_error_reporting(phba->pcidev);
lpfc_hba_down_post(phba);
+ lpfc_sli4_queue_destroy(phba);
return rc;
}
@@ -6926,18 +6927,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
cnt = phba->cfg_iocb_cnt * 1024;
/* We need 1 iocbq for every SGL, for IO processing */
cnt += phba->sli4_hba.nvmet_xri_cnt;
- /* Initialize and populate the iocb list per host */
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2821 initialize iocb list %d total %d\n",
- phba->cfg_iocb_cnt, cnt);
- rc = lpfc_init_iocb_list(phba, cnt);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1413 Failed to init iocb list.\n");
- goto out_destroy_queue;
- }
-
- lpfc_nvmet_create_targetport(phba);
} else {
/* update host scsi xri-sgl sizes and mappings */
rc = lpfc_sli4_scsi_sgl_update(phba);
@@ -6958,18 +6947,24 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
cnt = phba->cfg_iocb_cnt * 1024;
+ }
+
+ if (!phba->sli.iocbq_lookup) {
/* Initialize and populate the iocb list per host */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2820 initialize iocb list %d total %d\n",
+ "2821 initialize iocb list %d total %d\n",
phba->cfg_iocb_cnt, cnt);
rc = lpfc_init_iocb_list(phba, cnt);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "6301 Failed to init iocb list.\n");
+ "1413 Failed to init iocb list.\n");
goto out_destroy_queue;
}
}
+ if (phba->nvmet_support)
+ lpfc_nvmet_create_targetport(phba);
+
if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
/* Post initial buffers to all RQs created */
for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
@@ -7512,7 +7507,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
"(%d):0308 Mbox cmd issue - BUSY Data: "
"x%x x%x x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0xffffff,
- mbx->mbxCommand, phba->pport->port_state,
+ mbx->mbxCommand,
+ phba->pport ? phba->pport->port_state : 0xff,
psli->sli_flag, flag);
psli->slistat.mbox_busy++;
@@ -7564,7 +7560,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
"x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
- mbx->mbxCommand, phba->pport->port_state,
+ mbx->mbxCommand,
+ phba->pport ? phba->pport->port_state : 0xff,
psli->sli_flag, flag);
if (mbx->mbxCommand != MBX_HEARTBEAT) {
@@ -10950,6 +10947,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *abtsiocb;
+ struct lpfc_sli_ring *pring_s4;
IOCB_t *cmd = NULL;
int errcnt = 0, ret_val = 0;
int i;
@@ -11003,8 +11001,15 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
/* Setup callback routine and issue the command. */
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
- ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
- abtsiocb, 0);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
+ if (!pring_s4)
+ continue;
+ ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
+ abtsiocb, 0);
+ } else
+ ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
+ abtsiocb, 0);
if (ret_val == IOCB_ERROR) {
lpfc_sli_release_iocbq(phba, abtsiocb);
errcnt++;
@@ -13256,6 +13261,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"6126 Receive Frame Truncated!!\n");
+ /* Drop thru */
case FC_STATUS_RQ_SUCCESS:
lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -13466,6 +13472,7 @@ process_cq:
/* Track the max number of CQEs processed in 1 EQ */
if (ecount > cq->CQ_max_cqe)
cq->CQ_max_cqe = ecount;
+ cq->assoc_qp->EQ_cqe_cnt += ecount;
/* Catch the no cq entry condition */
if (unlikely(ecount == 0))
@@ -13547,6 +13554,9 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
return;
}
+ /* Save EQ associated with this CQ */
+ cq->assoc_qp = phba->sli4_hba.fof_eq;
+
/* Process all the entries to the OAS CQ */
while ((cqe = lpfc_sli4_cq_get(cq))) {
workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
@@ -13557,6 +13567,7 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
/* Track the max number of CQEs processed in 1 EQ */
if (ecount > cq->CQ_max_cqe)
cq->CQ_max_cqe = ecount;
+ cq->assoc_qp->EQ_cqe_cnt += ecount;
/* Catch the no cq entry condition */
if (unlikely(ecount == 0))
@@ -13617,7 +13628,6 @@ lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
/* Check device state for handling interrupt */
if (unlikely(lpfc_intr_state_check(phba))) {
- eq->EQ_badstate++;
/* Check again for link_state with lock held */
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->link_state < LPFC_LINK_DOWN)
@@ -13729,7 +13739,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
/* Check device state for handling interrupt */
if (unlikely(lpfc_intr_state_check(phba))) {
- fpeq->EQ_badstate++;
/* Check again for link_state with lock held */
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->link_state < LPFC_LINK_DOWN)
@@ -13988,14 +13997,15 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
* fails this function will return -ENXIO.
**/
int
-lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
+lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
+ uint32_t numq, uint32_t imax)
{
struct lpfc_mbx_modify_eq_delay *eq_delay;
LPFC_MBOXQ_t *mbox;
struct lpfc_queue *eq;
int cnt, rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
- uint32_t result;
+ uint32_t result, val;
int qidx;
union lpfc_sli4_cfg_shdr *shdr;
uint16_t dmult;
@@ -14014,22 +14024,45 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
eq_delay = &mbox->u.mqe.un.eq_delay;
/* Calculate delay multiplier from maximum interrupts per second */
- result = phba->cfg_fcp_imax / phba->io_channel_irqs;
+ result = imax / phba->io_channel_irqs;
if (result > LPFC_DMULT_CONST || result == 0)
dmult = 0;
else
dmult = LPFC_DMULT_CONST/result - 1;
+ if (dmult > LPFC_DMULT_MAX)
+ dmult = LPFC_DMULT_MAX;
cnt = 0;
for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
eq = phba->sli4_hba.hba_eq[qidx];
if (!eq)
continue;
+ eq->q_mode = imax;
eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
eq_delay->u.request.eq[cnt].phase = 0;
eq_delay->u.request.eq[cnt].delay_multi = dmult;
cnt++;
- if (cnt >= LPFC_MAX_EQ_DELAY_EQID_CNT)
+
+ /* q_mode is only used for auto_imax */
+ if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
+ /* Use EQ Delay Register method for q_mode */
+
+ /* Convert for EQ Delay register */
+ val = phba->cfg_fcp_imax;
+ if (val) {
+ /* First, interrupts per sec per EQ */
+ val = phba->cfg_fcp_imax /
+ phba->io_channel_irqs;
+
+ /* us delay between each interrupt */
+ val = LPFC_SEC_TO_USEC / val;
+ }
+ eq->q_mode = val;
+ } else {
+ eq->q_mode = imax;
+ }
+
+ if (cnt >= numq)
break;
}
eq_delay->u.request.num_eq = cnt;
@@ -16126,9 +16159,6 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
return rc;
}
-static char *lpfc_rctl_names[] = FC_RCTL_NAMES_INIT;
-static char *lpfc_type_names[] = FC_TYPE_NAMES_INIT;
-
/**
* lpfc_fc_frame_check - Check that this frame is a valid frame to handle
* @phba: pointer to lpfc_hba struct that the frame was received on
@@ -16203,22 +16233,18 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
}
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
+ "2538 Received frame rctl:x%x, type:x%x, "
"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
- (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS) ? "MDS Diags" :
- lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
- (fc_hdr->fh_type == FC_TYPE_VENDOR_UNIQUE) ?
- "Vendor Unique" : lpfc_type_names[fc_hdr->fh_type],
- fc_hdr->fh_type, be32_to_cpu(header[0]),
- be32_to_cpu(header[1]), be32_to_cpu(header[2]),
- be32_to_cpu(header[3]), be32_to_cpu(header[4]),
- be32_to_cpu(header[5]), be32_to_cpu(header[6]));
+ fc_hdr->fh_r_ctl, fc_hdr->fh_type,
+ be32_to_cpu(header[0]), be32_to_cpu(header[1]),
+ be32_to_cpu(header[2]), be32_to_cpu(header[3]),
+ be32_to_cpu(header[4]), be32_to_cpu(header[5]),
+ be32_to_cpu(header[6]));
return 0;
drop:
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
- "2539 Dropped frame rctl:%s type:%s\n",
- lpfc_rctl_names[fc_hdr->fh_r_ctl],
- lpfc_type_names[fc_hdr->fh_type]);
+ "2539 Dropped frame rctl:x%x type:x%x\n",
+ fc_hdr->fh_r_ctl, fc_hdr->fh_type);
return 1;
}
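Two interrupt-coalescing schemes now coexist in lpfc_modify_hba_eq_delay(): the mailbox path still programs a delay multiplier (LPFC_DMULT_CONST / per-EQ interrupt rate - 1, capped at LPFC_DMULT_MAX), while adapters that advertise the EQ Delay Register (LPFC_SLI_USE_EQDR) get a plain microsecond gap derived from cfg_fcp_imax. A worked sketch of the register-path arithmetic, with the constant value assumed rather than taken from the headers:

#include <linux/kernel.h>

#define DEMO_SEC_TO_USEC 1000000U       /* assumed value of LPFC_SEC_TO_USEC */

/* Total interrupts/sec allowed for the HBA -> microsecond gap to
 * program into one EQ's delay register, mirroring the branch above.
 * Example: imax = 150000 over 8 IO channels -> 18750 int/s per EQ
 * -> 1000000 / 18750 ~= 53 us between interrupts.
 */
static u32 demo_eq_delay_us(u32 imax, u32 io_channel_irqs)
{
        u32 per_eq;

        if (!imax || !io_channel_irqs)
                return 0;       /* 0 -> no added coalescing delay */

        per_eq = imax / io_channel_irqs;
        return per_eq ? DEMO_SEC_TO_USEC / per_eq : 0;
}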
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 9085306ddd78..a3b1b5145d2b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -321,6 +321,7 @@ struct lpfc_sli {
#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
#define LPFC_SLI_SUPPRESS_RSP 0x4000 /* Suppress RSP feature is supported */
+#define LPFC_SLI_USE_EQDR 0x8000 /* EQ Delay Register is supported */
struct lpfc_sli_ring *sli3_ring;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index cf863db27700..7a1d74e9e877 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -168,7 +168,7 @@ struct lpfc_queue {
struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
struct lpfc_rqb *rqbp; /* ptr to RQ buffers */
- uint16_t sgl_list_cnt;
+ uint32_t q_mode;
uint16_t db_format;
#define LPFC_DB_RING_FORMAT 0x01
#define LPFC_DB_LIST_FORMAT 0x02
@@ -181,7 +181,7 @@ struct lpfc_queue {
/* defines for EQ stats */
#define EQ_max_eqe q_cnt_1
#define EQ_no_entry q_cnt_2
-#define EQ_badstate q_cnt_3
+#define EQ_cqe_cnt q_cnt_3
#define EQ_processed q_cnt_4
/* defines for CQ stats */
@@ -407,8 +407,10 @@ struct lpfc_max_cfg_param {
struct lpfc_hba;
/* SLI4 HBA multi-fcp queue handler struct */
+#define LPFC_SLI4_HANDLER_NAME_SZ 16
struct lpfc_hba_eq_hdl {
uint32_t idx;
+ char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba *phba;
atomic_t hba_eq_in_use;
struct cpumask *cpumask;
@@ -480,7 +482,6 @@ struct lpfc_sli4_lnk_info {
#define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX+ \
LPFC_FOF_IO_CHAN_NUM)
-#define LPFC_SLI4_HANDLER_NAME_SZ 16
/* Used for IRQ vector to CPU mapping */
struct lpfc_vector_map_info {
@@ -522,6 +523,7 @@ struct lpfc_sli4_hba {
#define SLIPORT_ERR2_REG_FAILURE_CQ 0x4
#define SLIPORT_ERR2_REG_FAILURE_BUS 0x5
#define SLIPORT_ERR2_REG_FAILURE_RQ 0x6
+ void __iomem *EQDregaddr;
} if_type2;
} u;
@@ -548,7 +550,6 @@ struct lpfc_sli4_hba {
uint32_t ue_to_rp;
struct lpfc_register sli_intf;
struct lpfc_pc_sli4_params pc_sli4_params;
- uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */
/* Pointers to the constructed SLI4 queues */
@@ -620,7 +621,8 @@ struct lpfc_sli4_hba {
uint16_t scsi_xri_start;
uint16_t els_xri_cnt;
uint16_t nvmet_xri_cnt;
- uint16_t nvmet_ctx_cnt;
+ uint16_t nvmet_ctx_get_cnt;
+ uint16_t nvmet_ctx_put_cnt;
uint16_t nvmet_io_wait_cnt;
uint16_t nvmet_io_wait_total;
struct list_head lpfc_els_sgl_list;
@@ -629,7 +631,8 @@ struct lpfc_sli4_hba {
struct list_head lpfc_abts_nvmet_ctx_list;
struct list_head lpfc_abts_scsi_buf_list;
struct list_head lpfc_abts_nvme_buf_list;
- struct list_head lpfc_nvmet_ctx_list;
+ struct list_head lpfc_nvmet_ctx_get_list;
+ struct list_head lpfc_nvmet_ctx_put_list;
struct list_head lpfc_nvmet_io_wait_list;
struct lpfc_sglq **lpfc_sglq_active_list;
struct list_head lpfc_rpi_hdr_list;
@@ -661,7 +664,8 @@ struct lpfc_sli4_hba {
spinlock_t abts_nvme_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t sgl_list_lock; /* list of aborted els IOs */
- spinlock_t nvmet_io_lock;
+ spinlock_t nvmet_ctx_get_lock; /* list of avail XRI contexts */
+ spinlock_t nvmet_ctx_put_lock; /* list of avail XRI contexts */
spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
uint32_t physical_port;
@@ -755,7 +759,8 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
-int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq);
+int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
+ uint32_t numq, uint32_t imax);
int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t, uint32_t);
int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
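handler_name also moves from a fixed array in lpfc_sli4_hba into each lpfc_hba_eq_hdl, so every IRQ vector owns the name string it hands to request_irq(). A generic sketch of per-vector naming, assuming MSI-X vectors and a hypothetical handler rather than the driver's setup code:

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>

#define DEMO_NAME_SZ 16

struct demo_eq_hdl {
        u32 idx;
        char name[DEMO_NAME_SZ];        /* lives as long as the vector */
        void *drvdata;
};

static irqreturn_t demo_isr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int demo_request_vector(struct pci_dev *pdev,
                               struct demo_eq_hdl *hdl, u32 idx)
{
        hdl->idx = idx;
        /* The string passed to request_irq() must outlive the IRQ
         * (it is shown in /proc/interrupts); keeping it inside the
         * per-vector handle guarantees that.
         */
        scnprintf(hdl->name, DEMO_NAME_SZ, "demo:%u", idx);
        return request_irq(pci_irq_vector(pdev, idx), demo_isr, 0,
                           hdl->name, hdl);
}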
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c2653244221c..c6a24c3e2d5e 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.2.0.14"
+#define LPFC_DRIVER_VERSION "11.4.0.1"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index 4cf9ed96414f..544d6f7e6138 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -574,7 +574,7 @@ mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
kioc->pool_index = right_pool;
kioc->free_buf = 1;
- kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
+ kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_ATOMIC,
&kioc->buf_paddr);
spin_unlock_irqrestore(&pool->lock, flags);
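The megaraid change swaps GFP_KERNEL for GFP_ATOMIC because the allocation happens with pool->lock held: a GFP_KERNEL allocation may sleep, which is not allowed under a spinlock (and with interrupts disabled by spin_lock_irqsave). A minimal illustration of the rule, with an invented pool structure, not part of the patch:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_pool {
        spinlock_t lock;
        int used;
};

static void *demo_grab(struct demo_pool *pool, size_t len)
{
        unsigned long flags;
        void *buf;

        spin_lock_irqsave(&pool->lock, flags);
        /* Inside the spinlock (IRQs off) we must not sleep, so only
         * GFP_ATOMIC is safe here; GFP_KERNEL could schedule.
         */
        buf = kmalloc(len, GFP_ATOMIC);
        if (buf)
                pool->used++;
        spin_unlock_irqrestore(&pool->lock, flags);
        return buf;
}

Where possible it is preferable to allocate with GFP_KERNEL before taking the lock; the atomic flag is the fallback when the lock must already be held, as in mraid_mm_attach_buf() above.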
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index a5d872664257..22998cbd538f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2859,7 +2859,7 @@ _scsih_internal_device_block(struct scsi_device *sdev,
sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 1;
- r = scsi_internal_device_block(sdev, false);
+ r = scsi_internal_device_block_nowait(sdev);
if (r == -EINVAL)
sdev_printk(KERN_WARNING, sdev,
"device_block failed with return(%d) for handle(0x%04x)\n",
@@ -2883,7 +2883,7 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
"handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 0;
- r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+ r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
if (r == -EINVAL) {
/* The device has been set to SDEV_RUNNING by SD layer during
* device addition but the request queue is still stopped by
@@ -2895,14 +2895,14 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
"performing a block followed by an unblock\n",
r, sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 1;
- r = scsi_internal_device_block(sdev, false);
+ r = scsi_internal_device_block_nowait(sdev);
if (r)
sdev_printk(KERN_WARNING, sdev, "retried device_block "
"failed with return(%d) for handle(0x%04x)\n",
r, sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 0;
- r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+ r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
if (r)
sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
" failed with return(%d) for handle(0x%04x)\n",
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
index 8c65e3b034dc..7d91e53562f8 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -1,5 +1,5 @@
/* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
index 617529b058f4..f9c50faa748e 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -1,5 +1,5 @@
/* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.c b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
index 11e0cc082ec0..5d5095e3d96d 100644
--- a/drivers/scsi/qedf/drv_scsi_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
@@ -1,5 +1,5 @@
/* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.h b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
index 9cb45410bc45..8fbe6e4d0b4f 100644
--- a/drivers/scsi/qedf/drv_scsi_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
@@ -1,5 +1,5 @@
/* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 07ee88200e91..4d038926a455 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c
index 47720611ad2c..fa6727685627 100644
--- a/drivers/scsi/qedf/qedf_attr.c
+++ b/drivers/scsi/qedf/qedf_attr.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
@@ -8,6 +8,25 @@
*/
#include "qedf.h"
+inline bool qedf_is_vport(struct qedf_ctx *qedf)
+{
+ return qedf->lport->vport != NULL;
+}
+
+/* Get base qedf for physical port from vport */
+static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf)
+{
+ struct fc_lport *lport;
+ struct fc_lport *base_lport;
+
+ if (!(qedf_is_vport(qedf)))
+ return NULL;
+
+ lport = qedf->lport;
+ base_lport = shost_priv(vport_to_shost(lport->vport));
+ return lport_priv(base_lport);
+}
+
static ssize_t
qedf_fcoe_mac_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -26,34 +45,34 @@ qedf_fcoe_mac_show(struct device *dev,
return scnprintf(buf, PAGE_SIZE, "%pM\n", fcoe_mac);
}
+static ssize_t
+qedf_fka_period_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fc_lport *lport = shost_priv(class_to_shost(dev));
+ struct qedf_ctx *qedf = lport_priv(lport);
+ int fka_period = -1;
+
+ if (qedf_is_vport(qedf))
+ qedf = qedf_get_base_qedf(qedf);
+
+ if (qedf->ctlr.sel_fcf)
+ fka_period = qedf->ctlr.sel_fcf->fka_period;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", fka_period);
+}
+
static DEVICE_ATTR(fcoe_mac, S_IRUGO, qedf_fcoe_mac_show, NULL);
+static DEVICE_ATTR(fka_period, S_IRUGO, qedf_fka_period_show, NULL);
struct device_attribute *qedf_host_attrs[] = {
&dev_attr_fcoe_mac,
+ &dev_attr_fka_period,
NULL,
};
extern const struct qed_fcoe_ops *qed_ops;
-inline bool qedf_is_vport(struct qedf_ctx *qedf)
-{
- return (!(qedf->lport->vport == NULL));
-}
-
-/* Get base qedf for physical port from vport */
-static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf)
-{
- struct fc_lport *lport;
- struct fc_lport *base_lport;
-
- if (!(qedf_is_vport(qedf)))
- return NULL;
-
- lport = qedf->lport;
- base_lport = shost_priv(vport_to_shost(lport->vport));
- return (struct qedf_ctx *)(lport_priv(base_lport));
-}
-
void qedf_capture_grc_dump(struct qedf_ctx *qedf)
{
struct qedf_ctx *base_qedf;
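qedf_attr.c gains a read-only fka_period attribute next to fcoe_mac, with qedf_is_vport()/qedf_get_base_qedf() moved above it so a vport falls back to its physical port's FCF data. The DEVICE_ATTR plus shost_attrs wiring follows the usual SCSI-host pattern; a pared-down sketch with invented names (not qedf's code), where the accessor is an assumed helper:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>
#include <scsi/scsi_host.h>

/* Assumed driver-private accessor: returns the currently selected
 * FKA period, or -1 when no FCF has been selected yet.
 */
int demo_get_fka_period(struct Scsi_Host *shost);

static ssize_t demo_fka_period_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);

        return scnprintf(buf, PAGE_SIZE, "%d\n", demo_get_fka_period(shost));
}
static DEVICE_ATTR(fka_period, S_IRUGO, demo_fka_period_show, NULL);

/* Hooked up through the host template's shost_attrs, so the value
 * typically appears under /sys/class/scsi_host/hostN/fka_period.
 */
static struct device_attribute *demo_host_attrs[] = {
        &dev_attr_fka_period,
        NULL,
};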
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index 7d173f48a81e..50083cae84c3 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index 00a1d6405ebe..2b1ef3075e93 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 QLogic Corporation
+ * Copyright (c) 2016-2017 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 90627033bde6..eb07f1de8afa 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
@@ -44,7 +44,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
goto els_err;
}
- if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
rc = -EINVAL;
goto els_err;
@@ -225,7 +225,7 @@ int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
fcport = aborted_io_req->fcport;
/* Check that fcport is still offloaded */
- if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
return -EINVAL;
}
@@ -550,7 +550,7 @@ static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
fcport = orig_io_req->fcport;
/* Check that fcport is still offloaded */
- if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
return -EINVAL;
}
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index 0d4bf70803ae..aefd24ca9604 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
@@ -155,10 +155,9 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
struct fip_wwn_desc *wp;
struct fip_vn_desc *vp;
size_t rlen, dlen;
- uint32_t cvl_port_id;
- __u8 cvl_mac[ETH_ALEN];
u16 op;
u8 sub;
+ bool do_reset = false;
eth_hdr = (struct ethhdr *)skb_mac_header(skb);
fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
@@ -189,8 +188,6 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
return;
}
- cvl_port_id = 0;
- memset(cvl_mac, 0, ETH_ALEN);
/*
* We need to loop through the CVL descriptors to determine
* if we want to reset the fcoe link
@@ -204,7 +201,9 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
mp = (struct fip_mac_desc *)desc;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
"fd_mac=%pM\n", mp->fd_mac);
- ether_addr_copy(cvl_mac, mp->fd_mac);
+ if (ether_addr_equal(mp->fd_mac,
+ qedf->ctlr.sel_fcf->fcf_mac))
+ do_reset = true;
break;
case FIP_DT_NAME:
wp = (struct fip_wwn_desc *)desc;
@@ -216,7 +215,9 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
vp = (struct fip_vn_desc *)desc;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
"fd_fc_id=%x.\n", ntoh24(vp->fd_fc_id));
- cvl_port_id = ntoh24(vp->fd_fc_id);
+ if (ntoh24(vp->fd_fc_id) ==
+ qedf->lport->port_id)
+ do_reset = true;
break;
default:
/* Ignore anything else */
@@ -227,11 +228,8 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
}
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
- "cvl_port_id=%06x cvl_mac=%pM.\n", cvl_port_id,
- cvl_mac);
- if (cvl_port_id == qedf->lport->port_id &&
- ether_addr_equal(cvl_mac,
- qedf->ctlr.sel_fcf->fcf_mac)) {
+ "do_reset=%d.\n", do_reset);
+ if (do_reset) {
fcoe_ctlr_link_down(&qedf->ctlr);
qedf_wait_for_upload(qedf);
fcoe_ctlr_link_up(&qedf->ctlr);
diff --git a/drivers/scsi/qedf/qedf_hsi.h b/drivers/scsi/qedf/qedf_hsi.h
index dfd65dec2874..7faef80c5f7a 100644
--- a/drivers/scsi/qedf/qedf_hsi.h
+++ b/drivers/scsi/qedf/qedf_hsi.h
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 1d7f90d0adc1..ded386036c27 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
@@ -1041,10 +1041,13 @@ static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
}
- memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
- if (fcp_sns_len)
- memcpy(sc_cmd->sense_buffer, sense_data,
- fcp_sns_len);
+ /* The sense buffer can be NULL for TMF commands */
+ if (sc_cmd->sense_buffer) {
+ memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ if (fcp_sns_len)
+ memcpy(sc_cmd->sense_buffer, sense_data,
+ fcp_sns_len);
+ }
}
static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
@@ -1476,8 +1479,8 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
{
struct fc_lport *lport;
struct qedf_rport *fcport = io_req->fcport;
- struct fc_rport_priv *rdata = fcport->rdata;
- struct qedf_ctx *qedf = fcport->qedf;
+ struct fc_rport_priv *rdata;
+ struct qedf_ctx *qedf;
u16 xid;
u32 r_a_tov = 0;
int rc = 0;
@@ -1485,15 +1488,18 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
struct fcoe_wqe *sqe;
u16 sqe_idx;
- r_a_tov = rdata->r_a_tov;
- lport = qedf->lport;
-
+ /* Sanity check qedf_rport before dereferencing any pointers */
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
- QEDF_ERR(&(qedf->dbg_ctx), "tgt not offloaded\n");
+ QEDF_ERR(NULL, "tgt not offloaded\n");
rc = 1;
goto abts_err;
}
+ rdata = fcport->rdata;
+ r_a_tov = rdata->r_a_tov;
+ qedf = fcport->qedf;
+ lport = qedf->lport;
+
if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
rc = 1;
@@ -1729,6 +1735,13 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
return SUCCESS;
}
+ /* Sanity check qedf_rport before dereferencing any pointers */
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+ QEDF_ERR(NULL, "tgt not offloaded\n");
+ rc = 1;
+ return SUCCESS;
+ }
+
qedf = fcport->qedf;
if (!qedf) {
QEDF_ERR(NULL, "qedf is NULL.\n");
@@ -1837,7 +1850,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
return FAILED;
}
- if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
rc = FAILED;
return FAILED;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 542a6e75c2bb..b58bba4604e8 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1,6 +1,6 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
@@ -22,6 +22,7 @@
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include "qedf.h"
+#include <uapi/linux/pci_regs.h>
const struct qed_fcoe_ops *qed_ops;
@@ -94,7 +95,7 @@ module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO);
MODULE_PARM_DESC(dp_module, " bit flags control for verbose printk passed "
"qed module during probe.");
-static uint qedf_dp_level;
+static uint qedf_dp_level = QED_LEVEL_NOTICE;
module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module "
"during probe (0-3: 0 more verbose).");
@@ -441,7 +442,8 @@ static void qedf_link_update(void *dev, struct qed_link_output *link)
qedf_update_link_speed(qedf, link);
if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
- QEDF_ERR(&(qedf->dbg_ctx), "DCBx done.\n");
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "DCBx done.\n");
if (atomic_read(&qedf->link_down_tmo_valid) > 0)
queue_delayed_work(qedf->link_update_wq,
&qedf->link_recovery, 0);
@@ -627,6 +629,16 @@ static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}
+static int qedf_eh_bus_reset(struct scsi_cmnd *sc_cmd)
+{
+ QEDF_ERR(NULL, "BUS RESET Issued...\n");
+ /*
+ * Essentially a no-op but return SUCCESS to prevent
+ * unnecessary escalation to the host reset handler.
+ */
+ return SUCCESS;
+}
+
void qedf_wait_for_upload(struct qedf_ctx *qedf)
{
while (1) {
@@ -639,27 +651,17 @@ void qedf_wait_for_upload(struct qedf_ctx *qedf)
}
}
-/* Reset the host by gracefully logging out and then logging back in */
-static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
+/* Performs soft reset of qedf_ctx by simulating a link down/up */
+static void qedf_ctx_soft_reset(struct fc_lport *lport)
{
- struct fc_lport *lport;
struct qedf_ctx *qedf;
- lport = shost_priv(sc_cmd->device->host);
-
if (lport->vport) {
QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
- return SUCCESS;
+ return;
}
- qedf = (struct qedf_ctx *)lport_priv(lport);
-
- if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN ||
- test_bit(QEDF_UNLOADING, &qedf->flags) ||
- test_bit(QEDF_DBG_STOP_IO, &qedf->flags))
- return FAILED;
-
- QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued...");
+ qedf = lport_priv(lport);
/* For host reset, essentially do a soft link up/down */
atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
@@ -671,6 +673,24 @@ static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
qedf->vlan_id = 0;
queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
0);
+}
+
+/* Reset the host by gracefully logging out and then logging back in */
+static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
+{
+ struct fc_lport *lport;
+ struct qedf_ctx *qedf;
+
+ lport = shost_priv(sc_cmd->device->host);
+ qedf = lport_priv(lport);
+
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN ||
+ test_bit(QEDF_UNLOADING, &qedf->flags))
+ return FAILED;
+
+ QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued...");
+
+ qedf_ctx_soft_reset(lport);
return SUCCESS;
}
@@ -688,7 +708,7 @@ static struct scsi_host_template qedf_host_template = {
.module = THIS_MODULE,
.name = QEDF_MODULE_NAME,
.this_id = -1,
- .cmd_per_lun = 3,
+ .cmd_per_lun = 32,
.use_clustering = ENABLE_CLUSTERING,
.max_sectors = 0xffff,
.queuecommand = qedf_queuecommand,
@@ -696,11 +716,13 @@ static struct scsi_host_template qedf_host_template = {
.eh_abort_handler = qedf_eh_abort,
.eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
.eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
+ .eh_bus_reset_handler = qedf_eh_bus_reset,
.eh_host_reset_handler = qedf_eh_host_reset,
.slave_configure = qedf_slave_configure,
.dma_boundary = QED_HW_DMA_BOUNDARY,
.sg_tablesize = QEDF_MAX_BDS_PER_CMD,
.can_queue = FCOE_PARAMS_NUM_TASKS,
+ .change_queue_depth = scsi_change_queue_depth,
};
static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen)
@@ -950,25 +972,21 @@ static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
sizeof(void *);
fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;
- fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
- &fcport->sq_dma, GFP_KERNEL);
+ fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev,
+ fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL);
if (!fcport->sq) {
- QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send "
- "queue.\n");
+ QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
rval = 1;
goto out;
}
- memset(fcport->sq, 0, fcport->sq_mem_size);
- fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
+ fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev,
fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL);
if (!fcport->sq_pbl) {
- QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send "
- "queue PBL.\n");
+ QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
rval = 1;
goto out_free_sq;
}
- memset(fcport->sq_pbl, 0, fcport->sq_pbl_size);
/* Create PBL */
num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE;
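This hunk (and the similar ones later in the file) folds the alloc-then-memset pattern into dma_zalloc_coherent(), which hands back already-zeroed coherent memory. A minimal kernel-style sketch of the equivalence, assuming a 4.x-era kernel where dma_zalloc_coherent() still exists; the helper name is made up:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Sketch only: dma_zalloc_coherent() is dma_alloc_coherent() plus the
 * memset(0) each call site used to do by hand. */
static void *qedf_alloc_zeroed(struct device *dev, size_t len,
			       dma_addr_t *dma)
{
	return dma_zalloc_coherent(dev, len, dma, GFP_KERNEL);
}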
@@ -1334,6 +1352,59 @@ static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac);
}
+static void qedf_setup_fdmi(struct qedf_ctx *qedf)
+{
+ struct fc_lport *lport = qedf->lport;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
+ u8 buf[8];
+ int i, pos;
+
+ /*
+ * fdmi_enabled needs to be set for libfc to execute FDMI registration.
+ */
+ lport->fdmi_enabled = 1;
+
+ /*
+ * Set up the fc_host attributes that will be used to fill
+ * in the FDMI information.
+ */
+
+ /* Get the PCI-e Device Serial Number Capability */
+ pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
+ if (pos) {
+ pos += 4;
+ for (i = 0; i < 8; i++)
+ pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
+
+ snprintf(fc_host->serial_number,
+ sizeof(fc_host->serial_number),
+ "%02X%02X%02X%02X%02X%02X%02X%02X",
+ buf[7], buf[6], buf[5], buf[4],
+ buf[3], buf[2], buf[1], buf[0]);
+ } else
+ snprintf(fc_host->serial_number,
+ sizeof(fc_host->serial_number), "Unknown");
+
+ snprintf(fc_host->manufacturer,
+ sizeof(fc_host->manufacturer), "%s", "Cavium Inc.");
+
+ snprintf(fc_host->model, sizeof(fc_host->model), "%s", "QL41000");
+
+ snprintf(fc_host->model_description, sizeof(fc_host->model_description),
+ "%s", "QLogic FastLinQ QL41000 Series 10/25/40/50GGbE Controller"
+ "(FCoE)");
+
+ snprintf(fc_host->hardware_version, sizeof(fc_host->hardware_version),
+ "Rev %d", qedf->pdev->revision);
+
+ snprintf(fc_host->driver_version, sizeof(fc_host->driver_version),
+ "%s", QEDF_VERSION);
+
+ snprintf(fc_host->firmware_version, sizeof(fc_host->firmware_version),
+ "%d.%d.%d.%d", FW_MAJOR_VERSION, FW_MINOR_VERSION,
+ FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
+}
+
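qedf_setup_fdmi() above derives the adapter serial number from the PCIe Device Serial Number extended capability. A stand-alone user-space sketch of the same lookup through sysfs config space; the BDF is hypothetical, it assumes a little-endian host, and the full config space is only readable as root:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define PCI_EXT_CAP_ID_DSN 0x0003

/* User-space analogue of pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN):
 * walk the extended capability list and print the 8-byte serial number,
 * most significant byte first, as the driver does. */
int main(void)
{
	const char *path = "/sys/bus/pci/devices/0000:03:00.2/config";
	uint8_t cfg[4096];
	FILE *f = fopen(path, "rb");
	size_t len;
	unsigned int pos = 0x100;

	if (!f) {
		perror("fopen");
		return 1;
	}
	len = fread(cfg, 1, sizeof(cfg), f);
	fclose(f);

	while (pos && pos + 12 <= len) {
		uint32_t hdr;

		memcpy(&hdr, cfg + pos, 4);	/* little-endian read */
		if ((hdr & 0xffff) == PCI_EXT_CAP_ID_DSN) {
			const uint8_t *sn = cfg + pos + 4;

			printf("DSN: %02X%02X%02X%02X%02X%02X%02X%02X\n",
			       sn[7], sn[6], sn[5], sn[4],
			       sn[3], sn[2], sn[1], sn[0]);
			return 0;
		}
		pos = (hdr >> 20) & 0xffc;	/* next capability offset */
	}
	printf("no Device Serial Number capability found\n");
	return 0;
}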
static int qedf_lport_setup(struct qedf_ctx *qedf)
{
struct fc_lport *lport = qedf->lport;
@@ -1377,6 +1448,8 @@ static int qedf_lport_setup(struct qedf_ctx *qedf)
snprintf(fc_host_symbolic_name(lport->host), 256,
"QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION);
+ qedf_setup_fdmi(qedf);
+
return 0;
}
@@ -1613,8 +1686,7 @@ static int qedf_fcoe_reset(struct Scsi_Host *shost)
{
struct fc_lport *lport = shost_priv(shost);
- fc_fabric_logoff(lport);
- fc_fabric_login(lport);
+ qedf_ctx_soft_reset(lport);
return 0;
}
@@ -1979,6 +2051,8 @@ static int qedf_setup_int(struct qedf_ctx *qedf)
* Learn interrupt configuration
*/
rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus());
+ if (rc <= 0)
+ return 0;
rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info);
if (rc)
@@ -2011,6 +2085,8 @@ static void qedf_recv_frame(struct qedf_ctx *qedf,
u8 *dest_mac = NULL;
struct fcoe_hdr *hp;
struct qedf_rport *fcport;
+ struct fc_lport *vn_port;
+ u32 f_ctl;
lport = qedf->lport;
if (lport == NULL || lport->state == LPORT_ST_DISABLED) {
@@ -2047,6 +2123,10 @@ static void qedf_recv_frame(struct qedf_ctx *qedf,
fh = fc_frame_header_get(fp);
+ /*
+ * Invalid frame filters.
+ */
+
if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
fh->fh_type == FC_TYPE_FCP) {
/* Drop FCP data. We don't handle this in the L2 path. */
@@ -2072,6 +2152,45 @@ static void qedf_recv_frame(struct qedf_ctx *qedf,
return;
}
+ if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
+ "FC frame d_id mismatch with MAC %pM.\n", dest_mac);
+ return;
+ }
+
+ if (qedf->ctlr.state) {
+ if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
+ "Wrong source address: mac:%pM dest_addr:%pM.\n",
+ mac, qedf->ctlr.dest_addr);
+ kfree_skb(skb);
+ return;
+ }
+ }
+
+ vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
+
+ /*
+ * If the destination ID from the frame header does not match what we
+ * have on record for the lport, and the search for an NPIV port came
+ * up empty, then this frame is not addressed to our port; drop it.
+ */
+ if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
+ "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n",
+ lport->port_id, ntoh24(fh->fh_d_id));
+ kfree_skb(skb);
+ return;
+ }
+
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) &&
+ (f_ctl & FC_FC_EX_CTX)) {
+ /* Drop incoming ABTS response that has both SEQ/EX CTX set */
+ kfree_skb(skb);
+ return;
+ }
+
/*
* If a connection is uploading, drop incoming FCoE frames as there
* is a small window where we could try to return a frame while libfc
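The destination checks added in this hunk rely on the FCoE FPMA convention that the low three bytes of the destination MAC carry the FC_ID, which is what the ntoh24(&dest_mac[3]) comparison tests. A self-contained sketch of that comparison; the sample addresses are made up:

#include <stdio.h>
#include <stdint.h>

/* FCoE FPMA: destination MAC = FC-MAP (3 bytes) | FC_ID (3 bytes). */
static uint32_t ntoh24(const uint8_t *p)
{
	return (p[0] << 16) | (p[1] << 8) | p[2];
}

int main(void)
{
	/* hypothetical FPMA for FC_ID 0x010203 under FC-MAP 0x0efc00 */
	const uint8_t dest_mac[6] = { 0x0e, 0xfc, 0x00, 0x01, 0x02, 0x03 };
	const uint8_t fh_d_id[3]  = { 0x01, 0x02, 0x03 };

	if (ntoh24(&dest_mac[3]) == ntoh24(fh_d_id))
		printf("frame addressed to this port (FC_ID %06x)\n",
		       ntoh24(fh_d_id));
	else
		printf("d_id mismatch, frame would be dropped\n");
	return 0;
}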
@@ -2474,14 +2593,12 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
}
/* Allocate list of PBL pages */
- qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
+ qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev,
QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL);
if (!qedf->bdq_pbl_list) {
- QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL "
- "pages.\n");
+ QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
return -ENOMEM;
}
- memset(qedf->bdq_pbl_list, 0, QEDF_PAGE_SIZE);
/*
* Now populate PBL list with pages that contain pointers to the
@@ -2548,8 +2665,9 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
qedf->global_queues[i] = kzalloc(sizeof(struct global_queue),
GFP_KERNEL);
if (!qedf->global_queues[i]) {
- QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocation "
+ QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate "
"global queue %d.\n", i);
+ status = -ENOMEM;
goto mem_alloc_failure;
}
@@ -2565,32 +2683,26 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
qedf->global_queues[i]->cq =
- dma_alloc_coherent(&qedf->pdev->dev,
+ dma_zalloc_coherent(&qedf->pdev->dev,
qedf->global_queues[i]->cq_mem_size,
&qedf->global_queues[i]->cq_dma, GFP_KERNEL);
if (!qedf->global_queues[i]->cq) {
- QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
- "cq.\n");
+ QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
status = -ENOMEM;
goto mem_alloc_failure;
}
- memset(qedf->global_queues[i]->cq, 0,
- qedf->global_queues[i]->cq_mem_size);
qedf->global_queues[i]->cq_pbl =
- dma_alloc_coherent(&qedf->pdev->dev,
+ dma_zalloc_coherent(&qedf->pdev->dev,
qedf->global_queues[i]->cq_pbl_size,
&qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL);
if (!qedf->global_queues[i]->cq_pbl) {
- QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
- "cq PBL.\n");
+ QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
status = -ENOMEM;
goto mem_alloc_failure;
}
- memset(qedf->global_queues[i]->cq_pbl, 0,
- qedf->global_queues[i]->cq_pbl_size);
/* Create PBL */
num_pages = qedf->global_queues[i]->cq_mem_size /
@@ -2683,8 +2795,7 @@ static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE);
cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe);
- memset(&(qedf->pf_params), 0,
- sizeof(qedf->pf_params));
+ memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params));
/* Setup the value for fcoe PF */
qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS;
diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h
index 4ae5f537a440..6fa442061c32 100644
--- a/drivers/scsi/qedf/qedf_version.h
+++ b/drivers/scsi/qedf/qedf_version.h
@@ -1,15 +1,15 @@
/*
* QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
-#define QEDF_VERSION "8.10.7.0"
+#define QEDF_VERSION "8.18.22.0"
#define QEDF_DRIVER_MAJOR_VER 8
-#define QEDF_DRIVER_MINOR_VER 10
-#define QEDF_DRIVER_REV_VER 7
+#define QEDF_DRIVER_MINOR_VER 18
+#define QEDF_DRIVER_REV_VER 22
#define QEDF_DRIVER_ENG_VER 0
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index e937490d5d97..19254bd739d9 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -333,7 +333,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
/* Obtain buffer address from rqe_opaque */
idx = cqe->rqe_opaque.lo;
- if ((idx < 0) || (idx > (QEDI_BDQ_NUM - 1))) {
+ if (idx > (QEDI_BDQ_NUM - 1)) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"wrong idx %d returned by FW, dropping the unsolicited pkt\n",
idx);
@@ -370,7 +370,7 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
/* Obtain buffer address from rqe_opaque */
idx = cqe->rqe_opaque.lo;
- if ((idx < 0) || (idx > (QEDI_BDQ_NUM - 1))) {
+ if (idx > (QEDI_BDQ_NUM - 1)) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
"wrong idx %d returned by FW, dropping the unsolicited pkt\n",
idx);
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index de952935b5d2..036cc3f217b1 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -2,6 +2,7 @@ config SCSI_QLA_FC
tristate "QLogic QLA2XXX Fibre Channel Support"
depends on PCI && SCSI
depends on SCSI_FC_ATTRS
+ depends on NVME_FC || !NVME_FC
select FW_LOADER
select BTREE
---help---
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 44def6bb4bb0..0b767a0bb308 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,6 +1,6 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
- qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o
+ qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 7c8d6c54ab70..08a1feb3a195 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -769,7 +769,7 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
did.b.area = (type & 0x0000ff00) >> 8;
did.b.al_pa = (type & 0x000000ff);
- ql_log(ql_log_info, vha, 0x70e3, "portid=%02x%02x%02x done\n",
+ ql_log(ql_log_info, vha, 0xd04d, "portid=%02x%02x%02x done\n",
did.b.domain, did.b.area, did.b.al_pa);
ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
@@ -929,7 +929,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
iter->name, ret);
else
ql_dbg(ql_dbg_init, vha, 0x00f4,
- "Successfully created sysfs %s binary attribure.\n",
+ "Successfully created sysfs %s binary attribute.\n",
iter->name);
}
}
@@ -2096,7 +2096,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
}
if (qos) {
- qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx);
+ qpair = qla2xxx_create_qpair(vha, qos, vha->vp_idx, true);
if (!qpair)
ql_log(ql_log_warn, vha, 0x7084,
"Can't create qpair for VP[%d]\n",
@@ -2289,7 +2289,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
- fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ?
+ fc_host_supported_classes(vha->host) = ha->base_qpair->enable_class_2 ?
(FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index ca3420de5a01..2ea0ef93f5cb 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -293,7 +293,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
if (bsg_job->request_payload.sg_cnt > 1 ||
bsg_job->reply_payload.sg_cnt > 1) {
ql_dbg(ql_dbg_user, vha, 0x7002,
- "Multiple SG's are not suppored for ELS requests, "
+ "Multiple SG's are not supported for ELS requests, "
"request_sg_cnt=%x reply_sg_cnt=%x.\n",
bsg_job->request_payload.sg_cnt,
bsg_job->reply_payload.sg_cnt);
@@ -2135,7 +2135,7 @@ qla8044_serdes_op(struct bsg_job *bsg_job)
bsg_reply->reply_payload_rcv_len = sizeof(sr);
break;
default:
- ql_dbg(ql_dbg_user, vha, 0x70cf,
+ ql_dbg(ql_dbg_user, vha, 0x7020,
"Unknown serdes cmd %x.\n", sr.cmd);
rval = -EINVAL;
break;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 88748a6ab73f..26751d34bcf2 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -15,9 +15,10 @@
* | | | 0x015b-0x0160 |
* | | | 0x016e |
* | Mailbox commands | 0x1199 | 0x1193 |
- * | Device Discovery | 0x2004 | 0x2016 |
- * | | | 0x2011-0x2012, |
- * | | | 0x2099-0x20a4 |
+ * | Device Discovery | 0x2134 | 0x210e-0x2116 |
+ * | | | 0x211a |
+ * | | | 0x211c-0x2128 |
+ * | | | 0x212a-0x2130 |
* | Queue Command and IO tracing | 0x3074 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
@@ -59,10 +60,10 @@
* | | | 0xb13c-0xb140 |
* | | | 0xb149 |
* | MultiQ | 0xc010 | |
- * | Misc | 0xd301 | 0xd031-0xd0ff |
+ * | Misc | 0xd302 | 0xd031-0xd0ff |
* | | | 0xd101-0xd1fe |
* | | | 0xd214-0xd2fe |
- * | Target Mode | 0xe080 | |
+ * | Target Mode | 0xe081 | |
* | Target Mode Management | 0xf09b | 0xf002 |
* | | | 0xf046-0xf049 |
* | Target Mode Task Management | 0x1000d | |
@@ -498,6 +499,50 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
}
static inline void *
+qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+ struct qla2xxx_offld_chain *c = ptr;
+
+ if (!ha->exlogin_buf)
+ return ptr;
+
+ *last_chain = &c->type;
+
+ c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN);
+ c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
+ ha->exlogin_size);
+ c->size = cpu_to_be32(ha->exlogin_size);
+ c->addr = cpu_to_be64(ha->exlogin_buf_dma);
+
+ ptr += sizeof(struct qla2xxx_offld_chain);
+ memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);
+
+ return (char *)ptr + cpu_to_be32(c->size);
+}
+
+static inline void *
+qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+{
+ struct qla2xxx_offld_chain *c = ptr;
+
+ if (!ha->exchoffld_buf)
+ return ptr;
+
+ *last_chain = &c->type;
+
+ c->type = cpu_to_be32(DUMP_CHAIN_EXCHG);
+ c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
+ ha->exchoffld_size);
+ c->size = cpu_to_be32(ha->exchoffld_size);
+ c->addr = cpu_to_be64(ha->exchoffld_buf_dma);
+
+ ptr += sizeof(struct qla2xxx_offld_chain);
+ memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);
+
+ return (char *)ptr + cpu_to_be32(c->size);
+}
+
+static inline void *
qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
uint32_t **last_chain)
{
@@ -1606,6 +1651,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
*last_chain |= htonl(DUMP_CHAIN_LAST);
@@ -1932,6 +1978,8 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
+ nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
*last_chain |= htonl(DUMP_CHAIN_LAST);
@@ -2443,6 +2491,8 @@ copy_queue:
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+ nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
+ nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
*last_chain |= htonl(DUMP_CHAIN_LAST);
@@ -2713,3 +2763,104 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
buf + cnt, min(16U, size - cnt), false);
}
}
+
+/*
+ * This function formats and logs messages for a specific queue pair.
+ * It is to be used when a qla_qpair is available. It formats the
+ * message and logs it to the messages file. The message is logged
+ * irrespective of the value of ql2xextended_error_logging.
+ * parameters:
+ * level: The level of the log message to be printed in the
+ * messages file.
+ * qpair: Pointer to the qla_qpair the message relates to.
+ * id: A unique id for the level. It identifies the part of
+ * the code from where the message originated.
+ * fmt: The printf-style format string for the message.
+ */
+void
+ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char pbuf[128];
+
+ if (level > ql_errlev)
+ return;
+
+ if (qpair != NULL) {
+ const struct pci_dev *pdev = qpair->pdev;
+ /* <module-name> <pci-name> <msg-id>:<host> Message */
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: ",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id);
+ } else {
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+ QL_MSGHDR, "0000:00:00.0", id);
+ }
+ pbuf[sizeof(pbuf) - 1] = 0;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ switch (level) {
+ case ql_log_fatal: /* FATAL LOG */
+ pr_crit("%s%pV", pbuf, &vaf);
+ break;
+ case ql_log_warn:
+ pr_err("%s%pV", pbuf, &vaf);
+ break;
+ case ql_log_info:
+ pr_warn("%s%pV", pbuf, &vaf);
+ break;
+ default:
+ pr_info("%s%pV", pbuf, &vaf);
+ break;
+ }
+
+ va_end(va);
+}
+
+/*
+ * This function formats and logs debug information for a specific
+ * queue pair. It is to be used when a qla_qpair is available. It
+ * formats the message and logs it to the messages file.
+ * parameters:
+ * level: The level of the debug message to be printed.
+ * If ql2xextended_error_logging is set accordingly,
+ * the message will appear in the messages file.
+ * qpair: Pointer to the qla_qpair the message relates to.
+ * id: A unique identifier for the level. It identifies the
+ * part of the code from where the message originated.
+ * fmt: The printf-style format string for the message.
+ */
+void
+ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+
+ if (!ql_mask_match(level))
+ return;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (qpair != NULL) {
+ const struct pci_dev *pdev = qpair->pdev;
+ /* <module-name> <pci-name> <msg-id>:<host> Message */
+ pr_warn("%s [%s]-%04x: %pV",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
+ &vaf);
+ } else {
+ pr_warn("%s [%s]-%04x: : %pV",
+ QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
+ }
+
+ va_end(va);
+
+}
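ql_log_qp() and ql_dbg_qp() mirror ql_log()/ql_dbg() but take the queue pair instead of the vha, so per-qpair code paths can log without reaching back to the host. A hypothetical caller, sketched with made-up message ids and assuming the usual driver headers:

/* Sketch only: qla_def.h pulls in the qla2xxx types and debug levels. */
#include "qla_def.h"

static void example_qp_trace(struct qla_qpair *qpair, srb_t *sp, int res)
{
	/* 0x3xxx ids belong to the IO tracing range; this one is made up. */
	ql_dbg_qp(ql_dbg_io, qpair, 0x3073,
	    "sp=%p handle=%u completed, res=%d.\n", sp, sp->handle, res);

	if (res)
		ql_log_qp(ql_log_warn, qpair, 0x7fff,
		    "command failed on qpair %d.\n", qpair->id);
}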
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index c6bffe929fe7..8877aa97d829 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -232,6 +232,15 @@ struct qla2xxx_fce_chain {
uint32_t eregs[8];
};
+/* used by exchange off load and extended login offload */
+struct qla2xxx_offld_chain {
+ uint32_t type;
+ uint32_t chain_size;
+
+ uint32_t size;
+ u64 addr;
+};
+
struct qla2xxx_mq_chain {
uint32_t type;
uint32_t chain_size;
@@ -258,6 +267,8 @@ struct qla2xxx_mqueue_chain {
#define DUMP_CHAIN_FCE 0x7FFFFAF0
#define DUMP_CHAIN_MQ 0x7FFFFAF1
#define DUMP_CHAIN_QUEUE 0x7FFFFAF2
+#define DUMP_CHAIN_EXLOGIN 0x7FFFFAF3
+#define DUMP_CHAIN_EXCHG 0x7FFFFAF4
#define DUMP_CHAIN_LAST 0x80000000
struct qla2xxx_fw_dump {
@@ -313,12 +324,18 @@ void __attribute__((format (printf, 4, 5)))
ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
void __attribute__((format (printf, 4, 5)))
ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_dbg_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
+
void __attribute__((format (printf, 4, 5)))
ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
void __attribute__((format (printf, 4, 5)))
ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
+
/* Debug Levels */
/* The 0x40000000 is the max value any debug level can have
* as ql2xextended_error_logging is of type signed int
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index eddbc1218a39..0730b10b4280 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -37,6 +37,7 @@
#include "qla_bsg.h"
#include "qla_nx.h"
#include "qla_nx2.h"
+#include "qla_nvme.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
#define QLA2XXX_MANUFACTURER "QLogic Corporation"
@@ -252,6 +253,8 @@
#define NPH_F_PORT 0x7fe /* FFFFFE */
#define NPH_IP_BROADCAST 0x7ff /* FFFFFF */
+#define NPH_SNS_LID(ha) (IS_FWI2_CAPABLE(ha) ? NPH_SNS : SIMPLE_NAME_SERVER)
+
#define MAX_CMDSZ 16 /* SCSI maximum CDB size. */
#include "qla_fw.h"
@@ -284,7 +287,7 @@ struct name_list_extended {
#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
-#define EXTENDED_EXCH_ENTRY_CNT 32768 /* Entries for offload case */
+#define FW_DEF_EXCHANGES_CNT 2048
struct req_que;
struct qla_tgt_sess;
@@ -341,6 +344,7 @@ struct srb_iocb {
#define SRB_LOGIN_RETRIED BIT_0
#define SRB_LOGIN_COND_PLOGI BIT_1
#define SRB_LOGIN_SKIP_PRLI BIT_2
+#define SRB_LOGIN_NVME_PRLI BIT_3
uint16_t data[2];
u32 iop[2];
} logio;
@@ -409,6 +413,21 @@ struct srb_iocb {
struct {
struct imm_ntfy_from_isp *ntfy;
} nack;
+ struct {
+ __le16 comp_status;
+ uint16_t rsp_pyld_len;
+ uint8_t aen_op;
+ void *desc;
+
+ /* These are only used with ls4 requests */
+ int cmd_len;
+ int rsp_len;
+ dma_addr_t cmd_dma;
+ dma_addr_t rsp_dma;
+ enum nvmefc_fcp_datadir dir;
+ uint32_t dl;
+ uint32_t timeout_sec;
+ } nvme;
} u;
struct timer_list timer;
@@ -434,9 +453,24 @@ struct srb_iocb {
#define SRB_NACK_PLOGI 16
#define SRB_NACK_PRLI 17
#define SRB_NACK_LOGO 18
+#define SRB_NVME_CMD 19
+#define SRB_NVME_LS 20
+#define SRB_PRLI_CMD 21
+
+enum {
+ TYPE_SRB,
+ TYPE_TGT_CMD,
+};
typedef struct srb {
+ /*
+ * Do not move cmd_type field, it needs to
+ * line up with qla_tgt_cmd->cmd_type
+ */
+ uint8_t cmd_type;
+ uint8_t pad[3];
atomic_t ref_count;
+ wait_queue_head_t nvme_ls_waitQ;
struct fc_port *fcport;
struct scsi_qla_host *vha;
uint32_t handle;
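The comment above pins cmd_type at offset 0 of srb_t so target-mode code can look at the first byte of either an srb or a qla_tgt_cmd and tell the two apart. A self-contained sketch of checking that invariant at compile time; the structure names here are made up:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Two structs told apart at runtime by a common leading cmd_type byte. */
struct srb_like {
	uint8_t cmd_type;
	uint8_t pad[3];
	/* ... */
};

struct tgt_cmd_like {
	uint8_t cmd_type;
	/* ... */
};

_Static_assert(offsetof(struct srb_like, cmd_type) ==
	       offsetof(struct tgt_cmd_like, cmd_type),
	       "cmd_type must stay at the same offset in both structures");

int main(void)
{
	printf("cmd_type offset = %zu\n", offsetof(struct srb_like, cmd_type));
	return 0;
}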
@@ -1075,6 +1109,7 @@ struct mbx_cmd_32 {
#define MBX_1 BIT_1
#define MBX_0 BIT_0
+#define RNID_TYPE_PORT_LOGIN 0x7
#define RNID_TYPE_SET_VERSION 0x9
#define RNID_TYPE_ASIC_TEMP 0xC
@@ -2139,6 +2174,7 @@ typedef struct {
uint8_t fabric_port_name[WWN_SIZE];
uint16_t fp_speed;
uint8_t fc4_type;
+ uint8_t fc4f_nvme; /* nvme fc4 feature bits */
} sw_info_t;
/* FCP-4 types */
@@ -2167,7 +2203,8 @@ typedef enum {
FCT_SWITCH,
FCT_BROADCAST,
FCT_INITIATOR,
- FCT_TARGET
+ FCT_TARGET,
+ FCT_NVME
} fc_port_type_t;
enum qla_sess_deletion {
@@ -2224,10 +2261,12 @@ enum fcport_mgt_event {
FCME_RSCN,
FCME_GIDPN_DONE,
FCME_PLOGI_DONE, /* Initiator side sent LLIOCB */
+ FCME_PRLI_DONE,
FCME_GNL_DONE,
FCME_GPSC_DONE,
FCME_GPDB_DONE,
FCME_GPNID_DONE,
+ FCME_GFFID_DONE,
FCME_DELETE_DONE,
};
@@ -2261,6 +2300,17 @@ typedef struct fc_port {
unsigned int login_pause:1;
unsigned int login_succ:1;
+ struct work_struct nvme_del_work;
+ atomic_t nvme_ref_count;
+ wait_queue_head_t nvme_waitQ;
+ uint32_t nvme_prli_service_param;
+#define NVME_PRLI_SP_CONF BIT_7
+#define NVME_PRLI_SP_INITIATOR BIT_5
+#define NVME_PRLI_SP_TARGET BIT_4
+#define NVME_PRLI_SP_DISCOVERY BIT_3
+ uint8_t nvme_flag;
+#define NVME_FLAG_REGISTERED 4
+
struct fc_port *conflict;
unsigned char logout_completed;
int generation;
@@ -2293,6 +2343,7 @@ typedef struct fc_port {
u32 supported_classes;
uint8_t fc4_type;
+ uint8_t fc4f_nvme;
uint8_t scan_state;
unsigned long last_queue_full;
@@ -2300,6 +2351,8 @@ typedef struct fc_port {
uint16_t port_id;
+ struct nvme_fc_remote_port *nvme_remote_port;
+
unsigned long retry_delay_timestamp;
struct qla_tgt_sess *tgt_session;
struct ct_sns_desc ct_desc;
@@ -2732,7 +2785,7 @@ struct ct_sns_req {
struct {
uint8_t reserved;
- uint8_t port_name[3];
+ uint8_t port_id[3];
} gff_id;
struct {
@@ -2814,6 +2867,7 @@ struct ct_sns_rsp {
} gpsc;
#define GFF_FCP_SCSI_OFFSET 7
+#define GFF_NVME_OFFSET 23 /* type = 28h */
struct {
uint8_t fc4_features[128];
} gff_id;
@@ -3039,6 +3093,7 @@ enum qla_work_type {
QLA_EVT_GPNID_DONE,
QLA_EVT_NEW_SESS,
QLA_EVT_GPDB,
+ QLA_EVT_PRLI,
QLA_EVT_GPSC,
QLA_EVT_UPD_FCPORT,
QLA_EVT_GNL,
@@ -3169,6 +3224,21 @@ struct qla_tc_param {
#define QLA_PRECONFIG_VPORTS 32
#define QLA_MAX_VPORTS_QLA24XX 128
#define QLA_MAX_VPORTS_QLA25XX 256
+
+struct qla_tgt_counters {
+ uint64_t qla_core_sbt_cmd;
+ uint64_t core_qla_que_buf;
+ uint64_t qla_core_ret_ctio;
+ uint64_t core_qla_snd_status;
+ uint64_t qla_core_ret_sta_ctio;
+ uint64_t core_qla_free_cmd;
+ uint64_t num_q_full_sent;
+ uint64_t num_alloc_iocb_failed;
+ uint64_t num_term_xchg_sent;
+};
+
+struct qla_qpair;
+
/* Response queue data structure */
struct rsp_que {
dma_addr_t dma;
@@ -3188,6 +3258,7 @@ struct rsp_que {
struct qla_msix_entry *msix;
struct req_que *req;
srb_t *status_srb; /* status continuation entry */
+ struct qla_qpair *qpair;
dma_addr_t dma_fx00;
response_t *ring_fx00;
@@ -3228,6 +3299,15 @@ struct req_que {
struct qla_qpair {
spinlock_t qp_lock;
atomic_t ref_count;
+ uint32_t lun_cnt;
+ /*
+ * For qpair 0, qp_lock_ptr will point at hardware_lock due to
+ * legacy code. For other Qpair(s), it will point at qp_lock.
+ */
+ spinlock_t *qp_lock_ptr;
+ struct scsi_qla_host *vha;
+ u32 chip_reset;
+
/* distill these fields down to 'online=0/1'
* ha->flags.eeh_busy
* ha->flags.pci_channel_io_perm_failure
@@ -3237,14 +3317,18 @@ struct qla_qpair {
/* move vha->flags.difdix_supported here */
uint32_t difdix_supported:1;
uint32_t delete_in_progress:1;
+ uint32_t fw_started:1;
+ uint32_t enable_class_2:1;
+ uint32_t enable_explicit_conf:1;
+ uint32_t use_shadow_reg:1;
uint16_t id; /* qp number used with FW */
- uint16_t num_active_cmd; /* cmds down at firmware */
- cpumask_t cpu_mask; /* CPU mask for cpu affinity operation */
uint16_t vp_idx; /* vport ID */
-
mempool_t *srb_mempool;
+ struct pci_dev *pdev;
+ void (*reqq_start_iocbs)(struct qla_qpair *);
+
/* to do: New driver: move queues to here instead of pointers */
struct req_que *req;
struct rsp_que *rsp;
@@ -3253,7 +3337,9 @@ struct qla_qpair {
struct qla_hw_data *hw;
struct work_struct q_work;
struct list_head qp_list_elem; /* vha->qp_list */
- struct scsi_qla_host *vha;
+ struct list_head hints_list;
+ uint16_t cpuid;
+ struct qla_tgt_counters tgt_counters;
};
/* Place holder for FW buffer parameters */
@@ -3272,8 +3358,6 @@ struct scsi_qlt_host {
struct qlt_hw_data {
/* Protected by hw lock */
- uint32_t enable_class_2:1;
- uint32_t enable_explicit_conf:1;
uint32_t node_name_set:1;
dma_addr_t atio_dma; /* Physical address. */
@@ -3285,9 +3369,6 @@ struct qlt_hw_data {
uint32_t __iomem *atio_q_out;
struct qla_tgt_func_tmpl *tgt_ops;
- struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS];
- uint16_t current_handle;
-
struct qla_tgt_vp_map *tgt_vp_map;
int saved_set;
@@ -3302,6 +3383,7 @@ struct qlt_hw_data {
struct dentry *dfs_tgt_sess;
struct dentry *dfs_tgt_port_database;
+ struct dentry *dfs_naqp;
struct list_head q_full_list;
uint32_t num_pend_cmds;
@@ -3310,7 +3392,8 @@ struct qlt_hw_data {
spinlock_t q_full_lock;
uint32_t leak_exchg_thresh_hold;
spinlock_t sess_lock;
- int rspq_vector_cpuid;
+ int num_act_qpairs;
+#define DEFAULT_NAQP 2
spinlock_t atio_lock ____cacheline_aligned;
struct btree_head32 host_map;
};
@@ -3591,6 +3674,10 @@ struct qla_hw_data {
#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha))
#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
#define IS_FAWWN_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_EXCHG_OFFLD_CAPABLE(ha) \
+ (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_EXLOGIN_OFFLD_CAPABLE(ha) \
+ (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
/* HBA serial number */
uint8_t serial0;
@@ -3927,24 +4014,11 @@ struct qla_hw_data {
struct work_struct board_disable;
struct mr_data_fx00 mr;
- uint32_t chip_reset;
struct qlt_hw_data tgt;
int allow_cna_fw_dump;
};
-struct qla_tgt_counters {
- uint64_t qla_core_sbt_cmd;
- uint64_t core_qla_que_buf;
- uint64_t qla_core_ret_ctio;
- uint64_t core_qla_snd_status;
- uint64_t qla_core_ret_sta_ctio;
- uint64_t core_qla_free_cmd;
- uint64_t num_q_full_sent;
- uint64_t num_alloc_iocb_failed;
- uint64_t num_term_xchg_sent;
-};
-
/*
* Qlogic scsi host structure
*/
@@ -3973,6 +4047,9 @@ typedef struct scsi_qla_host {
uint32_t fw_tgt_reported:1;
uint32_t bbcr_enable:1;
uint32_t qpairs_available:1;
+ uint32_t qpairs_req_created:1;
+ uint32_t qpairs_rsp_created:1;
+ uint32_t nvme_enabled:1;
} flags;
atomic_t loop_state;
@@ -4017,7 +4094,6 @@ typedef struct scsi_qla_host {
#define PFLG_DISCONNECTED 0 /* PCI device removed */
#define PFLG_DRIVER_REMOVING 1 /* PCI driver .remove */
#define PFLG_DRIVER_PROBING 2 /* PCI driver .probe */
-#define PCI_ERR 30
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
@@ -4052,6 +4128,13 @@ typedef struct scsi_qla_host {
uint8_t port_name[WWN_SIZE];
uint8_t fabric_node_name[WWN_SIZE];
+ struct nvme_fc_local_port *nvme_local_port;
+ atomic_t nvme_ref_count;
+ wait_queue_head_t nvme_waitQ;
+ struct list_head nvme_rport_list;
+ atomic_t nvme_active_aen_cnt;
+ uint16_t nvme_last_rptd_aen;
+
uint16_t fcoe_vlan_id;
uint16_t fcoe_fcf_idx;
uint8_t fcoe_vn_port_mac[6];
@@ -4108,10 +4191,8 @@ typedef struct scsi_qla_host {
struct fc_host_statistics fc_host_stat;
struct qla_statistics qla_stats;
struct bidi_statistics bidi_stats;
-
atomic_t vref_count;
struct qla8044_reset_template reset_tmplt;
- struct qla_tgt_counters tgt_counters;
uint16_t bbcr;
struct name_list_extended gnl;
/* Count of active session/fcport */
@@ -4156,6 +4237,26 @@ struct qla2_sgx {
srb_t *sp;
};
+#define QLA_FW_STARTED(_ha) { \
+ int i; \
+ _ha->flags.fw_started = 1; \
+ _ha->base_qpair->fw_started = 1; \
+ for (i = 0; i < _ha->max_qpairs; i++) { \
+ if (_ha->queue_pair_map[i]) \
+ _ha->queue_pair_map[i]->fw_started = 1; \
+ } \
+}
+
+#define QLA_FW_STOPPED(_ha) { \
+ int i; \
+ _ha->flags.fw_started = 0; \
+ _ha->base_qpair->fw_started = 0; \
+ for (i = 0; i < _ha->max_qpairs; i++) { \
+ if (_ha->queue_pair_map[i]) \
+ _ha->queue_pair_map[i]->fw_started = 0; \
+ } \
+}
+
/*
* Macros to help code, maintain, etc.
*/
@@ -4199,6 +4300,25 @@ struct qla2_sgx {
#define QLA_QPAIR_MARK_NOT_BUSY(__qpair) \
atomic_dec(&__qpair->ref_count); \
+
+#define QLA_ENA_CONF(_ha) {\
+ int i;\
+ _ha->base_qpair->enable_explicit_conf = 1; \
+ for (i = 0; i < _ha->max_qpairs; i++) { \
+ if (_ha->queue_pair_map[i]) \
+ _ha->queue_pair_map[i]->enable_explicit_conf = 1; \
+ } \
+}
+
+#define QLA_DIS_CONF(_ha) {\
+ int i;\
+ _ha->base_qpair->enable_explicit_conf = 0; \
+ for (i = 0; i < _ha->max_qpairs; i++) { \
+ if (_ha->queue_pair_map[i]) \
+ _ha->queue_pair_map[i]->enable_explicit_conf = 0; \
+ } \
+}
+
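These statement macros (like QLA_FW_STARTED/QLA_FW_STOPPED earlier in this group) expand to a brace block that declares a local and loops over the qpair map. A common way to keep such macros safe inside unbraced if/else is the do { } while (0) wrapper; a sketch of the same idea, not the driver's actual definition:

/* Sketch only: same fields as the driver's macro, wrapped in do/while (0)
 * so it behaves like a single statement at every call site. */
#define QLA_ENA_CONF_SKETCH(_ha) do {					\
	int _i;								\
	(_ha)->base_qpair->enable_explicit_conf = 1;			\
	for (_i = 0; _i < (_ha)->max_qpairs; _i++) {			\
		if ((_ha)->queue_pair_map[_i])				\
			(_ha)->queue_pair_map[_i]->enable_explicit_conf = 1; \
	}								\
} while (0)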
/*
* qla2x00 local function return status codes
*/
@@ -4253,6 +4373,10 @@ enum nexus_wait_type {
WAIT_LUN,
};
+#define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \
+ (IS_QLA27XX(_ha) || IS_QLA83XX(_ha)))
+
+#include "qla_target.h"
#include "qla_gbl.h"
#include "qla_dbg.h"
#include "qla_inline.h"
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 989e17b0758c..d231e7156134 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -70,7 +70,7 @@ qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
qla2x00_gid_list_size(ha),
&gid_list_dma, GFP_KERNEL);
if (!gid_list) {
- ql_dbg(ql_dbg_user, vha, 0x705c,
+ ql_dbg(ql_dbg_user, vha, 0x7018,
"DMA allocation failed for %u\n",
qla2x00_gid_list_size(ha));
return 0;
@@ -164,26 +164,56 @@ static int
qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
{
struct scsi_qla_host *vha = s->private;
+ struct qla_qpair *qpair = vha->hw->base_qpair;
+ uint64_t qla_core_sbt_cmd, core_qla_que_buf, qla_core_ret_ctio,
+ core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd,
+ num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent;
+ u16 i;
+
+ qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd;
+ core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf;
+ qla_core_ret_ctio = qpair->tgt_counters.qla_core_ret_ctio;
+ core_qla_snd_status = qpair->tgt_counters.core_qla_snd_status;
+ qla_core_ret_sta_ctio = qpair->tgt_counters.qla_core_ret_sta_ctio;
+ core_qla_free_cmd = qpair->tgt_counters.core_qla_free_cmd;
+ num_q_full_sent = qpair->tgt_counters.num_q_full_sent;
+ num_alloc_iocb_failed = qpair->tgt_counters.num_alloc_iocb_failed;
+ num_term_xchg_sent = qpair->tgt_counters.num_term_xchg_sent;
+
+ for (i = 0; i < vha->hw->max_qpairs; i++) {
+ qpair = vha->hw->queue_pair_map[i];
+ qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd;
+ core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf;
+ qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio;
+ core_qla_snd_status += qpair->tgt_counters.core_qla_snd_status;
+ qla_core_ret_sta_ctio +=
+ qpair->tgt_counters.qla_core_ret_sta_ctio;
+ core_qla_free_cmd += qpair->tgt_counters.core_qla_free_cmd;
+ num_q_full_sent += qpair->tgt_counters.num_q_full_sent;
+ num_alloc_iocb_failed +=
+ qpair->tgt_counters.num_alloc_iocb_failed;
+ num_term_xchg_sent += qpair->tgt_counters.num_term_xchg_sent;
+ }
seq_puts(s, "Target Counters\n");
seq_printf(s, "qla_core_sbt_cmd = %lld\n",
- vha->tgt_counters.qla_core_sbt_cmd);
+ qla_core_sbt_cmd);
seq_printf(s, "qla_core_ret_sta_ctio = %lld\n",
- vha->tgt_counters.qla_core_ret_sta_ctio);
+ qla_core_ret_sta_ctio);
seq_printf(s, "qla_core_ret_ctio = %lld\n",
- vha->tgt_counters.qla_core_ret_ctio);
+ qla_core_ret_ctio);
seq_printf(s, "core_qla_que_buf = %lld\n",
- vha->tgt_counters.core_qla_que_buf);
+ core_qla_que_buf);
seq_printf(s, "core_qla_snd_status = %lld\n",
- vha->tgt_counters.core_qla_snd_status);
+ core_qla_snd_status);
seq_printf(s, "core_qla_free_cmd = %lld\n",
- vha->tgt_counters.core_qla_free_cmd);
+ core_qla_free_cmd);
seq_printf(s, "num alloc iocb failed = %lld\n",
- vha->tgt_counters.num_alloc_iocb_failed);
+ num_alloc_iocb_failed);
seq_printf(s, "num term exchange sent = %lld\n",
- vha->tgt_counters.num_term_xchg_sent);
+ num_term_xchg_sent);
seq_printf(s, "num Q full sent = %lld\n",
- vha->tgt_counters.num_q_full_sent);
+ num_q_full_sent);
/* DIF stats */
seq_printf(s, "DIF Inp Bytes = %lld\n",
@@ -314,6 +344,81 @@ static const struct file_operations dfs_fce_ops = {
.release = qla2x00_dfs_fce_release,
};
+static int
+qla_dfs_naqp_show(struct seq_file *s, void *unused)
+{
+ struct scsi_qla_host *vha = s->private;
+ struct qla_hw_data *ha = vha->hw;
+
+ seq_printf(s, "%d\n", ha->tgt.num_act_qpairs);
+ return 0;
+}
+
+static int
+qla_dfs_naqp_open(struct inode *inode, struct file *file)
+{
+ struct scsi_qla_host *vha = inode->i_private;
+
+ return single_open(file, qla_dfs_naqp_show, vha);
+}
+
+static ssize_t
+qla_dfs_naqp_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ struct seq_file *s = file->private_data;
+ struct scsi_qla_host *vha = s->private;
+ struct qla_hw_data *ha = vha->hw;
+ char *buf;
+ int rc = 0;
+ unsigned long num_act_qp;
+
+ if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha))) {
+ pr_err("host%ld: this adapter does not support Multi Q.",
+ vha->host_no);
+ return -EINVAL;
+ }
+
+ if (!vha->flags.qpairs_available) {
+ pr_err("host%ld: Driver is not setup with Multi Q.",
+ vha->host_no);
+ return -EINVAL;
+ }
+ buf = memdup_user_nul(buffer, count);
+ if (IS_ERR(buf)) {
+ pr_err("host%ld: fail to copy user buffer.",
+ vha->host_no);
+ return PTR_ERR(buf);
+ }
+
+ num_act_qp = simple_strtoul(buf, NULL, 0);
+
+ if (num_act_qp >= vha->hw->max_qpairs) {
+ pr_err("User set invalid number of qpairs %lu. Max = %d",
+ num_act_qp, vha->hw->max_qpairs);
+ rc = -EINVAL;
+ goto out_free;
+ }
+
+ if (num_act_qp != ha->tgt.num_act_qpairs) {
+ ha->tgt.num_act_qpairs = num_act_qp;
+ qlt_clr_qp_table(vha);
+ }
+ rc = count;
+out_free:
+ kfree(buf);
+ return rc;
+}
+
+static const struct file_operations dfs_naqp_ops = {
+ .open = qla_dfs_naqp_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = qla_dfs_naqp_write,
+};
+
+
int
qla2x00_dfs_setup(scsi_qla_host_t *vha)
{
@@ -370,7 +475,7 @@ create_nodes:
ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
if (!ha->tgt.dfs_tgt_port_database) {
- ql_log(ql_log_warn, vha, 0xffff,
+ ql_log(ql_log_warn, vha, 0xd03f,
"Unable to create debugFS tgt_port_database node.\n");
goto out;
}
@@ -386,11 +491,20 @@ create_nodes:
ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
if (!ha->tgt.dfs_tgt_sess) {
- ql_log(ql_log_warn, vha, 0xffff,
- "Unable to create debugFS tgt_sess node.\n");
+ ql_log(ql_log_warn, vha, 0xd040,
+ "Unable to create debugFS tgt_sess node.\n");
goto out;
}
+ if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
+ ha->tgt.dfs_naqp = debugfs_create_file("naqp",
+ 0400, ha->dfs_dir, vha, &dfs_naqp_ops);
+ if (!ha->tgt.dfs_naqp) {
+ ql_log(ql_log_warn, vha, 0xd011,
+ "Unable to create debugFS naqp node.\n");
+ goto out;
+ }
+ }
out:
return 0;
}
@@ -400,6 +514,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+ if (ha->tgt.dfs_naqp) {
+ debugfs_remove(ha->tgt.dfs_naqp);
+ ha->tgt.dfs_naqp = NULL;
+ }
+
if (ha->tgt.dfs_tgt_sess) {
debugfs_remove(ha->tgt.dfs_tgt_sess);
ha->tgt.dfs_tgt_sess = NULL;
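The new naqp debugfs node added in this file reads back ha->tgt.num_act_qpairs and accepts a new count on write. A user-space sketch for setting it; the debugfs directory name and host instance below are guesses and will differ per system:

#include <stdio.h>

int main(void)
{
	/* Hypothetical path; adjust to the qla2xxx debugfs entry on your box. */
	const char *path = "/sys/kernel/debug/qla2xxx/qla2xxx_0/naqp";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Request two active queue pairs for target traffic. */
	fprintf(f, "2\n");
	fclose(f);
	return 0;
}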
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 1f808928763b..b9c9886e8b1d 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -7,6 +7,9 @@
#ifndef __QLA_FW_H
#define __QLA_FW_H
+#include <linux/nvme.h>
+#include <linux/nvme-fc.h>
+
#define MBS_CHECKSUM_ERROR 0x4010
#define MBS_INVALID_PRODUCT_KEY 0x4020
@@ -37,6 +40,12 @@ struct port_database_24xx {
#define PDF_CLASS_2 BIT_4
#define PDF_HARD_ADDR BIT_1
+ /*
+ * for NVMe, the login_state field has been
+ * split into nibbles.
+ * The lower nibble is for FCP.
+ * The upper nibble is for NVMe.
+ */
uint8_t current_login_state;
uint8_t last_login_state;
#define PDS_PLOGI_PENDING 0x03
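Per the comment above, a single login-state byte now carries two 4-bit states, FCP in the low nibble and NVMe in the high nibble. A tiny sketch of splitting it; the example value is made up:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t current_login_state = 0x46; /* hypothetical: NVMe=4h, FCP=6h */

	printf("FCP login state  = 0x%x\n", current_login_state & 0x0f);
	printf("NVMe login state = 0x%x\n", (current_login_state >> 4) & 0x0f);
	return 0;
}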
@@ -69,7 +78,11 @@ struct port_database_24xx {
uint8_t port_name[WWN_SIZE];
uint8_t node_name[WWN_SIZE];
- uint8_t reserved_3[24];
+ uint8_t reserved_3[4];
+ uint16_t prli_nvme_svc_param_word_0; /* Bits 15-0 of word 0 */
+ uint16_t prli_nvme_svc_param_word_3; /* Bits 15-0 of word 3 */
+ uint16_t nvme_first_burst_size;
+ uint8_t reserved_4[14];
};
/*
@@ -593,9 +606,14 @@ struct sts_entry_24xx {
uint32_t residual_len; /* FW calc residual transfer length. */
- uint16_t reserved_1;
+ union {
+ uint16_t reserved_1;
+ uint16_t nvme_rsp_pyld_len;
+ };
+
uint16_t state_flags; /* State flags. */
#define SF_TRANSFERRED_DATA BIT_11
+#define SF_NVME_ERSP BIT_6
#define SF_FCP_RSP_DMA BIT_0
uint16_t retry_delay;
@@ -605,8 +623,16 @@ struct sts_entry_24xx {
uint32_t rsp_residual_count; /* FCP RSP residual count. */
uint32_t sense_len; /* FCP SENSE length. */
- uint32_t rsp_data_len; /* FCP response data length. */
- uint8_t data[28]; /* FCP response/sense information. */
+
+ union {
+ struct {
+ uint32_t rsp_data_len; /* FCP response data length */
+ uint8_t data[28]; /* FCP rsp/sense information */
+ };
+ struct nvme_fc_ersp_iu nvme_ersp;
+ uint8_t nvme_ersp_data[32];
+ };
+
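The union added above lets the same 32-byte status payload be read either as the classic FCP response/sense fields or as an NVMe extended response. A stand-alone sketch of that overlay idea; field names and sizes here are illustrative, not the firmware layout:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct rsp_payload {
	union {
		struct {
			uint32_t rsp_data_len;	/* FCP response data length */
			uint8_t  data[28];	/* FCP rsp/sense information */
		} fcp;
		uint8_t nvme_ersp_data[32];	/* NVMe extended response */
	} u;
};

int main(void)
{
	struct rsp_payload p;

	memset(&p, 0, sizeof(p));
	p.u.fcp.rsp_data_len = 8;

	printf("payload size = %zu bytes\n", sizeof(p.u));
	printf("first byte seen through the NVMe view = 0x%02x\n",
	       p.u.nvme_ersp_data[0]);
	return 0;
}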
/*
* If DIF Error is set in comp_status, these additional fields are
* defined:
@@ -819,6 +845,7 @@ struct logio_entry_24xx {
#define LCF_CLASS_2 BIT_8 /* Enable class 2 during PLOGI. */
#define LCF_FREE_NPORT BIT_7 /* Release NPORT handle after LOGO. */
#define LCF_EXPL_LOGO BIT_6 /* Perform an explicit LOGO. */
+#define LCF_NVME_PRLI BIT_6 /* Perform NVME FC4 PRLI */
#define LCF_SKIP_PRLI BIT_5 /* Skip PRLI after PLOGI. */
#define LCF_IMPL_LOGO_ALL BIT_5 /* Implicit LOGO to all ports. */
#define LCF_COND_PLOGI BIT_4 /* PLOGI only if not logged-in. */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 5b2451745e9f..cadb6e3baacc 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -10,6 +10,17 @@
#include <linux/interrupt.h>
/*
+ * Global functions prototype in qla_nvme.c source file.
+ */
+extern void qla_nvme_register_hba(scsi_qla_host_t *);
+extern int qla_nvme_register_remote(scsi_qla_host_t *, fc_port_t *);
+extern void qla_nvme_delete(scsi_qla_host_t *);
+extern void qla_nvme_abort(struct qla_hw_data *, srb_t *sp);
+extern void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *, struct pt_ls4_request *,
+ struct req_que *);
+extern void qla24xx_async_gffid_sp_done(void *, int);
+
+/*
* Global Function Prototypes in qla_init.c source file.
*/
extern int qla2x00_initialize_adapter(scsi_qla_host_t *);
@@ -77,8 +88,7 @@ struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
enum qla_work_type);
extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e);
-extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
-extern void *qla2x00_alloc_iocbs_ready(struct scsi_qla_host *, srb_t *);
+extern void *qla2x00_alloc_iocbs_ready(struct qla_qpair *, srb_t *);
extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
extern fc_port_t *
@@ -96,10 +106,11 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
extern int qla2x00_init_rings(scsi_qla_host_t *);
extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *,
- int, int);
+ int, int, bool);
extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *);
void qla2x00_fcport_event_handler(scsi_qla_host_t *, struct event_arg *);
int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8);
+int qla24xx_async_prli(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
struct imm_ntfy_from_isp *, int);
int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *,
@@ -137,8 +148,11 @@ extern int ql2xmdcapmask;
extern int ql2xmdenable;
extern int ql2xexlogins;
extern int ql2xexchoffld;
+extern int ql2xiniexchg;
extern int ql2xfwholdabts;
extern int ql2xmvasynctoatio;
+extern int ql2xuctrlirq;
+extern int ql2xnvmeenable;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -254,7 +268,8 @@ extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
extern int qla2xxx_dif_start_scsi_mq(srb_t *);
extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
-extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
+extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
+extern void *__qla2x00_alloc_iocbs(struct qla_qpair *, srb_t *);
extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
uint32_t *, uint16_t, struct qla_tc_param *);
@@ -604,7 +619,7 @@ extern int qla2x00_gpn_id(scsi_qla_host_t *, sw_info_t *);
extern int qla2x00_gnn_id(scsi_qla_host_t *, sw_info_t *);
extern void qla2x00_gff_id(scsi_qla_host_t *, sw_info_t *);
extern int qla2x00_rft_id(scsi_qla_host_t *);
-extern int qla2x00_rff_id(scsi_qla_host_t *);
+extern int qla2x00_rff_id(scsi_qla_host_t *, u8);
extern int qla2x00_rnn_id(scsi_qla_host_t *);
extern int qla2x00_rsnn_nn(scsi_qla_host_t *);
extern void *qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
@@ -630,7 +645,8 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *);
int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
-
+void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea);
+int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport);
/*
* Global Function Prototypes in qla_attr.c source file.
*/
@@ -662,9 +678,9 @@ extern int qla25xx_request_irq(struct qla_hw_data *, struct qla_qpair *,
extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
- uint16_t, int, uint8_t);
+ uint16_t, int, uint8_t, bool);
extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
- uint16_t, struct qla_qpair *);
+ uint16_t, struct qla_qpair *, bool);
extern void qla2x00_init_response_q_entries(struct rsp_que *);
extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
@@ -833,14 +849,13 @@ extern irqreturn_t qla8044_intr_handler(int, void *);
extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
extern int qla8044_abort_isp(scsi_qla_host_t *);
extern int qla8044_check_fw_alive(struct scsi_qla_host *);
-
-extern void qlt_host_reset_handler(struct qla_hw_data *ha);
extern int qla_get_exlogin_status(scsi_qla_host_t *, uint16_t *,
uint16_t *);
extern int qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr);
extern int qla_get_exchoffld_status(scsi_qla_host_t *, uint16_t *, uint16_t *);
-extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *, dma_addr_t);
-extern void qlt_handle_abts_recv(struct scsi_qla_host *, response_t *);
+extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *);
+extern void qlt_handle_abts_recv(struct scsi_qla_host *, struct rsp_que *,
+ response_t *);
int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
struct imm_ntfy_from_isp *, int);
@@ -856,5 +871,6 @@ void qla24xx_delete_sess_fn(struct work_struct *);
void qlt_unknown_atio_work_fn(struct work_struct *);
void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
void qlt_remove_target_resources(struct qla_hw_data *);
+void qlt_clr_qp_table(struct scsi_qla_host *vha);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 9bc9aa9e164a..b323a7c71eda 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -124,6 +124,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
int rval;
uint16_t comp_status;
struct qla_hw_data *ha = vha->hw;
+ bool lid_is_sns = false;
rval = QLA_FUNCTION_FAILED;
if (ms_pkt->entry_status != 0) {
@@ -155,6 +156,25 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
} else
rval = QLA_SUCCESS;
break;
+ case CS_PORT_LOGGED_OUT:
+ if (IS_FWI2_CAPABLE(ha)) {
+ if (le16_to_cpu(ms_pkt->loop_id.extended) ==
+ NPH_SNS)
+ lid_is_sns = true;
+ } else {
+ if (le16_to_cpu(ms_pkt->loop_id.extended) ==
+ SIMPLE_NAME_SERVER)
+ lid_is_sns = true;
+ }
+ if (lid_is_sns) {
+ ql_dbg(ql_dbg_async, vha, 0x502b,
+ "%s failed, Name server has logged out",
+ routine);
+ rval = QLA_NOT_LOGGED_IN;
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ }
+ break;
default:
ql_dbg(ql_dbg_disc, vha, 0x2033,
"%s failed, completion status (%x) on port_id: "
@@ -530,6 +550,8 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
+ if (vha->flags.nvme_enabled)
+ ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
@@ -555,7 +577,7 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
* Returns 0 on success.
*/
int
-qla2x00_rff_id(scsi_qla_host_t *vha)
+qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
{
int rval;
struct qla_hw_data *ha = vha->hw;
@@ -593,7 +615,7 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
qlt_rff_id(vha, ct_req);
- ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */
+ ct_req->req.rff_id.fc4_type = type; /* SCSI - FCP */
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -2004,7 +2026,7 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- ql_dbg(ql_dbg_disc, vha, 0x20b1,
+ ql_dbg(ql_dbg_disc, vha, 0x201b,
"Vendor Identifier = %s.\n", eiter->a.vendor_identifier);
/* Update MS request size. */
@@ -2144,6 +2166,13 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
eiter->a.fc4_types[2],
eiter->a.fc4_types[1]);
+ if (vha->flags.nvme_enabled) {
+ eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
+ ql_dbg(ql_dbg_disc, vha, 0x211f,
+ "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
+ eiter->a.fc4_types[6]);
+ }
+
/* Supported speed. */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
@@ -2216,7 +2245,7 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
}
size += 4 + 4;
- ql_dbg(ql_dbg_disc, vha, 0x20bc,
+ ql_dbg(ql_dbg_disc, vha, 0x2017,
"Current_Speed = %x.\n", eiter->a.cur_speed);
/* Max frame size. */
@@ -2261,7 +2290,7 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
- ql_dbg(ql_dbg_disc, vha, 0x203d,
+ ql_dbg(ql_dbg_disc, vha, 0x201a,
"HostName=%s.\n", eiter->a.host_name);
/* Node Name */
@@ -2341,6 +2370,15 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
"Port Active FC4 Type = %02x %02x.\n",
eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
+ if (vha->flags.nvme_enabled) {
+ eiter->a.port_fc4_type[4] = 0;
+ eiter->a.port_fc4_type[5] = 0;
+ eiter->a.port_fc4_type[6] = 1; /* NVMe type 28h */
+ ql_dbg(ql_dbg_disc, vha, 0x2120,
+ "NVME Port Active FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
+ eiter->a.port_fc4_type[6]);
+ }
+
/* Port State */
eiter = entries + size;
eiter->type = cpu_to_be16(FDMI_PORT_STATE);
@@ -2368,13 +2406,13 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + 4);
size += 4 + 4;
- ql_dbg(ql_dbg_disc, vha, 0x20c8,
+ ql_dbg(ql_dbg_disc, vha, 0x201c,
"Port Id = %x.\n", eiter->a.port_id);
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
- ql_dbg(ql_dbg_disc, vha, 0x203e,
+ ql_dbg(ql_dbg_disc, vha, 0x2018,
"RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
entries, size);
@@ -2734,6 +2772,10 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
list[i].fc4_type = FC4_TYPE_FCP_SCSI;
else
list[i].fc4_type = FC4_TYPE_OTHER;
+
+ list[i].fc4f_nvme =
+ ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
+ list[i].fc4f_nvme &= 0xf;
}
/* Last device exit. */
@@ -2747,13 +2789,13 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
fc_port_t *fcport = ea->fcport;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %8phC login state %d \n",
- __func__, fcport->port_name, fcport->fw_login_state);
+ ql_dbg(ql_dbg_disc, vha, 0x201d,
+ "%s %8phC login state %d\n",
+ __func__, fcport->port_name, fcport->fw_login_state);
if (ea->sp->gen2 != fcport->login_gen) {
/* PLOGI/PRLI/LOGO came in while cmd was out.*/
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x201e,
"%s %8phC generation changed rscn %d|%d login %d|%d \n",
__func__, fcport->port_name, fcport->last_rscn_gen,
fcport->rscn_gen, fcport->last_login_gen, fcport->login_gen);
@@ -2777,7 +2819,7 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
if (atomic_read(&fcport->state) ==
FCS_ONLINE)
break;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x201f,
"%s %d %8phC post gnl\n",
__func__, __LINE__, fcport->port_name);
qla24xx_post_gnl_work(vha, fcport);
@@ -2786,14 +2828,14 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
} else { /* fcport->d_id.b24 != ea->id.b24 */
fcport->d_id.b24 = ea->id.b24;
if (fcport->deleted == QLA_SESS_DELETED) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x2021,
"%s %d %8phC post del sess\n",
__func__, __LINE__, fcport->port_name);
qlt_schedule_sess_for_deletion_lock(fcport);
}
}
} else { /* ea->sp->gen1 != fcport->rscn_gen */
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x2022,
"%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
/* rscn came in while cmd was out */
@@ -2803,18 +2845,18 @@ void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
/* cable pulled */
if (ea->sp->gen1 == fcport->rscn_gen) {
if (ea->sp->gen2 == fcport->login_gen) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x2042,
"%s %d %8phC post del sess\n", __func__,
__LINE__, fcport->port_name);
qlt_schedule_sess_for_deletion_lock(fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x2045,
"%s %d %8phC login\n", __func__, __LINE__,
fcport->port_name);
qla24xx_fcport_handle_login(vha, fcport);
}
} else {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x2049,
"%s %d %8phC post gidpn\n", __func__, __LINE__,
fcport->port_name);
qla24xx_post_gidpn_work(vha, fcport);
@@ -2841,7 +2883,7 @@ static void qla2x00_async_gidpn_sp_done(void *s, int res)
ea.rc = res;
ea.event = FCME_GIDPN_DONE;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x204f,
"Async done-%s res %x, WWPN %8phC ID %3phC \n",
sp->name, res, fcport->port_name, id);
@@ -2897,11 +2939,11 @@ int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
if (rval != QLA_SUCCESS)
goto done_free_sp;
- ql_dbg(ql_dbg_disc, vha, 0x206f,
- "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n",
- sp->name, fcport->port_name,
- sp->handle, fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ ql_dbg(ql_dbg_disc, vha, 0x20a4,
+ "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n",
+ sp->name, fcport->port_name,
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -2952,7 +2994,7 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x2053,
"Async done-%s res %x, WWPN %8phC \n",
sp->name, res, fcport->port_name);
@@ -2965,10 +3007,9 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
if ((ct_rsp->header.reason_code ==
CT_REASON_INVALID_COMMAND_CODE) ||
(ct_rsp->header.reason_code ==
- CT_REASON_COMMAND_UNSUPPORTED)) {
- ql_dbg(ql_dbg_disc, vha, 0x205a,
- "GPSC command unsupported, disabling "
- "query.\n");
+ CT_REASON_COMMAND_UNSUPPORTED)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2019,
+ "GPSC command unsupported, disabling query.\n");
ha->flags.gpsc_supported = 0;
res = QLA_SUCCESS;
}
@@ -2997,12 +3038,11 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
break;
}
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
- sp->name,
- fcport->fabric_port_name,
- be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
- be16_to_cpu(ct_rsp->rsp.gpsc.speed));
+ ql_dbg(ql_dbg_disc, vha, 0x2054,
+ "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
+ sp->name, fcport->fabric_port_name,
+ be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
+ be16_to_cpu(ct_rsp->rsp.gpsc.speed));
}
done:
memset(&ea, 0, sizeof(ea));
@@ -3058,11 +3098,11 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
if (rval != QLA_SUCCESS)
goto done_free_sp;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
- sp->name, fcport->port_name, sp->handle,
- fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ ql_dbg(ql_dbg_disc, vha, 0x205e,
+ "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
+ sp->name, fcport->port_name, sp->handle,
+ fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -3118,21 +3158,32 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
if (fcport) {
/* cable moved. just plugged in */
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
-
fcport->rscn_gen++;
fcport->d_id = ea->id;
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->flags |= FCF_FABRIC_DEVICE;
- qlt_schedule_sess_for_deletion_lock(fcport);
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ ql_dbg(ql_dbg_disc, vha, 0x210d,
+ "%s %d %8phC login\n", __func__, __LINE__,
+ fcport->port_name);
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+ case DSC_DELETE_PEND:
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x2064,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+ qlt_schedule_sess_for_deletion_lock(fcport);
+ break;
+ }
} else {
/* create new fcport */
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post new sess\n",
- __func__, __LINE__, ea->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x2065,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, ea->port_name);
qla24xx_post_newsess_work(vha, &ea->id, ea->port_name, NULL);
}
@@ -3149,10 +3200,10 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
struct event_arg ea;
struct qla_work_evt *e;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async done-%s res %x ID %3phC. %8phC\n",
- sp->name, res, ct_req->req.port_id.port_id,
- ct_rsp->rsp.gpn_id.port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x2066,
+ "Async done-%s res %x ID %3phC. %8phC\n",
+ sp->name, res, ct_req->req.port_id.port_id,
+ ct_rsp->rsp.gpn_id.port_name);
memset(&ea, 0, sizeof(ea));
memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
@@ -3214,8 +3265,8 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
GFP_KERNEL);
if (!sp->u.iocb_cmd.u.ctarg.req) {
- ql_log(ql_log_warn, vha, 0xffff,
- "Failed to allocate ct_sns request.\n");
+ ql_log(ql_log_warn, vha, 0xd041,
+ "Failed to allocate ct_sns request.\n");
goto done_free_sp;
}
@@ -3223,8 +3274,8 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
GFP_KERNEL);
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
- ql_log(ql_log_warn, vha, 0xffff,
- "Failed to allocate ct_sns request.\n");
+ ql_log(ql_log_warn, vha, 0xd042,
+ "Failed to allocate ct_sns request.\n");
goto done_free_sp;
}
@@ -3251,9 +3302,9 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
if (rval != QLA_SUCCESS)
goto done_free_sp;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s hdl=%x ID %3phC.\n", sp->name,
- sp->handle, ct_req->req.port_id.port_id);
+ ql_dbg(ql_dbg_disc, vha, 0x2067,
+ "Async-%s hdl=%x ID %3phC.\n", sp->name,
+ sp->handle, ct_req->req.port_id.port_id);
return rval;
done_free_sp:
@@ -3276,3 +3327,111 @@ done_free_sp:
done:
return rval;
}
+
+void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport = ea->fcport;
+
+ qla24xx_post_gnl_work(vha, fcport);
+}
+
+void qla24xx_async_gffid_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ fc_port_t *fcport = sp->fcport;
+ struct ct_sns_rsp *ct_rsp;
+ struct event_arg ea;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2133,
+ "Async done-%s res %x ID %x. %8phC\n",
+ sp->name, res, fcport->d_id.b24, fcport->port_name);
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
+ /*
+ * FC-GS-7, 5.2.3.12 FC-4 Features - format
+ * The format of the FC-4 Features object, as defined by the FC-4,
+ * Shall be an array of 4-bit values, one for each type code value
+ */
+ if (!res) {
+ if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
+ /* w1 b00:03 */
+ fcport->fc4_type =
+ ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
+ fcport->fc4_type &= 0xf;
+ }
+
+ if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
+ /* w5 [00:03]/28h */
+ fcport->fc4f_nvme =
+ ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
+ fcport->fc4f_nvme &= 0xf;
+ }
+ }
+
+ memset(&ea, 0, sizeof(ea));
+ ea.sp = sp;
+ ea.fcport = sp->fcport;
+ ea.rc = res;
+ ea.event = FCME_GFFID_DONE;
+
+ qla2x00_fcport_event_handler(vha, &ea);
+ sp->free(sp);
+}
+
+/* Get FC4 Feature with Nport ID. */
+int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+
+ if (!vha->flags.online)
+ return rval;
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ return rval;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gffid";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
+ GFF_ID_RSP_SIZE);
+
+ ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
+ ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
+ ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
+
+ sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla24xx_async_gffid_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2132,
+ "Async-%s hdl=%x %8phC.\n", sp->name,
+ sp->handle, fcport->port_name);
+
+ return rval;
+done_free_sp:
+ sp->free(sp);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ return rval;
+}
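
Annotation: the qla24xx_async_gffid() completion handler added above pulls one 4-bit FC-4 feature field per protocol out of the GFF_ID response and keeps only the low nibble. A minimal userspace sketch of that decoding follows; it uses 7 and 23 as stand-in values for the driver's GFF_FCP_SCSI_OFFSET and GFF_NVME_OFFSET macros (the macro names appear in the hunks, their values do not), so treat the offsets and array length as assumptions, not a restatement of FC-GS-7.

/*
 * Illustrative only: decode FC-4 feature nibbles the way the patch does
 * for FCP (fc4_type) and NVMe (fc4f_nvme).  Offsets and buffer size are
 * assumed for the sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define GFF_FEATURES_LEN 128   /* assumed size of the features array */
#define GFF_FCP_SCSI_OFF 7     /* assumed value of GFF_FCP_SCSI_OFFSET */
#define GFF_NVME_OFF     23    /* assumed value of GFF_NVME_OFFSET (type 28h) */

static uint8_t gff_feature_nibble(const uint8_t *features, unsigned int off)
{
	/* Each FC-4 type contributes 4 bits; the driver keeps the low nibble. */
	return features[off] & 0xf;
}

int main(void)
{
	uint8_t features[GFF_FEATURES_LEN] = { 0 };

	features[GFF_FCP_SCSI_OFF] = 0x02;  /* pretend FCP support reported */
	features[GFF_NVME_OFF]     = 0x01;  /* pretend NVMe support reported */

	printf("fc4_type nibble  = %#x\n",
	       gff_feature_nibble(features, GFF_FCP_SCSI_OFF));
	printf("fc4f_nvme nibble = %#x\n",
	       gff_feature_nibble(features, GFF_NVME_OFF));
	return 0;
}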
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 0391fc317003..072ad1aa5505 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -37,8 +37,11 @@ static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
static int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
+static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
struct event_arg *);
+static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
+ struct event_arg *);
/* SRB Extensions ---------------------------------------------------------- */
@@ -141,7 +144,7 @@ qla2x00_async_login_sp_done(void *ptr, int res)
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct event_arg ea;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20dd,
"%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
sp->fcport->flags &= ~FCF_ASYNC_SENT;
@@ -191,6 +194,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
lio->timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_login_sp_done;
lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
+
+ if (fcport->fc4f_nvme)
+ lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
+
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
rval = qla2x00_start_sp(sp);
@@ -327,38 +334,38 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
u16 i, n, found = 0, loop_id;
port_id_t id;
u64 wwn;
- u8 opt = 0;
+ u8 opt = 0, current_login_state;
fcport = ea->fcport;
if (ea->rc) { /* rval */
if (fcport->login_retry == 0) {
fcport->login_retry = vha->hw->login_retry_count;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "GNL failed Port login retry %8phN, retry cnt=%d.\n",
- fcport->port_name, fcport->login_retry);
+ ql_dbg(ql_dbg_disc, vha, 0x20de,
+ "GNL failed Port login retry %8phN, retry cnt=%d.\n",
+ fcport->port_name, fcport->login_retry);
}
return;
}
if (fcport->last_rscn_gen != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20df,
"%s %8phC rscn gen changed rscn %d|%d \n",
__func__, fcport->port_name,
fcport->last_rscn_gen, fcport->rscn_gen);
qla24xx_post_gidpn_work(vha, fcport);
return;
} else if (fcport->last_login_gen != fcport->login_gen) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %8phC login gen changed login %d|%d \n",
- __func__, fcport->port_name,
- fcport->last_login_gen, fcport->login_gen);
+ ql_dbg(ql_dbg_disc, vha, 0x20e0,
+ "%s %8phC login gen changed login %d|%d\n",
+ __func__, fcport->port_name,
+ fcport->last_login_gen, fcport->login_gen);
return;
}
n = ea->data[0] / sizeof(struct get_name_list_extended);
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20e1,
"%s %d %8phC n %d %02x%02x%02x lid %d \n",
__func__, __LINE__, fcport->port_name, n,
fcport->d_id.b.domain, fcport->d_id.b.area,
@@ -380,20 +387,20 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
loop_id = le16_to_cpu(e->nport_handle);
loop_id = (loop_id & 0x7fff);
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
- __func__, fcport->port_name,
- e->current_login_state, fcport->fw_login_state,
- id.b.domain, id.b.area, id.b.al_pa,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
+ ql_dbg(ql_dbg_disc, vha, 0x20e2,
+ "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
+ __func__, fcport->port_name,
+ e->current_login_state, fcport->fw_login_state,
+ id.b.domain, id.b.area, id.b.al_pa,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
if ((id.b24 != fcport->d_id.b24) ||
((fcport->loop_id != FC_NO_LOOP_ID) &&
(fcport->loop_id != loop_id))) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20e3,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
qlt_schedule_sess_for_deletion(fcport, 1);
return;
}
@@ -414,24 +421,28 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
fcport->login_pause = 1;
}
- switch (e->current_login_state) {
+ if (fcport->fc4f_nvme)
+ current_login_state = e->current_login_state >> 4;
+ else
+ current_login_state = e->current_login_state & 0xf;
+
+ switch (current_login_state) {
case DSC_LS_PRLI_COMP:
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, fcport->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20e4,
+ "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, fcport->port_name);
opt = PDO_FORCE_ADISC;
qla24xx_post_gpdb_work(vha, fcport, opt);
break;
-
case DSC_LS_PORT_UNAVAIL:
default:
if (fcport->loop_id == FC_NO_LOOP_ID) {
qla2x00_find_new_loop_id(vha, fcport);
fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
}
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC \n",
- __func__, __LINE__, fcport->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20e5,
+ "%s %d %8phC\n",
+ __func__, __LINE__, fcport->port_name);
qla24xx_fcport_handle_login(vha, fcport);
break;
}
@@ -456,7 +467,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
qla2x00_find_fcport_by_wwpn(vha,
e->port_name, 0);
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20e6,
"%s %d %8phC post del sess\n",
__func__, __LINE__,
conflict_fcport->port_name);
@@ -487,7 +498,7 @@ qla24xx_async_gnl_sp_done(void *s, int res)
u64 wwn;
struct list_head h;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20e7,
"Async done-%s res %x mb[1]=%x mb[2]=%x \n",
sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
sp->u.iocb_cmd.u.mbx.in_mb[2]);
@@ -512,7 +523,7 @@ qla24xx_async_gnl_sp_done(void *s, int res)
set_bit(loop_id, vha->hw->loop_id_map);
wwn = wwn_to_u64(e->port_name);
- ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
"%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
__func__, (void *)&wwn, e->port_id[2], e->port_id[1],
e->port_id[0], e->current_login_state, e->last_login_state,
@@ -551,7 +562,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
if (!vha->flags.online)
goto done;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20d9,
"Async-gnlist WWPN %8phC \n", fcport->port_name);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
@@ -598,9 +609,9 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
if (rval != QLA_SUCCESS)
goto done_free_sp;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s - OUT WWPN %8phC hndl %x\n",
- sp->name, fcport->port_name, sp->handle);
+ ql_dbg(ql_dbg_disc, vha, 0x20da,
+ "Async-%s - OUT WWPN %8phC hndl %x\n",
+ sp->name, fcport->port_name, sp->handle);
return rval;
@@ -635,7 +646,7 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
int rval = QLA_SUCCESS;
struct event_arg ea;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20db,
"Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
sp->name, res, fcport->port_name, mb[1], mb[2]);
@@ -665,6 +676,104 @@ gpd_error_out:
sp->free(sp);
}
+static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_PRLI);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+
+ return qla2x00_post_work(vha, e);
+}
+
+static void
+qla2x00_async_prli_sp_done(void *ptr, int res)
+{
+ srb_t *sp = ptr;
+ struct scsi_qla_host *vha = sp->vha;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct event_arg ea;
+
+ ql_dbg(ql_dbg_disc, vha, 0x2129,
+ "%s %8phC res %d \n", __func__,
+ sp->fcport->port_name, res);
+
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
+
+ if (!test_bit(UNLOADING, &vha->dpc_flags)) {
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_PRLI_DONE;
+ ea.fcport = sp->fcport;
+ ea.data[0] = lio->u.logio.data[0];
+ ea.data[1] = lio->u.logio.data[1];
+ ea.iop[0] = lio->u.logio.iop[0];
+ ea.iop[1] = lio->u.logio.iop[1];
+ ea.sp = sp;
+
+ qla2x00_fcport_event_handler(vha, &ea);
+ }
+
+ sp->free(sp);
+}
+
+int
+qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ srb_t *sp;
+ struct srb_iocb *lio;
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (!vha->flags.online)
+ return rval;
+
+ if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
+ fcport->fw_login_state == DSC_LS_PLOGI_COMP ||
+ fcport->fw_login_state == DSC_LS_PRLI_PEND)
+ return rval;
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ return rval;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ fcport->logout_completed = 0;
+
+ sp->type = SRB_PRLI_CMD;
+ sp->name = "prli";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ lio = &sp->u.iocb_cmd;
+ lio->timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_prli_sp_done;
+ lio->u.logio.flags = 0;
+
+ if (fcport->fc4f_nvme)
+ lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ fcport->flags |= FCF_LOGIN_NEEDED;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ goto done_free_sp;
+ }
+
+ ql_dbg(ql_dbg_disc, vha, 0x211b,
+ "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n",
+ fcport->port_name, sp->handle, fcport->loop_id,
+ fcport->d_id.b24, fcport->login_retry);
+
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ return rval;
+}
+
static int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport,
u8 opt)
{
@@ -701,8 +810,8 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
if (pd == NULL) {
- ql_log(ql_log_warn, vha, 0xffff,
- "Failed to allocate port database structure.\n");
+ ql_log(ql_log_warn, vha, 0xd043,
+ "Failed to allocate port database structure.\n");
goto done_free_sp;
}
memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
@@ -734,9 +843,9 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
if (rval != QLA_SUCCESS)
goto done_free_sp;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s %8phC hndl %x opt %x\n",
- sp->name, fcport->port_name, sp->handle, opt);
+ ql_dbg(ql_dbg_disc, vha, 0x20dc,
+ "Async-%s %8phC hndl %x opt %x\n",
+ sp->name, fcport->port_name, sp->handle, opt);
return rval;
@@ -760,27 +869,27 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
fcport->flags &= ~FCF_ASYNC_SENT;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20d2,
"%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name,
fcport->disc_state, fcport->fw_login_state, rval);
if (ea->sp->gen2 != fcport->login_gen) {
/* target side must have changed it. */
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
"%s %8phC generation changed rscn %d|%d login %d|%d \n",
__func__, fcport->port_name, fcport->last_rscn_gen,
fcport->rscn_gen, fcport->last_login_gen,
fcport->login_gen);
return;
} else if (ea->sp->gen1 != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post gidpn\n",
+ ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
qla24xx_post_gidpn_work(vha, fcport);
return;
}
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post del sess\n",
+ ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
__func__, __LINE__, fcport->port_name);
qlt_schedule_sess_for_deletion_lock(fcport);
return;
@@ -797,14 +906,14 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
if (!IS_IIDMA_CAPABLE(vha->hw) ||
!vha->hw->flags.gpsc_supported) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20d6,
"%s %d %8phC post upd_fcport fcp_cnt %d\n",
__func__, __LINE__, fcport->port_name,
vha->fcport_count);
qla24xx_post_upd_fcport_work(vha, fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20d7,
"%s %d %8phC post gpsc fcp_cnt %d\n",
__func__, __LINE__, fcport->port_name,
vha->fcport_count);
@@ -823,7 +932,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
if (fcport->scan_state != QLA_FCPORT_FOUND)
return 0;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20d8,
"%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, fcport->login_pause, fcport->flags,
@@ -854,14 +963,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
switch (fcport->disc_state) {
case DSC_DELETED:
if (fcport->loop_id == FC_NO_LOOP_ID) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post gnl\n",
- __func__, __LINE__, fcport->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20bd,
+ "%s %d %8phC post gnl\n",
+ __func__, __LINE__, fcport->port_name);
qla24xx_async_gnl(vha, fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post login\n",
- __func__, __LINE__, fcport->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20bf,
+ "%s %d %8phC post login\n",
+ __func__, __LINE__, fcport->port_name);
fcport->disc_state = DSC_LOGIN_PEND;
qla2x00_post_async_login_work(vha, fcport, NULL);
}
@@ -878,16 +987,16 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
if (fcport->flags & FCF_FCP2_DEVICE) {
u8 opt = PDO_FORCE_ADISC;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, fcport->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20c9,
+ "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, fcport->port_name);
fcport->disc_state = DSC_GPDB;
qla24xx_post_gpdb_work(vha, fcport, opt);
} else {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post login \n",
- __func__, __LINE__, fcport->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20cf,
+ "%s %d %8phC post login\n",
+ __func__, __LINE__, fcport->port_name);
fcport->disc_state = DSC_LOGIN_PEND;
qla2x00_post_async_login_work(vha, fcport, NULL);
}
@@ -895,18 +1004,18 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
break;
case DSC_LOGIN_FAILED:
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post gidpn \n",
- __func__, __LINE__, fcport->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20d0,
+ "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
qla24xx_post_gidpn_work(vha, fcport);
break;
case DSC_LOGIN_COMPLETE:
/* recheck login state */
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post gpdb \n",
- __func__, __LINE__, fcport->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20d1,
+ "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, fcport->port_name);
qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC);
break;
@@ -923,10 +1032,10 @@ void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
{
fcport->rscn_gen++;
- ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
- "%s %8phC DS %d LS %d\n",
- __func__, fcport->port_name, fcport->disc_state,
- fcport->fw_login_state);
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x210c,
+ "%s %8phC DS %d LS %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state);
if (fcport->flags & FCF_ASYNC_SENT)
return;
@@ -993,14 +1102,14 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
return;
}
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
- __func__, fcport->port_name, fcport->disc_state,
- fcport->fw_login_state, fcport->login_pause,
- fcport->deleted, fcport->conflict,
- fcport->last_rscn_gen, fcport->rscn_gen,
- fcport->last_login_gen, fcport->login_gen,
- fcport->flags);
+ ql_dbg(ql_dbg_disc, vha, 0x2102,
+ "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, fcport->login_pause,
+ fcport->deleted, fcport->conflict,
+ fcport->last_rscn_gen, fcport->rscn_gen,
+ fcport->last_login_gen, fcport->login_gen,
+ fcport->flags);
if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
(fcport->fw_login_state == DSC_LS_PRLI_PEND))
@@ -1023,7 +1132,7 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
}
if (fcport->last_rscn_gen != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post gidpn\n",
+ ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
qla24xx_async_gidpn(vha, fcport);
@@ -1041,6 +1150,20 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
switch (ea->event) {
case FCME_RELOGIN:
+ case FCME_RSCN:
+ case FCME_GIDPN_DONE:
+ case FCME_GPSC_DONE:
+ case FCME_GPNID_DONE:
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
+ test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
+ return;
+ break;
+ default:
+ break;
+ }
+
+ switch (ea->event) {
+ case FCME_RELOGIN:
if (test_bit(UNLOADING, &vha->dpc_flags))
return;
@@ -1056,10 +1179,10 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
/* cable moved */
rc = qla24xx_post_gpnid_work(vha, &ea->id);
if (rc) {
- ql_log(ql_log_warn, vha, 0xffff,
- "RSCN GPNID work failed %02x%02x%02x\n",
- ea->id.b.domain, ea->id.b.area,
- ea->id.b.al_pa);
+ ql_log(ql_log_warn, vha, 0xd044,
+ "RSCN GPNID work failed %02x%02x%02x\n",
+ ea->id.b.domain, ea->id.b.area,
+ ea->id.b.al_pa);
}
} else {
ea->fcport = fcport;
@@ -1070,14 +1193,14 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
case RSCN_DOM_ADDR:
if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
mask = 0xffff00;
- ql_log(ql_dbg_async, vha, 0xffff,
- "RSCN: Area 0x%06x was affected\n",
- ea->id.b24);
+ ql_dbg(ql_dbg_async, vha, 0x5044,
+ "RSCN: Area 0x%06x was affected\n",
+ ea->id.b24);
} else {
mask = 0xff0000;
- ql_log(ql_dbg_async, vha, 0xffff,
- "RSCN: Domain 0x%06x was affected\n",
- ea->id.b24);
+ ql_dbg(ql_dbg_async, vha, 0x507a,
+ "RSCN: Domain 0x%06x was affected\n",
+ ea->id.b24);
}
rid = ea->id.b24 & mask;
@@ -1092,9 +1215,9 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
break;
case RSCN_FAB_ADDR:
default:
- ql_log(ql_log_warn, vha, 0xffff,
- "RSCN: Fabric was affected. Addr format %d\n",
- ea->id.b.rsvd_1);
+ ql_log(ql_log_warn, vha, 0xd045,
+ "RSCN: Fabric was affected. Addr format %d\n",
+ ea->id.b.rsvd_1);
qla2x00_mark_all_devices_lost(vha, 1);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
@@ -1112,12 +1235,18 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */
qla24xx_handle_plogi_done_event(vha, ea);
break;
+ case FCME_PRLI_DONE:
+ qla24xx_handle_prli_done_event(vha, ea);
+ break;
case FCME_GPDB_DONE:
qla24xx_handle_gpdb_event(vha, ea);
break;
case FCME_GPNID_DONE:
qla24xx_handle_gpnid_event(vha, ea);
break;
+ case FCME_GFFID_DONE:
+ qla24xx_handle_gffid_event(vha, ea);
+ break;
case FCME_DELETE_DONE:
qla24xx_handle_delete_done_event(vha, ea);
break;
@@ -1294,6 +1423,27 @@ qla24xx_async_abort_command(srb_t *sp)
}
static void
+qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
+{
+ switch (ea->data[0]) {
+ case MBS_COMMAND_COMPLETE:
+ ql_dbg(ql_dbg_disc, vha, 0x2118,
+ "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, ea->fcport->port_name);
+
+ ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
+ ea->fcport->logout_on_delete = 1;
+ qla24xx_post_gpdb_work(vha, ea->fcport, 0);
+ break;
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x2119,
+ "%s %d %8phC unhandle event of %x\n",
+ __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
+ break;
+ }
+}
+
+static void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
port_id_t cid; /* conflict Nport id */
@@ -1305,15 +1455,22 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
* force a relogin attempt via implicit LOGO, PLOGI, and PRLI
* requests.
*/
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post gpdb\n",
- __func__, __LINE__, ea->fcport->port_name);
- ea->fcport->chip_reset = vha->hw->chip_reset;
- ea->fcport->logout_on_delete = 1;
- qla24xx_post_gpdb_work(vha, ea->fcport, 0);
+ if (ea->fcport->fc4f_nvme) {
+ ql_dbg(ql_dbg_disc, vha, 0x2117,
+ "%s %d %8phC post prli\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ qla24xx_post_prli_work(vha, ea->fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ea,
+ "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
+ ea->fcport->logout_on_delete = 1;
+ qla24xx_post_gpdb_work(vha, ea->fcport, 0);
+ }
break;
case MBS_COMMAND_ERROR:
- ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC cmd error %x\n",
+ ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
__func__, __LINE__, ea->fcport->port_name, ea->data[1]);
ea->fcport->flags &= ~FCF_ASYNC_SENT;
@@ -1330,10 +1487,10 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
cid.b.al_pa = ea->iop[1] & 0xff;
cid.b.rsvd_1 = 0;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC LoopID 0x%x in use post gnl\n",
- __func__, __LINE__, ea->fcport->port_name,
- ea->fcport->loop_id);
+ ql_dbg(ql_dbg_disc, vha, 0x20ec,
+ "%s %d %8phC LoopID 0x%x in use post gnl\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->loop_id);
if (IS_SW_RESV_ADDR(cid)) {
set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
@@ -1344,11 +1501,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
qla24xx_post_gnl_work(vha, ea->fcport);
break;
case MBS_PORT_ID_USED:
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
- __func__, __LINE__, ea->fcport->port_name,
- ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
- ea->fcport->d_id.b.al_pa);
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
+ ea->fcport->d_id.b.al_pa);
qla2x00_clear_loop_id(ea->fcport);
qla24xx_post_gidpn_work(vha, ea->fcport);
@@ -2524,6 +2681,13 @@ cont_alloc:
ha->chain_offset = dump_size;
dump_size += mq_size + fce_size;
+ if (ha->exchoffld_buf)
+ dump_size += sizeof(struct qla2xxx_offld_chain) +
+ ha->exchoffld_size;
+ if (ha->exlogin_buf)
+ dump_size += sizeof(struct qla2xxx_offld_chain) +
+ ha->exlogin_size;
+
allocate:
ha->fw_dump = vmalloc(dump_size);
if (!ha->fw_dump) {
@@ -2709,7 +2873,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
if (ql2xexlogins)
ha->flags.exlogins_enabled = 1;
- if (ql2xexchoffld)
+ if (qla_is_exch_offld_enabled(vha))
ha->flags.exchoffld_enabled = 1;
rval = qla2x00_execute_fw(vha, srisc_address);
@@ -2946,7 +3110,8 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
}
/* Move PUREX, ABTS RX & RIDA to ATIOQ */
- if (ql2xmvasynctoatio) {
+ if (ql2xmvasynctoatio &&
+ (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
if (qla_tgt_mode_enabled(vha) ||
qla_dual_mode_enabled(vha))
ha->fw_options[2] |= BIT_11;
@@ -2954,11 +3119,25 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
ha->fw_options[2] &= ~BIT_11;
}
- ql_dbg(ql_dbg_init, vha, 0xffff,
- "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
- __func__, ha->fw_options[1], ha->fw_options[2],
- ha->fw_options[3], vha->host->active_mode);
- qla2x00_set_fw_options(vha, ha->fw_options);
+ if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ /*
+ * Tell FW to track each exchange to prevent
+ * driver from using stale exchange.
+ */
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha))
+ ha->fw_options[2] |= BIT_4;
+ else
+ ha->fw_options[2] &= ~BIT_4;
+ }
+
+ ql_dbg(ql_dbg_init, vha, 0x00e8,
+ "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
+ __func__, ha->fw_options[1], ha->fw_options[2],
+ ha->fw_options[3], vha->host->active_mode);
+
+ if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
+ qla2x00_set_fw_options(vha, ha->fw_options);
/* Update Serial Link options. */
if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
@@ -3036,7 +3215,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->rid = cpu_to_le16(rid);
if (ha->flags.msix_enabled) {
msix = &ha->msix_entries[1];
- ql_dbg(ql_dbg_init, vha, 0x00fd,
+ ql_dbg(ql_dbg_init, vha, 0x0019,
"Registering vector 0x%x for base que.\n",
msix->entry);
icb->msix = cpu_to_le16(msix->entry);
@@ -3166,7 +3345,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
/* FA-WWPN Status */
ha->flags.fawwpn_enabled =
(mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
- ql_dbg(ql_dbg_init, vha, 0x0141, "FA-WWPN Support: %s.\n",
+ ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
(ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
}
@@ -3178,7 +3357,7 @@ next_check:
} else {
ql_dbg(ql_dbg_init, vha, 0x00d3,
"Init Firmware -- success.\n");
- ha->flags.fw_started = 1;
+ QLA_FW_STARTED(ha);
}
return (rval);
@@ -3840,10 +4019,10 @@ qla2x00_rport_del(void *data)
fcport->drport = NULL;
spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
if (rport) {
- ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
- "%s %8phN. rport %p roles %x \n",
- __func__, fcport->port_name, rport,
- rport->roles);
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
+ "%s %8phN. rport %p roles %x\n",
+ __func__, fcport->port_name, rport,
+ rport->roles);
fc_remote_port_delete(rport);
}
@@ -3883,7 +4062,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
fcport->logout_on_delete = 1;
if (!fcport->ct_desc.ct_sns) {
- ql_log(ql_log_warn, vha, 0xffff,
+ ql_log(ql_log_warn, vha, 0xd049,
"Failed to allocate ct_sns request.\n");
kfree(fcport);
fcport = NULL;
@@ -3985,7 +4164,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
if (LOOP_TRANSITION(vha)) {
- ql_dbg(ql_dbg_disc, vha, 0x201e,
+ ql_dbg(ql_dbg_disc, vha, 0x2099,
"Needs RSCN update and loop transition.\n");
rval = QLA_FUNCTION_FAILED;
}
@@ -4085,7 +4264,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
if (rval != QLA_SUCCESS)
goto cleanup_allocation;
- ql_dbg(ql_dbg_disc, vha, 0x2017,
+ ql_dbg(ql_dbg_disc, vha, 0x2011,
"Entries in ID list (%d).\n", entries);
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
(uint8_t *)ha->gid_list,
@@ -4094,7 +4273,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
/* Allocate temporary fcport for any new fcports discovered. */
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
- ql_log(ql_log_warn, vha, 0x2018,
+ ql_log(ql_log_warn, vha, 0x2012,
"Memory allocation failed for fcport.\n");
rval = QLA_MEMORY_ALLOC_FAILED;
goto cleanup_allocation;
@@ -4109,7 +4288,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
fcport->port_type != FCT_BROADCAST &&
(fcport->flags & FCF_FABRIC_DEVICE) == 0) {
- ql_dbg(ql_dbg_disc, vha, 0x2019,
+ ql_dbg(ql_dbg_disc, vha, 0x2096,
"Marking port lost loop_id=0x%04x.\n",
fcport->loop_id);
@@ -4154,11 +4333,11 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
if (rval2 != QLA_SUCCESS) {
- ql_dbg(ql_dbg_disc, vha, 0x201a,
+ ql_dbg(ql_dbg_disc, vha, 0x2097,
"Failed to retrieve fcport information "
"-- get_port_database=%x, loop_id=0x%04x.\n",
rval2, new_fcport->loop_id);
- ql_dbg(ql_dbg_disc, vha, 0x201b,
+ ql_dbg(ql_dbg_disc, vha, 0x2105,
"Scheduling resync.\n");
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
continue;
@@ -4207,7 +4386,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
- ql_log(ql_log_warn, vha, 0x201c,
+ ql_log(ql_log_warn, vha, 0xd031,
"Failed to allocate memory for fcport.\n");
rval = QLA_MEMORY_ALLOC_FAILED;
goto cleanup_allocation;
@@ -4230,7 +4409,7 @@ cleanup_allocation:
kfree(new_fcport);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_disc, vha, 0x201d,
+ ql_dbg(ql_dbg_disc, vha, 0x2098,
"Configure local loop error exit: rval=%x.\n", rval);
}
@@ -4300,10 +4479,10 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
if (fcport->port_type == FCT_TARGET)
rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %8phN. rport %p is %s mode \n",
- __func__, fcport->port_name, rport,
- (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");
+ ql_dbg(ql_dbg_disc, vha, 0x20ee,
+ "%s %8phN. rport %p is %s mode\n",
+ __func__, fcport->port_name, rport,
+ (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");
fc_remote_port_rolechg(rport, rport_ids.roles);
}
@@ -4331,7 +4510,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
if (IS_SW_RESV_ADDR(fcport->d_id))
return;
- ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC \n",
+ ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
__func__, fcport->port_name);
if (IS_QLAFX00(vha->hw)) {
@@ -4344,6 +4523,11 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->deleted = 0;
fcport->logout_on_delete = 1;
+ if (fcport->fc4f_nvme) {
+ qla_nvme_register_remote(vha, fcport);
+ return;
+ }
+
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
qla2x00_iidma_fcport(vha, fcport);
qla24xx_update_fcport_fcp_prio(vha, fcport);
@@ -4398,7 +4582,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
loop_id = SNS_FL_PORT;
rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_disc, vha, 0x201f,
+ ql_dbg(ql_dbg_disc, vha, 0x20a0,
"MBX_GET_PORT_NAME failed, No FL Port.\n");
vha->device_flags &= ~SWITCH_FOUND;
@@ -4436,7 +4620,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
return rval;
}
if (mb[0] != MBS_COMMAND_COMPLETE) {
- ql_dbg(ql_dbg_disc, vha, 0x2042,
+ ql_dbg(ql_dbg_disc, vha, 0x20a1,
"Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
"mb[6]=%x mb[7]=%x.\n", loop_id, mb[0], mb[1],
mb[2], mb[6], mb[7]);
@@ -4446,22 +4630,39 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
if (qla2x00_rft_id(vha)) {
/* EMPTY */
- ql_dbg(ql_dbg_disc, vha, 0x2045,
+ ql_dbg(ql_dbg_disc, vha, 0x20a2,
"Register FC-4 TYPE failed.\n");
+ if (test_bit(LOOP_RESYNC_NEEDED,
+ &vha->dpc_flags))
+ break;
}
- if (qla2x00_rff_id(vha)) {
+ if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
/* EMPTY */
- ql_dbg(ql_dbg_disc, vha, 0x2049,
+ ql_dbg(ql_dbg_disc, vha, 0x209a,
"Register FC-4 Features failed.\n");
+ if (test_bit(LOOP_RESYNC_NEEDED,
+ &vha->dpc_flags))
+ break;
+ }
+ if (vha->flags.nvme_enabled) {
+ if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
+ ql_dbg(ql_dbg_disc, vha, 0x2049,
+ "Register NVME FC Type Features failed.\n");
+ }
}
if (qla2x00_rnn_id(vha)) {
/* EMPTY */
- ql_dbg(ql_dbg_disc, vha, 0x204f,
+ ql_dbg(ql_dbg_disc, vha, 0x2104,
"Register Node Name failed.\n");
+ if (test_bit(LOOP_RESYNC_NEEDED,
+ &vha->dpc_flags))
+ break;
} else if (qla2x00_rsnn_nn(vha)) {
/* EMPTY */
- ql_dbg(ql_dbg_disc, vha, 0x2053,
- "Register Symobilic Node Name failed.\n");
+ ql_dbg(ql_dbg_disc, vha, 0x209b,
+ "Register Symbolic Node Name failed.\n");
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
}
}
@@ -4482,6 +4683,9 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
break;
} while (0);
+ if (!vha->nvme_local_port && vha->flags.nvme_enabled)
+ qla_nvme_register_hba(vha);
+
if (rval)
ql_dbg(ql_dbg_disc, vha, 0x2068,
"Configure fabric error exit rval=%d.\n", rval);
@@ -4527,30 +4731,41 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
swl = ha->swl;
if (!swl) {
/*EMPTY*/
- ql_dbg(ql_dbg_disc, vha, 0x2054,
+ ql_dbg(ql_dbg_disc, vha, 0x209c,
"GID_PT allocations failed, fallback on GA_NXT.\n");
} else {
memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
swl = NULL;
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ return rval;
} else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
swl = NULL;
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ return rval;
} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
swl = NULL;
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ return rval;
} else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
swl = NULL;
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ return rval;
}
/* If other queries succeeded probe for FC-4 type */
- if (swl)
+ if (swl) {
qla2x00_gff_id(vha, swl);
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ return rval;
+ }
}
swl_idx = 0;
/* Allocate temporary fcport for any new fcports discovered. */
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
- ql_log(ql_log_warn, vha, 0x205e,
+ ql_log(ql_log_warn, vha, 0x209d,
"Failed to allocate memory for fcport.\n");
return (QLA_MEMORY_ALLOC_FAILED);
}
@@ -4588,6 +4803,16 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
new_fcport->fp_speed = swl[swl_idx].fp_speed;
new_fcport->fc4_type = swl[swl_idx].fc4_type;
+ new_fcport->nvme_flag = 0;
+ if (vha->flags.nvme_enabled &&
+ swl[swl_idx].fc4f_nvme) {
+ new_fcport->fc4f_nvme =
+ swl[swl_idx].fc4f_nvme;
+ ql_log(ql_log_info, vha, 0x2131,
+ "FOUND: NVME port %8phC as FC Type 28h\n",
+ new_fcport->port_name);
+ }
+
if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
last_dev = 1;
}
@@ -4597,7 +4822,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
/* Send GA_NXT to the switch */
rval = qla2x00_ga_nxt(vha, new_fcport);
if (rval != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x2064,
+ ql_log(ql_log_warn, vha, 0x209e,
"SNS scan failed -- assuming "
"zero-entry result.\n");
rval = QLA_SUCCESS;
@@ -4610,7 +4835,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
wrap.b24 = new_fcport->d_id.b24;
first_dev = 0;
} else if (new_fcport->d_id.b24 == wrap.b24) {
- ql_dbg(ql_dbg_disc, vha, 0x2065,
+ ql_dbg(ql_dbg_disc, vha, 0x209f,
"Device wrap (%02x%02x%02x).\n",
new_fcport->d_id.b.domain,
new_fcport->d_id.b.area,
@@ -4722,7 +4947,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
nxt_d_id.b24 = new_fcport->d_id.b24;
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (new_fcport == NULL) {
- ql_log(ql_log_warn, vha, 0x2066,
+ ql_log(ql_log_warn, vha, 0xd032,
"Memory allocation failed for fcport.\n");
return (QLA_MEMORY_ALLOC_FAILED);
}
@@ -4753,7 +4978,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
(fcport->flags & FCF_FCP2_DEVICE) == 0 &&
fcport->port_type != FCT_INITIATOR &&
fcport->port_type != FCT_BROADCAST) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20f0,
"%s %d %8phC post del sess\n",
__func__, __LINE__,
fcport->port_name);
@@ -5473,6 +5698,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
struct scsi_qla_host *vp;
unsigned long flags;
fc_port_t *fcport;
+ u16 i;
/* For ISP82XX, driver waits for completion of the commands.
* online flag should be set.
@@ -5498,7 +5724,12 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
ha->current_topology = 0;
ha->flags.fw_started = 0;
ha->flags.fw_init_done = 0;
- ha->chip_reset++;
+ ha->base_qpair->chip_reset++;
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i])
+ ha->queue_pair_map[i]->chip_reset =
+ ha->base_qpair->chip_reset;
+ }
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -6353,8 +6584,8 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
"-> template size %x bytes\n", dlen);
if (dlen > risc_size * sizeof(*dcode)) {
ql_log(ql_log_warn, vha, 0x0167,
- "Failed fwdump template exceeds array by %x bytes\n",
- (uint32_t)(dlen - risc_size * sizeof(*dcode)));
+ "Failed fwdump template exceeds array by %zx bytes\n",
+ (size_t)(dlen - risc_size * sizeof(*dcode)));
goto default_template;
}
ha->fw_dump_template_len = dlen;
@@ -6655,8 +6886,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
"-> template size %x bytes\n", dlen);
if (dlen > risc_size * sizeof(*fwcode)) {
ql_log(ql_log_warn, vha, 0x0177,
- "Failed fwdump template exceeds array by %x bytes\n",
- (uint32_t)(dlen - risc_size * sizeof(*fwcode)));
+ "Failed fwdump template exceeds array by %zx bytes\n",
+ (size_t)(dlen - risc_size * sizeof(*fwcode)));
goto default_template;
}
ha->fw_dump_template_len = dlen;
@@ -6790,7 +7021,7 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
ret = qla2x00_stop_firmware(vha);
}
- ha->flags.fw_started = 0;
+ QLA_FW_STOPPED(ha);
ha->flags.fw_init_done = 0;
}
@@ -7322,16 +7553,31 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha)
ha->fw_options[2] &= ~BIT_11;
}
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha)) {
+ /* FW auto send SCSI status during */
+ ha->fw_options[1] |= BIT_8;
+ ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;
+
+ /* FW perform Exchange validation */
+ ha->fw_options[2] |= BIT_4;
+ } else {
+ ha->fw_options[1] &= ~BIT_8;
+ ha->fw_options[10] &= 0x00ff;
+
+ ha->fw_options[2] &= ~BIT_4;
+ }
+
if (ql2xetsenable) {
/* Enable ETS Burst. */
memset(ha->fw_options, 0, sizeof(ha->fw_options));
ha->fw_options[2] |= BIT_9;
}
- ql_dbg(ql_dbg_init, vha, 0xffff,
- "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
- __func__, ha->fw_options[1], ha->fw_options[2],
- ha->fw_options[3], vha->host->active_mode);
+ ql_dbg(ql_dbg_init, vha, 0x00e9,
+ "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
+ __func__, ha->fw_options[1], ha->fw_options[2],
+ ha->fw_options[3], vha->host->active_mode);
qla2x00_set_fw_options(vha, ha->fw_options);
}
@@ -7512,7 +7758,8 @@ qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
return ret;
}
-struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int vp_idx)
+struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
+ int vp_idx, bool startqp)
{
int rsp_id = 0;
int req_id = 0;
@@ -7539,6 +7786,9 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
qpair->hw = vha->hw;
qpair->vha = vha;
+ qpair->qp_lock_ptr = &qpair->qp_lock;
+ spin_lock_init(&qpair->qp_lock);
+ qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
/* Assign available que pair id */
mutex_lock(&ha->mq_lock);
@@ -7554,13 +7804,18 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
ha->queue_pair_map[qpair_id] = qpair;
qpair->id = qpair_id;
qpair->vp_idx = vp_idx;
+ INIT_LIST_HEAD(&qpair->hints_list);
+ qpair->chip_reset = ha->base_qpair->chip_reset;
+ qpair->enable_class_2 = ha->base_qpair->enable_class_2;
+ qpair->enable_explicit_conf =
+ ha->base_qpair->enable_explicit_conf;
for (i = 0; i < ha->msix_count; i++) {
msix = &ha->msix_entries[i];
if (msix->in_use)
continue;
qpair->msix = msix;
- ql_log(ql_dbg_multiq, vha, 0xc00f,
+ ql_dbg(ql_dbg_multiq, vha, 0xc00f,
"Vector %x selected for qpair\n", msix->vector);
break;
}
@@ -7572,11 +7827,14 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
qpair->msix->in_use = 1;
list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
+ qpair->pdev = ha->pdev;
+ if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
+ qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
mutex_unlock(&ha->mq_lock);
/* Create response queue first */
- rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair);
+ rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
if (!rsp_id) {
ql_log(ql_log_warn, vha, 0x0185,
"Failed to create response queue.\n");
@@ -7586,7 +7844,8 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
qpair->rsp = ha->rsp_q_map[rsp_id];
/* Create request queue */
- req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos);
+ req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
+ startqp);
if (!req_id) {
ql_log(ql_log_warn, vha, 0x0186,
"Failed to create request queue.\n");
@@ -7595,6 +7854,9 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
qpair->req = ha->req_q_map[req_id];
qpair->rsp->req = qpair->req;
+ qpair->rsp->qpair = qpair;
+ /* init qpair to this cpu. Will adjust at run time. */
+ qla_cpu_update(qpair, smp_processor_id());
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4)
@@ -7603,7 +7865,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
if (!qpair->srb_mempool) {
- ql_log(ql_log_warn, vha, 0x0191,
+ ql_log(ql_log_warn, vha, 0xd036,
"Failed to create srb mempool for qpair %d\n",
qpair->id);
goto fail_mempool;
@@ -7645,9 +7907,12 @@ fail_qid_map:
int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
{
- int ret;
+ int ret = QLA_FUNCTION_FAILED;
struct qla_hw_data *ha = qpair->hw;
+ if (!vha->flags.qpairs_req_created && !vha->flags.qpairs_rsp_created)
+ goto fail;
+
qpair->delete_in_progress = 1;
while (atomic_read(&qpair->ref_count))
msleep(500);
@@ -7664,8 +7929,11 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
clear_bit(qpair->id, ha->qpair_qid_map);
ha->num_qpairs--;
list_del(&qpair->qp_list_elem);
- if (list_empty(&vha->qp_list))
+ if (list_empty(&vha->qp_list)) {
vha->flags.qpairs_available = 0;
+ vha->flags.qpairs_req_created = 0;
+ vha->flags.qpairs_rsp_created = 0;
+ }
mempool_destroy(qpair->srb_mempool);
kfree(qpair);
mutex_unlock(&ha->mq_lock);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index c61a6a871c8e..9a2c86eacf44 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -250,6 +250,7 @@ qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
+ sp->cmd_type = TYPE_SRB;
sp->iocbs = 1;
sp->vha = vha;
done:
@@ -307,3 +308,62 @@ qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t retry_delay)
fcport->retry_delay_timestamp = jiffies +
(retry_delay * HZ / 10);
}
+
+static inline bool
+qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
+{
+ if (qla_ini_mode_enabled(vha) &&
+ (ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
+ return true;
+ else if (qla_tgt_mode_enabled(vha) &&
+ (ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
+ return true;
+ else if (qla_dual_mode_enabled(vha) &&
+ ((ql2xiniexchg + ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
+ return true;
+ else
+ return false;
+}
+
+static inline void
+qla_cpu_update(struct qla_qpair *qpair, uint16_t cpuid)
+{
+ qpair->cpuid = cpuid;
+
+ if (!list_empty(&qpair->hints_list)) {
+ struct qla_qpair_hint *h;
+
+ list_for_each_entry(h, &qpair->hints_list, hint_elem)
+ h->cpuid = qpair->cpuid;
+ }
+}
+
+static inline struct qla_qpair_hint *
+qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair)
+{
+ struct qla_qpair_hint *h;
+ u16 i;
+
+ for (i = 0; i < tgt->ha->max_qpairs + 1; i++) {
+ h = &tgt->qphints[i];
+ if (h->qpair == qpair)
+ return h;
+ }
+
+ return NULL;
+}
+
+static inline void
+qla_83xx_start_iocbs(struct qla_qpair *qpair)
+{
+ struct req_que *req = qpair->req;
+
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index ea027f6a7fd4..a36c485fae50 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -464,7 +464,9 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
req->ring_ptr++;
/* Set chip new ring index. */
- if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ if (ha->mqenable || IS_QLA27XX(ha)) {
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ } else if (IS_QLA83XX(ha)) {
WRT_REG_DWORD(req->req_q_in, req->ring_index);
RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
} else if (IS_QLAFX00(ha)) {
@@ -1768,6 +1770,9 @@ qla2xxx_start_scsi_mq(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct qla_qpair *qpair = sp->qpair;
+ /* Acquire qpair specific lock */
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+
/* Setup qpair pointers */
rsp = qpair->rsp;
req = qpair->req;
@@ -1777,15 +1782,14 @@ qla2xxx_start_scsi_mq(srb_t *sp)
/* Send marker if required */
if (vha->marker_needed != 0) {
- if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
- QLA_SUCCESS)
+ if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS) {
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_FUNCTION_FAILED;
+ }
vha->marker_needed = 0;
}
- /* Acquire qpair specific lock */
- spin_lock_irqsave(&qpair->qp_lock, flags);
-
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
for (index = 1; index < req->num_outstanding_cmds; index++) {
@@ -1940,6 +1944,8 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
return qla2xxx_start_scsi_mq(sp);
}
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+
/* Setup qpair pointers */
rsp = qpair->rsp;
req = qpair->req;
@@ -1949,15 +1955,14 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
/* Send marker if required */
if (vha->marker_needed != 0) {
- if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
- QLA_SUCCESS)
+ if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+ QLA_SUCCESS) {
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_FUNCTION_FAILED;
+ }
vha->marker_needed = 0;
}
- /* Acquire ring specific lock */
- spin_lock_irqsave(&qpair->qp_lock, flags);
-
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
for (index = 1; index < req->num_outstanding_cmds; index++) {
@@ -2107,20 +2112,13 @@ queuing_error:
/* Generic Control-SRB manipulation functions. */
/* hardware_lock assumed to be held. */
-void *
-qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp)
-{
- if (qla2x00_reset_active(vha))
- return NULL;
-
- return qla2x00_alloc_iocbs(vha, sp);
-}
void *
-qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
+__qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
{
+ scsi_qla_host_t *vha = qpair->vha;
struct qla_hw_data *ha = vha->hw;
- struct req_que *req = ha->req_q_map[0];
+ struct req_que *req = qpair->req;
device_reg_t *reg = ISP_QUE_REG(ha, req->id);
uint32_t index, handle;
request_t *pkt;
@@ -2194,10 +2192,44 @@ skip_cmd_array:
}
queuing_error:
- vha->tgt_counters.num_alloc_iocb_failed++;
+ qpair->tgt_counters.num_alloc_iocb_failed++;
return pkt;
}
+void *
+qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
+{
+ scsi_qla_host_t *vha = qpair->vha;
+
+ if (qla2x00_reset_active(vha))
+ return NULL;
+
+ return __qla2x00_alloc_iocbs(qpair, sp);
+}
+
+void *
+qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
+{
+ return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
+}
+
+static void
+qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+ logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
+ if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
+ logio->control_flags |= LCF_NVME_PRLI;
+
+ logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ logio->port_id[0] = sp->fcport->d_id.b.al_pa;
+ logio->port_id[1] = sp->fcport->d_id.b.area;
+ logio->port_id[2] = sp->fcport->d_id.b.domain;
+ logio->vp_index = sp->vha->vp_idx;
+}
+
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
@@ -2205,6 +2237,7 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
+
if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
@@ -3125,6 +3158,39 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp,
nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
}
+/*
+ * Build NVME LS request
+ */
+static int
+qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
+{
+ struct srb_iocb *nvme;
+ int rval = QLA_SUCCESS;
+
+ nvme = &sp->u.iocb_cmd;
+ cmd_pkt->entry_type = PT_LS4_REQUEST;
+ cmd_pkt->entry_count = 1;
+ cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
+
+ cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
+ cmd_pkt->tx_dseg_count = 1;
+ cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
+ cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
+ cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
+ cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
+
+ cmd_pkt->rx_dseg_count = 1;
+ cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
+ cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
+ cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
+ cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
+
+ return rval;
+}
+
int
qla2x00_start_sp(srb_t *sp)
{
@@ -3150,6 +3216,9 @@ qla2x00_start_sp(srb_t *sp)
qla24xx_login_iocb(sp, pkt) :
qla2x00_login_iocb(sp, pkt);
break;
+ case SRB_PRLI_CMD:
+ qla24xx_prli_iocb(sp, pkt);
+ break;
case SRB_LOGOUT_CMD:
IS_FWI2_CAPABLE(ha) ?
qla24xx_logout_iocb(sp, pkt) :
@@ -3178,6 +3247,9 @@ qla2x00_start_sp(srb_t *sp)
case SRB_FXIOCB_BCMD:
qlafx00_fxdisc_iocb(sp, pkt);
break;
+ case SRB_NVME_LS:
+ qla_nvme_ls(sp, pkt);
+ break;
case SRB_ABT_CMD:
IS_QLAFX00(ha) ?
qlafx00_abort_iocb(sp, pkt) :
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2572121b765b..6c6e624a5aa6 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/cpu.h>
#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
@@ -17,7 +18,7 @@
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
-static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
+static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
sts_entry_t *);
/**
@@ -431,8 +432,7 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
"Register: 0x%x%x.\n", mb[7], mb[3]);
if (err_level == ERR_LEVEL_NON_FATAL) {
ql_log(ql_log_warn, vha, 0x5063,
- "Not a fatal error, f/w has recovered "
- "iteself.\n");
+ "Not a fatal error, f/w has recovered itself.\n");
} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
ql_log(ql_log_fatal, vha, 0x5064,
"Recoverable Fatal error: Chip reset "
@@ -709,7 +709,7 @@ skip_rio:
ha->isp_ops->fw_dump(vha, 1);
ha->flags.fw_init_done = 0;
- ha->flags.fw_started = 0;
+ QLA_FW_STOPPED(ha);
if (IS_FWI2_CAPABLE(ha)) {
if (mb[1] == 0 && mb[2] == 0) {
@@ -829,7 +829,7 @@ skip_rio:
fc_host_port_name(vha->host) =
wwn_to_u64(vha->port_name);
ql_dbg(ql_dbg_init + ql_dbg_verbose,
- vha, 0x0144, "LOOP DOWN detected,"
+ vha, 0x00d8, "LOOP DOWN detected,"
"restore WWPN %016llx\n",
wwn_to_u64(vha->port_name));
}
@@ -973,6 +973,23 @@ skip_rio:
if (mb[1] == 0xffff)
goto global_port_update;
+ if (mb[1] == NPH_SNS_LID(ha)) {
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ break;
+ }
+
+ /* use handle_cnt for loop id/nport handle */
+ if (IS_FWI2_CAPABLE(ha))
+ handle_cnt = NPH_SNS;
+ else
+ handle_cnt = SIMPLE_NAME_SERVER;
+ if (mb[1] == handle_cnt) {
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ break;
+ }
+
/* Port logout */
fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
if (!fcport)
@@ -1701,7 +1718,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
case LSC_SCODE_NOXCB:
vha->hw->exch_starvation++;
if (vha->hw->exch_starvation > 5) {
- ql_log(ql_log_warn, vha, 0xffff,
+ ql_log(ql_log_warn, vha, 0xd046,
"Exchange starvation. Resetting RISC\n");
vha->hw->exch_starvation = 0;
@@ -1781,6 +1798,79 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
sp->done(sp, 0);
}
+static void
+qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
+{
+ const char func[] = "NVME-IOCB";
+ fc_port_t *fcport;
+ srb_t *sp;
+ struct srb_iocb *iocb;
+ struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
+ uint16_t state_flags;
+ struct nvmefc_fcp_req *fd;
+ uint16_t ret = 0;
+ struct srb_iocb *nvme;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
+ if (!sp)
+ return;
+
+ iocb = &sp->u.iocb_cmd;
+ fcport = sp->fcport;
+ iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status);
+ state_flags = le16_to_cpu(sts->state_flags);
+ fd = iocb->u.nvme.desc;
+ nvme = &sp->u.iocb_cmd;
+
+ if (unlikely(nvme->u.nvme.aen_op))
+ atomic_dec(&sp->vha->nvme_active_aen_cnt);
+
+ /*
+ * State flags: Bit 6 and Bit 0.
+ * If bit 0 is set, bit 6 does not matter; in both cases the
+ * response was DMA'd to the host buffer.
+ * If both bits are clear, that is the good-path case.
+ * If bit 6 is set and bit 0 is clear, the response data must be
+ * copied from the status IOCB to the response buffer.
+ */
+ if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
+ iocb->u.nvme.rsp_pyld_len = 0;
+ } else if ((state_flags & SF_FCP_RSP_DMA)) {
+ iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
+ } else if (state_flags & SF_NVME_ERSP) {
+ uint32_t *inbuf, *outbuf;
+ uint16_t iter;
+
+ inbuf = (uint32_t *)&sts->nvme_ersp_data;
+ outbuf = (uint32_t *)fd->rspaddr;
+ iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
+ iter = iocb->u.nvme.rsp_pyld_len >> 2;
+ for (; iter; iter--)
+ *outbuf++ = swab32(*inbuf++);
+ } else { /* unhandled case */
+ ql_log(ql_log_warn, fcport->vha, 0x503a,
+ "NVME-%s error. Unhandled state_flags of %x\n",
+ sp->name, state_flags);
+ }
+
+ fd->transferred_length = fd->payload_length -
+ le32_to_cpu(sts->residual_len);
+
+ if (sts->entry_status) {
+ ql_log(ql_log_warn, fcport->vha, 0x5038,
+ "NVME-%s error - hdl=%x entry-status(%x).\n",
+ sp->name, sp->handle, sts->entry_status);
+ ret = QLA_FUNCTION_FAILED;
+ } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
+ ql_log(ql_log_warn, fcport->vha, 0x5039,
+ "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
+ sp->name, sp->handle, sts->comp_status,
+ le32_to_cpu(sts->residual_len), sts->ox_id);
+ ret = QLA_FUNCTION_FAILED;
+ }
+ sp->done(sp, ret);
+}
+
/**
* qla2x00_process_response_queue() - Process response queue entries.
* @ha: SCSI driver HA context
@@ -2263,6 +2353,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
return;
}
+ if (sp->cmd_type != TYPE_SRB) {
+ req->outstanding_cmds[handle] = NULL;
+ ql_dbg(ql_dbg_io, vha, 0x3015,
+ "Unknown sp->cmd_type %x (%p).\n",
+ sp->cmd_type, sp);
+ return;
+ }
+
+ /* NVME completion. */
+ if (sp->type == SRB_NVME_CMD) {
+ qla24xx_nvme_iocb_entry(vha, req, pkt);
+ return;
+ }
+
if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
return;
@@ -2374,8 +2478,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
- "Mid-layer underflow "
- "detected (0x%x of 0x%x bytes).\n",
+ "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
res = DID_ERROR << 16;
@@ -2408,8 +2511,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
if (scsi_status & SS_RESIDUAL_UNDER) {
if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
- "Dropped frame(s) detected "
- "(0x%x of 0x%x bytes).\n",
+ "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
res = DID_ERROR << 16 | lscsi_status;
@@ -2420,8 +2522,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
- "Mid-layer underflow "
- "detected (0x%x of 0x%x bytes).\n",
+ "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
res = DID_ERROR << 16;
@@ -2435,9 +2536,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
*/
ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
- "Dropped frame(s) detected (0x%x "
- "of 0x%x bytes).\n", resid,
- scsi_bufflen(cp));
+ "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
+ resid, scsi_bufflen(cp));
res = DID_ERROR << 16 | lscsi_status;
goto check_scsi_status;
@@ -2619,8 +2719,9 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
* qla2x00_error_entry() - Process an error entry.
* @ha: SCSI driver HA context
* @pkt: Entry pointer
+ * Return: 1 = allow further error analysis, 0 = no additional error analysis.
*/
-static void
+static int
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
srb_t *sp;
@@ -2631,7 +2732,8 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
int res = DID_ERROR << 16;
ql_dbg(ql_dbg_async, vha, 0x502a,
- "type of error status in response: 0x%x\n", pkt->entry_status);
+ "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
+ pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
if (que >= ha->max_req_queues || !ha->req_q_map[que])
goto fatal;
@@ -2641,18 +2743,35 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
if (pkt->entry_status & RF_BUSY)
res = DID_BUS_BUSY << 16;
- if (pkt->entry_type == NOTIFY_ACK_TYPE &&
- pkt->handle == QLA_TGT_SKIP_HANDLE)
- return;
+ if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
+ return 0;
- sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
- if (sp) {
- sp->done(sp, res);
- return;
+ switch (pkt->entry_type) {
+ case NOTIFY_ACK_TYPE:
+ case STATUS_TYPE:
+ case STATUS_CONT_TYPE:
+ case LOGINOUT_PORT_IOCB_TYPE:
+ case CT_IOCB_TYPE:
+ case ELS_IOCB_TYPE:
+ case ABORT_IOCB_TYPE:
+ case MBX_IOCB_TYPE:
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (sp) {
+ sp->done(sp, res);
+ return 0;
+ }
+ break;
+
+ case ABTS_RESP_24XX:
+ case CTIO_TYPE7:
+ case CTIO_CRC2:
+ default:
+ return 1;
}
fatal:
ql_log(ql_log_warn, vha, 0x5030,
"Error entry - invalid handle/queue (%04x).\n", que);
+ return 0;
}
/**
@@ -2708,6 +2827,21 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
sp->done(sp, 0);
}
+void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *vha, struct pt_ls4_request *pkt,
+ struct req_que *req)
+{
+ srb_t *sp;
+ const char func[] = "LS4_IOCB";
+ uint16_t comp_status;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+
+ comp_status = le16_to_cpu(pkt->status);
+ sp->done(sp, comp_status);
+}
+
/**
* qla24xx_process_response_queue() - Process response queue entries.
* @ha: SCSI driver HA context
@@ -2721,6 +2855,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (!ha->flags.fw_started)
return;
+ if (rsp->qpair->cpuid != smp_processor_id())
+ qla_cpu_update(rsp->qpair, smp_processor_id());
+
while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
@@ -2733,9 +2870,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
}
if (pkt->entry_status != 0) {
- qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
-
- if (qlt_24xx_process_response_error(vha, pkt))
+ if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
goto process_err;
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
@@ -2768,7 +2903,8 @@ process_err:
case ABTS_RECV_24XX:
if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
/* ensure that the ATIO queue is empty */
- qlt_handle_abts_recv(vha, (response_t *)pkt);
+ qlt_handle_abts_recv(vha, rsp,
+ (response_t *)pkt);
break;
} else {
/* drop through */
@@ -2777,11 +2913,16 @@ process_err:
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case CTIO_CRC2:
- qlt_response_pkt_all_vps(vha, (response_t *)pkt);
+ qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
+ break;
+ case PT_LS4_REQUEST:
+ qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
+ rsp->req);
break;
case NOTIFY_ACK_TYPE:
if (pkt->handle == QLA_TGT_SKIP_HANDLE)
- qlt_response_pkt_all_vps(vha, (response_t *)pkt);
+ qlt_response_pkt_all_vps(vha, rsp,
+ (response_t *)pkt);
else
qla24xxx_nack_iocb_entry(vha, rsp->req,
(struct nack_to_isp *)pkt);
@@ -3156,10 +3297,10 @@ struct qla_init_msix_entry {
};
static const struct qla_init_msix_entry msix_entries[] = {
- { "qla2xxx (default)", qla24xx_msix_default },
- { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
- { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
- { "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q },
+ { "default", qla24xx_msix_default },
+ { "rsp_q", qla24xx_msix_rsp_q },
+ { "atio_q", qla83xx_msix_atio_q },
+ { "qpair_multiq", qla2xxx_msix_rsp_q },
};
static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
@@ -3183,9 +3324,14 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
min_vecs++;
}
- ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
- ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
- &desc);
+ if (USER_CTRL_IRQ(ha)) {
+ /* user wants to control IRQ setting for target mode */
+ ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
+ ha->msix_count, PCI_IRQ_MSIX);
+ } else
+ ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
+ ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+ &desc);
if (ret < 0) {
ql_log(ql_log_fatal, vha, 0x00c7,
@@ -3239,7 +3385,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
qentry->handle = rsp;
rsp->msix = qentry;
scnprintf(qentry->name, sizeof(qentry->name),
- "%s", msix_entries[i].name);
+ "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
if (IS_P3P_TYPE(ha))
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
@@ -3247,7 +3393,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
else
ret = request_irq(qentry->vector,
msix_entries[i].handler,
- 0, msix_entries[i].name, rsp);
+ 0, qentry->name, rsp);
if (ret)
goto msix_register_fail;
qentry->have_irq = 1;
@@ -3263,11 +3409,12 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
rsp->msix = qentry;
qentry->handle = rsp;
scnprintf(qentry->name, sizeof(qentry->name),
- "%s", msix_entries[QLA_ATIO_VECTOR].name);
+ "qla2xxx%lu_%s", vha->host_no,
+ msix_entries[QLA_ATIO_VECTOR].name);
qentry->in_use = 1;
ret = request_irq(qentry->vector,
msix_entries[QLA_ATIO_VECTOR].handler,
- 0, msix_entries[QLA_ATIO_VECTOR].name, rsp);
+ 0, qentry->name, rsp);
qentry->have_irq = 1;
}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index cba1fc5e8be9..7c6d1a404011 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -124,8 +124,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
}
/* if PCI error, then avoid mbx processing.*/
- if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
- ql_log(ql_log_warn, vha, 0x1191,
+ if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
+ test_bit(UNLOADING, &base_vha->dpc_flags)) {
+ ql_log(ql_log_warn, vha, 0xd04e,
"PCI error, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
}
@@ -169,7 +170,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
*/
if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
/* Timeout occurred. Return error. */
- ql_log(ql_log_warn, vha, 0x1005,
+ ql_log(ql_log_warn, vha, 0xd035,
"Cmd access timeout, cmd=0x%x, Exiting.\n",
mcp->mb[0]);
return QLA_FUNCTION_TIMEOUT;
@@ -317,7 +318,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ha->mcp = NULL;
rval = QLA_FUNCTION_FAILED;
- ql_log(ql_log_warn, vha, 0x1015,
+ ql_log(ql_log_warn, vha, 0xd048,
"FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
goto premature_exit;
}
@@ -359,7 +360,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
host_status = RD_REG_DWORD(&reg->isp24.host_status);
hccr = RD_REG_DWORD(&reg->isp24.hccr);
- ql_log(ql_log_warn, vha, 0x1119,
+ ql_log(ql_log_warn, vha, 0xd04c,
"MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
"mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
@@ -384,8 +385,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
* then only PCI ERR flag would be set.
* we will do premature exit for above case.
*/
- if (test_bit(UNLOADING, &base_vha->dpc_flags))
- set_bit(PCI_ERR, &base_vha->dpc_flags);
ha->flags.mbox_busy = 0;
rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit;
@@ -561,6 +560,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
}
#define EXTENDED_BB_CREDITS BIT_0
+#define NVME_ENABLE_FLAG BIT_3
+
/*
* qla2x00_execute_fw
* Start adapter firmware.
@@ -602,6 +603,9 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
} else
mcp->mb[4] = 0;
+ if (ql2xnvmeenable && IS_QLA27XX(ha))
+ mcp->mb[4] |= NVME_ENABLE_FLAG;
+
if (ha->flags.exlogins_enabled)
mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
@@ -822,7 +826,7 @@ qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
*/
#define CONFIG_XCHOFFLD_MEM 0x3
int
-qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
+qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
{
int rval;
mbx_cmd_t mc;
@@ -835,12 +839,12 @@ qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
- mcp->mb[2] = MSW(phys_addr);
- mcp->mb[3] = LSW(phys_addr);
- mcp->mb[6] = MSW(MSD(phys_addr));
- mcp->mb[7] = LSW(MSD(phys_addr));
- mcp->mb[8] = MSW(ha->exlogin_size);
- mcp->mb[9] = LSW(ha->exlogin_size);
+ mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
+ mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
+ mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
+ mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
+ mcp->mb[8] = MSW(ha->exchoffld_size);
+ mcp->mb[9] = LSW(ha->exchoffld_size);
mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_11|MBX_0;
mcp->tov = MBX_TOV_SECONDS;
@@ -942,6 +946,22 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
"%s: Firmware supports Exchange Offload 0x%x\n",
__func__, ha->fw_attributes_h);
+
+ /* bit 26 of fw_attributes */
+ if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
+ struct init_cb_24xx *icb;
+
+ icb = (struct init_cb_24xx *)ha->init_cb;
+ /*
+ * FW supports NVMe and the driver load
+ * parameter (ql2xnvmeenable) requested NVMe.
+ */
+ vha->flags.nvme_enabled = 1;
+ icb->firmware_options_2 &= cpu_to_le32(~0xf);
+ ha->zio_mode = 0;
+ ha->zio_timer = 0;
+ }
+
}
if (IS_QLA27XX(ha)) {
@@ -1049,6 +1069,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
mcp->in_mb = MBX_0;
if (IS_FWI2_CAPABLE(vha->hw)) {
mcp->in_mb |= MBX_1;
+ mcp->mb[10] = fwopts[10];
+ mcp->out_mb |= MBX_10;
} else {
mcp->mb[10] = fwopts[10];
mcp->mb[11] = fwopts[11];
@@ -3213,7 +3235,7 @@ qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
if (!IS_QLA8044(vha->hw))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1186,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
"Entered %s.\n", __func__);
mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
@@ -3229,7 +3251,7 @@ qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x1187,
+ ql_dbg(ql_dbg_mbx, vha, 0x11a1,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
@@ -3713,12 +3735,12 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
} else if (rptid_entry->format == 2) {
- ql_dbg(ql_dbg_async, vha, 0xffff,
+ ql_dbg(ql_dbg_async, vha, 0x505f,
"RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
- ql_dbg(ql_dbg_async, vha, 0xffff,
+ ql_dbg(ql_dbg_async, vha, 0x5075,
"N2N: Remote WWPN %8phC.\n",
rptid_entry->u.f2.port_name);
@@ -3871,7 +3893,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
rval = QLA_FUNCTION_FAILED;
} else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
ql_dbg(ql_dbg_mbx, vha, 0x10c5,
- "Failed to complet IOCB -- completion status (%x).\n",
+ "Failed to complete IOCB -- completion status (%x).\n",
le16_to_cpu(vce->comp_status));
rval = QLA_FUNCTION_FAILED;
} else {
@@ -5790,7 +5812,7 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
"Entered %s.\n", __func__);
dd_dma = dma_map_single(&vha->hw->pdev->dev,
@@ -5872,13 +5894,13 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ ql_dbg(ql_dbg_mbx, vha, 0x1018,
"%s: %s Failed submission. %x.\n",
__func__, sp->name, rval);
goto done_free_sp;
}
- ql_dbg(ql_dbg_mbx, vha, 0xffff, "MB:%s hndl %x submitted\n",
+ ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
sp->name, sp->handle);
wait_for_completion(&c->u.mbx.comp);
@@ -5887,16 +5909,16 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
rval = c->u.mbx.rc;
switch (rval) {
case QLA_FUNCTION_TIMEOUT:
- ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Timeout. %x.\n",
+ ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
__func__, sp->name, rval);
break;
case QLA_SUCCESS:
- ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s done.\n",
+ ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
__func__, sp->name);
sp->free(sp);
break;
default:
- ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Failed. %x.\n",
+ ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
__func__, sp->name, rval);
sp->free(sp);
break;
@@ -5927,8 +5949,8 @@ int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
if (pd == NULL) {
- ql_log(ql_log_warn, vha, 0xffff,
- "Failed to allocate port database structure.\n");
+ ql_log(ql_log_warn, vha, 0xd047,
+ "Failed to allocate port database structure.\n");
goto done_free_sp;
}
memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
@@ -5945,14 +5967,14 @@ int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
rval = qla24xx_send_mb_cmd(vha, &mc);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ ql_dbg(ql_dbg_mbx, vha, 0x1193,
"%s: %8phC fail\n", __func__, fcport->port_name);
goto done_free_sp;
}
rval = __qla24xx_parse_gpdb(vha, fcport, pd);
- ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %8phC done\n",
+ ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
__func__, fcport->port_name);
done_free_sp:
@@ -5967,14 +5989,22 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
{
int rval = QLA_SUCCESS;
uint64_t zero = 0;
+ u8 current_login_state, last_login_state;
+
+ if (fcport->fc4f_nvme) {
+ current_login_state = pd->current_login_state >> 4;
+ last_login_state = pd->last_login_state >> 4;
+ } else {
+ current_login_state = pd->current_login_state & 0xf;
+ last_login_state = pd->last_login_state & 0xf;
+ }
/* Check for logged in state. */
- if (pd->current_login_state != PDS_PRLI_COMPLETE &&
- pd->last_login_state != PDS_PRLI_COMPLETE) {
- ql_dbg(ql_dbg_mbx, vha, 0xffff,
- "Unable to verify login-state (%x/%x) for "
- "loop_id %x.\n", pd->current_login_state,
- pd->last_login_state, fcport->loop_id);
+ if (current_login_state != PDS_PRLI_COMPLETE &&
+ last_login_state != PDS_PRLI_COMPLETE) {
+ ql_dbg(ql_dbg_mbx, vha, 0x119a,
+ "Unable to verify login-state (%x/%x) for loop_id %x.\n",
+ current_login_state, last_login_state, fcport->loop_id);
rval = QLA_FUNCTION_FAILED;
goto gpd_error_out;
}
@@ -5997,12 +6027,17 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
fcport->d_id.b.al_pa = pd->port_id[2];
fcport->d_id.b.rsvd_1 = 0;
- /* If not target must be initiator or unknown type. */
- if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
- fcport->port_type = FCT_INITIATOR;
- else
- fcport->port_type = FCT_TARGET;
-
+ if (fcport->fc4f_nvme) {
+ fcport->nvme_prli_service_param =
+ pd->prli_nvme_svc_param_word_3;
+ fcport->port_type = FCT_NVME;
+ } else {
+ /* If not target must be initiator or unknown type. */
+ if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+ }
/* Passback COS information. */
fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
FC_COS_CLASS2 : FC_COS_CLASS3;
@@ -6040,12 +6075,12 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
rval = qla24xx_send_mb_cmd(vha, &mc);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0xffff,
- "%s: fail\n", __func__);
+ ql_dbg(ql_dbg_mbx, vha, 0x119b,
+ "%s: fail\n", __func__);
} else {
*entries = mc.mb[1];
- ql_dbg(ql_dbg_mbx, vha, 0xffff,
- "%s: done\n", __func__);
+ ql_dbg(ql_dbg_mbx, vha, 0x119c,
+ "%s: done\n", __func__);
}
done:
return rval;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 09a490c98763..f0605cd196fb 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -640,11 +640,12 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
- uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
+ uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
int ret = 0;
struct req_que *req = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
uint16_t que_id = 0;
device_reg_t *reg;
uint32_t cnt;
@@ -731,14 +732,17 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->ring_ptr, req->ring_index, req->cnt,
req->id, req->max_q_depth);
- ret = qla25xx_init_req_que(base_vha, req);
- if (ret != QLA_SUCCESS) {
- ql_log(ql_log_fatal, base_vha, 0x00df,
- "%s failed.\n", __func__);
- mutex_lock(&ha->mq_lock);
- clear_bit(que_id, ha->req_qid_map);
- mutex_unlock(&ha->mq_lock);
- goto que_failed;
+ if (startqp) {
+ ret = qla25xx_init_req_que(base_vha, req);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, base_vha, 0x00df,
+ "%s failed.\n", __func__);
+ mutex_lock(&ha->mq_lock);
+ clear_bit(que_id, ha->req_qid_map);
+ mutex_unlock(&ha->mq_lock);
+ goto que_failed;
+ }
+ vha->flags.qpairs_req_created = 1;
}
return req->id;
@@ -765,11 +769,12 @@ static void qla_do_work(struct work_struct *work)
/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
- uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair)
+ uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
int ret = 0;
struct rsp_que *rsp = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
uint16_t que_id = 0;
device_reg_t *reg;
@@ -843,14 +848,17 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
if (ret)
goto que_failed;
- ret = qla25xx_init_rsp_que(base_vha, rsp);
- if (ret != QLA_SUCCESS) {
- ql_log(ql_log_fatal, base_vha, 0x00e7,
- "%s failed.\n", __func__);
- mutex_lock(&ha->mq_lock);
- clear_bit(que_id, ha->rsp_qid_map);
- mutex_unlock(&ha->mq_lock);
- goto que_failed;
+ if (startqp) {
+ ret = qla25xx_init_rsp_que(base_vha, rsp);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, base_vha, 0x00e7,
+ "%s failed.\n", __func__);
+ mutex_lock(&ha->mq_lock);
+ clear_bit(que_id, ha->rsp_qid_map);
+ mutex_unlock(&ha->mq_lock);
+ goto que_failed;
+ }
+ vha->flags.qpairs_rsp_created = 1;
}
rsp->req = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
new file mode 100644
index 000000000000..f3710a75fe1f
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -0,0 +1,761 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2017 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_nvme.h"
+#include "qla_def.h"
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+#include <linux/nvme.h>
+#include <linux/nvme-fc.h>
+
+static struct nvme_fc_port_template qla_nvme_fc_transport;
+
+static void qla_nvme_unregister_remote_port(struct work_struct *);
+
+int qla_nvme_register_remote(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ struct nvme_rport *rport;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_NVME_FC))
+ return 0;
+
+ if (fcport->nvme_flag & NVME_FLAG_REGISTERED)
+ return 0;
+
+ if (!vha->flags.nvme_enabled) {
+ ql_log(ql_log_info, vha, 0x2100,
+ "%s: Not registering target since Host NVME is not enabled\n",
+ __func__);
+ return 0;
+ }
+
+ if (!(fcport->nvme_prli_service_param &
+ (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)))
+ return 0;
+
+ INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
+ rport = kzalloc(sizeof(*rport), GFP_KERNEL);
+ if (!rport) {
+ ql_log(ql_log_warn, vha, 0x2101,
+ "%s: unable to alloc memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ rport->req.port_name = wwn_to_u64(fcport->port_name);
+ rport->req.node_name = wwn_to_u64(fcport->node_name);
+ rport->req.port_role = 0;
+
+ if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
+ rport->req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
+
+ if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
+ rport->req.port_role |= FC_PORT_ROLE_NVME_TARGET;
+
+ if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
+ rport->req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
+
+ rport->req.port_id = fcport->d_id.b24;
+
+ ql_log(ql_log_info, vha, 0x2102,
+ "%s: traddr=pn-0x%016llx:nn-0x%016llx PortID:%06x\n",
+ __func__, rport->req.port_name, rport->req.node_name,
+ rport->req.port_id);
+
+ ret = nvme_fc_register_remoteport(vha->nvme_local_port, &rport->req,
+ &fcport->nvme_remote_port);
+ if (ret) {
+ ql_log(ql_log_warn, vha, 0x212e,
+ "Failed to register remote port. Transport returned %d\n",
+ ret);
+ return ret;
+ }
+
+ fcport->nvme_remote_port->private = fcport;
+ fcport->nvme_flag |= NVME_FLAG_REGISTERED;
+ atomic_set(&fcport->nvme_ref_count, 1);
+ init_waitqueue_head(&fcport->nvme_waitQ);
+ rport->fcport = fcport;
+ list_add_tail(&rport->list, &vha->nvme_rport_list);
+ return 0;
+}
+
+/* Allocate a queue for NVMe traffic */
+static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport, unsigned int qidx,
+ u16 qsize, void **handle)
+{
+ struct scsi_qla_host *vha;
+ struct qla_hw_data *ha;
+ struct qla_qpair *qpair;
+
+ if (!qidx)
+ qidx++;
+
+ vha = (struct scsi_qla_host *)lport->private;
+ ha = vha->hw;
+
+ ql_log(ql_log_info, vha, 0x2104,
+ "%s: handle %p, idx =%d, qsize %d\n",
+ "%s: handle %p, idx=%d, qsize %d\n",
+
+ if (qidx > qla_nvme_fc_transport.max_hw_queues) {
+ ql_log(ql_log_warn, vha, 0x212f,
+ "%s: Illegal qidx=%d. Max=%d\n",
+ __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
+ return -EINVAL;
+ }
+
+ if (ha->queue_pair_map[qidx]) {
+ *handle = ha->queue_pair_map[qidx];
+ ql_log(ql_log_info, vha, 0x2121,
+ "Returning existing qpair of %p for idx=%x\n",
+ *handle, qidx);
+ return 0;
+ }
+
+ ql_log(ql_log_warn, vha, 0xffff,
+ "allocating q for idx=%x w/o cpu mask\n", qidx);
+ qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
+ if (qpair == NULL) {
+ ql_log(ql_log_warn, vha, 0x2122,
+ "Failed to allocate qpair\n");
+ return -EINVAL;
+ }
+ *handle = qpair;
+
+ return 0;
+}
+
+static void qla_nvme_sp_ls_done(void *ptr, int res)
+{
+ srb_t *sp = ptr;
+ struct srb_iocb *nvme;
+ struct nvmefc_ls_req *fd;
+ struct nvme_private *priv;
+
+ if (atomic_read(&sp->ref_count) == 0) {
+ ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
+ "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
+ return;
+ }
+
+ if (!atomic_dec_and_test(&sp->ref_count))
+ return;
+
+ if (res)
+ res = -EINVAL;
+
+ nvme = &sp->u.iocb_cmd;
+ fd = nvme->u.nvme.desc;
+ priv = fd->private;
+ priv->comp_status = res;
+ schedule_work(&priv->ls_work);
+ /* work schedule doesn't need the sp */
+ qla2x00_rel_sp(sp);
+}
+
+static void qla_nvme_sp_done(void *ptr, int res)
+{
+ srb_t *sp = ptr;
+ struct srb_iocb *nvme;
+ struct nvmefc_fcp_req *fd;
+
+ nvme = &sp->u.iocb_cmd;
+ fd = nvme->u.nvme.desc;
+
+ if (!atomic_dec_and_test(&sp->ref_count))
+ return;
+
+ if (!(sp->fcport->nvme_flag & NVME_FLAG_REGISTERED))
+ goto rel;
+
+ if (unlikely(nvme->u.nvme.comp_status || res))
+ fd->status = -EINVAL;
+ else
+ fd->status = 0;
+
+ fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
+ fd->done(fd);
+rel:
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
+ struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
+{
+ struct nvme_private *priv = fd->private;
+ fc_port_t *fcport = rport->private;
+ srb_t *sp = priv->sp;
+ int rval;
+ struct qla_hw_data *ha = fcport->vha->hw;
+
+ rval = ha->isp_ops->abort_command(sp);
+ if (rval != QLA_SUCCESS)
+ ql_log(ql_log_warn, fcport->vha, 0x2125,
+ "%s: failed to abort LS command for SP:%p rval=%x\n",
+ __func__, sp, rval);
+
+ ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
+ "%s: aborted sp:%p on fcport:%p\n", __func__, sp, fcport);
+}
+
+static void qla_nvme_ls_complete(struct work_struct *work)
+{
+ struct nvme_private *priv =
+ container_of(work, struct nvme_private, ls_work);
+ struct nvmefc_ls_req *fd = priv->fd;
+
+ fd->done(fd, priv->comp_status);
+}
+
+static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
+ struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
+{
+ fc_port_t *fcport = (fc_port_t *)rport->private;
+ struct srb_iocb *nvme;
+ struct nvme_private *priv = fd->private;
+ struct scsi_qla_host *vha;
+ int rval = QLA_FUNCTION_FAILED;
+ struct qla_hw_data *ha;
+ srb_t *sp;
+
+ if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
+ return rval;
+
+ vha = fcport->vha;
+ ha = vha->hw;
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ return rval;
+
+ sp->type = SRB_NVME_LS;
+ sp->name = "nvme_ls";
+ sp->done = qla_nvme_sp_ls_done;
+ atomic_set(&sp->ref_count, 1);
+ init_waitqueue_head(&sp->nvme_ls_waitQ);
+ nvme = &sp->u.iocb_cmd;
+ priv->sp = sp;
+ priv->fd = fd;
+ INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
+ nvme->u.nvme.desc = fd;
+ nvme->u.nvme.dir = 0;
+ nvme->u.nvme.dl = 0;
+ nvme->u.nvme.cmd_len = fd->rqstlen;
+ nvme->u.nvme.rsp_len = fd->rsplen;
+ nvme->u.nvme.rsp_dma = fd->rspdma;
+ nvme->u.nvme.timeout_sec = fd->timeout;
+ nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
+ fd->rqstlen, DMA_TO_DEVICE);
+ dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
+ fd->rqstlen, DMA_TO_DEVICE);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x700e,
+ "qla2x00_start_sp failed = %d\n", rval);
+ atomic_dec(&sp->ref_count);
+ wake_up(&sp->nvme_ls_waitQ);
+ return rval;
+ }
+
+ return rval;
+}
+
+static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
+ struct nvme_fc_remote_port *rport, void *hw_queue_handle,
+ struct nvmefc_fcp_req *fd)
+{
+ struct nvme_private *priv = fd->private;
+ srb_t *sp = priv->sp;
+ int rval;
+ fc_port_t *fcport = rport->private;
+ struct qla_hw_data *ha = fcport->vha->hw;
+
+ rval = ha->isp_ops->abort_command(sp);
+ if (rval != QLA_SUCCESS)
+ ql_log(ql_log_warn, fcport->vha, 0x2127,
+ "%s: failed to abort command for SP:%p rval=%x\n",
+ __func__, sp, rval);
+
+ ql_dbg(ql_dbg_io, fcport->vha, 0x2126,
+ "%s: aborted sp:%p on fcport:%p\n", __func__, sp, fcport);
+}
+
+static void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
+{
+ struct scsi_qla_host *vha = lport->private;
+ unsigned long flags;
+ struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle;
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+ qla24xx_process_response_queue(vha, qpair->rsp);
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+}
+
+static int qla2x00_start_nvme_mq(srb_t *sp)
+{
+ unsigned long flags;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ struct cmd_nvme *cmd_pkt;
+ uint16_t cnt, i;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ uint16_t avail_dsds;
+ uint32_t *cur_dsd;
+ struct req_que *req = NULL;
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair = sp->qpair;
+ struct srb_iocb *nvme = &sp->u.iocb_cmd;
+ struct scatterlist *sgl, *sg;
+ struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
+ uint32_t rval = QLA_SUCCESS;
+
+ /* Setup qpair pointers */
+ req = qpair->req;
+ tot_dsds = fd->sg_cnt;
+
+ /* Acquire qpair specific lock */
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+
+ if (index == req->num_outstanding_cmds) {
+ rval = -1;
+ goto queuing_error;
+ }
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out);
+
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length - (req->ring_index - cnt);
+
+ if (req->cnt < (req_cnt + 2)) {
+ rval = -1;
+ goto queuing_error;
+ }
+ }
+
+ if (unlikely(!fd->sqid)) {
+ struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
+ if (cmd->sqe.common.opcode == nvme_admin_async_event) {
+ nvme->u.nvme.aen_op = 1;
+ atomic_inc(&vha->nvme_active_aen_cnt);
+ }
+ }
+
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ req->cnt -= req_cnt;
+
+ cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+ cmd_pkt->entry_status = 0;
+
+ /* Update entry type to indicate Command NVME IOCB */
+ cmd_pkt->entry_type = COMMAND_NVME;
+
+ /* No data transfer. How do we check buffer len == 0? */
+ if (fd->io_dir == NVMEFC_FCP_READ) {
+ cmd_pkt->control_flags =
+ cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE);
+ vha->qla_stats.input_bytes += fd->payload_length;
+ vha->qla_stats.input_requests++;
+ } else if (fd->io_dir == NVMEFC_FCP_WRITE) {
+ cmd_pkt->control_flags =
+ cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE);
+ vha->qla_stats.output_bytes += fd->payload_length;
+ vha->qla_stats.output_requests++;
+ } else if (fd->io_dir == 0) {
+ cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE);
+ }
+
+ /* Set NPORT-ID */
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
+ /* NVME RSP IU */
+ cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
+ cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
+ cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));
+
+ /* NVME CMND IU */
+ cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
+ cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
+ cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));
+
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+ cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);
+
+ /* One DSD is available in the Command Type NVME IOCB */
+ avail_dsds = 1;
+ cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
+ sgl = fd->first_sgl;
+
+ /* Load data segments */
+ for_each_sg(sgl, sg, tot_dsds, i) {
+ dma_addr_t sle_dma;
+ cont_a64_entry_t *cont_pkt;
+
+ /* Allocate additional continuation packets? */
+ if (avail_dsds == 0) {
+ /*
+ * Five DSDs are available in the Continuation
+ * Type 1 IOCB.
+ */
+
+ /* Adjust ring index */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else {
+ req->ring_ptr++;
+ }
+ cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
+ *((uint32_t *)(&cont_pkt->entry_type)) =
+ cpu_to_le32(CONTINUE_A64_TYPE);
+
+ cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+ avail_dsds = 5;
+ }
+
+ sle_dma = sg_dma_address(sg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+ avail_dsds--;
+ }
+
+ /* Set total entry count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else {
+ req->ring_ptr++;
+ }
+
+ /* Set chip new ring index. */
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+queuing_error:
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ return rval;
+}
+
+/* Post a command */
+static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
+ struct nvme_fc_remote_port *rport, void *hw_queue_handle,
+ struct nvmefc_fcp_req *fd)
+{
+ fc_port_t *fcport;
+ struct srb_iocb *nvme;
+ struct scsi_qla_host *vha;
+ int rval = QLA_FUNCTION_FAILED;
+ srb_t *sp;
+ struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle;
+ struct nvme_private *priv;
+
+ if (!fd) {
+ ql_log(ql_log_warn, NULL, 0x2134, "NO NVMe FCP request\n");
+ return rval;
+ }
+
+ priv = fd->private;
+ fcport = (fc_port_t *)rport->private;
+ if (!fcport) {
+ ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n");
+ return rval;
+ }
+
+ vha = fcport->vha;
+ if ((!qpair) || (!(fcport->nvme_flag & NVME_FLAG_REGISTERED)))
+ return -EBUSY;
+
+ /* Alloc SRB structure */
+ sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+ if (!sp)
+ return -EIO;
+
+ atomic_set(&sp->ref_count, 1);
+ init_waitqueue_head(&sp->nvme_ls_waitQ);
+ priv->sp = sp;
+ sp->type = SRB_NVME_CMD;
+ sp->name = "nvme_cmd";
+ sp->done = qla_nvme_sp_done;
+ sp->qpair = qpair;
+ nvme = &sp->u.iocb_cmd;
+ nvme->u.nvme.desc = fd;
+
+ rval = qla2x00_start_nvme_mq(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x212d,
+ "qla2x00_start_nvme_mq failed = %d\n", rval);
+ atomic_dec(&sp->ref_count);
+ wake_up(&sp->nvme_ls_waitQ);
+ return -EIO;
+ }
+
+ return rval;
+}
+
+static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
+{
+ struct scsi_qla_host *vha = lport->private;
+
+ atomic_dec(&vha->nvme_ref_count);
+ wake_up_all(&vha->nvme_waitQ);
+
+ ql_log(ql_log_info, vha, 0x210f,
+ "localport delete of %p completed.\n", vha->nvme_local_port);
+ vha->nvme_local_port = NULL;
+}
+
+static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
+{
+ fc_port_t *fcport;
+ struct nvme_rport *r_port, *trport;
+
+ fcport = (fc_port_t *)rport->private;
+ fcport->nvme_remote_port = NULL;
+ fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
+ atomic_dec(&fcport->nvme_ref_count);
+ wake_up_all(&fcport->nvme_waitQ);
+
+ list_for_each_entry_safe(r_port, trport,
+ &fcport->vha->nvme_rport_list, list) {
+ if (r_port->fcport == fcport) {
+ list_del(&r_port->list);
+ break;
+ }
+ }
+ kfree(r_port);
+
+ ql_log(ql_log_info, fcport->vha, 0x2110,
+ "remoteport_delete of %p completed.\n", fcport);
+}
+
+static struct nvme_fc_port_template qla_nvme_fc_transport = {
+ .localport_delete = qla_nvme_localport_delete,
+ .remoteport_delete = qla_nvme_remoteport_delete,
+ .create_queue = qla_nvme_alloc_queue,
+ .delete_queue = NULL,
+ .ls_req = qla_nvme_ls_req,
+ .ls_abort = qla_nvme_ls_abort,
+ .fcp_io = qla_nvme_post_cmd,
+ .fcp_abort = qla_nvme_fcp_abort,
+ .poll_queue = qla_nvme_poll,
+ .max_hw_queues = 8,
+ .max_sgl_segments = 128,
+ .max_dif_sgl_segments = 64,
+ .dma_boundary = 0xFFFFFFFF,
+ .local_priv_sz = 8,
+ .remote_priv_sz = 0,
+ .lsrqst_priv_sz = sizeof(struct nvme_private),
+ .fcprqst_priv_sz = sizeof(struct nvme_private),
+};
+
+#define NVME_ABORT_POLLING_PERIOD 2
+static int qla_nvme_wait_on_command(srb_t *sp)
+{
+ int ret = QLA_SUCCESS;
+
+ wait_event_timeout(sp->nvme_ls_waitQ, (atomic_read(&sp->ref_count) > 1),
+ NVME_ABORT_POLLING_PERIOD*HZ);
+
+ if (atomic_read(&sp->ref_count) > 1)
+ ret = QLA_FUNCTION_FAILED;
+
+ return ret;
+}
+
+static int qla_nvme_wait_on_rport_del(fc_port_t *fcport)
+{
+ int ret = QLA_SUCCESS;
+
+ wait_event_timeout(fcport->nvme_waitQ,
+ atomic_read(&fcport->nvme_ref_count),
+ NVME_ABORT_POLLING_PERIOD*HZ);
+
+ if (atomic_read(&fcport->nvme_ref_count)) {
+ ret = QLA_FUNCTION_FAILED;
+ ql_log(ql_log_info, fcport->vha, 0x2111,
+ "timed out waiting for fcport=%p to delete\n", fcport);
+ }
+
+ return ret;
+}
+
+void qla_nvme_abort(struct qla_hw_data *ha, srb_t *sp)
+{
+ int rval;
+
+ rval = ha->isp_ops->abort_command(sp);
+ if (!rval) {
+ if (!qla_nvme_wait_on_command(sp))
+ ql_log(ql_log_warn, NULL, 0x2112,
+ "nvme_wait_on_command timed out waiting on sp=%p\n",
+ sp);
+ }
+}
+
+static void qla_nvme_abort_all(fc_port_t *fcport)
+{
+ int que, cnt;
+ unsigned long flags;
+ srb_t *sp;
+ struct qla_hw_data *ha = fcport->vha->hw;
+ struct req_que *req;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (que = 0; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+ if (!req)
+ continue;
+ if (!req->outstanding_cmds)
+ continue;
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if ((sp) && ((sp->type == SRB_NVME_CMD) ||
+ (sp->type == SRB_NVME_LS)) &&
+ (sp->fcport == fcport)) {
+ atomic_inc(&sp->ref_count);
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
+ qla_nvme_abort(ha, sp);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ req->outstanding_cmds[cnt] = NULL;
+ sp->done(sp, 1);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void qla_nvme_unregister_remote_port(struct work_struct *work)
+{
+ struct fc_port *fcport = container_of(work, struct fc_port,
+ nvme_del_work);
+ struct nvme_rport *rport, *trport;
+
+ if (!IS_ENABLED(CONFIG_NVME_FC))
+ return;
+
+ list_for_each_entry_safe(rport, trport,
+ &fcport->vha->nvme_rport_list, list) {
+ if (rport->fcport == fcport) {
+ ql_log(ql_log_info, fcport->vha, 0x2113,
+ "%s: fcport=%p\n", __func__, fcport);
+ nvme_fc_unregister_remoteport(
+ fcport->nvme_remote_port);
+ }
+ }
+}
+
+void qla_nvme_delete(scsi_qla_host_t *vha)
+{
+ struct nvme_rport *rport, *trport;
+ fc_port_t *fcport;
+ int nv_ret;
+
+ if (!IS_ENABLED(CONFIG_NVME_FC))
+ return;
+
+ list_for_each_entry_safe(rport, trport, &vha->nvme_rport_list, list) {
+ fcport = rport->fcport;
+
+ ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n",
+ __func__, fcport);
+
+ nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
+ qla_nvme_wait_on_rport_del(fcport);
+ qla_nvme_abort_all(fcport);
+ }
+
+ if (vha->nvme_local_port) {
+ nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
+ if (nv_ret == 0)
+ ql_log(ql_log_info, vha, 0x2116,
+ "unregistered localport=%p\n",
+ vha->nvme_local_port);
+ else
+ ql_log(ql_log_info, vha, 0x2115,
+ "Unregister of localport failed\n");
+ }
+}
+
+void qla_nvme_register_hba(scsi_qla_host_t *vha)
+{
+ struct nvme_fc_port_template *tmpl;
+ struct qla_hw_data *ha;
+ struct nvme_fc_port_info pinfo;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_NVME_FC))
+ return;
+
+ ha = vha->hw;
+ tmpl = &qla_nvme_fc_transport;
+
+ WARN_ON(vha->nvme_local_port);
+ WARN_ON(ha->max_req_queues < 3);
+
+ qla_nvme_fc_transport.max_hw_queues =
+ min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
+ (uint8_t)(ha->max_req_queues - 2));
+
+ pinfo.node_name = wwn_to_u64(vha->node_name);
+ pinfo.port_name = wwn_to_u64(vha->port_name);
+ pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
+ pinfo.port_id = vha->d_id.b24;
+
+ ql_log(ql_log_info, vha, 0xffff,
+ "register_localport: host-traddr=pn-0x%llx:nn-0x%llx on portID:%x\n",
+ pinfo.port_name, pinfo.node_name, pinfo.port_id);
+ qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
+
+ ret = nvme_fc_register_localport(&pinfo, tmpl,
+ get_device(&ha->pdev->dev), &vha->nvme_local_port);
+ if (ret) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "register_localport failed: ret=%x\n", ret);
+ return;
+ }
+ atomic_set(&vha->nvme_ref_count, 1);
+ vha->nvme_local_port->private = vha;
+ init_waitqueue_head(&vha->nvme_waitQ);
+}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
new file mode 100644
index 000000000000..dfe56f207b28
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -0,0 +1,132 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2017 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_NVME_H
+#define __QLA_NVME_H
+
+#include <linux/blk-mq.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_els.h>
+#include <linux/nvme-fc-driver.h>
+
+#define NVME_ATIO_CMD_OFF 32
+#define NVME_FIRST_PACKET_CMDLEN (64 - NVME_ATIO_CMD_OFF)
+#define Q2T_NVME_NUM_TAGS 2048
+#define QLA_MAX_FC_SEGMENTS 64
+
+struct srb;
+struct nvme_private {
+ struct srb *sp;
+ struct nvmefc_ls_req *fd;
+ struct work_struct ls_work;
+ int comp_status;
+};
+
+struct nvme_rport {
+ struct nvme_fc_port_info req;
+ struct list_head list;
+ struct fc_port *fcport;
+};
+
+#define COMMAND_NVME 0x88 /* Command Type FC-NVMe IOCB */
+struct cmd_nvme {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+
+ uint32_t handle; /* System handle. */
+ uint16_t nport_handle; /* N_PORT handle. */
+ uint16_t timeout; /* Command timeout. */
+
+ uint16_t dseg_count; /* Data segment count. */
+ uint16_t nvme_rsp_dsd_len; /* NVMe RSP DSD length */
+
+ uint64_t rsvd;
+
+ uint16_t control_flags; /* Control Flags */
+#define CF_NVME_ENABLE BIT_9
+#define CF_DIF_SEG_DESCR_ENABLE BIT_3
+#define CF_DATA_SEG_DESCR_ENABLE BIT_2
+#define CF_READ_DATA BIT_1
+#define CF_WRITE_DATA BIT_0
+
+ uint16_t nvme_cmnd_dseg_len; /* Data segment length. */
+ uint32_t nvme_cmnd_dseg_address[2]; /* Data segment address. */
+ uint32_t nvme_rsp_dseg_address[2]; /* Data segment address. */
+
+ uint32_t byte_count; /* Total byte count. */
+
+ uint8_t port_id[3]; /* PortID of destination port. */
+ uint8_t vp_index;
+
+ uint32_t nvme_data_dseg_address[2]; /* Data segment address. */
+ uint32_t nvme_data_dseg_len; /* Data segment length. */
+};
+
+#define PT_LS4_REQUEST 0x89 /* Link Service pass-through IOCB (request) */
+struct pt_ls4_request {
+ uint8_t entry_type;
+ uint8_t entry_count;
+ uint8_t sys_define;
+ uint8_t entry_status;
+ uint32_t handle;
+ uint16_t status;
+ uint16_t nport_handle;
+ uint16_t tx_dseg_count;
+ uint8_t vp_index;
+ uint8_t rsvd;
+ uint16_t timeout;
+ uint16_t control_flags;
+#define CF_LS4_SHIFT 13
+#define CF_LS4_ORIGINATOR 0
+#define CF_LS4_RESPONDER 1
+#define CF_LS4_RESPONDER_TERM 2
+
+ uint16_t rx_dseg_count;
+ uint16_t rsvd2;
+ uint32_t exchange_address;
+ uint32_t rsvd3;
+ uint32_t rx_byte_count;
+ uint32_t tx_byte_count;
+ uint32_t dseg0_address[2];
+ uint32_t dseg0_len;
+ uint32_t dseg1_address[2];
+ uint32_t dseg1_len;
+};
+
+#define PT_LS4_UNSOL 0x56 /* pass-up unsolicited rec FC-NVMe request */
+struct pt_ls4_rx_unsol {
+ uint8_t entry_type;
+ uint8_t entry_count;
+ uint16_t rsvd0;
+ uint16_t rsvd1;
+ uint8_t vp_index;
+ uint8_t rsvd2;
+ uint16_t rsvd3;
+ uint16_t nport_handle;
+ uint16_t frame_size;
+ uint16_t rsvd4;
+ uint32_t exchange_address;
+ uint8_t d_id[3];
+ uint8_t r_ctl;
+ uint8_t s_id[3];
+ uint8_t cs_ctl;
+ uint8_t f_ctl[3];
+ uint8_t type;
+ uint16_t seq_cnt;
+ uint8_t df_ctl;
+ uint8_t seq_id;
+ uint16_t rx_id;
+ uint16_t ox_id;
+ uint32_t param;
+ uint32_t desc0;
+#define PT_LS4_PAYLOAD_OFFSET 0x2c
+#define PT_LS4_FIRST_PACKET_LEN 20
+ uint32_t desc_len;
+ uint32_t payload[3];
+};
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 0a1723cc08cf..a77c33987703 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -782,7 +782,7 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
(qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
write_unlock_irqrestore(&ha->hw_lock, flags);
ql_log(ql_log_fatal, vha, 0xb009,
- "%s out of bount memory "
+ "%s out of bound memory "
"access, offset is 0x%llx.\n",
QLA2XXX_DRIVER_NAME, off);
return -1;
@@ -4250,7 +4250,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_p3p, vha, 0xb040,
"[%s]: data ptr[%d]: %p, entry_hdr: %p\n"
- "entry_type: 0x%x, captrue_mask: 0x%x\n",
+ "entry_type: 0x%x, capture_mask: 0x%x\n",
__func__, i, data_ptr, entry_hdr,
entry_hdr->entry_type,
entry_hdr->d_ctrl.entry_capture_mask);
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 77624eac95a4..71a41093530e 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -7,6 +7,8 @@
#ifndef __QLA_NX_H
#define __QLA_NX_H
+#include <linux/io-64-nonatomic-lo-hi.h>
+
/*
* Following are the states of the Phantom. Phantom will set them and
* Host will read to check if the fields are correct.
@@ -819,21 +821,6 @@ struct qla82xx_uri_data_desc{
#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0)
#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4)
-#ifndef readq
-static inline u64 readq(void __iomem *addr)
-{
- return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
-}
-#endif
-
-#ifndef writeq
-static inline void writeq(u64 val, void __iomem *addr)
-{
- writel(((u32) (val)), (addr));
- writel(((u32) (val >> 32)), (addr + 4));
-}
-#endif
-
/* Request and response queue size */
#define REQUEST_ENTRY_CNT_82XX 128 /* Number of request entries. */
#define RESPONSE_ENTRY_CNT_82XX 128 /* Number of response entries.*/
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index dc1ec9b61027..0aa9c38bf347 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -1572,7 +1572,7 @@ qla8044_read_reset_template(struct scsi_qla_host *vha)
/* Copy rest of the template */
if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
ql_log(ql_log_fatal, vha, 0xb0bd,
- "%s: Failed to read reset tempelate\n", __func__);
+ "%s: Failed to read reset template\n", __func__);
goto exit_read_template_error;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 79f050256c55..df57655779ed 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -120,7 +120,11 @@ MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to set for each LUN. "
"Default is 32.");
+#if (IS_ENABLED(CONFIG_NVME_FC))
+int ql2xenabledif;
+#else
int ql2xenabledif = 2;
+#endif
module_param(ql2xenabledif, int, S_IRUGO);
MODULE_PARM_DESC(ql2xenabledif,
" Enable T10-CRC-DIF:\n"
@@ -129,6 +133,16 @@ MODULE_PARM_DESC(ql2xenabledif,
" 1 -- Enable DIF for all types\n"
" 2 -- Enable DIF for all types, except Type 0.\n");
+#if (IS_ENABLED(CONFIG_NVME_FC))
+int ql2xnvmeenable = 1;
+#else
+int ql2xnvmeenable;
+#endif
+module_param(ql2xnvmeenable, int, 0644);
+MODULE_PARM_DESC(ql2xnvmeenable,
+ "Enables NVME support. "
+ "0 - no NVMe. Default is Y");
+
int ql2xenablehba_err_chk = 2;
module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xenablehba_err_chk,
@@ -224,11 +238,15 @@ MODULE_PARM_DESC(ql2xexlogins,
"Number of extended Logins. "
"0 (Default)- Disabled.");
-int ql2xexchoffld = 0;
-module_param(ql2xexchoffld, uint, S_IRUGO|S_IWUSR);
+int ql2xexchoffld = 1024;
+module_param(ql2xexchoffld, uint, 0644);
MODULE_PARM_DESC(ql2xexchoffld,
- "Number of exchanges to offload. "
- "0 (Default)- Disabled.");
+ "Number of target exchanges.");
+
+int ql2xiniexchg = 1024;
+module_param(ql2xiniexchg, uint, 0644);
+MODULE_PARM_DESC(ql2xiniexchg,
+ "Number of initiator exchanges.");
int ql2xfwholdabts = 0;
module_param(ql2xfwholdabts, int, S_IRUGO);
@@ -263,6 +281,7 @@ static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
static void qla83xx_disable_laser(scsi_qla_host_t *vha);
static int qla2xxx_map_queues(struct Scsi_Host *shost);
+static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
struct scsi_host_template qla2xxx_driver_template = {
.module = THIS_MODULE,
@@ -347,6 +366,28 @@ int qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
struct qla_qpair *qpair);
/* -------------------------------------------------------------------------- */
+static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
+ struct rsp_que *rsp)
+{
+ struct qla_hw_data *ha = vha->hw;
+ rsp->qpair = ha->base_qpair;
+ rsp->req = req;
+ ha->base_qpair->req = req;
+ ha->base_qpair->rsp = rsp;
+ ha->base_qpair->vha = vha;
+ ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
+ ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
+ ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
+ INIT_LIST_HEAD(&ha->base_qpair->hints_list);
+ ha->base_qpair->enable_class_2 = ql2xenableclass2;
+ /* init qpair to this cpu. Will adjust at run time. */
+ qla_cpu_update(rsp->qpair, smp_processor_id());
+ ha->base_qpair->pdev = ha->pdev;
+
+ if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
+ ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
+}
+
static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
struct rsp_que *rsp)
{
@@ -367,6 +408,15 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
goto fail_rsp_map;
}
+ ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
+ if (ha->base_qpair == NULL) {
+ ql_log(ql_log_warn, vha, 0x00e0,
+ "Failed to allocate base queue pair memory.\n");
+ goto fail_base_qpair;
+ }
+
+ qla_init_base_qpair(vha, req, rsp);
+
if (ql2xmqsupport && ha->max_qpairs) {
ha->queue_pair_map = kcalloc(ha->max_qpairs, sizeof(struct qla_qpair *),
GFP_KERNEL);
@@ -375,14 +425,6 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
"Unable to allocate memory for queue pair ptrs.\n");
goto fail_qpair_map;
}
- ha->base_qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
- if (ha->base_qpair == NULL) {
- ql_log(ql_log_warn, vha, 0x0182,
- "Failed to allocate base queue pair memory.\n");
- goto fail_base_qpair;
- }
- ha->base_qpair->req = req;
- ha->base_qpair->rsp = rsp;
}
/*
@@ -395,9 +437,10 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
set_bit(0, ha->req_qid_map);
return 1;
-fail_base_qpair:
- kfree(ha->queue_pair_map);
fail_qpair_map:
+ kfree(ha->base_qpair);
+ ha->base_qpair = NULL;
+fail_base_qpair:
kfree(ha->rsp_q_map);
ha->rsp_q_map = NULL;
fail_rsp_map:
@@ -447,6 +490,15 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
int cnt;
unsigned long flags;
+ if (ha->queue_pair_map) {
+ kfree(ha->queue_pair_map);
+ ha->queue_pair_map = NULL;
+ }
+ if (ha->base_qpair) {
+ kfree(ha->base_qpair);
+ ha->base_qpair = NULL;
+ }
+
spin_lock_irqsave(&ha->hardware_lock, flags);
for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
if (!test_bit(cnt, ha->req_qid_map))
@@ -658,8 +710,10 @@ qla2x00_sp_free_dma(void *ptr)
}
end:
- CMD_SP(cmd) = NULL;
- qla2x00_rel_sp(sp);
+ if ((sp->type != SRB_NVME_CMD) && (sp->type != SRB_NVME_LS)) {
+ CMD_SP(cmd) = NULL;
+ qla2x00_rel_sp(sp);
+ }
}
void
@@ -1062,9 +1116,9 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
int res;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- ql_dbg(ql_dbg_init, vha, 0xffff,
- "tgt %p, fcport_count=%d\n",
- vha, vha->fcport_count);
+ ql_dbg(ql_dbg_init, vha, 0x00ec,
+ "tgt %p, fcport_count=%d\n",
+ vha, vha->fcport_count);
res = (vha->fcport_count == 0);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
@@ -1645,8 +1699,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
-
- qlt_host_reset_handler(ha);
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_tgt_cmd *cmd;
+ uint8_t trace = 0;
spin_lock_irqsave(&ha->hardware_lock, flags);
for (que = 0; que < ha->max_req_queues; que++) {
@@ -1658,27 +1713,65 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
- /* Don't abort commands in adapter during EEH
- * recovery as it's not accessible/responding.
- */
- if (GET_CMD_SP(sp) && !ha->flags.eeh_busy &&
- (sp->type == SRB_SCSI_CMD)) {
- /* Get a reference to the sp and drop the lock.
- * The reference ensures this sp->done() call
- * - and not the call in qla2xxx_eh_abort() -
- * ends the SCSI command (with result 'res').
- */
- sp_get(sp);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- status = qla2xxx_eh_abort(GET_CMD_SP(sp));
- spin_lock_irqsave(&ha->hardware_lock, flags);
- /* Get rid of extra reference if immediate exit
- * from ql2xxx_eh_abort */
- if (status == FAILED && (qla2x00_isp_reg_stat(ha)))
- atomic_dec(&sp->ref_count);
- }
req->outstanding_cmds[cnt] = NULL;
- sp->done(sp, res);
+ if (sp->cmd_type == TYPE_SRB) {
+ if ((sp->type == SRB_NVME_CMD) ||
+ (sp->type == SRB_NVME_LS)) {
+ sp_get(sp);
+ spin_unlock_irqrestore(
+ &ha->hardware_lock, flags);
+ qla_nvme_abort(ha, sp);
+ spin_lock_irqsave(
+ &ha->hardware_lock, flags);
+ } else if (GET_CMD_SP(sp) &&
+ !ha->flags.eeh_busy &&
+ (sp->type == SRB_SCSI_CMD)) {
+ /*
+ * Don't abort commands in
+ * adapter during EEH
+ * recovery as it's not
+ * accessible/responding.
+ *
+ * Get a reference to the sp
+ * and drop the lock. The
+ * reference ensures this
+ * sp->done() call and not the
+ * call in qla2xxx_eh_abort()
+ * ends the SCSI command (with
+ * result 'res').
+ */
+ sp_get(sp);
+ spin_unlock_irqrestore(
+ &ha->hardware_lock, flags);
+ status = qla2xxx_eh_abort(
+ GET_CMD_SP(sp));
+ spin_lock_irqsave(
+ &ha->hardware_lock, flags);
+ /*
+ * Get rid of extra reference
+ * if immediate exit from
+ * ql2xxx_eh_abort
+ */
+ if (status == FAILED &&
+ (qla2x00_isp_reg_stat(ha)))
+ atomic_dec(
+ &sp->ref_count);
+ }
+ sp->done(sp, res);
+ } else {
+ if (!vha->hw->tgt.tgt_ops || !tgt ||
+ qla_ini_mode_enabled(vha)) {
+ if (!trace)
+ ql_dbg(ql_dbg_tgt_mgt,
+ vha, 0xf003,
+ "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
+ vha->dpc_flags);
+ continue;
+ }
+ cmd = (struct qla_tgt_cmd *)sp;
+ qlt_abort_cmd_on_host_reset(cmd->vha,
+ cmd);
+ }
}
}
}
@@ -1957,7 +2050,7 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
/* Read MSIX vector size of the board */
pci_read_config_word(ha->pdev,
QLA_83XX_PCI_MSIX_CONTROL, &msix);
- ha->msix_count = msix + 1;
+ ha->msix_count = (msix & PCI_MSIX_FLAGS_QSIZE) + 1;
/*
* By default, driver uses at least two msix vectors
* (default & rspq)
@@ -1975,7 +2068,7 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
/* Queue pairs is the max value minus
* the base queue pair */
ha->max_qpairs = ha->max_req_queues - 1;
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x00e3,
"Max no of queues pairs: %d.\n", ha->max_qpairs);
}
ql_log_pci(ql_log_info, ha->pdev, 0x011c,
@@ -2653,7 +2746,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
"Memory allocated for ha=%p.\n", ha);
ha->pdev = pdev;
- ha->tgt.enable_class_2 = ql2xenableclass2;
INIT_LIST_HEAD(&ha->tgt.q_full_list);
spin_lock_init(&ha->tgt.q_full_lock);
spin_lock_init(&ha->tgt.sess_lock);
@@ -3073,12 +3165,26 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
- if (ha->mqenable && qla_ini_mode_enabled(base_vha)) {
+ if (ha->mqenable) {
+ bool mq = false;
+ bool startit = false;
ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
- /* Create start of day qpairs for Block MQ */
- if (shost_use_blk_mq(host)) {
+
+ if (QLA_TGT_MODE_ENABLED()) {
+ mq = true;
+ startit = false;
+ }
+
+ if ((ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED) &&
+ shost_use_blk_mq(host)) {
+ mq = true;
+ startit = true;
+ }
+
+ if (mq) {
+ /* Create start of day qpairs for Block MQ */
for (i = 0; i < ha->max_qpairs; i++)
- qla2xxx_create_qpair(base_vha, 5, 0);
+ qla2xxx_create_qpair(base_vha, 5, 0, startit);
}
}
@@ -3451,6 +3557,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
return;
set_bit(UNLOADING, &base_vha->dpc_flags);
+
+ qla_nvme_delete(base_vha);
+
dma_free_coherent(&ha->pdev->dev,
base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
@@ -3601,10 +3710,10 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
} else {
int now;
if (rport) {
- ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
- "%s %8phN. rport %p roles %x \n",
- __func__, fcport->port_name, rport,
- rport->roles);
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2109,
+ "%s %8phN. rport %p roles %x\n",
+ __func__, fcport->port_name, rport,
+ rport->roles);
fc_remote_port_delete(rport);
}
qlt_do_generation_tick(vha, &now);
@@ -3649,7 +3758,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
if (fcport->login_retry == 0) {
fcport->login_retry = vha->hw->login_retry_count;
- ql_dbg(ql_dbg_disc, vha, 0x2067,
+ ql_dbg(ql_dbg_disc, vha, 0x20a3,
"Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
fcport->port_name, fcport->loop_id, fcport->login_retry);
}
@@ -3673,8 +3782,8 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
{
fc_port_t *fcport;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Mark all dev lost\n");
+ ql_dbg(ql_dbg_disc, vha, 0x20f1,
+ "Mark all dev lost\n");
list_for_each_entry(fcport, &vha->vp_fcports, list) {
fcport->scan_state = 0;
@@ -3986,6 +4095,9 @@ qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
if (!ql2xexlogins)
return QLA_SUCCESS;
+ if (!IS_EXLOGIN_OFFLD_CAPABLE(ha))
+ return QLA_SUCCESS;
+
ql_log(ql_log_info, vha, 0xd021, "EXLOGIN count: %d.\n", ql2xexlogins);
max_cnt = 0;
rval = qla_get_exlogin_status(vha, &size, &max_cnt);
@@ -3996,27 +4108,33 @@ qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
}
temp = (ql2xexlogins > max_cnt) ? max_cnt : ql2xexlogins;
- ha->exlogin_size = (size * temp);
- ql_log(ql_log_info, vha, 0xd024,
- "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n",
- max_cnt, size, temp);
-
- ql_log(ql_log_info, vha, 0xd025, "EXLOGIN: requested size=0x%x\n",
- ha->exlogin_size);
-
- /* Get consistent memory for extended logins */
- ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
- ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
- if (!ha->exlogin_buf) {
- ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a,
+ temp *= size;
+
+ if (temp != ha->exlogin_size) {
+ qla2x00_free_exlogin_buffer(ha);
+ ha->exlogin_size = temp;
+
+ ql_log(ql_log_info, vha, 0xd024,
+ "EXLOGIN: max_logins=%d, portdb=0x%x, total=%d.\n",
+ max_cnt, size, temp);
+
+ ql_log(ql_log_info, vha, 0xd025,
+ "EXLOGIN: requested size=0x%x\n", ha->exlogin_size);
+
+ /* Get consistent memory for extended logins */
+ ha->exlogin_buf = dma_alloc_coherent(&ha->pdev->dev,
+ ha->exlogin_size, &ha->exlogin_buf_dma, GFP_KERNEL);
+ if (!ha->exlogin_buf) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0xd02a,
"Failed to allocate memory for exlogin_buf_dma.\n");
- return -ENOMEM;
+ return -ENOMEM;
+ }
}
/* Now configure the dma buffer */
rval = qla_set_exlogin_mem_cfg(vha, ha->exlogin_buf_dma);
if (rval) {
- ql_log(ql_log_fatal, vha, 0x00cf,
+ ql_log(ql_log_fatal, vha, 0xd033,
"Setup extended login buffer ****FAILED****.\n");
qla2x00_free_exlogin_buffer(ha);
}
@@ -4041,19 +4159,50 @@ qla2x00_free_exlogin_buffer(struct qla_hw_data *ha)
}
}
+static void
+qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
+{
+ u32 temp;
+ *ret_cnt = FW_DEF_EXCHANGES_CNT;
+
+ if (qla_ini_mode_enabled(vha)) {
+ if (ql2xiniexchg > max_cnt)
+ ql2xiniexchg = max_cnt;
+
+ if (ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
+ *ret_cnt = ql2xiniexchg;
+ } else if (qla_tgt_mode_enabled(vha)) {
+ if (ql2xexchoffld > max_cnt)
+ ql2xexchoffld = max_cnt;
+
+ if (ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
+ *ret_cnt = ql2xexchoffld;
+ } else if (qla_dual_mode_enabled(vha)) {
+ temp = ql2xiniexchg + ql2xexchoffld;
+ if (temp > max_cnt) {
+ ql2xiniexchg -= (temp - max_cnt)/2;
+ ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
+ temp = max_cnt;
+ }
+
+ if (temp > FW_DEF_EXCHANGES_CNT)
+ *ret_cnt = temp;
+ }
+}
+
int
qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
{
int rval;
- uint16_t size, max_cnt, temp;
+ u16 size, max_cnt;
+ u32 temp;
struct qla_hw_data *ha = vha->hw;
- /* Return if we don't need to alloacate any extended logins */
- if (!ql2xexchoffld)
+ if (!ha->flags.exchoffld_enabled)
return QLA_SUCCESS;
- ql_log(ql_log_info, vha, 0xd014,
- "Exchange offload count: %d.\n", ql2xexlogins);
+ if (!IS_EXCHG_OFFLD_CAPABLE(ha))
+ return QLA_SUCCESS;
max_cnt = 0;
rval = qla_get_exchoffld_status(vha, &size, &max_cnt);
@@ -4063,30 +4212,45 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
return rval;
}
- temp = (ql2xexchoffld > max_cnt) ? max_cnt : ql2xexchoffld;
- ha->exchoffld_size = (size * temp);
- ql_log(ql_log_info, vha, 0xd016,
- "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n",
- max_cnt, size, temp);
-
- ql_log(ql_log_info, vha, 0xd017,
- "Exchange Buffers requested size = 0x%x\n", ha->exchoffld_size);
+ qla2x00_number_of_exch(vha, &temp, max_cnt);
+ temp *= size;
- /* Get consistent memory for extended logins */
- ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev,
- ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
- if (!ha->exchoffld_buf) {
- ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
- "Failed to allocate memory for exchoffld_buf_dma.\n");
- return -ENOMEM;
+ if (temp != ha->exchoffld_size) {
+ qla2x00_free_exchoffld_buffer(ha);
+ ha->exchoffld_size = temp;
+
+ ql_log(ql_log_info, vha, 0xd016,
+ "Exchange offload: max_count=%d, buffers=0x%x, total=%d.\n",
+ max_cnt, size, temp);
+
+ ql_log(ql_log_info, vha, 0xd017,
+ "Exchange Buffers requested size = 0x%x\n",
+ ha->exchoffld_size);
+
+ /* Get consistent memory for extended logins */
+ ha->exchoffld_buf = dma_alloc_coherent(&ha->pdev->dev,
+ ha->exchoffld_size, &ha->exchoffld_buf_dma, GFP_KERNEL);
+ if (!ha->exchoffld_buf) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0xd013,
+ "Failed to allocate memory for exchoffld_buf_dma.\n");
+ return -ENOMEM;
+ }
}
/* Now configure the dma buffer */
- rval = qla_set_exchoffld_mem_cfg(vha, ha->exchoffld_buf_dma);
+ rval = qla_set_exchoffld_mem_cfg(vha);
if (rval) {
ql_log(ql_log_fatal, vha, 0xd02e,
"Setup exchange offload buffer ****FAILED****.\n");
qla2x00_free_exchoffld_buffer(ha);
+ } else {
+ /* re-adjust number of target exchange */
+ struct init_cb_81xx *icb = (struct init_cb_81xx *)ha->init_cb;
+
+ if (qla_ini_mode_enabled(vha))
+ icb->exchange_count = 0;
+ else
+ icb->exchange_count = cpu_to_le16(ql2xexchoffld);
}
return rval;
@@ -4292,6 +4456,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->plogi_ack_list);
INIT_LIST_HEAD(&vha->qp_list);
INIT_LIST_HEAD(&vha->gnl.fcports);
+ INIT_LIST_HEAD(&vha->nvme_rport_list);
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
@@ -4303,7 +4468,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev,
vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL);
if (!vha->gnl.l) {
- ql_log(ql_log_fatal, vha, 0xffff,
+ ql_log(ql_log_fatal, vha, 0xd04a,
"Alloc failed for name list.\n");
scsi_remove_host(vha->host);
return NULL;
@@ -4581,6 +4746,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
qla24xx_async_gpdb(vha, e->u.fcport.fcport,
e->u.fcport.opt);
break;
+ case QLA_EVT_PRLI:
+ qla24xx_async_prli(vha, e->u.fcport.fcport);
+ break;
case QLA_EVT_GPSC:
qla24xx_async_gpsc(vha, e->u.fcport.fcport);
break;
@@ -4620,7 +4788,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
fcport->login_retry--;
if (fcport->flags & FCF_FABRIC_DEVICE) {
- ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2108,
"%s %8phC DS %d LS %d\n", __func__,
fcport->port_name, fcport->disc_state,
fcport->fw_login_state);
@@ -5800,6 +5968,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
sp = req->outstanding_cmds[index];
if (!sp)
continue;
+ if (sp->cmd_type != TYPE_SRB)
+ continue;
if (sp->type != SRB_SCSI_CMD)
continue;
sfcp = sp->fcport;
@@ -5851,6 +6021,18 @@ qla2x00_timer(scsi_qla_host_t *vha)
if (!list_empty(&vha->work_list))
start_dpc++;
+ /*
+ * FC-NVME
+ * see if the active AEN count has changed from what was last reported.
+ */
+ if (atomic_read(&vha->nvme_active_aen_cnt) != vha->nvme_last_rptd_aen) {
+ vha->nvme_last_rptd_aen =
+ atomic_read(&vha->nvme_active_aen_cnt);
+ ql_log(ql_log_info, vha, 0x3002,
+ "reporting new aen count of %d to the fw\n",
+ vha->nvme_last_rptd_aen);
+ }
+
/* Schedule the DPC routine if needed */
if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index e766d8412384..2a0173e5d10e 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -59,13 +59,20 @@ MODULE_PARM_DESC(qlini_mode,
"when ready "
"\"enabled\" (default) - initiator mode will always stay enabled.");
-static int ql_dm_tgt_ex_pct = 50;
+static int ql_dm_tgt_ex_pct = 0;
module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
"For Dual Mode (qlini_mode=dual), this parameter determines "
"the percentage of exchanges/cmds FW will allocate resources "
"for Target mode.");
+int ql2xuctrlirq = 1;
+module_param(ql2xuctrlirq, int, 0644);
+MODULE_PARM_DESC(ql2xuctrlirq,
+ "User to control IRQ placement via smp_affinity."
+ "Valid with qlini_mode=disabled."
+ "1(default): enable");
+
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
static int temp_sam_status = SAM_STAT_BUSY;
@@ -110,18 +117,17 @@ enum fcp_resp_rsp_codes {
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
struct atio_from_isp *pkt, uint8_t);
-static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
+static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
+ response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
int fn, void *iocb, int flags);
-static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
+static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd
*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
-static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
- struct qla_tgt_cmd *cmd);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
-static void qlt_send_notify_ack(struct scsi_qla_host *vha,
+static void qlt_send_notify_ack(struct qla_qpair *qpair,
struct imm_ntfy_from_isp *ntfy,
uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
@@ -132,6 +138,8 @@ static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
struct abts_recv_from_24xx *);
+static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
+ uint16_t);
/*
* Global Variables
@@ -200,8 +208,8 @@ struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
host = btree_lookup32(&vha->hw->tgt.host_map, key);
if (!host)
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
- "Unable to find host %06x\n", key);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
+ "Unable to find host %06x\n", key);
return host;
}
@@ -245,26 +253,22 @@ static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
- struct atio_from_isp *atio, uint8_t ha_locked)
+ struct atio_from_isp *atio, uint8_t ha_locked)
{
struct qla_tgt_sess_op *u;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
unsigned long flags;
if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_async, vha, 0xffff,
- "qla_target(%d): dropping unknown ATIO_TYPE7, "
- "because tgt is being stopped", vha->vp_idx);
+ ql_dbg(ql_dbg_async, vha, 0x502c,
+ "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
+ vha->vp_idx);
goto out_term;
}
u = kzalloc(sizeof(*u), GFP_ATOMIC);
- if (u == NULL) {
- ql_dbg(ql_dbg_async, vha, 0xffff,
- "Alloc of struct unknown_atio (size %zd) failed", sizeof(*u));
- /* It should be harmless and on the next retry should work well */
+ if (u == NULL)
goto out_term;
- }
u->vha = vha;
memcpy(&u->atio, atio, sizeof(*atio));
@@ -280,7 +284,7 @@ out:
return;
out_term:
- qlt_send_term_exchange(vha, NULL, atio, ha_locked, 0);
+ qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
goto out;
}
@@ -295,29 +299,28 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
if (u->aborted) {
- ql_dbg(ql_dbg_async, vha, 0xffff,
- "Freeing unknown %s %p, because of Abort",
+ ql_dbg(ql_dbg_async, vha, 0x502e,
+ "Freeing unknown %s %p, because of Abort\n",
"ATIO_TYPE7", u);
- qlt_send_term_exchange(vha, NULL, &u->atio,
- ha_locked, 0);
+ qlt_send_term_exchange(vha->hw->base_qpair, NULL,
+ &u->atio, ha_locked, 0);
goto abort;
}
host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
if (host != NULL) {
- ql_dbg(ql_dbg_async, vha, 0xffff,
- "Requeuing unknown ATIO_TYPE7 %p", u);
+ ql_dbg(ql_dbg_async, vha, 0x502f,
+ "Requeuing unknown ATIO_TYPE7 %p\n", u);
qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
} else if (tgt->tgt_stop) {
- ql_dbg(ql_dbg_async, vha, 0xffff,
- "Freeing unknown %s %p, because tgt is being stopped",
- "ATIO_TYPE7", u);
- qlt_send_term_exchange(vha, NULL, &u->atio,
- ha_locked, 0);
+ ql_dbg(ql_dbg_async, vha, 0x503a,
+ "Freeing unknown %s %p, because tgt is being stopped\n",
+ "ATIO_TYPE7", u);
+ qlt_send_term_exchange(vha->hw->base_qpair, NULL,
+ &u->atio, ha_locked, 0);
} else {
- ql_dbg(ql_dbg_async, vha, 0xffff,
- "u %p, vha %p, host %p, sched again..", u,
- vha, host);
+ ql_dbg(ql_dbg_async, vha, 0x503d,
+ "Reschedule u %p, vha %p, host %p\n", u, vha, host);
if (!queued) {
queued = 1;
schedule_delayed_work(&vha->unknown_atio_work,
@@ -380,6 +383,8 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *entry =
(struct imm_ntfy_from_isp *)atio;
+ qlt_issue_marker(vha, ha_locked);
+
if ((entry->u.isp24.vp_index != 0xFF) &&
(entry->u.isp24.nport_handle != 0xFFFF)) {
host = qlt_find_host_by_vp_idx(vha,
@@ -411,7 +416,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
unsigned long flags;
if (unlikely(!host)) {
- ql_dbg(ql_dbg_tgt, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt, vha, 0xe00a,
"qla_target(%d): Response pkt (ABTS_RECV_24XX) "
"received, with unknown vp_index %d\n",
vha->vp_idx, entry->vp_index);
@@ -437,7 +442,8 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
return false;
}
-void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
+void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
+ struct rsp_que *rsp, response_t *pkt)
{
switch (pkt->entry_type) {
case CTIO_CRC2:
@@ -456,7 +462,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
vha->vp_idx, entry->vp_index);
break;
}
- qlt_response_pkt(host, pkt);
+ qlt_response_pkt(host, rsp, pkt);
break;
}
@@ -474,7 +480,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
vha->vp_idx, entry->u.isp24.vp_index);
break;
}
- qlt_response_pkt(host, pkt);
+ qlt_response_pkt(host, rsp, pkt);
break;
}
@@ -496,7 +502,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
break;
}
}
- qlt_response_pkt(host, pkt);
+ qlt_response_pkt(host, rsp, pkt);
break;
}
@@ -513,7 +519,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
"vp_index %d\n", vha->vp_idx, entry->vp_index);
break;
}
- qlt_response_pkt(host, pkt);
+ qlt_response_pkt(host, rsp, pkt);
break;
}
@@ -530,12 +536,12 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
"vp_index %d\n", vha->vp_idx, entry->vp_index);
break;
}
- qlt_response_pkt(host, pkt);
+ qlt_response_pkt(host, rsp, pkt);
break;
}
default:
- qlt_response_pkt(vha, pkt);
+ qlt_response_pkt(vha, rsp, pkt);
break;
}
@@ -565,13 +571,13 @@ void qla2x00_async_nack_sp_done(void *s, int res)
struct scsi_qla_host *vha = sp->vha;
unsigned long flags;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async done-%s res %x %8phC type %d\n",
- sp->name, res, sp->fcport->port_name, sp->type);
+ ql_dbg(ql_dbg_disc, vha, 0x20f2,
+ "Async done-%s res %x %8phC type %d\n",
+ sp->name, res, sp->fcport->port_name, sp->type);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
sp->fcport->flags &= ~FCF_ASYNC_SENT;
- sp->fcport->chip_reset = vha->hw->chip_reset;
+ sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
switch (sp->type) {
case SRB_NACK_PLOGI:
@@ -593,19 +599,19 @@ void qla2x00_async_nack_sp_done(void *s, int res)
if (!IS_IIDMA_CAPABLE(vha->hw) ||
!vha->hw->flags.gpsc_supported) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__,
- sp->fcport->port_name,
- vha->fcport_count);
+ ql_dbg(ql_dbg_disc, vha, 0x20f3,
+ "%s %d %8phC post upd_fcport fcp_cnt %d\n",
+ __func__, __LINE__,
+ sp->fcport->port_name,
+ vha->fcport_count);
qla24xx_post_upd_fcport_work(vha, sp->fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post gpsc fcp_cnt %d\n",
- __func__, __LINE__,
- sp->fcport->port_name,
- vha->fcport_count);
+ ql_dbg(ql_dbg_disc, vha, 0x20f5,
+ "%s %d %8phC post gpsc fcp_cnt %d\n",
+ __func__, __LINE__,
+ sp->fcport->port_name,
+ vha->fcport_count);
qla24xx_post_gpsc_work(vha, sp->fcport);
}
@@ -664,9 +670,9 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
if (rval != QLA_SUCCESS)
goto done_free_sp;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s %8phC hndl %x %s\n",
- sp->name, fcport->port_name, sp->handle, c);
+ ql_dbg(ql_dbg_disc, vha, 0x20f4,
+ "Async-%s %8phC hndl %x %s\n",
+ sp->name, fcport->port_name, sp->handle, c);
return rval;
@@ -688,7 +694,7 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
t = qlt_create_sess(vha, e->u.nack.fcport, 0);
mutex_unlock(&vha->vha_tgt.tgt_mutex);
if (t) {
- ql_log(ql_log_info, vha, 0xffff,
+ ql_log(ql_log_info, vha, 0xd034,
"%s create sess success %p", __func__, t);
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
/* create sess has an extra kref */
@@ -757,7 +763,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
}
if (!kref_get_unless_zero(&sess->sess_kref)) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x2107,
"%s: kref_get fail sess %8phC \n",
__func__, sess->port_name);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
@@ -957,7 +963,6 @@ static void qlt_free_session_done(struct work_struct *work)
sess->logout_on_delete, sess->keep_nport_handle,
sess->send_els_logo);
-
if (!IS_SW_RESV_ADDR(sess->d_id)) {
if (sess->send_els_logo) {
qlt_port_logo_t logo;
@@ -1026,7 +1031,7 @@ static void qlt_free_session_done(struct work_struct *work)
sess->login_succ = 0;
}
- if (sess->chip_reset != sess->vha->hw->chip_reset)
+ if (sess->chip_reset != ha->base_qpair->chip_reset)
qla2x00_clear_loop_id(sess);
if (sess->conflict) {
@@ -1098,7 +1103,7 @@ void qlt_unreg_sess(struct fc_port *sess)
{
struct scsi_qla_host *vha = sess->vha;
- ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
+ ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
"%s sess %p for deletion %8phC\n",
__func__, sess, sess->port_name);
@@ -1112,6 +1117,9 @@ void qlt_unreg_sess(struct fc_port *sess)
sess->last_rscn_gen = sess->rscn_gen;
sess->last_login_gen = sess->login_gen;
+ if (sess->nvme_flag & NVME_FLAG_REGISTERED)
+ schedule_work(&sess->nvme_del_work);
+
INIT_WORK(&sess->free_work, qlt_free_session_done);
schedule_work(&sess->free_work);
}
@@ -1156,7 +1164,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
- if (sess->chip_reset != sess->vha->hw->chip_reset) {
+ if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
sess->logout_on_delete = 0;
sess->logo_ack_needed = 0;
sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
@@ -1288,7 +1296,7 @@ static struct fc_port *qlt_create_sess(
if (fcport->se_sess) {
if (!kref_get_unless_zero(&sess->sess_kref)) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20f6,
"%s: kref_get_unless_zero failed for %8phC\n",
__func__, sess->port_name);
return NULL;
@@ -1310,7 +1318,7 @@ static struct fc_port *qlt_create_sess(
if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
&fcport->port_name[0], sess) < 0) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
"(%d) %8phC check_initiator_node_acl failed\n",
vha->vp_idx, fcport->port_name);
return NULL;
@@ -1321,7 +1329,7 @@ static struct fc_port *qlt_create_sess(
* fc_port access across ->tgt.sess_lock reaquire.
*/
if (!kref_get_unless_zero(&sess->sess_kref)) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20f7,
"%s: kref_get_unless_zero failed for %8phC\n",
__func__, sess->port_name);
return NULL;
@@ -1432,6 +1440,8 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
if (npiv_vports) {
mutex_unlock(&qla_tgt_mutex);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
+ "NPIV is in use. Can not stop target\n");
return -EPERM;
}
}
@@ -1442,7 +1452,7 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
return -EPERM;
}
- ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
vha->host_no, vha);
/*
* Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
@@ -1485,9 +1495,7 @@ EXPORT_SYMBOL(qlt_stop_phase1);
/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
- struct qla_hw_data *ha = tgt->ha;
- scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
- unsigned long flags;
+ scsi_qla_host_t *vha = tgt->vha;
if (tgt->tgt_stopped) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
@@ -1495,24 +1503,19 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
dump_stack();
return;
}
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
- "Waiting for %d IRQ commands to complete (tgt %p)",
- tgt->irq_cmd_count, tgt);
+ if (!tgt->tgt_stop) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
+ "%s: phase1 stop is not completed\n", __func__);
+ dump_stack();
+ return;
+ }
mutex_lock(&vha->vha_tgt.tgt_mutex);
- spin_lock_irqsave(&ha->hardware_lock, flags);
- while ((tgt->irq_cmd_count != 0) || (tgt->atio_irq_cmd_count != 0)) {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- udelay(2);
- spin_lock_irqsave(&ha->hardware_lock, flags);
- }
tgt->tgt_stop = 0;
tgt->tgt_stopped = 1;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
mutex_unlock(&vha->vha_tgt.tgt_mutex);
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
tgt);
}
EXPORT_SYMBOL(qlt_stop_phase2);
@@ -1521,10 +1524,36 @@ EXPORT_SYMBOL(qlt_stop_phase2);
static void qlt_release(struct qla_tgt *tgt)
{
scsi_qla_host_t *vha = tgt->vha;
+ void *node;
+ u64 key = 0;
+ u16 i;
+ struct qla_qpair_hint *h;
+
+ if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop &&
+ !tgt->tgt_stopped)
+ qlt_stop_phase1(tgt);
if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
qlt_stop_phase2(tgt);
+ for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
+ unsigned long flags;
+
+ h = &tgt->qphints[i];
+ if (h->qpair) {
+ spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
+ list_del(&h->hint_elem);
+ spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
+ h->qpair = NULL;
+ }
+ }
+ kfree(tgt->qphints);
+
+ btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
+ btree_remove64(&tgt->lun_qpair_map, key);
+
+ btree_destroy64(&tgt->lun_qpair_map);
+
vha->vha_tgt.qla_tgt = NULL;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
@@ -1568,11 +1597,12 @@ static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
-static void qlt_send_notify_ack(struct scsi_qla_host *vha,
+static void qlt_send_notify_ack(struct qla_qpair *qpair,
struct imm_ntfy_from_isp *ntfy,
uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
+ struct scsi_qla_host *vha = qpair->vha;
struct qla_hw_data *ha = vha->hw;
request_t *pkt;
struct nack_to_isp *nack;
@@ -1582,11 +1612,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
- /* Send marker if required */
- if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
- return;
-
- pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+ pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
if (!pkt) {
ql_dbg(ql_dbg_tgt, vha, 0xe049,
"qla_target(%d): %s failed: unable to allocate "
@@ -1627,16 +1653,17 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
/* Memory Barrier */
wmb();
- qla2x00_start_iocbs(vha, vha->req);
+ qla2x00_start_iocbs(vha, qpair->req);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
-static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
+static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
struct abts_recv_from_24xx *abts, uint32_t status,
bool ids_reversed)
{
+ struct scsi_qla_host *vha = qpair->vha;
struct qla_hw_data *ha = vha->hw;
struct abts_resp_to_24xx *resp;
uint32_t f_ctl;
@@ -1646,11 +1673,8 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
"Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
ha, abts, status);
- /* Send marker if required */
- if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
- return;
-
- resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
+ resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
+ NULL);
if (!resp) {
ql_dbg(ql_dbg_tgt, vha, 0xe04a,
"qla_target(%d): %s failed: unable to allocate "
@@ -1706,7 +1730,10 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
/* Memory Barrier */
wmb();
- qla2x00_start_iocbs(vha, vha->req);
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(vha, qpair->req);
}
/*
@@ -1719,11 +1746,9 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt, vha, 0xe007,
"Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
- /* Send marker if required */
- if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
- return;
- ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(
+ vha->hw->base_qpair, NULL);
if (ctio == NULL) {
ql_dbg(ql_dbg_tgt, vha, 0xe04b,
"qla_target(%d): %s failed: unable to allocate "
@@ -1754,7 +1779,8 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
wmb();
qla2x00_start_iocbs(vha, vha->req);
- qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
+ qlt_24xx_send_abts_resp(vha->hw->base_qpair,
+ (struct abts_recv_from_24xx *)entry,
FCP_TMF_CMPL, true);
}
@@ -1762,13 +1788,13 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
{
struct qla_tgt_sess_op *op;
struct qla_tgt_cmd *cmd;
+ unsigned long flags;
- spin_lock(&vha->cmd_list_lock);
-
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
if (tag == op->atio.u.isp24.exchange_addr) {
op->aborted = true;
- spin_unlock(&vha->cmd_list_lock);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
return 1;
}
}
@@ -1776,7 +1802,7 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
if (tag == op->atio.u.isp24.exchange_addr) {
op->aborted = true;
- spin_unlock(&vha->cmd_list_lock);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
return 1;
}
}
@@ -1784,12 +1810,12 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
if (tag == cmd->atio.u.isp24.exchange_addr) {
cmd->aborted = 1;
- spin_unlock(&vha->cmd_list_lock);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
return 1;
}
}
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
- spin_unlock(&vha->cmd_list_lock);
return 0;
}
@@ -1799,17 +1825,18 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
* for the same lun)
*/
static void abort_cmds_for_lun(struct scsi_qla_host *vha,
- uint32_t lun, uint8_t *s_id)
+ u64 lun, uint8_t *s_id)
{
struct qla_tgt_sess_op *op;
struct qla_tgt_cmd *cmd;
uint32_t key;
+ unsigned long flags;
key = sid_to_key(s_id);
- spin_lock(&vha->cmd_list_lock);
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
uint32_t op_key;
- uint32_t op_lun;
+ u64 op_lun;
op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
op_lun = scsilun_to_int(
@@ -1831,7 +1858,7 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha,
list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
uint32_t cmd_key;
- uint32_t cmd_lun;
+ u64 cmd_lun;
cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
cmd_lun = scsilun_to_int(
@@ -1839,7 +1866,7 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha,
if (cmd_key == key && cmd_lun == lun)
cmd->aborted = 1;
}
- spin_unlock(&vha->cmd_list_lock);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
}
/* ha->hardware_lock supposed to be held on entry */
@@ -1849,18 +1876,15 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct se_session *se_sess = sess->se_sess;
struct qla_tgt_mgmt_cmd *mcmd;
+ struct qla_tgt_cmd *cmd;
struct se_cmd *se_cmd;
- u32 lun = 0;
int rc;
bool found_lun = false;
unsigned long flags;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
- struct qla_tgt_cmd *cmd =
- container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
if (se_cmd->tag == abts->exchange_addr_to_abort) {
- lun = cmd->unpacked_lun;
found_lun = true;
break;
}
@@ -1871,7 +1895,8 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
if (!found_lun) {
if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
/* send TASK_ABORT response immediately */
- qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
+ qlt_24xx_send_abts_resp(ha->base_qpair, abts,
+ FCP_TMF_CMPL, false);
return 0;
} else {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
@@ -1894,12 +1919,14 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
}
memset(mcmd, 0, sizeof(*mcmd));
+ cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
mcmd->sess = sess;
memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
- mcmd->reset_count = vha->hw->chip_reset;
+ mcmd->reset_count = ha->base_qpair->chip_reset;
mcmd->tmr_func = QLA_TGT_ABTS;
+ mcmd->qpair = ha->base_qpair;
- rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func,
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, cmd->unpacked_lun, mcmd->tmr_func,
abts->exchange_addr_to_abort);
if (rc != 0) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
@@ -1929,7 +1956,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
"qla_target(%d): ABTS: Abort Sequence not "
"supported\n", vha->vp_idx);
- qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
+ false);
return;
}
@@ -1937,7 +1965,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
"qla_target(%d): ABTS: Unknown Exchange "
"Address received\n", vha->vp_idx);
- qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
+ false);
return;
}
@@ -1963,8 +1992,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
if (rc != 0) {
- qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
- false);
+ qlt_24xx_send_abts_resp(ha->base_qpair, abts,
+ FCP_TMF_REJECTED, false);
}
return;
}
@@ -1972,7 +2001,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
if (sess->deleted) {
- qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
+ false);
return;
}
@@ -1981,7 +2011,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
"qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
vha->vp_idx, rc);
- qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
+ false);
return;
}
}
@@ -1989,9 +2020,10 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
-static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
+static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
{
+ struct scsi_qla_host *ha = qpair->vha;
struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
struct ctio7_to_24xx *ctio;
uint16_t temp;
@@ -2000,11 +2032,8 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
"Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
ha, atio, resp_code);
- /* Send marker if required */
- if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
- return;
- ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
+ ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
if (ctio == NULL) {
ql_dbg(ql_dbg_tgt, ha, 0xe04c,
"qla_target(%d): %s failed: unable to allocate "
@@ -2022,8 +2051,9 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
ctio->exchange_addr = atio->u.isp24.exchange_addr;
- ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
- cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
+ temp = (atio->u.isp24.attr << 9)|
+ CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
+ ctio->u.status1.flags = cpu_to_le16(temp);
temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
ctio->u.status1.ox_id = cpu_to_le16(temp);
ctio->u.status1.scsi_status =
@@ -2033,7 +2063,10 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
/* Memory Barrier */
wmb();
- qla2x00_start_iocbs(ha, ha->req);
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(ha, qpair->req);
}
void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
@@ -2046,12 +2079,13 @@ EXPORT_SYMBOL(qlt_free_mcmd);
* ha->hardware_lock supposed to be held on entry. Might drop it, then
* reacquire
*/
-void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
+void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
{
struct atio_from_isp *atio = &cmd->atio;
struct ctio7_to_24xx *ctio;
uint16_t temp;
+ struct scsi_qla_host *vha = cmd->vha;
ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
"Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
@@ -2076,8 +2110,9 @@ void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
ctio->exchange_addr = atio->u.isp24.exchange_addr;
- ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
- cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
+ temp = (atio->u.isp24.attr << 9) |
+ CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
+ ctio->u.status1.flags = cpu_to_le16(temp);
temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
ctio->u.status1.ox_id = cpu_to_le16(temp);
ctio->u.status1.scsi_status =
@@ -2101,7 +2136,11 @@ void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
/* Memory Barrier */
wmb();
- qla2x00_start_iocbs(vha, vha->req);
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(vha, qpair->req);
+
out:
return;
}
@@ -2112,14 +2151,15 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
struct scsi_qla_host *vha = mcmd->sess->vha;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
+ struct qla_qpair *qpair = mcmd->qpair;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
"TM response mcmd (%p) status %#x state %#x",
mcmd, mcmd->fc_tm_rsp, mcmd->flags);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
- if (!vha->flags.online || mcmd->reset_count != ha->chip_reset) {
+ if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
/*
* Either the port is not online or this request was from
* previous life, just abort the processing.
@@ -2127,9 +2167,9 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
ql_dbg(ql_dbg_async, vha, 0xe100,
"RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
vha->flags.online, qla2x00_reset_active(vha),
- mcmd->reset_count, ha->chip_reset);
+ mcmd->reset_count, qpair->chip_reset);
ha->tgt.tgt_ops->free_mcmd(mcmd);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return;
}
@@ -2140,21 +2180,21 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
ELS_PRLO ||
mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
ELS_TPRLO) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x2106,
"TM response logo %phC status %#x state %#x",
mcmd->sess->port_name, mcmd->fc_tm_rsp,
mcmd->flags);
qlt_schedule_sess_for_deletion_lock(mcmd->sess);
} else {
- qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
- 0, 0, 0, 0, 0, 0);
+ qlt_send_notify_ack(vha->hw->base_qpair,
+ &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
}
} else {
if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
- qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
+ qlt_24xx_send_abts_resp(qpair, &mcmd->orig_iocb.abts,
mcmd->fc_tm_rsp, false);
else
- qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
+ qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
mcmd->fc_tm_rsp);
}
/*
@@ -2166,7 +2206,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
* qlt_xmit_tm_rsp() returns here..
*/
ha->tgt.tgt_ops->free_mcmd(mcmd);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
@@ -2178,7 +2218,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
BUG_ON(cmd->sg_cnt == 0);
prm->sg = (struct scatterlist *)cmd->sg;
- prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
+ prm->seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->sg,
cmd->sg_cnt, cmd->dma_data_direction);
if (unlikely(prm->seg_cnt == 0))
goto out_err;
@@ -2190,10 +2230,10 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
* If greater than four sg entries then we need to allocate
* the continuation entries
*/
- if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
+ if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
- prm->tgt->datasegs_per_cmd,
- prm->tgt->datasegs_per_cont);
+ QLA_TGT_DATASEGS_PER_CMD_24XX,
+ QLA_TGT_DATASEGS_PER_CONT_24XX);
} else {
/* DIF */
if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
@@ -2205,7 +2245,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
if (cmd->prot_sg_cnt) {
prm->prot_sg = cmd->prot_sg;
- prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
+ prm->prot_seg_cnt = pci_map_sg(cmd->qpair->pdev,
cmd->prot_sg, cmd->prot_sg_cnt,
cmd->dma_data_direction);
if (unlikely(prm->prot_seg_cnt == 0))
@@ -2225,7 +2265,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
return 0;
out_err:
- ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
+ ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
"qla_target(%d): PCI mapping failed: sg_cnt=%d",
0, prm->cmd->sg_cnt);
return -1;
@@ -2233,53 +2273,50 @@ out_err:
static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
- struct qla_hw_data *ha = vha->hw;
-
+ struct qla_hw_data *ha;
+ struct qla_qpair *qpair;
if (!cmd->sg_mapped)
return;
- pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+ qpair = cmd->qpair;
+
+ pci_unmap_sg(qpair->pdev, cmd->sg, cmd->sg_cnt,
+ cmd->dma_data_direction);
cmd->sg_mapped = 0;
if (cmd->prot_sg_cnt)
- pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
+ pci_unmap_sg(qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
cmd->dma_data_direction);
if (!cmd->ctx)
return;
-
+ ha = vha->hw;
if (cmd->ctx_dsd_alloced)
qla2x00_clean_dsd_pool(ha, cmd->ctx);
dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
}
-static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
+static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
uint32_t req_cnt)
{
- uint32_t cnt, cnt_in;
+ uint32_t cnt;
+ struct req_que *req = qpair->req;
- if (vha->req->cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
- cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
+ RD_REG_DWORD_RELAXED(req->req_q_out));
- if (vha->req->ring_index < cnt)
- vha->req->cnt = cnt - vha->req->ring_index;
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
else
- vha->req->cnt = vha->req->length -
- (vha->req->ring_index - cnt);
-
- if (unlikely(vha->req->cnt < (req_cnt + 2))) {
- ql_dbg(ql_dbg_io, vha, 0x305a,
- "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
- vha->vp_idx, vha->req->ring_index,
- vha->req->cnt, req_cnt, cnt, cnt_in,
- vha->req->length);
+ req->cnt = req->length - (req->ring_index - cnt);
+
+ if (unlikely(req->cnt < (req_cnt + 2)))
return -EAGAIN;
- }
}
- vha->req->cnt -= req_cnt;
+ req->cnt -= req_cnt;
return 0;
}
@@ -2287,67 +2324,73 @@ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
-static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
+static inline void *qlt_get_req_pkt(struct req_que *req)
{
/* Adjust ring index. */
- vha->req->ring_index++;
- if (vha->req->ring_index == vha->req->length) {
- vha->req->ring_index = 0;
- vha->req->ring_ptr = vha->req->ring;
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
} else {
- vha->req->ring_ptr++;
+ req->ring_ptr++;
}
- return (cont_entry_t *)vha->req->ring_ptr;
+ return (cont_entry_t *)req->ring_ptr;
}
/* ha->hardware_lock supposed to be held on entry */
-static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
+static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
{
- struct qla_hw_data *ha = vha->hw;
uint32_t h;
+ int index;
+ uint8_t found = 0;
+ struct req_que *req = qpair->req;
+
+ h = req->current_outstanding_cmd;
+
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ h++;
+ if (h == req->num_outstanding_cmds)
+ h = 1;
- h = ha->tgt.current_handle;
- /* always increment cmd handle */
- do {
- ++h;
- if (h > DEFAULT_OUTSTANDING_COMMANDS)
- h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
- if (h == ha->tgt.current_handle) {
- ql_dbg(ql_dbg_io, vha, 0x305b,
- "qla_target(%d): Ran out of "
- "empty cmd slots in ha %p\n", vha->vp_idx, ha);
- h = QLA_TGT_NULL_HANDLE;
+ if (h == QLA_TGT_SKIP_HANDLE)
+ continue;
+
+ if (!req->outstanding_cmds[h]) {
+ found = 1;
break;
}
- } while ((h == QLA_TGT_NULL_HANDLE) ||
- (h == QLA_TGT_SKIP_HANDLE) ||
- (ha->tgt.cmds[h-1] != NULL));
+ }
- if (h != QLA_TGT_NULL_HANDLE)
- ha->tgt.current_handle = h;
+ if (found) {
+ req->current_outstanding_cmd = h;
+ } else {
+ ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
+ "qla_target(%d): Ran out of empty cmd slots\n",
+ qpair->vha->vp_idx);
+ h = QLA_TGT_NULL_HANDLE;
+ }
return h;
}
/* ha->hardware_lock supposed to be held on entry */
-static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
- struct scsi_qla_host *vha)
+static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
+ struct qla_tgt_prm *prm)
{
uint32_t h;
struct ctio7_to_24xx *pkt;
- struct qla_hw_data *ha = vha->hw;
struct atio_from_isp *atio = &prm->cmd->atio;
uint16_t temp;
- pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
+ pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
prm->pkt = pkt;
memset(pkt, 0, sizeof(*pkt));
pkt->entry_type = CTIO_TYPE7;
pkt->entry_count = (uint8_t)prm->req_cnt;
- pkt->vp_index = vha->vp_idx;
+ pkt->vp_index = prm->cmd->vp_idx;
- h = qlt_make_handle(vha);
+ h = qlt_make_handle(qpair);
if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
/*
* CTIO type 7 from the firmware doesn't provide a way to
@@ -2356,16 +2399,18 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
*/
return -EAGAIN;
} else
- ha->tgt.cmds[h - 1] = prm->cmd;
+ qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
- pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
- pkt->nport_handle = prm->cmd->loop_id;
+ pkt->handle = MAKE_HANDLE(qpair->req->id, h);
+ pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
+ pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
pkt->exchange_addr = atio->u.isp24.exchange_addr;
- pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
+ temp = atio->u.isp24.attr << 9;
+ pkt->u.status0.flags |= cpu_to_le16(temp);
temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
pkt->u.status0.ox_id = cpu_to_le16(temp);
pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
@@ -2377,17 +2422,16 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
* ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is a sufficient number of request entries to not drop it.
*/
-static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
- struct scsi_qla_host *vha)
+static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
{
int cnt;
uint32_t *dword_ptr;
- int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
/* Build continuation packets */
while (prm->seg_cnt > 0) {
cont_a64_entry_t *cont_pkt64 =
- (cont_a64_entry_t *)qlt_get_req_pkt(vha);
+ (cont_a64_entry_t *)qlt_get_req_pkt(
+ prm->cmd->qpair->req);
/*
* Make sure that from cont_pkt64 none of
@@ -2401,30 +2445,18 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
cont_pkt64->entry_count = 1;
cont_pkt64->sys_define = 0;
- if (enable_64bit_addressing) {
- cont_pkt64->entry_type = CONTINUE_A64_TYPE;
- dword_ptr =
- (uint32_t *)&cont_pkt64->dseg_0_address;
- } else {
- cont_pkt64->entry_type = CONTINUE_TYPE;
- dword_ptr =
- (uint32_t *)&((cont_entry_t *)
- cont_pkt64)->dseg_0_address;
- }
+ cont_pkt64->entry_type = CONTINUE_A64_TYPE;
+ dword_ptr = (uint32_t *)&cont_pkt64->dseg_0_address;
/* Load continuation entry data segments */
for (cnt = 0;
- cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
+ cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
cnt++, prm->seg_cnt--) {
*dword_ptr++ =
cpu_to_le32(pci_dma_lo32
(sg_dma_address(prm->sg)));
- if (enable_64bit_addressing) {
- *dword_ptr++ =
- cpu_to_le32(pci_dma_hi32
- (sg_dma_address
- (prm->sg)));
- }
+ *dword_ptr++ = cpu_to_le32(pci_dma_hi32
+ (sg_dma_address(prm->sg)));
*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
prm->sg = sg_next(prm->sg);
@@ -2436,12 +2468,10 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
* ha->hardware_lock supposed to be held on entry. We have already made sure
 * that there is a sufficient number of request entries to not drop it.
*/
-static void qlt_load_data_segments(struct qla_tgt_prm *prm,
- struct scsi_qla_host *vha)
+static void qlt_load_data_segments(struct qla_tgt_prm *prm)
{
int cnt;
uint32_t *dword_ptr;
- int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
@@ -2464,21 +2494,20 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm,
/* Load command entry data segments */
for (cnt = 0;
- (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
+ (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
cnt++, prm->seg_cnt--) {
*dword_ptr++ =
cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
- if (enable_64bit_addressing) {
- *dword_ptr++ =
- cpu_to_le32(pci_dma_hi32(
- sg_dma_address(prm->sg)));
- }
+
+ *dword_ptr++ = cpu_to_le32(pci_dma_hi32(
+ sg_dma_address(prm->sg)));
+
*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
prm->sg = sg_next(prm->sg);
}
- qlt_load_cont_data_segments(prm, vha);
+ qlt_load_cont_data_segments(prm);
}
static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
@@ -2498,35 +2527,35 @@ static void qlt_print_dif_err(struct qla_tgt_prm *prm)
/* ASCQ */
switch (prm->sense_buffer[13]) {
case 1:
- ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
"BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
"se_cmd=%p tag[%x]",
cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
cmd->atio.u.isp24.exchange_addr);
break;
case 2:
- ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
"BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
"se_cmd=%p tag[%x]",
cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
cmd->atio.u.isp24.exchange_addr);
break;
case 3:
- ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
"BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
"se_cmd=%p tag[%x]",
cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
cmd->atio.u.isp24.exchange_addr);
break;
default:
- ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
"BE detected Dif ERR: lba[%llx|%lld] len[%x] "
"se_cmd=%p tag[%x]",
cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
cmd->atio.u.isp24.exchange_addr);
break;
}
- ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xffff, cmd->cdb, 16);
+ ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
}
}
@@ -2537,24 +2566,23 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
uint32_t *full_req_cnt)
{
- struct qla_tgt *tgt = cmd->tgt;
- struct scsi_qla_host *vha = tgt->vha;
- struct qla_hw_data *ha = vha->hw;
struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct qla_qpair *qpair = cmd->qpair;
prm->cmd = cmd;
- prm->tgt = tgt;
+ prm->tgt = cmd->tgt;
+ prm->pkt = NULL;
prm->rq_result = scsi_status;
prm->sense_buffer = &cmd->sense_buffer[0];
prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
prm->sg = NULL;
prm->seg_cnt = -1;
prm->req_cnt = 1;
+ prm->residual = 0;
prm->add_status_pkt = 0;
-
- /* Send marker if required */
- if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
- return -EFAULT;
+ prm->prot_sg = NULL;
+ prm->prot_seg_cnt = 0;
+ prm->tot_dsds = 0;
if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
if (qlt_pci_map_calc_cnt(prm) != 0)
@@ -2565,7 +2593,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
prm->residual = se_cmd->residual_count;
- ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
+ ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
"Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
prm->residual, se_cmd->tag,
se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
@@ -2573,7 +2601,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
prm->rq_result |= SS_RESIDUAL_UNDER;
} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
prm->residual = se_cmd->residual_count;
- ql_dbg(ql_dbg_io, vha, 0x305d,
+ ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
"Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
@@ -2587,7 +2615,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
*/
if (qlt_has_data(cmd)) {
if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
- (IS_FWI2_CAPABLE(ha) &&
+ (IS_FWI2_CAPABLE(cmd->vha->hw) &&
(prm->rq_result != 0))) {
prm->add_status_pkt = 1;
(*full_req_cnt)++;
@@ -2598,17 +2626,17 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
return 0;
}
-static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
- struct qla_tgt_cmd *cmd, int sending_sense)
+static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
+ int sending_sense)
{
- if (ha->tgt.enable_class_2)
+ if (cmd->qpair->enable_class_2)
return 0;
if (sending_sense)
return cmd->conf_compl_supported;
else
- return ha->tgt.enable_explicit_conf &&
- cmd->conf_compl_supported;
+ return cmd->qpair->enable_explicit_conf &&
+ cmd->conf_compl_supported;
}
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
@@ -2617,7 +2645,7 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
(uint32_t)sizeof(ctio->u.status1.sense_data));
ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
- if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
+ if (qlt_need_explicit_conf(prm->cmd, 0)) {
ctio->u.status0.flags |= cpu_to_le16(
CTIO7_FLAGS_EXPLICIT_CONFORM |
CTIO7_FLAGS_CONFORM_REQ);
@@ -2627,9 +2655,9 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
int i;
- if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
+ if (qlt_need_explicit_conf(prm->cmd, 1)) {
if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
- ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
+ ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
"Skipping EXPLICIT_CONFORM and "
"CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
"non GOOD status\n");
@@ -2797,7 +2825,7 @@ qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
}
static inline int
-qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
+qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
{
uint32_t *cur_dsd;
uint32_t transfer_length = 0;
@@ -2816,16 +2844,17 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
struct atio_from_isp *atio = &prm->cmd->atio;
struct qla_tc_param tc;
uint16_t t16;
+ scsi_qla_host_t *vha = cmd->vha;
ha = vha->hw;
- pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
+ pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
prm->pkt = pkt;
memset(pkt, 0, sizeof(*pkt));
- ql_dbg(ql_dbg_tgt, vha, 0xe071,
+ ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
"qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
- vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
+ cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
@@ -2888,9 +2917,9 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
/* Update entry type to indicate Command Type CRC_2 IOCB */
pkt->entry_type = CTIO_CRC2;
pkt->entry_count = 1;
- pkt->vp_index = vha->vp_idx;
+ pkt->vp_index = cmd->vp_idx;
- h = qlt_make_handle(vha);
+ h = qlt_make_handle(qpair);
if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
/*
* CTIO type 7 from the firmware doesn't provide a way to
@@ -2899,9 +2928,10 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
*/
return -EAGAIN;
} else
- ha->tgt.cmds[h-1] = prm->cmd;
+ qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
- pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
+ pkt->handle = MAKE_HANDLE(qpair->req->id, h);
+ pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
@@ -3005,7 +3035,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
crc_queuing_error:
/* Cleanup will be performed by the caller */
- vha->hw->tgt.cmds[h - 1] = NULL;
+ qpair->req->outstanding_cmds[h] = NULL;
return QLA_FUNCTION_FAILED;
}
@@ -3018,33 +3048,28 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
uint8_t scsi_status)
{
struct scsi_qla_host *vha = cmd->vha;
- struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair = cmd->qpair;
struct ctio7_to_24xx *pkt;
struct qla_tgt_prm prm;
uint32_t full_req_cnt = 0;
unsigned long flags = 0;
int res;
- spin_lock_irqsave(&ha->hardware_lock, flags);
if (cmd->sess && cmd->sess->deleted) {
cmd->state = QLA_TGT_STATE_PROCESSED;
if (cmd->sess->logout_completed)
/* no need to terminate. FW already freed exchange. */
qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
else
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 0, 0);
return 0;
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- memset(&prm, 0, sizeof(prm));
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
- "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
+ ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
+ "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
(xmit_type & QLA_TGT_XMIT_STATUS) ?
1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
- &cmd->se_cmd);
+ &cmd->se_cmd, qpair->id);
res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
&full_req_cnt);
@@ -3052,39 +3077,39 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
return res;
}
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
if (xmit_type == QLA_TGT_XMIT_STATUS)
- vha->tgt_counters.core_qla_snd_status++;
+ qpair->tgt_counters.core_qla_snd_status++;
else
- vha->tgt_counters.core_qla_que_buf++;
+ qpair->tgt_counters.core_qla_que_buf++;
- if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) {
+ if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
/*
* Either the port is not online or this request was from
* previous life, just abort the processing.
*/
cmd->state = QLA_TGT_STATE_PROCESSED;
qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
- ql_dbg(ql_dbg_async, vha, 0xe101,
+ ql_dbg_qp(ql_dbg_async, qpair, 0xe101,
"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
vha->flags.online, qla2x00_reset_active(vha),
- cmd->reset_count, ha->chip_reset);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ cmd->reset_count, qpair->chip_reset);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return 0;
}
 /* Does F/W have IOCBs for this request */
- res = qlt_check_reserve_free_req(vha, full_req_cnt);
+ res = qlt_check_reserve_free_req(qpair, full_req_cnt);
if (unlikely(res))
goto out_unmap_unlock;
if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
- res = qlt_build_ctio_crc2_pkt(&prm, vha);
+ res = qlt_build_ctio_crc2_pkt(qpair, &prm);
else
- res = qlt_24xx_build_ctio_pkt(&prm, vha);
+ res = qlt_24xx_build_ctio_pkt(qpair, &prm);
if (unlikely(res != 0)) {
- vha->req->cnt += full_req_cnt;
+ qpair->req->cnt += full_req_cnt;
goto out_unmap_unlock;
}
@@ -3096,7 +3121,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
CTIO7_FLAGS_STATUS_MODE_0);
if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
- qlt_load_data_segments(&prm, vha);
+ qlt_load_data_segments(&prm);
if (prm.add_status_pkt == 0) {
if (xmit_type & QLA_TGT_XMIT_STATUS) {
@@ -3106,7 +3131,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
cpu_to_le32(prm.residual);
pkt->u.status0.flags |= cpu_to_le16(
CTIO7_FLAGS_SEND_STATUS);
- if (qlt_need_explicit_conf(ha, cmd, 0)) {
+ if (qlt_need_explicit_conf(cmd, 0)) {
pkt->u.status0.flags |=
cpu_to_le16(
CTIO7_FLAGS_EXPLICIT_CONFORM |
@@ -3121,9 +3146,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
* req_pkt().
*/
struct ctio7_to_24xx *ctio =
- (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
+ (struct ctio7_to_24xx *)qlt_get_req_pkt(
+ qpair->req);
- ql_dbg(ql_dbg_io, vha, 0x305e,
+ ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
"Building additional status packet 0x%p.\n",
ctio);
@@ -3150,7 +3176,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
*/
qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
&prm);
- pr_debug("Status CTIO7: %p\n", ctio);
}
} else
qlt_24xx_init_ctio_to_isp(pkt, &prm);
@@ -3161,14 +3186,17 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
/* Memory Barrier */
wmb();
- qla2x00_start_iocbs(vha, vha->req);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(vha, qpair->req);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return 0;
out_unmap_unlock:
qlt_unmap_sg(vha, cmd);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return res;
}
@@ -3178,11 +3206,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
{
struct ctio7_to_24xx *pkt;
struct scsi_qla_host *vha = cmd->vha;
- struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = cmd->tgt;
struct qla_tgt_prm prm;
- unsigned long flags;
+ unsigned long flags = 0;
int res = 0;
+ struct qla_qpair *qpair = cmd->qpair;
memset(&prm, 0, sizeof(prm));
prm.cmd = cmd;
@@ -3190,17 +3218,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
prm.sg = NULL;
prm.req_cnt = 1;
- /* Send marker if required */
- if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
- return -EIO;
-
/* Calculate number of entries and segments required */
if (qlt_pci_map_calc_cnt(&prm) != 0)
return -EAGAIN;
- spin_lock_irqsave(&ha->hardware_lock, flags);
-
- if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) ||
+ if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
(cmd->sess && cmd->sess->deleted)) {
/*
* Either the port is not online or this request was from
@@ -3208,25 +3230,25 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
*/
cmd->state = QLA_TGT_STATE_NEED_DATA;
qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
- ql_dbg(ql_dbg_async, vha, 0xe102,
+ ql_dbg_qp(ql_dbg_async, qpair, 0xe102,
"RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
vha->flags.online, qla2x00_reset_active(vha),
- cmd->reset_count, ha->chip_reset);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ cmd->reset_count, qpair->chip_reset);
return 0;
}
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
 /* Does F/W have IOCBs for this request */
- res = qlt_check_reserve_free_req(vha, prm.req_cnt);
+ res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
if (res != 0)
goto out_unlock_free_unmap;
if (cmd->se_cmd.prot_op)
- res = qlt_build_ctio_crc2_pkt(&prm, vha);
+ res = qlt_build_ctio_crc2_pkt(qpair, &prm);
else
- res = qlt_24xx_build_ctio_pkt(&prm, vha);
+ res = qlt_24xx_build_ctio_pkt(qpair, &prm);
if (unlikely(res != 0)) {
- vha->req->cnt += prm.req_cnt;
+ qpair->req->cnt += prm.req_cnt;
goto out_unlock_free_unmap;
}
@@ -3235,21 +3257,24 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
CTIO7_FLAGS_STATUS_MODE_0);
if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
- qlt_load_data_segments(&prm, vha);
+ qlt_load_data_segments(&prm);
cmd->state = QLA_TGT_STATE_NEED_DATA;
cmd->cmd_sent_to_fw = 1;
/* Memory Barrier */
wmb();
- qla2x00_start_iocbs(vha, vha->req);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(vha, qpair->req);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return res;
out_unlock_free_unmap:
qlt_unmap_sg(vha, cmd);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return res;
}
@@ -3260,7 +3285,7 @@ EXPORT_SYMBOL(qlt_rdy_to_xfer);
* it is assumed either hardware_lock or qpair lock is held.
*/
static void
-qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
+qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
struct ctio_crc_from_fw *sts)
{
uint8_t *ap = &sts->actual_dif[0];
@@ -3268,6 +3293,7 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
uint64_t lba = cmd->se_cmd.t_task_lba;
uint8_t scsi_status, sense_key, asc, ascq;
unsigned long flags;
+ struct scsi_qla_host *vha = cmd->vha;
cmd->trc_flags |= TRC_DIF_ERR;
@@ -3286,15 +3312,12 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
/* check appl tag */
if (cmd->e_app_tag != cmd->a_app_tag) {
- ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
- "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
- "Ref[%x|%x], App[%x|%x], "
- "Guard [%x|%x] cmd=%p ox_id[%04x]",
- cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
- cmd->a_ref_tag, cmd->e_ref_tag,
- cmd->a_app_tag, cmd->e_app_tag,
- cmd->a_guard, cmd->e_guard,
- cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
+ "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
+ cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+ cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
+ cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
+ cmd->atio.u.isp24.fcp_hdr.ox_id);
cmd->dif_err_code = DIF_ERR_APP;
scsi_status = SAM_STAT_CHECK_CONDITION;
@@ -3305,15 +3328,12 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
/* check ref tag */
if (cmd->e_ref_tag != cmd->a_ref_tag) {
- ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
- "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
- "Ref[%x|%x], App[%x|%x], "
- "Guard[%x|%x] cmd=%p ox_id[%04x] ",
- cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
- cmd->a_ref_tag, cmd->e_ref_tag,
- cmd->a_app_tag, cmd->e_app_tag,
- cmd->a_guard, cmd->e_guard,
- cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
+ "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
+ cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+ cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
+ cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
+ cmd->atio.u.isp24.fcp_hdr.ox_id);
cmd->dif_err_code = DIF_ERR_REF;
scsi_status = SAM_STAT_CHECK_CONDITION;
@@ -3325,15 +3345,13 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
/* check guard */
if (cmd->e_guard != cmd->a_guard) {
- ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
- "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
- "Ref[%x|%x], App[%x|%x], "
- "Guard [%x|%x] cmd=%p ox_id[%04x]",
- cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
- cmd->a_ref_tag, cmd->e_ref_tag,
- cmd->a_app_tag, cmd->e_app_tag,
- cmd->a_guard, cmd->e_guard,
- cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
+ "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
+ cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+ cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
+ cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
+ cmd->atio.u.isp24.fcp_hdr.ox_id);
+
cmd->dif_err_code = DIF_ERR_GRD;
scsi_status = SAM_STAT_CHECK_CONDITION;
sense_key = ABORTED_COMMAND;
@@ -3356,7 +3374,8 @@ out:
}
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq);
+ qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
+ ascq);
/* assume scsi status gets out on the wire.
* Will not wait for completion.
*/
@@ -3422,9 +3441,6 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
unsigned long flags = 0;
int rc;
- if (qlt_issue_marker(vha, ha_locked) < 0)
- return;
-
if (ha_locked) {
rc = __qlt_send_term_imm_notif(vha, imm);
@@ -3451,21 +3467,24 @@ done:
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
-/* If hardware_lock held on entry, might drop it, then reaquire */
-/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
-static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
+/*
+ * If hardware_lock held on entry, might drop it, then reacquire
+ * This function sends the appropriate CTIO to ISP 2xxx or 24xx
+ */
+static int __qlt_send_term_exchange(struct qla_qpair *qpair,
struct qla_tgt_cmd *cmd,
struct atio_from_isp *atio)
{
+ struct scsi_qla_host *vha = qpair->vha;
struct ctio7_to_24xx *ctio24;
struct qla_hw_data *ha = vha->hw;
request_t *pkt;
int ret = 0;
uint16_t temp;
- ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
+ ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
- pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
+ pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL);
if (pkt == NULL) {
ql_dbg(ql_dbg_tgt, vha, 0xe050,
"qla_target(%d): %s failed: unable to allocate "
@@ -3483,7 +3502,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
ret = 1;
}
- vha->tgt_counters.num_term_xchg_sent++;
+ qpair->tgt_counters.num_term_xchg_sent++;
pkt->entry_count = 1;
pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
@@ -3496,9 +3515,9 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
ctio24->exchange_addr = atio->u.isp24.exchange_addr;
- ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
- cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
- CTIO7_FLAGS_TERMINATE);
+ temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_TERMINATE;
+ ctio24->u.status1.flags = cpu_to_le16(temp);
temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
ctio24->u.status1.ox_id = cpu_to_le16(temp);
@@ -3511,28 +3530,35 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
/* Memory Barrier */
wmb();
- qla2x00_start_iocbs(vha, vha->req);
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(vha, qpair->req);
return ret;
}
-static void qlt_send_term_exchange(struct scsi_qla_host *vha,
+static void qlt_send_term_exchange(struct qla_qpair *qpair,
struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
int ul_abort)
{
+ struct scsi_qla_host *vha;
unsigned long flags = 0;
int rc;
- if (qlt_issue_marker(vha, ha_locked) < 0)
- return;
+ /* why use different vha? NPIV */
+ if (cmd)
+ vha = cmd->vha;
+ else
+ vha = qpair->vha;
if (ha_locked) {
- rc = __qlt_send_term_exchange(vha, cmd, atio);
+ rc = __qlt_send_term_exchange(qpair, cmd, atio);
if (rc == -ENOMEM)
qlt_alloc_qfull_cmd(vha, atio, 0, 0);
goto done;
}
- spin_lock_irqsave(&vha->hw->hardware_lock, flags);
- rc = __qlt_send_term_exchange(vha, cmd, atio);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ rc = __qlt_send_term_exchange(qpair, cmd, atio);
if (rc == -ENOMEM)
qlt_alloc_qfull_cmd(vha, atio, 0, 0);
@@ -3544,7 +3570,7 @@ done:
}
if (!ha_locked)
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return;
}
@@ -3616,17 +3642,17 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
* 1) XFER Rdy completion + CMD_T_ABORT
* 2) TCM TMR - drain_state_list
*/
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
- "multiple abort. %p transport_state %x, t_state %x,"
- " se_cmd_flags %x \n", cmd, cmd->se_cmd.transport_state,
- cmd->se_cmd.t_state,cmd->se_cmd.se_cmd_flags);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
+ "multiple abort. %p transport_state %x, t_state %x, "
+ "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
+ cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
return EIO;
}
cmd->aborted = 1;
cmd->trc_flags |= TRC_ABORT;
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1);
+ qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
return 0;
}
EXPORT_SYMBOL(qlt_abort_cmd);
@@ -3665,13 +3691,14 @@ EXPORT_SYMBOL(qlt_free_cmd);
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
-static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
+static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
struct qla_tgt_cmd *cmd, uint32_t status)
{
int term = 0;
+ struct scsi_qla_host *vha = qpair->vha;
if (cmd->se_cmd.prot_op)
- ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
"Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
"se_cmd=%p tag[%x] op %#x/%s",
cmd->lba, cmd->lba,
@@ -3688,55 +3715,53 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
term = 1;
if (term)
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
+ qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
return term;
}
-/* ha->hardware_lock supposed to be held on entry */
-static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
- uint32_t handle)
-{
- struct qla_hw_data *ha = vha->hw;
-
- handle--;
- if (ha->tgt.cmds[handle] != NULL) {
- struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
- ha->tgt.cmds[handle] = NULL;
- return cmd;
- } else
- return NULL;
-}
/* ha->hardware_lock supposed to be held on entry */
static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
- uint32_t handle, void *ctio)
+ struct rsp_que *rsp, uint32_t handle, void *ctio)
{
struct qla_tgt_cmd *cmd = NULL;
+ struct req_que *req;
+ int qid = GET_QID(handle);
+ uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;
- /* Clear out internal marks */
- handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
- CTIO_INTERMEDIATE_HANDLE_MARK);
+ if (unlikely(h == QLA_TGT_SKIP_HANDLE))
+ return NULL;
- if (handle != QLA_TGT_NULL_HANDLE) {
- if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
- return NULL;
+ if (qid == rsp->req->id) {
+ req = rsp->req;
+ } else if (vha->hw->req_q_map[qid]) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
+ "qla_target(%d): CTIO completion with different QID %d handle %x\n",
+ vha->vp_idx, rsp->id, handle);
+ req = vha->hw->req_q_map[qid];
+ } else {
+ return NULL;
+ }
+
+ h &= QLA_CMD_HANDLE_MASK;
- /* handle-1 is actually used */
- if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
+ if (h != QLA_TGT_NULL_HANDLE) {
+ if (unlikely(h > req->num_outstanding_cmds)) {
ql_dbg(ql_dbg_tgt, vha, 0xe052,
"qla_target(%d): Wrong handle %x received\n",
vha->vp_idx, handle);
return NULL;
}
- cmd = qlt_get_cmd(vha, handle);
+
+ cmd = (struct qla_tgt_cmd *)req->outstanding_cmds[h];
if (unlikely(cmd == NULL)) {
- ql_dbg(ql_dbg_tgt, vha, 0xe053,
- "qla_target(%d): Suspicious: unable to "
- "find the command with handle %x\n", vha->vp_idx,
- handle);
+ ql_dbg(ql_dbg_async, vha, 0xe053,
+ "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
+ vha->vp_idx, handle, req->id, rsp->id);
return NULL;
}
+ req->outstanding_cmds[h] = NULL;
} else if (ctio != NULL) {
/* We can't get loop ID from CTIO7 */
ql_dbg(ql_dbg_tgt, vha, 0xe054,
@@ -3749,33 +3774,30 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
}
/* hardware_lock should be held by caller. */
-static void
+void
qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
{
struct qla_hw_data *ha = vha->hw;
- uint32_t handle;
if (cmd->sg_mapped)
qlt_unmap_sg(vha, cmd);
- handle = qlt_make_handle(vha);
-
/* TODO: fix debug message type and ids. */
if (cmd->state == QLA_TGT_STATE_PROCESSED) {
ql_dbg(ql_dbg_io, vha, 0xff00,
- "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
+ "HOST-ABORT: state=PROCESSED.\n");
} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
cmd->write_data_transferred = 0;
cmd->state = QLA_TGT_STATE_DATA_IN;
ql_dbg(ql_dbg_io, vha, 0xff01,
- "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);
+ "HOST-ABORT: state=DATA_IN.\n");
ha->tgt.tgt_ops->handle_data(cmd);
return;
} else {
ql_dbg(ql_dbg_io, vha, 0xff03,
- "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
+ "HOST-ABORT: state=BAD(%d).\n",
cmd->state);
dump_stack();
}
@@ -3784,51 +3806,16 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
ha->tgt.tgt_ops->free_cmd(cmd);
}
-void
-qlt_host_reset_handler(struct qla_hw_data *ha)
-{
- struct qla_tgt_cmd *cmd;
- unsigned long flags;
- scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- scsi_qla_host_t *vha = NULL;
- struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
- uint32_t i;
-
- if (!base_vha->hw->tgt.tgt_ops)
- return;
-
- if (!tgt || qla_ini_mode_enabled(base_vha)) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
- "Target mode disabled\n");
- return;
- }
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
- "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
- base_vha->dpc_flags);
-
- spin_lock_irqsave(&ha->hardware_lock, flags);
- for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
- cmd = qlt_get_cmd(base_vha, i);
- if (!cmd)
- continue;
- /* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
- vha = cmd->vha;
- qlt_abort_cmd_on_host_reset(vha, cmd);
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-}
-
-
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
-static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
- uint32_t status, void *ctio)
+static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
+ struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio)
{
struct qla_hw_data *ha = vha->hw;
struct se_cmd *se_cmd;
struct qla_tgt_cmd *cmd;
+ struct qla_qpair *qpair = rsp->qpair;
if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
/* That could happen only in case of an error/reset/abort */
@@ -3840,7 +3827,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
return;
}
- cmd = qlt_ctio_to_cmd(vha, handle, ctio);
+ cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
if (cmd == NULL)
return;
@@ -3885,7 +3872,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
*/
cmd->sess->logout_on_delete = 0;
cmd->sess->send_els_logo = 1;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20f8,
"%s %d %8phC post del sess\n",
__func__, __LINE__, cmd->sess->port_name);
@@ -3904,7 +3891,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
*((u64 *)&crc->actual_dif[0]),
*((u64 *)&crc->expected_dif[0]));
- qlt_handle_dif_error(vha, cmd, ctio);
+ qlt_handle_dif_error(qpair, cmd, ctio);
return;
}
default:
@@ -3924,7 +3911,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
(!cmd->aborted)) {
cmd->trc_flags |= TRC_CTIO_ERR;
- if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
+ if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
return;
}
}
@@ -4000,18 +3987,16 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
{
scsi_qla_host_t *vha = cmd->vha;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct fc_port *sess = cmd->sess;
struct atio_from_isp *atio = &cmd->atio;
unsigned char *cdb;
unsigned long flags;
uint32_t data_length;
int ret, fcp_task_attr, data_dir, bidi = 0;
+ struct qla_qpair *qpair = cmd->qpair;
cmd->cmd_in_wq = 0;
cmd->trc_flags |= TRC_DO_WORK;
- if (tgt->tgt_stop)
- goto out_term;
if (cmd->aborted) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
@@ -4023,8 +4008,6 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
spin_lock_init(&cmd->cmd_lock);
cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
- cmd->unpacked_lun = scsilun_to_int(
- (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
if (atio->u.isp24.fcp_cmnd.rddata &&
atio->u.isp24.fcp_cmnd.wrdata) {
@@ -4062,12 +4045,12 @@ out_term:
* argument to qlt_send_term_exchange() and free the memory here.
*/
cmd->trc_flags |= TRC_DO_WORK_ERR;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
qlt_decr_num_pend_cmds(vha);
percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
@@ -4087,6 +4070,110 @@ static void qlt_do_work(struct work_struct *work)
__qlt_do_work(cmd);
}
+void qlt_clr_qp_table(struct scsi_qla_host *vha)
+{
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ void *node;
+ u64 key = 0;
+
+ ql_log(ql_log_info, vha, 0x706c,
+ "User update Number of Active Qpairs %d\n",
+ ha->tgt.num_act_qpairs);
+
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+
+ btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
+ btree_remove64(&tgt->lun_qpair_map, key);
+
+ ha->base_qpair->lun_cnt = 0;
+ for (key = 0; key < ha->max_qpairs; key++)
+ if (ha->queue_pair_map[key])
+ ha->queue_pair_map[key]->lun_cnt = 0;
+
+ spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
+}
+
+static void qlt_assign_qpair(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd)
+{
+ struct qla_qpair *qpair, *qp;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct qla_qpair_hint *h;
+
+ if (vha->flags.qpairs_available) {
+ h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
+ if (unlikely(!h)) {
+ /* spread the lun-to-qpair ratio evenly */
+ int lcnt = 0, rc;
+ struct scsi_qla_host *base_vha =
+ pci_get_drvdata(vha->hw->pdev);
+
+ qpair = vha->hw->base_qpair;
+ if (qpair->lun_cnt == 0) {
+ qpair->lun_cnt++;
+ h = qla_qpair_to_hint(tgt, qpair);
+ BUG_ON(!h);
+ rc = btree_insert64(&tgt->lun_qpair_map,
+ cmd->unpacked_lun, h, GFP_ATOMIC);
+ if (rc) {
+ qpair->lun_cnt--;
+ ql_log(ql_log_info, vha, 0xd037,
+ "Unable to insert lun %llx into lun_qpair_map\n",
+ cmd->unpacked_lun);
+ }
+ goto out;
+ } else {
+ lcnt = qpair->lun_cnt;
+ }
+
+ h = NULL;
+ list_for_each_entry(qp, &base_vha->qp_list,
+ qp_list_elem) {
+ if (qp->lun_cnt == 0) {
+ qp->lun_cnt++;
+ h = qla_qpair_to_hint(tgt, qp);
+ BUG_ON(!h);
+ rc = btree_insert64(&tgt->lun_qpair_map,
+ cmd->unpacked_lun, h, GFP_ATOMIC);
+ if (rc) {
+ qp->lun_cnt--;
+ ql_log(ql_log_info, vha, 0xd038,
+ "Unable to insert lun %llx into lun_qpair_map\n",
+ cmd->unpacked_lun);
+ }
+ qpair = qp;
+ goto out;
+ } else {
+ if (qp->lun_cnt < lcnt) {
+ lcnt = qp->lun_cnt;
+ qpair = qp;
+ continue;
+ }
+ }
+ }
+ BUG_ON(!qpair);
+ qpair->lun_cnt++;
+ h = qla_qpair_to_hint(tgt, qpair);
+ BUG_ON(!h);
+ rc = btree_insert64(&tgt->lun_qpair_map,
+ cmd->unpacked_lun, h, GFP_ATOMIC);
+ if (rc) {
+ qpair->lun_cnt--;
+ ql_log(ql_log_info, vha, 0xd039,
+ "Unable to insert lun %llx into lun_qpair_map\n",
+ cmd->unpacked_lun);
+ }
+ }
+ } else {
+ h = &tgt->qphints[0];
+ }
+out:
+ cmd->qpair = h->qpair;
+ cmd->se_cmd.cpuid = h->cpuid;
+}
+
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
struct fc_port *sess,
struct atio_from_isp *atio)
@@ -4101,7 +4188,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
memset(cmd, 0, sizeof(struct qla_tgt_cmd));
-
+ cmd->cmd_type = TYPE_TGT_CMD;
memcpy(&cmd->atio, atio, sizeof(*atio));
cmd->state = QLA_TGT_STATE_NEW;
cmd->tgt = vha->vha_tgt.qla_tgt;
@@ -4115,14 +4202,15 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
cmd->trc_flags = 0;
cmd->jiffies_at_alloc = get_jiffies_64();
- cmd->reset_count = vha->hw->chip_reset;
+ cmd->unpacked_lun = scsilun_to_int(
+ (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
+ qlt_assign_qpair(vha, cmd);
+ cmd->reset_count = vha->hw->base_qpair->chip_reset;
+ cmd->vp_idx = vha->vp_idx;
return cmd;
}
-static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
- uint16_t);
-
static void qlt_create_sess_from_atio(struct work_struct *work)
{
struct qla_tgt_sess_op *op = container_of(work,
@@ -4168,10 +4256,15 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
*/
cmd = qlt_get_tag(vha, sess, &op->atio);
if (!cmd) {
- spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
+ struct qla_qpair *qpair = ha->base_qpair;
+
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ qlt_send_busy(qpair, &op->atio, SAM_STAT_BUSY);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
kfree(op);
return;
}
@@ -4184,9 +4277,7 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
kfree(op);
return;
out_term:
- spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ qlt_send_term_exchange(vha->hw->base_qpair, NULL, &op->atio, 0, 0);
kfree(op);
}
@@ -4216,9 +4307,9 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
memcpy(&op->atio, atio, sizeof(*atio));
op->vha = vha;
- spin_lock(&vha->cmd_list_lock);
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
- spin_unlock(&vha->cmd_list_lock);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
INIT_WORK(&op->work, qlt_create_sess_from_atio);
queue_work(qla_tgt_wq, &op->work);
@@ -4228,7 +4319,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
/* Another WWN used to have our s_id. Our PLOGI scheduled its
* session deletion, but it's still in sess_del_work wq */
if (sess->deleted) {
- ql_dbg(ql_dbg_io, vha, 0x3061,
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
"New command while old session %p is being deleted\n",
sess);
return -EFAULT;
@@ -4238,7 +4329,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
* Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
*/
if (!kref_get_unless_zero(&sess->sess_kref)) {
- ql_dbg(ql_dbg_tgt, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
"%s: kref_get fail, %8phC oxid %x \n",
__func__, sess->port_name,
be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
@@ -4257,15 +4348,15 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
cmd->cmd_in_wq = 1;
cmd->trc_flags |= TRC_NEW_CMD;
- cmd->se_cmd.cpuid = ha->msix_count ?
- ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND;
spin_lock_irqsave(&vha->cmd_list_lock, flags);
list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
INIT_WORK(&cmd->work, qlt_do_work);
- if (ha->msix_count) {
+ if (vha->flags.qpairs_available) {
+ queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
+ } else if (ha->msix_count) {
if (cmd->atio.u.isp24.fcp_cmnd.rddata)
queue_work_on(smp_processor_id(), qla_tgt_wq,
&cmd->work);
@@ -4275,8 +4366,8 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
} else {
queue_work(qla_tgt_wq, &cmd->work);
}
- return 0;
+ return 0;
}
/* ha->hardware_lock supposed to be held on entry */
@@ -4306,7 +4397,8 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
}
mcmd->tmr_func = fn;
mcmd->flags = flags;
- mcmd->reset_count = vha->hw->chip_reset;
+ mcmd->reset_count = ha->base_qpair->chip_reset;
+ mcmd->qpair = ha->base_qpair;
switch (fn) {
case QLA_TGT_LUN_RESET:
@@ -4333,13 +4425,12 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt;
struct fc_port *sess;
- uint32_t lun, unpacked_lun;
+ u64 unpacked_lun;
int fn;
unsigned long flags;
tgt = vha->vha_tgt.qla_tgt;
- lun = a->u.isp24.fcp_cmnd.lun;
fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -4347,7 +4438,8 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
a->u.isp24.fcp_hdr.s_id);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+ unpacked_lun =
+ scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
if (!sess) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
@@ -4370,7 +4462,7 @@ static int __qlt_abort_task(struct scsi_qla_host *vha,
struct atio_from_isp *a = (struct atio_from_isp *)iocb;
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_mgmt_cmd *mcmd;
- uint32_t lun, unpacked_lun;
+ u64 unpacked_lun;
int rc;
mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
@@ -4386,10 +4478,11 @@ static int __qlt_abort_task(struct scsi_qla_host *vha,
memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
sizeof(mcmd->orig_iocb.imm_ntfy));
- lun = a->u.isp24.fcp_cmnd.lun;
- unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
- mcmd->reset_count = vha->hw->chip_reset;
+ unpacked_lun =
+ scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
+ mcmd->reset_count = ha->base_qpair->chip_reset;
mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
+ mcmd->qpair = ha->base_qpair;
rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
le16_to_cpu(iocb->u.isp2x.seq_id));
@@ -4493,7 +4586,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
* Another wwn used to have our s_id/loop_id
* kill the session, but don't free the loop_id
*/
- ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b,
"Invalidating sess %p loop_id %d wwn %llx.\n",
other_sess, other_sess->loop_id, other_wwn);
@@ -4529,12 +4622,13 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
struct qla_tgt_cmd *cmd;
uint32_t key;
int count = 0;
+ unsigned long flags;
key = (((u32)s_id->b.domain << 16) |
((u32)s_id->b.area << 8) |
((u32)s_id->b.al_pa));
- spin_lock(&vha->cmd_list_lock);
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
@@ -4559,7 +4653,7 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
count++;
}
}
- spin_unlock(&vha->cmd_list_lock);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
return count;
}
@@ -4672,9 +4766,9 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
(sess->d_id.b24 == port_id.b24));
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, sess->port_name);
+ ql_dbg(ql_dbg_disc, vha, 0x20f9,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, sess->port_name);
qlt_schedule_sess_for_deletion_lock(sess);
@@ -4744,7 +4838,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
/* Make session global (not used in fabric mode) */
if (ha->current_topology != ISP_CFG_F) {
if (sess) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20fa,
"%s %d %8phC post nack\n",
__func__, __LINE__, sess->port_name);
qla24xx_post_nack_work(vha, sess, iocb,
@@ -4757,7 +4851,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
} else {
if (sess) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20fb,
"%s %d %8phC post nack\n",
__func__, __LINE__, sess->port_name);
qla24xx_post_nack_work(vha, sess, iocb,
@@ -4791,7 +4885,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20fc,
"%s: logo %llx res %d sess %p ",
__func__, wwn, res, sess);
if (res == 0) {
@@ -4815,15 +4909,15 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
{
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
if (tgt->link_reinit_iocb_pending) {
- qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
- 0, 0, 0, 0, 0, 0);
+ qlt_send_notify_ack(ha->base_qpair,
+ &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
tgt->link_reinit_iocb_pending = 0;
}
sess = qla2x00_find_fcport_by_wwpn(vha,
iocb->u.isp24.port_name, 1);
if (sess) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20fd,
"sess %p lid %d|%d DS %d LS %d\n",
sess, sess->loop_id, loop_id,
sess->disc_state, sess->fw_login_state);
@@ -4879,8 +4973,8 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
le16_to_cpu(iocb->u.isp24.nport_handle),
iocb->u.isp24.status_subcode);
if (tgt->link_reinit_iocb_pending) {
- qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
- 0, 0, 0, 0, 0, 0);
+ qlt_send_notify_ack(ha->base_qpair,
+ &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
}
memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
tgt->link_reinit_iocb_pending = 1;
@@ -4974,33 +5068,36 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
}
if (send_notify_ack)
- qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
+ qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
+ 0, 0);
}
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
* This function sends busy to ISP 2xxx or 24xx.
*/
-static int __qlt_send_busy(struct scsi_qla_host *vha,
+static int __qlt_send_busy(struct qla_qpair *qpair,
struct atio_from_isp *atio, uint16_t status)
{
+ struct scsi_qla_host *vha = qpair->vha;
struct ctio7_to_24xx *ctio24;
struct qla_hw_data *ha = vha->hw;
request_t *pkt;
struct fc_port *sess = NULL;
unsigned long flags;
+ u16 temp;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
atio->u.isp24.fcp_hdr.s_id);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
if (!sess) {
- qlt_send_term_exchange(vha, NULL, atio, 1, 0);
+ qlt_send_term_exchange(qpair, NULL, atio, 1, 0);
return 0;
}
/* Sending marker isn't necessary, since we called from ISR */
- pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+ pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
if (!pkt) {
ql_dbg(ql_dbg_io, vha, 0x3063,
"qla_target(%d): %s failed: unable to allocate "
@@ -5008,7 +5105,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
return -ENOMEM;
}
- vha->tgt_counters.num_q_full_sent++;
+ qpair->tgt_counters.num_q_full_sent++;
pkt->entry_count = 1;
pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
@@ -5021,10 +5118,10 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
ctio24->exchange_addr = atio->u.isp24.exchange_addr;
- ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
- cpu_to_le16(
+ temp = (atio->u.isp24.attr << 9) |
CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
- CTIO7_FLAGS_DONT_RET_CTIO);
+ CTIO7_FLAGS_DONT_RET_CTIO;
+ ctio24->u.status1.flags = cpu_to_le16(temp);
/*
* CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
 * if explicit confirmation is used.
@@ -5033,7 +5130,10 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
ctio24->u.status1.scsi_status = cpu_to_le16(status);
/* Memory Barrier */
wmb();
- qla2x00_start_iocbs(vha, vha->req);
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(vha, qpair->req);
return 0;
}
@@ -5052,6 +5152,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
struct se_session *se_sess;
struct qla_tgt_cmd *cmd;
int tag;
+ unsigned long flags;
if (unlikely(tgt->tgt_stop)) {
ql_dbg(ql_dbg_io, vha, 0x300a,
@@ -5110,8 +5211,9 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
cmd->tgt = vha->vha_tgt.qla_tgt;
cmd->vha = vha;
- cmd->reset_count = vha->hw->chip_reset;
+ cmd->reset_count = ha->base_qpair->chip_reset;
cmd->q_full = 1;
+ cmd->qpair = ha->base_qpair;
if (qfull) {
cmd->q_full = 1;
@@ -5120,6 +5222,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
} else
cmd->term_exchg = 1;
+ spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
vha->hw->tgt.num_qfull_cmds_alloc++;
@@ -5127,35 +5230,41 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
vha->qla_stats.stat_max_qfull_cmds_alloc)
vha->qla_stats.stat_max_qfull_cmds_alloc =
vha->hw->tgt.num_qfull_cmds_alloc;
+ spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
int
-qlt_free_qfull_cmds(struct scsi_qla_host *vha)
+qlt_free_qfull_cmds(struct qla_qpair *qpair)
{
+ struct scsi_qla_host *vha = qpair->vha;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
struct qla_tgt_cmd *cmd, *tcmd;
- struct list_head free_list;
+ struct list_head free_list, q_full_list;
int rc = 0;
if (list_empty(&ha->tgt.q_full_list))
return 0;
INIT_LIST_HEAD(&free_list);
+ INIT_LIST_HEAD(&q_full_list);
- spin_lock_irqsave(&vha->hw->hardware_lock, flags);
-
+ spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
if (list_empty(&ha->tgt.q_full_list)) {
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+ spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
return 0;
}
- list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
+ list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
+ spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
if (cmd->q_full)
/* cmd->state is a borrowed field to hold status */
- rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
+ rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
else if (cmd->term_exchg)
- rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);
+ rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);
if (rc == -ENOMEM)
break;
@@ -5179,7 +5288,7 @@ qlt_free_qfull_cmds(struct scsi_qla_host *vha)
/* piggy back on hardware_lock for protection */
vha->hw->tgt.num_qfull_cmds_alloc--;
}
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
cmd = NULL;
@@ -5190,23 +5299,31 @@ qlt_free_qfull_cmds(struct scsi_qla_host *vha)
*/
qlt_free_cmd(cmd);
}
+
+ if (!list_empty(&q_full_list)) {
+ spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
+ list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
+ spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+ }
+
return rc;
}
static void
-qlt_send_busy(struct scsi_qla_host *vha,
- struct atio_from_isp *atio, uint16_t status)
+qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
+ uint16_t status)
{
int rc = 0;
+ struct scsi_qla_host *vha = qpair->vha;
- rc = __qlt_send_busy(vha, atio, status);
+ rc = __qlt_send_busy(qpair, atio, status);
if (rc == -ENOMEM)
qlt_alloc_qfull_cmd(vha, atio, status, 1);
}
static int
-qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
- struct atio_from_isp *atio, bool ha_locked)
+qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
+ struct atio_from_isp *atio, uint8_t ha_locked)
{
struct qla_hw_data *ha = vha->hw;
uint16_t status;
@@ -5218,7 +5335,7 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
if (!ha_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
status = temp_sam_status;
- qlt_send_busy(vha, atio, status);
+ qlt_send_busy(qpair, atio, status);
if (!ha_locked)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -5257,16 +5374,17 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
"sending QUEUE_FULL\n", vha->vp_idx);
if (!ha_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
+ qlt_send_busy(ha->base_qpair, atio,
+ SAM_STAT_TASK_SET_FULL);
if (!ha_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
break;
}
-
-
if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
- rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
+ rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
+ atio, ha_locked);
if (rc != 0) {
tgt->atio_irq_cmd_count--;
return;
@@ -5278,19 +5396,19 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
if (unlikely(rc != 0)) {
if (rc == -ESRCH) {
if (!ha_locked)
- spin_lock_irqsave
- (&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->hardware_lock,
+ flags);
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
- qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+ qlt_send_busy(ha->base_qpair, atio,
+ SAM_STAT_BUSY);
#else
- qlt_send_term_exchange(vha, NULL, atio, 1, 0);
+ qlt_send_term_exchange(ha->base_qpair, NULL,
+ atio, 1, 0);
#endif
-
if (!ha_locked)
- spin_unlock_irqrestore
- (&ha->hardware_lock, flags);
-
+ spin_unlock_irqrestore(
+ &ha->hardware_lock, flags);
} else {
if (tgt->tgt_stop) {
ql_dbg(ql_dbg_tgt, vha, 0xe059,
@@ -5305,7 +5423,8 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
if (!ha_locked)
spin_lock_irqsave(
&ha->hardware_lock, flags);
- qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+ qlt_send_busy(ha->base_qpair,
+ atio, SAM_STAT_BUSY);
if (!ha_locked)
spin_unlock_irqrestore(
&ha->hardware_lock, flags);
@@ -5346,15 +5465,15 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
-static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
+static void qlt_response_pkt(struct scsi_qla_host *vha,
+ struct rsp_que *rsp, response_t *pkt)
{
- struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
if (unlikely(tgt == NULL)) {
ql_dbg(ql_dbg_tgt, vha, 0xe05d,
- "qla_target(%d): Response pkt %x received, but no "
- "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
+ "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
+ vha->vp_idx, pkt->entry_type, vha->hw);
return;
}
@@ -5363,14 +5482,12 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
* Otherwise, some commands can get stuck.
*/
- tgt->irq_cmd_count++;
-
switch (pkt->entry_type) {
case CTIO_CRC2:
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
- qlt_do_ctio_completion(vha, entry->handle,
+ qlt_do_ctio_completion(vha, rsp, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
break;
@@ -5389,19 +5506,17 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
break;
}
- rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
- if (rc != 0) {
- tgt->irq_cmd_count--;
+ rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
+ if (rc != 0)
return;
- }
rc = qlt_handle_cmd_for_atio(vha, atio);
if (unlikely(rc != 0)) {
if (rc == -ESRCH) {
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
- qlt_send_busy(vha, atio, 0);
+ qlt_send_busy(rsp->qpair, atio, 0);
#else
- qlt_send_term_exchange(vha, NULL, atio, 1, 0);
+ qlt_send_term_exchange(rsp->qpair, NULL, atio, 1, 0);
#endif
} else {
if (tgt->tgt_stop) {
@@ -5409,14 +5524,14 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
"qla_target: Unable to send "
"command to target, sending TERM "
"EXCHANGE for rsp\n");
- qlt_send_term_exchange(vha, NULL,
+ qlt_send_term_exchange(rsp->qpair, NULL,
atio, 1, 0);
} else {
ql_dbg(ql_dbg_tgt, vha, 0xe060,
"qla_target(%d): Unable to send "
"command to target, sending BUSY "
"status\n", vha->vp_idx);
- qlt_send_busy(vha, atio, 0);
+ qlt_send_busy(rsp->qpair, atio, 0);
}
}
}
@@ -5426,7 +5541,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
case CONTINUE_TGT_IO_TYPE:
{
struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
- qlt_do_ctio_completion(vha, entry->handle,
+ qlt_do_ctio_completion(vha, rsp, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
break;
@@ -5435,7 +5550,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
case CTIO_A64_TYPE:
{
struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
- qlt_do_ctio_completion(vha, entry->handle,
+ qlt_do_ctio_completion(vha, rsp, entry->handle,
le16_to_cpu(entry->status)|(pkt->entry_status << 16),
entry);
break;
@@ -5525,7 +5640,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
break;
}
- tgt->irq_cmd_count--;
}
/*
@@ -5538,15 +5652,9 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
int login_code;
- if (!ha->tgt.tgt_ops)
+ if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
return;
- if (unlikely(tgt == NULL)) {
- ql_dbg(ql_dbg_tgt, vha, 0xe03a,
- "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
- return;
- }
-
if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
IS_QLA2100(ha))
return;
@@ -5555,7 +5663,6 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
* Otherwise, some commands can get stuck.
*/
- tgt->irq_cmd_count++;
switch (code) {
case MBA_RESET: /* Reset */
@@ -5578,7 +5685,8 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
if (tgt->link_reinit_iocb_pending) {
- qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
+ qlt_send_notify_ack(ha->base_qpair,
+ (void *)&tgt->link_reinit_iocb,
0, 0, 0, 0, 0, 0);
tgt->link_reinit_iocb_pending = 0;
}
@@ -5597,17 +5705,17 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
break;
case MBA_REJECTED_FCP_CMD:
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
- "qla_target(%d): Async event LS_REJECT occurred "
- "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
- le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
- le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
+ "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
+ vha->vp_idx,
+ le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
if (le16_to_cpu(mailbox[3]) == 1) {
/* exchange starvation. */
vha->hw->exch_starvation++;
if (vha->hw->exch_starvation > 5) {
- ql_log(ql_log_warn, vha, 0xffff,
+ ql_log(ql_log_warn, vha, 0xd03a,
"Exchange starvation-. Resetting RISC\n");
vha->hw->exch_starvation = 0;
@@ -5643,7 +5751,6 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
break;
}
- tgt->irq_cmd_count--;
}
static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
@@ -5707,12 +5814,12 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
case MODE_DUAL:
if (newfcport) {
if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20fe,
"%s %d %8phC post upd_fcport fcp_cnt %d\n",
__func__, __LINE__, fcport->port_name, vha->fcport_count);
qla24xx_post_upd_fcport_work(vha, fcport);
} else {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
+ ql_dbg(ql_dbg_disc, vha, 0x20ff,
"%s %d %8phC post gpsc fcp_cnt %d\n",
__func__, __LINE__, fcport->port_name, vha->fcport_count);
qla24xx_post_gpsc_work(vha, fcport);
@@ -5838,7 +5945,7 @@ static void qlt_abort_work(struct qla_tgt *tgt,
}
if (!kref_get_unless_zero(&sess->sess_kref)) {
- ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
"%s: kref_get fail %8phC \n",
__func__, sess->port_name);
sess = NULL;
@@ -5861,7 +5968,8 @@ out_term2:
out_term:
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
+ qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
+ FCP_TMF_REJECTED, false);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -5875,7 +5983,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
unsigned long flags;
uint8_t *s_id = NULL; /* to hide compiler warnings */
int rc;
- uint32_t lun, unpacked_lun;
+ u64 unpacked_lun;
int fn;
void *iocb;
@@ -5902,7 +6010,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
}
if (!kref_get_unless_zero(&sess->sess_kref)) {
- ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020,
"%s: kref_get fail %8phC\n",
__func__, sess->port_name);
sess = NULL;
@@ -5911,9 +6019,9 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
}
iocb = a;
- lun = a->u.isp24.fcp_cmnd.lun;
fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
- unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+ unpacked_lun =
+ scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
ha->tgt.tgt_ops->put_sess(sess);
@@ -5928,7 +6036,7 @@ out_term2:
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
out_term:
- qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
+ qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0);
}
static void qlt_sess_work_fn(struct work_struct *work)
@@ -5976,6 +6084,8 @@ static void qlt_sess_work_fn(struct work_struct *work)
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
struct qla_tgt *tgt;
+ int rc, i;
+ struct qla_qpair_hint *h;
if (!QLA_TGT_MODE_ENABLED())
return 0;
@@ -5998,9 +6108,47 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
return -ENOMEM;
}
+ tgt->qphints = kzalloc((ha->max_qpairs + 1) *
+ sizeof(struct qla_qpair_hint), GFP_KERNEL);
+ if (!tgt->qphints) {
+ kfree(tgt);
+ ql_log(ql_log_warn, base_vha, 0x0197,
+ "Unable to allocate qpair hints.\n");
+ return -ENOMEM;
+ }
+
if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
base_vha->host->hostt->supported_mode |= MODE_TARGET;
+ rc = btree_init64(&tgt->lun_qpair_map);
+ if (rc) {
+ kfree(tgt->qphints);
+ kfree(tgt);
+ ql_log(ql_log_info, base_vha, 0x0198,
+ "Unable to initialize lun_qpair_map btree\n");
+ return -EIO;
+ }
+ h = &tgt->qphints[0];
+ h->qpair = ha->base_qpair;
+ INIT_LIST_HEAD(&h->hint_elem);
+ h->cpuid = ha->base_qpair->cpuid;
+ list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);
+
+ for (i = 0; i < ha->max_qpairs; i++) {
+ unsigned long flags;
+
+ struct qla_qpair *qpair = ha->queue_pair_map[i];
+ h = &tgt->qphints[i + 1];
+ INIT_LIST_HEAD(&h->hint_elem);
+ if (qpair) {
+ h->qpair = qpair;
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ list_add_tail(&h->hint_elem, &qpair->hints_list);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ h->cpuid = qpair->cpuid;
+ }
+ }
+
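qlt_add_target() now builds a per-target table of qla_qpair_hint entries, slot 0 for the base qpair and slots 1..max_qpairs for any created queue pairs, plus a 64-bit btree keyed by LUN for later queue-pair selection. A hedged sketch of how such a map might be consulted on the command path; example_lun_to_hint() is invented for illustration, while the btree_lookup64()/btree_insert64() calls are the stock lib/btree helpers:

#include <linux/btree.h>
#include "qla_target.h"		/* struct qla_tgt, struct qla_qpair_hint */

/* Illustration only: return the qpair hint remembered for a LUN, falling
 * back to (and recording) a default hint the first time the LUN is seen.
 */
static struct qla_qpair_hint *example_lun_to_hint(struct qla_tgt *tgt, u64 lun,
						  struct qla_qpair_hint *dflt)
{
	struct qla_qpair_hint *h;

	h = btree_lookup64(&tgt->lun_qpair_map, lun);
	if (h)
		return h;

	/* First command for this LUN: remember the default mapping. */
	if (btree_insert64(&tgt->lun_qpair_map, lun, dflt, GFP_ATOMIC))
		return dflt;	/* insertion failed; the hint is still usable */

	return dflt;
}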
tgt->ha = ha;
tgt->vha = base_vha;
init_waitqueue_head(&tgt->waitQ);
@@ -6015,11 +6163,8 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
"qla_target(%d): using 64 Bit PCI addressing",
base_vha->vp_idx);
- tgt->tgt_enable_64bit_addr = 1;
/* 3 is reserved */
tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
- tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
- tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
mutex_lock(&qla_tgt_mutex);
list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
@@ -6231,7 +6376,6 @@ qlt_enable_vha(struct scsi_qla_host *vha)
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
unsigned long flags;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- int rspq_ent = QLA83XX_RSPQ_MSIX_ENTRY_NUMBER;
if (!tgt) {
ql_dbg(ql_dbg_tgt, vha, 0xe069,
@@ -6250,17 +6394,6 @@ qlt_enable_vha(struct scsi_qla_host *vha)
qla24xx_disable_vp(vha);
qla24xx_enable_vp(vha);
} else {
- if (ha->msix_entries) {
- ql_dbg(ql_dbg_tgt, vha, 0xffff,
- "%s: host%ld : vector %d cpu %d\n",
- __func__, vha->host_no,
- ha->msix_entries[rspq_ent].vector,
- ha->msix_entries[rspq_ent].cpuid);
-
- ha->tgt.rspq_vector_cpuid =
- ha->msix_entries[rspq_ent].cpuid;
- }
-
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(base_vha);
qla2x00_wait_for_hba_online(base_vha);
@@ -6387,14 +6520,15 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
* can not be trusted. There is no point in passing
* it further up.
*/
- ql_log(ql_log_warn, vha, 0xffff,
+ ql_log(ql_log_warn, vha, 0xd03c,
"corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
pkt->u.isp24.fcp_hdr.s_id,
be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
adjust_corrupted_atio(pkt);
- qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0);
+ qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
+ ha_locked, 0);
} else {
qlt_24xx_atio_pkt_all_vps(vha,
(struct atio_from_isp *)pkt, ha_locked);
@@ -6445,8 +6579,9 @@ void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
struct qla_hw_data *ha = vha->hw;
- u32 tmp;
- u16 t;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
if (!ha->tgt.saved_set) {
@@ -6461,24 +6596,10 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
ha->tgt.saved_set = 1;
}
- if (qla_tgt_mode_enabled(vha)) {
+ if (qla_tgt_mode_enabled(vha))
nv->exchange_count = cpu_to_le16(0xFFFF);
- } else { /* dual */
- if (ql_dm_tgt_ex_pct > 100) {
- ql_dm_tgt_ex_pct = 50;
- } else if (ql_dm_tgt_ex_pct == 100) {
- /* leave some for FW */
- ql_dm_tgt_ex_pct = 95;
- }
-
- tmp = ha->orig_fw_xcb_count * ql_dm_tgt_ex_pct;
- tmp = tmp/100;
- if (tmp > 0xffff)
- tmp = 0xffff;
-
- t = tmp & 0xffff;
- nv->exchange_count = cpu_to_le16(t);
- }
+ else /* dual */
+ nv->exchange_count = cpu_to_le16(ql2xexchoffld);
/* Enable target mode */
nv->firmware_options_1 |= cpu_to_le32(BIT_4);
@@ -6522,7 +6643,7 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
return;
}
- if (ha->tgt.enable_class_2) {
+ if (ha->base_qpair->enable_class_2) {
if (vha->flags.init_done)
fc_host_supported_classes(vha->host) =
FC_COS_CLASS2 | FC_COS_CLASS3;
@@ -6563,8 +6684,6 @@ void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
struct qla_hw_data *ha = vha->hw;
- u32 tmp;
- u16 t;
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6582,23 +6701,10 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
ha->tgt.saved_set = 1;
}
- if (qla_tgt_mode_enabled(vha)) {
+ if (qla_tgt_mode_enabled(vha))
nv->exchange_count = cpu_to_le16(0xFFFF);
- } else { /* dual */
- if (ql_dm_tgt_ex_pct > 100) {
- ql_dm_tgt_ex_pct = 50;
- } else if (ql_dm_tgt_ex_pct == 100) {
- /* leave some for FW */
- ql_dm_tgt_ex_pct = 95;
- }
-
- tmp = ha->orig_fw_xcb_count * ql_dm_tgt_ex_pct;
- tmp = tmp/100;
- if (tmp > 0xffff)
- tmp = 0xffff;
- t = tmp & 0xffff;
- nv->exchange_count = cpu_to_le16(t);
- }
+ else /* dual */
+ nv->exchange_count = cpu_to_le16(ql2xexchoffld);
/* Enable target mode */
nv->firmware_options_1 |= cpu_to_le32(BIT_4);
@@ -6641,7 +6747,7 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
return;
}
- if (ha->tgt.enable_class_2) {
+ if (ha->base_qpair->enable_class_2) {
if (vha->flags.init_done)
fc_host_supported_classes(vha->host) =
FC_COS_CLASS2 | FC_COS_CLASS3;
@@ -6688,21 +6794,6 @@ qlt_83xx_iospace_config(struct qla_hw_data *ha)
ha->msix_count += 1; /* For ATIO Q */
}
-int
-qlt_24xx_process_response_error(struct scsi_qla_host *vha,
- struct sts_entry_24xx *pkt)
-{
- switch (pkt->entry_type) {
- case ABTS_RECV_24XX:
- case ABTS_RESP_24XX:
- case CTIO_TYPE7:
- case NOTIFY_ACK_TYPE:
- case CTIO_CRC2:
- return 1;
- default:
- return 0;
- }
-}
void
qlt_modify_vp_config(struct scsi_qla_host *vha,
@@ -6744,7 +6835,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
rc = btree_init32(&ha->tgt.host_map);
if (rc)
- ql_log(ql_log_info, base_vha, 0xffff,
+ ql_log(ql_log_info, base_vha, 0xd03d,
"Unable to initialize ha->host_map btree\n");
qlt_update_vp_map(base_vha, SET_VP_IDX);
@@ -6780,7 +6871,8 @@ qlt_handle_abts_recv_work(struct work_struct *work)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
- if (qla2x00_reset_active(vha) || (op->chip_reset != ha->chip_reset))
+ if (qla2x00_reset_active(vha) ||
+ (op->chip_reset != ha->base_qpair->chip_reset))
return;
spin_lock_irqsave(&ha->tgt.atio_lock, flags);
@@ -6788,14 +6880,15 @@ qlt_handle_abts_recv_work(struct work_struct *work)
spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
+ qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
kfree(op);
}
void
-qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt)
+qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
+ response_t *pkt)
{
struct qla_tgt_sess_op *op;
@@ -6805,13 +6898,14 @@ qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt)
/* do not reach for ATIO queue here. This is best effort err
* recovery at this point.
*/
- qlt_response_pkt_all_vps(vha, pkt);
+ qlt_response_pkt_all_vps(vha, rsp, pkt);
return;
}
memcpy(&op->atio, pkt, sizeof(*pkt));
op->vha = vha;
- op->chip_reset = vha->hw->chip_reset;
+ op->chip_reset = vha->hw->base_qpair->chip_reset;
+ op->rsp = rsp;
INIT_WORK(&op->work, qlt_handle_abts_recv_work);
queue_work(qla_tgt_wq, &op->work);
return;
@@ -6872,25 +6966,25 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
case SET_AL_PA:
slot = btree_lookup32(&vha->hw->tgt.host_map, key);
if (!slot) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
"Save vha in host_map %p %06x\n", vha, key);
rc = btree_insert32(&vha->hw->tgt.host_map,
key, vha, GFP_ATOMIC);
if (rc)
- ql_log(ql_log_info, vha, 0xffff,
+ ql_log(ql_log_info, vha, 0xd03e,
"Unable to insert s_id into host_map: %06x\n",
key);
return;
}
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
- "replace existing vha in host_map %p %06x\n", vha, key);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
+ "replace existing vha in host_map %p %06x\n", vha, key);
btree_update32(&vha->hw->tgt.host_map, key, vha);
break;
case RESET_VP_IDX:
vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
break;
case RESET_AL_PA:
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
"clear vha in host_map %p %06x\n", vha, key);
slot = btree_lookup32(&vha->hw->tgt.host_map, key);
if (slot)
@@ -6952,7 +7046,7 @@ int __init qlt_init(void)
sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
qla_tgt_mgmt_cmd), 0, NULL);
if (!qla_tgt_mgmt_cmd_cachep) {
- ql_log(ql_log_fatal, NULL, 0xe06d,
+ ql_log(ql_log_fatal, NULL, 0xd04b,
"kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
return -ENOMEM;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index d64420251194..7fe02d036bdf 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -70,6 +70,16 @@
/* Used to mark CTIO as intermediate */
#define CTIO_INTERMEDIATE_HANDLE_MARK BIT_30
+#define QLA_TGT_NULL_HANDLE 0
+
+#define QLA_TGT_HANDLE_MASK 0xF0000000
+#define QLA_QPID_HANDLE_MASK 0x00FF0000 /* qpair id mask */
+#define QLA_CMD_HANDLE_MASK 0x0000FFFF
+#define QLA_TGT_SKIP_HANDLE (0xFFFFFFFF & ~QLA_TGT_HANDLE_MASK)
+
+#define QLA_QPID_HANDLE_SHIFT 16
+#define GET_QID(_h) ((_h & QLA_QPID_HANDLE_MASK) >> QLA_QPID_HANDLE_SHIFT)
+
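For reference, the new handle layout packs a queue-pair id into bits 16..23 of the 32-bit completion handle, keeps the per-queue command index in the low 16 bits, and leaves the top nibble for the existing marker bits. A small standalone sketch of composing and decomposing such a handle; make_handle() is an invented helper and the masks simply mirror the definitions above:

#include <stdint.h>
#include <stdio.h>

#define QLA_QPID_HANDLE_MASK  0x00FF0000u	/* qpair id mask */
#define QLA_CMD_HANDLE_MASK   0x0000FFFFu
#define QLA_QPID_HANDLE_SHIFT 16
#define GET_QID(_h) (((_h) & QLA_QPID_HANDLE_MASK) >> QLA_QPID_HANDLE_SHIFT)

/* Hypothetical helper: build a handle from a qpair id and a command index. */
static uint32_t make_handle(uint32_t qpair_id, uint32_t cmd_index)
{
	return ((qpair_id << QLA_QPID_HANDLE_SHIFT) & QLA_QPID_HANDLE_MASK) |
	       (cmd_index & QLA_CMD_HANDLE_MASK);
}

int main(void)
{
	uint32_t h = make_handle(3, 0x2a);

	/* Prints: handle=0x0003002a qid=3 index=0x2a */
	printf("handle=0x%08x qid=%u index=0x%x\n",
	       h, GET_QID(h), h & QLA_CMD_HANDLE_MASK);
	return 0;
}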
#ifndef OF_SS_MODE_0
/*
@@ -426,7 +436,7 @@ struct ctio7_to_24xx {
} status0;
struct {
uint16_t sense_length;
- uint16_t flags;
+ __le16 flags;
uint32_t residual;
__le16 ox_id;
uint16_t scsi_status;
@@ -664,6 +674,7 @@ struct abts_resp_from_24xx_fw {
struct qla_tgt_mgmt_cmd;
struct fc_port;
+struct qla_tgt_cmd;
/*
* This structure provides a template of function calls that the
@@ -675,7 +686,7 @@ struct qla_tgt_func_tmpl {
int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
unsigned char *, uint32_t, int, int, int);
void (*handle_data)(struct qla_tgt_cmd *);
- int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t,
+ int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, u64, uint16_t,
uint32_t);
void (*free_cmd)(struct qla_tgt_cmd *);
void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
@@ -744,11 +755,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
#define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */
#define QLA_TGT_STATE_PROCESSED 3 /* target done processing */
-
-/* Special handles */
-#define QLA_TGT_NULL_HANDLE 0
-#define QLA_TGT_SKIP_HANDLE (0xFFFFFFFF & ~CTIO_COMPLETION_HANDLE_MARK)
-
/* ATIO task_codes field */
#define ATIO_SIMPLE_QUEUE 0
#define ATIO_HEAD_OF_QUEUE 1
@@ -781,22 +787,28 @@ struct qla_port_24xx_data {
uint16_t reserved;
};
+struct qla_qpair_hint {
+ struct list_head hint_elem;
+ struct qla_qpair *qpair;
+ u16 cpuid;
+ uint8_t cmd_cnt;
+};
+
struct qla_tgt {
struct scsi_qla_host *vha;
struct qla_hw_data *ha;
-
+ struct btree_head64 lun_qpair_map;
+ struct qla_qpair_hint *qphints;
/*
* To sync between IRQ handlers and qlt_target_release(). Needed,
* because req_pkt() can drop/reacquire HW lock inside. Protected by
* HW lock.
*/
- int irq_cmd_count;
int atio_irq_cmd_count;
- int datasegs_per_cmd, datasegs_per_cont, sg_tablesize;
+ int sg_tablesize;
/* Target's flags, serialized by pha->hardware_lock */
- unsigned int tgt_enable_64bit_addr:1; /* 64-bits PCI addr enabled */
unsigned int link_reinit_iocb_pending:1;
/*
@@ -832,6 +844,7 @@ struct qla_tgt_sess_op {
struct work_struct work;
struct list_head cmd_list;
bool aborted;
+ struct rsp_que *rsp;
};
enum trace_flags {
@@ -859,10 +872,16 @@ enum trace_flags {
};
struct qla_tgt_cmd {
+ /*
+ * Do not move cmd_type field. It needs to line up with srb->cmd_type
+ */
+ uint8_t cmd_type;
+ uint8_t pad[7];
struct se_cmd se_cmd;
struct fc_port *sess;
+ struct qla_qpair *qpair;
+ uint32_t reset_count;
int state;
- struct work_struct free_work;
struct work_struct work;
/* Sense buffer that will be mapped into outgoing status */
unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
@@ -885,10 +904,10 @@ struct qla_tgt_cmd {
int sg_cnt; /* SG segments count */
int bufflen; /* cmd buffer length */
int offset;
- uint32_t unpacked_lun;
+ u64 unpacked_lun;
enum dma_data_direction dma_data_direction;
- uint32_t reset_count;
+ uint16_t vp_idx;
uint16_t loop_id; /* to save extra sess dereferences */
struct qla_tgt *tgt; /* to save extra sess dereferences */
struct scsi_qla_host *vha;
@@ -939,6 +958,7 @@ struct qla_tgt_mgmt_cmd {
uint16_t tmr_func;
uint8_t fc_tm_rsp;
struct fc_port *sess;
+ struct qla_qpair *qpair;
struct se_cmd se_cmd;
struct work_struct free_work;
unsigned int flags;
@@ -960,7 +980,6 @@ struct qla_tgt_prm {
int seg_cnt;
int req_cnt;
uint16_t rq_result;
- uint16_t scsi_status;
int sense_buffer_len;
int residual;
int add_status_pkt;
@@ -1040,7 +1059,8 @@ static inline void sid_to_portid(const uint8_t *s_id, port_id_t *p)
/*
* Exported symbols from qla_target.c LLD logic used by qla2xxx code..
*/
-extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
+extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, struct rsp_que *,
+ response_t *);
extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
extern int qlt_abort_cmd(struct qla_tgt_cmd *);
@@ -1073,11 +1093,13 @@ extern int qlt_stop_phase1(struct qla_tgt *);
extern void qlt_stop_phase2(struct qla_tgt *);
extern irqreturn_t qla83xx_msix_atio_q(int, void *);
extern void qlt_83xx_iospace_config(struct qla_hw_data *);
-extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
+extern int qlt_free_qfull_cmds(struct qla_qpair *);
extern void qlt_logo_completion_handler(fc_port_t *, int);
extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
-void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t,
+void qlt_send_resp_ctio(struct qla_qpair *, struct qla_tgt_cmd *, uint8_t,
uint8_t, uint8_t, uint8_t);
+extern void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *,
+ struct qla_tgt_cmd *);
#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index c197972a3e2d..33142610882f 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -219,8 +219,6 @@ qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
{
if (buf)
ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
- ql_dbg(ql_dbg_misc + ql_dbg_verbose, NULL, 0xd011,
- "Skipping entry %d\n", ent->hdr.entry_type);
}
static int
@@ -818,6 +816,8 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_misc, vha, 0xd01a,
"%s: entry count %lx\n", __func__, count);
while (count--) {
+ if (buf && *len >= vha->hw->fw_dump_len)
+ break;
if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len))
break;
ent = qla27xx_next_entry(ent);
@@ -825,18 +825,20 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
if (count)
ql_dbg(ql_dbg_misc, vha, 0xd018,
- "%s: residual count (%lx)\n", __func__, count);
+ "%s: entry residual count (%lx)\n", __func__, count);
if (ent->hdr.entry_type != ENTRY_TYPE_TMP_END)
ql_dbg(ql_dbg_misc, vha, 0xd019,
- "%s: missing end (%lx)\n", __func__, count);
+ "%s: missing end entry (%lx)\n", __func__, count);
- ql_dbg(ql_dbg_misc, vha, 0xd01b,
- "%s: len=%lx\n", __func__, *len);
+ if (buf && *len != vha->hw->fw_dump_len)
+ ql_dbg(ql_dbg_misc, vha, 0xd01b,
+ "%s: length=%#lx residual=%+ld\n",
+ __func__, *len, vha->hw->fw_dump_len - *len);
if (buf) {
ql_log(ql_log_warn, vha, 0xd015,
- "Firmware dump saved to temp buffer (%ld/%p)\n",
+ "Firmware dump saved to temp buffer (%lu/%p)\n",
vha->host_no, vha->hw->fw_dump);
qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
}
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 45bc84e8e3bf..005a378f7fab 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "9.00.00.00-k"
+#define QLA2XXX_VERSION "10.00.00.00-k"
-#define QLA_DRIVER_MAJOR_VER 9
+#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 0
#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 7443e4efa3ae..c4b414833b86 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -25,7 +25,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
@@ -284,7 +283,7 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
WARN_ON(cmd->trc_flags & TRC_CMD_FREE);
- cmd->vha->tgt_counters.qla_core_ret_sta_ctio++;
+ cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++;
cmd->trc_flags |= TRC_CMD_FREE;
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
@@ -296,7 +295,7 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
*/
static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
{
- cmd->vha->tgt_counters.core_qla_free_cmd++;
+ cmd->qpair->tgt_counters.core_qla_free_cmd++;
cmd->cmd_in_wq = 1;
WARN_ON(cmd->trc_flags & TRC_CMD_DONE);
@@ -492,7 +491,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
}
#endif
- cmd->vha->tgt_counters.qla_core_sbt_cmd++;
+ cmd->qpair->tgt_counters.qla_core_sbt_cmd++;
return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
cmd->unpacked_lun, data_length, fcp_task_attr,
data_dir, flags);
@@ -520,7 +519,7 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
}
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- cmd->vha->tgt_counters.qla_core_ret_ctio++;
+ cmd->qpair->tgt_counters.qla_core_ret_ctio++;
if (!cmd->write_data_transferred) {
/*
* Check if se_cmd has already been aborted via LUN_RESET, and
@@ -595,7 +594,7 @@ static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd,
/*
* Called from qla_target.c:qlt_issue_task_mgmt()
*/
-static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
+static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun,
uint16_t tmr_func, uint32_t tag)
{
struct fc_port *sess = mcmd->sess;
@@ -686,6 +685,19 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
struct qla_tgt_cmd, se_cmd);
int xmit_type = QLA_TGT_XMIT_STATUS;
+ if (cmd->aborted) {
+ /*
+ * Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
+ * can get ahead of this cmd; tcm_qla2xxx_aborted_task
+ * has already kicked off the free.
+ */
+ pr_debug(
+ "queue_data_in aborted cmd[%p] refcount %d transport_state %x, t_state %x, se_cmd_flags %x\n",
+ cmd, kref_read(&cmd->se_cmd.cmd_kref),
+ cmd->se_cmd.transport_state, cmd->se_cmd.t_state,
+ cmd->se_cmd.se_cmd_flags);
+ return 0;
+ }
cmd->bufflen = se_cmd->data_length;
cmd->sg = NULL;
cmd->sg_cnt = 0;
@@ -1870,9 +1882,9 @@ static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item,
char *page)
{
return sprintf(page,
- "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
- UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
- utsname()->machine);
+ "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on %s\n",
+ QLA2XXX_VERSION, utsname()->sysname,
+ utsname()->machine, utsname()->release);
}
CONFIGFS_ATTR_RO(tcm_qla2xxx_wwn_, version);
@@ -1976,9 +1988,9 @@ static int tcm_qla2xxx_register_configfs(void)
{
int ret;
- pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
- UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
- utsname()->machine);
+ pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on %s\n",
+ QLA2XXX_VERSION, utsname()->sysname,
+ utsname()->machine, utsname()->release);
ret = target_register_template(&tcm_qla2xxx_ops);
if (ret)
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 61cdd99ae41e..3d38c6d463b8 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -108,14 +108,7 @@ EXPORT_SYMBOL(scsi_sd_pm_domain);
*/
void scsi_put_command(struct scsi_cmnd *cmd)
{
- unsigned long flags;
-
- /* serious error if the command hasn't come from a device list */
- spin_lock_irqsave(&cmd->device->list_lock, flags);
- BUG_ON(list_empty(&cmd->list));
- list_del_init(&cmd->list);
- spin_unlock_irqrestore(&cmd->device->list_lock, flags);
-
+ scsi_del_cmd_from_list(cmd);
BUG_ON(delayed_work_pending(&cmd->abort_work));
}
@@ -807,11 +800,7 @@ MODULE_LICENSE("GPL");
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
-#ifdef CONFIG_SCSI_MQ_DEFAULT
bool scsi_use_blk_mq = true;
-#else
-bool scsi_use_blk_mq = false;
-#endif
module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
static int __init init_scsi(void)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 304a7158540f..ea9f40e51f68 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1628,11 +1628,17 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *next;
+ struct scsi_device *sdev;
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
sdev_printk(KERN_INFO, scmd->device, "Device offlined - "
"not ready after error recovery\n");
- scsi_device_set_state(scmd->device, SDEV_OFFLINE);
+ sdev = scmd->device;
+
+ mutex_lock(&sdev->state_mutex);
+ scsi_device_set_state(sdev, SDEV_OFFLINE);
+ mutex_unlock(&sdev->state_mutex);
+
scsi_eh_finish_cmd(scmd, done_q);
}
return;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 550e29f903b7..f6097b89d5d3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -45,23 +45,23 @@ static struct kmem_cache *scsi_sense_isadma_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);
static inline struct kmem_cache *
-scsi_select_sense_cache(struct Scsi_Host *shost)
+scsi_select_sense_cache(bool unchecked_isa_dma)
{
- return shost->unchecked_isa_dma ?
- scsi_sense_isadma_cache : scsi_sense_cache;
+ return unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache;
}
-static void scsi_free_sense_buffer(struct Scsi_Host *shost,
- unsigned char *sense_buffer)
+static void scsi_free_sense_buffer(bool unchecked_isa_dma,
+ unsigned char *sense_buffer)
{
- kmem_cache_free(scsi_select_sense_cache(shost), sense_buffer);
+ kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma),
+ sense_buffer);
}
-static unsigned char *scsi_alloc_sense_buffer(struct Scsi_Host *shost,
+static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma,
gfp_t gfp_mask, int numa_node)
{
- return kmem_cache_alloc_node(scsi_select_sense_cache(shost), gfp_mask,
- numa_node);
+ return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma),
+ gfp_mask, numa_node);
}
int scsi_init_sense_cache(struct Scsi_Host *shost)
@@ -69,7 +69,7 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
struct kmem_cache *cache;
int ret = 0;
- cache = scsi_select_sense_cache(shost);
+ cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
if (cache)
return 0;
@@ -583,19 +583,9 @@ static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
- struct scsi_device *sdev = cmd->device;
- struct Scsi_Host *shost = sdev->host;
- unsigned long flags;
-
scsi_mq_free_sgtables(cmd);
scsi_uninit_cmd(cmd);
-
- if (shost->use_cmd_list) {
- BUG_ON(list_empty(&cmd->list));
- spin_lock_irqsave(&sdev->list_lock, flags);
- list_del_init(&cmd->list);
- spin_unlock_irqrestore(&sdev->list_lock, flags);
- }
+ scsi_del_cmd_from_list(cmd);
}
/*
@@ -1129,12 +1119,41 @@ void scsi_initialize_rq(struct request *rq)
}
EXPORT_SYMBOL(scsi_initialize_rq);
+/* Add a command to the list used by the aacraid and dpt_i2o drivers */
+void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct Scsi_Host *shost = sdev->host;
+ unsigned long flags;
+
+ if (shost->use_cmd_list) {
+ spin_lock_irqsave(&sdev->list_lock, flags);
+ list_add_tail(&cmd->list, &sdev->cmd_list);
+ spin_unlock_irqrestore(&sdev->list_lock, flags);
+ }
+}
+
+/* Remove a command from the list used by the aacraid and dpt_i2o drivers */
+void scsi_del_cmd_from_list(struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct Scsi_Host *shost = sdev->host;
+ unsigned long flags;
+
+ if (shost->use_cmd_list) {
+ spin_lock_irqsave(&sdev->list_lock, flags);
+ BUG_ON(list_empty(&cmd->list));
+ list_del_init(&cmd->list);
+ spin_unlock_irqrestore(&sdev->list_lock, flags);
+ }
+}
+
/* Called after a request has been started. */
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
void *buf = cmd->sense_buffer;
void *prot = cmd->prot_sdb;
- unsigned long flags;
+ unsigned int unchecked_isa_dma = cmd->flags & SCMD_UNCHECKED_ISA_DMA;
/* zero out the cmd, except for the embedded scsi_request */
memset((char *)cmd + sizeof(cmd->req), 0,
@@ -1143,12 +1162,11 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
cmd->device = dev;
cmd->sense_buffer = buf;
cmd->prot_sdb = prot;
+ cmd->flags = unchecked_isa_dma;
INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
cmd->jiffies_at_alloc = jiffies;
- spin_lock_irqsave(&dev->list_lock, flags);
- list_add_tail(&cmd->list, &dev->cmd_list);
- spin_unlock_irqrestore(&dev->list_lock, flags);
+ scsi_add_cmd_to_list(cmd);
}
static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
@@ -1837,46 +1855,33 @@ static inline blk_status_t prep_to_mq(int ret)
}
}
+/* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
+static unsigned int scsi_mq_sgl_size(struct Scsi_Host *shost)
+{
+ return min_t(unsigned int, shost->sg_tablesize, SG_CHUNK_SIZE) *
+ sizeof(struct scatterlist);
+}
+
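scsi_mq_sgl_size() centralizes the sg-list sizing that scsi_mq_setup_tags() used to open-code, and scsi_init_request() now uses the same value to place cmd->prot_sdb once at allocation time rather than in scsi_mq_prep_fn(). A standalone sketch of the arithmetic; SG_CHUNK_SIZE and the structure sizes below are stand-ins rather than the kernel's real values:

#include <stdio.h>

#define SG_CHUNK_SIZE      128	/* stand-in */
#define SCATTERLIST_SIZE    32	/* assumed sizeof(struct scatterlist) */
#define SCSI_CMND_SIZE     448	/* assumed sizeof(struct scsi_cmnd) */

/* Mirrors scsi_mq_sgl_size(): sg table capped at SG_CHUNK_SIZE entries. */
static unsigned int sgl_size(unsigned int sg_tablesize)
{
	unsigned int n = sg_tablesize < SG_CHUNK_SIZE ? sg_tablesize : SG_CHUNK_SIZE;

	return n * SCATTERLIST_SIZE;
}

int main(void)
{
	unsigned int cmd_size = 64;		/* assumed shost->hostt->cmd_size */
	unsigned int sgl = sgl_size(168);	/* 168 entries, capped to 128 */

	/* Layout mirrored from scsi_init_request(): cmd, LLD data, sgl, prot_sdb. */
	printf("sgl at offset %u, prot_sdb at offset %u\n",
	       SCSI_CMND_SIZE + cmd_size, SCSI_CMND_SIZE + cmd_size + sgl);
	return 0;
}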
static int scsi_mq_prep_fn(struct request *req)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
struct scsi_device *sdev = req->q->queuedata;
struct Scsi_Host *shost = sdev->host;
- unsigned char *sense_buf = cmd->sense_buffer;
struct scatterlist *sg;
- /* zero out the cmd, except for the embedded scsi_request */
- memset((char *)cmd + sizeof(cmd->req), 0,
- sizeof(*cmd) - sizeof(cmd->req) + shost->hostt->cmd_size);
+ scsi_init_command(sdev, cmd);
req->special = cmd;
cmd->request = req;
- cmd->device = sdev;
- cmd->sense_buffer = sense_buf;
cmd->tag = req->tag;
-
cmd->prot_op = SCSI_PROT_NORMAL;
- INIT_LIST_HEAD(&cmd->list);
- INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
- cmd->jiffies_at_alloc = jiffies;
-
- if (shost->use_cmd_list) {
- spin_lock_irq(&sdev->list_lock);
- list_add_tail(&cmd->list, &sdev->cmd_list);
- spin_unlock_irq(&sdev->list_lock);
- }
-
sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
cmd->sdb.table.sgl = sg;
if (scsi_host_get_prot(shost)) {
- cmd->prot_sdb = (void *)sg +
- min_t(unsigned int,
- shost->sg_tablesize, SG_CHUNK_SIZE) *
- sizeof(struct scatterlist);
memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
cmd->prot_sdb->table.sgl =
@@ -2000,23 +2005,34 @@ static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
struct Scsi_Host *shost = set->driver_data;
+ const bool unchecked_isa_dma = shost->unchecked_isa_dma;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct scatterlist *sg;
- cmd->sense_buffer =
- scsi_alloc_sense_buffer(shost, GFP_KERNEL, numa_node);
+ if (unchecked_isa_dma)
+ cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
+ cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma,
+ GFP_KERNEL, numa_node);
if (!cmd->sense_buffer)
return -ENOMEM;
cmd->req.sense = cmd->sense_buffer;
+
+ if (scsi_host_get_prot(shost)) {
+ sg = (void *)cmd + sizeof(struct scsi_cmnd) +
+ shost->hostt->cmd_size;
+ cmd->prot_sdb = (void *)sg + scsi_mq_sgl_size(shost);
+ }
+
return 0;
}
static void scsi_exit_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx)
{
- struct Scsi_Host *shost = set->driver_data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- scsi_free_sense_buffer(shost, cmd->sense_buffer);
+ scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
+ cmd->sense_buffer);
}
static int scsi_map_queues(struct blk_mq_tag_set *set)
@@ -2091,11 +2107,15 @@ EXPORT_SYMBOL_GPL(__scsi_init_queue);
static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
{
struct Scsi_Host *shost = q->rq_alloc_data;
+ const bool unchecked_isa_dma = shost->unchecked_isa_dma;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
memset(cmd, 0, sizeof(*cmd));
- cmd->sense_buffer = scsi_alloc_sense_buffer(shost, gfp, NUMA_NO_NODE);
+ if (unchecked_isa_dma)
+ cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
+ cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma, gfp,
+ NUMA_NO_NODE);
if (!cmd->sense_buffer)
goto fail;
cmd->req.sense = cmd->sense_buffer;
@@ -2109,19 +2129,19 @@ static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
return 0;
fail_free_sense:
- scsi_free_sense_buffer(shost, cmd->sense_buffer);
+ scsi_free_sense_buffer(unchecked_isa_dma, cmd->sense_buffer);
fail:
return -ENOMEM;
}
static void scsi_exit_rq(struct request_queue *q, struct request *rq)
{
- struct Scsi_Host *shost = q->rq_alloc_data;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
if (cmd->prot_sdb)
kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
- scsi_free_sense_buffer(shost, cmd->sense_buffer);
+ scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
+ cmd->sense_buffer);
}
struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
@@ -2179,12 +2199,9 @@ struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
- unsigned int cmd_size, sgl_size, tbl_size;
+ unsigned int cmd_size, sgl_size;
- tbl_size = shost->sg_tablesize;
- if (tbl_size > SG_CHUNK_SIZE)
- tbl_size = SG_CHUNK_SIZE;
- sgl_size = tbl_size * sizeof(struct scatterlist);
+ sgl_size = scsi_mq_sgl_size(shost);
cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
if (scsi_host_get_prot(shost))
cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;
@@ -2614,7 +2631,6 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
case SDEV_QUIESCE:
case SDEV_OFFLINE:
case SDEV_TRANSPORT_OFFLINE:
- case SDEV_BLOCK:
break;
default:
goto illegal;
@@ -2628,6 +2644,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
case SDEV_OFFLINE:
case SDEV_TRANSPORT_OFFLINE:
case SDEV_CANCEL:
+ case SDEV_BLOCK:
case SDEV_CREATED_BLOCK:
break;
default:
@@ -2871,7 +2888,12 @@ static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
int
scsi_device_quiesce(struct scsi_device *sdev)
{
- int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
+ int err;
+
+ mutex_lock(&sdev->state_mutex);
+ err = scsi_device_set_state(sdev, SDEV_QUIESCE);
+ mutex_unlock(&sdev->state_mutex);
+
if (err)
return err;
@@ -2899,10 +2921,11 @@ void scsi_device_resume(struct scsi_device *sdev)
* so assume the state is being managed elsewhere (for example
* device deleted during suspend)
*/
- if (sdev->sdev_state != SDEV_QUIESCE ||
- scsi_device_set_state(sdev, SDEV_RUNNING))
- return;
- scsi_run_queue(sdev->request_queue);
+ mutex_lock(&sdev->state_mutex);
+ if (sdev->sdev_state == SDEV_QUIESCE &&
+ scsi_device_set_state(sdev, SDEV_RUNNING) == 0)
+ scsi_run_queue(sdev->request_queue);
+ mutex_unlock(&sdev->state_mutex);
}
EXPORT_SYMBOL(scsi_device_resume);
@@ -2933,28 +2956,20 @@ scsi_target_resume(struct scsi_target *starget)
EXPORT_SYMBOL(scsi_target_resume);
/**
- * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
- * @sdev: device to block
- * @wait: Whether or not to wait until ongoing .queuecommand() /
- * .queue_rq() calls have finished.
- *
- * Block request made by scsi lld's to temporarily stop all
- * scsi commands on the specified device. May sleep.
+ * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
+ * @sdev: device to block
*
- * Returns zero if successful or error if not
+ * Pause SCSI command processing on the specified device. Does not sleep.
*
- * Notes:
- * This routine transitions the device to the SDEV_BLOCK state
- * (which must be a legal transition). When the device is in this
- * state, all commands are deferred until the scsi lld reenables
- * the device with scsi_device_unblock or device_block_tmo fires.
+ * Returns zero if successful or a negative error code upon failure.
*
- * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
- * scsi_internal_device_block() has blocked a SCSI device and also
- * remove the rport mutex lock and unlock calls from srp_queuecommand().
+ * Notes:
+ * This routine transitions the device to the SDEV_BLOCK state (which must be
+ * a legal transition). When the device is in this state, command processing
+ * is paused until the device leaves the SDEV_BLOCK state. See also
+ * scsi_internal_device_unblock_nowait().
*/
-int
-scsi_internal_device_block(struct scsi_device *sdev, bool wait)
+int scsi_internal_device_block_nowait(struct scsi_device *sdev)
{
struct request_queue *q = sdev->request_queue;
unsigned long flags;
@@ -2974,45 +2989,86 @@ scsi_internal_device_block(struct scsi_device *sdev, bool wait)
* request queue.
*/
if (q->mq_ops) {
- if (wait)
- blk_mq_quiesce_queue(q);
- else
- blk_mq_quiesce_queue_nowait(q);
+ blk_mq_quiesce_queue_nowait(q);
} else {
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
- if (wait)
- scsi_wait_for_queuecommand(sdev);
}
return 0;
}
-EXPORT_SYMBOL_GPL(scsi_internal_device_block);
-
+EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
+
/**
- * scsi_internal_device_unblock - resume a device after a block request
- * @sdev: device to resume
- * @new_state: state to set devices to after unblocking
+ * scsi_internal_device_block - try to transition to the SDEV_BLOCK state
+ * @sdev: device to block
+ *
+ * Pause SCSI command processing on the specified device and wait until all
+ * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished. May sleep.
*
- * Called by scsi lld's or the midlayer to restart the device queue
- * for the previously suspended scsi device. Called from interrupt or
- * normal process context.
+ * Returns zero if successful or a negative error code upon failure.
*
- * Returns zero if successful or error if not.
+ * Note:
+ * This routine transitions the device to the SDEV_BLOCK state (which must be
+ * a legal transition). When the device is in this state, command processing
+ * is paused until the device leaves the SDEV_BLOCK state. See also
+ * scsi_internal_device_unblock().
*
- * Notes:
- * This routine transitions the device to the SDEV_RUNNING state
- * or to one of the offline states (which must be a legal transition)
- * allowing the midlayer to goose the queue for this device.
+ * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
+ * scsi_internal_device_block() has blocked a SCSI device and also
+ * remove the rport mutex lock and unlock calls from srp_queuecommand().
*/
-int
-scsi_internal_device_unblock(struct scsi_device *sdev,
- enum scsi_device_state new_state)
+static int scsi_internal_device_block(struct scsi_device *sdev)
{
- struct request_queue *q = sdev->request_queue;
+ struct request_queue *q = sdev->request_queue;
+ int err;
+
+ mutex_lock(&sdev->state_mutex);
+ err = scsi_internal_device_block_nowait(sdev);
+ if (err == 0) {
+ if (q->mq_ops)
+ blk_mq_quiesce_queue(q);
+ else
+ scsi_wait_for_queuecommand(sdev);
+ }
+ mutex_unlock(&sdev->state_mutex);
+
+ return err;
+}
+
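Only the _nowait variants of the block/unblock helpers stay exported after this split; the sleeping wrappers take sdev->state_mutex and quiesce the queue themselves. A hedged sketch of how a low-level driver might use the exported pair, assuming the prototypes are reachable through scsi/scsi_device.h; the function names are invented for illustration:

#include <scsi/scsi_device.h>

/* Sketch only: fence a device from atomic context (e.g. an interrupt
 * handler), then resume it later from process context. Error handling
 * is elided.
 */
static void example_lld_fence_device(struct scsi_device *sdev)
{
	/* Non-sleeping transition to SDEV_BLOCK; command queueing pauses. */
	scsi_internal_device_block_nowait(sdev);
}

static void example_lld_unfence_device(struct scsi_device *sdev)
{
	/* Move the device back to SDEV_RUNNING and restart its queue. */
	scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
}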
+void scsi_start_queue(struct scsi_device *sdev)
+{
+ struct request_queue *q = sdev->request_queue;
unsigned long flags;
+ if (q->mq_ops) {
+ blk_mq_unquiesce_queue(q);
+ } else {
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+}
+
+/**
+ * scsi_internal_device_unblock_nowait - resume a device after a block request
+ * @sdev: device to resume
+ * @new_state: state to set the device to after unblocking
+ *
+ * Restart the device queue for a previously suspended SCSI device. Does not
+ * sleep.
+ *
+ * Returns zero if successful or a negative error code upon failure.
+ *
+ * Notes:
+ * This routine transitions the device to the SDEV_RUNNING state or to one of
+ * the offline states (which must be a legal transition) allowing the midlayer
+ * to goose the queue for this device.
+ */
+int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
+ enum scsi_device_state new_state)
+{
/*
* Try to transition the scsi device to SDEV_RUNNING or one of the
* offlined states and goose the device queue if successful.
@@ -3030,22 +3086,42 @@ scsi_internal_device_unblock(struct scsi_device *sdev,
sdev->sdev_state != SDEV_OFFLINE)
return -EINVAL;
- if (q->mq_ops) {
- blk_mq_unquiesce_queue(q);
- } else {
- spin_lock_irqsave(q->queue_lock, flags);
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
+ scsi_start_queue(sdev);
return 0;
}
-EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
+EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait);
+
+/**
+ * scsi_internal_device_unblock - resume a device after a block request
+ * @sdev: device to resume
+ * @new_state: state to set the device to after unblocking
+ *
+ * Restart the device queue for a previously suspended SCSI device. May sleep.
+ *
+ * Returns zero if successful or a negative error code upon failure.
+ *
+ * Notes:
+ * This routine transitions the device to the SDEV_RUNNING state or to one of
+ * the offline states (which must be a legal transition) allowing the midlayer
+ * to goose the queue for this device.
+ */
+static int scsi_internal_device_unblock(struct scsi_device *sdev,
+ enum scsi_device_state new_state)
+{
+ int ret;
+
+ mutex_lock(&sdev->state_mutex);
+ ret = scsi_internal_device_unblock_nowait(sdev, new_state);
+ mutex_unlock(&sdev->state_mutex);
+
+ return ret;
+}
static void
device_block(struct scsi_device *sdev, void *data)
{
- scsi_internal_device_block(sdev, true);
+ scsi_internal_device_block(sdev);
}
static int
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 59ebc1795bb3..c11c1f9c912c 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -80,6 +80,8 @@ int scsi_eh_get_sense(struct list_head *work_q,
int scsi_noretry_cmd(struct scsi_cmnd *scmd);
/* scsi_lib.c */
+extern void scsi_add_cmd_to_list(struct scsi_cmnd *cmd);
+extern void scsi_del_cmd_from_list(struct scsi_cmnd *cmd);
extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
extern void scsi_device_unbusy(struct scsi_device *sdev);
extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
@@ -88,6 +90,7 @@ extern void scsi_run_host_queues(struct Scsi_Host *shost);
extern void scsi_requeue_run_queue(struct work_struct *work);
extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
+extern void scsi_start_queue(struct scsi_device *sdev);
extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
extern int scsi_init_queue(void);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 69979574004f..fd88dabd599d 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -231,6 +231,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->id = starget->id;
sdev->lun = lun;
sdev->channel = starget->channel;
+ mutex_init(&sdev->state_mutex);
sdev->sdev_state = SDEV_CREATED;
INIT_LIST_HEAD(&sdev->siblings);
INIT_LIST_HEAD(&sdev->same_target_siblings);
@@ -384,11 +385,12 @@ static void scsi_target_reap_ref_release(struct kref *kref)
= container_of(kref, struct scsi_target, reap_ref);
/*
- * if we get here and the target is still in the CREATED state that
+ * if we get here and the target is still in a CREATED state that
* means it was allocated but never made visible (because a scan
* turned up no LUNs), so don't call device_del() on it.
*/
- if (starget->state != STARGET_CREATED) {
+ if ((starget->state != STARGET_CREATED) &&
+ (starget->state != STARGET_CREATED_REMOVE)) {
transport_remove_device(&starget->dev);
device_del(&starget->dev);
}
@@ -655,8 +657,6 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (pass == 1) {
if (BLIST_INQUIRY_36 & *bflags)
next_inquiry_len = 36;
- else if (BLIST_INQUIRY_58 & *bflags)
- next_inquiry_len = 58;
else if (sdev->inquiry_len)
next_inquiry_len = sdev->inquiry_len;
else
@@ -926,15 +926,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->use_10_for_rw = 1;
- if (*bflags & BLIST_MS_SKIP_PAGE_08)
- sdev->skip_ms_page_8 = 1;
-
- if (*bflags & BLIST_MS_SKIP_PAGE_3F)
- sdev->skip_ms_page_3f = 1;
-
- if (*bflags & BLIST_USE_10_BYTE_MS)
- sdev->use_10_for_ms = 1;
-
/* some devices don't like REPORT SUPPORTED OPERATION CODES
* and will simply timeout causing sd_mod init to take a very
* very long time */
@@ -943,21 +934,19 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
/* set the device running here so that slave configure
* may do I/O */
+ mutex_lock(&sdev->state_mutex);
ret = scsi_device_set_state(sdev, SDEV_RUNNING);
- if (ret) {
+ if (ret)
ret = scsi_device_set_state(sdev, SDEV_BLOCK);
+ mutex_unlock(&sdev->state_mutex);
- if (ret) {
- sdev_printk(KERN_ERR, sdev,
- "in wrong state %s to complete scan\n",
- scsi_device_state_name(sdev->sdev_state));
- return SCSI_SCAN_NO_RESPONSE;
- }
+ if (ret) {
+ sdev_printk(KERN_ERR, sdev,
+ "in wrong state %s to complete scan\n",
+ scsi_device_state_name(sdev->sdev_state));
+ return SCSI_SCAN_NO_RESPONSE;
}
- if (*bflags & BLIST_MS_192_BYTES_FOR_3F)
- sdev->use_192_bytes_for_3f = 1;
-
if (*bflags & BLIST_NOT_LOCKABLE)
sdev->lockable = 0;
@@ -967,9 +956,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (*bflags & BLIST_NO_DIF)
sdev->no_dif = 1;
- if (*bflags & BLIST_SYNC_ALUA)
- sdev->synchronous_alua = 1;
-
sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
if (*bflags & BLIST_TRY_VPD_PAGES)
@@ -1108,7 +1094,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
/*
* result contains valid SCSI INQUIRY data.
*/
- if (((result[0] >> 5) == 3) && !(bflags & BLIST_ATTACH_PQ3)) {
+ if ((result[0] >> 5) == 3) {
/*
* For a Peripheral qualifier 3 (011b), the SCSI
* spec says: The device server is not capable of
@@ -1266,11 +1252,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
*/
if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
max_dev_lun = min(8U, max_dev_lun);
-
- /*
- * Stop scanning at 255 unless BLIST_SCSI3LUN
- */
- if (!(bflags & BLIST_SCSI3LUN))
+ else
max_dev_lun = min(256U, max_dev_lun);
/*
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 82dfe07b1d47..d6984df71f1c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -719,7 +719,7 @@ static ssize_t
store_state_field(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int i;
+ int i, ret;
struct scsi_device *sdev = to_scsi_device(dev);
enum scsi_device_state state = 0;
@@ -734,9 +734,11 @@ store_state_field(struct device *dev, struct device_attribute *attr,
if (!state)
return -EINVAL;
- if (scsi_device_set_state(sdev, state))
- return -EINVAL;
- return count;
+ mutex_lock(&sdev->state_mutex);
+ ret = scsi_device_set_state(sdev, state);
+ mutex_unlock(&sdev->state_mutex);
+
+ return ret == 0 ? count : -EINVAL;
}
static ssize_t
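store_state_field() is one of several call sites that now wrap scsi_device_set_state() in the new per-device state_mutex (initialized in scsi_alloc_sdev() further down), so sysfs writes, error handling, quiesce/resume and removal no longer race one another's state transitions. The pattern, as a minimal sketch with a hypothetical surrounding function:

#include <linux/mutex.h>
#include <scsi/scsi_device.h>

/* Minimal sketch of the serialization this series puts around every
 * scsi_device_set_state() caller.
 */
static int example_set_sdev_state(struct scsi_device *sdev,
				  enum scsi_device_state new_state)
{
	int ret;

	mutex_lock(&sdev->state_mutex);
	ret = scsi_device_set_state(sdev, new_state);
	mutex_unlock(&sdev->state_mutex);

	return ret;	/* 0 on success, -EINVAL for an illegal transition */
}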
@@ -1272,6 +1274,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
void __scsi_remove_device(struct scsi_device *sdev)
{
struct device *dev = &sdev->sdev_gendev;
+ int res;
/*
* This cleanup path is not reentrant and while it is impossible
@@ -1282,7 +1285,25 @@ void __scsi_remove_device(struct scsi_device *sdev)
return;
if (sdev->is_visible) {
- if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
+ /*
+ * If scsi_internal_target_block() is running concurrently,
+ * wait until it has finished before changing the device state.
+ */
+ mutex_lock(&sdev->state_mutex);
+ /*
+ * If blocked, we go straight to DEL and restart the queue so
+ * any commands issued during driver shutdown (like sync
+ * cache) are errored immediately.
+ */
+ res = scsi_device_set_state(sdev, SDEV_CANCEL);
+ if (res != 0) {
+ res = scsi_device_set_state(sdev, SDEV_DEL);
+ if (res == 0)
+ scsi_start_queue(sdev);
+ }
+ mutex_unlock(&sdev->state_mutex);
+
+ if (res != 0)
return;
bsg_unregister_queue(sdev->request_queue);
@@ -1298,7 +1319,10 @@ void __scsi_remove_device(struct scsi_device *sdev)
* scsi_run_queue() invocations have finished before tearing down the
* device.
*/
+ mutex_lock(&sdev->state_mutex);
scsi_device_set_state(sdev, SDEV_DEL);
+ mutex_unlock(&sdev->state_mutex);
+
blk_cleanup_queue(sdev->request_queue);
cancel_work_sync(&sdev->requeue_work);
@@ -1370,11 +1394,15 @@ restart:
spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(starget, &shost->__targets, siblings) {
if (starget->state == STARGET_DEL ||
- starget->state == STARGET_REMOVE)
+ starget->state == STARGET_REMOVE ||
+ starget->state == STARGET_CREATED_REMOVE)
continue;
if (starget->dev.parent == dev || &starget->dev == dev) {
kref_get(&starget->reap_ref);
- starget->state = STARGET_REMOVE;
+ if (starget->state == STARGET_CREATED)
+ starget->state = STARGET_CREATED_REMOVE;
+ else
+ starget->state = STARGET_REMOVE;
spin_unlock_irqrestore(shost->host_lock, flags);
__scsi_remove_target(starget);
scsi_target_reap(starget);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 1df77453f6b6..7e24aa30c3b0 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1692,7 +1692,7 @@ fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20);
* Host Statistics Management
*/
-/* Show a given an attribute in the statistics group */
+/* Show a given attribute in the statistics group */
static ssize_t
fc_stat_show(const struct device *dev, char *buf, unsigned long offset)
{
@@ -2925,7 +2925,7 @@ EXPORT_SYMBOL(fc_remote_port_add);
* attached to it. However, we want to semi-persist the target id assigned
* to that port if it eventually does exist. The port structure will
* remain (although with minimal information) so that the target id
- * bindings remails.
+ * bindings also remain.
*
* If the remote port is not an FCP Target, it will be fully torn down
* and deallocated, including the fc_remote_port class device.
@@ -2939,7 +2939,7 @@ EXPORT_SYMBOL(fc_remote_port_add);
* If the remote port does not return (signaled by a LLDD call to
* fc_remote_port_add()) within the dev_loss_tmo timeout, then the
* scsi target is removed - killing all outstanding i/o and removing the
- * scsi devices attached ot it. The port structure will be marked Not
+ * scsi devices attached to it. The port structure will be marked Not
* Present and be partially cleared, leaving only enough information to
* recognize the remote port relative to the scsi target id binding if
* it later appears. The port will remain as long as there is a valid
@@ -3058,7 +3058,7 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
* There may have been a delete timer running on the
* port. Ensure that it is cancelled as we now know
* the port is an FCP Target.
- * Note: we know the rport is exists and in an online
+ * Note: we know the rport exists and is in an online
* state as the LLDD would not have had an rport
* reference to pass us.
*
@@ -3319,7 +3319,7 @@ EXPORT_SYMBOL(fc_block_scsi_eh);
* @ret_vport: The pointer to the created vport.
*
* Allocates and creates the vport structure, calls the parent host
- * to instantiate the vport, the completes w/ class and sysfs creation.
+ * to instantiate the vport, this completes w/ class and sysfs creation.
*
* Notes:
* This routine assumes no locks are held on entry.
@@ -3399,7 +3399,7 @@ fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
/*
* if the parent isn't the physical adapter's Scsi_Host, ensure
- * the Scsi_Host at least contains ia symlink to the vport.
+ * the Scsi_Host at least contains a symlink to the vport.
*/
if (pdev != &shost->shost_gendev) {
error = sysfs_create_link(&shost->shost_gendev.kobj,
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 3c5d89852e9f..f617021c94f7 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -554,11 +554,12 @@ int srp_reconnect_rport(struct srp_rport *rport)
* invoking scsi_target_unblock() won't change the state of
* these devices into running so do that explicitly.
*/
- spin_lock_irq(shost->host_lock);
- __shost_for_each_device(sdev, shost)
+ shost_for_each_device(sdev, shost) {
+ mutex_lock(&sdev->state_mutex);
if (sdev->sdev_state == SDEV_OFFLINE)
sdev->sdev_state = SDEV_RUNNING;
- spin_unlock_irq(shost->host_lock);
+ mutex_unlock(&sdev->state_mutex);
+ }
} else if (rport->state == SRP_RPORT_RUNNING) {
/*
* srp_reconnect_rport() has been invoked with fast_io_fail
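
The switch from __shost_for_each_device() under host_lock to shost_for_each_device() matters here because state_mutex is a sleeping lock: the latter iterator pins each device with a reference and drops the host spinlock between iterations, so sleeping in the loop body is legal. A hedged sketch of the resulting idiom (function name illustrative):

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static void example_mark_offline_devices_running(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        /* Reference-counted iteration; safe to sleep inside the loop. */
        shost_for_each_device(sdev, shost) {
                mutex_lock(&sdev->state_mutex);
                if (sdev->sdev_state == SDEV_OFFLINE)
                        sdev->sdev_state = SDEV_RUNNING;
                mutex_unlock(&sdev->state_mutex);
        }
}
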
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b6bb4e0ce0e3..bea36adeee17 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -50,6 +50,7 @@
#include <linux/string_helpers.h>
#include <linux/async.h>
#include <linux/slab.h>
+#include <linux/sed-opal.h>
#include <linux/pm_runtime.h>
#include <linux/pr.h>
#include <linux/t10-pi.h>
@@ -155,7 +156,7 @@ static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int i, ct = -1, rcd, wce, sp;
+ int ct, rcd, wce, sp;
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
char buffer[64];
@@ -178,16 +179,10 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
sdkp->cache_override = 0;
}
- for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) {
- len = strlen(sd_cache_types[i]);
- if (strncmp(sd_cache_types[i], buf, len) == 0 &&
- buf[len] == '\n') {
- ct = i;
- break;
- }
- }
+ ct = sysfs_match_string(sd_cache_types, buf);
if (ct < 0)
return -EINVAL;
+
rcd = ct & 0x01 ? 1 : 0;
wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;
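
sysfs_match_string() compares the (possibly newline-terminated) sysfs input against every populated slot of a string array and returns the matching index, which is why the arrays it is applied to must be dense; the open-coded strncmp loops here and in the provisioning/zeroing mode stores below collapse into a single call. A hedged, self-contained sketch of the pattern (names are illustrative):

#include <linux/device.h>
#include <linux/string.h>

/* Every slot up to the highest index must be populated. */
static const char *const example_modes[] = {
        [0] = "off",
        [1] = "on",
        [2] = "auto",
};

static ssize_t example_mode_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        int idx;

        idx = sysfs_match_string(example_modes, buf);   /* "on" or "on\n" */
        if (idx < 0)
                return idx;                             /* -EINVAL on no match */

        /* ... apply example_modes[idx] ... */
        return count;
}
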
@@ -227,7 +222,7 @@ manage_start_stop_show(struct device *dev, struct device_attribute *attr,
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
- return snprintf(buf, 20, "%u\n", sdp->manage_start_stop);
+ return sprintf(buf, "%u\n", sdp->manage_start_stop);
}
static ssize_t
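
The snprintf(buf, 20, ...) to sprintf(buf, ...) conversions throughout this file rely on the sysfs contract that a show() callback is handed a full PAGE_SIZE buffer, so the arbitrary 20- or 40-byte caps added no safety. A minimal hedged sketch (attribute name illustrative); scnprintf() would be the choice if explicit bounding were still wanted:

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t example_flag_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        /* buf is a PAGE_SIZE sysfs buffer; short fixed strings fit easily. */
        return sprintf(buf, "%u\n", 1U);
}
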
@@ -251,7 +246,7 @@ allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);
+ return sprintf(buf, "%u\n", sdkp->device->allow_restart);
}
static ssize_t
@@ -279,7 +274,7 @@ cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
struct scsi_disk *sdkp = to_scsi_disk(dev);
int ct = sdkp->RCD + 2*sdkp->WCE;
- return snprintf(buf, 40, "%s\n", sd_cache_types[ct]);
+ return sprintf(buf, "%s\n", sd_cache_types[ct]);
}
static DEVICE_ATTR_RW(cache_type);
@@ -288,7 +283,7 @@ FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%u\n", sdkp->DPOFUA);
+ return sprintf(buf, "%u\n", sdkp->DPOFUA);
}
static DEVICE_ATTR_RO(FUA);
@@ -298,7 +293,7 @@ protection_type_show(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%u\n", sdkp->protection_type);
+ return sprintf(buf, "%u\n", sdkp->protection_type);
}
static ssize_t
@@ -341,9 +336,9 @@ protection_mode_show(struct device *dev, struct device_attribute *attr,
}
if (!dif && !dix)
- return snprintf(buf, 20, "none\n");
+ return sprintf(buf, "none\n");
- return snprintf(buf, 20, "%s%u\n", dix ? "dix" : "dif", dif);
+ return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif);
}
static DEVICE_ATTR_RO(protection_mode);
@@ -352,7 +347,7 @@ app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%u\n", sdkp->ATO);
+ return sprintf(buf, "%u\n", sdkp->ATO);
}
static DEVICE_ATTR_RO(app_tag_own);
@@ -362,10 +357,11 @@ thin_provisioning_show(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%u\n", sdkp->lbpme);
+ return sprintf(buf, "%u\n", sdkp->lbpme);
}
static DEVICE_ATTR_RO(thin_provisioning);
+/* sysfs_match_string() requires dense arrays */
static const char *lbp_mode[] = {
[SD_LBP_FULL] = "full",
[SD_LBP_UNMAP] = "unmap",
@@ -381,7 +377,7 @@ provisioning_mode_show(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%s\n", lbp_mode[sdkp->provisioning_mode]);
+ return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]);
}
static ssize_t
@@ -390,6 +386,7 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
+ int mode;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -402,23 +399,17 @@ provisioning_mode_store(struct device *dev, struct device_attribute *attr,
if (sdp->type != TYPE_DISK)
return -EINVAL;
- if (!strncmp(buf, lbp_mode[SD_LBP_UNMAP], 20))
- sd_config_discard(sdkp, SD_LBP_UNMAP);
- else if (!strncmp(buf, lbp_mode[SD_LBP_WS16], 20))
- sd_config_discard(sdkp, SD_LBP_WS16);
- else if (!strncmp(buf, lbp_mode[SD_LBP_WS10], 20))
- sd_config_discard(sdkp, SD_LBP_WS10);
- else if (!strncmp(buf, lbp_mode[SD_LBP_ZERO], 20))
- sd_config_discard(sdkp, SD_LBP_ZERO);
- else if (!strncmp(buf, lbp_mode[SD_LBP_DISABLE], 20))
- sd_config_discard(sdkp, SD_LBP_DISABLE);
- else
+ mode = sysfs_match_string(lbp_mode, buf);
+ if (mode < 0)
return -EINVAL;
+ sd_config_discard(sdkp, mode);
+
return count;
}
static DEVICE_ATTR_RW(provisioning_mode);
+/* sysfs_match_string() requires dense arrays */
static const char *zeroing_mode[] = {
[SD_ZERO_WRITE] = "write",
[SD_ZERO_WS] = "writesame",
@@ -432,7 +423,7 @@ zeroing_mode_show(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
+ return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
}
static ssize_t
@@ -440,21 +431,17 @@ zeroing_mode_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
+ int mode;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- if (!strncmp(buf, zeroing_mode[SD_ZERO_WRITE], 20))
- sdkp->zeroing_mode = SD_ZERO_WRITE;
- else if (!strncmp(buf, zeroing_mode[SD_ZERO_WS], 20))
- sdkp->zeroing_mode = SD_ZERO_WS;
- else if (!strncmp(buf, zeroing_mode[SD_ZERO_WS16_UNMAP], 20))
- sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP;
- else if (!strncmp(buf, zeroing_mode[SD_ZERO_WS10_UNMAP], 20))
- sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP;
- else
+ mode = sysfs_match_string(zeroing_mode, buf);
+ if (mode < 0)
return -EINVAL;
+ sdkp->zeroing_mode = mode;
+
return count;
}
static DEVICE_ATTR_RW(zeroing_mode);
@@ -465,7 +452,7 @@ max_medium_access_timeouts_show(struct device *dev,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%u\n", sdkp->max_medium_access_timeouts);
+ return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts);
}
static ssize_t
@@ -491,7 +478,7 @@ max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%u\n", sdkp->max_ws_blocks);
+ return sprintf(buf, "%u\n", sdkp->max_ws_blocks);
}
static ssize_t
@@ -643,6 +630,26 @@ static void scsi_disk_put(struct scsi_disk *sdkp)
mutex_unlock(&sd_ref_mutex);
}
+#ifdef CONFIG_BLK_SED_OPAL
+static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
+ size_t len, bool send)
+{
+ struct scsi_device *sdev = data;
+ u8 cdb[12] = { 0, };
+ int ret;
+
+ cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
+ cdb[1] = secp;
+ put_unaligned_be16(spsp, &cdb[2]);
+ put_unaligned_be32(len, &cdb[6]);
+
+ ret = scsi_execute_req(sdev, cdb,
+ send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+ buffer, len, NULL, SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+ return ret <= 0 ? ret : -EIO;
+}
+#endif /* CONFIG_BLK_SED_OPAL */
+
static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
unsigned int dix, unsigned int dif)
{
@@ -696,6 +703,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
switch (mode) {
+ case SD_LBP_FULL:
case SD_LBP_DISABLE:
blk_queue_max_discard_sectors(q, 0);
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
@@ -1454,6 +1462,9 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
if (error)
goto out;
+ if (is_sed_ioctl(cmd))
+ return sed_ioctl(sdkp->opal_dev, cmd, p);
+
/*
* Send SCSI addressing ioctls directly to mid level, send other
* ioctls to block level and then onto mid level if they can't be
@@ -1818,8 +1829,9 @@ static void sd_eh_reset(struct scsi_cmnd *scmd)
static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
{
struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
+ struct scsi_device *sdev = scmd->device;
- if (!scsi_device_online(scmd->device) ||
+ if (!scsi_device_online(sdev) ||
!scsi_medium_access_command(scmd) ||
host_byte(scmd->result) != DID_TIME_OUT ||
eh_disp != SUCCESS)
@@ -1845,7 +1857,9 @@ static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
scmd_printk(KERN_ERR, scmd,
"Medium access timeout failure. Offlining disk!\n");
- scsi_device_set_state(scmd->device, SDEV_OFFLINE);
+ mutex_lock(&sdev->state_mutex);
+ scsi_device_set_state(sdev, SDEV_OFFLINE);
+ mutex_unlock(&sdev->state_mutex);
return SUCCESS;
}
@@ -3014,6 +3028,20 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
sdkp->ws10 = 1;
}
+static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+ struct scsi_device *sdev = sdkp->device;
+
+ if (!sdev->security_supported)
+ return;
+
+ if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
+ SECURITY_PROTOCOL_IN) == 1 &&
+ scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
+ SECURITY_PROTOCOL_OUT) == 1)
+ sdkp->security = 1;
+}
+
/**
* sd_revalidate_disk - called the first time a new disk is seen,
* performs disk spin up, read_capacity, etc.
@@ -3067,6 +3095,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_cache_type(sdkp, buffer);
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
+ sd_read_security(sdkp, buffer);
}
sdkp->first_scan = 0;
@@ -3227,6 +3256,12 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
sd_revalidate_disk(gd);
+ if (sdkp->security) {
+ sdkp->opal_dev = init_opal_dev(sdp, &sd_sec_submit);
+ if (sdkp->opal_dev)
+ sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
+ }
+
sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
sdp->removable ? "removable " : "");
scsi_autopm_put_device(sdp);
@@ -3376,6 +3411,8 @@ static int sd_remove(struct device *dev)
sd_zbc_remove(sdkp);
+ free_opal_dev(sdkp->opal_dev);
+
blk_register_region(devt, SD_MINORS, NULL,
sd_default_probe, NULL, NULL);
@@ -3528,6 +3565,7 @@ static int sd_suspend_runtime(struct device *dev)
static int sd_resume(struct device *dev)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ int ret;
if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
return 0;
@@ -3536,7 +3574,10 @@ static int sd_resume(struct device *dev)
return 0;
sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
- return sd_start_stop_device(sdkp, 1);
+ ret = sd_start_stop_device(sdkp, 1);
+ if (!ret)
+ opal_unlock_from_suspend(sdkp->opal_dev);
+ return ret;
}
/**
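
Taken together, the sd.c hunks wire the disk into the block-layer sed-opal core: sd_read_security() probes SECURITY PROTOCOL IN/OUT support, sd_probe_async() registers sd_sec_submit via init_opal_dev(), sd_ioctl() forwards SED ioctls to sed_ioctl(), sd_resume() replays saved unlocks with opal_unlock_from_suspend(), and sd_remove() frees the opal_dev. A condensed, hedged sketch of that life cycle (the example_* names are illustrative; the sed-opal calls are the real API):

#include <linux/errno.h>
#include <linux/sed-opal.h>

struct example_disk {
        struct opal_dev *opal_dev;
        void *transport;                /* whatever sec_submit needs */
};

static int example_sec_submit(void *data, u16 spsp, u8 secp,
                              void *buf, size_t len, bool send)
{
        /* Issue SECURITY PROTOCOL IN/OUT (or the transport equivalent). */
        return 0;
}

static void example_probe(struct example_disk *d)
{
        d->opal_dev = init_opal_dev(d->transport, &example_sec_submit);
}

static int example_ioctl(struct example_disk *d, unsigned int cmd,
                         void __user *arg)
{
        if (is_sed_ioctl(cmd))
                return sed_ioctl(d->opal_dev, cmd, arg);
        return -ENOTTY;
}

static void example_resume(struct example_disk *d)
{
        /* Replays the unlock requests recorded before suspend. */
        opal_unlock_from_suspend(d->opal_dev);
}

static void example_remove(struct example_disk *d)
{
        free_opal_dev(d->opal_dev);     /* tolerates NULL */
}
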
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 61d02efd366c..99c4dde9b6bf 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -71,6 +71,7 @@ struct scsi_disk {
struct scsi_device *device;
struct device dev;
struct gendisk *disk;
+ struct opal_dev *opal_dev;
#ifdef CONFIG_BLK_DEV_ZONED
unsigned int nr_zones;
unsigned int zone_blocks;
@@ -114,6 +115,7 @@ struct scsi_disk {
unsigned rc_basis: 2;
unsigned zoned: 2;
unsigned urswrz : 1;
+ unsigned security : 1;
unsigned ignore_medium_access_errors : 1;
};
#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 71b4b91d2215..80cfa93e407c 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -249,8 +249,8 @@ static int sgiwd93_probe(struct platform_device *pdev)
hdata = host_to_hostdata(host);
hdata->dev = &pdev->dev;
- hdata->cpu = dma_alloc_noncoherent(&pdev->dev, HPC_DMA_SIZE,
- &hdata->dma, GFP_KERNEL);
+ hdata->cpu = dma_alloc_attrs(&pdev->dev, HPC_DMA_SIZE, &hdata->dma,
+ GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
if (!hdata->cpu) {
printk(KERN_WARNING "sgiwd93: Could not allocate memory for "
"host %d buffer.\n", unit);
@@ -289,7 +289,8 @@ static int sgiwd93_probe(struct platform_device *pdev)
out_irq:
free_irq(irq, host);
out_free:
- dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma);
+ dma_free_attrs(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma,
+ DMA_ATTR_NON_CONSISTENT);
out_put:
scsi_host_put(host);
out:
@@ -305,7 +306,8 @@ static int sgiwd93_remove(struct platform_device *pdev)
scsi_remove_host(host);
free_irq(pd->irq, host);
- dma_free_noncoherent(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma);
+ dma_free_attrs(&pdev->dev, HPC_DMA_SIZE, hdata->cpu, hdata->dma,
+ DMA_ATTR_NON_CONSISTENT);
scsi_host_put(host);
return 0;
}
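
dma_alloc_noncoherent()/dma_free_noncoherent() are folded into the generic attrs API here; DMA_ATTR_NON_CONSISTENT requests memory the device is not required to keep coherent, so CPU accesses are typically bracketed with dma_cache_sync(). A hedged sketch of the replacement pattern (size and direction are illustrative):

#include <linux/dma-mapping.h>

static void *example_alloc(struct device *dev, size_t size, dma_addr_t *dma)
{
        void *cpu = dma_alloc_attrs(dev, size, dma, GFP_KERNEL,
                                    DMA_ATTR_NON_CONSISTENT);
        if (!cpu)
                return NULL;

        /* Non-consistent memory: flush/invalidate around CPU access. */
        dma_cache_sync(dev, cpu, size, DMA_BIDIRECTIONAL);
        return cpu;
}

static void example_free(struct device *dev, size_t size, void *cpu,
                         dma_addr_t dma)
{
        dma_free_attrs(dev, size, cpu, dma, DMA_ATTR_NON_CONSISTENT);
}
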
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index b673825f46b5..07ec8a8877de 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1,6 +1,6 @@
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2016 Microsemi Corporation
+ * Copyright (c) 2016-2017 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -16,6 +16,8 @@
*
*/
+#include <linux/io-64-nonatomic-lo-hi.h>
+
#if !defined(_SMARTPQI_H)
#define _SMARTPQI_H
@@ -61,7 +63,7 @@ struct pqi_device_registers {
/*
* controller registers
*
- * These are defined by the PMC implementation.
+ * These are defined by the Microsemi implementation.
*
* Some registers (those named sis_*) are only used when in
* legacy SIS mode before we transition the controller into
@@ -102,6 +104,12 @@ enum pqi_io_path {
AIO_PATH = 1
};
+enum pqi_irq_mode {
+ IRQ_MODE_NONE,
+ IRQ_MODE_INTX,
+ IRQ_MODE_MSIX
+};
+
struct pqi_sg_descriptor {
__le64 address;
__le32 length;
@@ -484,7 +492,6 @@ struct pqi_raid_error_info {
#define PQI_EVENT_TYPE_LOGICAL_DEVICE 0x5
#define PQI_EVENT_TYPE_AIO_STATE_CHANGE 0xfd
#define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE 0xfe
-#define PQI_EVENT_TYPE_HEARTBEAT 0xff
#pragma pack()
@@ -629,17 +636,70 @@ struct pqi_encryption_info {
u32 encrypt_tweak_upper;
};
-#define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0)
-#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U)
+#pragma pack(1)
+
+#define PQI_CONFIG_TABLE_SIGNATURE "CFGTABLE"
+#define PQI_CONFIG_TABLE_MAX_LENGTH ((u16)~0)
+
+/* configuration table section IDs */
+#define PQI_CONFIG_TABLE_SECTION_GENERAL_INFO 0
+#define PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES 1
+#define PQI_CONFIG_TABLE_SECTION_FIRMWARE_ERRATA 2
+#define PQI_CONFIG_TABLE_SECTION_DEBUG 3
+#define PQI_CONFIG_TABLE_SECTION_HEARTBEAT 4
+
+struct pqi_config_table {
+ u8 signature[8]; /* "CFGTABLE" */
+ __le32 first_section_offset; /* offset in bytes from the base */
+ /* address of this table to the */
+ /* first section */
+};
+
+struct pqi_config_table_section_header {
+ __le16 section_id; /* as defined by the */
+ /* PQI_CONFIG_TABLE_SECTION_* */
+ /* manifest constants above */
+ __le16 next_section_offset; /* offset in bytes from base */
+ /* address of the table of the */
+ /* next section or 0 if last entry */
+};
+
+struct pqi_config_table_general_info {
+ struct pqi_config_table_section_header header;
+ __le32 section_length; /* size of this section in bytes */
+ /* including the section header */
+ __le32 max_outstanding_requests; /* max. outstanding */
+ /* commands supported by */
+ /* the controller */
+ __le32 max_sg_size; /* max. transfer size of a single */
+ /* command */
+ __le32 max_sg_per_request; /* max. number of scatter-gather */
+ /* entries supported in a single */
+ /* command */
+};
+
+struct pqi_config_table_debug {
+ struct pqi_config_table_section_header header;
+ __le32 scratchpad;
+};
+
+struct pqi_config_table_heartbeat {
+ struct pqi_config_table_section_header header;
+ __le32 heartbeat_counter;
+};
+
+#define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0)
+#define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP 32
+#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U)
+#define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U)
#define RAID_MAP_MAX_ENTRIES 1024
#define PQI_PHYSICAL_DEVICE_BUS 0
#define PQI_RAID_VOLUME_BUS 1
#define PQI_HBA_BUS 2
-#define PQI_MAX_BUS PQI_HBA_BUS
-
-#pragma pack(1)
+#define PQI_EXTERNAL_RAID_VOLUME_BUS 3
+#define PQI_MAX_BUS PQI_EXTERNAL_RAID_VOLUME_BUS
struct report_lun_header {
__be32 list_length;
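
With the PQI_EVENT_TYPE_HEARTBEAT event gone, controller liveness is tracked through the configuration table instead: the driver walks the section headers defined above, finds the PQI_CONFIG_TABLE_SECTION_HEARTBEAT section and polls its heartbeat_counter (see pqi_read_heartbeat_counter() later in this series). A hedged sketch of such a walk, not verbatim driver code:

#include <linux/io.h>
#include <linux/stddef.h>
#include <linux/types.h>

static __le32 __iomem *example_find_heartbeat(void __iomem *table)
{
        u32 section_offset;
        void __iomem *section;
        u16 section_id;

        /* Both offsets are relative to the base address of the table. */
        section_offset = readl(table +
                offsetof(struct pqi_config_table, first_section_offset));

        while (section_offset) {
                section = table + section_offset;
                section_id = readw(section +
                        offsetof(struct pqi_config_table_section_header,
                                section_id));
                if (section_id == PQI_CONFIG_TABLE_SECTION_HEARTBEAT)
                        return (__le32 __iomem *)(section +
                                offsetof(struct pqi_config_table_heartbeat,
                                        heartbeat_counter));
                section_offset = readw(section +
                        offsetof(struct pqi_config_table_section_header,
                                next_section_offset));
        }

        return NULL;
}
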
@@ -668,7 +728,6 @@ struct report_phys_lun_extended_entry {
};
/* for device_flags field of struct report_phys_lun_extended_entry */
-#define REPORT_PHYS_LUN_DEV_FLAG_NON_DISK 0x1
#define REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED 0x8
struct report_phys_lun_extended {
@@ -726,14 +785,15 @@ struct pqi_scsi_dev {
__be64 wwid;
u8 volume_id[16];
u8 is_physical_device : 1;
+ u8 is_external_raid_device : 1;
u8 target_lun_valid : 1;
- u8 expose_device : 1;
- u8 no_uld_attach : 1;
- u8 aio_enabled : 1; /* only valid for physical disks */
u8 device_gone : 1;
u8 new_device : 1;
u8 keep_device : 1;
u8 volume_offline : 1;
+ bool aio_enabled; /* only valid for physical disks */
+ bool in_reset;
+ bool device_offline;
u8 vendor[8]; /* bytes 8-15 of inquiry data */
u8 model[16]; /* bytes 16-31 of inquiry data */
u64 sas_address;
@@ -747,12 +807,11 @@ struct pqi_scsi_dev {
u8 bay;
u8 box[8];
u16 phys_connector[8];
- int offload_configured; /* I/O accel RAID offload configured */
- int offload_enabled; /* I/O accel RAID offload enabled */
- int offload_enabled_pending;
- int offload_to_mirror; /* Send next I/O accelerator RAID */
- /* offload request to mirror drive. */
- struct raid_map *raid_map; /* I/O accelerator RAID map */
+ bool raid_bypass_configured; /* RAID bypass configured */
+ bool raid_bypass_enabled; /* RAID bypass enabled */
+ int offload_to_mirror; /* Send next RAID bypass request */
+ /* to mirror drive. */
+ struct raid_map *raid_map; /* RAID bypass map */
struct pqi_sas_port *sas_port;
struct scsi_device *sdev;
@@ -761,13 +820,15 @@ struct pqi_scsi_dev {
struct list_head new_device_list_entry;
struct list_head add_list_entry;
struct list_head delete_list_entry;
+
+ atomic_t scsi_cmds_outstanding;
};
/* VPD inquiry pages */
#define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */
#define SCSI_VPD_DEVICE_ID 0x83 /* standard page */
#define CISS_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */
-#define CISS_VPD_LV_OFFLOAD_STATUS 0xc2 /* vendor-specific page */
+#define CISS_VPD_LV_BYPASS_STATUS 0xc2 /* vendor-specific page */
#define CISS_VPD_LV_STATUS 0xc3 /* vendor-specific page */
#define VPD_PAGE (1 << 8)
@@ -851,7 +912,9 @@ struct pqi_io_request {
void (*io_complete_callback)(struct pqi_io_request *io_request,
void *context);
void *context;
+ u8 raid_bypass : 1;
int status;
+ struct pqi_queue_group *queue_group;
struct scsi_cmnd *scmd;
void *error_info;
struct pqi_sg_descriptor *sg_chain_buffer;
@@ -860,15 +923,7 @@ struct pqi_io_request {
struct list_head request_list_entry;
};
-/* for indexing into the pending_events[] field of struct pqi_ctrl_info */
-#define PQI_EVENT_HEARTBEAT 0
-#define PQI_EVENT_HOTPLUG 1
-#define PQI_EVENT_HARDWARE 2
-#define PQI_EVENT_PHYSICAL_DEVICE 3
-#define PQI_EVENT_LOGICAL_DEVICE 4
-#define PQI_EVENT_AIO_STATE_CHANGE 5
-#define PQI_EVENT_AIO_CONFIG_CHANGE 6
-#define PQI_NUM_SUPPORTED_EVENTS 7
+#define PQI_NUM_SUPPORTED_EVENTS 6
struct pqi_event {
bool pending;
@@ -911,7 +966,7 @@ struct pqi_ctrl_info {
dma_addr_t error_buffer_dma_handle;
size_t sg_chain_buffer_length;
unsigned int num_queue_groups;
- unsigned int num_active_queue_groups;
+ u16 max_hw_queue_index;
u16 num_elements_per_iq;
u16 num_elements_per_oq;
u16 max_inbound_iu_length_per_firmware;
@@ -926,6 +981,7 @@ struct pqi_ctrl_info {
struct pqi_admin_queues admin_queues;
struct pqi_queue_group queue_groups[PQI_MAX_QUEUE_GROUPS];
struct pqi_event_queue event_queue;
+ enum pqi_irq_mode irq_mode;
int max_msix_vectors;
int num_msix_vectors_enabled;
int num_msix_vectors_initialized;
@@ -933,11 +989,12 @@ struct pqi_ctrl_info {
struct Scsi_Host *scsi_host;
struct mutex scan_mutex;
+ struct mutex lun_reset_mutex;
+ bool controller_online;
+ bool block_requests;
u8 inbound_spanning_supported : 1;
u8 outbound_spanning_supported : 1;
u8 pqi_mode_enabled : 1;
- u8 controller_online : 1;
- u8 heartbeat_timer_started : 1;
struct list_head scsi_device_list;
spinlock_t scsi_device_list_lock;
@@ -951,20 +1008,28 @@ struct pqi_ctrl_info {
struct pqi_io_request *io_request_pool;
u16 next_io_request_slot;
- struct pqi_event pending_events[PQI_NUM_SUPPORTED_EVENTS];
+ struct pqi_event events[PQI_NUM_SUPPORTED_EVENTS];
struct work_struct event_work;
atomic_t num_interrupts;
int previous_num_interrupts;
- unsigned int num_heartbeats_requested;
+ u32 previous_heartbeat_count;
+ __le32 __iomem *heartbeat_counter;
struct timer_list heartbeat_timer;
+ struct work_struct ctrl_offline_work;
struct semaphore sync_request_sem;
- struct semaphore lun_reset_sem;
+ atomic_t num_busy_threads;
+ atomic_t num_blocked_threads;
+ wait_queue_head_t block_requests_wait;
+
+ struct list_head raid_bypass_retry_list;
+ spinlock_t raid_bypass_retry_list_lock;
+ struct work_struct raid_bypass_retry_work;
};
enum pqi_ctrl_mode {
- UNKNOWN,
+ SIS_MODE = 0,
PQI_MODE
};
@@ -973,9 +1038,6 @@ enum pqi_ctrl_mode {
*/
#define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH 27
-/* 0 = no limit */
-#define PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH 0
-
/* CISS commands */
#define CISS_READ 0xc0
#define CISS_REPORT_LOG 0xc2 /* Report Logical LUNs */
@@ -996,13 +1058,13 @@ enum pqi_ctrl_mode {
#define BMIC_WRITE_HOST_WELLNESS 0xa5
#define BMIC_CACHE_FLUSH 0xc2
-#define SA_CACHE_FLUSH 0x01
+#define SA_CACHE_FLUSH 0x1
#define MASKED_DEVICE(lunid) ((lunid)[3] & 0xc0)
-#define CISS_GET_BUS(lunid) ((lunid)[7] & 0x3f)
+#define CISS_GET_LEVEL_2_BUS(lunid) ((lunid)[7] & 0x3f)
#define CISS_GET_LEVEL_2_TARGET(lunid) ((lunid)[6])
#define CISS_GET_DRIVE_NUMBER(lunid) \
- (((CISS_GET_BUS((lunid)) - 1) << 8) + \
+ (((CISS_GET_LEVEL_2_BUS((lunid)) - 1) << 8) + \
CISS_GET_LEVEL_2_TARGET((lunid)))
#define NO_TIMEOUT ((unsigned long) -1)
@@ -1069,9 +1131,9 @@ struct bmic_identify_physical_device {
u8 multi_lun_device_lun_count;
u8 minimum_good_fw_revision[8];
u8 unique_inquiry_bytes[20];
- u8 current_temperature_degreesC;
- u8 temperature_threshold_degreesC;
- u8 max_temperature_degreesC;
+ u8 current_temperature_degrees;
+ u8 temperature_threshold_degrees;
+ u8 max_temperature_degrees;
u8 logical_blocks_per_phys_block_exp;
__le16 current_queue_depth_limit;
u8 switch_name[10];
@@ -1084,10 +1146,22 @@ struct bmic_identify_physical_device {
u8 smart_carrier_authentication;
u8 smart_carrier_app_fw_version;
u8 smart_carrier_bootloader_fw_version;
+ u8 sanitize_flags;
+ u8 encryption_key_flags;
u8 encryption_key_name[64];
__le32 misc_drive_flags;
__le16 dek_index;
- u8 padding[112];
+ __le16 hba_drive_encryption_flags;
+ __le16 max_overwrite_time;
+ __le16 max_block_erase_time;
+ __le16 max_crypto_erase_time;
+ u8 connector_info[5];
+ u8 connector_name[8][8];
+ u8 page_83_identifier[16];
+ u8 maximum_link_rate[256];
+ u8 negotiated_physical_link_rate[256];
+ u8 box_connector_name[8];
+ u8 padding_to_multiple_of_512[9];
};
#pragma pack()
@@ -1099,36 +1173,8 @@ int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
void pqi_remove_sas_device(struct pqi_scsi_dev *device);
struct pqi_scsi_dev *pqi_find_device_by_sas_rphy(
struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy);
+void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd);
extern struct sas_function_template pqi_sas_transport_functions;
-#if !defined(readq)
-#define readq readq
-static inline u64 readq(const volatile void __iomem *addr)
-{
- u32 lower32;
- u32 upper32;
-
- lower32 = readl(addr);
- upper32 = readl(addr + 4);
-
- return ((u64)upper32 << 32) | lower32;
-}
-#endif
-
-#if !defined(writeq)
-#define writeq writeq
-static inline void writeq(u64 value, volatile void __iomem *addr)
-{
- u32 lower32;
- u32 upper32;
-
- lower32 = lower_32_bits(value);
- upper32 = upper_32_bits(value);
-
- writel(lower32, addr);
- writel(upper32, addr + 4);
-}
-#endif
-
#endif /* _SMARTPQI_H */
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 657ad15682a3..cb8f886e705c 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,6 +1,6 @@
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2016 Microsemi Corporation
+ * Copyright (c) 2016-2017 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -24,6 +24,7 @@
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
+#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
@@ -39,15 +40,18 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "0.9.13-370"
-#define DRIVER_MAJOR 0
-#define DRIVER_MINOR 9
-#define DRIVER_RELEASE 13
-#define DRIVER_REVISION 370
+#define DRIVER_VERSION "1.0.4-100"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_RELEASE 4
+#define DRIVER_REVISION 100
-#define DRIVER_NAME "Microsemi PQI Driver (v" DRIVER_VERSION ")"
+#define DRIVER_NAME "Microsemi PQI Driver (v" \
+ DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT "smartpqi"
+#define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor))
+
MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
DRIVER_VERSION);
@@ -55,12 +59,9 @@ MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
-#define PQI_ENABLE_MULTI_QUEUE_SUPPORT 0
-
-static char *hpe_branded_controller = "HPE Smart Array Controller";
-static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
-
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
+static void pqi_ctrl_offline_worker(struct work_struct *work);
+static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
@@ -72,7 +73,7 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
unsigned int cdb_length, struct pqi_queue_group *queue_group,
- struct pqi_encryption_info *encryption_info);
+ struct pqi_encryption_info *encryption_info, bool raid_bypass);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
@@ -81,12 +82,66 @@ static struct scsi_transport_template *pqi_sas_transport_template;
static atomic_t pqi_controller_count = ATOMIC_INIT(0);
+enum pqi_lockup_action {
+ NONE,
+ REBOOT,
+ PANIC
+};
+
+static enum pqi_lockup_action pqi_lockup_action = NONE;
+
+static struct {
+ enum pqi_lockup_action action;
+ char *name;
+} pqi_lockup_actions[] = {
+ {
+ .action = NONE,
+ .name = "none",
+ },
+ {
+ .action = REBOOT,
+ .name = "reboot",
+ },
+ {
+ .action = PANIC,
+ .name = "panic",
+ },
+};
+
+static unsigned int pqi_supported_event_types[] = {
+ PQI_EVENT_TYPE_HOTPLUG,
+ PQI_EVENT_TYPE_HARDWARE,
+ PQI_EVENT_TYPE_PHYSICAL_DEVICE,
+ PQI_EVENT_TYPE_LOGICAL_DEVICE,
+ PQI_EVENT_TYPE_AIO_STATE_CHANGE,
+ PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
+};
+
static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
- pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
+ pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
"Disable device ID wildcards.");
+static int pqi_disable_heartbeat;
+module_param_named(disable_heartbeat,
+ pqi_disable_heartbeat, int, 0644);
+MODULE_PARM_DESC(disable_heartbeat,
+ "Disable heartbeat.");
+
+static int pqi_disable_ctrl_shutdown;
+module_param_named(disable_ctrl_shutdown,
+ pqi_disable_ctrl_shutdown, int, 0644);
+MODULE_PARM_DESC(disable_ctrl_shutdown,
+ "Disable controller shutdown when controller locked up.");
+
+static char *pqi_lockup_action_param;
+module_param_named(lockup_action,
+ pqi_lockup_action_param, charp, 0644);
+MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
+ "\t\tSupported: none, reboot, panic\n"
+ "\t\tDefault: none");
+
static char *raid_levels[] = {
"RAID-0",
"RAID-4",
@@ -102,7 +157,7 @@ static char *pqi_raid_level_to_string(u8 raid_level)
if (raid_level < ARRAY_SIZE(raid_levels))
return raid_levels[raid_level];
- return "";
+ return "RAID UNKNOWN";
}
#define SA_RAID_0 0
@@ -117,6 +172,7 @@ static char *pqi_raid_level_to_string(u8 raid_level)
static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
+ pqi_prep_for_scsi_done(scmd);
scmd->scsi_done(scmd);
}
@@ -137,6 +193,11 @@ static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
return !device->is_physical_device;
}
+static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
+{
+ return scsi3addr[2] != 0;
+}
+
static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
return !ctrl_info->controller_online;
@@ -166,12 +227,124 @@ static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
sis_write_driver_scratch(ctrl_info, mode);
}
-#define PQI_RESCAN_WORK_INTERVAL (10 * HZ)
+static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->block_requests = true;
+ scsi_block_requests(ctrl_info->scsi_host);
+}
+
+static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
+{
+ ctrl_info->block_requests = false;
+ wake_up_all(&ctrl_info->block_requests_wait);
+ pqi_retry_raid_bypass_requests(ctrl_info);
+ scsi_unblock_requests(ctrl_info->scsi_host);
+}
+
+static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+ return ctrl_info->block_requests;
+}
+
+static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
+ unsigned long timeout_msecs)
+{
+ unsigned long remaining_msecs;
+
+ if (!pqi_ctrl_blocked(ctrl_info))
+ return timeout_msecs;
+
+ atomic_inc(&ctrl_info->num_blocked_threads);
+
+ if (timeout_msecs == NO_TIMEOUT) {
+ wait_event(ctrl_info->block_requests_wait,
+ !pqi_ctrl_blocked(ctrl_info));
+ remaining_msecs = timeout_msecs;
+ } else {
+ unsigned long remaining_jiffies;
+
+ remaining_jiffies =
+ wait_event_timeout(ctrl_info->block_requests_wait,
+ !pqi_ctrl_blocked(ctrl_info),
+ msecs_to_jiffies(timeout_msecs));
+ remaining_msecs = jiffies_to_msecs(remaining_jiffies);
+ }
+
+ atomic_dec(&ctrl_info->num_blocked_threads);
+
+ return remaining_msecs;
+}
+
+static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
+{
+ atomic_inc(&ctrl_info->num_busy_threads);
+}
+
+static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
+{
+ atomic_dec(&ctrl_info->num_busy_threads);
+}
+
+static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
+{
+ while (atomic_read(&ctrl_info->num_busy_threads) >
+ atomic_read(&ctrl_info->num_blocked_threads))
+ usleep_range(1000, 2000);
+}
+
+static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
+{
+ return device->device_offline;
+}
+
+static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
+{
+ device->in_reset = true;
+}
+
+static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
+{
+ device->in_reset = false;
+}
+
+static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
+{
+ return device->in_reset;
+}
+
+static inline void pqi_schedule_rescan_worker_with_delay(
+ struct pqi_ctrl_info *ctrl_info, unsigned long delay)
+{
+ if (pqi_ctrl_offline(ctrl_info))
+ return;
+
+ schedule_delayed_work(&ctrl_info->rescan_work, delay);
+}
static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
- schedule_delayed_work(&ctrl_info->rescan_work,
- PQI_RESCAN_WORK_INTERVAL);
+ pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
+}
+
+#define PQI_RESCAN_WORK_DELAY (10 * HZ)
+
+static inline void pqi_schedule_rescan_worker_delayed(
+ struct pqi_ctrl_info *ctrl_info)
+{
+ pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
+}
+
+static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
+{
+ cancel_delayed_work_sync(&ctrl_info->rescan_work);
+}
+
+static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
+{
+ if (!ctrl_info->heartbeat_counter)
+ return 0;
+
+ return readl(ctrl_info->heartbeat_counter);
}
static int pqi_map_single(struct pci_dev *pci_dev,
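
The busy/blocked counters added above form a small quiesce protocol: submission paths bracket themselves with pqi_ctrl_busy()/pqi_ctrl_unbusy() and yield in pqi_wait_if_ctrl_blocked(), while management paths block requests and then wait in pqi_ctrl_wait_until_quiesced() until the busy count has dropped to the blocked count. A hedged usage sketch (the function below is illustrative, not driver code; it assumes the helpers and NO_TIMEOUT from smartpqi.h):

static int example_submit(struct pqi_ctrl_info *ctrl_info)
{
        int rc = 0;

        pqi_ctrl_busy(ctrl_info);
        /* Counts itself as blocked while waiting, so quiesce can proceed. */
        pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
        if (pqi_ctrl_offline(ctrl_info)) {
                rc = -ENXIO;
                goto out;
        }

        /* ... build and post the request ... */
out:
        pqi_ctrl_unbusy(ctrl_info);
        return rc;
}
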
@@ -280,7 +453,6 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
default:
dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
cmd);
- WARN_ON(cmd);
break;
}
@@ -305,6 +477,14 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
buffer, buffer_length, pci_dir);
}
+static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
+{
+ io_request->scmd = NULL;
+ io_request->status = 0;
+ io_request->error_info = NULL;
+ io_request->raid_bypass = false;
+}
+
static struct pqi_io_request *pqi_alloc_io_request(
struct pqi_ctrl_info *ctrl_info)
{
@@ -322,9 +502,7 @@ static struct pqi_io_request *pqi_alloc_io_request(
/* benignly racy */
ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
- io_request->scmd = NULL;
- io_request->status = 0;
- io_request->error_info = NULL;
+ pqi_reinit_io_request(io_request);
return io_request;
}
@@ -500,7 +678,7 @@ static int pqi_write_driver_version_to_host_wellness(
buffer->driver_version_tag[1] = 'V';
put_unaligned_le16(sizeof(buffer->driver_version),
&buffer->driver_version_length);
- strncpy(buffer->driver_version, DRIVER_VERSION,
+ strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
sizeof(buffer->driver_version) - 1);
buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
buffer->end_tag[0] = 'Z';
@@ -586,6 +764,9 @@ static void pqi_update_time_worker(struct work_struct *work)
ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
update_time_work);
+ if (pqi_ctrl_offline(ctrl_info))
+ return;
+
rc = pqi_write_current_time_to_host_wellness(ctrl_info);
if (rc)
dev_warn(&ctrl_info->pci_dev->dev,
@@ -601,6 +782,12 @@ static inline void pqi_schedule_update_time_worker(
schedule_delayed_work(&ctrl_info->update_time_work, 0);
}
+static inline void pqi_cancel_update_time_worker(
+ struct pqi_ctrl_info *ctrl_info)
+{
+ cancel_delayed_work_sync(&ctrl_info->update_time_work);
+}
+
static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
void *buffer, size_t buffer_length)
{
@@ -771,6 +958,9 @@ static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
u8 *scsi3addr;
u32 lunid;
+ int bus;
+ int target;
+ int lun;
scsi3addr = device->scsi3addr;
lunid = get_unaligned_le32(scsi3addr);
@@ -783,8 +973,16 @@ static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
}
if (pqi_is_logical_device(device)) {
- pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
- lunid & 0x3fff);
+ if (device->is_external_raid_device) {
+ bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
+ target = (lunid >> 16) & 0x3fff;
+ lun = lunid & 0xff;
+ } else {
+ bus = PQI_RAID_VOLUME_BUS;
+ target = 0;
+ lun = lunid & 0x3fff;
+ }
+ pqi_set_bus_target_lun(device, bus, target, lun);
device->target_lun_valid = true;
return;
}
@@ -878,7 +1076,10 @@ static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
return 0;
bad_raid_map:
- dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "scsi %d:%d:%d:%d %s\n",
+ ctrl_info->scsi_host->host_no,
+ device->bus, device->target, device->lun, err_msg);
return -EINVAL;
}
@@ -924,35 +1125,33 @@ error:
return rc;
}
-static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
+static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
int rc;
u8 *buffer;
- u8 offload_status;
+ u8 bypass_status;
buffer = kmalloc(64, GFP_KERNEL);
if (!buffer)
return;
rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
- VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
+ VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
if (rc)
goto out;
-#define OFFLOAD_STATUS_BYTE 4
-#define OFFLOAD_CONFIGURED_BIT 0x1
-#define OFFLOAD_ENABLED_BIT 0x2
+#define RAID_BYPASS_STATUS 4
+#define RAID_BYPASS_CONFIGURED 0x1
+#define RAID_BYPASS_ENABLED 0x2
- offload_status = buffer[OFFLOAD_STATUS_BYTE];
- device->offload_configured =
- !!(offload_status & OFFLOAD_CONFIGURED_BIT);
- if (device->offload_configured) {
- device->offload_enabled_pending =
- !!(offload_status & OFFLOAD_ENABLED_BIT);
- if (pqi_get_raid_map(ctrl_info, device))
- device->offload_enabled_pending = false;
- }
+ bypass_status = buffer[RAID_BYPASS_STATUS];
+ device->raid_bypass_configured =
+ (bypass_status & RAID_BYPASS_CONFIGURED) != 0;
+ if (device->raid_bypass_configured &&
+ (bypass_status & RAID_BYPASS_ENABLED) &&
+ pqi_get_raid_map(ctrl_info, device) == 0)
+ device->raid_bypass_enabled = true;
out:
kfree(buffer);
@@ -1016,15 +1215,19 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
scsi_sanitize_inquiry_string(&buffer[16], 16);
device->devtype = buffer[0] & 0x1f;
- memcpy(device->vendor, &buffer[8],
- sizeof(device->vendor));
- memcpy(device->model, &buffer[16],
- sizeof(device->model));
+ memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
+ memcpy(device->model, &buffer[16], sizeof(device->model));
if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
- pqi_get_raid_level(ctrl_info, device);
- pqi_get_offload_status(ctrl_info, device);
- pqi_get_volume_status(ctrl_info, device);
+ if (device->is_external_raid_device) {
+ device->raid_level = SA_RAID_UNKNOWN;
+ device->volume_status = CISS_LV_OK;
+ device->volume_offline = false;
+ } else {
+ pqi_get_raid_level(ctrl_info, device);
+ pqi_get_raid_bypass_status(ctrl_info, device);
+ pqi_get_volume_status(ctrl_info, device);
+ }
}
out:
@@ -1138,8 +1341,7 @@ static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
status = "Volume undergoing encryption re-keying process";
break;
case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
- status =
- "Encrypted volume inaccessible - disabled on ctrl";
+ status = "Volume encrypted but encryption is disabled";
break;
case CISS_LV_PENDING_ENCRYPTION:
status = "Volume pending migration to encrypted state";
@@ -1166,85 +1368,6 @@ static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
device->bus, device->target, device->lun, status);
}
-static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
- struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
-{
- struct pqi_scsi_dev *device;
-
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
- if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
- continue;
- if (pqi_is_logical_device(device))
- continue;
- if (device->aio_handle == aio_handle)
- return device;
- }
-
- return NULL;
-}
-
-static void pqi_update_logical_drive_queue_depth(
- struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
-{
- unsigned int i;
- struct raid_map *raid_map;
- struct raid_map_disk_data *disk_data;
- struct pqi_scsi_dev *phys_disk;
- unsigned int num_phys_disks;
- unsigned int num_raid_map_entries;
- unsigned int queue_depth;
-
- logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
-
- raid_map = logical_drive->raid_map;
- if (!raid_map)
- return;
-
- disk_data = raid_map->disk_data;
- num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
- (get_unaligned_le16(&raid_map->data_disks_per_row) +
- get_unaligned_le16(&raid_map->metadata_disks_per_row));
- num_raid_map_entries = num_phys_disks *
- get_unaligned_le16(&raid_map->row_cnt);
-
- queue_depth = 0;
- for (i = 0; i < num_raid_map_entries; i++) {
- phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
- disk_data[i].aio_handle);
-
- if (!phys_disk) {
- dev_warn(&ctrl_info->pci_dev->dev,
- "failed to find physical disk for logical drive %016llx\n",
- get_unaligned_be64(logical_drive->scsi3addr));
- logical_drive->offload_enabled = false;
- logical_drive->offload_enabled_pending = false;
- kfree(raid_map);
- logical_drive->raid_map = NULL;
- return;
- }
-
- queue_depth += phys_disk->queue_depth;
- }
-
- logical_drive->queue_depth = queue_depth;
-}
-
-static void pqi_update_all_logical_drive_queue_depths(
- struct pqi_ctrl_info *ctrl_info)
-{
- struct pqi_scsi_dev *device;
-
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
- if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
- continue;
- if (!pqi_is_logical_device(device))
- continue;
- pqi_update_logical_drive_queue_depth(ctrl_info, device);
- }
-}
-
static void pqi_rescan_worker(struct work_struct *work)
{
struct pqi_ctrl_info *ctrl_info;
@@ -1336,24 +1459,65 @@ static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
return DEVICE_NOT_FOUND;
}
+#define PQI_DEV_INFO_BUFFER_LENGTH 128
+
static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
char *action, struct pqi_scsi_dev *device)
{
- dev_info(&ctrl_info->pci_dev->dev,
- "%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
- action,
- ctrl_info->scsi_host->host_no,
- device->bus,
- device->target,
- device->lun,
+ ssize_t count;
+ char buffer[PQI_DEV_INFO_BUFFER_LENGTH];
+
+ count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
+ "%d:%d:", ctrl_info->scsi_host->host_no, device->bus);
+
+ if (device->target_lun_valid)
+ count += snprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ "%d:%d",
+ device->target,
+ device->lun);
+ else
+ count += snprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ "-:-");
+
+ if (pqi_is_logical_device(device))
+ count += snprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ " %08x%08x",
+ *((u32 *)&device->scsi3addr),
+ *((u32 *)&device->scsi3addr[4]));
+ else
+ count += snprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ " %016llx", device->sas_address);
+
+ count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
+ " %s %.8s %.16s ",
scsi_device_type(device->devtype),
device->vendor,
- device->model,
- pqi_raid_level_to_string(device->raid_level),
- device->offload_configured ? '+' : '-',
- device->offload_enabled_pending ? '+' : '-',
- device->expose_device ? '+' : '-',
- device->queue_depth);
+ device->model);
+
+ if (pqi_is_logical_device(device)) {
+ if (device->devtype == TYPE_DISK)
+ count += snprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ "SSDSmartPathCap%c En%c %-12s",
+ device->raid_bypass_configured ? '+' : '-',
+ device->raid_bypass_enabled ? '+' : '-',
+ pqi_raid_level_to_string(device->raid_level));
+ } else {
+ count += snprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ "AIO%c", device->aio_enabled ? '+' : '-');
+ if (device->devtype == TYPE_DISK ||
+ device->devtype == TYPE_ZBC)
+ count += snprintf(buffer + count,
+ PQI_DEV_INFO_BUFFER_LENGTH - count,
+ " qd=%-6d", device->queue_depth);
+ }
+
+ dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}
/* Assumes the SCSI device list lock is held. */
@@ -1373,8 +1537,8 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
/* By definition, the scsi3addr and wwid fields are already the same. */
existing_device->is_physical_device = new_device->is_physical_device;
- existing_device->expose_device = new_device->expose_device;
- existing_device->no_uld_attach = new_device->no_uld_attach;
+ existing_device->is_external_raid_device =
+ new_device->is_external_raid_device;
existing_device->aio_enabled = new_device->aio_enabled;
memcpy(existing_device->vendor, new_device->vendor,
sizeof(existing_device->vendor));
@@ -1392,13 +1556,13 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
sizeof(existing_device->box));
memcpy(existing_device->phys_connector, new_device->phys_connector,
sizeof(existing_device->phys_connector));
- existing_device->offload_configured = new_device->offload_configured;
- existing_device->offload_enabled = false;
- existing_device->offload_enabled_pending =
- new_device->offload_enabled_pending;
existing_device->offload_to_mirror = 0;
kfree(existing_device->raid_map);
existing_device->raid_map = new_device->raid_map;
+ existing_device->raid_bypass_configured =
+ new_device->raid_bypass_configured;
+ existing_device->raid_bypass_enabled =
+ new_device->raid_bypass_enabled;
/* To prevent this from being freed later. */
new_device->raid_map = NULL;
@@ -1440,11 +1604,8 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device;
struct pqi_scsi_dev *next;
struct pqi_scsi_dev *matching_device;
- struct list_head add_list;
- struct list_head delete_list;
-
- INIT_LIST_HEAD(&add_list);
- INIT_LIST_HEAD(&delete_list);
+ LIST_HEAD(add_list);
+ LIST_HEAD(delete_list);
/*
* The idea here is to do as little work as possible while holding the
@@ -1490,9 +1651,6 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
*/
device->new_device = true;
break;
- default:
- WARN_ON(find_result);
- break;
}
}
@@ -1519,26 +1677,19 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
device->keep_device = true;
}
- pqi_update_all_logical_drive_queue_depths(ctrl_info);
-
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry)
- device->offload_enabled =
- device->offload_enabled_pending;
-
spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
/* Remove all devices that have gone away. */
list_for_each_entry_safe(device, next, &delete_list,
delete_list_entry) {
- if (device->sdev)
- pqi_remove_device(ctrl_info, device);
if (device->volume_offline) {
pqi_dev_info(ctrl_info, "offline", device);
pqi_show_volume_status(ctrl_info, device);
} else {
pqi_dev_info(ctrl_info, "removed", device);
}
+ if (device->sdev)
+ pqi_remove_device(ctrl_info, device);
list_del(&device->delete_list_entry);
pqi_free_device(device);
}
@@ -1559,7 +1710,8 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
/* Expose any new devices. */
list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
- if (device->expose_device && !device->sdev) {
+ if (!device->sdev) {
+ pqi_dev_info(ctrl_info, "added", device);
rc = pqi_add_device(ctrl_info, device);
if (rc) {
dev_warn(&ctrl_info->pci_dev->dev,
@@ -1568,10 +1720,8 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
device->bus, device->target,
device->lun);
pqi_fixup_botched_add(ctrl_info, device);
- continue;
}
}
- pqi_dev_info(ctrl_info, "added", device);
}
}
@@ -1591,8 +1741,8 @@ static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
/*
* Only support the HBA controller itself as a RAID
* controller. If it's a RAID controller other than
- * the HBA itself (an external RAID controller, MSA500
- * or similar), we don't support it.
+ * the HBA itself (an external RAID controller, for
+ * example), we don't support it.
*/
if (pqi_is_hba_lunid(device->scsi3addr))
is_supported = true;
@@ -1602,43 +1752,20 @@ static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
return is_supported;
}
-static inline bool pqi_skip_device(u8 *scsi3addr,
- struct report_phys_lun_extended_entry *phys_lun_ext_entry)
+static inline bool pqi_skip_device(u8 *scsi3addr)
{
- u8 device_flags;
-
- if (!MASKED_DEVICE(scsi3addr))
- return false;
-
- /* The device is masked. */
-
- device_flags = phys_lun_ext_entry->device_flags;
-
- if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
- /*
- * It's a non-disk device. We ignore all devices of this type
- * when they're masked.
- */
+ /* Ignore all masked devices. */
+ if (MASKED_DEVICE(scsi3addr))
return true;
- }
return false;
}
-static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
-{
- /* Expose all devices except for physical devices that are masked. */
- if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
- return false;
-
- return true;
-}
-
static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
int i;
int rc;
- struct list_head new_device_list_head;
+ LIST_HEAD(new_device_list_head);
struct report_phys_lun_extended *physdev_list = NULL;
struct report_log_lun_extended *logdev_list = NULL;
struct report_phys_lun_extended_entry *phys_lun_ext_entry;
@@ -1654,9 +1781,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
bool is_physical_device;
u8 *scsi3addr;
static char *out_of_memory_msg =
- "out of memory, device discovery stopped";
-
- INIT_LIST_HEAD(&new_device_list_head);
+ "failed to allocate memory, device discovery stopped";
rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
if (rc)
@@ -1732,8 +1857,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
scsi3addr = log_lun_ext_entry->lunid;
}
- if (is_physical_device &&
- pqi_skip_device(scsi3addr, phys_lun_ext_entry))
+ if (is_physical_device && pqi_skip_device(scsi3addr))
continue;
if (device)
@@ -1744,7 +1868,9 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
device->is_physical_device = is_physical_device;
- device->raid_level = SA_RAID_UNKNOWN;
+ if (!is_physical_device)
+ device->is_external_raid_device =
+ pqi_is_external_raid_addr(scsi3addr);
/* Gather information about the device. */
rc = pqi_get_device_info(ctrl_info, device);
@@ -1754,9 +1880,16 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
goto out;
}
if (rc) {
- dev_warn(&ctrl_info->pci_dev->dev,
- "obtaining device info failed, skipping device %016llx\n",
- get_unaligned_be64(device->scsi3addr));
+ if (device->is_physical_device)
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "obtaining device info failed, skipping physical device %016llx\n",
+ get_unaligned_be64(
+ &phys_lun_ext_entry->wwid));
+ else
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "obtaining device info failed, skipping logical device %08x%08x\n",
+ *((u32 *)&device->scsi3addr),
+ *((u32 *)&device->scsi3addr[4]));
rc = 0;
continue;
}
@@ -1766,8 +1899,6 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
pqi_assign_bus_target_lun(device);
- device->expose_device = pqi_expose_device(device);
-
if (device->is_physical_device) {
device->wwid = phys_lun_ext_entry->wwid;
if ((phys_lun_ext_entry->device_flags &
@@ -1823,19 +1954,25 @@ static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
unsigned long flags;
struct pqi_scsi_dev *device;
- struct pqi_scsi_dev *next;
- spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ while (1) {
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
+ struct pqi_scsi_dev, scsi_device_list_entry);
+ if (device)
+ list_del(&device->scsi_device_list_entry);
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
+ flags);
+
+ if (!device)
+ break;
- list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
if (device->sdev)
pqi_remove_device(ctrl_info, device);
- list_del(&device->scsi_device_list_entry);
pqi_free_device(device);
}
-
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
}
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
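
The removal loop above now pops one device at a time under scsi_device_list_lock and tears it down only after dropping the spinlock, presumably because pqi_remove_device() ends up in scsi_remove_device(), which can sleep. A generic, hedged sketch of that detach-under-lock idiom (the example_* names are illustrative):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_entry {
        struct list_head node;
};

static void example_drain(struct list_head *list, spinlock_t *lock)
{
        struct example_entry *e;
        unsigned long flags;

        while (1) {
                spin_lock_irqsave(lock, flags);
                e = list_first_entry_or_null(list, struct example_entry, node);
                if (e)
                        list_del(&e->node);
                spin_unlock_irqrestore(lock, flags);

                if (!e)
                        break;

                /* Safe to sleep here: the spinlock is no longer held. */
                kfree(e);
        }
}
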
@@ -1849,7 +1986,7 @@ static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
rc = pqi_update_scsi_devices(ctrl_info);
if (rc)
- pqi_schedule_rescan_worker(ctrl_info);
+ pqi_schedule_rescan_worker_delayed(ctrl_info);
mutex_unlock(&ctrl_info->scan_mutex);
@@ -1873,6 +2010,18 @@ static int pqi_scan_finished(struct Scsi_Host *shost,
return !mutex_is_locked(&ctrl_info->scan_mutex);
}
+static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
+{
+ mutex_lock(&ctrl_info->scan_mutex);
+ mutex_unlock(&ctrl_info->scan_mutex);
+}
+
+static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
+{
+ mutex_lock(&ctrl_info->lun_reset_mutex);
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
+}
+
static inline void pqi_set_encryption_info(
struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
u64 first_block)
@@ -1895,7 +2044,7 @@ static inline void pqi_set_encryption_info(
}
/*
- * Attempt to perform offload RAID mapping for a logical volume I/O.
+ * Attempt to perform RAID bypass mapping for a logical volume I/O.
*/
#define PQI_RAID_BYPASS_INELIGIBLE 1
@@ -2227,7 +2376,7 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
}
return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
- cdb, cdb_length, queue_group, encryption_info_ptr);
+ cdb, cdb_length, queue_group, encryption_info_ptr, true);
}
#define PQI_STATUS_IDLE 0x0
@@ -2299,23 +2448,26 @@ static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
struct pqi_scsi_dev *device;
device = io_request->scmd->device->hostdata;
- device->offload_enabled = false;
+ device->raid_bypass_enabled = false;
+ device->aio_enabled = false;
}
-static inline void pqi_take_device_offline(struct scsi_device *sdev)
+static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
{
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
- if (scsi_device_online(sdev)) {
- scsi_device_set_state(sdev, SDEV_OFFLINE);
- ctrl_info = shost_to_hba(sdev->host);
- schedule_delayed_work(&ctrl_info->rescan_work, 0);
- device = sdev->hostdata;
- dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
- ctrl_info->scsi_host->host_no, device->bus,
- device->target, device->lun);
- }
+ device = sdev->hostdata;
+ if (device->device_offline)
+ return;
+
+ device->device_offline = true;
+ scsi_device_set_state(sdev, SDEV_OFFLINE);
+ ctrl_info = shost_to_hba(sdev->host);
+ pqi_schedule_rescan_worker(ctrl_info);
+ dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n",
+ path, ctrl_info->scsi_host->host_no, device->bus,
+ device->target, device->lun);
}
static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
@@ -2337,13 +2489,43 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
scsi_status = error_info->status;
host_byte = DID_OK;
- if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
+ switch (error_info->data_out_result) {
+ case PQI_DATA_IN_OUT_GOOD:
+ break;
+ case PQI_DATA_IN_OUT_UNDERFLOW:
xfer_count =
get_unaligned_le32(&error_info->data_out_transferred);
residual_count = scsi_bufflen(scmd) - xfer_count;
scsi_set_resid(scmd, residual_count);
if (xfer_count < scmd->underflow)
host_byte = DID_SOFT_ERROR;
+ break;
+ case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
+ case PQI_DATA_IN_OUT_ABORTED:
+ host_byte = DID_ABORT;
+ break;
+ case PQI_DATA_IN_OUT_TIMEOUT:
+ host_byte = DID_TIME_OUT;
+ break;
+ case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
+ case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
+ case PQI_DATA_IN_OUT_BUFFER_ERROR:
+ case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
+ case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
+ case PQI_DATA_IN_OUT_ERROR:
+ case PQI_DATA_IN_OUT_HARDWARE_ERROR:
+ case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
+ case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
+ case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
+ case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
+ case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
+ case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
+ case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
+ case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
+ case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
+ default:
+ host_byte = DID_ERROR;
+ break;
}
sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
@@ -2360,7 +2542,7 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
sshdr.sense_key == HARDWARE_ERROR &&
sshdr.asc == 0x3e &&
sshdr.ascq == 0x1) {
- pqi_take_device_offline(scmd->device);
+ pqi_take_device_offline(scmd->device, "RAID");
host_byte = DID_NO_CONNECT;
}
@@ -2419,9 +2601,11 @@ static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
break;
case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
case PQI_AIO_STATUS_INVALID_DEVICE:
- device_offline = true;
- pqi_take_device_offline(scmd->device);
- host_byte = DID_NO_CONNECT;
+ if (!io_request->raid_bypass) {
+ device_offline = true;
+ pqi_take_device_offline(scmd->device, "AIO");
+ host_byte = DID_NO_CONNECT;
+ }
scsi_status = SAM_STAT_CHECK_CONDITION;
break;
case PQI_AIO_STATUS_IO_ERROR:
@@ -2547,7 +2731,6 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
dev_err(&ctrl_info->pci_dev->dev,
"unexpected IU type: 0x%x\n",
response->header.iu_type);
- WARN_ON(response->header.iu_type);
break;
}
@@ -2583,23 +2766,18 @@ static inline unsigned int pqi_num_elements_free(unsigned int pi,
return elements_in_queue - num_elements_used - 1;
}
-#define PQI_EVENT_ACK_TIMEOUT 30
-
-static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
+static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
struct pqi_event_acknowledge_request *iu, size_t iu_length)
{
pqi_index_t iq_pi;
pqi_index_t iq_ci;
unsigned long flags;
void *next_element;
- unsigned long timeout;
struct pqi_queue_group *queue_group;
queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
- timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
-
while (1) {
spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
@@ -2613,11 +2791,8 @@ static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
spin_unlock_irqrestore(
&queue_group->submit_lock[RAID_PATH], flags);
- if (time_after(jiffies, timeout)) {
- dev_err(&ctrl_info->pci_dev->dev,
- "sending event acknowledge timed out\n");
+ if (pqi_ctrl_offline(ctrl_info))
return;
- }
}
next_element = queue_group->iq_element_array[RAID_PATH] +
@@ -2626,7 +2801,6 @@ static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
memcpy(next_element, iu, iu_length);
iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
-
queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
/*
@@ -2652,152 +2826,105 @@ static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
request.event_id = event->event_id;
request.additional_event_id = event->additional_event_id;
- pqi_start_event_ack(ctrl_info, &request, sizeof(request));
+ pqi_send_event_ack(ctrl_info, &request, sizeof(request));
}
static void pqi_event_worker(struct work_struct *work)
{
unsigned int i;
struct pqi_ctrl_info *ctrl_info;
- struct pqi_event *pending_event;
- bool got_non_heartbeat_event = false;
+ struct pqi_event *event;
ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
- pending_event = ctrl_info->pending_events;
- for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
- if (pending_event->pending) {
- pending_event->pending = false;
- pqi_acknowledge_event(ctrl_info, pending_event);
- if (i != PQI_EVENT_HEARTBEAT)
- got_non_heartbeat_event = true;
- }
- pending_event++;
- }
-
- if (got_non_heartbeat_event)
- pqi_schedule_rescan_worker(ctrl_info);
-}
-
-static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
-{
- unsigned int i;
- unsigned int path;
- struct pqi_queue_group *queue_group;
- unsigned long flags;
- struct pqi_io_request *io_request;
- struct pqi_io_request *next;
- struct scsi_cmnd *scmd;
-
- ctrl_info->controller_online = false;
- dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
-
- for (i = 0; i < ctrl_info->num_queue_groups; i++) {
- queue_group = &ctrl_info->queue_groups[i];
-
- for (path = 0; path < 2; path++) {
- spin_lock_irqsave(
- &queue_group->submit_lock[path], flags);
-
- list_for_each_entry_safe(io_request, next,
- &queue_group->request_list[path],
- request_list_entry) {
-
- scmd = io_request->scmd;
- if (scmd) {
- set_host_byte(scmd, DID_NO_CONNECT);
- pqi_scsi_done(scmd);
- }
+ pqi_ctrl_busy(ctrl_info);
+ pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
+ if (pqi_ctrl_offline(ctrl_info))
+ goto out;
- list_del(&io_request->request_list_entry);
- }
+ pqi_schedule_rescan_worker_delayed(ctrl_info);
- spin_unlock_irqrestore(
- &queue_group->submit_lock[path], flags);
+ event = ctrl_info->events;
+ for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
+ if (event->pending) {
+ event->pending = false;
+ pqi_acknowledge_event(ctrl_info, event);
}
+ event++;
}
+
+out:
+ pqi_ctrl_unbusy(ctrl_info);
}
-#define PQI_HEARTBEAT_TIMER_INTERVAL (5 * HZ)
-#define PQI_MAX_HEARTBEAT_REQUESTS 5
+#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
static void pqi_heartbeat_timer_handler(unsigned long data)
{
int num_interrupts;
+ u32 heartbeat_count;
struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return;
+
num_interrupts = atomic_read(&ctrl_info->num_interrupts);
+ heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
if (num_interrupts == ctrl_info->previous_num_interrupts) {
- ctrl_info->num_heartbeats_requested++;
- if (ctrl_info->num_heartbeats_requested >
- PQI_MAX_HEARTBEAT_REQUESTS) {
+ if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "no heartbeat detected - last heartbeat count: %u\n",
+ heartbeat_count);
pqi_take_ctrl_offline(ctrl_info);
return;
}
- ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
- schedule_work(&ctrl_info->event_work);
} else {
- ctrl_info->num_heartbeats_requested = 0;
+ ctrl_info->previous_num_interrupts = num_interrupts;
}
- ctrl_info->previous_num_interrupts = num_interrupts;
+ ctrl_info->previous_heartbeat_count = heartbeat_count;
mod_timer(&ctrl_info->heartbeat_timer,
jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
}
static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
+ if (!ctrl_info->heartbeat_counter)
+ return;
+
ctrl_info->previous_num_interrupts =
atomic_read(&ctrl_info->num_interrupts);
+ ctrl_info->previous_heartbeat_count =
+ pqi_read_heartbeat_counter(ctrl_info);
- init_timer(&ctrl_info->heartbeat_timer);
ctrl_info->heartbeat_timer.expires =
jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
add_timer(&ctrl_info->heartbeat_timer);
- ctrl_info->heartbeat_timer_started = true;
}
static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
- if (ctrl_info->heartbeat_timer_started)
- del_timer_sync(&ctrl_info->heartbeat_timer);
+ del_timer_sync(&ctrl_info->heartbeat_timer);
}
-static int pqi_event_type_to_event_index(unsigned int event_type)
+static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
int index;
- switch (event_type) {
- case PQI_EVENT_TYPE_HEARTBEAT:
- index = PQI_EVENT_HEARTBEAT;
- break;
- case PQI_EVENT_TYPE_HOTPLUG:
- index = PQI_EVENT_HOTPLUG;
- break;
- case PQI_EVENT_TYPE_HARDWARE:
- index = PQI_EVENT_HARDWARE;
- break;
- case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
- index = PQI_EVENT_PHYSICAL_DEVICE;
- break;
- case PQI_EVENT_TYPE_LOGICAL_DEVICE:
- index = PQI_EVENT_LOGICAL_DEVICE;
- break;
- case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
- index = PQI_EVENT_AIO_STATE_CHANGE;
- break;
- case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
- index = PQI_EVENT_AIO_CONFIG_CHANGE;
- break;
- default:
- index = -1;
- break;
- }
+ for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
+ if (event_type == pqi_supported_event_types[index])
+ return index;
- return index;
+ return -1;
+}
+
+static inline bool pqi_is_supported_event(unsigned int event_type)
+{
+ return pqi_event_type_to_event_index(event_type) != -1;
}
static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
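The pqi_supported_event_types[] table that the new pqi_event_type_to_event_index() walks is defined elsewhere in smartpqi_init.c and does not appear in these hunks. A sketch of what it presumably contains, taken from the event types the removed switch statement handled (element type and ordering are assumptions); the heartbeat type is gone from the list because heartbeating is now done by reading a firmware counter in the timer handler above rather than by acknowledging heartbeat events:

/*
 * Sketch of the supported-event table implied by the lookup above;
 * the real definition lives earlier in smartpqi_init.c.
 */
static unsigned int pqi_supported_event_types[] = {
        PQI_EVENT_TYPE_HOTPLUG,
        PQI_EVENT_TYPE_HARDWARE,
        PQI_EVENT_TYPE_PHYSICAL_DEVICE,
        PQI_EVENT_TYPE_LOGICAL_DEVICE,
        PQI_EVENT_TYPE_AIO_STATE_CHANGE,
        PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

With this layout the index returned by the linear scan doubles as the slot into ctrl_info->events[], so pqi_process_event_intr() and pqi_event_worker() stay in lockstep, and PQI_NUM_SUPPORTED_EVENTS presumably equals ARRAY_SIZE(pqi_supported_event_types).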
@@ -2807,13 +2934,11 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
pqi_index_t oq_ci;
struct pqi_event_queue *event_queue;
struct pqi_event_response *response;
- struct pqi_event *pending_event;
- bool need_delayed_work;
+ struct pqi_event *event;
int event_index;
event_queue = &ctrl_info->event_queue;
num_events = 0;
- need_delayed_work = false;
oq_ci = event_queue->oq_ci_copy;
while (1) {
@@ -2830,17 +2955,12 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
if (event_index >= 0) {
if (response->request_acknowlege) {
- pending_event =
- &ctrl_info->pending_events[event_index];
- pending_event->event_type =
- response->event_type;
- pending_event->event_id = response->event_id;
- pending_event->additional_event_id =
+ event = &ctrl_info->events[event_index];
+ event->pending = true;
+ event->event_type = response->event_type;
+ event->event_id = response->event_id;
+ event->additional_event_id =
response->additional_event_id;
- if (event_index != PQI_EVENT_HEARTBEAT) {
- pending_event->pending = true;
- need_delayed_work = true;
- }
}
}
@@ -2850,14 +2970,112 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
if (num_events) {
event_queue->oq_ci_copy = oq_ci;
writel(oq_ci, event_queue->oq_ci);
-
- if (need_delayed_work)
- schedule_work(&ctrl_info->event_work);
+ schedule_work(&ctrl_info->event_work);
}
return num_events;
}
+#define PQI_LEGACY_INTX_MASK 0x1
+
+static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
+ bool enable_intx)
+{
+ u32 intx_mask;
+ struct pqi_device_registers __iomem *pqi_registers;
+ volatile void __iomem *register_addr;
+
+ pqi_registers = ctrl_info->pqi_registers;
+
+ if (enable_intx)
+ register_addr = &pqi_registers->legacy_intx_mask_clear;
+ else
+ register_addr = &pqi_registers->legacy_intx_mask_set;
+
+ intx_mask = readl(register_addr);
+ intx_mask |= PQI_LEGACY_INTX_MASK;
+ writel(intx_mask, register_addr);
+}
+
+static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
+ enum pqi_irq_mode new_mode)
+{
+ switch (ctrl_info->irq_mode) {
+ case IRQ_MODE_MSIX:
+ switch (new_mode) {
+ case IRQ_MODE_MSIX:
+ break;
+ case IRQ_MODE_INTX:
+ pqi_configure_legacy_intx(ctrl_info, true);
+ sis_disable_msix(ctrl_info);
+ sis_enable_intx(ctrl_info);
+ break;
+ case IRQ_MODE_NONE:
+ sis_disable_msix(ctrl_info);
+ break;
+ }
+ break;
+ case IRQ_MODE_INTX:
+ switch (new_mode) {
+ case IRQ_MODE_MSIX:
+ pqi_configure_legacy_intx(ctrl_info, false);
+ sis_disable_intx(ctrl_info);
+ sis_enable_msix(ctrl_info);
+ break;
+ case IRQ_MODE_INTX:
+ break;
+ case IRQ_MODE_NONE:
+ pqi_configure_legacy_intx(ctrl_info, false);
+ sis_disable_intx(ctrl_info);
+ break;
+ }
+ break;
+ case IRQ_MODE_NONE:
+ switch (new_mode) {
+ case IRQ_MODE_MSIX:
+ sis_enable_msix(ctrl_info);
+ break;
+ case IRQ_MODE_INTX:
+ pqi_configure_legacy_intx(ctrl_info, true);
+ sis_enable_intx(ctrl_info);
+ break;
+ case IRQ_MODE_NONE:
+ break;
+ }
+ break;
+ }
+
+ ctrl_info->irq_mode = new_mode;
+}
+
+#define PQI_LEGACY_INTX_PENDING 0x1
+
+static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
+{
+ bool valid_irq;
+ u32 intx_status;
+
+ switch (ctrl_info->irq_mode) {
+ case IRQ_MODE_MSIX:
+ valid_irq = true;
+ break;
+ case IRQ_MODE_INTX:
+ intx_status =
+ readl(&ctrl_info->pqi_registers->legacy_intx_status);
+ if (intx_status & PQI_LEGACY_INTX_PENDING)
+ valid_irq = true;
+ else
+ valid_irq = false;
+ break;
+ case IRQ_MODE_NONE:
+ default:
+ valid_irq = false;
+ break;
+ }
+
+ return valid_irq;
+}
+
static irqreturn_t pqi_irq_handler(int irq, void *data)
{
struct pqi_ctrl_info *ctrl_info;
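ctrl_info->irq_mode, which pqi_change_irq_mode() and pqi_is_valid_irq() key off, is an enum declared in the driver header rather than in these hunks. The three values used above imply roughly the following (enum tag spelling and enumerator order are assumptions):

enum pqi_irq_mode {
        IRQ_MODE_NONE,          /* no interrupt source armed */
        IRQ_MODE_INTX,          /* legacy INTx, gated by the PQI legacy mask registers */
        IRQ_MODE_MSIX           /* MSI-X vectors from pci_alloc_irq_vectors() */
};

Tracking the current mode lets pqi_change_irq_mode() touch the hardware only on a real transition; same-mode requests are no-ops, so callers such as pqi_revert_to_sis_mode() and the resume path further down can ask for a mode unconditionally.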
@@ -2867,7 +3085,7 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
queue_group = data;
ctrl_info = queue_group->ctrl_info;
- if (!ctrl_info || !queue_group->oq_ci)
+ if (!pqi_is_valid_irq(ctrl_info))
return IRQ_NONE;
num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
@@ -2886,19 +3104,19 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
{
- struct pci_dev *pdev = ctrl_info->pci_dev;
+ struct pci_dev *pci_dev = ctrl_info->pci_dev;
int i;
int rc;
- ctrl_info->event_irq = pci_irq_vector(pdev, 0);
+ ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
- rc = request_irq(pci_irq_vector(pdev, i), pqi_irq_handler, 0,
+ rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
if (rc) {
- dev_err(&pdev->dev,
+ dev_err(&pci_dev->dev,
"irq %u init failed with error %d\n",
- pci_irq_vector(pdev, i), rc);
+ pci_irq_vector(pci_dev, i), rc);
return rc;
}
ctrl_info->num_msix_vectors_initialized++;
@@ -2907,23 +3125,44 @@ static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
return 0;
}
+static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
+{
+ int i;
+
+ for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
+ free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
+ &ctrl_info->queue_groups[i]);
+
+ ctrl_info->num_msix_vectors_initialized = 0;
+}
+
static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
- int ret;
+ int num_vectors_enabled;
- ret = pci_alloc_irq_vectors(ctrl_info->pci_dev,
+ num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
- if (ret < 0) {
+ if (num_vectors_enabled < 0) {
dev_err(&ctrl_info->pci_dev->dev,
- "MSI-X init failed with error %d\n", ret);
- return ret;
+ "MSI-X init failed with error %d\n",
+ num_vectors_enabled);
+ return num_vectors_enabled;
}
- ctrl_info->num_msix_vectors_enabled = ret;
+ ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
+ ctrl_info->irq_mode = IRQ_MODE_MSIX;
return 0;
}
+static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
+{
+ if (ctrl_info->num_msix_vectors_enabled) {
+ pci_free_irq_vectors(ctrl_info->pci_dev);
+ ctrl_info->num_msix_vectors_enabled = 0;
+ }
+}
+
static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
unsigned int i;
@@ -2976,16 +3215,15 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
alloc_length = (size_t)aligned_pointer +
PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
+ alloc_length += PQI_EXTRA_SGL_MEMORY;
+
ctrl_info->queue_memory_base =
dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
alloc_length,
&ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
- if (!ctrl_info->queue_memory_base) {
- dev_err(&ctrl_info->pci_dev->dev,
- "failed to allocate memory for PQI admin queues\n");
+ if (!ctrl_info->queue_memory_base)
return -ENOMEM;
- }
ctrl_info->queue_memory_length = alloc_length;
@@ -3235,6 +3473,8 @@ static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
writel(iq_pi, admin_queues->iq_pi);
}
+#define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60
+
static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
struct pqi_general_admin_response *response)
{
@@ -3246,7 +3486,7 @@ static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
admin_queues = &ctrl_info->admin_queues;
oq_ci = admin_queues->oq_ci_copy;
- timeout = (3 * HZ) + jiffies;
+ timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
while (1) {
oq_pi = *admin_queues->oq_pi;
@@ -3257,6 +3497,8 @@ static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
"timed out waiting for admin response\n");
return -ETIMEDOUT;
}
+ if (!sis_is_firmware_running(ctrl_info))
+ return -ENXIO;
usleep_range(1000, 2000);
}
@@ -3287,9 +3529,11 @@ static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
spin_lock_irqsave(&queue_group->submit_lock[path], flags);
- if (io_request)
+ if (io_request) {
+ io_request->queue_group = queue_group;
list_add_tail(&io_request->request_list_entry,
&queue_group->request_list[path]);
+ }
iq_pi = queue_group->iq_pi_copy[path];
@@ -3348,6 +3592,30 @@ static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
}
+#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
+
+static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
+ struct completion *wait)
+{
+ int rc;
+
+ while (1) {
+ if (wait_for_completion_io_timeout(wait,
+ PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
+ rc = 0;
+ break;
+ }
+
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info)) {
+ rc = -ENXIO;
+ break;
+ }
+ }
+
+ return rc;
+}
+
static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
void *context)
{
@@ -3371,7 +3639,7 @@ static int pqi_submit_raid_request_synchronous_with_io_request(
io_request);
if (timeout_msecs == NO_TIMEOUT) {
- wait_for_completion_io(&wait);
+ pqi_wait_for_completion_io(ctrl_info, &wait);
} else {
if (!wait_for_completion_io_timeout(&wait,
msecs_to_jiffies(timeout_msecs))) {
@@ -3418,6 +3686,18 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
}
}
+ pqi_ctrl_busy(ctrl_info);
+ timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
+ if (timeout_msecs == 0) {
+ rc = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (pqi_ctrl_offline(ctrl_info)) {
+ rc = -ENXIO;
+ goto out;
+ }
+
io_request = pqi_alloc_io_request(ctrl_info);
put_unaligned_le16(io_request->index,
@@ -3458,6 +3738,8 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
pqi_free_io_request(io_request);
+out:
+ pqi_ctrl_unbusy(ctrl_info);
up(&ctrl_info->sync_request_sem);
return rc;
@@ -3688,16 +3970,15 @@ static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
return 0;
}
-static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
+static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
+ unsigned int group_number)
{
- unsigned int i;
int rc;
struct pqi_queue_group *queue_group;
struct pqi_general_admin_request request;
struct pqi_general_admin_response response;
- i = ctrl_info->num_active_queue_groups;
- queue_group = &ctrl_info->queue_groups[i];
+ queue_group = &ctrl_info->queue_groups[group_number];
/*
* Create IQ (Inbound Queue - host to device queue) for
@@ -3827,8 +4108,6 @@ static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
get_unaligned_le64(
&response.data.create_operational_oq.oq_ci_offset);
- ctrl_info->num_active_queue_groups++;
-
return 0;
delete_inbound_queue_aio:
@@ -3855,7 +4134,7 @@ static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
}
for (i = 0; i < ctrl_info->num_queue_groups; i++) {
- rc = pqi_create_queue_group(ctrl_info);
+ rc = pqi_create_queue_group(ctrl_info, i);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
"error creating queue group number %u/%u\n",
@@ -3871,11 +4150,13 @@ static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
(offsetof(struct pqi_event_config, descriptors) + \
(PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
-static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
+static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
+ bool enable_events)
{
int rc;
unsigned int i;
struct pqi_event_config *event_config;
+ struct pqi_event_descriptor *event_descriptor;
struct pqi_general_management_request request;
event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
@@ -3909,9 +4190,15 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
if (rc)
goto out;
- for (i = 0; i < event_config->num_event_descriptors; i++)
- put_unaligned_le16(ctrl_info->event_queue.oq_id,
- &event_config->descriptors[i].oq_id);
+ for (i = 0; i < event_config->num_event_descriptors; i++) {
+ event_descriptor = &event_config->descriptors[i];
+ if (enable_events &&
+ pqi_is_supported_event(event_descriptor->event_type))
+ put_unaligned_le16(ctrl_info->event_queue.oq_id,
+ &event_descriptor->oq_id);
+ else
+ put_unaligned_le16(0, &event_descriptor->oq_id);
+ }
memset(&request, 0, sizeof(request));
@@ -3942,6 +4229,16 @@ out:
return rc;
}
+static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
+{
+ return pqi_configure_events(ctrl_info, true);
+}
+
+static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
+{
+ return pqi_configure_events(ctrl_info, false);
+}
+
static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
{
unsigned int i;
@@ -4056,8 +4353,12 @@ static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
ctrl_info->error_buffer_length =
ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
- max_transfer_size =
- min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
+ if (reset_devices)
+ max_transfer_size = min(ctrl_info->max_transfer_size,
+ PQI_MAX_TRANSFER_SIZE_KDUMP);
+ else
+ max_transfer_size = min(ctrl_info->max_transfer_size,
+ PQI_MAX_TRANSFER_SIZE);
max_sg_entries = max_transfer_size / PAGE_SIZE;
@@ -4069,28 +4370,35 @@ static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
ctrl_info->sg_chain_buffer_length =
- max_sg_entries * sizeof(struct pqi_sg_descriptor);
+ (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
+ PQI_EXTRA_SGL_MEMORY;
ctrl_info->sg_tablesize = max_sg_entries;
ctrl_info->max_sectors = max_transfer_size / 512;
}
static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
{
- int num_cpus;
- int max_queue_groups;
int num_queue_groups;
u16 num_elements_per_iq;
u16 num_elements_per_oq;
- max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
- ctrl_info->max_outbound_queues - 1);
- max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
+ if (reset_devices) {
+ num_queue_groups = 1;
+ } else {
+ int num_cpus;
+ int max_queue_groups;
+
+ max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
+ ctrl_info->max_outbound_queues - 1);
+ max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
- num_cpus = num_online_cpus();
- num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
- num_queue_groups = min(num_queue_groups, max_queue_groups);
+ num_cpus = num_online_cpus();
+ num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
+ num_queue_groups = min(num_queue_groups, max_queue_groups);
+ }
ctrl_info->num_queue_groups = num_queue_groups;
+ ctrl_info->max_hw_queue_index = num_queue_groups - 1;
/*
* Make sure that the max. inbound IU length is an even multiple
@@ -4276,21 +4584,18 @@ static void pqi_raid_io_complete(struct pqi_io_request *io_request,
pqi_scsi_done(scmd);
}
-static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+static int pqi_raid_submit_scsi_cmd_with_io_request(
+ struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
struct pqi_queue_group *queue_group)
{
int rc;
size_t cdb_length;
- struct pqi_io_request *io_request;
struct pqi_raid_path_request *request;
- io_request = pqi_alloc_io_request(ctrl_info);
io_request->io_complete_callback = pqi_raid_io_complete;
io_request->scmd = scmd;
- scmd->host_scribble = (unsigned char *)io_request;
-
request = io_request->iu;
memset(request, 0,
offsetof(struct pqi_raid_path_request, sg_descriptors));
@@ -4355,7 +4660,6 @@ static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
dev_err(&ctrl_info->pci_dev->dev,
"unknown data direction: %d\n",
scmd->sc_data_direction);
- WARN_ON(scmd->sc_data_direction);
break;
}
@@ -4370,6 +4674,176 @@ static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
return 0;
}
+static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
+ struct pqi_queue_group *queue_group)
+{
+ struct pqi_io_request *io_request;
+
+ io_request = pqi_alloc_io_request(ctrl_info);
+
+ return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
+ device, scmd, queue_group);
+}
+
+static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
+{
+ if (!pqi_ctrl_blocked(ctrl_info))
+ schedule_work(&ctrl_info->raid_bypass_retry_work);
+}
+
+static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
+{
+ struct scsi_cmnd *scmd;
+ struct pqi_scsi_dev *device;
+ struct pqi_ctrl_info *ctrl_info;
+
+ if (!io_request->raid_bypass)
+ return false;
+
+ scmd = io_request->scmd;
+ if ((scmd->result & 0xff) == SAM_STAT_GOOD)
+ return false;
+ if (host_byte(scmd->result) == DID_NO_CONNECT)
+ return false;
+
+ device = scmd->device->hostdata;
+ if (pqi_device_offline(device))
+ return false;
+
+ ctrl_info = shost_to_hba(scmd->device->host);
+ if (pqi_ctrl_offline(ctrl_info))
+ return false;
+
+ return true;
+}
+
+static inline void pqi_add_to_raid_bypass_retry_list(
+ struct pqi_ctrl_info *ctrl_info,
+ struct pqi_io_request *io_request, bool at_head)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
+ if (at_head)
+ list_add(&io_request->request_list_entry,
+ &ctrl_info->raid_bypass_retry_list);
+ else
+ list_add_tail(&io_request->request_list_entry,
+ &ctrl_info->raid_bypass_retry_list);
+ spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
+}
+
+static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
+ void *context)
+{
+ struct scsi_cmnd *scmd;
+
+ scmd = io_request->scmd;
+ pqi_free_io_request(io_request);
+ pqi_scsi_done(scmd);
+}
+
+static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
+{
+ struct scsi_cmnd *scmd;
+ struct pqi_ctrl_info *ctrl_info;
+
+ io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
+ scmd = io_request->scmd;
+ scmd->result = 0;
+ ctrl_info = shost_to_hba(scmd->device->host);
+
+ pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
+ pqi_schedule_bypass_retry(ctrl_info);
+}
+
+static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
+{
+ struct scsi_cmnd *scmd;
+ struct pqi_scsi_dev *device;
+ struct pqi_ctrl_info *ctrl_info;
+ struct pqi_queue_group *queue_group;
+
+ scmd = io_request->scmd;
+ device = scmd->device->hostdata;
+ if (pqi_device_in_reset(device)) {
+ pqi_free_io_request(io_request);
+ set_host_byte(scmd, DID_RESET);
+ pqi_scsi_done(scmd);
+ return 0;
+ }
+
+ ctrl_info = shost_to_hba(scmd->device->host);
+ queue_group = io_request->queue_group;
+
+ pqi_reinit_io_request(io_request);
+
+ return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
+ device, scmd, queue_group);
+}
+
+static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
+ struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long flags;
+ struct pqi_io_request *io_request;
+
+ spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
+ io_request = list_first_entry_or_null(
+ &ctrl_info->raid_bypass_retry_list,
+ struct pqi_io_request, request_list_entry);
+ if (io_request)
+ list_del(&io_request->request_list_entry);
+ spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
+
+ return io_request;
+}
+
+static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ struct pqi_io_request *io_request;
+
+ pqi_ctrl_busy(ctrl_info);
+
+ while (1) {
+ if (pqi_ctrl_blocked(ctrl_info))
+ break;
+ io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
+ if (!io_request)
+ break;
+ rc = pqi_retry_raid_bypass(io_request);
+ if (rc) {
+ pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request,
+ true);
+ pqi_schedule_bypass_retry(ctrl_info);
+ break;
+ }
+ }
+
+ pqi_ctrl_unbusy(ctrl_info);
+}
+
+static void pqi_raid_bypass_retry_worker(struct work_struct *work)
+{
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = container_of(work, struct pqi_ctrl_info,
+ raid_bypass_retry_work);
+ pqi_retry_raid_bypass_requests(ctrl_info);
+}
+
+static void pqi_clear_all_queued_raid_bypass_retries(
+ struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
+ INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
+ spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
+}
+
static void pqi_aio_io_complete(struct pqi_io_request *io_request,
void *context)
{
@@ -4379,6 +4853,10 @@ static void pqi_aio_io_complete(struct pqi_io_request *io_request,
scsi_dma_unmap(scmd);
if (io_request->status == -EAGAIN)
set_host_byte(scmd, DID_IMM_RETRY);
+ else if (pqi_raid_bypass_retry_needed(io_request)) {
+ pqi_queue_raid_bypass_retry(io_request);
+ return;
+ }
pqi_free_io_request(io_request);
pqi_scsi_done(scmd);
}
@@ -4388,13 +4866,13 @@ static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
struct pqi_queue_group *queue_group)
{
return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
- scmd->cmnd, scmd->cmd_len, queue_group, NULL);
+ scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
}
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
unsigned int cdb_length, struct pqi_queue_group *queue_group,
- struct pqi_encryption_info *encryption_info)
+ struct pqi_encryption_info *encryption_info, bool raid_bypass)
{
int rc;
struct pqi_io_request *io_request;
@@ -4403,8 +4881,7 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
io_request = pqi_alloc_io_request(ctrl_info);
io_request->io_complete_callback = pqi_aio_io_complete;
io_request->scmd = scmd;
-
- scmd->host_scribble = (unsigned char *)io_request;
+ io_request->raid_bypass = raid_bypass;
request = io_request->iu;
memset(request, 0,
@@ -4438,7 +4915,6 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
dev_err(&ctrl_info->pci_dev->dev,
"unknown data direction: %d\n",
scmd->sc_data_direction);
- WARN_ON(scmd->sc_data_direction);
break;
}
@@ -4463,47 +4939,74 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
return 0;
}
+static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
+ struct scsi_cmnd *scmd)
+{
+ u16 hw_queue;
+
+ hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
+ if (hw_queue > ctrl_info->max_hw_queue_index)
+ hw_queue = 0;
+
+ return hw_queue;
+}
+
+/*
+ * This function gets called just before we hand the completed SCSI request
+ * back to the SML.
+ */
+
+void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
+{
+ struct pqi_scsi_dev *device;
+
+ device = scmd->device->hostdata;
+ atomic_dec(&device->scsi_cmds_outstanding);
+}
+
static int pqi_scsi_queue_command(struct Scsi_Host *shost,
struct scsi_cmnd *scmd)
{
int rc;
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
- u16 hwq;
+ u16 hw_queue;
struct pqi_queue_group *queue_group;
bool raid_bypassed;
device = scmd->device->hostdata;
ctrl_info = shost_to_hba(shost);
+ atomic_inc(&device->scsi_cmds_outstanding);
+
if (pqi_ctrl_offline(ctrl_info)) {
set_host_byte(scmd, DID_NO_CONNECT);
pqi_scsi_done(scmd);
return 0;
}
+ pqi_ctrl_busy(ctrl_info);
+ if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
/*
* This is necessary because the SML doesn't zero out this field during
* error recovery.
*/
scmd->result = 0;
- hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
- if (hwq >= ctrl_info->num_queue_groups)
- hwq = 0;
-
- queue_group = &ctrl_info->queue_groups[hwq];
+ hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
+ queue_group = &ctrl_info->queue_groups[hw_queue];
if (pqi_is_logical_device(device)) {
raid_bypassed = false;
- if (device->offload_enabled &&
+ if (device->raid_bypass_enabled &&
!blk_rq_is_passthrough(scmd->request)) {
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
scmd, queue_group);
- if (rc == 0 ||
- rc == SCSI_MLQUEUE_HOST_BUSY ||
- rc == SAM_STAT_CHECK_CONDITION ||
- rc == SAM_STAT_RESERVATION_CONFLICT)
+ if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
raid_bypassed = true;
}
if (!raid_bypassed)
@@ -4518,9 +5021,162 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
queue_group);
}
+out:
+ pqi_ctrl_unbusy(ctrl_info);
+ if (rc)
+ atomic_dec(&device->scsi_cmds_outstanding);
+
return rc;
}
+static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_queue_group *queue_group)
+{
+ unsigned int path;
+ unsigned long flags;
+ bool list_is_empty;
+
+ for (path = 0; path < 2; path++) {
+ while (1) {
+ spin_lock_irqsave(
+ &queue_group->submit_lock[path], flags);
+ list_is_empty =
+ list_empty(&queue_group->request_list[path]);
+ spin_unlock_irqrestore(
+ &queue_group->submit_lock[path], flags);
+ if (list_is_empty)
+ break;
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ usleep_range(1000, 2000);
+ }
+ }
+
+ return 0;
+}
+
+static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+ unsigned int i;
+ unsigned int path;
+ struct pqi_queue_group *queue_group;
+ pqi_index_t iq_pi;
+ pqi_index_t iq_ci;
+
+ for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+ queue_group = &ctrl_info->queue_groups[i];
+
+ rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
+ if (rc)
+ return rc;
+
+ for (path = 0; path < 2; path++) {
+ iq_pi = queue_group->iq_pi_copy[path];
+
+ while (1) {
+ iq_ci = *queue_group->iq_ci[path];
+ if (iq_ci == iq_pi)
+ break;
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ usleep_range(1000, 2000);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ unsigned int i;
+ unsigned int path;
+ struct pqi_queue_group *queue_group;
+ unsigned long flags;
+ struct pqi_io_request *io_request;
+ struct pqi_io_request *next;
+ struct scsi_cmnd *scmd;
+ struct pqi_scsi_dev *scsi_device;
+
+ for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+ queue_group = &ctrl_info->queue_groups[i];
+
+ for (path = 0; path < 2; path++) {
+ spin_lock_irqsave(
+ &queue_group->submit_lock[path], flags);
+
+ list_for_each_entry_safe(io_request, next,
+ &queue_group->request_list[path],
+ request_list_entry) {
+ scmd = io_request->scmd;
+ if (!scmd)
+ continue;
+
+ scsi_device = scmd->device->hostdata;
+ if (scsi_device != device)
+ continue;
+
+ list_del(&io_request->request_list_entry);
+ set_host_byte(scmd, DID_RESET);
+ pqi_scsi_done(scmd);
+ }
+
+ spin_unlock_irqrestore(
+ &queue_group->submit_lock[path], flags);
+ }
+ }
+}
+
+static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device)
+{
+ while (atomic_read(&device->scsi_cmds_outstanding)) {
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
+static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
+{
+ bool io_pending;
+ unsigned long flags;
+ struct pqi_scsi_dev *device;
+
+ while (1) {
+ io_pending = false;
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+ list_for_each_entry(device, &ctrl_info->scsi_device_list,
+ scsi_device_list_entry) {
+ if (atomic_read(&device->scsi_cmds_outstanding)) {
+ io_pending = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
+ flags);
+
+ if (!io_pending)
+ break;
+
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info))
+ return -ENXIO;
+
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
void *context)
{
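The new wait loops above, like the heartbeat timer handler earlier, poll pqi_check_ctrl_health() and then pqi_ctrl_offline() so that a locked-up controller breaks the loop with -ENXIO instead of spinning forever. The health check itself is defined earlier in the file; the sketch below is only an assumption about its behavior, inferred from how these hunks pair it with sis_is_firmware_running() and pqi_take_ctrl_offline():

/*
 * Assumed behavior, not the patch's code: if the controller still looks
 * online but SIS firmware has stopped running, start the offline
 * teardown so the pqi_ctrl_offline() checks in the pollers above fire.
 */
static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
        if (ctrl_info->controller_online &&
            !sis_is_firmware_running(ctrl_info))
                pqi_take_ctrl_offline(ctrl_info);
}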
@@ -4535,7 +5191,6 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device, struct completion *wait)
{
int rc;
- unsigned int wait_secs = 0;
while (1) {
if (wait_for_completion_io_timeout(wait,
@@ -4546,16 +5201,9 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
pqi_check_ctrl_health(ctrl_info);
if (pqi_ctrl_offline(ctrl_info)) {
- rc = -ETIMEDOUT;
+ rc = -ENXIO;
break;
}
-
- wait_secs += PQI_LUN_RESET_TIMEOUT_SECS;
-
- dev_err(&ctrl_info->pci_dev->dev,
- "resetting scsi %d:%d:%d:%d - waiting %u seconds\n",
- ctrl_info->scsi_host->host_no, device->bus,
- device->target, device->lun, wait_secs);
}
return rc;
@@ -4569,8 +5217,6 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
DECLARE_COMPLETION_ONSTACK(wait);
struct pqi_task_management_request *request;
- down(&ctrl_info->lun_reset_sem);
-
io_request = pqi_alloc_io_request(ctrl_info);
io_request->io_complete_callback = pqi_lun_reset_complete;
io_request->context = &wait;
@@ -4595,7 +5241,6 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
rc = io_request->status;
pqi_free_io_request(io_request);
- up(&ctrl_info->lun_reset_sem);
return rc;
}
@@ -4607,11 +5252,9 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
{
int rc;
- pqi_check_ctrl_health(ctrl_info);
- if (pqi_ctrl_offline(ctrl_info))
- return FAILED;
-
rc = pqi_lun_reset(ctrl_info, device);
+ if (rc == 0)
+ rc = pqi_device_wait_for_pending_io(ctrl_info, device);
return rc == 0 ? SUCCESS : FAILED;
}
@@ -4619,23 +5262,46 @@ static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
{
int rc;
+ struct Scsi_Host *shost;
struct pqi_ctrl_info *ctrl_info;
struct pqi_scsi_dev *device;
- ctrl_info = shost_to_hba(scmd->device->host);
+ shost = scmd->device->host;
+ ctrl_info = shost_to_hba(shost);
device = scmd->device->hostdata;
dev_err(&ctrl_info->pci_dev->dev,
"resetting scsi %d:%d:%d:%d\n",
- ctrl_info->scsi_host->host_no,
- device->bus, device->target, device->lun);
+ shost->host_no, device->bus, device->target, device->lun);
+
+ pqi_check_ctrl_health(ctrl_info);
+ if (pqi_ctrl_offline(ctrl_info)) {
+ rc = FAILED;
+ goto out;
+ }
+
+ mutex_lock(&ctrl_info->lun_reset_mutex);
+
+ pqi_ctrl_block_requests(ctrl_info);
+ pqi_ctrl_wait_until_quiesced(ctrl_info);
+ pqi_fail_io_queued_for_device(ctrl_info, device);
+ rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
+ pqi_device_reset_start(device);
+ pqi_ctrl_unblock_requests(ctrl_info);
+
+ if (rc)
+ rc = FAILED;
+ else
+ rc = pqi_device_reset(ctrl_info, device);
+
+ pqi_device_reset_done(device);
- rc = pqi_device_reset(ctrl_info, device);
+ mutex_unlock(&ctrl_info->lun_reset_mutex);
+out:
dev_err(&ctrl_info->pci_dev->dev,
"reset of scsi %d:%d:%d:%d: %s\n",
- ctrl_info->scsi_host->host_no,
- device->bus, device->target, device->lun,
+ shost->host_no, device->bus, device->target, device->lun,
rc == SUCCESS ? "SUCCESS" : "FAILED");
return rc;
@@ -4667,7 +5333,7 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
sdev_id(sdev), sdev->lun);
}
- if (device && device->expose_device) {
+ if (device) {
sdev->hostdata = device;
device->sdev = sdev;
if (device->queue_depth) {
@@ -4682,17 +5348,6 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
return 0;
}
-static int pqi_slave_configure(struct scsi_device *sdev)
-{
- struct pqi_scsi_dev *device;
-
- device = sdev->hostdata;
- if (!device->expose_device)
- sdev->no_uld_attach = true;
-
- return 0;
-}
-
static int pqi_map_queues(struct Scsi_Host *shost)
{
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
@@ -5005,12 +5660,55 @@ static ssize_t pqi_host_rescan_store(struct device *dev,
return count;
}
-static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
-static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
+static ssize_t pqi_lockup_action_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ int count = 0;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
+ if (pqi_lockup_actions[i].action == pqi_lockup_action)
+ count += snprintf(buffer + count, PAGE_SIZE - count,
+ "[%s] ", pqi_lockup_actions[i].name);
+ else
+ count += snprintf(buffer + count, PAGE_SIZE - count,
+ "%s ", pqi_lockup_actions[i].name);
+ }
+
+ count += snprintf(buffer + count, PAGE_SIZE - count, "\n");
+
+ return count;
+}
+
+static ssize_t pqi_lockup_action_store(struct device *dev,
+ struct device_attribute *attr, const char *buffer, size_t count)
+{
+ unsigned int i;
+ char *action_name;
+ char action_name_buffer[32];
+
+ strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
+ action_name = strstrip(action_name_buffer);
+
+ for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
+ if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
+ pqi_lockup_action = pqi_lockup_actions[i].action;
+ return count;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
+static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
+static DEVICE_ATTR(lockup_action, 0644,
+ pqi_lockup_action_show, pqi_lockup_action_store);
static struct device_attribute *pqi_shost_attrs[] = {
&dev_attr_version,
&dev_attr_rescan,
+ &dev_attr_lockup_action,
NULL
};
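The lockup_action handlers iterate a pqi_lockup_actions[] table and assign pqi_lockup_action; both are defined near the module parameters and are not shown in these hunks. Their shape is implied by pqi_perform_lockup_action() and the "supported settings: none, reboot, panic" warning further down; in the sketch below the enum tag name and the default value are assumptions:

/* Sketch of the lockup-action table consistent with the hunks here. */
static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
        enum pqi_lockup_action  action;
        char                    *name;
} pqi_lockup_actions[] = {
        { NONE, "none" },
        { REBOOT, "reboot" },
        { PANIC, "panic" },
};

Because the attribute is registered through pqi_shost_attrs[], it appears per host (e.g. /sys/class/scsi_host/hostN/lockup_action), and writing one of the three names selects what pqi_take_ctrl_offline() ultimately does via pqi_perform_lockup_action().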
@@ -5055,7 +5753,7 @@ static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
- buffer[0] = device->offload_enabled ? '1' : '0';
+ buffer[0] = device->raid_bypass_enabled ? '1' : '0';
buffer[1] = '\n';
buffer[2] = '\0';
@@ -5064,13 +5762,41 @@ static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
return 2;
}
-static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
-static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
+static ssize_t pqi_raid_level_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ char *raid_level;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+
+ if (pqi_is_logical_device(device))
+ raid_level = pqi_raid_level_to_string(device->raid_level);
+ else
+ raid_level = "N/A";
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
+}
+
+static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
+static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
pqi_ssd_smart_path_enabled_show, NULL);
+static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
static struct device_attribute *pqi_sdev_attrs[] = {
&dev_attr_sas_address,
&dev_attr_ssd_smart_path_enabled,
+ &dev_attr_raid_level,
NULL
};
@@ -5086,7 +5812,6 @@ static struct scsi_host_template pqi_driver_template = {
.eh_device_reset_handler = pqi_eh_device_reset_handler,
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
- .slave_configure = pqi_slave_configure,
.map_queues = pqi_map_queues,
.sdev_attrs = pqi_sdev_attrs,
.shost_attrs = pqi_shost_attrs,
@@ -5217,49 +5942,113 @@ out:
return rc;
}
-static int pqi_kdump_init(struct pqi_ctrl_info *ctrl_info)
+static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
{
- if (!sis_is_firmware_running(ctrl_info))
- return -ENXIO;
+ u32 table_length;
+ u32 section_offset;
+ void __iomem *table_iomem_addr;
+ struct pqi_config_table *config_table;
+ struct pqi_config_table_section_header *section;
+
+ table_length = ctrl_info->config_table_length;
+
+ config_table = kmalloc(table_length, GFP_KERNEL);
+ if (!config_table) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "failed to allocate memory for PQI configuration table\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Copy the config table contents from I/O memory space into the
+ * temporary buffer.
+ */
+ table_iomem_addr = ctrl_info->iomem_base +
+ ctrl_info->config_table_offset;
+ memcpy_fromio(config_table, table_iomem_addr, table_length);
+
+ section_offset =
+ get_unaligned_le32(&config_table->first_section_offset);
+
+ while (section_offset) {
+ section = (void *)config_table + section_offset;
- if (pqi_get_ctrl_mode(ctrl_info) == PQI_MODE) {
- sis_disable_msix(ctrl_info);
- if (pqi_reset(ctrl_info) == 0)
- sis_reenable_sis_mode(ctrl_info);
+ switch (get_unaligned_le16(&section->section_id)) {
+ case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
+ if (pqi_disable_heartbeat)
+ dev_warn(&ctrl_info->pci_dev->dev,
+ "heartbeat disabled by module parameter\n");
+ else
+ ctrl_info->heartbeat_counter =
+ table_iomem_addr +
+ section_offset +
+ offsetof(
+ struct pqi_config_table_heartbeat,
+ heartbeat_counter);
+ break;
+ }
+
+ section_offset =
+ get_unaligned_le16(&section->next_section_offset);
}
+ kfree(config_table);
+
return 0;
}
-static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
+/* Switches the controller from PQI mode back into SIS mode. */
+
+static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
{
int rc;
- if (reset_devices) {
- rc = pqi_kdump_init(ctrl_info);
- if (rc)
- return rc;
+ pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
+ rc = pqi_reset(ctrl_info);
+ if (rc)
+ return rc;
+ sis_reenable_sis_mode(ctrl_info);
+ pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
+
+ return 0;
+}
+
+/*
+ * If the controller isn't already in SIS mode, this function forces it into
+ * SIS mode.
+ */
+
+static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
+{
+ if (!sis_is_firmware_running(ctrl_info))
+ return -ENXIO;
+
+ if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
+ return 0;
+
+ if (sis_is_kernel_up(ctrl_info)) {
+ pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
+ return 0;
}
- /*
- * When the controller comes out of reset, it is always running
- * in legacy SIS mode. This is so that it can be compatible
- * with legacy drivers shipped with OSes. So we have to talk
- * to it using SIS commands at first. Once we are satisified
- * that the controller supports PQI, we transition it into PQI
- * mode.
- */
+ return pqi_revert_to_sis_mode(ctrl_info);
+}
+
+static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+
+ rc = pqi_force_sis_mode(ctrl_info);
+ if (rc)
+ return rc;
/*
* Wait until the controller is ready to start accepting SIS
* commands.
*/
rc = sis_wait_for_ctrl_ready(ctrl_info);
- if (rc) {
- dev_err(&ctrl_info->pci_dev->dev,
- "error initializing SIS interface\n");
+ if (rc)
return rc;
- }
/*
* Get the controller properties. This allows us to determine
@@ -5279,9 +6068,17 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
return rc;
}
- if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
- ctrl_info->max_outstanding_requests =
- PQI_MAX_OUTSTANDING_REQUESTS;
+ if (reset_devices) {
+ if (ctrl_info->max_outstanding_requests >
+ PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
+ ctrl_info->max_outstanding_requests =
+ PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
+ } else {
+ if (ctrl_info->max_outstanding_requests >
+ PQI_MAX_OUTSTANDING_REQUESTS)
+ ctrl_info->max_outstanding_requests =
+ PQI_MAX_OUTSTANDING_REQUESTS;
+ }
pqi_calculate_io_resources(ctrl_info);
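pqi_process_config_table() reaches into the table only through get_unaligned_le16()/le32() and offsetof(), so just a few fields of the config-table structures matter here. The sketch below shows the layout those accesses imply; the authoritative definitions are in smartpqi.h, and any fields not touched by this function are omitted:

struct pqi_config_table {
        /* fields preceding this one, if any, are not used here */
        __le32  first_section_offset;   /* byte offset of the first section */
};

struct pqi_config_table_section_header {
        __le16  section_id;             /* e.g. PQI_CONFIG_TABLE_SECTION_HEARTBEAT */
        __le16  next_section_offset;    /* zero terminates the walk */
};

struct pqi_config_table_heartbeat {
        struct pqi_config_table_section_header header;
        __le32  heartbeat_counter;      /* incremented by firmware */
};

Note that ctrl_info->heartbeat_counter is pointed at the section inside the mapped BAR (table_iomem_addr + section_offset + ...), not into the kmalloc'd copy, so the heartbeat timer keeps reading live firmware state after the temporary buffer is freed.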
@@ -5316,10 +6113,14 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
ctrl_info->pqi_mode_enabled = true;
pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
+ rc = pqi_process_config_table(ctrl_info);
+ if (rc)
+ return rc;
+
rc = pqi_alloc_admin_queues(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
- "error allocating admin queues\n");
+ "failed to allocate admin queues\n");
return rc;
}
@@ -5358,8 +6159,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
return rc;
rc = pqi_alloc_operational_queues(ctrl_info);
- if (rc)
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "failed to allocate operational queues\n");
return rc;
+ }
pqi_init_operational_queues(ctrl_info);
@@ -5371,19 +6175,18 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
if (rc)
return rc;
- sis_enable_msix(ctrl_info);
+ pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
+
+ ctrl_info->controller_online = true;
+ pqi_start_heartbeat_timer(ctrl_info);
- rc = pqi_configure_events(ctrl_info);
+ rc = pqi_enable_events(ctrl_info);
if (rc) {
dev_err(&ctrl_info->pci_dev->dev,
- "error configuring events\n");
+ "error enabling events\n");
return rc;
}
- pqi_start_heartbeat_timer(ctrl_info);
-
- ctrl_info->controller_online = true;
-
/* Register with the SCSI subsystem. */
rc = pqi_register_scsi(ctrl_info);
if (rc)
@@ -5410,6 +6213,119 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
return 0;
}
+static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int i;
+ struct pqi_admin_queues *admin_queues;
+ struct pqi_event_queue *event_queue;
+
+ admin_queues = &ctrl_info->admin_queues;
+ admin_queues->iq_pi_copy = 0;
+ admin_queues->oq_ci_copy = 0;
+ *admin_queues->oq_pi = 0;
+
+ for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+ ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
+ ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
+ ctrl_info->queue_groups[i].oq_ci_copy = 0;
+
+ *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0;
+ *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0;
+ *ctrl_info->queue_groups[i].oq_pi = 0;
+ }
+
+ event_queue = &ctrl_info->event_queue;
+ *event_queue->oq_pi = 0;
+ event_queue->oq_ci_copy = 0;
+}
+
+static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
+{
+ int rc;
+
+ rc = pqi_force_sis_mode(ctrl_info);
+ if (rc)
+ return rc;
+
+ /*
+ * Wait until the controller is ready to start accepting SIS
+ * commands.
+ */
+ rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
+ if (rc)
+ return rc;
+
+ /*
+ * If the function we are about to call succeeds, the
+ * controller will transition from legacy SIS mode
+ * into PQI mode.
+ */
+ rc = sis_init_base_struct_addr(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error initializing PQI mode\n");
+ return rc;
+ }
+
+ /* Wait for the controller to complete the SIS -> PQI transition. */
+ rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "transition to PQI mode failed\n");
+ return rc;
+ }
+
+ /* From here on, we are running in PQI mode. */
+ ctrl_info->pqi_mode_enabled = true;
+ pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
+
+ pqi_reinit_queues(ctrl_info);
+
+ rc = pqi_create_admin_queues(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error creating admin queues\n");
+ return rc;
+ }
+
+ rc = pqi_create_queues(ctrl_info);
+ if (rc)
+ return rc;
+
+ pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
+
+ ctrl_info->controller_online = true;
+ pqi_start_heartbeat_timer(ctrl_info);
+ pqi_ctrl_unblock_requests(ctrl_info);
+
+ rc = pqi_enable_events(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error enabling events\n");
+ return rc;
+ }
+
+ rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "error updating host wellness\n");
+ return rc;
+ }
+
+ pqi_schedule_update_time_worker(ctrl_info);
+
+ pqi_scan_scsi_devices(ctrl_info);
+
+ return 0;
+}
+
+static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
+ u16 timeout)
+{
+ return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
+}
+
static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
{
int rc;
@@ -5450,12 +6366,23 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
goto release_regions;
}
- ctrl_info->registers = ctrl_info->iomem_base;
- ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
+#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
+
+ /* Increase the PCIe completion timeout. */
+ rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
+ PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "failed to set PCIe completion timeout\n");
+ goto release_regions;
+ }
/* Enable bus mastering. */
pci_set_master(ctrl_info->pci_dev);
+ ctrl_info->registers = ctrl_info->iomem_base;
+ ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
+
pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
return 0;
@@ -5472,7 +6399,8 @@ static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
{
iounmap(ctrl_info->iomem_base);
pci_release_regions(ctrl_info->pci_dev);
- pci_disable_device(ctrl_info->pci_dev);
+ if (pci_is_enabled(ctrl_info->pci_dev))
+ pci_disable_device(ctrl_info->pci_dev);
pci_set_drvdata(ctrl_info->pci_dev, NULL);
}
@@ -5486,6 +6414,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
return NULL;
mutex_init(&ctrl_info->scan_mutex);
+ mutex_init(&ctrl_info->lun_reset_mutex);
INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
spin_lock_init(&ctrl_info->scsi_device_list_lock);
@@ -5496,11 +6425,20 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
+ init_timer(&ctrl_info->heartbeat_timer);
+ INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);
+
sema_init(&ctrl_info->sync_request_sem,
PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
- sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);
+ init_waitqueue_head(&ctrl_info->block_requests_wait);
+
+ INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
+ spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
+ INIT_WORK(&ctrl_info->raid_bypass_retry_work,
+ pqi_raid_bypass_retry_worker);
ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
+ ctrl_info->irq_mode = IRQ_MODE_NONE;
ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
return ctrl_info;
@@ -5513,14 +6451,8 @@ static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
{
- int i;
-
- for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
- free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
- &ctrl_info->queue_groups[i]);
- }
-
- pci_free_irq_vectors(ctrl_info->pci_dev);
+ pqi_free_irqs(ctrl_info);
+ pqi_disable_msix_interrupts(ctrl_info);
}
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
@@ -5550,73 +6482,142 @@ static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
- cancel_delayed_work_sync(&ctrl_info->rescan_work);
- cancel_delayed_work_sync(&ctrl_info->update_time_work);
+ pqi_cancel_rescan_worker(ctrl_info);
+ pqi_cancel_update_time_worker(ctrl_info);
pqi_remove_all_scsi_devices(ctrl_info);
pqi_unregister_scsi(ctrl_info);
+ if (ctrl_info->pqi_mode_enabled)
+ pqi_revert_to_sis_mode(ctrl_info);
+ pqi_free_ctrl_resources(ctrl_info);
+}
- if (ctrl_info->pqi_mode_enabled) {
- sis_disable_msix(ctrl_info);
- if (pqi_reset(ctrl_info) == 0)
- sis_reenable_sis_mode(ctrl_info);
+static void pqi_perform_lockup_action(void)
+{
+ switch (pqi_lockup_action) {
+ case PANIC:
+ panic("FATAL: Smart Family Controller lockup detected");
+ break;
+ case REBOOT:
+ emergency_restart();
+ break;
+ case NONE:
+ default:
+ break;
+ }
+}
+
+static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
+ .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
+ .status = SAM_STAT_CHECK_CONDITION,
+};
+
+static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
+{
+ unsigned int i;
+ struct pqi_io_request *io_request;
+ struct scsi_cmnd *scmd;
+
+ for (i = 0; i < ctrl_info->max_io_slots; i++) {
+ io_request = &ctrl_info->io_request_pool[i];
+ if (atomic_read(&io_request->refcount) == 0)
+ continue;
+
+ scmd = io_request->scmd;
+ if (scmd) {
+ set_host_byte(scmd, DID_NO_CONNECT);
+ } else {
+ io_request->status = -ENXIO;
+ io_request->error_info =
+ &pqi_ctrl_offline_raid_error_info;
+ }
+
+ io_request->io_complete_callback(io_request,
+ io_request->context);
}
- pqi_free_ctrl_resources(ctrl_info);
}
-static void pqi_print_ctrl_info(struct pci_dev *pdev,
+static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
+{
+ pqi_perform_lockup_action();
+ pqi_stop_heartbeat_timer(ctrl_info);
+ pqi_free_interrupts(ctrl_info);
+ pqi_cancel_rescan_worker(ctrl_info);
+ pqi_cancel_update_time_worker(ctrl_info);
+ pqi_ctrl_wait_until_quiesced(ctrl_info);
+ pqi_fail_all_outstanding_requests(ctrl_info);
+ pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
+ pqi_ctrl_unblock_requests(ctrl_info);
+}
+
+static void pqi_ctrl_offline_worker(struct work_struct *work)
+{
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
+ pqi_take_ctrl_offline_deferred(ctrl_info);
+}
+
+static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+{
+ if (!ctrl_info->controller_online)
+ return;
+
+ ctrl_info->controller_online = false;
+ ctrl_info->pqi_mode_enabled = false;
+ pqi_ctrl_block_requests(ctrl_info);
+ if (!pqi_disable_ctrl_shutdown)
+ sis_shutdown_ctrl(ctrl_info);
+ pci_disable_device(ctrl_info->pci_dev);
+ dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
+ schedule_work(&ctrl_info->ctrl_offline_work);
+}
+
+static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
char *ctrl_description;
- if (id->driver_data) {
+ if (id->driver_data)
ctrl_description = (char *)id->driver_data;
- } else {
- switch (id->subvendor) {
- case PCI_VENDOR_ID_HP:
- ctrl_description = hpe_branded_controller;
- break;
- case PCI_VENDOR_ID_ADAPTEC2:
- default:
- ctrl_description = microsemi_branded_controller;
- break;
- }
- }
+ else
+ ctrl_description = "Microsemi Smart Family Controller";
- dev_info(&pdev->dev, "%s found\n", ctrl_description);
+ dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
}
-static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int pqi_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
{
int rc;
int node;
struct pqi_ctrl_info *ctrl_info;
- pqi_print_ctrl_info(pdev, id);
+ pqi_print_ctrl_info(pci_dev, id);
if (pqi_disable_device_id_wildcards &&
id->subvendor == PCI_ANY_ID &&
id->subdevice == PCI_ANY_ID) {
- dev_warn(&pdev->dev,
+ dev_warn(&pci_dev->dev,
"controller not probed because device ID wildcards are disabled\n");
return -ENODEV;
}
if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
- dev_warn(&pdev->dev,
+ dev_warn(&pci_dev->dev,
"controller device ID matched using wildcards\n");
- node = dev_to_node(&pdev->dev);
+ node = dev_to_node(&pci_dev->dev);
if (node == NUMA_NO_NODE)
- set_dev_node(&pdev->dev, 0);
+ set_dev_node(&pci_dev->dev, 0);
ctrl_info = pqi_alloc_ctrl_info(node);
if (!ctrl_info) {
- dev_err(&pdev->dev,
+ dev_err(&pci_dev->dev,
"failed to allocate controller info block\n");
return -ENOMEM;
}
- ctrl_info->pci_dev = pdev;
+ ctrl_info->pci_dev = pci_dev;
rc = pqi_pci_init(ctrl_info);
if (rc)
@@ -5634,23 +6635,23 @@ error:
return rc;
}
-static void pqi_pci_remove(struct pci_dev *pdev)
+static void pqi_pci_remove(struct pci_dev *pci_dev)
{
struct pqi_ctrl_info *ctrl_info;
- ctrl_info = pci_get_drvdata(pdev);
+ ctrl_info = pci_get_drvdata(pci_dev);
if (!ctrl_info)
return;
pqi_remove_ctrl(ctrl_info);
}
-static void pqi_shutdown(struct pci_dev *pdev)
+static void pqi_shutdown(struct pci_dev *pci_dev)
{
int rc;
struct pqi_ctrl_info *ctrl_info;
- ctrl_info = pci_get_drvdata(pdev);
+ ctrl_info = pci_get_drvdata(pci_dev);
if (!ctrl_info)
goto error;
@@ -5663,115 +6664,284 @@ static void pqi_shutdown(struct pci_dev *pdev)
return;
error:
- dev_warn(&pdev->dev,
+ dev_warn(&pci_dev->dev,
"unable to flush controller cache\n");
}
+static void pqi_process_lockup_action_param(void)
+{
+ unsigned int i;
+
+ if (!pqi_lockup_action_param)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
+ if (strcmp(pqi_lockup_action_param,
+ pqi_lockup_actions[i].name) == 0) {
+ pqi_lockup_action = pqi_lockup_actions[i].action;
+ return;
+ }
+ }
+
+ pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
+ DRIVER_NAME_SHORT, pqi_lockup_action_param);
+}
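The lockup-action parameter is handled with a common idiom: a charp module parameter matched against a small name-to-value table, warning and falling back to a safe default on unknown input. A self-contained sketch under hypothetical demo_* names (the driver's own pqi_lockup_actions table is defined elsewhere in the patch):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>

/* Hypothetical demo_* definitions, for illustration only. */
enum demo_lockup_action { DEMO_NONE, DEMO_REBOOT, DEMO_PANIC };

static char *demo_lockup_action_param;
module_param_named(lockup_action, demo_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "none, reboot or panic");

static const struct {
	const char *name;
	enum demo_lockup_action action;
} demo_lockup_actions[] = {
	{ "none",	DEMO_NONE },
	{ "reboot",	DEMO_REBOOT },
	{ "panic",	DEMO_PANIC },
};

static enum demo_lockup_action demo_parse_lockup_action(void)
{
	unsigned int i;

	if (!demo_lockup_action_param)
		return DEMO_NONE;

	for (i = 0; i < ARRAY_SIZE(demo_lockup_actions); i++)
		if (strcmp(demo_lockup_action_param,
			   demo_lockup_actions[i].name) == 0)
			return demo_lockup_actions[i].action;

	pr_warn("demo: invalid lockup_action \"%s\", using \"none\"\n",
		demo_lockup_action_param);
	return DEMO_NONE;
}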
+
+static void pqi_process_module_params(void)
+{
+ pqi_process_lockup_action_param();
+}
+
+static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
+{
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = pci_get_drvdata(pci_dev);
+
+ pqi_disable_events(ctrl_info);
+ pqi_cancel_update_time_worker(ctrl_info);
+ pqi_cancel_rescan_worker(ctrl_info);
+ pqi_wait_until_scan_finished(ctrl_info);
+ pqi_wait_until_lun_reset_finished(ctrl_info);
+ pqi_flush_cache(ctrl_info);
+ pqi_ctrl_block_requests(ctrl_info);
+ pqi_ctrl_wait_until_quiesced(ctrl_info);
+ pqi_wait_until_inbound_queues_empty(ctrl_info);
+ pqi_ctrl_wait_for_pending_io(ctrl_info);
+ pqi_stop_heartbeat_timer(ctrl_info);
+
+ if (state.event == PM_EVENT_FREEZE)
+ return 0;
+
+ pci_save_state(pci_dev);
+ pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
+
+ ctrl_info->controller_online = false;
+ ctrl_info->pqi_mode_enabled = false;
+
+ return 0;
+}
+
+static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
+{
+ int rc;
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = pci_get_drvdata(pci_dev);
+
+ if (pci_dev->current_state != PCI_D0) {
+ ctrl_info->max_hw_queue_index = 0;
+ pqi_free_interrupts(ctrl_info);
+ pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
+ rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
+ IRQF_SHARED, DRIVER_NAME_SHORT,
+ &ctrl_info->queue_groups[0]);
+ if (rc) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "irq %u init failed with error %d\n",
+ pci_dev->irq, rc);
+ return rc;
+ }
+ pqi_start_heartbeat_timer(ctrl_info);
+ pqi_ctrl_unblock_requests(ctrl_info);
+ return 0;
+ }
+
+ pci_set_power_state(pci_dev, PCI_D0);
+ pci_restore_state(pci_dev);
+
+ return pqi_ctrl_init_resume(ctrl_info);
+}
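pqi_suspend()/pqi_resume() use the legacy bus-level PCI PM callbacks: quiesce the driver, save config space, enter the target power state, and reverse that on resume (with a shortcut when the event is only a freeze). A bare-bones sketch of just the PCI state handling, with hypothetical demo_* names and the driver-specific quiescing omitted:

#include <linux/pci.h>

/* Hypothetical demo_* callbacks; driver-specific quiescing is omitted. */
static int demo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	/* ... stop queues, timers and interrupts here ... */

	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int demo_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* ... re-initialize the device and restart I/O here ... */
	return 0;
}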
+
/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x152d, 0x8a22)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x152d, 0x8a23)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x152d, 0x8a24)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x152d, 0x8a36)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ 0x152d, 0x8a37)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x0110)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0600)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0605)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0601)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0800)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0602)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0801)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0603)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0802)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0650)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0803)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0651)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0804)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0652)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0805)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0653)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0806)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0654)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0900)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0655)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0901)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0700)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0902)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_HP, 0x0701)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0903)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0800)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0904)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0801)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0905)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0802)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0906)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0803)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0907)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0804)
+ PCI_VENDOR_ID_ADAPTEC2, 0x0908)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0805)
+ PCI_VENDOR_ID_ADAPTEC2, 0x1200)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0900)
+ PCI_VENDOR_ID_ADAPTEC2, 0x1201)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0901)
+ PCI_VENDOR_ID_ADAPTEC2, 0x1202)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0902)
+ PCI_VENDOR_ID_ADAPTEC2, 0x1280)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0903)
+ PCI_VENDOR_ID_ADAPTEC2, 0x1281)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0904)
+ PCI_VENDOR_ID_ADAPTEC2, 0x1300)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0905)
+ PCI_VENDOR_ID_ADAPTEC2, 0x1301)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
- PCI_VENDOR_ID_ADAPTEC2, 0x0906)
+ PCI_VENDOR_ID_ADAPTEC2, 0x1380)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0600)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0601)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0602)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0603)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0604)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0606)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0650)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0651)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0652)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0653)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0654)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0655)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0656)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0657)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0700)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_HP, 0x0701)
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
@@ -5808,6 +6978,10 @@ static struct pci_driver pqi_pci_driver = {
.probe = pqi_pci_probe,
.remove = pqi_pci_remove,
.shutdown = pqi_shutdown,
+#if defined(CONFIG_PM)
+ .suspend = pqi_suspend,
+ .resume = pqi_resume,
+#endif
};
static int __init pqi_init(void)
@@ -5821,6 +6995,8 @@ static int __init pqi_init(void)
if (!pqi_sas_transport_template)
return -ENODEV;
+ pqi_process_module_params();
+
rc = pci_register_driver(&pqi_pci_driver);
if (rc)
sas_release_transport(pqi_sas_transport_template);
@@ -6173,6 +7349,9 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct pqi_event_config,
descriptors) != 4);
+ BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
+ ARRAY_SIZE(pqi_supported_event_types));
+
BUILD_BUG_ON(offsetof(struct pqi_event_response,
header.iu_type) != 0);
BUILD_BUG_ON(offsetof(struct pqi_event_response,
@@ -6246,6 +7425,22 @@ static void __attribute__((unused)) verify_structures(void)
BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
controller_mode) != 292);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
+ phys_bay_in_box) != 115);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
+ device_type) != 120);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
+ redundant_path_present_map) != 1736);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
+ active_path_number) != 1738);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
+ alternate_paths_phys_connector) != 1739);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
+ alternate_paths_phys_box_on_port) != 1755);
+ BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
+ current_queue_depth_limit) != 1796);
+ BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
+
BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
@@ -6260,4 +7455,6 @@ static void __attribute__((unused)) verify_structures(void)
PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
+ BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
+ PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index 52ca4f93f1b2..0d89d3728b43 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -1,6 +1,6 @@
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2016 Microsemi Corporation
+ * Copyright (c) 2016-2017 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index 71408f9e8f75..e55dfcf200e5 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -1,6 +1,6 @@
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2016 Microsemi Corporation
+ * Copyright (c) 2016-2017 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -33,7 +33,9 @@
/* for submission of legacy SIS commands */
#define SIS_REENABLE_SIS_MODE 0x1
#define SIS_ENABLE_MSIX 0x40
+#define SIS_ENABLE_INTX 0x80
#define SIS_SOFT_RESET 0x100
+#define SIS_TRIGGER_SHUTDOWN 0x800000
#define SIS_CMD_READY 0x200
#define SIS_CMD_COMPLETE 0x1000
#define SIS_CLEAR_CTRL_TO_HOST_DOORBELL 0x1000
@@ -55,6 +57,7 @@
#define SIS_CTRL_KERNEL_UP 0x80
#define SIS_CTRL_KERNEL_PANIC 0x100
#define SIS_CTRL_READY_TIMEOUT_SECS 30
+#define SIS_CTRL_READY_RESUME_TIMEOUT_SECS 90
#define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10
#pragma pack(1)
@@ -78,12 +81,13 @@ struct sis_base_struct {
#pragma pack()
-int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
+static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info,
+ unsigned int timeout_secs)
{
unsigned long timeout;
u32 status;
- timeout = (SIS_CTRL_READY_TIMEOUT_SECS * HZ) + jiffies;
+ timeout = (timeout_secs * HZ) + jiffies;
while (1) {
status = readl(&ctrl_info->registers->sis_firmware_status);
@@ -98,14 +102,30 @@ int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
if (status & SIS_CTRL_KERNEL_UP)
break;
}
- if (time_after(jiffies, timeout))
+ if (time_after(jiffies, timeout)) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "controller not ready after %u seconds\n",
+ timeout_secs);
return -ETIMEDOUT;
+ }
msleep(SIS_CTRL_READY_POLL_INTERVAL_MSECS);
}
return 0;
}
+int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
+{
+ return sis_wait_for_ctrl_ready_with_timeout(ctrl_info,
+ SIS_CTRL_READY_TIMEOUT_SECS);
+}
+
+int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info)
+{
+ return sis_wait_for_ctrl_ready_with_timeout(ctrl_info,
+ SIS_CTRL_READY_RESUME_TIMEOUT_SECS);
+}
+
bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info)
{
bool running;
@@ -126,6 +146,12 @@ bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info)
return running;
}
+bool sis_is_kernel_up(struct pqi_ctrl_info *ctrl_info)
+{
+ return readl(&ctrl_info->registers->sis_firmware_status) &
+ SIS_CTRL_KERNEL_UP;
+}
+
/* used for passing command parameters/results when issuing SIS commands */
struct sis_sync_cmd_params {
u32 mailbox[6]; /* mailboxes 0-5 */
@@ -308,6 +334,34 @@ out:
return rc;
}
+#define SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS 30
+
+static void sis_wait_for_doorbell_bit_to_clear(
+ struct pqi_ctrl_info *ctrl_info, u32 bit)
+{
+ u32 doorbell_register;
+ unsigned long timeout;
+
+ timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * HZ) + jiffies;
+
+ while (1) {
+ doorbell_register =
+ readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+ if ((doorbell_register & bit) == 0)
+ break;
+ if (readl(&ctrl_info->registers->sis_firmware_status) &
+ SIS_CTRL_KERNEL_PANIC)
+ break;
+ if (time_after(jiffies, timeout)) {
+ dev_err(&ctrl_info->pci_dev->dev,
+ "doorbell register bit 0x%x not cleared\n",
+ bit);
+ break;
+ }
+ usleep_range(1000, 2000);
+ }
+}
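sis_wait_for_doorbell_bit_to_clear() open-codes a bounded MMIO poll. For comparison only, the same wait could be phrased with the generic readl_poll_timeout() helper; the sketch below uses a hypothetical wrapper name and leaves out the firmware-panic early exit that the driver's loop also checks:

#include <linux/iopoll.h>

/*
 * Hypothetical helper: poll until @bit clears in @doorbell_reg, sleeping
 * about a millisecond between reads, for at most 30 seconds.  The driver's
 * open-coded loop above additionally bails out early when the firmware
 * reports a kernel panic, which this generic form does not express.
 */
static int demo_wait_for_doorbell_clear(void __iomem *doorbell_reg, u32 bit)
{
	u32 val;

	return readl_poll_timeout(doorbell_reg, val, !(val & bit),
				  1000, 30 * 1000 * 1000);
}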
+
/* Enable MSI-X interrupts on the controller. */
void sis_enable_msix(struct pqi_ctrl_info *ctrl_info)
@@ -320,6 +374,8 @@ void sis_enable_msix(struct pqi_ctrl_info *ctrl_info)
writel(doorbell_register,
&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+
+ sis_wait_for_doorbell_bit_to_clear(ctrl_info, SIS_ENABLE_MSIX);
}
/* Disable MSI-X interrupts on the controller. */
@@ -336,12 +392,48 @@ void sis_disable_msix(struct pqi_ctrl_info *ctrl_info)
&ctrl_info->registers->sis_host_to_ctrl_doorbell);
}
+void sis_enable_intx(struct pqi_ctrl_info *ctrl_info)
+{
+ u32 doorbell_register;
+
+ doorbell_register =
+ readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+ doorbell_register |= SIS_ENABLE_INTX;
+
+ writel(doorbell_register,
+ &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+
+ sis_wait_for_doorbell_bit_to_clear(ctrl_info, SIS_ENABLE_INTX);
+}
+
+void sis_disable_intx(struct pqi_ctrl_info *ctrl_info)
+{
+ u32 doorbell_register;
+
+ doorbell_register =
+ readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
+ doorbell_register &= ~SIS_ENABLE_INTX;
+
+ writel(doorbell_register,
+ &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+}
+
void sis_soft_reset(struct pqi_ctrl_info *ctrl_info)
{
writel(SIS_SOFT_RESET,
&ctrl_info->registers->sis_host_to_ctrl_doorbell);
}
+void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info)
+{
+ if (readl(&ctrl_info->registers->sis_firmware_status) &
+ SIS_CTRL_KERNEL_PANIC)
+ return;
+
+ writel(SIS_TRIGGER_SHUTDOWN,
+ &ctrl_info->registers->sis_host_to_ctrl_doorbell);
+}
+
#define SIS_MODE_READY_TIMEOUT_SECS 30
int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info)
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index bd6e7b08338e..983184b69373 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -1,6 +1,6 @@
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2016 Microsemi Corporation
+ * Copyright (c) 2016-2017 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
* This program is free software; you can redistribute it and/or modify
@@ -20,13 +20,18 @@
#define _SMARTPQI_SIS_H
int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info);
+int sis_wait_for_ctrl_ready_resume(struct pqi_ctrl_info *ctrl_info);
bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info);
+bool sis_is_kernel_up(struct pqi_ctrl_info *ctrl_info);
int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info);
int sis_get_pqi_capabilities(struct pqi_ctrl_info *ctrl_info);
int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info);
void sis_enable_msix(struct pqi_ctrl_info *ctrl_info);
void sis_disable_msix(struct pqi_ctrl_info *ctrl_info);
+void sis_enable_intx(struct pqi_ctrl_info *ctrl_info);
+void sis_disable_intx(struct pqi_ctrl_info *ctrl_info);
void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
+void sis_shutdown_ctrl(struct pqi_ctrl_info *ctrl_info);
int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info);
void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value);
u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info);
diff --git a/drivers/scsi/snic/snic_isr.c b/drivers/scsi/snic/snic_isr.c
index d859501e4ccd..c4da3673f2ae 100644
--- a/drivers/scsi/snic/snic_isr.c
+++ b/drivers/scsi/snic/snic_isr.c
@@ -141,7 +141,7 @@ snic_request_intr(struct snic *snic)
snic->msix[i].devid);
if (ret) {
SNIC_HOST_ERR(snic->shost,
- "MSI-X: requrest_irq(%d) failed %d\n",
+ "MSI-X: request_irq(%d) failed %d\n",
i,
ret);
snic_free_intr(snic);
@@ -151,7 +151,7 @@ snic_request_intr(struct snic *snic)
}
return ret;
-} /* end of snic_requrest_intr */
+} /* end of snic_request_intr */
int
snic_set_intr_mode(struct snic *snic)
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index da979a73baa0..d8a376b7882d 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -359,8 +359,6 @@ snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
SNIC_SCSI_DBG(shost, "sc %p Tag %d (sc %0x) lun %lld in snic_qcmd\n",
sc, snic_cmd_tag(sc), sc->cmnd[0], sc->device->lun);
- memset(scsi_cmd_priv(sc), 0, sizeof(struct snic_internal_io_state));
-
ret = snic_issue_scsi_req(snic, tgt, sc);
if (ret) {
SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret);
@@ -1262,7 +1260,7 @@ snic_io_cmpl_handler(struct vnic_dev *vdev,
default:
SNIC_BUG_ON(1);
SNIC_SCSI_DBG(snic->shost,
- "Unknown Firmwqre completion request type %d\n",
+ "Unknown Firmware completion request type %d\n",
fwreq->hdr.type);
break;
}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index ae966dc3bbc5..3cc8d67783a1 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1149,13 +1149,9 @@ static void storvsc_on_receive(struct storvsc_device *stor_device,
static void storvsc_on_channel_callback(void *context)
{
struct vmbus_channel *channel = (struct vmbus_channel *)context;
+ const struct vmpacket_descriptor *desc;
struct hv_device *device;
struct storvsc_device *stor_device;
- u32 bytes_recvd;
- u64 request_id;
- unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
- struct storvsc_cmd_request *request;
- int ret;
if (channel->primary_channel != NULL)
device = channel->primary_channel->device_obj;
@@ -1166,32 +1162,22 @@ static void storvsc_on_channel_callback(void *context)
if (!stor_device)
return;
- do {
- ret = vmbus_recvpacket(channel, packet,
- ALIGN((sizeof(struct vstor_packet) -
- vmscsi_size_delta), 8),
- &bytes_recvd, &request_id);
- if (ret == 0 && bytes_recvd > 0) {
-
- request = (struct storvsc_cmd_request *)
- (unsigned long)request_id;
-
- if ((request == &stor_device->init_request) ||
- (request == &stor_device->reset_request)) {
-
- memcpy(&request->vstor_packet, packet,
- (sizeof(struct vstor_packet) -
- vmscsi_size_delta));
- complete(&request->wait_event);
- } else {
- storvsc_on_receive(stor_device,
- (struct vstor_packet *)packet,
- request);
- }
+ foreach_vmbus_pkt(desc, channel) {
+ void *packet = hv_pkt_data(desc);
+ struct storvsc_cmd_request *request;
+
+ request = (struct storvsc_cmd_request *)
+ ((unsigned long)desc->trans_id);
+
+ if (request == &stor_device->init_request ||
+ request == &stor_device->reset_request) {
+ memcpy(&request->vstor_packet, packet,
+ (sizeof(struct vstor_packet) - vmscsi_size_delta));
+ complete(&request->wait_event);
} else {
- break;
+ storvsc_on_receive(stor_device, packet, request);
}
- } while (1);
+ }
}
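The rewritten storvsc channel callback iterates ring-buffer packets in place with foreach_vmbus_pkt() and hv_pkt_data() instead of copying each packet out through vmbus_recvpacket(). A minimal, hypothetical consumer of the same iterator, shown only to illustrate the API shape:

#include <linux/hyperv.h>

/* Hypothetical consumer of the VMBus packet iterator, for illustration. */
static void demo_channel_callback(void *context)
{
	struct vmbus_channel *channel = context;
	const struct vmpacket_descriptor *desc;

	foreach_vmbus_pkt(desc, channel) {
		void *payload = hv_pkt_data(desc);
		u32 len = hv_pkt_datalen(desc);

		/* desc->trans_id carries the sender's request cookie. */
		pr_debug("pkt trans_id %llu, %u payload bytes at %p\n",
			 desc->trans_id, len, payload);
	}
}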
static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size,
@@ -1220,13 +1206,13 @@ static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size,
static int storvsc_dev_remove(struct hv_device *device)
{
struct storvsc_device *stor_device;
- unsigned long flags;
stor_device = hv_get_drvdata(device);
- spin_lock_irqsave(&device->channel->inbound_lock, flags);
stor_device->destroy = true;
- spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
+
+ /* Make sure flag is set before waiting */
+ wmb();
/*
* At this point, all outbound traffic should be disabled. We
@@ -1243,9 +1229,7 @@ static int storvsc_dev_remove(struct hv_device *device)
* we have drained - to drain the outgoing packets, we need to
* allow incoming packets.
*/
- spin_lock_irqsave(&device->channel->inbound_lock, flags);
hv_set_drvdata(device, NULL);
- spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
/* Close the channel */
vmbus_close(device->channel);
@@ -1511,6 +1495,10 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
*/
static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
{
+#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
+ if (scmnd->device->host->transportt == fc_transport_template)
+ return fc_eh_timed_out(scmnd);
+#endif
return BLK_EH_RESET_TIMER;
}
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 7b6d4c2087d7..747ee64a78e1 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -566,6 +566,7 @@ static int esp_sbus_probe(struct platform_device *op)
struct device_node *dp = op->dev.of_node;
struct platform_device *dma_of = NULL;
int hme = 0;
+ int ret;
if (dp->parent &&
(!strcmp(dp->parent->name, "espdma") ||
@@ -580,7 +581,11 @@ static int esp_sbus_probe(struct platform_device *op)
if (!dma_of)
return -ENODEV;
- return esp_sbus_probe_one(op, dma_of, hme);
+ ret = esp_sbus_probe_one(op, dma_of, hme);
+ if (ret)
+ put_device(&dma_of->dev);
+
+ return ret;
}
static int esp_sbus_remove(struct platform_device *op)
@@ -613,6 +618,8 @@ static int esp_sbus_remove(struct platform_device *op)
dev_set_drvdata(&op->dev, NULL);
+ put_device(&dma_of->dev);
+
return 0;
}
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c
index c09a0fef0fe6..325d5e14fc0d 100644
--- a/drivers/scsi/ufs/tc-dwc-g210-pci.c
+++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c
@@ -130,8 +130,6 @@ tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- INIT_LIST_HEAD(&hba->clk_list_head);
-
hba->vops = &tc_dwc_g210_pci_hba_vops;
err = ufshcd_init(hba, mmio_base, pdev->irq);
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 52b546fb509b..925b0ec7ec54 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -37,7 +37,42 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
-#ifdef CONFIG_PM
+static int ufs_intel_disable_lcc(struct ufs_hba *hba)
+{
+ u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
+ u32 lcc_enable = 0;
+
+ ufshcd_dme_get(hba, attr, &lcc_enable);
+ if (lcc_enable)
+ ufshcd_dme_set(hba, attr, 0);
+
+ return 0;
+}
+
+static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int err = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ err = ufs_intel_disable_lcc(hba);
+ break;
+ case POST_CHANGE:
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
+ .name = "intel-pci",
+ .link_startup_notify = ufs_intel_link_startup_notify,
+};
+
+#ifdef CONFIG_PM_SLEEP
/**
* ufshcd_pci_suspend - suspend power management function
* @pdev: pointer to PCI device handle
@@ -62,7 +97,9 @@ static int ufshcd_pci_resume(struct device *dev)
{
return ufshcd_system_resume(dev_get_drvdata(dev));
}
+#endif /* !CONFIG_PM_SLEEP */
+#ifdef CONFIG_PM
static int ufshcd_pci_runtime_suspend(struct device *dev)
{
return ufshcd_runtime_suspend(dev_get_drvdata(dev));
@@ -75,13 +112,7 @@ static int ufshcd_pci_runtime_idle(struct device *dev)
{
return ufshcd_runtime_idle(dev_get_drvdata(dev));
}
-#else /* !CONFIG_PM */
-#define ufshcd_pci_suspend NULL
-#define ufshcd_pci_resume NULL
-#define ufshcd_pci_runtime_suspend NULL
-#define ufshcd_pci_runtime_resume NULL
-#define ufshcd_pci_runtime_idle NULL
-#endif /* CONFIG_PM */
+#endif /* !CONFIG_PM */
/**
* ufshcd_pci_shutdown - main function to put the controller in reset state
@@ -143,7 +174,7 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- INIT_LIST_HEAD(&hba->clk_list_head);
+ hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;
err = ufshcd_init(hba, mmio_base, pdev->irq);
if (err) {
@@ -160,15 +191,16 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
- .suspend = ufshcd_pci_suspend,
- .resume = ufshcd_pci_resume,
- .runtime_suspend = ufshcd_pci_runtime_suspend,
- .runtime_resume = ufshcd_pci_runtime_resume,
- .runtime_idle = ufshcd_pci_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend,
+ ufshcd_pci_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
+ ufshcd_pci_runtime_resume,
+ ufshcd_pci_runtime_idle)
};
static const struct pci_device_id ufshcd_pci_tbl[] = {
{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
{ } /* terminate list */
};
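The ufshcd-pci change routes variant-specific ops through pci_device_id.driver_data (see the new Intel table entry and the cast in probe). A stripped-down illustration of that hand-off, using hypothetical demo_* names:

#include <linux/module.h>
#include <linux/pci.h>

/* Hypothetical demo_* names; the device ID is the one added above. */
struct demo_ops {
	const char *name;
};

static const struct demo_ops demo_intel_ops = {
	.name = "intel-demo",
};

static const struct pci_device_id demo_pci_tbl[] = {
	/* driver_data carries a pointer to the variant-specific ops. */
	{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&demo_intel_ops },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, demo_pci_tbl);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const struct demo_ops *ops = (const struct demo_ops *)id->driver_data;

	dev_info(&pdev->dev, "probed with %s ops\n", ops->name);
	return 0;
}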
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 8e5e6c04c035..e82bde077296 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -58,8 +58,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
if (!np)
goto out;
- INIT_LIST_HEAD(&hba->clk_list_head);
-
cnt = of_property_count_strings(np, "clock-names");
if (!cnt || (cnt == -EINVAL)) {
dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index ffe8d8608818..5bc9dc14e075 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -314,7 +314,7 @@ static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
- if (!head || list_empty(head))
+ if (list_empty(head))
return;
list_for_each_entry(clki, head, list) {
@@ -869,7 +869,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
ktime_t start = ktime_get();
bool clk_state_changed = false;
- if (!head || list_empty(head))
+ if (list_empty(head))
goto out;
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
@@ -943,7 +943,7 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
- if (!head || list_empty(head))
+ if (list_empty(head))
return false;
list_for_each_entry(clki, head, list) {
@@ -5809,7 +5809,8 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
do {
spin_lock_irqsave(hba->host->host_lock, flags);
if (!(work_pending(&hba->eh_work) ||
- hba->ufshcd_state == UFSHCD_STATE_RESET))
+ hba->ufshcd_state == UFSHCD_STATE_RESET ||
+ hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
break;
spin_unlock_irqrestore(hba->host->host_lock, flags);
dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
@@ -6752,7 +6753,7 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
ktime_t start = ktime_get();
bool clk_state_changed = false;
- if (!head || list_empty(head))
+ if (list_empty(head))
goto out;
ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
@@ -6818,7 +6819,7 @@ static int ufshcd_init_clocks(struct ufs_hba *hba)
struct device *dev = hba->dev;
struct list_head *head = &hba->clk_list_head;
- if (!head || list_empty(head))
+ if (list_empty(head))
goto out;
list_for_each_entry(clki, head, list) {
@@ -7811,6 +7812,8 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
hba->dev = dev;
*hba_handle = hba;
+ INIT_LIST_HEAD(&hba->clk_list_head);
+
out_error:
return err;
}
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index f8dbfeee6c63..8b93197daefe 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -547,7 +547,6 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
dev_dbg(&sc->device->sdev_gendev,
"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
- memset(cmd, 0, sizeof(*cmd));
cmd->sc = sc;
BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
@@ -796,6 +795,16 @@ static int virtscsi_map_queues(struct Scsi_Host *shost)
return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2);
}
+/*
+ * The host guarantees to respond to each command, although I/O
+ * latencies might be higher than on bare metal. Reset the timer
+ * unconditionally to give the host a chance to perform EH.
+ */
+static enum blk_eh_timer_return virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
+{
+ return BLK_EH_RESET_TIMER;
+}
+
static struct scsi_host_template virtscsi_host_template_single = {
.module = THIS_MODULE,
.name = "Virtio SCSI HBA",
@@ -806,6 +815,7 @@ static struct scsi_host_template virtscsi_host_template_single = {
.change_queue_depth = virtscsi_change_queue_depth,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
+ .eh_timed_out = virtscsi_eh_timed_out,
.slave_alloc = virtscsi_device_alloc,
.can_queue = 1024,
@@ -826,6 +836,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
.change_queue_depth = virtscsi_change_queue_depth,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
+ .eh_timed_out = virtscsi_eh_timed_out,
.can_queue = 1024,
.dma_boundary = UINT_MAX,
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index a6a8b60d4902..36f59a1be7e9 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -534,7 +534,6 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
int err;
sc->result = 0;
- memset(shadow, 0, sizeof(*shadow));
shadow->sc = sc;
shadow->act = VSCSIIF_ACT_SCSI_CDB;
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c
index 35bbe288ddb4..a638c3048207 100644
--- a/drivers/sh/intc/virq.c
+++ b/drivers/sh/intc/virq.c
@@ -94,10 +94,8 @@ static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
}
entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC);
- if (!entry) {
- pr_err("can't allocate VIRQ mapping for %d\n", virq);
+ if (!entry)
return -ENOMEM;
- }
entry->irq = virq;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index d4d2d8d9f3e7..cda10719d1d1 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -253,10 +253,6 @@ static int spidev_message(struct spidev_data *spidev,
goto done;
}
k_tmp->rx_buf = rx_buf;
- if (!access_ok(VERIFY_WRITE, (u8 __user *)
- (uintptr_t) u_tmp->rx_buf,
- u_tmp->len))
- goto done;
rx_buf += k_tmp->len;
}
if (u_tmp->tx_buf) {
@@ -304,7 +300,7 @@ static int spidev_message(struct spidev_data *spidev,
rx_buf = spidev->rx_buffer;
for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
if (u_tmp->rx_buf) {
- if (__copy_to_user((u8 __user *)
+ if (copy_to_user((u8 __user *)
(uintptr_t) u_tmp->rx_buf, rx_buf,
u_tmp->len)) {
status = -EFAULT;
@@ -346,7 +342,6 @@ spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
static long
spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
- int err = 0;
int retval = 0;
struct spidev_data *spidev;
struct spi_device *spi;
@@ -358,19 +353,6 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
return -ENOTTY;
- /* Check access direction once here; don't repeat below.
- * IOC_DIR is from the user perspective, while access_ok is
- * from the kernel perspective; so they look reversed.
- */
- if (_IOC_DIR(cmd) & _IOC_READ)
- err = !access_ok(VERIFY_WRITE,
- (void __user *)arg, _IOC_SIZE(cmd));
- if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE)
- err = !access_ok(VERIFY_READ,
- (void __user *)arg, _IOC_SIZE(cmd));
- if (err)
- return -EFAULT;
-
/* guard against device removal before, or while,
* we issue this ioctl.
*/
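The spidev hunks in this file remove the manual access_ok() checks above and, in the hunks that follow, replace the double-underscore accessors with get_user()/put_user()/copy_to_user(), which validate the user pointer themselves. A tiny sketch of the checked-accessor pattern, with hypothetical helper names:

#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical helpers showing the checked accessors used below. */
static int demo_read_u32_param(u32 __user *uptr, u32 *out)
{
	/* get_user() returns -EFAULT on a bad user pointer. */
	return get_user(*out, uptr);
}

static int demo_write_u32_param(u32 __user *uptr, u32 value)
{
	/* put_user() likewise performs its own access check. */
	return put_user(value, uptr);
}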
@@ -393,31 +375,31 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
switch (cmd) {
/* read requests */
case SPI_IOC_RD_MODE:
- retval = __put_user(spi->mode & SPI_MODE_MASK,
+ retval = put_user(spi->mode & SPI_MODE_MASK,
(__u8 __user *)arg);
break;
case SPI_IOC_RD_MODE32:
- retval = __put_user(spi->mode & SPI_MODE_MASK,
+ retval = put_user(spi->mode & SPI_MODE_MASK,
(__u32 __user *)arg);
break;
case SPI_IOC_RD_LSB_FIRST:
- retval = __put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
+ retval = put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
(__u8 __user *)arg);
break;
case SPI_IOC_RD_BITS_PER_WORD:
- retval = __put_user(spi->bits_per_word, (__u8 __user *)arg);
+ retval = put_user(spi->bits_per_word, (__u8 __user *)arg);
break;
case SPI_IOC_RD_MAX_SPEED_HZ:
- retval = __put_user(spidev->speed_hz, (__u32 __user *)arg);
+ retval = put_user(spidev->speed_hz, (__u32 __user *)arg);
break;
/* write requests */
case SPI_IOC_WR_MODE:
case SPI_IOC_WR_MODE32:
if (cmd == SPI_IOC_WR_MODE)
- retval = __get_user(tmp, (u8 __user *)arg);
+ retval = get_user(tmp, (u8 __user *)arg);
else
- retval = __get_user(tmp, (u32 __user *)arg);
+ retval = get_user(tmp, (u32 __user *)arg);
if (retval == 0) {
u32 save = spi->mode;
@@ -436,7 +418,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
case SPI_IOC_WR_LSB_FIRST:
- retval = __get_user(tmp, (__u8 __user *)arg);
+ retval = get_user(tmp, (__u8 __user *)arg);
if (retval == 0) {
u32 save = spi->mode;
@@ -453,7 +435,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
case SPI_IOC_WR_BITS_PER_WORD:
- retval = __get_user(tmp, (__u8 __user *)arg);
+ retval = get_user(tmp, (__u8 __user *)arg);
if (retval == 0) {
u8 save = spi->bits_per_word;
@@ -466,7 +448,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
case SPI_IOC_WR_MAX_SPEED_HZ:
- retval = __get_user(tmp, (__u32 __user *)arg);
+ retval = get_user(tmp, (__u32 __user *)arg);
if (retval == 0) {
u32 save = spi->max_speed_hz;
@@ -516,8 +498,6 @@ spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
struct spi_ioc_transfer *ioc;
u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);
- if (!access_ok(VERIFY_READ, u_ioc, _IOC_SIZE(cmd)))
- return -EFAULT;
/* guard against device removal before, or while,
* we issue this ioctl.
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index dbda4d9a08e7..f8c25ee082ef 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -27,6 +27,8 @@ source "drivers/staging/media/cxd2099/Kconfig"
source "drivers/staging/media/davinci_vpfe/Kconfig"
+source "drivers/staging/media/imx/Kconfig"
+
source "drivers/staging/media/omap4iss/Kconfig"
# Keep LIRC at the end, as it has sub-menus
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index c04600c81264..ac090c5fce30 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_I2C_BCM2048) += bcm2048/
obj-$(CONFIG_DVB_CXD2099) += cxd2099/
+obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx/
obj-$(CONFIG_LIRC_STAGING) += lirc/
obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile
index 466517c7c8e6..be13fab92175 100644
--- a/drivers/staging/media/atomisp/i2c/Makefile
+++ b/drivers/staging/media/atomisp/i2c/Makefile
@@ -19,3 +19,9 @@ obj-$(CONFIG_VIDEO_AP1302) += ap1302.o
obj-$(CONFIG_VIDEO_LM3554) += lm3554.o
+# HACK! While this driver is in bad shape, don't enable several warnings
+# that would be otherwise enabled with W=1
+ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
+ccflags-y += $(call cc-disable-warning, unused-const-variable)
+ccflags-y += $(call cc-disable-warning, missing-prototypes)
+ccflags-y += $(call cc-disable-warning, missing-declarations)
diff --git a/drivers/staging/media/atomisp/i2c/gc0310.c b/drivers/staging/media/atomisp/i2c/gc0310.c
index 1ec616a15086..350fd7fd5b86 100644
--- a/drivers/staging/media/atomisp/i2c/gc0310.c
+++ b/drivers/staging/media/atomisp/i2c/gc0310.c
@@ -1455,6 +1455,7 @@ out_free:
static struct acpi_device_id gc0310_acpi_match[] = {
{"XXGC0310"},
+ {"INT0310"},
{},
};
diff --git a/drivers/staging/media/atomisp/i2c/imx/Makefile b/drivers/staging/media/atomisp/i2c/imx/Makefile
index 6b13a3a66e49..b6578f09546e 100644
--- a/drivers/staging/media/atomisp/i2c/imx/Makefile
+++ b/drivers/staging/media/atomisp/i2c/imx/Makefile
@@ -4,3 +4,10 @@ imx1x5-objs := imx.o drv201.o ad5816g.o dw9714.o dw9719.o dw9718.o vcm.o otp.o o
ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o
obj-$(CONFIG_VIDEO_OV8858) += ov8858_driver.o
+
+# HACK! While this driver is in bad shape, don't enable several warnings
+# that would be otherwise enabled with W=1
+ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
+ccflags-y += $(call cc-disable-warning, unused-const-variable)
+ccflags-y += $(call cc-disable-warning, missing-prototypes)
+ccflags-y += $(call cc-disable-warning, missing-declarations)
diff --git a/drivers/staging/media/atomisp/i2c/lm3554.c b/drivers/staging/media/atomisp/i2c/lm3554.c
index dd9c9c3ffff7..2b170c07aaba 100644
--- a/drivers/staging/media/atomisp/i2c/lm3554.c
+++ b/drivers/staging/media/atomisp/i2c/lm3554.c
@@ -497,7 +497,7 @@ static const struct v4l2_ctrl_ops ctrl_ops = {
.g_volatile_ctrl = lm3554_g_volatile_ctrl
};
-struct v4l2_ctrl_config lm3554_controls[] = {
+static const struct v4l2_ctrl_config lm3554_controls[] = {
{
.ops = &ctrl_ops,
.id = V4L2_CID_FLASH_TIMEOUT,
@@ -825,7 +825,7 @@ static int lm3554_gpio_uninit(struct i2c_client *client)
return 0;
}
-void *lm3554_platform_data_func(struct i2c_client *client)
+static void *lm3554_platform_data_func(struct i2c_client *client)
{
static struct lm3554_platform_data platform_data;
diff --git a/drivers/staging/media/atomisp/i2c/mt9m114.c b/drivers/staging/media/atomisp/i2c/mt9m114.c
index ced175c268d1..3fa915313e53 100644
--- a/drivers/staging/media/atomisp/i2c/mt9m114.c
+++ b/drivers/staging/media/atomisp/i2c/mt9m114.c
@@ -1499,7 +1499,7 @@ static struct v4l2_ctrl_config mt9m114_controls[] = {
.type = V4L2_CTRL_TYPE_MENU,
.min = 0,
.max = 3,
- .step = 1,
+ .step = 0,
.def = 1,
.flags = 0,
},
diff --git a/drivers/staging/media/atomisp/i2c/ov2680.c b/drivers/staging/media/atomisp/i2c/ov2680.c
index 566091035c64..3cabfe54c669 100644
--- a/drivers/staging/media/atomisp/i2c/ov2680.c
+++ b/drivers/staging/media/atomisp/i2c/ov2680.c
@@ -885,11 +885,12 @@ static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
if (flag) {
ret = dev->platform_data->gpio0_ctrl(sd, 1);
usleep_range(10000, 15000);
- ret |= dev->platform_data->gpio1_ctrl(sd, 1);
+ /* Ignore return from second gpio, it may not be there */
+ dev->platform_data->gpio1_ctrl(sd, 1);
usleep_range(10000, 15000);
} else {
- ret = dev->platform_data->gpio1_ctrl(sd, 0);
- ret |= dev->platform_data->gpio0_ctrl(sd, 0);
+ dev->platform_data->gpio1_ctrl(sd, 0);
+ ret = dev->platform_data->gpio0_ctrl(sd, 0);
}
return ret;
}
@@ -1190,9 +1191,8 @@ static int ov2680_detect(struct i2c_client *client)
OV2680_SC_CMMN_SUB_ID, &high);
revision = (u8) high & 0x0f;
- dev_err(&client->dev, "sensor_revision id = 0x%x\n", id);
- dev_err(&client->dev, "detect ov2680 success\n");
- dev_err(&client->dev, "################5##########\n");
+ dev_info(&client->dev, "sensor_revision id = 0x%x\n", id);
+
return 0;
}
@@ -1447,8 +1447,6 @@ static int ov2680_probe(struct i2c_client *client,
void *pdata;
unsigned int i;
- printk("++++ov2680_probe++++\n");
- dev_info(&client->dev, "++++ov2680_probe++++\n");
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
dev_err(&client->dev, "out of memory\n");
@@ -1521,6 +1519,7 @@ out_free:
static struct acpi_device_id ov2680_acpi_match[] = {
{"XXOV2680"},
+ {"OVTI2680"},
{},
};
MODULE_DEVICE_TABLE(acpi, ov2680_acpi_match);
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Makefile b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
index c9c0e1245858..4e3833aaec05 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/Makefile
+++ b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
@@ -1 +1,8 @@
obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
+
+# HACK! While this driver is in bad shape, don't enable several warnings
+# that would be otherwise enabled with W=1
+ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
+ccflags-y += $(call cc-disable-warning, unused-const-variable)
+ccflags-y += $(call cc-disable-warning, missing-prototypes)
+ccflags-y += $(call cc-disable-warning, missing-declarations)
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.c
index 5e9dafe7cc32..d6447398f5ef 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.c
+++ b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.c
@@ -706,7 +706,7 @@ static int ov5693_read_otp_reg_array(struct i2c_client *client, u16 size,
{
u16 index;
int ret;
- u16 *pVal = 0;
+ u16 *pVal = NULL;
for (index = 0; index <= size; index++) {
pVal = (u16 *) (buf + index);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/Makefile b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
index f126a89a08e9..726eaa293c55 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/Makefile
+++ b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
@@ -108,7 +108,6 @@ atomisp-objs += \
css2400/sh_css_metadata.o \
css2400/base/refcount/src/refcount.o \
css2400/base/circbuf/src/circbuf.o \
- css2400/sh_css_irq.o \
css2400/camera/pipe/src/pipe_binarydesc.o \
css2400/camera/pipe/src/pipe_util.o \
css2400/camera/pipe/src/pipe_stagedesc.o \
@@ -353,3 +352,9 @@ DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400
ccflags-y += $(INCLUDES) $(DEFINES) -fno-common
+# HACK! While this driver is in bad shape, don't enable several warnings
+# that would be otherwise enabled with W=1
+ccflags-y += -Wno-unused-const-variable -Wno-missing-prototypes \
+ -Wno-unused-but-set-variable -Wno-missing-declarations \
+ -Wno-suggest-attribute=format -Wno-missing-prototypes \
+ -Wno-implicit-fallthrough
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
index b830b241e2e6..ad2c610d2ce3 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c
@@ -2506,7 +2506,6 @@ static void __configure_capture_pp_input(struct atomisp_sub_device *asd,
struct ia_css_pipe_extra_config *pipe_extra_configs =
&stream_env->pipe_extra_configs[pipe_id];
unsigned int hor_ds_factor = 0, ver_ds_factor = 0;
-#define CEIL_DIV(a, b) ((b) ? ((a) + (b) - 1) / (b) : 0)
if (width == 0 && height == 0)
return;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
index 7ce8803cf6f9..c151c848cf8f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
@@ -130,9 +130,9 @@ static int atomisp_q_one_metadata_buffer(struct atomisp_sub_device *asd,
return 0;
}
-int atomisp_q_one_s3a_buffer(struct atomisp_sub_device *asd,
- enum atomisp_input_stream_id stream_id,
- enum atomisp_css_pipe_id css_pipe_id)
+static int atomisp_q_one_s3a_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id)
{
struct atomisp_s3a_buf *s3a_buf;
struct list_head *s3a_list;
@@ -172,9 +172,9 @@ int atomisp_q_one_s3a_buffer(struct atomisp_sub_device *asd,
return 0;
}
-int atomisp_q_one_dis_buffer(struct atomisp_sub_device *asd,
- enum atomisp_input_stream_id stream_id,
- enum atomisp_css_pipe_id css_pipe_id)
+static int atomisp_q_one_dis_buffer(struct atomisp_sub_device *asd,
+ enum atomisp_input_stream_id stream_id,
+ enum atomisp_css_pipe_id css_pipe_id)
{
struct atomisp_dis_buf *dis_buf;
unsigned long irqflags;
@@ -744,7 +744,7 @@ static void atomisp_subdev_init_struct(struct atomisp_sub_device *asd)
/*
* file operation functions
*/
-unsigned int atomisp_subdev_users(struct atomisp_sub_device *asd)
+static unsigned int atomisp_subdev_users(struct atomisp_sub_device *asd)
{
return asd->video_out_preview.users +
asd->video_out_vf.users +
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c
index 6064bb823a47..aa0526ebaff1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c
@@ -683,7 +683,7 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input)
int ret;
rt_mutex_lock(&isp->mutex);
- if (input >= ATOM_ISP_MAX_INPUTS || input > isp->input_cnt) {
+ if (input >= ATOM_ISP_MAX_INPUTS || input >= isp->input_cnt) {
dev_dbg(isp->dev, "input_cnt: %d\n", isp->input_cnt);
ret = -EINVAL;
goto error;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c
index 996d1bdebad4..48b96048cab4 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_tpg.c
@@ -56,6 +56,7 @@ static int tpg_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_format *format)
{
struct v4l2_mbus_framefmt *fmt = &format->format;
+
if (format->pad)
return -EINVAL;
/* only raw8 grbg is supported by TPG */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c
index e3fdbdba0b34..a543def739fc 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c
@@ -51,12 +51,12 @@
/* G-Min addition: pull this in from intel_mid_pm.h */
#define CSTATE_EXIT_LATENCY_C1 1
-static uint skip_fwload = 0;
+static uint skip_fwload;
module_param(skip_fwload, uint, 0644);
MODULE_PARM_DESC(skip_fwload, "Skip atomisp firmware load");
/* set reserved memory pool size in page */
-unsigned int repool_pgnr;
+static unsigned int repool_pgnr;
module_param(repool_pgnr, uint, 0644);
MODULE_PARM_DESC(repool_pgnr,
"Set the reserved memory pool size in page (default:0)");
@@ -384,7 +384,7 @@ done:
* WA for DDR DVFS enable/disable
* By default, ISP will force DDR DVFS 1600MHz before disable DVFS
*/
-void punit_ddr_dvfs_enable(bool enable)
+static void punit_ddr_dvfs_enable(bool enable)
{
int reg = intel_mid_msgbus_read32(PUNIT_PORT, MRFLD_ISPSSDVFS);
int door_bell = 1 << 8;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/Makefile b/drivers/staging/media/atomisp/pci/atomisp2/css2400/Makefile
index 04defaafa02c..ee5631b0e635 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/Makefile
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/Makefile
@@ -1,4 +1,2 @@
ccflags-y += -DISP2400B0
ISP2400B0 := y
-
-include $(srctree)/$(src)/../Makefile.common
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
index 48d84bc0ad9e..f74b405b0f39 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/math_support.h
@@ -62,15 +62,15 @@
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#ifdef ISP2401
-#define ROUND_DIV(a, b) ((b) ? ((a) + ((b) >> 1)) / (b) : 0)
+#define ROUND_DIV(a, b) (((b) != 0) ? ((a) + ((b) >> 1)) / (b) : 0)
#endif
-#define CEIL_DIV(a, b) ((b) ? ((a) + (b) - 1) / (b) : 0)
+#define CEIL_DIV(a, b) (((b) != 0) ? ((a) + (b) - 1) / (b) : 0)
#define CEIL_MUL(a, b) (CEIL_DIV(a, b) * (b))
#define CEIL_MUL2(a, b) (((a) + (b) - 1) & ~((b) - 1))
#define CEIL_SHIFT(a, b) (((a) + (1 << (b)) - 1)>>(b))
#define CEIL_SHIFT_MUL(a, b) (CEIL_SHIFT(a, b) << (b))
#ifdef ISP2401
-#define ROUND_HALF_DOWN_DIV(a, b) ((b) ? ((a) + (b / 2) - 1) / (b) : 0)
+#define ROUND_HALF_DOWN_DIV(a, b) (((b) != 0) ? ((a) + (b / 2) - 1) / (b) : 0)
#define ROUND_HALF_DOWN_MUL(a, b) (ROUND_HALF_DOWN_DIV(a, b) * (b))
#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
index 568631698a3d..c53241a7a281 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/hive_isp_css_include/string_support.h
@@ -72,9 +72,8 @@ static size_t strnlen_s(
return 0;
}
- for (ix=0;
- ((src_str[ix] != '\0') && (ix< max_len));
- ++ix) /*Nothing else to do*/;
+ for (ix = 0; ix < max_len && src_str[ix] != '\0'; ix++)
+ ;
/* On Error, it will return src_size == max_len*/
return ix;
@@ -118,7 +117,7 @@ STORAGE_CLASS_INLINE int strncpy_s(
/* dest_str is big enough for the len */
strncpy(dest_str, src_str, len);
- dest_str[len+1] = '\0';
+ dest_str[len] = '\0';
return 0;
}
@@ -158,7 +157,7 @@ STORAGE_CLASS_INLINE int strcpy_s(
/* dest_str is big enough for the len */
strncpy(dest_str, src_str, len);
- dest_str[len+1] = '\0';
+ dest_str[len] = '\0';
return 0;
}
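The strncpy_s()/strcpy_s() change above fixes a classic off-by-one: after copying len bytes, the terminator belongs at index len, not len + 1. A small standalone illustration (hypothetical helper; assumes dst_size > 0):

#include <string.h>

/*
 * Hypothetical helper; assumes dst_size > 0.  After strncpy() of len
 * bytes, indices 0..len-1 hold data, so the terminator goes at index
 * len.  Writing it at len + 1 (the old code) left byte len stale and
 * touched one byte past the intended range.
 */
static void demo_bounded_copy(char *dst, size_t dst_size, const char *src)
{
	size_t len = strlen(src);

	if (len >= dst_size)
		len = dst_size - 1;	/* truncate, keep room for '\0' */

	strncpy(dst, src, len);
	dst[len] = '\0';
}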
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu_private.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu_private.h
index 7c8500903b5c..1021e4f380a5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu_private.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/ia_css_mmu_private.h
@@ -1,4 +1,3 @@
-#ifdef ISP2401
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
@@ -28,4 +27,3 @@ void
sh_css_mmu_set_page_table_base_index(hrt_data base_index);
#endif /* __IA_CSS_MMU_PRIVATE_H */
-#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
index 0daab1176865..9478c12abe89 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
@@ -265,9 +265,9 @@ ia_css_translate_dvs_statistics(
assert(isp_stats->hor_proj != NULL);
assert(isp_stats->ver_proj != NULL);
- IA_CSS_ENTER("hproj=%p, vproj=%p, haddr=%x, vaddr=%x",
- host_stats->hor_proj, host_stats->ver_proj,
- isp_stats->hor_proj, isp_stats->ver_proj);
+ IA_CSS_ENTER("hproj=%p, vproj=%p, haddr=%p, vaddr=%p",
+ host_stats->hor_proj, host_stats->ver_proj,
+ isp_stats->hor_proj, isp_stats->ver_proj);
hor_num_isp = host_stats->grid.aligned_height;
ver_num_isp = host_stats->grid.aligned_width;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
index 5a0c103e9eb7..9bccb6473154 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
@@ -213,7 +213,7 @@ ia_css_translate_dvs2_statistics(
"hor_coefs.even_real=%p, hor_coefs.even_imag=%p, "
"ver_coefs.odd_real=%p, ver_coefs.odd_imag=%p, "
"ver_coefs.even_real=%p, ver_coefs.even_imag=%p, "
- "haddr=%x, vaddr=%x",
+ "haddr=%p, vaddr=%p",
host_stats->hor_prod.odd_real, host_stats->hor_prod.odd_imag,
host_stats->hor_prod.even_real, host_stats->hor_prod.even_imag,
host_stats->ver_prod.odd_real, host_stats->ver_prod.odd_imag,
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c
index 804c19ab4485..222a7bd7f176 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c
@@ -55,7 +55,7 @@ ia_css_tnr_dump(
"tnr_coef", tnr->coef);
ia_css_debug_dtrace(level, "\t%-32s = %d\n",
"tnr_threshold_Y", tnr->threshold_Y);
- ia_css_debug_dtrace(level, "\t%-32s = %d\n"
+ ia_css_debug_dtrace(level, "\t%-32s = %d\n",
"tnr_threshold_C", tnr->threshold_C);
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_const.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_const.h
index 005eaaa9eb6c..2f215dc2ac32 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_const.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_const.h
@@ -398,17 +398,6 @@ more details.
* so the calc for the output buffer vmem size is:
* ((width[vectors]/num_of_stripes) + 2[vectors])
*/
-#if defined(HAS_RES_MGR)
-#define MAX_VECTORS_PER_OUTPUT_LINE \
- (CEIL_DIV(CEIL_DIV(ISP_MAX_OUTPUT_WIDTH, ISP_NUM_STRIPES) + ISP_LEFT_PADDING, ISP_VEC_NELEMS) + \
- ITERATOR_VECTOR_INCREMENT)
-
-#define MAX_VECTORS_PER_INPUT_LINE CEIL_DIV(ISP_MAX_INPUT_WIDTH, ISP_VEC_NELEMS)
-#define MAX_VECTORS_PER_INPUT_STRIPE (CEIL_ROUND_DIV_STRIPE(CEIL_DIV(ISP_MAX_INPUT_WIDTH, ISP_VEC_NELEMS) , \
- ISP_NUM_STRIPES, \
- ISP_LEFT_PADDING_VECS) + \
- ITERATOR_VECTOR_INCREMENT)
-#else /* !defined(HAS_RES_MGR)*/
#define MAX_VECTORS_PER_OUTPUT_LINE \
CEIL_DIV(CEIL_DIV(ISP_MAX_OUTPUT_WIDTH, ISP_NUM_STRIPES) + ISP_LEFT_PADDING, ISP_VEC_NELEMS)
@@ -417,7 +406,6 @@ more details.
#define MAX_VECTORS_PER_INPUT_STRIPE CEIL_ROUND_DIV_STRIPE(MAX_VECTORS_PER_INPUT_LINE, \
ISP_NUM_STRIPES, \
ISP_LEFT_PADDING_VECS)
-#endif /* HAS_RES_MGR */
/* Add 2 for left cropping */
@@ -470,15 +458,11 @@ more details.
#define RAW_BUF_LINES ((ENABLE_RAW_BINNING || ENABLE_FIXED_BAYER_DS) ? 4 : 2)
-#if defined(HAS_RES_MGR)
-#define RAW_BUF_STRIDE (MAX_VECTORS_PER_INPUT_STRIPE)
-#else /* !defined(HAS_RES_MGR) */
#define RAW_BUF_STRIDE \
(BINARY_ID == SH_CSS_BINARY_ID_POST_ISP ? MAX_VECTORS_PER_INPUT_CHUNK : \
ISP_NUM_STRIPES > 1 ? MAX_VECTORS_PER_INPUT_STRIPE+_ISP_EXTRA_PADDING_VECS : \
!ENABLE_CONTINUOUS ? MAX_VECTORS_PER_INPUT_LINE : \
MAX_VECTORS_PER_INPUT_CHUNK)
-#endif /* HAS_RES_MGR */
/* [isp vmem] table size[vectors] per line per color (GR,R,B,GB),
multiples of NWAY */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_exprs.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_exprs.h
index 8b59a8caec52..e625ba62cc15 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_exprs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/isp/modes/interface/isp_exprs.h
@@ -214,24 +214,6 @@ more details.
/******* STRIPING-RELATED MACROS *******/
#define NO_STRIPING (ISP_NUM_STRIPES == 1)
-#if defined(HAS_RES_MGR)
-
-#define ISP_OUTPUT_CHUNK_VECS ISP_INTERNAL_WIDTH_VECS
-
-#if defined(__ISP)
-#define VECTORS_PER_LINE ISP_INTERNAL_WIDTH_VECS
-#else
-#define VECTORS_PER_LINE \
- (NO_STRIPING ? ISP_INTERNAL_WIDTH_VECS \
- : ISP_IO_STRIPE_WIDTH_VECS(ISP_INTERNAL_WIDTH_VECS, ISP_LEFT_PADDING_VECS, ISP_NUM_STRIPES, ISP_MIN_STRIPE_WIDTH) )
-#endif
-
-#define VECTORS_PER_INPUT_LINE \
- (NO_STRIPING ? ISP_INPUT_WIDTH_VECS \
- : ISP_IO_STRIPE_WIDTH_VECS(ISP_INPUT_WIDTH_VECS, ISP_LEFT_PADDING_VECS, ISP_NUM_STRIPES, ISP_MIN_STRIPE_WIDTH) )
-
-#else
-
#define ISP_OUTPUT_CHUNK_VECS \
(NO_STRIPING ? CEIL_DIV_CHUNKS(ISP_OUTPUT_VECS_EXTRA_CROP, OUTPUT_NUM_CHUNKS) \
: ISP_IO_STRIPE_WIDTH_VECS(ISP_OUTPUT_VECS_EXTRA_CROP, ISP_LEFT_PADDING_VECS, ISP_NUM_STRIPES, ISP_MIN_STRIPE_WIDTH) )
@@ -244,7 +226,6 @@ more details.
(NO_STRIPING ? ISP_INPUT_WIDTH_VECS \
: ISP_IO_STRIPE_WIDTH_VECS(ISP_INPUT_WIDTH_VECS, ISP_LEFT_PADDING_VECS, ISP_NUM_STRIPES, ISP_MIN_STRIPE_WIDTH)+_ISP_EXTRA_PADDING_VECS)
-#endif
#define ISP_MAX_VF_OUTPUT_STRIPE_VECS \
(NO_STRIPING ? ISP_MAX_VF_OUTPUT_VECS \
@@ -282,11 +263,7 @@ more details.
#define OUTPUT_VECTORS_PER_CHUNK CEIL_DIV_CHUNKS(VECTORS_PER_LINE,OUTPUT_NUM_CHUNKS)
/* should be even?? */
-#if !defined(HAS_RES_MGR)
#define OUTPUT_C_VECTORS_PER_CHUNK CEIL_DIV(OUTPUT_VECTORS_PER_CHUNK, 2)
-#else
-#define OUTPUT_C_VECTORS_PER_CHUNK CEIL_DIV(MAX_VECTORS_PER_CHUNK, 2)
-#endif
#ifndef ISP2401
/**** SCTBL defs *******/
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
index a8b93a756e41..9f8a125f0d74 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/binary/src/binary.c
@@ -36,10 +36,6 @@
#endif
#include "camera/pipe/interface/ia_css_pipe_binarydesc.h"
-#if defined(HAS_RES_MGR)
-#include <components/resolutions_mgr/src/host/resolutions_mgr.host.h>
-#include <components/acc_cluster/acc_dvs_stat/host/dvs_stat.host.h>
-#endif
#include "memory_access.h"
@@ -110,10 +106,6 @@ ia_css_binary_internal_res(const struct ia_css_frame_info *in_info,
internal_res->height = __ISP_INTERNAL_HEIGHT(isp_tmp_internal_height,
info->pipeline.top_cropping,
binary_dvs_env.height);
-#if defined(HAS_RES_MGR)
- internal_res->height = (bds_out_info == NULL) ? internal_res->height : bds_out_info->res.height;
- internal_res->width = (bds_out_info == NULL) ? internal_res->width: bds_out_info->res.width;
-#endif
}
#ifndef ISP2401
@@ -787,25 +779,6 @@ ia_css_binary_dvs_stat_grid_info(
struct ia_css_grid_info *info,
struct ia_css_pipe *pipe)
{
-#if defined(HAS_RES_MGR)
- struct ia_css_dvs_stat_grid_info *dvs_stat_info;
- unsigned int i;
-
- assert(binary != NULL);
- assert(info != NULL);
- dvs_stat_info = &info->dvs_grid.dvs_stat_grid_info;
-
- if (binary->info->sp.enable.dvs_stats) {
- for (i = 0; i < IA_CSS_SKC_DVS_STAT_NUM_OF_LEVELS; i++) {
- dvs_stat_info->grd_cfg[i].grd_start.enable = 1;
- }
- ia_css_dvs_stat_grid_calculate(pipe, dvs_stat_info);
- }
- else {
- memset(dvs_stat_info, 0, sizeof(struct ia_css_dvs_stat_grid_info));
- }
-
-#endif
(void)pipe;
sh_css_binary_common_grid_info(binary, info);
return;
@@ -1088,9 +1061,6 @@ binary_in_frame_padded_width(int in_frame_width,
/* in other cases, the left padding pixels are always 128 */
nr_of_left_paddings = 2*ISP_VEC_NELEMS;
#endif
-#if defined(HAS_RES_MGR)
- (void)dvs_env_width;
-#endif
if (need_scaling) {
/* In SDV use-case, we need to match left-padding of
* primary and the video binary. */
@@ -1101,9 +1071,7 @@ binary_in_frame_padded_width(int in_frame_width,
2*ISP_VEC_NELEMS);
} else {
/* Different than before, we do left&right padding. */
-#if !defined(HAS_RES_MGR) /* dvs env is included already */
in_frame_width += dvs_env_width;
-#endif
rval =
CEIL_MUL(in_frame_width +
(left_cropping ? nr_of_left_paddings : 0),
@@ -1214,10 +1182,8 @@ ia_css_binary_fill_info(const struct ia_css_binary_xinfo *xinfo,
binary->in_frame_info.res.width = in_info->res.width + info->pipeline.left_cropping;
binary->in_frame_info.res.height = in_info->res.height + info->pipeline.top_cropping;
-#if !defined(HAS_RES_MGR) /* dvs env is included already */
binary->in_frame_info.res.width += dvs_env_width;
binary->in_frame_info.res.height += dvs_env_height;
-#endif
binary->in_frame_info.padded_width =
binary_in_frame_padded_width(in_info->res.width,
@@ -1658,7 +1624,7 @@ ia_css_binary_find(struct ia_css_binary_descr *descr,
candidate->internal.max_height);
continue;
}
- if (!candidate->enable.ds && need_ds & !(xcandidate->num_output_pins > 1)) {
+ if (!candidate->enable.ds && need_ds && !(xcandidate->num_output_pins > 1)) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_binary_find() [%d] continue: !%d && %d\n",
__LINE__, candidate->enable.ds, (int)need_ds);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
index ed33d4c4c84a..5d40afd482f5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/bufq/src/bufq.c
@@ -239,7 +239,7 @@ static ia_css_queue_t *bufq_get_qhandle(
enum sh_css_queue_id id,
int thread)
{
- ia_css_queue_t *q = 0;
+ ia_css_queue_t *q = NULL;
switch (type) {
case sh_css_host2sp_buffer_queue:
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
index be7df3a30c21..91c105cc6204 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/interface/ia_css_debug.h
@@ -137,6 +137,7 @@ ia_css_debug_vdtrace(unsigned int level, const char *fmt, va_list args)
sh_css_vprint(fmt, args);
}
+__printf(2, 3)
extern void ia_css_debug_dtrace(unsigned int level, const char *fmt, ...);
/*! @brief Dump sp thread's stack contents
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c
index 030810bd0878..0fa7cb2423d8 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/debug/src/ia_css_debug.c
@@ -176,7 +176,6 @@ void ia_css_debug_dtrace(unsigned int level, const char *fmt, ...)
va_end(ap);
}
-#if !defined(HRT_UNSCHED)
static void debug_dump_long_array_formatted(
const sp_ID_t sp_id,
hrt_address stack_sp_addr,
@@ -249,12 +248,6 @@ void ia_css_debug_dump_sp_stack_info(void)
{
debug_dump_sp_stack_info(SP0_ID);
}
-#else
-/* Empty def for crun */
-void ia_css_debug_dump_sp_stack_info(void)
-{
-}
-#endif /* #if !HRT_UNSCHED */
void ia_css_debug_set_dtrace_level(const unsigned int trace_level)
@@ -3148,8 +3141,8 @@ ia_css_debug_dump_pipe_config(
ia_css_debug_dump_frame_info(&config->vf_output_info[i],
"vf_output_info");
}
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "acc_extension: 0x%x\n",
- config->acc_extension);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "acc_extension: %p\n",
+ config->acc_extension);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "num_acc_stages: %d\n",
config->num_acc_stages);
ia_css_debug_dump_capture_config(&config->default_capture_config);
@@ -3179,7 +3172,7 @@ ia_css_debug_dump_stream_config_source(
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "timeout: %d\n",
config->source.port.timeout);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "compression: %d\n",
- config->source.port.compression);
+ config->source.port.compression.type);
break;
case IA_CSS_INPUT_MODE_TPG:
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "source.tpg\n");
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
index b36d7b00ebe8..d9178e80dab2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/spctrl/src/spctrl.c
@@ -57,17 +57,11 @@ enum ia_css_err ia_css_spctrl_load_fw(sp_ID_t sp_id,
hrt_vaddress code_addr = mmgr_NULL;
struct ia_css_sp_init_dmem_cfg *init_dmem_cfg;
- if ((sp_id >= N_SP_ID) || (spctrl_cfg == 0))
+ if ((sp_id >= N_SP_ID) || (spctrl_cfg == NULL))
return IA_CSS_ERR_INVALID_ARGUMENTS;
spctrl_cofig_info[sp_id].code_addr = mmgr_NULL;
-#if defined(HRT_UNSCHED)
- (void)init_dmem_cfg;
- code_addr = mmgr_malloc(1);
- if (code_addr == mmgr_NULL)
- return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
-#else
init_dmem_cfg = &spctrl_cofig_info[sp_id].dmem_config;
init_dmem_cfg->dmem_data_addr = spctrl_cfg->dmem_data_addr;
init_dmem_cfg->dmem_bss_addr = spctrl_cfg->dmem_bss_addr;
@@ -104,7 +98,7 @@ enum ia_css_err ia_css_spctrl_load_fw(sp_ID_t sp_id,
code_addr = mmgr_NULL;
return IA_CSS_ERR_INTERNAL_ERROR;
}
-#endif
+
spctrl_cofig_info[sp_id].sp_entry = spctrl_cfg->sp_entry;
spctrl_cofig_info[sp_id].code_addr = code_addr;
spctrl_cofig_info[sp_id].program_name = spctrl_cfg->program_name;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
index 73c76583610a..471f2be974e2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c
@@ -64,7 +64,7 @@
#include "input_system.h"
#endif
#include "mmu_device.h" /* mmu_set_page_table_base_index(), ... */
-//#include "ia_css_mmu_private.h" /* sh_css_mmu_set_page_table_base_index() */
+#include "ia_css_mmu_private.h" /* sh_css_mmu_set_page_table_base_index() */
#include "gdc_device.h" /* HRT_GDC_N */
#include "dma.h" /* dma_set_max_burst_size() */
#include "irq.h" /* virq */
@@ -98,18 +98,8 @@ static int thread_alive;
#include "isp/modes/interface/input_buf.isp.h"
-#if defined(HAS_BL)
-#include "support/bootloader/interface/ia_css_blctrl.h"
-#endif
-#if defined(HAS_RES_MGR)
-#include "components/acc_cluster/gen/host/acc_cluster.host.h"
-#endif
-
/* Name of the sp program: should not be built-in */
#define SP_PROG_NAME "sp"
-#if defined(HAS_BL)
-#define BL_PROG_NAME "bootloader"
-#endif
/* Size of Refcount List */
#define REFCOUNT_SIZE 1000
@@ -252,11 +242,6 @@ ia_css_reset_defaults(struct sh_css* css);
static void
sh_css_init_host_sp_control_vars(void);
-#ifndef ISP2401
-static void
-sh_css_mmu_set_page_table_base_index(hrt_data base_index);
-
-#endif
static enum ia_css_err set_num_primary_stages(unsigned int *num, enum ia_css_pipe_version version);
static bool
@@ -385,13 +370,8 @@ sh_css_hmm_buffer_record_uninit(void);
static void
sh_css_hmm_buffer_record_reset(struct sh_css_hmm_buffer_record *buffer_record);
-#ifndef ISP2401
-static bool
-sh_css_hmm_buffer_record_acquire(struct ia_css_rmgr_vbuf_handle *h_vbuf,
-#else
static struct sh_css_hmm_buffer_record
*sh_css_hmm_buffer_record_acquire(struct ia_css_rmgr_vbuf_handle *h_vbuf,
-#endif
enum ia_css_buffer_type type,
hrt_address kernel_ptr);
@@ -1475,30 +1455,17 @@ static void start_pipe(
copy_ovrd,
input_mode,
&me->stream->config.metadata_config,
-#ifndef ISP2401
&me->stream->info.metadata_info
-#else
- &me->stream->info.metadata_info,
-#endif
#if !defined(HAS_NO_INPUT_SYSTEM)
-#ifndef ISP2401
- , (input_mode==IA_CSS_INPUT_MODE_MEMORY)?
-#else
- (input_mode == IA_CSS_INPUT_MODE_MEMORY) ?
-#endif
+ ,(input_mode==IA_CSS_INPUT_MODE_MEMORY) ?
(mipi_port_ID_t)0 :
-#ifndef ISP2401
me->stream->config.source.port.port
-#else
- me->stream->config.source.port.port,
#endif
+#ifdef ISP2401
+ ,&me->config.internal_frame_origin_bqs_on_sctbl,
+ me->stream->isp_params_configs
#endif
-#ifndef ISP2401
- );
-#else
- &me->config.internal_frame_origin_bqs_on_sctbl,
- me->stream->isp_params_configs);
-#endif
+ );
if (me->config.mode != IA_CSS_PIPE_MODE_COPY) {
struct ia_css_pipeline_stage *stage;
@@ -1571,34 +1538,7 @@ enable_interrupts(enum ia_css_irq_type irq_type)
}
#endif
-#if defined(HAS_BL)
-static bool sh_css_setup_blctrl_config(const struct ia_css_fw_info *fw,
- const char *program,
- ia_css_blctrl_cfg *blctrl_cfg)
-{
- if((fw == NULL)||(blctrl_cfg == NULL))
- return false;
- blctrl_cfg->bl_entry = 0;
- blctrl_cfg->program_name = (char *)(program);
-
-#if !defined(HRT_UNSCHED)
- blctrl_cfg->ddr_data_offset = fw->blob.data_source;
- blctrl_cfg->dmem_data_addr = fw->blob.data_target;
- blctrl_cfg->dmem_bss_addr = fw->blob.bss_target;
- blctrl_cfg->data_size = fw->blob.data_size ;
- blctrl_cfg->bss_size = fw->blob.bss_size;
- blctrl_cfg->blctrl_state_dmem_addr = fw->info.bl.sw_state;
- blctrl_cfg->blctrl_dma_cmd_list = fw->info.bl.dma_cmd_list;
- blctrl_cfg->blctrl_nr_of_dma_cmds = fw->info.bl.num_dma_cmds;
-
- blctrl_cfg->code_size = fw->blob.size;
- blctrl_cfg->code = fw->blob.code;
- blctrl_cfg->bl_entry = fw->info.bl.bl_entry; /* entry function ptr on Bootloader */
-#endif
- return true;
-}
-#endif
static bool sh_css_setup_spctrl_config(const struct ia_css_fw_info *fw,
const char * program,
ia_css_spctrl_cfg *spctrl_cfg)
@@ -1608,7 +1548,6 @@ static bool sh_css_setup_spctrl_config(const struct ia_css_fw_info *fw,
spctrl_cfg->sp_entry = 0;
spctrl_cfg->program_name = (char *)(program);
-#if !defined(HRT_UNSCHED)
spctrl_cfg->ddr_data_offset = fw->blob.data_source;
spctrl_cfg->dmem_data_addr = fw->blob.data_target;
spctrl_cfg->dmem_bss_addr = fw->blob.bss_target;
@@ -1621,7 +1560,7 @@ static bool sh_css_setup_spctrl_config(const struct ia_css_fw_info *fw,
spctrl_cfg->code_size = fw->blob.size;
spctrl_cfg->code = fw->blob.code;
spctrl_cfg->sp_entry = fw->info.sp.sp_entry; /* entry function ptr on SP */
-#endif
+
return true;
}
void
@@ -1708,9 +1647,6 @@ ia_css_init(const struct ia_css_env *env,
{
enum ia_css_err err;
ia_css_spctrl_cfg spctrl_cfg;
-#if defined(HAS_BL)
- ia_css_blctrl_cfg blctrl_cfg;
-#endif
void (*flush_func)(struct ia_css_acc_fw *fw);
hrt_data select, enable;
@@ -1863,26 +1799,6 @@ ia_css_init(const struct ia_css_env *env,
return err;
}
-#if defined(HAS_BL)
- if (!sh_css_setup_blctrl_config(&sh_css_bl_fw, BL_PROG_NAME, &blctrl_cfg))
- return IA_CSS_ERR_INTERNAL_ERROR;
- err = ia_css_blctrl_load_fw(&blctrl_cfg);
- if (err != IA_CSS_SUCCESS) {
- IA_CSS_LEAVE_ERR(err);
- return err;
- }
-
-#ifdef ISP2401
- err = ia_css_blctrl_add_target_fw_info(&sh_css_sp_fw, IA_CSS_SP0,
- get_sp_code_addr(SP0_ID));
-
-#endif
- if (err != IA_CSS_SUCCESS) {
- IA_CSS_LEAVE_ERR(err);
- return err;
- }
-#endif /* HAS_BL */
-
#if WITH_PC_MONITORING
if (!thread_alive) {
thread_alive++;
@@ -2003,7 +1919,7 @@ ia_css_enable_isys_event_queue(bool enable)
void *sh_css_malloc(size_t size)
{
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_malloc() enter: size=%d\n",size);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_malloc() enter: size=%zu\n",size);
/* FIXME: This first test can probably go away */
if (size == 0)
return NULL;
@@ -2016,7 +1932,7 @@ void *sh_css_calloc(size_t N, size_t size)
{
void *p;
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_calloc() enter: N=%d, size=%d\n",N,size);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_calloc() enter: N=%zu, size=%zu\n",N,size);
/* FIXME: this test can probably go away */
if (size > 0) {
@@ -2059,7 +1975,8 @@ map_sp_threads(struct ia_css_stream *stream, bool map)
enum ia_css_pipe_id pipe_id;
assert(stream != NULL);
- IA_CSS_ENTER_PRIVATE("stream = %p, map = %p", stream, map);
+ IA_CSS_ENTER_PRIVATE("stream = %p, map = %s",
+ stream, map ? "true" : "false");
if (stream == NULL) {
IA_CSS_LEAVE_ERR_PRIVATE(IA_CSS_ERR_INVALID_ARGUMENTS);
@@ -2611,15 +2528,8 @@ ia_css_pipe_destroy(struct ia_css_pipe *pipe)
break;
}
-#ifndef ISP2401
- if (pipe->scaler_pp_lut != mmgr_NULL) {
- hmm_free(pipe->scaler_pp_lut);
- pipe->scaler_pp_lut = mmgr_NULL;
- }
-#else
sh_css_params_free_gdc_lut(pipe->scaler_pp_lut);
pipe->scaler_pp_lut = mmgr_NULL;
-#endif
my_css.active_pipes[ia_css_pipe_get_pipe_num(pipe)] = NULL;
sh_css_pipe_free_shading_table(pipe);
@@ -2666,9 +2576,6 @@ ia_css_uninit(void)
}
ia_css_spctrl_unload_fw(SP0_ID);
sh_css_sp_set_sp_running(false);
-#if defined(HAS_BL)
- ia_css_blctrl_unload_fw();
-#endif
#if defined(USE_INPUT_SYSTEM_VERSION_2) || defined(USE_INPUT_SYSTEM_VERSION_2401)
/* check and free any remaining mipi frames */
free_mipi_frames(NULL);
@@ -2683,23 +2590,6 @@ ia_css_uninit(void)
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_uninit() leave: return_void\n");
}
-#ifndef ISP2401
-/* Deprecated, this is an HRT backend function (memory_access.h) */
-static void
-sh_css_mmu_set_page_table_base_index(hrt_data base_index)
-{
- int i;
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "sh_css_mmu_set_page_table_base_index() enter: base_index=0x%08x\n",base_index);
- my_css.page_table_base_index = base_index;
- for (i = 0; i < (int)N_MMU_ID; i++) {
- mmu_ID_t mmu_id = (mmu_ID_t)i;
- mmu_set_page_table_base_index(mmu_id, base_index);
- mmu_invalidate_cache(mmu_id);
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "sh_css_mmu_set_page_table_base_index() leave: return_void\n");
-}
-
-#endif
#if defined(HAS_IRQ_MAP_VERSION_2)
enum ia_css_err ia_css_irq_translate(
unsigned int *irq_infos)
@@ -2766,7 +2656,7 @@ enum ia_css_err ia_css_irq_translate(
*irq_infos = infos;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_irq_translate() "
- "leave: irq_infos=%p\n", infos);
+ "leave: irq_infos=%u\n", infos);
return IA_CSS_SUCCESS;
}
@@ -3004,11 +2894,8 @@ load_preview_binaries(struct ia_css_pipe *pipe)
#endif
/* preview only has 1 output pin now */
struct ia_css_frame_info *pipe_out_info = &pipe->output_info[0];
-#ifdef ISP2401
struct ia_css_preview_settings *mycs = &pipe->pipe_settings.preview;
-#endif
-
IA_CSS_ENTER_PRIVATE("");
assert(pipe != NULL);
assert(pipe->stream != NULL);
@@ -3020,11 +2907,7 @@ load_preview_binaries(struct ia_css_pipe *pipe)
sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
#endif
-#ifndef ISP2401
- if (pipe->pipe_settings.preview.preview_binary.info)
-#else
if (mycs->preview_binary.info)
-#endif
return IA_CSS_SUCCESS;
err = ia_css_util_check_input(&pipe->stream->config, false, false);
@@ -3077,12 +2960,7 @@ load_preview_binaries(struct ia_css_pipe *pipe)
&prev_vf_info);
if (err != IA_CSS_SUCCESS)
return err;
- err = ia_css_binary_find(&preview_descr,
-#ifndef ISP2401
- &pipe->pipe_settings.preview.preview_binary);
-#else
- &mycs->preview_binary);
-#endif
+ err = ia_css_binary_find(&preview_descr, &mycs->preview_binary);
if (err != IA_CSS_SUCCESS)
return err;
@@ -3098,24 +2976,15 @@ load_preview_binaries(struct ia_css_pipe *pipe)
#endif
/* The vf_pp binary is needed when (further) YUV downscaling is required */
-#ifndef ISP2401
- need_vf_pp |= pipe->pipe_settings.preview.preview_binary.out_frame_info[0].res.width != pipe_out_info->res.width;
- need_vf_pp |= pipe->pipe_settings.preview.preview_binary.out_frame_info[0].res.height != pipe_out_info->res.height;
-#else
need_vf_pp |= mycs->preview_binary.out_frame_info[0].res.width != pipe_out_info->res.width;
need_vf_pp |= mycs->preview_binary.out_frame_info[0].res.height != pipe_out_info->res.height;
-#endif
/* When vf_pp is needed, then the output format of the selected
* preview binary must be yuv_line. If this is not the case,
* then the preview binary selection is done again.
*/
if (need_vf_pp &&
-#ifndef ISP2401
- (pipe->pipe_settings.preview.preview_binary.out_frame_info[0].format != IA_CSS_FRAME_FORMAT_YUV_LINE)) {
-#else
(mycs->preview_binary.out_frame_info[0].format != IA_CSS_FRAME_FORMAT_YUV_LINE)) {
-#endif
/* Preview step 2 */
if (pipe->vf_yuv_ds_input_info.res.width)
@@ -3136,11 +3005,7 @@ load_preview_binaries(struct ia_css_pipe *pipe)
if (err != IA_CSS_SUCCESS)
return err;
err = ia_css_binary_find(&preview_descr,
-#ifndef ISP2401
- &pipe->pipe_settings.preview.preview_binary);
-#else
&mycs->preview_binary);
-#endif
if (err != IA_CSS_SUCCESS)
return err;
}
@@ -3150,18 +3015,10 @@ load_preview_binaries(struct ia_css_pipe *pipe)
/* Viewfinder post-processing */
ia_css_pipe_get_vfpp_binarydesc(pipe, &vf_pp_descr,
-#ifndef ISP2401
- &pipe->pipe_settings.preview.preview_binary.out_frame_info[0],
-#else
&mycs->preview_binary.out_frame_info[0],
-#endif
pipe_out_info);
err = ia_css_binary_find(&vf_pp_descr,
-#ifndef ISP2401
- &pipe->pipe_settings.preview.vf_pp_binary);
-#else
&mycs->vf_pp_binary);
-#endif
if (err != IA_CSS_SUCCESS)
return err;
}
@@ -3187,13 +3044,8 @@ load_preview_binaries(struct ia_css_pipe *pipe)
/* Copy */
if (need_isp_copy_binary) {
err = load_copy_binary(pipe,
-#ifndef ISP2401
- &pipe->pipe_settings.preview.copy_binary,
- &pipe->pipe_settings.preview.preview_binary);
-#else
&mycs->copy_binary,
&mycs->preview_binary);
-#endif
if (err != IA_CSS_SUCCESS)
return err;
}
@@ -4499,22 +4351,10 @@ ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
}
if (return_err == IA_CSS_SUCCESS) {
-#ifndef ISP2401
- bool found_record = false;
- found_record = sh_css_hmm_buffer_record_acquire(
-#else
- struct sh_css_hmm_buffer_record *hmm_buffer_record = NULL;
-
- hmm_buffer_record = sh_css_hmm_buffer_record_acquire(
-#endif
- h_vbuf, buf_type,
- HOST_ADDRESS(ddr_buffer.kernel_ptr));
-#ifndef ISP2401
- if (found_record == true) {
-#else
- if (hmm_buffer_record) {
-#endif
- IA_CSS_LOG("send vbuf=0x%x", h_vbuf);
+ if (sh_css_hmm_buffer_record_acquire(
+ h_vbuf, buf_type,
+ HOST_ADDRESS(ddr_buffer.kernel_ptr))) {
+ IA_CSS_LOG("send vbuf=%p", h_vbuf);
} else {
return_err = IA_CSS_ERR_INTERNAL_ERROR;
IA_CSS_ERROR("hmm_buffer_record[]: no available slots\n");
@@ -4624,7 +4464,7 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &hmm_buffer_record->h_vbuf);
sh_css_hmm_buffer_record_reset(hmm_buffer_record);
} else {
- IA_CSS_ERROR("hmm_buffer_record not found (0x%p) buf_type(%d)",
+ IA_CSS_ERROR("hmm_buffer_record not found (0x%u) buf_type(%d)",
ddr_buffer_addr, buf_type);
IA_CSS_LEAVE_ERR(IA_CSS_ERR_INTERNAL_ERROR);
return IA_CSS_ERR_INTERNAL_ERROR;
@@ -4640,8 +4480,8 @@ ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
if ((ddr_buffer.kernel_ptr == 0) ||
(kernel_ptr != HOST_ADDRESS(ddr_buffer.kernel_ptr))) {
IA_CSS_ERROR("kernel_ptr invalid");
- IA_CSS_ERROR("expected: (0x%p)", kernel_ptr);
- IA_CSS_ERROR("actual: (0x%p)", HOST_ADDRESS(ddr_buffer.kernel_ptr));
+ IA_CSS_ERROR("expected: (0x%llx)", (u64)kernel_ptr);
+ IA_CSS_ERROR("actual: (0x%llx)", (u64)HOST_ADDRESS(ddr_buffer.kernel_ptr));
IA_CSS_ERROR("buf_type: %d\n", buf_type);
IA_CSS_LEAVE_ERR(IA_CSS_ERR_INTERNAL_ERROR);
return IA_CSS_ERR_INTERNAL_ERROR;
@@ -6316,9 +6156,6 @@ static enum ia_css_err load_primary_binaries(
#else
*pipe_vf_out_info;
#endif
-#if defined(HAS_RES_MGR)
- struct ia_css_frame_info bds_out_info;
-#endif
enum ia_css_err err = IA_CSS_SUCCESS;
struct ia_css_capture_settings *mycs;
unsigned int i;
@@ -6440,10 +6277,6 @@ static enum ia_css_err load_primary_binaries(
&cas_scaler_descr.out_info[i],
&cas_scaler_descr.internal_out_info[i],
&cas_scaler_descr.vf_info[i]);
-#if defined(HAS_RES_MGR)
- bds_out_info.res = pipe->config.bayer_ds_out_res;
- yuv_scaler_descr.bds_out_info = &bds_out_info;
-#endif
err = ia_css_binary_find(&yuv_scaler_descr,
&mycs->yuv_scaler_binary[i]);
if (err != IA_CSS_SUCCESS) {
@@ -6494,10 +6327,6 @@ static enum ia_css_err load_primary_binaries(
&capture_pp_descr, &prim_out_info,
#endif
&capt_pp_out_info, &vf_info);
-#if defined(HAS_RES_MGR)
- bds_out_info.res = pipe->config.bayer_ds_out_res;
- capture_pp_descr.bds_out_info = &bds_out_info;
-#endif
err = ia_css_binary_find(&capture_pp_descr,
&mycs->capture_pp_binary);
if (err != IA_CSS_SUCCESS) {
@@ -6533,10 +6362,6 @@ static enum ia_css_err load_primary_binaries(
if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0] && (i == mycs->num_primary_stage - 1))
local_vf_info = &vf_info;
ia_css_pipe_get_primary_binarydesc(pipe, &prim_descr[i], &prim_in_info, &prim_out_info, local_vf_info, i);
-#if defined(HAS_RES_MGR)
- bds_out_info.res = pipe->config.bayer_ds_out_res;
- prim_descr[i].bds_out_info = &bds_out_info;
-#endif
err = ia_css_binary_find(&prim_descr[i], &mycs->primary_binary[i]);
if (err != IA_CSS_SUCCESS) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
@@ -6570,10 +6395,6 @@ static enum ia_css_err load_primary_binaries(
ia_css_pipe_get_vfpp_binarydesc(pipe,
&vf_pp_descr, vf_pp_in_info, pipe_vf_out_info);
-#if defined(HAS_RES_MGR)
- bds_out_info.res = pipe->config.bayer_ds_out_res;
- vf_pp_descr.bds_out_info = &bds_out_info;
-#endif
err = ia_css_binary_find(&vf_pp_descr, &mycs->vf_pp_binary);
if (err != IA_CSS_SUCCESS) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
@@ -6621,7 +6442,7 @@ allocate_delay_frames(struct ia_css_pipe *pipe)
IA_CSS_ENTER_PRIVATE("");
if (pipe == NULL) {
- IA_CSS_ERROR("Invalid args - pipe %x", pipe);
+ IA_CSS_ERROR("Invalid args - pipe %p", pipe);
return IA_CSS_ERR_INVALID_ARGUMENTS;
}
@@ -8628,9 +8449,7 @@ remove_firmware(struct ia_css_fw_info **l, struct ia_css_fw_info *firmware)
return; /* removing single and multiple firmware is handled in acc_unload_extension() */
}
-#if !defined(HRT_UNSCHED)
-static enum ia_css_err
-upload_isp_code(struct ia_css_fw_info *firmware)
+static enum ia_css_err upload_isp_code(struct ia_css_fw_info *firmware)
{
hrt_vaddress binary;
@@ -8658,12 +8477,10 @@ upload_isp_code(struct ia_css_fw_info *firmware)
return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
return IA_CSS_SUCCESS;
}
-#endif
static enum ia_css_err
acc_load_extension(struct ia_css_fw_info *firmware)
{
-#if !defined(HRT_UNSCHED)
enum ia_css_err err;
struct ia_css_fw_info *hd = firmware;
while (hd){
@@ -8672,7 +8489,6 @@ acc_load_extension(struct ia_css_fw_info *firmware)
return err;
hd = hd->next;
}
-#endif
if (firmware == NULL)
return IA_CSS_ERR_INVALID_ARGUMENTS;
@@ -9879,9 +9695,6 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
/* take over effective info */
effective_res = curr_pipe->config.input_effective_res;
-#endif
-
-#ifndef ISP2401
err = ia_css_util_check_res(
effective_res.width,
effective_res.height);
@@ -9902,13 +9715,6 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
if (err != IA_CSS_SUCCESS)
goto ERR;
-#if defined(HAS_RES_MGR)
- /* update acc configuration - striping info is ready */
- err = ia_css_update_cfg_stripe_info(curr_pipe);
- if (err != IA_CSS_SUCCESS)
- goto ERR;
-#endif
-
/* handle each pipe */
pipe_info = &curr_pipe->info;
for (j = 0; j < IA_CSS_PIPE_MAX_OUTPUT_STAGE; j++) {
@@ -10587,39 +10393,6 @@ ia_css_pipe_get_isp_pipe_version(const struct ia_css_pipe *pipe)
return (unsigned int)pipe->config.isp_pipe_version;
}
-#if defined(HAS_BL)
-#define BL_START_TIMEOUT_US 30000000
-static enum ia_css_err
-ia_css_start_bl(void)
-{
- enum ia_css_err err = IA_CSS_SUCCESS;
- unsigned long timeout;
-
- IA_CSS_ENTER("");
- sh_css_start_bl();
- /* waiting for the Bootloader to complete execution */
- timeout = BL_START_TIMEOUT_US;
- while((ia_css_blctrl_get_state() == BOOTLOADER_BUSY) && timeout) {
- timeout--;
- hrt_sleep();
- }
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "Bootloader state %d\n", ia_css_blctrl_get_state());
- if (timeout == 0) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
- "Bootloader Execution Timeout\n");
- err = IA_CSS_ERR_INTERNAL_ERROR;
- }
- if (ia_css_blctrl_get_state() != BOOTLOADER_OK) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
- "Bootloader Execution Failed\n");
- err = IA_CSS_ERR_INTERNAL_ERROR;
- }
- IA_CSS_LEAVE_ERR(err);
- return err;
-}
-#endif
-
#define SP_START_TIMEOUT_US 30000000
enum ia_css_err
@@ -10629,15 +10402,6 @@ ia_css_start_sp(void)
enum ia_css_err err = IA_CSS_SUCCESS;
IA_CSS_ENTER("");
-#if defined(HAS_BL)
- /* Starting bootloader before Sp0 and Sp1
- * and not exposing CSS API */
- err = ia_css_start_bl();
- if (err != IA_CSS_SUCCESS) {
- IA_CSS_LEAVE("Bootloader fails");
- return err;
- }
-#endif
sh_css_sp_start_isp();
/* wait until the SP has completely started */
@@ -11291,23 +11055,14 @@ sh_css_hmm_buffer_record_reset(struct sh_css_hmm_buffer_record *buffer_record)
buffer_record->kernel_ptr = 0;
}
-#ifndef ISP2401
-static bool
-sh_css_hmm_buffer_record_acquire(struct ia_css_rmgr_vbuf_handle *h_vbuf,
-#else
static struct sh_css_hmm_buffer_record
*sh_css_hmm_buffer_record_acquire(struct ia_css_rmgr_vbuf_handle *h_vbuf,
-#endif
enum ia_css_buffer_type type,
hrt_address kernel_ptr)
{
int i;
struct sh_css_hmm_buffer_record *buffer_record = NULL;
-#ifndef ISP2401
- bool found_record = false;
-#else
struct sh_css_hmm_buffer_record *out_buffer_record = NULL;
-#endif
assert(h_vbuf != NULL);
assert((type > IA_CSS_BUFFER_TYPE_INVALID) && (type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE));
@@ -11320,21 +11075,13 @@ static struct sh_css_hmm_buffer_record
buffer_record->type = type;
buffer_record->h_vbuf = h_vbuf;
buffer_record->kernel_ptr = kernel_ptr;
-#ifndef ISP2401
- found_record = true;
-#else
out_buffer_record = buffer_record;
-#endif
break;
}
buffer_record++;
}
-#ifndef ISP2401
- return found_record;
-#else
return out_buffer_record;
-#endif
}
static struct sh_css_hmm_buffer_record
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c
index 34cc56f0b471..eecd8cf71951 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c
@@ -63,9 +63,6 @@ static const char *release_version = STR(irci_ecr-master_20150911_0724);
static char FW_rel_ver_name[MAX_FW_REL_VER_NAME] = "---";
struct ia_css_fw_info sh_css_sp_fw;
-#if defined(HAS_BL)
-struct ia_css_fw_info sh_css_bl_fw;
-#endif /* HAS_BL */
struct ia_css_blob_descr *sh_css_blob_info; /* Only ISP blob info (no SP) */
unsigned sh_css_num_binaries; /* This includes 1 SP binary */
@@ -95,12 +92,7 @@ setup_binary(struct ia_css_fw_info *fw, const char *fw_data, struct ia_css_fw_in
*sh_css_fw = *fw;
-#if defined(HRT_UNSCHED)
- sh_css_fw->blob.code = vmalloc(1);
-#else
sh_css_fw->blob.code = vmalloc(fw->blob.size);
-#endif
-
if (sh_css_fw->blob.code == NULL)
return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
@@ -137,12 +129,7 @@ sh_css_load_blob_info(const char *fw, const struct ia_css_fw_info *bi, struct ia
bd->blob = blob;
bd->header = *bi;
- if ((bi->type == ia_css_isp_firmware) || (bi->type == ia_css_sp_firmware)
-#if defined(HAS_BL)
- || (bi->type == ia_css_bootloader_firmware)
-#endif /* HAS_BL */
- )
- {
+ if (bi->type == ia_css_isp_firmware || bi->type == ia_css_sp_firmware) {
char *namebuffer;
int namelength = (int)strlen(name);
@@ -242,9 +229,9 @@ sh_css_load_firmware(const char *fw_data,
sh_css_num_binaries = file_header->binary_nr;
/* Only allocate memory for ISP blob info */
- if (sh_css_num_binaries > (NUM_OF_SPS + NUM_OF_BLS)) {
+ if (sh_css_num_binaries > NUM_OF_SPS) {
sh_css_blob_info = kmalloc(
- (sh_css_num_binaries - (NUM_OF_SPS + NUM_OF_BLS)) *
+ (sh_css_num_binaries - NUM_OF_SPS) *
sizeof(*sh_css_blob_info), GFP_KERNEL);
if (sh_css_blob_info == NULL)
return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
@@ -279,25 +266,16 @@ sh_css_load_firmware(const char *fw_data,
err = setup_binary(bi, fw_data, &sh_css_sp_fw, i);
if (err != IA_CSS_SUCCESS)
return err;
-#if defined(HAS_BL)
- } else if (bi->type == ia_css_bootloader_firmware) {
- if (i != BOOTLOADER_FIRMWARE)
- return IA_CSS_ERR_INTERNAL_ERROR;
- err = setup_binary(bi, fw_data, &sh_css_bl_fw, i);
- if (err != IA_CSS_SUCCESS)
- return err;
- IA_CSS_LOG("Bootloader binary recognized\n");
-#endif
} else {
- /* All subsequent binaries (including bootloaders) (i>NUM_OF_SPS+NUM_OF_BLS) are ISP firmware */
- if (i < (NUM_OF_SPS + NUM_OF_BLS))
+ /* All subsequent binaries (including bootloaders) (i>NUM_OF_SPS) are ISP firmware */
+ if (i < NUM_OF_SPS)
return IA_CSS_ERR_INTERNAL_ERROR;
if (bi->type != ia_css_isp_firmware)
return IA_CSS_ERR_INTERNAL_ERROR;
if (sh_css_blob_info == NULL) /* cannot happen but KW does not see this */
return IA_CSS_ERR_INTERNAL_ERROR;
- sh_css_blob_info[i-(NUM_OF_SPS + NUM_OF_BLS)] = bd;
+ sh_css_blob_info[i - NUM_OF_SPS] = bd;
}
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
index e2b6f06ed099..5b2b78f96dc5 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_internal.h
@@ -154,18 +154,11 @@
/* Number of SP's */
#define NUM_OF_SPS 1
-#if defined(HAS_BL)
-#define NUM_OF_BLS 1
-#else
#define NUM_OF_BLS 0
-#endif
/* Enum for order of Binaries */
enum sh_css_order_binaries {
SP_FIRMWARE = 0,
-#if defined(HAS_BL)
- BOOTLOADER_FIRMWARE,
-#endif
ISP_FIRMWARE
};
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_irq.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_irq.c
deleted file mode 100644
index 37e954aea36f..000000000000
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_irq.c
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-/* This file will contain the code to implement the functions declared in ia_css_irq.h
- and associated helper functions */
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c
index 7e3893c6c08a..36aaa3019a15 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mipi.c
@@ -681,7 +681,7 @@ send_mipi_frames(struct ia_css_pipe *pipe)
unsigned int port = 0;
#endif
- IA_CSS_ENTER_PRIVATE("pipe=%d", pipe);
+ IA_CSS_ENTER_PRIVATE("pipe=%p", pipe);
assert(pipe != NULL);
assert(pipe->stream != NULL);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mmu.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mmu.c
index 6de8472f1b07..237e38b2f0c1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mmu.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_mmu.c
@@ -13,16 +13,12 @@
*/
#include "ia_css_mmu.h"
-#ifdef ISP2401
#include "ia_css_mmu_private.h"
-#endif
#include <ia_css_debug.h>
#include "sh_css_sp.h"
#include "sh_css_firmware.h"
#include "sp.h"
-#ifdef ISP2401
#include "mmu_device.h"
-#endif
void
ia_css_mmu_invalidate_cache(void)
@@ -44,7 +40,6 @@ ia_css_mmu_invalidate_cache(void)
}
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_mmu_invalidate_cache() leave\n");
}
-#ifdef ISP2401
/* Deprecated, this is an HRT backend function (memory_access.h) */
void
@@ -59,4 +54,3 @@ sh_css_mmu_set_page_table_base_index(hrt_data base_index)
}
IA_CSS_LEAVE_PRIVATE("");
}
-#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c
index 561f4a7236f7..48224370b8bf 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_params.c
@@ -3356,15 +3356,8 @@ enum ia_css_err ia_css_pipe_set_bci_scaler_lut(struct ia_css_pipe *pipe,
}
/* Free any existing tables. */
-#ifndef ISP2401
- if (pipe->scaler_pp_lut != mmgr_NULL) {
- hmm_free(pipe->scaler_pp_lut);
- pipe->scaler_pp_lut = mmgr_NULL;
- }
-#else
sh_css_params_free_gdc_lut(pipe->scaler_pp_lut);
pipe->scaler_pp_lut = mmgr_NULL;
-#endif
#ifndef ISP2401
if (store) {
@@ -3375,7 +3368,7 @@ enum ia_css_err ia_css_pipe_set_bci_scaler_lut(struct ia_css_pipe *pipe,
#endif
if (pipe->scaler_pp_lut == mmgr_NULL) {
#ifndef ISP2401
- IA_CSS_LEAVE("lut(%p) err=%d", pipe->scaler_pp_lut, err);
+ IA_CSS_LEAVE("lut(%u) err=%d", pipe->scaler_pp_lut, err);
return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
#else
ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
@@ -3397,7 +3390,7 @@ enum ia_css_err ia_css_pipe_set_bci_scaler_lut(struct ia_css_pipe *pipe,
#endif
}
- IA_CSS_LEAVE("lut(%p) err=%d", pipe->scaler_pp_lut, err);
+ IA_CSS_LEAVE("lut(%u) err=%d", pipe->scaler_pp_lut, err);
return err;
}
@@ -3437,7 +3430,7 @@ enum ia_css_err sh_css_params_map_and_store_default_gdc_lut(void)
mmgr_store(default_gdc_lut, (int *)interleaved_lut_temp,
sizeof(zoom_table));
- IA_CSS_LEAVE_PRIVATE("lut(%p) err=%d", default_gdc_lut, err);
+ IA_CSS_LEAVE_PRIVATE("lut(%u) err=%d", default_gdc_lut, err);
return err;
}
@@ -3445,15 +3438,8 @@ void sh_css_params_free_default_gdc_lut(void)
{
IA_CSS_ENTER_PRIVATE("void");
-#ifndef ISP2401
- if (default_gdc_lut != mmgr_NULL) {
- hmm_free(default_gdc_lut);
- default_gdc_lut = mmgr_NULL;
- }
-#else
sh_css_params_free_gdc_lut(default_gdc_lut);
default_gdc_lut = mmgr_NULL;
-#endif
IA_CSS_LEAVE_PRIVATE("void");
@@ -3859,7 +3845,7 @@ sh_css_param_update_isp_params(struct ia_css_pipe *curr_pipe,
/* When the API change that makes a clear distinction between
* stream config and pipe config is implemented, this skipping code can be moved out of the #ifdef */
if (pipe_in && (pipe != pipe_in)) {
- IA_CSS_LOG("skipping pipe %x", pipe);
+ IA_CSS_LOG("skipping pipe %p", pipe);
continue;
}
@@ -4590,7 +4576,7 @@ free_ia_css_isp_parameter_set_info(
unsigned int i;
hrt_vaddress *addrs = (hrt_vaddress *)&isp_params_info.mem_map;
- IA_CSS_ENTER_PRIVATE("ptr = %p", ptr);
+ IA_CSS_ENTER_PRIVATE("ptr = %u", ptr);
/* sanity check - ptr must be valid */
if (!ia_css_refcount_is_valid(ptr)) {
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c
index 57295397da3e..05eeff58a229 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c
@@ -43,6 +43,7 @@ struct hmm_bo_device bo_device;
struct hmm_pool dynamic_pool;
struct hmm_pool reserved_pool;
static ia_css_ptr dummy_ptr;
+static bool hmm_initialized;
struct _hmm_mem_stat hmm_mem_stat;
/* p: private
@@ -186,6 +187,8 @@ int hmm_init(void)
if (ret)
dev_err(atomisp_dev, "hmm_bo_device_init failed.\n");
+ hmm_initialized = true;
+
/*
* As hmm uses NULL to indicate an invalid ISP virtual address,
* and ISP_VM_START is defined to 0 too, so we allocate
@@ -193,7 +196,7 @@ int hmm_init(void)
* at the beginning, to avoid hmm_alloc return 0 in the
* further allocation.
*/
- dummy_ptr = hmm_alloc(1, HMM_BO_PRIVATE, 0, 0, HMM_UNCACHED);
+ dummy_ptr = hmm_alloc(1, HMM_BO_PRIVATE, 0, NULL, HMM_UNCACHED);
if (!ret) {
ret = sysfs_create_group(&atomisp_dev->kobj,
@@ -217,6 +220,7 @@ void hmm_cleanup(void)
dummy_ptr = 0;
hmm_bo_device_exit(&bo_device);
+ hmm_initialized = false;
}
ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
@@ -229,7 +233,7 @@ ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
/* Check if we are initialized. In the ideal world we wouldn't need
this but we can tackle it once the driver is a lot cleaner */
- if (!dummy_ptr)
+ if (!hmm_initialized)
hmm_init();
/*Get page number from size*/
pgnr = size_to_pgnr_ceil(bytes);
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c
index 7dff22f59e29..2e78976bb2ac 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c
+++ b/drivers/staging/media/atomisp/pci/atomisp2/hrt/hive_isp_css_mm_hrt.c
@@ -55,7 +55,7 @@ static ia_css_ptr __hrt_isp_css_mm_alloc(size_t bytes, void *userptr,
if (type == HRT_USR_PTR) {
if (userptr == NULL)
return hmm_alloc(bytes, HMM_BO_PRIVATE, 0,
- 0, cached);
+ NULL, cached);
else {
if (num_pages < ((__page_align(bytes)) >> PAGE_SHIFT))
dev_err(atomisp_dev,
@@ -94,7 +94,7 @@ ia_css_ptr hrt_isp_css_mm_alloc_user_ptr(size_t bytes, void *userptr,
ia_css_ptr hrt_isp_css_mm_alloc_cached(size_t bytes)
{
if (my_userptr == NULL)
- return hmm_alloc(bytes, HMM_BO_PRIVATE, 0, 0,
+ return hmm_alloc(bytes, HMM_BO_PRIVATE, 0, NULL,
HMM_CACHED);
else {
if (my_num_pages < ((__page_align(bytes)) >> PAGE_SHIFT))
diff --git a/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c
index 5b4506a71126..edaae93af8f9 100644
--- a/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c
+++ b/drivers/staging/media/atomisp/platform/intel-mid/atomisp_gmin_platform.c
@@ -51,7 +51,7 @@ struct gmin_subdev {
static struct gmin_subdev gmin_subdevs[MAX_SUBDEVS];
-static enum { PMIC_UNSET = 0, PMIC_REGULATOR, PMIC_AXP, PMIC_TI ,
+static enum { PMIC_UNSET = 0, PMIC_REGULATOR, PMIC_AXP, PMIC_TI,
PMIC_CRYSTALCOVE } pmic_id;
/* The atomisp uses type==0 for the end-of-list marker, so leave space. */
@@ -119,7 +119,7 @@ static int af_power_ctrl(struct v4l2_subdev *subdev, int flag)
/*
* The power here is used for dw9817,
* regulator is from rear sensor
- */
+ */
if (gs->v2p8_vcm_reg) {
if (flag)
return regulator_enable(gs->v2p8_vcm_reg);
@@ -152,13 +152,13 @@ const struct camera_af_platform_data *camera_get_af_platform_data(void)
EXPORT_SYMBOL_GPL(camera_get_af_platform_data);
int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
- struct camera_sensor_platform_data *plat_data,
- enum intel_v4l2_subdev_type type)
+ struct camera_sensor_platform_data *plat_data,
+ enum intel_v4l2_subdev_type type)
{
int i;
struct i2c_board_info *bi;
struct gmin_subdev *gs;
- struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
struct acpi_device *adev;
dev_info(&client->dev, "register atomisp i2c module type %d\n", type);
@@ -167,12 +167,13 @@ int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
* uses ACPI runtime power management for camera devices, but
* we don't. Disable it, or else the rails will be needlessly
* tickled during suspend/resume. This has caused power and
- * performance issues on multiple devices. */
+ * performance issues on multiple devices.
+ */
adev = ACPI_COMPANION(&client->dev);
if (adev)
adev->power.flags.power_resources = 0;
- for (i=0; i < MAX_SUBDEVS; i++)
+ for (i = 0; i < MAX_SUBDEVS; i++)
if (!pdata.subdevs[i].type)
break;
@@ -182,7 +183,8 @@ int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
/* Note subtlety of initialization order: at the point where
* this registration API gets called, the platform data
* callbacks have probably already been invoked, so the
- * gmin_subdev struct is already initialized for us. */
+ * gmin_subdev struct is already initialized for us.
+ */
gs = find_gmin_subdev(subdev);
pdata.subdevs[i].type = type;
@@ -206,8 +208,10 @@ struct v4l2_subdev *atomisp_gmin_find_subdev(struct i2c_adapter *adapter,
struct i2c_board_info *board_info)
{
int i;
- for (i=0; i < MAX_SUBDEVS && pdata.subdevs[i].type; i++) {
+
+ for (i = 0; i < MAX_SUBDEVS && pdata.subdevs[i].type; i++) {
struct intel_v4l2_subdev_table *sd = &pdata.subdevs[i];
+
if (sd->v4l2_subdev.i2c_adapter_id == adapter->nr &&
sd->v4l2_subdev.board_info.addr == board_info->addr)
return sd->subdev;
@@ -261,7 +265,8 @@ static const struct gmin_cfg_var ffrd8_vars[] = {
};
/* Cribbed from MCG defaults in the mt9m114 driver, not actually verified
- * vs. T100 hardware */
+ * vs. T100 hardware
+ */
static const struct gmin_cfg_var t100_vars[] = {
{ "INT33F0:00_CsiPort", "0" },
{ "INT33F0:00_CsiLanes", "1" },
@@ -270,45 +275,45 @@ static const struct gmin_cfg_var t100_vars[] = {
};
static const struct gmin_cfg_var mrd7_vars[] = {
- {"INT33F8:00_CamType", "1"},
- {"INT33F8:00_CsiPort", "1"},
- {"INT33F8:00_CsiLanes","2"},
- {"INT33F8:00_CsiFmt","13"},
- {"INT33F8:00_CsiBayer", "0"},
- {"INT33F8:00_CamClk", "0"},
- {"INT33F9:00_CamType", "1"},
- {"INT33F9:00_CsiPort", "0"},
- {"INT33F9:00_CsiLanes","1"},
- {"INT33F9:00_CsiFmt","13"},
- {"INT33F9:00_CsiBayer", "0"},
- {"INT33F9:00_CamClk", "1"},
- {},
+ {"INT33F8:00_CamType", "1"},
+ {"INT33F8:00_CsiPort", "1"},
+ {"INT33F8:00_CsiLanes", "2"},
+ {"INT33F8:00_CsiFmt", "13"},
+ {"INT33F8:00_CsiBayer", "0"},
+ {"INT33F8:00_CamClk", "0"},
+ {"INT33F9:00_CamType", "1"},
+ {"INT33F9:00_CsiPort", "0"},
+ {"INT33F9:00_CsiLanes", "1"},
+ {"INT33F9:00_CsiFmt", "13"},
+ {"INT33F9:00_CsiBayer", "0"},
+ {"INT33F9:00_CamClk", "1"},
+ {},
};
static const struct gmin_cfg_var ecs7_vars[] = {
- {"INT33BE:00_CsiPort", "1"},
- {"INT33BE:00_CsiLanes","2"},
- {"INT33BE:00_CsiFmt","13"},
- {"INT33BE:00_CsiBayer", "2"},
- {"INT33BE:00_CamClk", "0"},
- {"INT33F0:00_CsiPort", "0"},
- {"INT33F0:00_CsiLanes","1"},
- {"INT33F0:00_CsiFmt","13"},
- {"INT33F0:00_CsiBayer", "0"},
- {"INT33F0:00_CamClk", "1"},
- {"gmin_V2P8GPIO","402"},
- {},
+ {"INT33BE:00_CsiPort", "1"},
+ {"INT33BE:00_CsiLanes", "2"},
+ {"INT33BE:00_CsiFmt", "13"},
+ {"INT33BE:00_CsiBayer", "2"},
+ {"INT33BE:00_CamClk", "0"},
+ {"INT33F0:00_CsiPort", "0"},
+ {"INT33F0:00_CsiLanes", "1"},
+ {"INT33F0:00_CsiFmt", "13"},
+ {"INT33F0:00_CsiBayer", "0"},
+ {"INT33F0:00_CamClk", "1"},
+ {"gmin_V2P8GPIO", "402"},
+ {},
};
static const struct gmin_cfg_var i8880_vars[] = {
- {"XXOV2680:00_CsiPort", "1"},
- {"XXOV2680:00_CsiLanes","1"},
- {"XXOV2680:00_CamClk","0"},
- {"XXGC0310:00_CsiPort", "0"},
- {"XXGC0310:00_CsiLanes", "1"},
- {"XXGC0310:00_CamClk", "1"},
- {},
+ {"XXOV2680:00_CsiPort", "1"},
+ {"XXOV2680:00_CsiLanes", "1"},
+ {"XXOV2680:00_CamClk", "0"},
+ {"XXGC0310:00_CsiPort", "0"},
+ {"XXGC0310:00_CsiLanes", "1"},
+ {"XXGC0310:00_CamClk", "1"},
+ {},
};
static const struct {
@@ -317,9 +322,9 @@ static const struct {
} hard_vars[] = {
{ "BYT-T FFD8", ffrd8_vars },
{ "T100TA", t100_vars },
- { "MRD7", mrd7_vars },
- { "ST70408", ecs7_vars },
- { "VTA0803", i8880_vars },
+ { "MRD7", mrd7_vars },
+ { "ST70408", ecs7_vars },
+ { "VTA0803", i8880_vars },
};
@@ -343,19 +348,17 @@ static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev)
{
int i, ret;
struct device *dev;
- struct i2c_client *client = v4l2_get_subdevdata(subdev);
-
- if (!pmic_id) {
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
- pmic_id = PMIC_REGULATOR;
- }
+ if (!pmic_id)
+ pmic_id = PMIC_REGULATOR;
if (!client)
return NULL;
dev = &client->dev;
- for (i=0; i < MAX_SUBDEVS && gmin_subdevs[i].subdev; i++)
+ for (i = 0; i < MAX_SUBDEVS && gmin_subdevs[i].subdev; i++)
;
if (i >= MAX_SUBDEVS)
return NULL;
@@ -401,7 +404,8 @@ static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev)
* API is broken with the current drivers, returning
* "1" for a regulator that will then emit a
* "unbalanced disable" WARNing if we try to disable
- * it. */
+ * it.
+ */
}
return &gmin_subdevs[i];
@@ -410,7 +414,8 @@ static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev)
static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev)
{
int i;
- for (i=0; i < MAX_SUBDEVS; i++)
+
+ for (i = 0; i < MAX_SUBDEVS; i++)
if (gmin_subdevs[i].subdev == subdev)
return &gmin_subdevs[i];
return gmin_subdev_add(subdev);
@@ -419,6 +424,7 @@ static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev)
static int gmin_gpio0_ctrl(struct v4l2_subdev *subdev, int on)
{
struct gmin_subdev *gs = find_gmin_subdev(subdev);
+
if (gs && gs->gpio0) {
gpiod_set_value(gs->gpio0, on);
return 0;
@@ -429,6 +435,7 @@ static int gmin_gpio0_ctrl(struct v4l2_subdev *subdev, int on)
static int gmin_gpio1_ctrl(struct v4l2_subdev *subdev, int on)
{
struct gmin_subdev *gs = find_gmin_subdev(subdev);
+
if (gs && gs->gpio1) {
gpiod_set_value(gs->gpio1, on);
return 0;
@@ -436,7 +443,7 @@ static int gmin_gpio1_ctrl(struct v4l2_subdev *subdev, int on)
return -EINVAL;
}
-int gmin_v1p2_ctrl(struct v4l2_subdev *subdev, int on)
+static int gmin_v1p2_ctrl(struct v4l2_subdev *subdev, int on)
{
struct gmin_subdev *gs = find_gmin_subdev(subdev);
@@ -455,7 +462,8 @@ int gmin_v1p2_ctrl(struct v4l2_subdev *subdev, int on)
return -EINVAL;
}
-int gmin_v1p8_ctrl(struct v4l2_subdev *subdev, int on)
+
+static int gmin_v1p8_ctrl(struct v4l2_subdev *subdev, int on)
{
struct gmin_subdev *gs = find_gmin_subdev(subdev);
int ret;
@@ -481,7 +489,7 @@ int gmin_v1p8_ctrl(struct v4l2_subdev *subdev, int on)
gpio_set_value(v1p8_gpio, on);
if (gs->v1p8_reg) {
- regulator_set_voltage(gs->v1p8_reg, 1800000, 1800000);
+ regulator_set_voltage(gs->v1p8_reg, 1800000, 1800000);
if (on)
return regulator_enable(gs->v1p8_reg);
else
@@ -491,7 +499,7 @@ int gmin_v1p8_ctrl(struct v4l2_subdev *subdev, int on)
return -EINVAL;
}
-int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on)
+static int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on)
{
struct gmin_subdev *gs = find_gmin_subdev(subdev);
int ret;
@@ -517,7 +525,7 @@ int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on)
gpio_set_value(v2p8_gpio, on);
if (gs->v2p8_reg) {
- regulator_set_voltage(gs->v2p8_reg, 2900000, 2900000);
+ regulator_set_voltage(gs->v2p8_reg, 2900000, 2900000);
if (on)
return regulator_enable(gs->v2p8_reg);
else
@@ -527,10 +535,11 @@ int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on)
return -EINVAL;
}
-int gmin_flisclk_ctrl(struct v4l2_subdev *subdev, int on)
+static int gmin_flisclk_ctrl(struct v4l2_subdev *subdev, int on)
{
int ret = 0;
struct gmin_subdev *gs = find_gmin_subdev(subdev);
+
if (on)
ret = vlv2_plat_set_clock_freq(gs->clock_num, gs->clock_src);
if (ret)
@@ -595,6 +604,7 @@ struct camera_sensor_platform_data *gmin_camera_platform_data(
enum atomisp_bayer_order csi_bayer)
{
struct gmin_subdev *gs = find_gmin_subdev(subdev);
+
gs->csi_fmt = csi_format;
gs->csi_bayer = csi_bayer;
@@ -617,30 +627,31 @@ EXPORT_SYMBOL_GPL(atomisp_gmin_register_vcm_control);
/* Retrieves a device-specific configuration variable. The dev
* argument should be a device with an ACPI companion, as all
- * configuration is based on firmware ID. */
-int gmin_get_config_var(struct device *dev, const char *var, char *out, size_t *out_len)
+ * configuration is based on firmware ID.
+ */
+int gmin_get_config_var(struct device *dev, const char *var, char *out,
+ size_t *out_len)
{
char var8[CFG_VAR_NAME_MAX];
efi_char16_t var16[CFG_VAR_NAME_MAX];
struct efivar_entry *ev;
- u32 efiattr_dummy;
int i, j, ret;
- unsigned long efilen;
- if (dev && ACPI_COMPANION(dev))
- dev = &ACPI_COMPANION(dev)->dev;
+ if (dev && ACPI_COMPANION(dev))
+ dev = &ACPI_COMPANION(dev)->dev;
- if (dev)
- ret = snprintf(var8, sizeof(var8), "%s_%s", dev_name(dev), var);
- else
- ret = snprintf(var8, sizeof(var8), "gmin_%s", var);
+ if (dev)
+ ret = snprintf(var8, sizeof(var8), "%s_%s", dev_name(dev), var);
+ else
+ ret = snprintf(var8, sizeof(var8), "gmin_%s", var);
if (ret < 0 || ret >= sizeof(var8) - 1)
return -EINVAL;
/* First check a hard-coded list of board-specific variables.
* Some device firmwares lack the ability to set EFI variables at
- * runtime. */
+ * runtime.
+ */
for (i = 0; i < ARRAY_SIZE(hard_vars); i++) {
if (dmi_match(DMI_BOARD_NAME, hard_vars[i].dmi_board_name)) {
for (j = 0; hard_vars[i].vars[j].name; j++) {
@@ -665,7 +676,8 @@ int gmin_get_config_var(struct device *dev, const char *var, char *out, size_t *
}
/* Our variable names are ASCII by construction, but EFI names
- * are wide chars. Convert and zero-pad. */
+ * are wide chars. Convert and zero-pad.
+ */
memset(var16, 0, sizeof(var16));
for (i = 0; i < sizeof(var8) && var8[i]; i++)
var16[i] = var8[i];
@@ -678,21 +690,25 @@ int gmin_get_config_var(struct device *dev, const char *var, char *out, size_t *
* implementation simply uses VariableName and VendorGuid from
* the struct and ignores the rest, but it seems like there
* ought to be an "official" efivar_entry registered
- * somewhere? */
+ * somewhere?
+ */
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
if (!ev)
return -ENOMEM;
memcpy(&ev->var.VariableName, var16, sizeof(var16));
ev->var.VendorGuid = GMIN_CFG_VAR_EFI_GUID;
-
- efilen = *out_len;
- ret = efivar_entry_get(ev, &efiattr_dummy, &efilen, out);
+ ev->var.DataSize = *out_len;
+
+ ret = efivar_entry_get(ev, &ev->var.Attributes,
+ &ev->var.DataSize, ev->var.Data);
+ if (ret == 0) {
+ memcpy(out, ev->var.Data, ev->var.DataSize);
+ *out_len = ev->var.DataSize;
+ } else if (dev) {
+ dev_warn(dev, "Failed to find gmin variable %s\n", var8);
+ }
kfree(ev);
- *out_len = efilen;
-
- if (ret)
- dev_warn(dev, "Failed to find gmin variable %s\n", var8);
return ret;
}
@@ -718,38 +734,39 @@ EXPORT_SYMBOL_GPL(gmin_get_var_int);
int camera_sensor_csi(struct v4l2_subdev *sd, u32 port,
u32 lanes, u32 format, u32 bayer_order, int flag)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct camera_mipi_info *csi = NULL;
-
- if (flag) {
- csi = kzalloc(sizeof(*csi), GFP_KERNEL);
- if (!csi) {
- dev_err(&client->dev, "out of memory\n");
- return -ENOMEM;
- }
- csi->port = port;
- csi->num_lanes = lanes;
- csi->input_format = format;
- csi->raw_bayer_order = bayer_order;
- v4l2_set_subdev_hostdata(sd, (void *)csi);
- csi->metadata_format = ATOMISP_INPUT_FORMAT_EMBEDDED;
- csi->metadata_effective_width = NULL;
- dev_info(&client->dev,
- "camera pdata: port: %d lanes: %d order: %8.8x\n",
- port, lanes, bayer_order);
- } else {
- csi = v4l2_get_subdev_hostdata(sd);
- kfree(csi);
- }
-
- return 0;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct camera_mipi_info *csi = NULL;
+
+ if (flag) {
+ csi = kzalloc(sizeof(*csi), GFP_KERNEL);
+ if (!csi) {
+ dev_err(&client->dev, "out of memory\n");
+ return -ENOMEM;
+ }
+ csi->port = port;
+ csi->num_lanes = lanes;
+ csi->input_format = format;
+ csi->raw_bayer_order = bayer_order;
+ v4l2_set_subdev_hostdata(sd, (void *)csi);
+ csi->metadata_format = ATOMISP_INPUT_FORMAT_EMBEDDED;
+ csi->metadata_effective_width = NULL;
+ dev_info(&client->dev,
+ "camera pdata: port: %d lanes: %d order: %8.8x\n",
+ port, lanes, bayer_order);
+ } else {
+ csi = v4l2_get_subdev_hostdata(sd);
+ kfree(csi);
+ }
+
+ return 0;
}
EXPORT_SYMBOL_GPL(camera_sensor_csi);
/* PCI quirk: The BYT ISP advertises PCI runtime PM but it doesn't
* work. Disable so the kernel framework doesn't hang the device
* trying. The driver itself does direct calls to the PUNIT to manage
- * ISP power. */
+ * ISP power.
+ */
static void isp_pm_cap_fixup(struct pci_dev *dev)
{
dev_info(&dev->dev, "Disabling PCI power management on camera ISP\n");
diff --git a/drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c b/drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c
index a6c0f5f8c3f8..cd452cc20fea 100644
--- a/drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c
+++ b/drivers/staging/media/atomisp/platform/intel-mid/intel_mid_pcihelpers.c
@@ -5,7 +5,8 @@
/* G-Min addition: "platform_is()" lives in intel_mid_pm.h in the MCG
* tree, but it's just platform ID info and we don't want to pull in
- * the whole SFI-based PM architecture. */
+ * the whole SFI-based PM architecture.
+ */
#define INTEL_ATOM_MRST 0x26
#define INTEL_ATOM_MFLD 0x27
#define INTEL_ATOM_CLV 0x35
@@ -22,7 +23,7 @@
#endif
static inline int platform_is(u8 model)
{
- return (boot_cpu_data.x86_model == model);
+ return (boot_cpu_data.x86_model == model);
}
#include "../../include/asm/intel_mid_pcihelpers.h"
@@ -32,7 +33,6 @@ static DEFINE_SPINLOCK(msgbus_lock);
static struct pci_dev *pci_root;
static struct pm_qos_request pm_qos;
-int qos;
#define DW_I2C_NEED_QOS (platform_is(INTEL_ATOM_BYT))
@@ -136,8 +136,8 @@ u32 intel_mid_msgbus_read32(u8 port, u32 addr)
return data;
}
-
EXPORT_SYMBOL(intel_mid_msgbus_read32);
+
void intel_mid_msgbus_write32(u8 port, u32 addr, u32 data)
{
unsigned long irq_flags;
@@ -171,8 +171,8 @@ EXPORT_SYMBOL(intel_mid_soc_stepping);
static bool is_south_complex_device(struct pci_dev *dev)
{
- unsigned base_class = dev->class >> 16;
- unsigned sub_class = (dev->class & SUB_CLASS_MASK) >> 8;
+ unsigned int base_class = dev->class >> 16;
+ unsigned int sub_class = (dev->class & SUB_CLASS_MASK) >> 8;
/* other than camera, pci bridges and display,
* everything else is a south complex device.
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index 18186d0fa1a6..370ecb959543 100644
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
@@ -473,7 +473,7 @@ static int slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
{
struct cxd *ci = ca->data;
- dev_info(&ci->i2c->dev, "slot_shutdown\n");
+ dev_info(&ci->i2c->dev, "%s\n", __func__);
mutex_lock(&ci->lock);
write_regm(ci, 0x09, 0x08, 0x08);
write_regm(ci, 0x20, 0x80, 0x80); /* Reset CAM Mode */
@@ -564,7 +564,7 @@ static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
campoll(ci);
mutex_unlock(&ci->lock);
- dev_info(&ci->i2c->dev, "read_data\n");
+ dev_info(&ci->i2c->dev, "%s\n", __func__);
if (!ci->dr)
return 0;
@@ -584,7 +584,7 @@ static int write_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
struct cxd *ci = ca->data;
mutex_lock(&ci->lock);
- dev_info(&ci->i2c->dev, "write_data %d\n", ecount);
+ dev_info(&ci->i2c->dev, "%s %d\n", __func__, ecount);
write_reg(ci, 0x0d, ecount >> 8);
write_reg(ci, 0x0e, ecount & 0xff);
write_block(ci, 0x11, ebuf, ecount);
diff --git a/drivers/staging/media/imx/Kconfig b/drivers/staging/media/imx/Kconfig
new file mode 100644
index 000000000000..7eff50bcea39
--- /dev/null
+++ b/drivers/staging/media/imx/Kconfig
@@ -0,0 +1,21 @@
+config VIDEO_IMX_MEDIA
+ tristate "i.MX5/6 V4L2 media core driver"
+ depends on MEDIA_CONTROLLER && VIDEO_V4L2 && ARCH_MXC && IMX_IPUV3_CORE
+ select V4L2_FWNODE
+ ---help---
+ Say yes here to enable support for the video4linux media controller
+ driver for the i.MX5/6 SOC.
+
+if VIDEO_IMX_MEDIA
+menu "i.MX5/6 Media Sub devices"
+
+config VIDEO_IMX_CSI
+ tristate "i.MX5/6 Camera Sensor Interface driver"
+ depends on VIDEO_IMX_MEDIA && VIDEO_DEV && I2C
+ select VIDEOBUF2_DMA_CONTIG
+ default y
+ ---help---
+ A video4linux camera sensor interface driver for i.MX5/6.
+
+endmenu
+endif
diff --git a/drivers/staging/media/imx/Makefile b/drivers/staging/media/imx/Makefile
new file mode 100644
index 000000000000..3569625b6305
--- /dev/null
+++ b/drivers/staging/media/imx/Makefile
@@ -0,0 +1,12 @@
+imx-media-objs := imx-media-dev.o imx-media-internal-sd.o imx-media-of.o
+imx-media-common-objs := imx-media-utils.o imx-media-fim.o
+imx-media-ic-objs := imx-ic-common.o imx-ic-prp.o imx-ic-prpencvf.o
+
+obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media.o
+obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-common.o
+obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-capture.o
+obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-vdic.o
+obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-ic.o
+
+obj-$(CONFIG_VIDEO_IMX_CSI) += imx-media-csi.o
+obj-$(CONFIG_VIDEO_IMX_CSI) += imx6-mipi-csi2.o
diff --git a/drivers/staging/media/imx/TODO b/drivers/staging/media/imx/TODO
new file mode 100644
index 000000000000..0bee3132b26f
--- /dev/null
+++ b/drivers/staging/media/imx/TODO
@@ -0,0 +1,23 @@
+
+- Clean up and move the ov5642 subdev driver to drivers/media/i2c, or
+ merge support for OV5642 into drivers/media/i2c/ov5640.c, and create
+ the binding docs for it.
+
+- The Frame Interval Monitor could be exported to v4l2-core for
+ general use.
+
+- At driver load time, the device-tree node that is the original source
+ (the "sensor") is parsed to record its media bus configuration, and
+ this info is required in imx-media-csi.c to set up the CSI.
+ Laurent Pinchart argues that the CSI subdev should instead call its
+ neighbor's g_mbus_config op (which should be propagated if necessary)
+ to get this info. However, Hans Verkuil is planning to remove the
+ g_mbus_config op. For now this driver uses the parsed DT mbus config
+ until the issue is resolved.
+
+- This media driver supports inheriting V4L2 controls by the
+ video capture devices from the subdevices in the capture device's
+ pipeline. The controls for each capture device are updated in the
+ link_notify callback when the pipeline is modified. It should be
+ decided whether this feature is useful enough to make it generally
+ available by exporting it to v4l2-core.
diff --git a/drivers/staging/media/imx/imx-ic-common.c b/drivers/staging/media/imx/imx-ic-common.c
new file mode 100644
index 000000000000..cfdd4900a3be
--- /dev/null
+++ b/drivers/staging/media/imx/imx-ic-common.c
@@ -0,0 +1,113 @@
+/*
+ * V4L2 Image Converter Subdev for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2014-2016 Mentor Graphics Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include "imx-media.h"
+#include "imx-ic.h"
+
+#define IC_TASK_PRP IC_NUM_TASKS
+#define IC_NUM_OPS (IC_NUM_TASKS + 1)
+
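+/*
+ * One ops slot per IC task, plus one extra slot (IC_TASK_PRP) for the
+ * PRP routing subdev that feeds the encode and viewfinder tasks; any
+ * task without an entry here stays NULL.
+ */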
+static struct imx_ic_ops *ic_ops[IC_NUM_OPS] = {
+ [IC_TASK_PRP] = &imx_ic_prp_ops,
+ [IC_TASK_ENCODER] = &imx_ic_prpencvf_ops,
+ [IC_TASK_VIEWFINDER] = &imx_ic_prpencvf_ops,
+};
+
+static int imx_ic_probe(struct platform_device *pdev)
+{
+ struct imx_media_internal_sd_platformdata *pdata;
+ struct imx_ic_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, &priv->sd);
+ priv->dev = &pdev->dev;
+
+ /* get our ipu_id, grp_id and IC task id */
+ pdata = priv->dev->platform_data;
+ priv->ipu_id = pdata->ipu_id;
+ switch (pdata->grp_id) {
+ case IMX_MEDIA_GRP_ID_IC_PRP:
+ priv->task_id = IC_TASK_PRP;
+ break;
+ case IMX_MEDIA_GRP_ID_IC_PRPENC:
+ priv->task_id = IC_TASK_ENCODER;
+ break;
+ case IMX_MEDIA_GRP_ID_IC_PRPVF:
+ priv->task_id = IC_TASK_VIEWFINDER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ v4l2_subdev_init(&priv->sd, ic_ops[priv->task_id]->subdev_ops);
+ v4l2_set_subdevdata(&priv->sd, priv);
+ priv->sd.internal_ops = ic_ops[priv->task_id]->internal_ops;
+ priv->sd.entity.ops = ic_ops[priv->task_id]->entity_ops;
+ priv->sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
+ priv->sd.dev = &pdev->dev;
+ priv->sd.owner = THIS_MODULE;
+ priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ priv->sd.grp_id = pdata->grp_id;
+ strncpy(priv->sd.name, pdata->sd_name, sizeof(priv->sd.name));
+
+ ret = ic_ops[priv->task_id]->init(priv);
+ if (ret)
+ return ret;
+
+ ret = v4l2_async_register_subdev(&priv->sd);
+ if (ret)
+ ic_ops[priv->task_id]->remove(priv);
+
+ return ret;
+}
+
+static int imx_ic_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct imx_ic_priv *priv = container_of(sd, struct imx_ic_priv, sd);
+
+ v4l2_info(sd, "Removing\n");
+
+ ic_ops[priv->task_id]->remove(priv);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+
+ return 0;
+}
+
+static const struct platform_device_id imx_ic_ids[] = {
+ { .name = "imx-ipuv3-ic" },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, imx_ic_ids);
+
+static struct platform_driver imx_ic_driver = {
+ .probe = imx_ic_probe,
+ .remove = imx_ic_remove,
+ .id_table = imx_ic_ids,
+ .driver = {
+ .name = "imx-ipuv3-ic",
+ },
+};
+module_platform_driver(imx_ic_driver);
+
+MODULE_DESCRIPTION("i.MX IC subdev driver");
+MODULE_AUTHOR("Steve Longerbeam <[email protected]>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx-ipuv3-ic");
diff --git a/drivers/staging/media/imx/imx-ic-prp.c b/drivers/staging/media/imx/imx-ic-prp.c
new file mode 100644
index 000000000000..c2bb5ef2acb4
--- /dev/null
+++ b/drivers/staging/media/imx/imx-ic-prp.c
@@ -0,0 +1,518 @@
+/*
+ * V4L2 Capture IC Preprocess Subdev for Freescale i.MX5/6 SOC
+ *
+ * This subdevice handles capture of video frames from the CSI or VDIC,
+ * which are routed directly to the Image Converter preprocess tasks,
+ * for resizing, colorspace conversion, and rotation.
+ *
+ * Copyright (c) 2012-2017 Mentor Graphics Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/imx.h>
+#include "imx-media.h"
+#include "imx-ic.h"
+
+/*
+ * Min/Max supported width and heights.
+ */
+#define MIN_W 176
+#define MIN_H 144
+#define MAX_W 4096
+#define MAX_H 4096
+#define W_ALIGN 4 /* multiple of 16 pixels */
+#define H_ALIGN 1 /* multiple of 2 lines */
+#define S_ALIGN 1 /* multiple of 2 */
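+/*
+ * The alignment values are passed to v4l_bound_align_image() and are
+ * log2 of the actual alignment, e.g. W_ALIGN of 4 means a multiple of
+ * 16 pixels.
+ */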
+
+struct prp_priv {
+ struct imx_media_dev *md;
+ struct imx_ic_priv *ic_priv;
+ struct media_pad pad[PRP_NUM_PADS];
+
+ /* lock to protect all members below */
+ struct mutex lock;
+
+ /* IPU units we require */
+ struct ipu_soc *ipu;
+
+ struct v4l2_subdev *src_sd;
+ struct v4l2_subdev *sink_sd_prpenc;
+ struct v4l2_subdev *sink_sd_prpvf;
+
+ /* the CSI id at link validate */
+ int csi_id;
+
+ struct v4l2_mbus_framefmt format_mbus;
+ struct v4l2_fract frame_interval;
+
+ int stream_count;
+};
+
+static inline struct prp_priv *sd_to_priv(struct v4l2_subdev *sd)
+{
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+
+ return ic_priv->prp_priv;
+}
+
+static int prp_start(struct prp_priv *priv)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ bool src_is_vdic;
+
+ priv->ipu = priv->md->ipu[ic_priv->ipu_id];
+
+ /* set IC to receive from CSI or VDI depending on source */
+ src_is_vdic = !!(priv->src_sd->grp_id & IMX_MEDIA_GRP_ID_VDIC);
+
+ ipu_set_ic_src_mux(priv->ipu, priv->csi_id, src_is_vdic);
+
+ return 0;
+}
+
+static void prp_stop(struct prp_priv *priv)
+{
+}
+
+static struct v4l2_mbus_framefmt *
+__prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&ic_priv->sd, cfg, pad);
+ else
+ return &priv->format_mbus;
+}
+
+/*
+ * V4L2 subdev operations.
+ */
+
+static int prp_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ struct v4l2_mbus_framefmt *infmt;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ switch (code->pad) {
+ case PRP_SINK_PAD:
+ ret = imx_media_enum_ipu_format(&code->code, code->index,
+ CS_SEL_ANY);
+ break;
+ case PRP_SRC_PAD_PRPENC:
+ case PRP_SRC_PAD_PRPVF:
+ if (code->index != 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ infmt = __prp_get_fmt(priv, cfg, PRP_SINK_PAD, code->which);
+ code->code = infmt->code;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= PRP_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ fmt = __prp_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ sdformat->format = *fmt;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ struct v4l2_mbus_framefmt *fmt, *infmt;
+ const struct imx_media_pixfmt *cc;
+ int ret = 0;
+ u32 code;
+
+ if (sdformat->pad >= PRP_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ infmt = __prp_get_fmt(priv, cfg, PRP_SINK_PAD, sdformat->which);
+
+ switch (sdformat->pad) {
+ case PRP_SINK_PAD:
+ v4l_bound_align_image(&sdformat->format.width, MIN_W, MAX_W,
+ W_ALIGN, &sdformat->format.height,
+ MIN_H, MAX_H, H_ALIGN, S_ALIGN);
+
+ cc = imx_media_find_ipu_format(sdformat->format.code,
+ CS_SEL_ANY);
+ if (!cc) {
+ imx_media_enum_ipu_format(&code, 0, CS_SEL_ANY);
+ cc = imx_media_find_ipu_format(code, CS_SEL_ANY);
+ sdformat->format.code = cc->codes[0];
+ }
+
+ imx_media_fill_default_mbus_fields(&sdformat->format, infmt,
+ true);
+ break;
+ case PRP_SRC_PAD_PRPENC:
+ case PRP_SRC_PAD_PRPVF:
+ /* Output pads mirror input pad */
+ sdformat->format = *infmt;
+ break;
+ }
+
+ fmt = __prp_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ *fmt = sdformat->format;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
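+/*
+ * Track the single source link and the PRPENC/PRPVF sink links. A VDIC
+ * source and an enabled link to the PRPENC sink are mutually exclusive:
+ * frames routed through the VDIC are only handled by the VF task.
+ */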
+static int prp_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+ struct prp_priv *priv = ic_priv->prp_priv;
+ struct v4l2_subdev *remote_sd;
+ int ret = 0;
+
+ dev_dbg(ic_priv->dev, "link setup %s -> %s", remote->entity->name,
+ local->entity->name);
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ mutex_lock(&priv->lock);
+
+ if (local->flags & MEDIA_PAD_FL_SINK) {
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->src_sd) {
+ ret = -EBUSY;
+ goto out;
+ }
+ if (priv->sink_sd_prpenc && (remote_sd->grp_id &
+ IMX_MEDIA_GRP_ID_VDIC)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ priv->src_sd = remote_sd;
+ } else {
+ priv->src_sd = NULL;
+ }
+
+ goto out;
+ }
+
+ /* this is a source pad */
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ switch (local->index) {
+ case PRP_SRC_PAD_PRPENC:
+ if (priv->sink_sd_prpenc) {
+ ret = -EBUSY;
+ goto out;
+ }
+ if (priv->src_sd && (priv->src_sd->grp_id &
+ IMX_MEDIA_GRP_ID_VDIC)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ priv->sink_sd_prpenc = remote_sd;
+ break;
+ case PRP_SRC_PAD_PRPVF:
+ if (priv->sink_sd_prpvf) {
+ ret = -EBUSY;
+ goto out;
+ }
+ priv->sink_sd_prpvf = remote_sd;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ } else {
+ switch (local->index) {
+ case PRP_SRC_PAD_PRPENC:
+ priv->sink_sd_prpenc = NULL;
+ break;
+ case PRP_SRC_PAD_PRPVF:
+ priv->sink_sd_prpvf = NULL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ }
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+ struct prp_priv *priv = ic_priv->prp_priv;
+ struct imx_media_subdev *csi;
+ int ret;
+
+ ret = v4l2_subdev_link_validate_default(sd, link,
+ source_fmt, sink_fmt);
+ if (ret)
+ return ret;
+
+ csi = imx_media_find_upstream_subdev(priv->md, &ic_priv->sd.entity,
+ IMX_MEDIA_GRP_ID_CSI);
+ if (IS_ERR(csi))
+ csi = NULL;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->src_sd->grp_id & IMX_MEDIA_GRP_ID_VDIC) {
+ /*
+ * the ->PRPENC link cannot be enabled if the source
+ * is the VDIC
+ */
+ if (priv->sink_sd_prpenc)
+ ret = -EINVAL;
+ goto out;
+ } else {
+ /* the source is a CSI */
+ if (!csi) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (csi) {
+ switch (csi->sd->grp_id) {
+ case IMX_MEDIA_GRP_ID_CSI0:
+ priv->csi_id = 0;
+ break;
+ case IMX_MEDIA_GRP_ID_CSI1:
+ priv->csi_id = 1;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ } else {
+ priv->csi_id = 0;
+ }
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+ struct prp_priv *priv = ic_priv->prp_priv;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ if (!priv->src_sd || (!priv->sink_sd_prpenc && !priv->sink_sd_prpvf)) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ /*
+ * enable/disable streaming only if stream_count is
+ * going from 0 to 1 / 1 to 0.
+ */
+ if (priv->stream_count != !enable)
+ goto update_count;
+
+ dev_dbg(ic_priv->dev, "stream %s\n", enable ? "ON" : "OFF");
+
+ if (enable)
+ ret = prp_start(priv);
+ else
+ prp_stop(priv);
+ if (ret)
+ goto out;
+
+ /* start/stop upstream */
+ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, enable);
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+ if (ret) {
+ if (enable)
+ prp_stop(priv);
+ goto out;
+ }
+
+update_count:
+ priv->stream_count += enable ? 1 : -1;
+ if (priv->stream_count < 0)
+ priv->stream_count = 0;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+
+ if (fi->pad >= PRP_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+ fi->interval = priv->frame_interval;
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int prp_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+
+ if (fi->pad >= PRP_NUM_PADS)
+ return -EINVAL;
+
+ /* No limits on frame interval */
+ mutex_lock(&priv->lock);
+ priv->frame_interval = fi->interval;
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+/*
+ * retrieve our pads parsed from the OF graph by the media device
+ */
+static int prp_registered(struct v4l2_subdev *sd)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ int i, ret;
+ u32 code;
+
+ /* get media device */
+ priv->md = dev_get_drvdata(sd->v4l2_dev->dev);
+
+ for (i = 0; i < PRP_NUM_PADS; i++) {
+ priv->pad[i].flags = (i == PRP_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
+
+ /* init default frame interval */
+ priv->frame_interval.numerator = 1;
+ priv->frame_interval.denominator = 30;
+
+ /* set a default mbus format */
+ imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
+ ret = imx_media_init_mbus_fmt(&priv->format_mbus, 640, 480, code,
+ V4L2_FIELD_NONE, NULL);
+ if (ret)
+ return ret;
+
+ return media_entity_pads_init(&sd->entity, PRP_NUM_PADS, priv->pad);
+}
+
+static const struct v4l2_subdev_pad_ops prp_pad_ops = {
+ .enum_mbus_code = prp_enum_mbus_code,
+ .get_fmt = prp_get_fmt,
+ .set_fmt = prp_set_fmt,
+ .link_validate = prp_link_validate,
+};
+
+static const struct v4l2_subdev_video_ops prp_video_ops = {
+ .g_frame_interval = prp_g_frame_interval,
+ .s_frame_interval = prp_s_frame_interval,
+ .s_stream = prp_s_stream,
+};
+
+static const struct media_entity_operations prp_entity_ops = {
+ .link_setup = prp_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_ops prp_subdev_ops = {
+ .video = &prp_video_ops,
+ .pad = &prp_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops prp_internal_ops = {
+ .registered = prp_registered,
+};
+
+static int prp_init(struct imx_ic_priv *ic_priv)
+{
+ struct prp_priv *priv;
+
+ priv = devm_kzalloc(ic_priv->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->lock);
+ ic_priv->prp_priv = priv;
+ priv->ic_priv = ic_priv;
+
+ return 0;
+}
+
+static void prp_remove(struct imx_ic_priv *ic_priv)
+{
+ struct prp_priv *priv = ic_priv->prp_priv;
+
+ mutex_destroy(&priv->lock);
+}
+
+struct imx_ic_ops imx_ic_prp_ops = {
+ .subdev_ops = &prp_subdev_ops,
+ .internal_ops = &prp_internal_ops,
+ .entity_ops = &prp_entity_ops,
+ .init = prp_init,
+ .remove = prp_remove,
+};
diff --git a/drivers/staging/media/imx/imx-ic-prpencvf.c b/drivers/staging/media/imx/imx-ic-prpencvf.c
new file mode 100644
index 000000000000..ed363fe3b3d0
--- /dev/null
+++ b/drivers/staging/media/imx/imx-ic-prpencvf.c
@@ -0,0 +1,1309 @@
+/*
+ * V4L2 Capture IC Preprocess Subdev for Freescale i.MX5/6 SOC
+ *
+ * This subdevice handles capture of video frames from the CSI or VDIC,
+ * which are routed directly to the Image Converter preprocess tasks,
+ * for resizing, colorspace conversion, and rotation.
+ *
+ * Copyright (c) 2012-2017 Mentor Graphics Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/imx.h>
+#include "imx-media.h"
+#include "imx-ic.h"
+
+/*
+ * Min/Max supported width and heights.
+ *
+ * We allow planar output, so we have to align width at the source pad
+ * by 16 pixels to meet IDMAC alignment requirements for possible planar
+ * output.
+ *
+ * TODO: move this into pad format negotiation, if capture device
+ * has not requested a planar format, we should allow 8 pixel
+ * alignment at the source pad.
+ */
+#define MIN_W_SINK 176
+#define MIN_H_SINK 144
+#define MAX_W_SINK 4096
+#define MAX_H_SINK 4096
+#define W_ALIGN_SINK 3 /* multiple of 8 pixels */
+#define H_ALIGN_SINK 1 /* multiple of 2 lines */
+
+#define MAX_W_SRC 1024
+#define MAX_H_SRC 1024
+#define W_ALIGN_SRC 4 /* multiple of 16 pixels */
+#define H_ALIGN_SRC 1 /* multiple of 2 lines */
+
+#define S_ALIGN 1 /* multiple of 2 */
+
+struct prp_priv {
+ struct imx_media_dev *md;
+ struct imx_ic_priv *ic_priv;
+ struct media_pad pad[PRPENCVF_NUM_PADS];
+ /* the video device at output pad */
+ struct imx_media_video_dev *vdev;
+
+ /* lock to protect all members below */
+ struct mutex lock;
+
+ /* IPU units we require */
+ struct ipu_soc *ipu;
+ struct ipu_ic *ic;
+ struct ipuv3_channel *out_ch;
+ struct ipuv3_channel *rot_in_ch;
+ struct ipuv3_channel *rot_out_ch;
+
+ /* active vb2 buffers to send to video dev sink */
+ struct imx_media_buffer *active_vb2_buf[2];
+ struct imx_media_dma_buf underrun_buf;
+
+ int ipu_buf_num; /* ipu double buffer index: 0-1 */
+
+ /* the sink for the captured frames */
+ struct media_entity *sink;
+ /* the source subdev */
+ struct v4l2_subdev *src_sd;
+
+ struct v4l2_mbus_framefmt format_mbus[PRPENCVF_NUM_PADS];
+ const struct imx_media_pixfmt *cc[PRPENCVF_NUM_PADS];
+ struct v4l2_fract frame_interval;
+
+ struct imx_media_dma_buf rot_buf[2];
+
+ /* controls */
+ struct v4l2_ctrl_handler ctrl_hdlr;
+ int rotation; /* degrees */
+ bool hflip;
+ bool vflip;
+
+ /* derived from rotation, hflip, vflip controls */
+ enum ipu_rotate_mode rot_mode;
+
+ spinlock_t irqlock; /* protect eof_irq handler */
+
+ struct timer_list eof_timeout_timer;
+ int eof_irq;
+ int nfb4eof_irq;
+
+ int stream_count;
+ bool last_eof; /* waiting for last EOF at stream off */
+ bool nfb4eof; /* NFB4EOF encountered during streaming */
+ struct completion last_eof_comp;
+};
+
+static const struct prp_channels {
+ u32 out_ch;
+ u32 rot_in_ch;
+ u32 rot_out_ch;
+} prp_channel[] = {
+ [IC_TASK_ENCODER] = {
+ .out_ch = IPUV3_CHANNEL_IC_PRP_ENC_MEM,
+ .rot_in_ch = IPUV3_CHANNEL_MEM_ROT_ENC,
+ .rot_out_ch = IPUV3_CHANNEL_ROT_ENC_MEM,
+ },
+ [IC_TASK_VIEWFINDER] = {
+ .out_ch = IPUV3_CHANNEL_IC_PRP_VF_MEM,
+ .rot_in_ch = IPUV3_CHANNEL_MEM_ROT_VF,
+ .rot_out_ch = IPUV3_CHANNEL_ROT_VF_MEM,
+ },
+};
+
+static inline struct prp_priv *sd_to_priv(struct v4l2_subdev *sd)
+{
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+
+ return ic_priv->task_priv;
+}
+
+static void prp_put_ipu_resources(struct prp_priv *priv)
+{
+ if (!IS_ERR_OR_NULL(priv->ic))
+ ipu_ic_put(priv->ic);
+ priv->ic = NULL;
+
+ if (!IS_ERR_OR_NULL(priv->out_ch))
+ ipu_idmac_put(priv->out_ch);
+ priv->out_ch = NULL;
+
+ if (!IS_ERR_OR_NULL(priv->rot_in_ch))
+ ipu_idmac_put(priv->rot_in_ch);
+ priv->rot_in_ch = NULL;
+
+ if (!IS_ERR_OR_NULL(priv->rot_out_ch))
+ ipu_idmac_put(priv->rot_out_ch);
+ priv->rot_out_ch = NULL;
+}
+
+static int prp_get_ipu_resources(struct prp_priv *priv)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ int ret, task = ic_priv->task_id;
+
+ priv->ipu = priv->md->ipu[ic_priv->ipu_id];
+
+ priv->ic = ipu_ic_get(priv->ipu, task);
+ if (IS_ERR(priv->ic)) {
+ v4l2_err(&ic_priv->sd, "failed to get IC\n");
+ ret = PTR_ERR(priv->ic);
+ goto out;
+ }
+
+ priv->out_ch = ipu_idmac_get(priv->ipu,
+ prp_channel[task].out_ch);
+ if (IS_ERR(priv->out_ch)) {
+ v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n",
+ prp_channel[task].out_ch);
+ ret = PTR_ERR(priv->out_ch);
+ goto out;
+ }
+
+ priv->rot_in_ch = ipu_idmac_get(priv->ipu,
+ prp_channel[task].rot_in_ch);
+ if (IS_ERR(priv->rot_in_ch)) {
+ v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n",
+ prp_channel[task].rot_in_ch);
+ ret = PTR_ERR(priv->rot_in_ch);
+ goto out;
+ }
+
+ priv->rot_out_ch = ipu_idmac_get(priv->ipu,
+ prp_channel[task].rot_out_ch);
+ if (IS_ERR(priv->rot_out_ch)) {
+ v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n",
+ prp_channel[task].rot_out_ch);
+ ret = PTR_ERR(priv->rot_out_ch);
+ goto out;
+ }
+
+ return 0;
+out:
+ prp_put_ipu_resources(priv);
+ return ret;
+}
+
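+/*
+ * Complete the active vb2 buffer (if any) for the finished IPU buffer
+ * slot, then point that slot at the next queued buffer, or at the
+ * internal underrun buffer when the queue has run dry, so the channel
+ * never writes into a buffer that has already been returned.
+ */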
+static void prp_vb2_buf_done(struct prp_priv *priv, struct ipuv3_channel *ch)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_media_buffer *done, *next;
+ struct vb2_buffer *vb;
+ dma_addr_t phys;
+
+ done = priv->active_vb2_buf[priv->ipu_buf_num];
+ if (done) {
+ vb = &done->vbuf.vb2_buf;
+ vb->timestamp = ktime_get_ns();
+ vb2_buffer_done(vb, priv->nfb4eof ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ }
+
+ priv->nfb4eof = false;
+
+ /* get next queued buffer */
+ next = imx_media_capture_device_next_buf(vdev);
+ if (next) {
+ phys = vb2_dma_contig_plane_dma_addr(&next->vbuf.vb2_buf, 0);
+ priv->active_vb2_buf[priv->ipu_buf_num] = next;
+ } else {
+ phys = priv->underrun_buf.phys;
+ priv->active_vb2_buf[priv->ipu_buf_num] = NULL;
+ }
+
+ if (ipu_idmac_buffer_is_ready(ch, priv->ipu_buf_num))
+ ipu_idmac_clear_buffer(ch, priv->ipu_buf_num);
+
+ ipu_cpmem_set_buffer(ch, priv->ipu_buf_num, phys);
+}
+
+static irqreturn_t prp_eof_interrupt(int irq, void *dev_id)
+{
+ struct prp_priv *priv = dev_id;
+ struct ipuv3_channel *channel;
+
+ spin_lock(&priv->irqlock);
+
+ if (priv->last_eof) {
+ complete(&priv->last_eof_comp);
+ priv->last_eof = false;
+ goto unlock;
+ }
+
+ channel = (ipu_rot_mode_is_irt(priv->rot_mode)) ?
+ priv->rot_out_ch : priv->out_ch;
+
+ prp_vb2_buf_done(priv, channel);
+
+ /* select new IPU buf */
+ ipu_idmac_select_buffer(channel, priv->ipu_buf_num);
+ /* toggle IPU double-buffer index */
+ priv->ipu_buf_num ^= 1;
+
+ /* bump the EOF timeout timer */
+ mod_timer(&priv->eof_timeout_timer,
+ jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+
+unlock:
+ spin_unlock(&priv->irqlock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t prp_nfb4eof_interrupt(int irq, void *dev_id)
+{
+ struct prp_priv *priv = dev_id;
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+
+ spin_lock(&priv->irqlock);
+
+ /*
+ * this is not an unrecoverable error; just mark
+ * the next captured frame with the vb2 error flag.
+ */
+ priv->nfb4eof = true;
+
+ v4l2_err(&ic_priv->sd, "NFB4EOF\n");
+
+ spin_unlock(&priv->irqlock);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * EOF timeout timer function. This is an unrecoverable condition
+ * without a stream restart.
+ */
+static void prp_eof_timeout(unsigned long data)
+{
+ struct prp_priv *priv = (struct prp_priv *)data;
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+
+ v4l2_err(&ic_priv->sd, "EOF timeout\n");
+
+ /* signal a fatal error to capture device */
+ imx_media_capture_device_error(vdev);
+}
+
+static void prp_setup_vb2_buf(struct prp_priv *priv, dma_addr_t *phys)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_media_buffer *buf;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ buf = imx_media_capture_device_next_buf(vdev);
+ if (buf) {
+ priv->active_vb2_buf[i] = buf;
+ phys[i] = vb2_dma_contig_plane_dma_addr(
+ &buf->vbuf.vb2_buf, 0);
+ } else {
+ priv->active_vb2_buf[i] = NULL;
+ phys[i] = priv->underrun_buf.phys;
+ }
+ }
+}
+
+static void prp_unsetup_vb2_buf(struct prp_priv *priv,
+ enum vb2_buffer_state return_status)
+{
+ struct imx_media_buffer *buf;
+ int i;
+
+ /* return any remaining active frames with return_status */
+ for (i = 0; i < 2; i++) {
+ buf = priv->active_vb2_buf[i];
+ if (buf) {
+ struct vb2_buffer *vb = &buf->vbuf.vb2_buf;
+
+ vb->timestamp = ktime_get_ns();
+ vb2_buffer_done(vb, return_status);
+ }
+ }
+}
+
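+/*
+ * Program one IDMAC channel from the capture format: set up the image,
+ * choose the burst size (8 for the rotator channels and for widths not
+ * a multiple of 16, otherwise 16), and enable double buffering.
+ */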
+static int prp_setup_channel(struct prp_priv *priv,
+ struct ipuv3_channel *channel,
+ enum ipu_rotate_mode rot_mode,
+ dma_addr_t addr0, dma_addr_t addr1,
+ bool rot_swap_width_height)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ const struct imx_media_pixfmt *outcc;
+ struct v4l2_mbus_framefmt *infmt;
+ unsigned int burst_size;
+ struct ipu_image image;
+ int ret;
+
+ infmt = &priv->format_mbus[PRPENCVF_SINK_PAD];
+ outcc = vdev->cc;
+
+ ipu_cpmem_zero(channel);
+
+ memset(&image, 0, sizeof(image));
+ image.pix = vdev->fmt.fmt.pix;
+ image.rect.width = image.pix.width;
+ image.rect.height = image.pix.height;
+
+ if (rot_swap_width_height) {
+ swap(image.pix.width, image.pix.height);
+ swap(image.rect.width, image.rect.height);
+ /* recalc stride using swapped width */
+ image.pix.bytesperline = outcc->planar ?
+ image.pix.width :
+ (image.pix.width * outcc->bpp) >> 3;
+ }
+
+ image.phys0 = addr0;
+ image.phys1 = addr1;
+
+ ret = ipu_cpmem_set_image(channel, &image);
+ if (ret)
+ return ret;
+
+ if (channel == priv->rot_in_ch ||
+ channel == priv->rot_out_ch) {
+ burst_size = 8;
+ ipu_cpmem_set_block_mode(channel);
+ } else {
+ burst_size = (image.pix.width & 0xf) ? 8 : 16;
+ }
+
+ ipu_cpmem_set_burstsize(channel, burst_size);
+
+ if (rot_mode)
+ ipu_cpmem_set_rotation(channel, rot_mode);
+
+ if (image.pix.field == V4L2_FIELD_NONE &&
+ V4L2_FIELD_HAS_BOTH(infmt->field) &&
+ channel == priv->out_ch)
+ ipu_cpmem_interlaced_scan(channel, image.pix.bytesperline);
+
+ ret = ipu_ic_task_idma_init(priv->ic, channel,
+ image.pix.width, image.pix.height,
+ burst_size, rot_mode);
+ if (ret)
+ return ret;
+
+ ipu_cpmem_set_axi_id(channel, 1);
+
+ ipu_idmac_set_double_buffer(channel, true);
+
+ return 0;
+}
+
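+/*
+ * With IRT rotation the IC task writes un-rotated frames into the
+ * intermediate rot_buf pair, and the linked rotator channels read from
+ * rot_buf and write the rotated result into the vb2/underrun buffers.
+ */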
+static int prp_setup_rotation(struct prp_priv *priv)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ const struct imx_media_pixfmt *outcc, *incc;
+ struct v4l2_mbus_framefmt *infmt;
+ struct v4l2_pix_format *outfmt;
+ dma_addr_t phys[2];
+ int ret;
+
+ infmt = &priv->format_mbus[PRPENCVF_SINK_PAD];
+ outfmt = &vdev->fmt.fmt.pix;
+ incc = priv->cc[PRPENCVF_SINK_PAD];
+ outcc = vdev->cc;
+
+ ret = imx_media_alloc_dma_buf(priv->md, &priv->rot_buf[0],
+ outfmt->sizeimage);
+ if (ret) {
+ v4l2_err(&ic_priv->sd, "failed to alloc rot_buf[0], %d\n", ret);
+ return ret;
+ }
+ ret = imx_media_alloc_dma_buf(priv->md, &priv->rot_buf[1],
+ outfmt->sizeimage);
+ if (ret) {
+ v4l2_err(&ic_priv->sd, "failed to alloc rot_buf[1], %d\n", ret);
+ goto free_rot0;
+ }
+
+ ret = ipu_ic_task_init(priv->ic,
+ infmt->width, infmt->height,
+ outfmt->height, outfmt->width,
+ incc->cs, outcc->cs);
+ if (ret) {
+ v4l2_err(&ic_priv->sd, "ipu_ic_task_init failed, %d\n", ret);
+ goto free_rot1;
+ }
+
+ /* init the IC-PRP-->MEM IDMAC channel */
+ ret = prp_setup_channel(priv, priv->out_ch, IPU_ROTATE_NONE,
+ priv->rot_buf[0].phys, priv->rot_buf[1].phys,
+ true);
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "prp_setup_channel(out_ch) failed, %d\n", ret);
+ goto free_rot1;
+ }
+
+ /* init the MEM-->IC-PRP ROT IDMAC channel */
+ ret = prp_setup_channel(priv, priv->rot_in_ch, priv->rot_mode,
+ priv->rot_buf[0].phys, priv->rot_buf[1].phys,
+ true);
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "prp_setup_channel(rot_in_ch) failed, %d\n", ret);
+ goto free_rot1;
+ }
+
+ prp_setup_vb2_buf(priv, phys);
+
+ /* init the destination IC-PRP ROT-->MEM IDMAC channel */
+ ret = prp_setup_channel(priv, priv->rot_out_ch, IPU_ROTATE_NONE,
+ phys[0], phys[1],
+ false);
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "prp_setup_channel(rot_out_ch) failed, %d\n", ret);
+ goto unsetup_vb2;
+ }
+
+ /* now link IC-PRP-->MEM to MEM-->IC-PRP ROT */
+ ipu_idmac_link(priv->out_ch, priv->rot_in_ch);
+
+ /* enable the IC */
+ ipu_ic_enable(priv->ic);
+
+ /* set buffers ready */
+ ipu_idmac_select_buffer(priv->out_ch, 0);
+ ipu_idmac_select_buffer(priv->out_ch, 1);
+ ipu_idmac_select_buffer(priv->rot_out_ch, 0);
+ ipu_idmac_select_buffer(priv->rot_out_ch, 1);
+
+ /* enable the channels */
+ ipu_idmac_enable_channel(priv->out_ch);
+ ipu_idmac_enable_channel(priv->rot_in_ch);
+ ipu_idmac_enable_channel(priv->rot_out_ch);
+
+ /* and finally enable the IC PRP task */
+ ipu_ic_task_enable(priv->ic);
+
+ return 0;
+
+unsetup_vb2:
+ prp_unsetup_vb2_buf(priv, VB2_BUF_STATE_QUEUED);
+free_rot1:
+ imx_media_free_dma_buf(priv->md, &priv->rot_buf[1]);
+free_rot0:
+ imx_media_free_dma_buf(priv->md, &priv->rot_buf[0]);
+ return ret;
+}
+
+static void prp_unsetup_rotation(struct prp_priv *priv)
+{
+ ipu_ic_task_disable(priv->ic);
+
+ ipu_idmac_disable_channel(priv->out_ch);
+ ipu_idmac_disable_channel(priv->rot_in_ch);
+ ipu_idmac_disable_channel(priv->rot_out_ch);
+
+ ipu_idmac_unlink(priv->out_ch, priv->rot_in_ch);
+
+ ipu_ic_disable(priv->ic);
+
+ imx_media_free_dma_buf(priv->md, &priv->rot_buf[0]);
+ imx_media_free_dma_buf(priv->md, &priv->rot_buf[1]);
+}
+
+static int prp_setup_norotation(struct prp_priv *priv)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ const struct imx_media_pixfmt *outcc, *incc;
+ struct v4l2_mbus_framefmt *infmt;
+ struct v4l2_pix_format *outfmt;
+ dma_addr_t phys[2];
+ int ret;
+
+ infmt = &priv->format_mbus[PRPENCVF_SINK_PAD];
+ outfmt = &vdev->fmt.fmt.pix;
+ incc = priv->cc[PRPENCVF_SINK_PAD];
+ outcc = vdev->cc;
+
+ ret = ipu_ic_task_init(priv->ic,
+ infmt->width, infmt->height,
+ outfmt->width, outfmt->height,
+ incc->cs, outcc->cs);
+ if (ret) {
+ v4l2_err(&ic_priv->sd, "ipu_ic_task_init failed, %d\n", ret);
+ return ret;
+ }
+
+ prp_setup_vb2_buf(priv, phys);
+
+ /* init the IC PRP-->MEM IDMAC channel */
+ ret = prp_setup_channel(priv, priv->out_ch, priv->rot_mode,
+ phys[0], phys[1], false);
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "prp_setup_channel(out_ch) failed, %d\n", ret);
+ goto unsetup_vb2;
+ }
+
+ ipu_cpmem_dump(priv->out_ch);
+ ipu_ic_dump(priv->ic);
+ ipu_dump(priv->ipu);
+
+ ipu_ic_enable(priv->ic);
+
+ /* set buffers ready */
+ ipu_idmac_select_buffer(priv->out_ch, 0);
+ ipu_idmac_select_buffer(priv->out_ch, 1);
+
+ /* enable the channels */
+ ipu_idmac_enable_channel(priv->out_ch);
+
+ /* enable the IC task */
+ ipu_ic_task_enable(priv->ic);
+
+ return 0;
+
+unsetup_vb2:
+ prp_unsetup_vb2_buf(priv, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void prp_unsetup_norotation(struct prp_priv *priv)
+{
+ ipu_ic_task_disable(priv->ic);
+ ipu_idmac_disable_channel(priv->out_ch);
+ ipu_ic_disable(priv->ic);
+}
+
+static void prp_unsetup(struct prp_priv *priv,
+ enum vb2_buffer_state state)
+{
+ if (ipu_rot_mode_is_irt(priv->rot_mode))
+ prp_unsetup_rotation(priv);
+ else
+ prp_unsetup_norotation(priv);
+
+ prp_unsetup_vb2_buf(priv, state);
+}
+
+static int prp_start(struct prp_priv *priv)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct v4l2_pix_format *outfmt;
+ int ret;
+
+ ret = prp_get_ipu_resources(priv);
+ if (ret)
+ return ret;
+
+ outfmt = &vdev->fmt.fmt.pix;
+
+ ret = imx_media_alloc_dma_buf(priv->md, &priv->underrun_buf,
+ outfmt->sizeimage);
+ if (ret)
+ goto out_put_ipu;
+
+ priv->ipu_buf_num = 0;
+
+ /* init EOF completion waitq */
+ init_completion(&priv->last_eof_comp);
+ priv->last_eof = false;
+ priv->nfb4eof = false;
+
+ if (ipu_rot_mode_is_irt(priv->rot_mode))
+ ret = prp_setup_rotation(priv);
+ else
+ ret = prp_setup_norotation(priv);
+ if (ret)
+ goto out_free_underrun;
+
+ priv->nfb4eof_irq = ipu_idmac_channel_irq(priv->ipu,
+ priv->out_ch,
+ IPU_IRQ_NFB4EOF);
+ ret = devm_request_irq(ic_priv->dev, priv->nfb4eof_irq,
+ prp_nfb4eof_interrupt, 0,
+ "imx-ic-prp-nfb4eof", priv);
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "Error registering NFB4EOF irq: %d\n", ret);
+ goto out_unsetup;
+ }
+
+ if (ipu_rot_mode_is_irt(priv->rot_mode))
+ priv->eof_irq = ipu_idmac_channel_irq(
+ priv->ipu, priv->rot_out_ch, IPU_IRQ_EOF);
+ else
+ priv->eof_irq = ipu_idmac_channel_irq(
+ priv->ipu, priv->out_ch, IPU_IRQ_EOF);
+
+ ret = devm_request_irq(ic_priv->dev, priv->eof_irq,
+ prp_eof_interrupt, 0,
+ "imx-ic-prp-eof", priv);
+ if (ret) {
+ v4l2_err(&ic_priv->sd,
+ "Error registering eof irq: %d\n", ret);
+ goto out_free_nfb4eof_irq;
+ }
+
+ /* start the EOF timeout timer */
+ mod_timer(&priv->eof_timeout_timer,
+ jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+
+ return 0;
+
+out_free_nfb4eof_irq:
+ devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
+out_unsetup:
+ prp_unsetup(priv, VB2_BUF_STATE_QUEUED);
+out_free_underrun:
+ imx_media_free_dma_buf(priv->md, &priv->underrun_buf);
+out_put_ipu:
+ prp_put_ipu_resources(priv);
+ return ret;
+}
+
+static void prp_stop(struct prp_priv *priv)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ unsigned long flags;
+ int ret;
+
+ /* mark next EOF interrupt as the last before stream off */
+ spin_lock_irqsave(&priv->irqlock, flags);
+ priv->last_eof = true;
+ spin_unlock_irqrestore(&priv->irqlock, flags);
+
+ /*
+ * and then wait for the interrupt handler to mark completion.
+ */
+ ret = wait_for_completion_timeout(
+ &priv->last_eof_comp,
+ msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+ if (ret == 0)
+ v4l2_warn(&ic_priv->sd, "wait last EOF timeout\n");
+
+ devm_free_irq(ic_priv->dev, priv->eof_irq, priv);
+ devm_free_irq(ic_priv->dev, priv->nfb4eof_irq, priv);
+
+ prp_unsetup(priv, VB2_BUF_STATE_ERROR);
+
+ imx_media_free_dma_buf(priv->md, &priv->underrun_buf);
+
+ /* cancel the EOF timeout timer */
+ del_timer_sync(&priv->eof_timeout_timer);
+
+ prp_put_ipu_resources(priv);
+}
+
+static struct v4l2_mbus_framefmt *
+__prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&ic_priv->sd, cfg, pad);
+ else
+ return &priv->format_mbus[pad];
+}
+
+/*
+ * Applies IC resizer and IDMAC alignment restrictions to output
+ * rectangle given the input rectangle, and depending on given
+ * rotation mode.
+ *
+ * The IC resizer cannot downsize more than 4:1. Note also that
+ * for 90 or 270 rotation, _both_ output width and height must
+ * be aligned by W_ALIGN_SRC, because the intermediate rotation
+ * buffer swaps output width/height, and the final output buffer
+ * does not.
+ *
+ * Returns true if the output rectangle was modified.
+ */
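+/*
+ * Illustrative example: with a 640x480 sink format and 90/270 rotation,
+ * the source width is bounded to [480/4, MAX_H_SRC] and the height to
+ * [640/4, MAX_W_SRC], both aligned to a multiple of 16 (W_ALIGN_SRC).
+ */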
+static bool prp_bound_align_output(struct v4l2_mbus_framefmt *outfmt,
+ struct v4l2_mbus_framefmt *infmt,
+ enum ipu_rotate_mode rot_mode)
+{
+ u32 orig_width = outfmt->width;
+ u32 orig_height = outfmt->height;
+
+ if (ipu_rot_mode_is_irt(rot_mode))
+ v4l_bound_align_image(&outfmt->width,
+ infmt->height / 4, MAX_H_SRC,
+ W_ALIGN_SRC,
+ &outfmt->height,
+ infmt->width / 4, MAX_W_SRC,
+ W_ALIGN_SRC, S_ALIGN);
+ else
+ v4l_bound_align_image(&outfmt->width,
+ infmt->width / 4, MAX_W_SRC,
+ W_ALIGN_SRC,
+ &outfmt->height,
+ infmt->height / 4, MAX_H_SRC,
+ H_ALIGN_SRC, S_ALIGN);
+
+ return outfmt->width != orig_width || outfmt->height != orig_height;
+}
+
+/*
+ * V4L2 subdev operations.
+ */
+
+static int prp_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad >= PRPENCVF_NUM_PADS)
+ return -EINVAL;
+
+ return imx_media_enum_ipu_format(&code->code, code->index, CS_SEL_ANY);
+}
+
+static int prp_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= PRPENCVF_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ fmt = __prp_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ sdformat->format = *fmt;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
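+/*
+ * Adjust a requested format: fall back to the first IPU format when the
+ * mbus code is unsupported, bound and align the sink pad size, and
+ * derive the source pad bounds and colorimetry from the sink pad format.
+ */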
+static void prp_try_fmt(struct prp_priv *priv,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat,
+ const struct imx_media_pixfmt **cc)
+{
+ struct v4l2_mbus_framefmt *infmt;
+
+ *cc = imx_media_find_ipu_format(sdformat->format.code, CS_SEL_ANY);
+ if (!*cc) {
+ u32 code;
+
+ imx_media_enum_ipu_format(&code, 0, CS_SEL_ANY);
+ *cc = imx_media_find_ipu_format(code, CS_SEL_ANY);
+ sdformat->format.code = (*cc)->codes[0];
+ }
+
+ infmt = __prp_get_fmt(priv, cfg, PRPENCVF_SINK_PAD, sdformat->which);
+
+ if (sdformat->pad == PRPENCVF_SRC_PAD) {
+ if (sdformat->format.field != V4L2_FIELD_NONE)
+ sdformat->format.field = infmt->field;
+
+ prp_bound_align_output(&sdformat->format, infmt,
+ priv->rot_mode);
+
+ /* propagate colorimetry from sink */
+ sdformat->format.colorspace = infmt->colorspace;
+ sdformat->format.xfer_func = infmt->xfer_func;
+ sdformat->format.quantization = infmt->quantization;
+ sdformat->format.ycbcr_enc = infmt->ycbcr_enc;
+ } else {
+ v4l_bound_align_image(&sdformat->format.width,
+ MIN_W_SINK, MAX_W_SINK, W_ALIGN_SINK,
+ &sdformat->format.height,
+ MIN_H_SINK, MAX_H_SINK, H_ALIGN_SINK,
+ S_ALIGN);
+
+ imx_media_fill_default_mbus_fields(&sdformat->format, infmt,
+ true);
+ }
+}
+
+static int prp_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ struct imx_media_video_dev *vdev = priv->vdev;
+ const struct imx_media_pixfmt *cc;
+ struct v4l2_pix_format vdev_fmt;
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= PRPENCVF_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ prp_try_fmt(priv, cfg, sdformat, &cc);
+
+ fmt = __prp_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ *fmt = sdformat->format;
+
+ /* propagate a default format to source pad */
+ if (sdformat->pad == PRPENCVF_SINK_PAD) {
+ const struct imx_media_pixfmt *outcc;
+ struct v4l2_mbus_framefmt *outfmt;
+ struct v4l2_subdev_format format;
+
+ format.pad = PRPENCVF_SRC_PAD;
+ format.which = sdformat->which;
+ format.format = sdformat->format;
+ prp_try_fmt(priv, cfg, &format, &outcc);
+
+ outfmt = __prp_get_fmt(priv, cfg, PRPENCVF_SRC_PAD,
+ sdformat->which);
+ *outfmt = format.format;
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[PRPENCVF_SRC_PAD] = outcc;
+ }
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY)
+ goto out;
+
+ priv->cc[sdformat->pad] = cc;
+
+ /* propagate output pad format to capture device */
+ imx_media_mbus_fmt_to_pix_fmt(&vdev_fmt,
+ &priv->format_mbus[PRPENCVF_SRC_PAD],
+ priv->cc[PRPENCVF_SRC_PAD]);
+ mutex_unlock(&priv->lock);
+ imx_media_capture_device_set_format(vdev, &vdev_fmt);
+
+ return 0;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ struct v4l2_subdev_format format = {0};
+ const struct imx_media_pixfmt *cc;
+ int ret = 0;
+
+ if (fse->pad >= PRPENCVF_NUM_PADS || fse->index != 0)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ format.pad = fse->pad;
+ format.which = fse->which;
+ format.format.code = fse->code;
+ format.format.width = 1;
+ format.format.height = 1;
+ prp_try_fmt(priv, cfg, &format, &cc);
+ fse->min_width = format.format.width;
+ fse->min_height = format.format.height;
+
+ if (format.format.code != fse->code) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ format.format.code = fse->code;
+ format.format.width = -1;
+ format.format.height = -1;
+ prp_try_fmt(priv, cfg, &format, &cc);
+ fse->max_width = format.format.width;
+ fse->max_height = format.format.height;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+ struct prp_priv *priv = ic_priv->task_priv;
+ struct v4l2_subdev *remote_sd;
+ int ret = 0;
+
+ dev_dbg(ic_priv->dev, "link setup %s -> %s", remote->entity->name,
+ local->entity->name);
+
+ mutex_lock(&priv->lock);
+
+ if (local->flags & MEDIA_PAD_FL_SINK) {
+ if (!is_media_entity_v4l2_subdev(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->src_sd) {
+ ret = -EBUSY;
+ goto out;
+ }
+ priv->src_sd = remote_sd;
+ } else {
+ priv->src_sd = NULL;
+ }
+
+ goto out;
+ }
+
+ /* this is the source pad */
+
+ /* the remote must be the device node */
+ if (!is_media_entity_v4l2_video_device(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->sink) {
+ ret = -EBUSY;
+ goto out;
+ }
+ } else {
+ priv->sink = NULL;
+ goto out;
+ }
+
+ priv->sink = remote->entity;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct prp_priv *priv = container_of(ctrl->handler,
+ struct prp_priv, ctrl_hdlr);
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ enum ipu_rotate_mode rot_mode;
+ int rotation, ret = 0;
+ bool hflip, vflip;
+
+ mutex_lock(&priv->lock);
+
+ rotation = priv->rotation;
+ hflip = priv->hflip;
+ vflip = priv->vflip;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ hflip = (ctrl->val == 1);
+ break;
+ case V4L2_CID_VFLIP:
+ vflip = (ctrl->val == 1);
+ break;
+ case V4L2_CID_ROTATE:
+ rotation = ctrl->val;
+ break;
+ default:
+ v4l2_err(&ic_priv->sd, "Invalid control\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = ipu_degrees_to_rot_mode(&rot_mode, rotation, hflip, vflip);
+ if (ret)
+ goto out;
+
+ if (rot_mode != priv->rot_mode) {
+ struct v4l2_mbus_framefmt outfmt, infmt;
+
+ /* can't change rotation mid-streaming */
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ outfmt = priv->format_mbus[PRPENCVF_SRC_PAD];
+ infmt = priv->format_mbus[PRPENCVF_SINK_PAD];
+
+ if (prp_bound_align_output(&outfmt, &infmt, rot_mode)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->rot_mode = rot_mode;
+ priv->rotation = rotation;
+ priv->hflip = hflip;
+ priv->vflip = vflip;
+ }
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops prp_ctrl_ops = {
+ .s_ctrl = prp_s_ctrl,
+};
+
+static int prp_init_controls(struct prp_priv *priv)
+{
+ struct imx_ic_priv *ic_priv = priv->ic_priv;
+ struct v4l2_ctrl_handler *hdlr = &priv->ctrl_hdlr;
+ int ret;
+
+ v4l2_ctrl_handler_init(hdlr, 3);
+
+ v4l2_ctrl_new_std(hdlr, &prp_ctrl_ops, V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdlr, &prp_ctrl_ops, V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+ v4l2_ctrl_new_std(hdlr, &prp_ctrl_ops, V4L2_CID_ROTATE,
+ 0, 270, 90, 0);
+
+ ic_priv->sd.ctrl_handler = hdlr;
+
+ if (hdlr->error) {
+ ret = hdlr->error;
+ goto out_free;
+ }
+
+ v4l2_ctrl_handler_setup(hdlr);
+ return 0;
+
+out_free:
+ v4l2_ctrl_handler_free(hdlr);
+ return ret;
+}
+
+static int prp_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
+ struct prp_priv *priv = ic_priv->task_priv;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ if (!priv->src_sd || !priv->sink) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ /*
+ * enable/disable streaming only if stream_count is
+ * going from 0 to 1 / 1 to 0.
+ */
+ if (priv->stream_count != !enable)
+ goto update_count;
+
+ dev_dbg(ic_priv->dev, "stream %s\n", enable ? "ON" : "OFF");
+
+ if (enable)
+ ret = prp_start(priv);
+ else
+ prp_stop(priv);
+ if (ret)
+ goto out;
+
+ /* start/stop upstream */
+ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, enable);
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+ if (ret) {
+ if (enable)
+ prp_stop(priv);
+ goto out;
+ }
+
+update_count:
+ priv->stream_count += enable ? 1 : -1;
+ if (priv->stream_count < 0)
+ priv->stream_count = 0;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int prp_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+
+ if (fi->pad >= PRPENCVF_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+ fi->interval = priv->frame_interval;
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int prp_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+
+ if (fi->pad >= PRPENCVF_NUM_PADS)
+ return -EINVAL;
+
+ /* No limits on frame interval */
+ mutex_lock(&priv->lock);
+ priv->frame_interval = fi->interval;
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+/*
+ * retrieve our pads parsed from the OF graph by the media device
+ */
+static int prp_registered(struct v4l2_subdev *sd)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+ int i, ret;
+ u32 code;
+
+ /* get media device */
+ priv->md = dev_get_drvdata(sd->v4l2_dev->dev);
+
+ for (i = 0; i < PRPENCVF_NUM_PADS; i++) {
+ priv->pad[i].flags = (i == PRPENCVF_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ /* set a default mbus format */
+ imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
+ ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
+ 640, 480, code, V4L2_FIELD_NONE,
+ &priv->cc[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* init default frame interval */
+ priv->frame_interval.numerator = 1;
+ priv->frame_interval.denominator = 30;
+
+ ret = media_entity_pads_init(&sd->entity, PRPENCVF_NUM_PADS,
+ priv->pad);
+ if (ret)
+ return ret;
+
+ ret = imx_media_capture_device_register(priv->vdev);
+ if (ret)
+ return ret;
+
+ ret = imx_media_add_video_device(priv->md, priv->vdev);
+ if (ret)
+ goto unreg;
+
+ ret = prp_init_controls(priv);
+ if (ret)
+ goto unreg;
+
+ return 0;
+unreg:
+ imx_media_capture_device_unregister(priv->vdev);
+ return ret;
+}
+
+static void prp_unregistered(struct v4l2_subdev *sd)
+{
+ struct prp_priv *priv = sd_to_priv(sd);
+
+ imx_media_capture_device_unregister(priv->vdev);
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+}
+
+static const struct v4l2_subdev_pad_ops prp_pad_ops = {
+ .enum_mbus_code = prp_enum_mbus_code,
+ .enum_frame_size = prp_enum_frame_size,
+ .get_fmt = prp_get_fmt,
+ .set_fmt = prp_set_fmt,
+};
+
+static const struct v4l2_subdev_video_ops prp_video_ops = {
+ .g_frame_interval = prp_g_frame_interval,
+ .s_frame_interval = prp_s_frame_interval,
+ .s_stream = prp_s_stream,
+};
+
+static const struct media_entity_operations prp_entity_ops = {
+ .link_setup = prp_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_ops prp_subdev_ops = {
+ .video = &prp_video_ops,
+ .pad = &prp_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops prp_internal_ops = {
+ .registered = prp_registered,
+ .unregistered = prp_unregistered,
+};
+
+static int prp_init(struct imx_ic_priv *ic_priv)
+{
+ struct prp_priv *priv;
+
+ priv = devm_kzalloc(ic_priv->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ic_priv->task_priv = priv;
+ priv->ic_priv = ic_priv;
+
+ spin_lock_init(&priv->irqlock);
+ init_timer(&priv->eof_timeout_timer);
+ priv->eof_timeout_timer.data = (unsigned long)priv;
+ priv->eof_timeout_timer.function = prp_eof_timeout;
+
+ priv->vdev = imx_media_capture_device_init(&ic_priv->sd,
+ PRPENCVF_SRC_PAD);
+ if (IS_ERR(priv->vdev))
+ return PTR_ERR(priv->vdev);
+
+ mutex_init(&priv->lock);
+
+ return 0;
+}
+
+static void prp_remove(struct imx_ic_priv *ic_priv)
+{
+ struct prp_priv *priv = ic_priv->task_priv;
+
+ mutex_destroy(&priv->lock);
+ imx_media_capture_device_remove(priv->vdev);
+}
+
+struct imx_ic_ops imx_ic_prpencvf_ops = {
+ .subdev_ops = &prp_subdev_ops,
+ .internal_ops = &prp_internal_ops,
+ .entity_ops = &prp_entity_ops,
+ .init = prp_init,
+ .remove = prp_remove,
+};
diff --git a/drivers/staging/media/imx/imx-ic.h b/drivers/staging/media/imx/imx-ic.h
new file mode 100644
index 000000000000..6b2267bda8ab
--- /dev/null
+++ b/drivers/staging/media/imx/imx-ic.h
@@ -0,0 +1,38 @@
+/*
+ * V4L2 Image Converter Subdev for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _IMX_IC_H
+#define _IMX_IC_H
+
+#include <media/v4l2-subdev.h>
+
+struct imx_ic_priv {
+ struct device *dev;
+ struct v4l2_subdev sd;
+ int ipu_id;
+ int task_id;
+ void *prp_priv;
+ void *task_priv;
+};
+
+struct imx_ic_ops {
+ const struct v4l2_subdev_ops *subdev_ops;
+ const struct v4l2_subdev_internal_ops *internal_ops;
+ const struct media_entity_operations *entity_ops;
+
+ int (*init)(struct imx_ic_priv *ic_priv);
+ void (*remove)(struct imx_ic_priv *ic_priv);
+};
+
+extern struct imx_ic_ops imx_ic_prp_ops;
+extern struct imx_ic_ops imx_ic_prpencvf_ops;
+extern struct imx_ic_ops imx_ic_pp_ops;
+
+#endif
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
new file mode 100644
index 000000000000..ddab4c249da2
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -0,0 +1,775 @@
+/*
+ * Video Capture Subdev for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2012-2016 Mentor Graphics Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+#include <video/imx-ipu-v3.h>
+#include <media/imx.h>
+#include "imx-media.h"
+
+struct capture_priv {
+ struct imx_media_video_dev vdev;
+
+ struct v4l2_subdev *src_sd;
+ int src_sd_pad;
+ struct device *dev;
+
+ struct imx_media_dev *md;
+
+ struct media_pad vdev_pad;
+
+ struct mutex mutex; /* capture device mutex */
+
+ /* the videobuf2 queue */
+ struct vb2_queue q;
+ /* list of ready imx_media_buffer's from q */
+ struct list_head ready_q;
+ /* protect ready_q */
+ spinlock_t q_lock;
+
+ /* controls inherited from subdevs */
+ struct v4l2_ctrl_handler ctrl_hdlr;
+
+ /* misc status */
+ bool stop; /* streaming is stopping */
+};
+
+#define to_capture_priv(v) container_of(v, struct capture_priv, vdev)
+
+/* In bytes, per queue */
+#define VID_MEM_LIMIT SZ_64M
+
+static struct vb2_ops capture_qops;
+
+/*
+ * Video ioctls follow
+ */
+
+static int vidioc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct capture_priv *priv = video_drvdata(file);
+
+ strncpy(cap->driver, "imx-media-capture", sizeof(cap->driver) - 1);
+ strncpy(cap->card, "imx-media-capture", sizeof(cap->card) - 1);
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", priv->src_sd->name);
+
+ return 0;
+}
+
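+/*
+ * Translate the source subdev's frame size range into a v4l2 frame size
+ * enumeration: a single supported size is reported as DISCRETE, a range
+ * as CONTINUOUS with one-pixel steps.
+ */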
+static int capture_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ const struct imx_media_pixfmt *cc;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = fsize->index,
+ .pad = priv->src_sd_pad,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ cc = imx_media_find_format(fsize->pixel_format, CS_SEL_ANY, true);
+ if (!cc)
+ return -EINVAL;
+
+ fse.code = cc->codes[0];
+
+ ret = v4l2_subdev_call(priv->src_sd, pad, enum_frame_size, NULL, &fse);
+ if (ret)
+ return ret;
+
+ if (fse.min_width == fse.max_width &&
+ fse.min_height == fse.max_height) {
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.min_width;
+ fsize->discrete.height = fse.min_height;
+ } else {
+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
+ fsize->stepwise.min_width = fse.min_width;
+ fsize->stepwise.max_width = fse.max_width;
+ fsize->stepwise.min_height = fse.min_height;
+ fsize->stepwise.max_height = fse.max_height;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_height = 1;
+ }
+
+ return 0;
+}
+
+static int capture_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ const struct imx_media_pixfmt *cc;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = fival->index,
+ .pad = priv->src_sd_pad,
+ .width = fival->width,
+ .height = fival->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ cc = imx_media_find_format(fival->pixel_format, CS_SEL_ANY, true);
+ if (!cc)
+ return -EINVAL;
+
+ fie.code = cc->codes[0];
+
+ ret = v4l2_subdev_call(priv->src_sd, pad, enum_frame_interval, NULL, &fie);
+ if (ret)
+ return ret;
+
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete = fie.interval;
+
+ return 0;
+}
+
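+/*
+ * Enumerate only pixel formats reachable from the source pad's current
+ * mbus code: the single matching fourcc for bayer formats, otherwise
+ * all formats in the same colorspace (YUV or RGB).
+ */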
+static int capture_enum_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ const struct imx_media_pixfmt *cc_src;
+ struct v4l2_subdev_format fmt_src;
+ u32 fourcc;
+ int ret;
+
+ fmt_src.pad = priv->src_sd_pad;
+ fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
+ if (ret) {
+ v4l2_err(priv->src_sd, "failed to get src_sd format\n");
+ return ret;
+ }
+
+ cc_src = imx_media_find_ipu_format(fmt_src.format.code, CS_SEL_ANY);
+ if (!cc_src)
+ cc_src = imx_media_find_mbus_format(fmt_src.format.code,
+ CS_SEL_ANY, true);
+ if (!cc_src)
+ return -EINVAL;
+
+ if (cc_src->bayer) {
+ if (f->index != 0)
+ return -EINVAL;
+ fourcc = cc_src->fourcc;
+ } else {
+ u32 cs_sel = (cc_src->cs == IPUV3_COLORSPACE_YUV) ?
+ CS_SEL_YUV : CS_SEL_RGB;
+
+ ret = imx_media_enum_format(&fourcc, f->index, cs_sel);
+ if (ret)
+ return ret;
+ }
+
+ f->pixelformat = fourcc;
+
+ return 0;
+}
+
+static int capture_g_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct capture_priv *priv = video_drvdata(file);
+
+ *f = priv->vdev.fmt;
+
+ return 0;
+}
+
+static int capture_try_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ struct v4l2_subdev_format fmt_src;
+ const struct imx_media_pixfmt *cc, *cc_src;
+ int ret;
+
+ fmt_src.pad = priv->src_sd_pad;
+ fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
+ if (ret)
+ return ret;
+
+ cc_src = imx_media_find_ipu_format(fmt_src.format.code, CS_SEL_ANY);
+ if (!cc_src)
+ cc_src = imx_media_find_mbus_format(fmt_src.format.code,
+ CS_SEL_ANY, true);
+ if (!cc_src)
+ return -EINVAL;
+
+ if (cc_src->bayer) {
+ cc = cc_src;
+ } else {
+ u32 fourcc, cs_sel;
+
+ cs_sel = (cc_src->cs == IPUV3_COLORSPACE_YUV) ?
+ CS_SEL_YUV : CS_SEL_RGB;
+ fourcc = f->fmt.pix.pixelformat;
+
+ cc = imx_media_find_format(fourcc, cs_sel, false);
+ if (!cc) {
+ imx_media_enum_format(&fourcc, 0, cs_sel);
+ cc = imx_media_find_format(fourcc, cs_sel, false);
+ }
+ }
+
+ imx_media_mbus_fmt_to_pix_fmt(&f->fmt.pix, &fmt_src.format, cc);
+
+ return 0;
+}
+
+static int capture_s_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ int ret;
+
+ if (vb2_is_busy(&priv->q)) {
+ v4l2_err(priv->src_sd, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = capture_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ priv->vdev.fmt.fmt.pix = f->fmt.pix;
+ priv->vdev.cc = imx_media_find_format(f->fmt.pix.pixelformat,
+ CS_SEL_ANY, true);
+
+ return 0;
+}
+
+static int capture_querystd(struct file *file, void *fh, v4l2_std_id *std)
+{
+ struct capture_priv *priv = video_drvdata(file);
+
+ return v4l2_subdev_call(priv->src_sd, video, querystd, std);
+}
+
+static int capture_g_std(struct file *file, void *fh, v4l2_std_id *std)
+{
+ struct capture_priv *priv = video_drvdata(file);
+
+ return v4l2_subdev_call(priv->src_sd, video, g_std, std);
+}
+
+static int capture_s_std(struct file *file, void *fh, v4l2_std_id std)
+{
+ struct capture_priv *priv = video_drvdata(file);
+
+ if (vb2_is_busy(&priv->q))
+ return -EBUSY;
+
+ return v4l2_subdev_call(priv->src_sd, video, s_std, std);
+}
+
+static int capture_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ struct v4l2_subdev_frame_interval fi;
+ int ret;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ memset(&fi, 0, sizeof(fi));
+ fi.pad = priv->src_sd_pad;
+ ret = v4l2_subdev_call(priv->src_sd, video, g_frame_interval, &fi);
+ if (ret < 0)
+ return ret;
+
+ a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ a->parm.capture.timeperframe = fi.interval;
+
+ return 0;
+}
+
+static int capture_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ struct v4l2_subdev_frame_interval fi;
+ int ret;
+
+ if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ memset(&fi, 0, sizeof(fi));
+ fi.pad = priv->src_sd_pad;
+ fi.interval = a->parm.capture.timeperframe;
+ ret = v4l2_subdev_call(priv->src_sd, video, s_frame_interval, &fi);
+ if (ret < 0)
+ return ret;
+
+ a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ a->parm.capture.timeperframe = fi.interval;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops capture_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+
+ .vidioc_enum_framesizes = capture_enum_framesizes,
+ .vidioc_enum_frameintervals = capture_enum_frameintervals,
+
+ .vidioc_enum_fmt_vid_cap = capture_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = capture_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = capture_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = capture_s_fmt_vid_cap,
+
+ .vidioc_querystd = capture_querystd,
+ .vidioc_g_std = capture_g_std,
+ .vidioc_s_std = capture_s_std,
+
+ .vidioc_g_parm = capture_g_parm,
+ .vidioc_s_parm = capture_s_parm,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+/*
+ * Queue operations
+ */
+
+static int capture_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct capture_priv *priv = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix = &priv->vdev.fmt.fmt.pix;
+ unsigned int count = *nbuffers;
+
+ if (vq->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
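+ /*
+ * A non-zero *nplanes means the plane sizes were already chosen
+ * by the caller (the VIDIOC_CREATE_BUFS path): just verify them
+ * and account for the buffers that are already allocated.
+ */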
+ if (*nplanes) {
+ if (*nplanes != 1 || sizes[0] < pix->sizeimage)
+ return -EINVAL;
+ count += vq->num_buffers;
+ }
+
+ count = min_t(__u32, VID_MEM_LIMIT / pix->sizeimage, count);
+
+ if (*nplanes)
+ *nbuffers = (count < vq->num_buffers) ? 0 :
+ count - vq->num_buffers;
+ else
+ *nbuffers = count;
+
+ *nplanes = 1;
+ sizes[0] = pix->sizeimage;
+
+ return 0;
+}
+
+static int capture_buf_init(struct vb2_buffer *vb)
+{
+ struct imx_media_buffer *buf = to_imx_media_vb(vb);
+
+ INIT_LIST_HEAD(&buf->list);
+
+ return 0;
+}
+
+static int capture_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct capture_priv *priv = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format *pix = &priv->vdev.fmt.fmt.pix;
+
+ if (vb2_plane_size(vb, 0) < pix->sizeimage) {
+ v4l2_err(priv->src_sd,
+ "data will not fit into plane (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), (unsigned long)pix->sizeimage);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, pix->sizeimage);
+
+ return 0;
+}
+
+static void capture_buf_queue(struct vb2_buffer *vb)
+{
+ struct capture_priv *priv = vb2_get_drv_priv(vb->vb2_queue);
+ struct imx_media_buffer *buf = to_imx_media_vb(vb);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->q_lock, flags);
+
+ list_add_tail(&buf->list, &priv->ready_q);
+
+ spin_unlock_irqrestore(&priv->q_lock, flags);
+}
+
+static int capture_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct capture_priv *priv = vb2_get_drv_priv(vq);
+ struct imx_media_buffer *buf, *tmp;
+ unsigned long flags;
+ int ret;
+
+ if (vb2_is_streaming(vq))
+ return 0;
+
+ ret = imx_media_pipeline_set_stream(priv->md, &priv->src_sd->entity,
+ true);
+ if (ret) {
+ v4l2_err(priv->src_sd, "pipeline start failed with %d\n", ret);
+ goto return_bufs;
+ }
+
+ priv->stop = false;
+
+ return 0;
+
+return_bufs:
+ spin_lock_irqsave(&priv->q_lock, flags);
+ list_for_each_entry_safe(buf, tmp, &priv->ready_q, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_QUEUED);
+ }
+ spin_unlock_irqrestore(&priv->q_lock, flags);
+ return ret;
+}
+
+static void capture_stop_streaming(struct vb2_queue *vq)
+{
+ struct capture_priv *priv = vb2_get_drv_priv(vq);
+ struct imx_media_buffer *frame;
+ unsigned long flags;
+ int ret;
+
+ if (!vb2_is_streaming(vq))
+ return;
+
+ spin_lock_irqsave(&priv->q_lock, flags);
+ priv->stop = true;
+ spin_unlock_irqrestore(&priv->q_lock, flags);
+
+ ret = imx_media_pipeline_set_stream(priv->md, &priv->src_sd->entity,
+ false);
+ if (ret)
+ v4l2_warn(priv->src_sd, "pipeline stop failed with %d\n", ret);
+
+ /* release all active buffers */
+ spin_lock_irqsave(&priv->q_lock, flags);
+ while (!list_empty(&priv->ready_q)) {
+ frame = list_entry(priv->ready_q.next,
+ struct imx_media_buffer, list);
+ list_del(&frame->list);
+ vb2_buffer_done(&frame->vbuf.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&priv->q_lock, flags);
+}
+
+static struct vb2_ops capture_qops = {
+ .queue_setup = capture_queue_setup,
+ .buf_init = capture_buf_init,
+ .buf_prepare = capture_buf_prepare,
+ .buf_queue = capture_buf_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = capture_start_streaming,
+ .stop_streaming = capture_stop_streaming,
+};
+
+/*
+ * File operations
+ */
+static int capture_open(struct file *file)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ struct video_device *vfd = priv->vdev.vfd;
+ int ret;
+
+ if (mutex_lock_interruptible(&priv->mutex))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret) {
+ v4l2_err(priv->src_sd, "v4l2_fh_open failed\n");
+ goto out_unlock;
+ }
+
+ ret = v4l2_pipeline_pm_use(&vfd->entity, 1);
+ if (ret)
+ v4l2_fh_release(file);
+
+out_unlock:
+ mutex_unlock(&priv->mutex);
+ return ret;
+}
+
+static int capture_release(struct file *file)
+{
+ struct capture_priv *priv = video_drvdata(file);
+ struct video_device *vfd = priv->vdev.vfd;
+ struct vb2_queue *vq = &priv->q;
+ int ret = 0;
+
+ mutex_lock(&priv->mutex);
+
+ if (file->private_data == vq->owner) {
+ vb2_queue_release(vq);
+ vq->owner = NULL;
+ }
+
+ v4l2_pipeline_pm_use(&vfd->entity, 0);
+
+ v4l2_fh_release(file);
+ mutex_unlock(&priv->mutex);
+ return ret;
+}
+
+static const struct v4l2_file_operations capture_fops = {
+ .owner = THIS_MODULE,
+ .open = capture_open,
+ .release = capture_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+static struct video_device capture_videodev = {
+ .fops = &capture_fops,
+ .ioctl_ops = &capture_ioctl_ops,
+ .minor = -1,
+ .release = video_device_release,
+ .vfl_dir = VFL_DIR_RX,
+ .tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
+ .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING,
+};
+
+void imx_media_capture_device_set_format(struct imx_media_video_dev *vdev,
+ struct v4l2_pix_format *pix)
+{
+ struct capture_priv *priv = to_capture_priv(vdev);
+
+ mutex_lock(&priv->mutex);
+ priv->vdev.fmt.fmt.pix = *pix;
+ priv->vdev.cc = imx_media_find_format(pix->pixelformat, CS_SEL_ANY,
+ true);
+ mutex_unlock(&priv->mutex);
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_set_format);
+
+struct imx_media_buffer *
+imx_media_capture_device_next_buf(struct imx_media_video_dev *vdev)
+{
+ struct capture_priv *priv = to_capture_priv(vdev);
+ struct imx_media_buffer *buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->q_lock, flags);
+
+ /* get next queued buffer */
+ if (!list_empty(&priv->ready_q)) {
+ buf = list_entry(priv->ready_q.next, struct imx_media_buffer,
+ list);
+ list_del(&buf->list);
+ }
+
+ spin_unlock_irqrestore(&priv->q_lock, flags);
+
+ return buf;
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_next_buf);
+
+void imx_media_capture_device_error(struct imx_media_video_dev *vdev)
+{
+ struct capture_priv *priv = to_capture_priv(vdev);
+ struct vb2_queue *vq = &priv->q;
+ unsigned long flags;
+
+ if (!vb2_is_streaming(vq))
+ return;
+
+ spin_lock_irqsave(&priv->q_lock, flags);
+ vb2_queue_error(vq);
+ spin_unlock_irqrestore(&priv->q_lock, flags);
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_error);
+
+int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
+{
+ struct capture_priv *priv = to_capture_priv(vdev);
+ struct v4l2_subdev *sd = priv->src_sd;
+ struct video_device *vfd = vdev->vfd;
+ struct vb2_queue *vq = &priv->q;
+ struct v4l2_subdev_format fmt_src;
+ int ret;
+
+ /* get media device */
+ priv->md = dev_get_drvdata(sd->v4l2_dev->dev);
+
+ vfd->v4l2_dev = sd->v4l2_dev;
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
+ if (ret) {
+ v4l2_err(sd, "Failed to register video device\n");
+ return ret;
+ }
+
+ vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ vq->drv_priv = priv;
+ vq->buf_struct_size = sizeof(struct imx_media_buffer);
+ vq->ops = &capture_qops;
+ vq->mem_ops = &vb2_dma_contig_memops;
+ vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vq->lock = &priv->mutex;
+ vq->min_buffers_needed = 2;
+ vq->dev = priv->dev;
+
+ ret = vb2_queue_init(vq);
+ if (ret) {
+ v4l2_err(sd, "vb2_queue_init failed\n");
+ goto unreg;
+ }
+
+ INIT_LIST_HEAD(&priv->ready_q);
+
+ priv->vdev_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vfd->entity, 1, &priv->vdev_pad);
+ if (ret) {
+ v4l2_err(sd, "failed to init dev pad\n");
+ goto unreg;
+ }
+
+ /* create the link from the src_sd devnode pad to device node */
+ ret = media_create_pad_link(&sd->entity, priv->src_sd_pad,
+ &vfd->entity, 0, 0);
+ if (ret) {
+ v4l2_err(sd, "failed to create link to device node\n");
+ goto unreg;
+ }
+
+ /* setup default format */
+ fmt_src.pad = priv->src_sd_pad;
+ fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src);
+ if (ret) {
+ v4l2_err(sd, "failed to get src_sd format\n");
+ goto unreg;
+ }
+
+ vdev->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ imx_media_mbus_fmt_to_pix_fmt(&vdev->fmt.fmt.pix,
+ &fmt_src.format, NULL);
+ vdev->cc = imx_media_find_format(vdev->fmt.fmt.pix.pixelformat,
+ CS_SEL_ANY, false);
+
+ v4l2_info(sd, "Registered %s as /dev/%s\n", vfd->name,
+ video_device_node_name(vfd));
+
+ vfd->ctrl_handler = &priv->ctrl_hdlr;
+
+ return 0;
+unreg:
+ video_unregister_device(vfd);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_register);
+
+void imx_media_capture_device_unregister(struct imx_media_video_dev *vdev)
+{
+ struct capture_priv *priv = to_capture_priv(vdev);
+ struct video_device *vfd = priv->vdev.vfd;
+
+ mutex_lock(&priv->mutex);
+
+ if (video_is_registered(vfd)) {
+ video_unregister_device(vfd);
+ media_entity_cleanup(&vfd->entity);
+ }
+
+ mutex_unlock(&priv->mutex);
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_unregister);
+
+struct imx_media_video_dev *
+imx_media_capture_device_init(struct v4l2_subdev *src_sd, int pad)
+{
+ struct capture_priv *priv;
+ struct video_device *vfd;
+
+ priv = devm_kzalloc(src_sd->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->src_sd = src_sd;
+ priv->src_sd_pad = pad;
+ priv->dev = src_sd->dev;
+
+ mutex_init(&priv->mutex);
+ spin_lock_init(&priv->q_lock);
+
+ snprintf(capture_videodev.name, sizeof(capture_videodev.name),
+ "%s capture", src_sd->name);
+
+ vfd = video_device_alloc();
+ if (!vfd)
+ return ERR_PTR(-ENOMEM);
+
+ *vfd = capture_videodev;
+ vfd->lock = &priv->mutex;
+ vfd->queue = &priv->q;
+ priv->vdev.vfd = vfd;
+
+ video_set_drvdata(vfd, priv);
+
+ v4l2_ctrl_handler_init(&priv->ctrl_hdlr, 0);
+
+ return &priv->vdev;
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_init);
+
+void imx_media_capture_device_remove(struct imx_media_video_dev *vdev)
+{
+ struct capture_priv *priv = to_capture_priv(vdev);
+
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+}
+EXPORT_SYMBOL_GPL(imx_media_capture_device_remove);
+
+MODULE_DESCRIPTION("i.MX5/6 v4l2 video capture interface driver");
+MODULE_AUTHOR("Steve Longerbeam <[email protected]>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
new file mode 100644
index 000000000000..a2d26693912e
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -0,0 +1,1817 @@
+/*
+ * V4L2 Capture CSI Subdev for Freescale i.MX5/6 SoC
+ *
+ * Copyright (c) 2014-2017 Mentor Graphics Inc.
+ * Copyright (C) 2017 Pengutronix, Philipp Zabel <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/delay.h>
+#include <linux/gcd.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+#include <video/imx-ipu-v3.h>
+#include <media/imx.h>
+#include "imx-media.h"
+
+/*
+ * Min/Max supported width and heights.
+ *
+ * We allow planar output, so we have to align width by 16 pixels
+ * to meet IDMAC alignment requirements.
+ *
+ * TODO: move this into pad format negotiation; if the capture
+ * device has not requested planar formats, we should allow
+ * 8 pixel alignment.
+ */
+#define MIN_W 176
+#define MIN_H 144
+#define MAX_W 4096
+#define MAX_H 4096
+#define W_ALIGN 4 /* multiple of 16 pixels */
+#define H_ALIGN 1 /* multiple of 2 lines */
+#define S_ALIGN 1 /* multiple of 2 */
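+
+/*
+ * Note: W_ALIGN, H_ALIGN and S_ALIGN are log2 values passed to
+ * v4l_bound_align_image() in csi_try_fmt(), so e.g. W_ALIGN = 4
+ * aligns the width to a multiple of 1 << 4 = 16 pixels.
+ */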
+
+/*
+ * struct csi_skip_desc - CSI frame skipping descriptor
+ * @keep: number of frames kept per max_ratio frames
+ * @max_ratio: width of skip_smfc, written to MAX_RATIO bitfield
+ * @skip_smfc: skip pattern written to the SKIP_SMFC bitfield
+ */
+struct csi_skip_desc {
+ u8 keep;
+ u8 max_ratio;
+ u8 skip_smfc;
+};
+
+struct csi_priv {
+ struct device *dev;
+ struct ipu_soc *ipu;
+ struct imx_media_dev *md;
+ struct v4l2_subdev sd;
+ struct media_pad pad[CSI_NUM_PADS];
+ /* the video device at IDMAC output pad */
+ struct imx_media_video_dev *vdev;
+ struct imx_media_fim *fim;
+ int csi_id;
+ int smfc_id;
+
+ /* lock to protect all members below */
+ struct mutex lock;
+
+ int active_output_pad;
+
+ struct ipuv3_channel *idmac_ch;
+ struct ipu_smfc *smfc;
+ struct ipu_csi *csi;
+
+ struct v4l2_mbus_framefmt format_mbus[CSI_NUM_PADS];
+ const struct imx_media_pixfmt *cc[CSI_NUM_PADS];
+ struct v4l2_fract frame_interval[CSI_NUM_PADS];
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+ const struct csi_skip_desc *skip;
+
+ /* active vb2 buffers to send to video dev sink */
+ struct imx_media_buffer *active_vb2_buf[2];
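+ /* fallback DMA buffer written to when no vb2 buffer is queued */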
+ struct imx_media_dma_buf underrun_buf;
+
+ int ipu_buf_num; /* ipu double buffer index: 0-1 */
+
+ /* the sink for the captured frames */
+ struct media_entity *sink;
+ enum ipu_csi_dest dest;
+ /* the source subdev */
+ struct v4l2_subdev *src_sd;
+
+ /* the mipi virtual channel number at link validate */
+ int vc_num;
+
+ /* the attached sensor at stream on */
+ struct imx_media_subdev *sensor;
+
+ spinlock_t irqlock; /* protect eof_irq handler */
+ struct timer_list eof_timeout_timer;
+ int eof_irq;
+ int nfb4eof_irq;
+
+ struct v4l2_ctrl_handler ctrl_hdlr;
+
+ int stream_count; /* streaming counter */
+ bool last_eof; /* waiting for last EOF at stream off */
+ bool nfb4eof; /* NFB4EOF encountered during streaming */
+ struct completion last_eof_comp;
+};
+
+static inline struct csi_priv *sd_to_dev(struct v4l2_subdev *sdev)
+{
+ return container_of(sdev, struct csi_priv, sd);
+}
+
+static void csi_idmac_put_ipu_resources(struct csi_priv *priv)
+{
+ if (!IS_ERR_OR_NULL(priv->idmac_ch))
+ ipu_idmac_put(priv->idmac_ch);
+ priv->idmac_ch = NULL;
+
+ if (!IS_ERR_OR_NULL(priv->smfc))
+ ipu_smfc_put(priv->smfc);
+ priv->smfc = NULL;
+}
+
+static int csi_idmac_get_ipu_resources(struct csi_priv *priv)
+{
+ int ch_num, ret;
+
+ ch_num = IPUV3_CHANNEL_CSI0 + priv->smfc_id;
+
+ priv->smfc = ipu_smfc_get(priv->ipu, ch_num);
+ if (IS_ERR(priv->smfc)) {
+ v4l2_err(&priv->sd, "failed to get SMFC\n");
+ ret = PTR_ERR(priv->smfc);
+ goto out;
+ }
+
+ priv->idmac_ch = ipu_idmac_get(priv->ipu, ch_num);
+ if (IS_ERR(priv->idmac_ch)) {
+ v4l2_err(&priv->sd, "could not get IDMAC channel %u\n",
+ ch_num);
+ ret = PTR_ERR(priv->idmac_ch);
+ goto out;
+ }
+
+ return 0;
+out:
+ csi_idmac_put_ipu_resources(priv);
+ return ret;
+}
+
+static void csi_vb2_buf_done(struct csi_priv *priv)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_media_buffer *done, *next;
+ struct vb2_buffer *vb;
+ dma_addr_t phys;
+
+ done = priv->active_vb2_buf[priv->ipu_buf_num];
+ if (done) {
+ vb = &done->vbuf.vb2_buf;
+ vb->timestamp = ktime_get_ns();
+ vb2_buffer_done(vb, priv->nfb4eof ?
+ VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+ }
+
+ priv->nfb4eof = false;
+
+ /* get next queued buffer */
+ next = imx_media_capture_device_next_buf(vdev);
+ if (next) {
+ phys = vb2_dma_contig_plane_dma_addr(&next->vbuf.vb2_buf, 0);
+ priv->active_vb2_buf[priv->ipu_buf_num] = next;
+ } else {
+ phys = priv->underrun_buf.phys;
+ priv->active_vb2_buf[priv->ipu_buf_num] = NULL;
+ }
+
+ if (ipu_idmac_buffer_is_ready(priv->idmac_ch, priv->ipu_buf_num))
+ ipu_idmac_clear_buffer(priv->idmac_ch, priv->ipu_buf_num);
+
+ ipu_cpmem_set_buffer(priv->idmac_ch, priv->ipu_buf_num, phys);
+}
+
+static irqreturn_t csi_idmac_eof_interrupt(int irq, void *dev_id)
+{
+ struct csi_priv *priv = dev_id;
+
+ spin_lock(&priv->irqlock);
+
+ if (priv->last_eof) {
+ complete(&priv->last_eof_comp);
+ priv->last_eof = false;
+ goto unlock;
+ }
+
+ if (priv->fim) {
+ struct timespec cur_ts;
+
+ ktime_get_ts(&cur_ts);
+ /* call frame interval monitor */
+ imx_media_fim_eof_monitor(priv->fim, &cur_ts);
+ }
+
+ csi_vb2_buf_done(priv);
+
+ /* select new IPU buf */
+ ipu_idmac_select_buffer(priv->idmac_ch, priv->ipu_buf_num);
+ /* toggle IPU double-buffer index */
+ priv->ipu_buf_num ^= 1;
+
+ /* bump the EOF timeout timer */
+ mod_timer(&priv->eof_timeout_timer,
+ jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+
+unlock:
+ spin_unlock(&priv->irqlock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t csi_idmac_nfb4eof_interrupt(int irq, void *dev_id)
+{
+ struct csi_priv *priv = dev_id;
+
+ spin_lock(&priv->irqlock);
+
+ /*
+ * This is not an unrecoverable error, so just mark the
+ * next captured frame with the vb2 error flag.
+ */
+ priv->nfb4eof = true;
+
+ v4l2_err(&priv->sd, "NFB4EOF\n");
+
+ spin_unlock(&priv->irqlock);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * EOF timeout timer function. This is an unrecoverable condition
+ * without a stream restart.
+ */
+static void csi_idmac_eof_timeout(unsigned long data)
+{
+ struct csi_priv *priv = (struct csi_priv *)data;
+ struct imx_media_video_dev *vdev = priv->vdev;
+
+ v4l2_err(&priv->sd, "EOF timeout\n");
+
+ /* signal a fatal error to capture device */
+ imx_media_capture_device_error(vdev);
+}
+
+static void csi_idmac_setup_vb2_buf(struct csi_priv *priv, dma_addr_t *phys)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct imx_media_buffer *buf;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ buf = imx_media_capture_device_next_buf(vdev);
+ if (buf) {
+ priv->active_vb2_buf[i] = buf;
+ phys[i] = vb2_dma_contig_plane_dma_addr(
+ &buf->vbuf.vb2_buf, 0);
+ } else {
+ priv->active_vb2_buf[i] = NULL;
+ phys[i] = priv->underrun_buf.phys;
+ }
+ }
+}
+
+static void csi_idmac_unsetup_vb2_buf(struct csi_priv *priv,
+ enum vb2_buffer_state return_status)
+{
+ struct imx_media_buffer *buf;
+ int i;
+
+ /* return any remaining active frames with return_status */
+ for (i = 0; i < 2; i++) {
+ buf = priv->active_vb2_buf[i];
+ if (buf) {
+ struct vb2_buffer *vb = &buf->vbuf.vb2_buf;
+
+ vb->timestamp = ktime_get_ns();
+ vb2_buffer_done(vb, return_status);
+ }
+ }
+}
+
+/* init the SMFC IDMAC channel */
+static int csi_idmac_setup_channel(struct csi_priv *priv)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct v4l2_fwnode_endpoint *sensor_ep;
+ struct v4l2_mbus_framefmt *infmt;
+ struct ipu_image image;
+ u32 passthrough_bits;
+ dma_addr_t phys[2];
+ bool passthrough;
+ u32 burst_size;
+ int ret;
+
+ infmt = &priv->format_mbus[CSI_SINK_PAD];
+ sensor_ep = &priv->sensor->sensor_ep;
+
+ ipu_cpmem_zero(priv->idmac_ch);
+
+ memset(&image, 0, sizeof(image));
+ image.pix = vdev->fmt.fmt.pix;
+ image.rect.width = image.pix.width;
+ image.rect.height = image.pix.height;
+
+ csi_idmac_setup_vb2_buf(priv, phys);
+
+ image.phys0 = phys[0];
+ image.phys1 = phys[1];
+
+ /*
+ * Check for conditions that require the IPU to handle the
+ * data internally as generic data, aka passthrough mode:
+ * - raw bayer formats
+ * - the sensor bus is 16-bit parallel
+ */
+ switch (image.pix.pixelformat) {
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ burst_size = 8;
+ passthrough = true;
+ passthrough_bits = 8;
+ break;
+ case V4L2_PIX_FMT_SBGGR16:
+ case V4L2_PIX_FMT_SGBRG16:
+ case V4L2_PIX_FMT_SGRBG16:
+ case V4L2_PIX_FMT_SRGGB16:
+ burst_size = 4;
+ passthrough = true;
+ passthrough_bits = 16;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_NV12:
+ burst_size = (image.pix.width & 0x3f) ?
+ ((image.pix.width & 0x1f) ?
+ ((image.pix.width & 0xf) ? 8 : 16) : 32) : 64;
+ passthrough = (sensor_ep->bus_type != V4L2_MBUS_CSI2 &&
+ sensor_ep->bus.parallel.bus_width >= 16);
+ passthrough_bits = 16;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ burst_size = (image.pix.width & 0x1f) ?
+ ((image.pix.width & 0xf) ? 8 : 16) : 32;
+ passthrough = (sensor_ep->bus_type != V4L2_MBUS_CSI2 &&
+ sensor_ep->bus.parallel.bus_width >= 16);
+ passthrough_bits = 16;
+ break;
+ default:
+ burst_size = (image.pix.width & 0xf) ? 8 : 16;
+ passthrough = (sensor_ep->bus_type != V4L2_MBUS_CSI2 &&
+ sensor_ep->bus.parallel.bus_width >= 16);
+ passthrough_bits = 16;
+ break;
+ }
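+
+ /*
+ * Example of the burst size selection above (illustrative only):
+ * a 720-pixel-wide YUYV line is a multiple of 16 but not of 32
+ * pixels (720 & 0x1f != 0, 720 & 0xf == 0), so burst_size is 16.
+ */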
+
+ if (passthrough) {
+ ipu_cpmem_set_resolution(priv->idmac_ch, image.rect.width,
+ image.rect.height);
+ ipu_cpmem_set_stride(priv->idmac_ch, image.pix.bytesperline);
+ ipu_cpmem_set_buffer(priv->idmac_ch, 0, image.phys0);
+ ipu_cpmem_set_buffer(priv->idmac_ch, 1, image.phys1);
+ ipu_cpmem_set_format_passthrough(priv->idmac_ch,
+ passthrough_bits);
+ } else {
+ ret = ipu_cpmem_set_image(priv->idmac_ch, &image);
+ if (ret)
+ goto unsetup_vb2;
+ }
+
+ ipu_cpmem_set_burstsize(priv->idmac_ch, burst_size);
+
+ /*
+ * Set the channel for the direct CSI-->memory via SMFC
+ * use-case to very high priority, by enabling the watermark
+ * signal in the SMFC, enabling WM in the channel, and setting
+ * the channel priority to high.
+ *
+ * Refer to the i.MX6 rev. D TRM, Table 36-8: Calculated priority
+ * value.
+ *
+ * The watermarks are intentionally set very low here to ensure
+ * that the SMFC FIFOs do not overflow.
+ */
+ ipu_smfc_set_watermark(priv->smfc, 0x02, 0x01);
+ ipu_cpmem_set_high_priority(priv->idmac_ch);
+ ipu_idmac_enable_watermark(priv->idmac_ch, true);
+ ipu_cpmem_set_axi_id(priv->idmac_ch, 0);
+
+ burst_size = passthrough ?
+ (burst_size >> 3) - 1 : (burst_size >> 2) - 1;
+
+ ipu_smfc_set_burstsize(priv->smfc, burst_size);
+
+ if (image.pix.field == V4L2_FIELD_NONE &&
+ V4L2_FIELD_HAS_BOTH(infmt->field))
+ ipu_cpmem_interlaced_scan(priv->idmac_ch,
+ image.pix.bytesperline);
+
+ ipu_idmac_set_double_buffer(priv->idmac_ch, true);
+
+ return 0;
+
+unsetup_vb2:
+ csi_idmac_unsetup_vb2_buf(priv, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void csi_idmac_unsetup(struct csi_priv *priv,
+ enum vb2_buffer_state state)
+{
+ ipu_idmac_disable_channel(priv->idmac_ch);
+ ipu_smfc_disable(priv->smfc);
+
+ csi_idmac_unsetup_vb2_buf(priv, state);
+}
+
+static int csi_idmac_setup(struct csi_priv *priv)
+{
+ int ret;
+
+ ret = csi_idmac_setup_channel(priv);
+ if (ret)
+ return ret;
+
+ ipu_cpmem_dump(priv->idmac_ch);
+ ipu_dump(priv->ipu);
+
+ ipu_smfc_enable(priv->smfc);
+
+ /* set buffers ready */
+ ipu_idmac_select_buffer(priv->idmac_ch, 0);
+ ipu_idmac_select_buffer(priv->idmac_ch, 1);
+
+ /* enable the channels */
+ ipu_idmac_enable_channel(priv->idmac_ch);
+
+ return 0;
+}
+
+static int csi_idmac_start(struct csi_priv *priv)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ struct v4l2_pix_format *outfmt;
+ int ret;
+
+ ret = csi_idmac_get_ipu_resources(priv);
+ if (ret)
+ return ret;
+
+ ipu_smfc_map_channel(priv->smfc, priv->csi_id, priv->vc_num);
+
+ outfmt = &vdev->fmt.fmt.pix;
+
+ ret = imx_media_alloc_dma_buf(priv->md, &priv->underrun_buf,
+ outfmt->sizeimage);
+ if (ret)
+ goto out_put_ipu;
+
+ priv->ipu_buf_num = 0;
+
+ /* init EOF completion waitq */
+ init_completion(&priv->last_eof_comp);
+ priv->last_eof = false;
+ priv->nfb4eof = false;
+
+ ret = csi_idmac_setup(priv);
+ if (ret) {
+ v4l2_err(&priv->sd, "csi_idmac_setup failed: %d\n", ret);
+ goto out_free_dma_buf;
+ }
+
+ priv->nfb4eof_irq = ipu_idmac_channel_irq(priv->ipu,
+ priv->idmac_ch,
+ IPU_IRQ_NFB4EOF);
+ ret = devm_request_irq(priv->dev, priv->nfb4eof_irq,
+ csi_idmac_nfb4eof_interrupt, 0,
+ "imx-smfc-nfb4eof", priv);
+ if (ret) {
+ v4l2_err(&priv->sd,
+ "Error registering NFB4EOF irq: %d\n", ret);
+ goto out_unsetup;
+ }
+
+ priv->eof_irq = ipu_idmac_channel_irq(priv->ipu, priv->idmac_ch,
+ IPU_IRQ_EOF);
+
+ ret = devm_request_irq(priv->dev, priv->eof_irq,
+ csi_idmac_eof_interrupt, 0,
+ "imx-smfc-eof", priv);
+ if (ret) {
+ v4l2_err(&priv->sd,
+ "Error registering eof irq: %d\n", ret);
+ goto out_free_nfb4eof_irq;
+ }
+
+ /* start the EOF timeout timer */
+ mod_timer(&priv->eof_timeout_timer,
+ jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+
+ return 0;
+
+out_free_nfb4eof_irq:
+ devm_free_irq(priv->dev, priv->nfb4eof_irq, priv);
+out_unsetup:
+ csi_idmac_unsetup(priv, VB2_BUF_STATE_QUEUED);
+out_free_dma_buf:
+ imx_media_free_dma_buf(priv->md, &priv->underrun_buf);
+out_put_ipu:
+ csi_idmac_put_ipu_resources(priv);
+ return ret;
+}
+
+static void csi_idmac_stop(struct csi_priv *priv)
+{
+ unsigned long flags;
+ int ret;
+
+ /* mark next EOF interrupt as the last before stream off */
+ spin_lock_irqsave(&priv->irqlock, flags);
+ priv->last_eof = true;
+ spin_unlock_irqrestore(&priv->irqlock, flags);
+
+ /* and then wait for the interrupt handler to mark completion */
+ ret = wait_for_completion_timeout(
+ &priv->last_eof_comp, msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+ if (ret == 0)
+ v4l2_warn(&priv->sd, "wait last EOF timeout\n");
+
+ devm_free_irq(priv->dev, priv->eof_irq, priv);
+ devm_free_irq(priv->dev, priv->nfb4eof_irq, priv);
+
+ csi_idmac_unsetup(priv, VB2_BUF_STATE_ERROR);
+
+ imx_media_free_dma_buf(priv->md, &priv->underrun_buf);
+
+ /* cancel the EOF timeout timer */
+ del_timer_sync(&priv->eof_timeout_timer);
+
+ csi_idmac_put_ipu_resources(priv);
+}
+
+/* Update the CSI whole sensor and active windows */
+static int csi_setup(struct csi_priv *priv)
+{
+ struct v4l2_mbus_framefmt *infmt, *outfmt;
+ struct v4l2_mbus_config sensor_mbus_cfg;
+ struct v4l2_fwnode_endpoint *sensor_ep;
+ struct v4l2_mbus_framefmt if_fmt;
+
+ infmt = &priv->format_mbus[CSI_SINK_PAD];
+ outfmt = &priv->format_mbus[priv->active_output_pad];
+ sensor_ep = &priv->sensor->sensor_ep;
+
+ /* compose mbus_config from sensor endpoint */
+ sensor_mbus_cfg.type = sensor_ep->bus_type;
+ sensor_mbus_cfg.flags = (sensor_ep->bus_type == V4L2_MBUS_CSI2) ?
+ sensor_ep->bus.mipi_csi2.flags :
+ sensor_ep->bus.parallel.flags;
+
+ /*
+ * we need to pass the input sensor frame to the CSI interface,
+ * but with the field type translated from the output format
+ */
+ if_fmt = *infmt;
+ if_fmt.field = outfmt->field;
+
+ ipu_csi_set_window(priv->csi, &priv->crop);
+
+ ipu_csi_set_downsize(priv->csi,
+ priv->crop.width == 2 * priv->compose.width,
+ priv->crop.height == 2 * priv->compose.height);
+
+ ipu_csi_init_interface(priv->csi, &sensor_mbus_cfg, &if_fmt);
+
+ ipu_csi_set_dest(priv->csi, priv->dest);
+
+ if (priv->dest == IPU_CSI_DEST_IDMAC)
+ ipu_csi_set_skip_smfc(priv->csi, priv->skip->skip_smfc,
+ priv->skip->max_ratio - 1, 0);
+
+ ipu_csi_dump(priv->csi);
+
+ return 0;
+}
+
+static int csi_start(struct csi_priv *priv)
+{
+ struct v4l2_fract *output_fi, *input_fi;
+ u32 bad_frames = 0;
+ int ret;
+
+ if (!priv->sensor) {
+ v4l2_err(&priv->sd, "no sensor attached\n");
+ return -EINVAL;
+ }
+
+ output_fi = &priv->frame_interval[priv->active_output_pad];
+ input_fi = &priv->frame_interval[CSI_SINK_PAD];
+
+ ret = v4l2_subdev_call(priv->sensor->sd, sensor,
+ g_skip_frames, &bad_frames);
+ if (!ret && bad_frames) {
+ u32 delay_usec;
+
+ /*
+ * This sensor produces bad frames when it is first turned
+ * on, so add a delay to skip them before enabling the CSI
+ * hardware. This matters especially for sensors with a
+ * BT.656 interface: any shift in the SAV/EAV sync codes
+ * will cause the CSI to lose vertical/horizontal sync.
+ */
+ delay_usec = DIV_ROUND_UP_ULL(
+ (u64)USEC_PER_SEC * input_fi->numerator * bad_frames,
+ input_fi->denominator);
+ usleep_range(delay_usec, delay_usec + 1000);
+ }
+
+ if (priv->dest == IPU_CSI_DEST_IDMAC) {
+ ret = csi_idmac_start(priv);
+ if (ret)
+ return ret;
+ }
+
+ ret = csi_setup(priv);
+ if (ret)
+ goto idmac_stop;
+
+ /* start the frame interval monitor */
+ if (priv->fim && priv->dest == IPU_CSI_DEST_IDMAC) {
+ ret = imx_media_fim_set_stream(priv->fim, output_fi, true);
+ if (ret)
+ goto idmac_stop;
+ }
+
+ ret = ipu_csi_enable(priv->csi);
+ if (ret) {
+ v4l2_err(&priv->sd, "CSI enable error: %d\n", ret);
+ goto fim_off;
+ }
+
+ return 0;
+
+fim_off:
+ if (priv->fim && priv->dest == IPU_CSI_DEST_IDMAC)
+ imx_media_fim_set_stream(priv->fim, NULL, false);
+idmac_stop:
+ if (priv->dest == IPU_CSI_DEST_IDMAC)
+ csi_idmac_stop(priv);
+ return ret;
+}
+
+static void csi_stop(struct csi_priv *priv)
+{
+ if (priv->dest == IPU_CSI_DEST_IDMAC) {
+ csi_idmac_stop(priv);
+
+ /* stop the frame interval monitor */
+ if (priv->fim)
+ imx_media_fim_set_stream(priv->fim, NULL, false);
+ }
+
+ ipu_csi_disable(priv->csi);
+}
+
+static const struct csi_skip_desc csi_skip[12] = {
+ { 1, 1, 0x00 }, /* Keep all frames */
+ { 5, 6, 0x10 }, /* Skip every sixth frame */
+ { 4, 5, 0x08 }, /* Skip every fifth frame */
+ { 3, 4, 0x04 }, /* Skip every fourth frame */
+ { 2, 3, 0x02 }, /* Skip every third frame */
+ { 3, 5, 0x0a }, /* Skip frames 1 and 3 of every 5 */
+ { 1, 2, 0x01 }, /* Skip every second frame */
+ { 2, 5, 0x0b }, /* Keep frames 1 and 4 of every 5 */
+ { 1, 3, 0x03 }, /* Keep one in three frames */
+ { 1, 4, 0x07 }, /* Keep one in four frames */
+ { 1, 5, 0x0f }, /* Keep one in five frames */
+ { 1, 6, 0x1f }, /* Keep one in six frames */
+};
+
+static void csi_apply_skip_interval(const struct csi_skip_desc *skip,
+ struct v4l2_fract *interval)
+{
+ unsigned int div;
+
+ interval->numerator *= skip->max_ratio;
+ interval->denominator *= skip->keep;
+
+ /* Reduce fraction to lowest terms */
+ div = gcd(interval->numerator, interval->denominator);
+ if (div > 1) {
+ interval->numerator /= div;
+ interval->denominator /= div;
+ }
+}
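+
+/*
+ * For example: applying the { 2, 3, 0x02 } entry ("skip every third
+ * frame") to a 1/30 s input interval gives (1 * 3) / (30 * 2) = 3/60,
+ * which reduces to 1/20 s, i.e. 20 frames/s.
+ */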
+
+/*
+ * Find the skip pattern to produce the output frame interval closest to the
+ * requested one, for the given input frame interval. Updates the output frame
+ * interval to the exact value.
+ */
+static const struct csi_skip_desc *csi_find_best_skip(struct v4l2_fract *in,
+ struct v4l2_fract *out)
+{
+ const struct csi_skip_desc *skip = &csi_skip[0], *best_skip = skip;
+ u32 min_err = UINT_MAX;
+ u64 want_us;
+ int i;
+
+ /* Default to 1:1 ratio */
+ if (out->numerator == 0 || out->denominator == 0 ||
+ in->numerator == 0 || in->denominator == 0) {
+ *out = *in;
+ return best_skip;
+ }
+
+ want_us = div_u64((u64)USEC_PER_SEC * out->numerator, out->denominator);
+
+ /* Find the reduction closest to the requested time per frame */
+ for (i = 0; i < ARRAY_SIZE(csi_skip); i++, skip++) {
+ u64 tmp, err;
+
+ tmp = div_u64((u64)USEC_PER_SEC * in->numerator *
+ skip->max_ratio, in->denominator * skip->keep);
+
+ err = abs((s64)tmp - want_us);
+ if (err < min_err) {
+ min_err = err;
+ best_skip = skip;
+ }
+ }
+
+ *out = *in;
+ csi_apply_skip_interval(best_skip, out);
+
+ return best_skip;
+}
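+
+/*
+ * For example: with a 1/30 s input interval, a requested 1/15 s output
+ * interval selects the { 1, 2, 0x01 } entry ("skip every second
+ * frame") and the output interval becomes exactly 1/15 s.
+ */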
+
+/*
+ * V4L2 subdev operations.
+ */
+
+static int csi_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+
+ if (fi->pad >= CSI_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ fi->interval = priv->frame_interval[fi->pad];
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int csi_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_fract *input_fi;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ input_fi = &priv->frame_interval[CSI_SINK_PAD];
+
+ switch (fi->pad) {
+ case CSI_SINK_PAD:
+ /* No limits on input frame interval */
+ /* Reset output intervals and frame skipping ratio to 1:1 */
+ priv->frame_interval[CSI_SRC_PAD_IDMAC] = fi->interval;
+ priv->frame_interval[CSI_SRC_PAD_DIRECT] = fi->interval;
+ priv->skip = &csi_skip[0];
+ break;
+ case CSI_SRC_PAD_IDMAC:
+ /*
+ * frame interval at IDMAC output pad depends on input
+ * interval, modified by frame skipping.
+ */
+ priv->skip = csi_find_best_skip(input_fi, &fi->interval);
+ break;
+ case CSI_SRC_PAD_DIRECT:
+ /*
+ * frame interval at DIRECT output pad is same as input
+ * interval.
+ */
+ fi->interval = *input_fi;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->frame_interval[fi->pad] = fi->interval;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ if (!priv->src_sd || !priv->sink) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ /*
+ * enable/disable streaming only if stream_count is
+ * going from 0 to 1 / 1 to 0.
+ */
+ if (priv->stream_count != !enable)
+ goto update_count;
+
+ if (enable) {
+ /* upstream must be started first, before starting CSI */
+ ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+ if (ret)
+ goto out;
+
+ dev_dbg(priv->dev, "stream ON\n");
+ ret = csi_start(priv);
+ if (ret) {
+ v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
+ goto out;
+ }
+ } else {
+ dev_dbg(priv->dev, "stream OFF\n");
+ /* CSI must be stopped first, then stop upstream */
+ csi_stop(priv);
+ v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
+ }
+
+update_count:
+ priv->stream_count += enable ? 1 : -1;
+ if (priv->stream_count < 0)
+ priv->stream_count = 0;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *remote_sd;
+ int ret = 0;
+
+ dev_dbg(priv->dev, "link setup %s -> %s\n", remote->entity->name,
+ local->entity->name);
+
+ mutex_lock(&priv->lock);
+
+ if (local->flags & MEDIA_PAD_FL_SINK) {
+ if (!is_media_entity_v4l2_subdev(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->src_sd) {
+ ret = -EBUSY;
+ goto out;
+ }
+ priv->src_sd = remote_sd;
+ } else {
+ priv->src_sd = NULL;
+ }
+
+ goto out;
+ }
+
+ /* this is a source pad */
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->sink) {
+ ret = -EBUSY;
+ goto out;
+ }
+ } else {
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+ v4l2_ctrl_handler_init(&priv->ctrl_hdlr, 0);
+ priv->sink = NULL;
+ goto out;
+ }
+
+ /* record which output pad is now active */
+ priv->active_output_pad = local->index;
+
+ /* set CSI destination */
+ if (local->index == CSI_SRC_PAD_IDMAC) {
+ if (!is_media_entity_v4l2_video_device(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (priv->fim) {
+ ret = imx_media_fim_add_controls(priv->fim);
+ if (ret)
+ goto out;
+ }
+
+ priv->dest = IPU_CSI_DEST_IDMAC;
+ } else {
+ if (!is_media_entity_v4l2_subdev(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+ switch (remote_sd->grp_id) {
+ case IMX_MEDIA_GRP_ID_VDIC:
+ priv->dest = IPU_CSI_DEST_VDIC;
+ break;
+ case IMX_MEDIA_GRP_ID_IC_PRP:
+ priv->dest = IPU_CSI_DEST_IC;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ priv->sink = remote->entity;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_fwnode_endpoint *sensor_ep;
+ const struct imx_media_pixfmt *incc;
+ struct imx_media_subdev *sensor;
+ bool is_csi2;
+ int ret;
+
+ ret = v4l2_subdev_link_validate_default(sd, link,
+ source_fmt, sink_fmt);
+ if (ret)
+ return ret;
+
+ sensor = __imx_media_find_sensor(priv->md, &priv->sd.entity);
+ if (IS_ERR(sensor)) {
+ v4l2_err(&priv->sd, "no sensor attached\n");
+ return PTR_ERR(sensor);
+ }
+
+ mutex_lock(&priv->lock);
+
+ priv->sensor = sensor;
+ sensor_ep = &priv->sensor->sensor_ep;
+ is_csi2 = (sensor_ep->bus_type == V4L2_MBUS_CSI2);
+ incc = priv->cc[CSI_SINK_PAD];
+
+ if (priv->dest != IPU_CSI_DEST_IDMAC &&
+ (incc->bayer || (!is_csi2 &&
+ sensor_ep->bus.parallel.bus_width >= 16))) {
+ v4l2_err(&priv->sd,
+ "bayer/16-bit parallel buses must go to IDMAC pad\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (is_csi2) {
+ int vc_num = 0;
+ /*
+ * NOTE: it seems the virtual channels from the MIPI CSI-2
+ * receiver are used only for routing by the video muxes,
+ * or for hard-wired routing to the CSIs. Once the stream
+ * enters a CSI, however, it is treated internally in the
+ * IPU as virtual channel 0.
+ */
+#if 0
+ mutex_unlock(&priv->lock);
+ vc_num = imx_media_find_mipi_csi2_channel(priv->md,
+ &priv->sd.entity);
+ if (vc_num < 0)
+ return vc_num;
+ mutex_lock(&priv->lock);
+#endif
+ ipu_csi_set_mipi_datatype(priv->csi, vc_num,
+ &priv->format_mbus[CSI_SINK_PAD]);
+ }
+
+ /* select either parallel or MIPI-CSI2 as input to CSI */
+ ipu_set_csi_src_mux(priv->ipu, priv->csi_id, is_csi2);
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__csi_get_fmt(struct csi_priv *priv, struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&priv->sd, cfg, pad);
+ else
+ return &priv->format_mbus[pad];
+}
+
+static struct v4l2_rect *
+__csi_get_crop(struct csi_priv *priv, struct v4l2_subdev_pad_config *cfg,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_crop(&priv->sd, cfg, CSI_SINK_PAD);
+ else
+ return &priv->crop;
+}
+
+static struct v4l2_rect *
+__csi_get_compose(struct csi_priv *priv, struct v4l2_subdev_pad_config *cfg,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_compose(&priv->sd, cfg,
+ CSI_SINK_PAD);
+ else
+ return &priv->compose;
+}
+
+static void csi_try_crop(struct csi_priv *priv,
+ struct v4l2_rect *crop,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_mbus_framefmt *infmt,
+ struct imx_media_subdev *sensor)
+{
+ struct v4l2_fwnode_endpoint *sensor_ep;
+
+ sensor_ep = &sensor->sensor_ep;
+
+ crop->width = min_t(__u32, infmt->width, crop->width);
+ if (crop->left + crop->width > infmt->width)
+ crop->left = infmt->width - crop->width;
+ /* adjust crop left/width to h/w alignment restrictions */
+ crop->left &= ~0x3;
+ crop->width &= ~0x7;
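+ /* e.g. a requested left/width of 5/721 is adjusted to 4/720 */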
+
+ /*
+ * FIXME: not sure why yet, but on interlaced BT.656,
+ * changing the vertical cropping causes loss of vertical
+ * sync, so fix the height to the NTSC/PAL active lines.
+ * NTSC contains 2 extra lines of active video that need
+ * to be cropped.
+ */
+ if (sensor_ep->bus_type == V4L2_MBUS_BT656 &&
+ (V4L2_FIELD_HAS_BOTH(infmt->field) ||
+ infmt->field == V4L2_FIELD_ALTERNATE)) {
+ crop->height = infmt->height;
+ crop->top = (infmt->height == 480) ? 2 : 0;
+ } else {
+ crop->height = min_t(__u32, infmt->height, crop->height);
+ if (crop->top + crop->height > infmt->height)
+ crop->top = infmt->height - crop->height;
+ }
+}
+
+static int csi_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ const struct imx_media_pixfmt *incc;
+ struct v4l2_mbus_framefmt *infmt;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ infmt = __csi_get_fmt(priv, cfg, CSI_SINK_PAD, code->which);
+ incc = imx_media_find_mbus_format(infmt->code, CS_SEL_ANY, true);
+
+ switch (code->pad) {
+ case CSI_SINK_PAD:
+ ret = imx_media_enum_mbus_format(&code->code, code->index,
+ CS_SEL_ANY, true);
+ break;
+ case CSI_SRC_PAD_DIRECT:
+ case CSI_SRC_PAD_IDMAC:
+ if (incc->bayer) {
+ if (code->index != 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ code->code = infmt->code;
+ } else {
+ u32 cs_sel = (incc->cs == IPUV3_COLORSPACE_YUV) ?
+ CS_SEL_YUV : CS_SEL_RGB;
+ ret = imx_media_enum_ipu_format(&code->code,
+ code->index,
+ cs_sel);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_rect *crop;
+ int ret = 0;
+
+ if (fse->pad >= CSI_NUM_PADS ||
+ fse->index > (fse->pad == CSI_SINK_PAD ? 0 : 3))
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ if (fse->pad == CSI_SINK_PAD) {
+ fse->min_width = MIN_W;
+ fse->max_width = MAX_W;
+ fse->min_height = MIN_H;
+ fse->max_height = MAX_H;
+ } else {
+ crop = __csi_get_crop(priv, cfg, fse->which);
+
+ fse->min_width = fse->max_width = fse->index & 1 ?
+ crop->width / 2 : crop->width;
+ fse->min_height = fse->max_height = fse->index & 2 ?
+ crop->height / 2 : crop->height;
+ }
+
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_fract *input_fi;
+ struct v4l2_rect *crop;
+ int ret = 0;
+
+ if (fie->pad >= CSI_NUM_PADS ||
+ fie->index >= (fie->pad != CSI_SRC_PAD_IDMAC ?
+ 1 : ARRAY_SIZE(csi_skip)))
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ input_fi = &priv->frame_interval[CSI_SINK_PAD];
+ crop = __csi_get_crop(priv, cfg, fie->which);
+
+ if ((fie->width != crop->width && fie->width != crop->width / 2) ||
+ (fie->height != crop->height && fie->height != crop->height / 2)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ fie->interval = *input_fi;
+
+ if (fie->pad == CSI_SRC_PAD_IDMAC)
+ csi_apply_skip_interval(&csi_skip[fie->index],
+ &fie->interval);
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= CSI_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ fmt = __csi_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ sdformat->format = *fmt;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static void csi_try_fmt(struct csi_priv *priv,
+ struct imx_media_subdev *sensor,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat,
+ struct v4l2_rect *crop,
+ struct v4l2_rect *compose,
+ const struct imx_media_pixfmt **cc)
+{
+ const struct imx_media_pixfmt *incc;
+ struct v4l2_mbus_framefmt *infmt;
+ u32 code;
+
+ infmt = __csi_get_fmt(priv, cfg, CSI_SINK_PAD, sdformat->which);
+
+ switch (sdformat->pad) {
+ case CSI_SRC_PAD_DIRECT:
+ case CSI_SRC_PAD_IDMAC:
+ incc = imx_media_find_mbus_format(infmt->code,
+ CS_SEL_ANY, true);
+
+ sdformat->format.width = compose->width;
+ sdformat->format.height = compose->height;
+
+ if (incc->bayer) {
+ sdformat->format.code = infmt->code;
+ *cc = incc;
+ } else {
+ u32 cs_sel = (incc->cs == IPUV3_COLORSPACE_YUV) ?
+ CS_SEL_YUV : CS_SEL_RGB;
+
+ *cc = imx_media_find_ipu_format(sdformat->format.code,
+ cs_sel);
+ if (!*cc) {
+ imx_media_enum_ipu_format(&code, 0, cs_sel);
+ *cc = imx_media_find_ipu_format(code, cs_sel);
+ sdformat->format.code = (*cc)->codes[0];
+ }
+ }
+
+ if (sdformat->pad == CSI_SRC_PAD_DIRECT ||
+ sdformat->format.field != V4L2_FIELD_NONE)
+ sdformat->format.field = infmt->field;
+
+ /*
+ * translate V4L2_FIELD_ALTERNATE to SEQ_TB or SEQ_BT
+ * depending on input height (assume NTSC top-bottom
+ * order if 480 lines, otherwise PAL bottom-top order).
+ */
+ if (sdformat->format.field == V4L2_FIELD_ALTERNATE) {
+ sdformat->format.field = (infmt->height == 480) ?
+ V4L2_FIELD_SEQ_TB : V4L2_FIELD_SEQ_BT;
+ }
+
+ /* propagate colorimetry from sink */
+ sdformat->format.colorspace = infmt->colorspace;
+ sdformat->format.xfer_func = infmt->xfer_func;
+ sdformat->format.quantization = infmt->quantization;
+ sdformat->format.ycbcr_enc = infmt->ycbcr_enc;
+ break;
+ case CSI_SINK_PAD:
+ v4l_bound_align_image(&sdformat->format.width, MIN_W, MAX_W,
+ W_ALIGN, &sdformat->format.height,
+ MIN_H, MAX_H, H_ALIGN, S_ALIGN);
+
+ /* Reset crop and compose rectangles */
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = sdformat->format.width;
+ crop->height = sdformat->format.height;
+ csi_try_crop(priv, crop, cfg, &sdformat->format, sensor);
+ compose->left = 0;
+ compose->top = 0;
+ compose->width = crop->width;
+ compose->height = crop->height;
+
+ *cc = imx_media_find_mbus_format(sdformat->format.code,
+ CS_SEL_ANY, true);
+ if (!*cc) {
+ imx_media_enum_mbus_format(&code, 0,
+ CS_SEL_ANY, false);
+ *cc = imx_media_find_mbus_format(code,
+ CS_SEL_ANY, false);
+ sdformat->format.code = (*cc)->codes[0];
+ }
+
+ imx_media_fill_default_mbus_fields(
+ &sdformat->format, infmt,
+ priv->active_output_pad == CSI_SRC_PAD_DIRECT);
+ break;
+ }
+}
+
+static int csi_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct imx_media_video_dev *vdev = priv->vdev;
+ const struct imx_media_pixfmt *cc;
+ struct imx_media_subdev *sensor;
+ struct v4l2_pix_format vdev_fmt;
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_rect *crop, *compose;
+ int ret = 0;
+
+ if (sdformat->pad >= CSI_NUM_PADS)
+ return -EINVAL;
+
+ sensor = imx_media_find_sensor(priv->md, &priv->sd.entity);
+ if (IS_ERR(sensor)) {
+ v4l2_err(&priv->sd, "no sensor attached\n");
+ return PTR_ERR(sensor);
+ }
+
+ mutex_lock(&priv->lock);
+
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ crop = __csi_get_crop(priv, cfg, sdformat->which);
+ compose = __csi_get_compose(priv, cfg, sdformat->which);
+
+ csi_try_fmt(priv, sensor, cfg, sdformat, crop, compose, &cc);
+
+ fmt = __csi_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ *fmt = sdformat->format;
+
+ if (sdformat->pad == CSI_SINK_PAD) {
+ int pad;
+
+ /* propagate format to source pads */
+ for (pad = CSI_SINK_PAD + 1; pad < CSI_NUM_PADS; pad++) {
+ const struct imx_media_pixfmt *outcc;
+ struct v4l2_mbus_framefmt *outfmt;
+ struct v4l2_subdev_format format;
+
+ format.pad = pad;
+ format.which = sdformat->which;
+ format.format = sdformat->format;
+ csi_try_fmt(priv, sensor, cfg, &format, NULL, compose,
+ &outcc);
+
+ outfmt = __csi_get_fmt(priv, cfg, pad, sdformat->which);
+ *outfmt = format.format;
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[pad] = outcc;
+ }
+ }
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY)
+ goto out;
+
+ priv->cc[sdformat->pad] = cc;
+
+ /* propagate IDMAC output pad format to capture device */
+ imx_media_mbus_fmt_to_pix_fmt(&vdev_fmt,
+ &priv->format_mbus[CSI_SRC_PAD_IDMAC],
+ priv->cc[CSI_SRC_PAD_IDMAC]);
+ mutex_unlock(&priv->lock);
+ imx_media_capture_device_set_format(vdev, &vdev_fmt);
+
+ return 0;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *infmt;
+ struct v4l2_rect *crop, *compose;
+ int ret = 0;
+
+ if (sel->pad != CSI_SINK_PAD)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ infmt = __csi_get_fmt(priv, cfg, CSI_SINK_PAD, sel->which);
+ crop = __csi_get_crop(priv, cfg, sel->which);
+ compose = __csi_get_compose(priv, cfg, sel->which);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = infmt->width;
+ sel->r.height = infmt->height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *crop;
+ break;
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ sel->r.left = 0;
+ sel->r.top = 0;
+ sel->r.width = crop->width;
+ sel->r.height = crop->height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ sel->r = *compose;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_set_scale(u32 *compose, u32 crop, u32 flags)
+{
+ if ((flags & (V4L2_SEL_FLAG_LE | V4L2_SEL_FLAG_GE)) ==
+ (V4L2_SEL_FLAG_LE | V4L2_SEL_FLAG_GE) &&
+ *compose != crop && *compose != crop / 2)
+ return -ERANGE;
+
+ if (*compose <= crop / 2 ||
+ (*compose < crop * 3 / 4 && !(flags & V4L2_SEL_FLAG_GE)) ||
+ (*compose < crop && (flags & V4L2_SEL_FLAG_LE)))
+ *compose = crop / 2;
+ else
+ *compose = crop;
+
+ return 0;
+}
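+
+/*
+ * For example: with a 640-pixel-wide crop, a requested compose width
+ * of 400 is rounded down to 320 (the 2:1 downscaled size) when no
+ * selection flags are given, and up to 640 when V4L2_SEL_FLAG_GE is
+ * set.
+ */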
+
+static int csi_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *infmt;
+ struct v4l2_rect *crop, *compose;
+ struct imx_media_subdev *sensor;
+ int pad, ret = 0;
+
+ if (sel->pad != CSI_SINK_PAD)
+ return -EINVAL;
+
+ sensor = imx_media_find_sensor(priv->md, &priv->sd.entity);
+ if (IS_ERR(sensor)) {
+ v4l2_err(&priv->sd, "no sensor attached\n");
+ return PTR_ERR(sensor);
+ }
+
+ mutex_lock(&priv->lock);
+
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ infmt = __csi_get_fmt(priv, cfg, CSI_SINK_PAD, sel->which);
+ crop = __csi_get_crop(priv, cfg, sel->which);
+ compose = __csi_get_compose(priv, cfg, sel->which);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ /*
+ * Modifying the crop rectangle always changes the format on
+ * the source pads. If the KEEP_CONFIG flag is set, just return
+ * the current crop rectangle.
+ */
+ if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
+ sel->r = priv->crop;
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+ *crop = sel->r;
+ goto out;
+ }
+
+ csi_try_crop(priv, &sel->r, cfg, infmt, sensor);
+
+ *crop = sel->r;
+
+ /* Reset scaling to 1:1 */
+ compose->width = crop->width;
+ compose->height = crop->height;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ /*
+ * Modifying the compose rectangle always changes the format on
+ * the source pads. If the KEEP_CONFIG flag is set, just return
+ * the current compose rectangle.
+ */
+ if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
+ sel->r = priv->compose;
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+ *compose = sel->r;
+ goto out;
+ }
+
+ sel->r.left = 0;
+ sel->r.top = 0;
+ ret = csi_set_scale(&sel->r.width, crop->width, sel->flags);
+ if (ret)
+ goto out;
+ ret = csi_set_scale(&sel->r.height, crop->height, sel->flags);
+ if (ret)
+ goto out;
+
+ *compose = sel->r;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Reset source pads to sink compose rectangle */
+ for (pad = CSI_SINK_PAD + 1; pad < CSI_NUM_PADS; pad++) {
+ struct v4l2_mbus_framefmt *outfmt;
+
+ outfmt = __csi_get_fmt(priv, cfg, pad, sel->which);
+ outfmt->width = compose->width;
+ outfmt->height = compose->height;
+ }
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int csi_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ if (sub->type != V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR)
+ return -EINVAL;
+ if (sub->id != 0)
+ return -EINVAL;
+
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+}
+
+static int csi_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_unsubscribe(fh, sub);
+}
+
+/*
+ * Called when the subdev is registered with the v4l2 device: set up
+ * the pads and default formats, get a handle to the IPU CSI, and
+ * register the capture video device.
+ */
+static int csi_registered(struct v4l2_subdev *sd)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+ int i, ret;
+ u32 code;
+
+ /* get media device */
+ priv->md = dev_get_drvdata(sd->v4l2_dev->dev);
+
+ /* get handle to IPU CSI */
+ priv->csi = ipu_csi_get(priv->ipu, priv->csi_id);
+ if (IS_ERR(priv->csi)) {
+ v4l2_err(&priv->sd, "failed to get CSI%d\n", priv->csi_id);
+ return PTR_ERR(priv->csi);
+ }
+
+ for (i = 0; i < CSI_NUM_PADS; i++) {
+ priv->pad[i].flags = (i == CSI_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ code = 0;
+ if (i != CSI_SINK_PAD)
+ imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
+
+ /* set a default mbus format */
+ ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
+ 640, 480, code, V4L2_FIELD_NONE,
+ &priv->cc[i]);
+ if (ret)
+ goto put_csi;
+
+ /* init default frame interval */
+ priv->frame_interval[i].numerator = 1;
+ priv->frame_interval[i].denominator = 30;
+ }
+
+ /* disable frame skipping */
+ priv->skip = &csi_skip[0];
+
+ /* init default crop and compose rectangle sizes */
+ priv->crop.width = 640;
+ priv->crop.height = 480;
+ priv->compose.width = 640;
+ priv->compose.height = 480;
+
+ priv->fim = imx_media_fim_init(&priv->sd);
+ if (IS_ERR(priv->fim)) {
+ ret = PTR_ERR(priv->fim);
+ goto put_csi;
+ }
+
+ ret = media_entity_pads_init(&sd->entity, CSI_NUM_PADS, priv->pad);
+ if (ret)
+ goto free_fim;
+
+ ret = imx_media_capture_device_register(priv->vdev);
+ if (ret)
+ goto free_fim;
+
+ ret = imx_media_add_video_device(priv->md, priv->vdev);
+ if (ret)
+ goto unreg;
+
+ return 0;
+unreg:
+ imx_media_capture_device_unregister(priv->vdev);
+free_fim:
+ if (priv->fim)
+ imx_media_fim_free(priv->fim);
+put_csi:
+ ipu_csi_put(priv->csi);
+ return ret;
+}
+
+static void csi_unregistered(struct v4l2_subdev *sd)
+{
+ struct csi_priv *priv = v4l2_get_subdevdata(sd);
+
+ imx_media_capture_device_unregister(priv->vdev);
+
+ if (priv->fim)
+ imx_media_fim_free(priv->fim);
+
+ if (!IS_ERR_OR_NULL(priv->csi))
+ ipu_csi_put(priv->csi);
+}
+
+static const struct media_entity_operations csi_entity_ops = {
+ .link_setup = csi_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_core_ops csi_core_ops = {
+ .subscribe_event = csi_subscribe_event,
+ .unsubscribe_event = csi_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_video_ops csi_video_ops = {
+ .g_frame_interval = csi_g_frame_interval,
+ .s_frame_interval = csi_s_frame_interval,
+ .s_stream = csi_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops csi_pad_ops = {
+ .enum_mbus_code = csi_enum_mbus_code,
+ .enum_frame_size = csi_enum_frame_size,
+ .enum_frame_interval = csi_enum_frame_interval,
+ .get_fmt = csi_get_fmt,
+ .set_fmt = csi_set_fmt,
+ .get_selection = csi_get_selection,
+ .set_selection = csi_set_selection,
+ .link_validate = csi_link_validate,
+};
+
+static const struct v4l2_subdev_ops csi_subdev_ops = {
+ .core = &csi_core_ops,
+ .video = &csi_video_ops,
+ .pad = &csi_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops csi_internal_ops = {
+ .registered = csi_registered,
+ .unregistered = csi_unregistered,
+};
+
+static int imx_csi_probe(struct platform_device *pdev)
+{
+ struct ipu_client_platformdata *pdata;
+ struct pinctrl *pinctrl;
+ struct csi_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, &priv->sd);
+ priv->dev = &pdev->dev;
+
+ ret = dma_set_coherent_mask(priv->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ /* get parent IPU */
+ priv->ipu = dev_get_drvdata(priv->dev->parent);
+
+ /* get our CSI id */
+ pdata = priv->dev->platform_data;
+ priv->csi_id = pdata->csi;
+ priv->smfc_id = (priv->csi_id == 0) ? 0 : 2;
+
+ init_timer(&priv->eof_timeout_timer);
+ priv->eof_timeout_timer.data = (unsigned long)priv;
+ priv->eof_timeout_timer.function = csi_idmac_eof_timeout;
+ spin_lock_init(&priv->irqlock);
+
+ v4l2_subdev_init(&priv->sd, &csi_subdev_ops);
+ v4l2_set_subdevdata(&priv->sd, priv);
+ priv->sd.internal_ops = &csi_internal_ops;
+ priv->sd.entity.ops = &csi_entity_ops;
+ priv->sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+ priv->sd.dev = &pdev->dev;
+ priv->sd.fwnode = of_fwnode_handle(pdata->of_node);
+ priv->sd.owner = THIS_MODULE;
+ priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ priv->sd.grp_id = priv->csi_id ?
+ IMX_MEDIA_GRP_ID_CSI1 : IMX_MEDIA_GRP_ID_CSI0;
+ imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
+ priv->sd.grp_id, ipu_get_num(priv->ipu));
+
+ priv->vdev = imx_media_capture_device_init(&priv->sd,
+ CSI_SRC_PAD_IDMAC);
+ if (IS_ERR(priv->vdev))
+ return PTR_ERR(priv->vdev);
+
+ mutex_init(&priv->lock);
+
+ v4l2_ctrl_handler_init(&priv->ctrl_hdlr, 0);
+ priv->sd.ctrl_handler = &priv->ctrl_hdlr;
+
+ /*
+ * The IPUv3 driver did not assign an of_node to this
+ * device. As a result, pinctrl does not automatically
+ * configure our pin groups, so we need to do that manually
+ * here, after setting this device's of_node.
+ */
+ priv->dev->of_node = pdata->of_node;
+ pinctrl = devm_pinctrl_get_select_default(priv->dev);
+
+ ret = v4l2_async_register_subdev(&priv->sd);
+ if (ret)
+ goto free;
+
+ return 0;
+free:
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+ mutex_destroy(&priv->lock);
+ imx_media_capture_device_remove(priv->vdev);
+ return ret;
+}
+
+static int imx_csi_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct csi_priv *priv = sd_to_dev(sd);
+
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+ mutex_destroy(&priv->lock);
+ imx_media_capture_device_remove(priv->vdev);
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+
+ return 0;
+}
+
+static const struct platform_device_id imx_csi_ids[] = {
+ { .name = "imx-ipuv3-csi" },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, imx_csi_ids);
+
+static struct platform_driver imx_csi_driver = {
+ .probe = imx_csi_probe,
+ .remove = imx_csi_remove,
+ .id_table = imx_csi_ids,
+ .driver = {
+ .name = "imx-ipuv3-csi",
+ },
+};
+module_platform_driver(imx_csi_driver);
+
+MODULE_DESCRIPTION("i.MX CSI subdev driver");
+MODULE_AUTHOR("Steve Longerbeam <[email protected]>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx-ipuv3-csi");
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
new file mode 100644
index 000000000000..48cbc7716758
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -0,0 +1,667 @@
+/*
+ * V4L2 Media Controller Driver for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <video/imx-ipu-v3.h>
+#include <media/imx.h>
+#include "imx-media.h"
+
+static inline struct imx_media_dev *notifier2dev(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct imx_media_dev, subdev_notifier);
+}
+
+/*
+ * Find a subdev by device node or device name. This is called during
+ * driver load to form the async subdev list and to bind the subdevs.
+ */
+struct imx_media_subdev *
+imx_media_find_async_subdev(struct imx_media_dev *imxmd,
+ struct device_node *np,
+ const char *devname)
+{
+ struct fwnode_handle *fwnode = np ? of_fwnode_handle(np) : NULL;
+ struct imx_media_subdev *imxsd;
+ int i;
+
+ for (i = 0; i < imxmd->subdev_notifier.num_subdevs; i++) {
+ imxsd = &imxmd->subdev[i];
+ switch (imxsd->asd.match_type) {
+ case V4L2_ASYNC_MATCH_FWNODE:
+ if (fwnode && imxsd->asd.match.fwnode.fwnode == fwnode)
+ return imxsd;
+ break;
+ case V4L2_ASYNC_MATCH_DEVNAME:
+ if (devname &&
+ !strcmp(imxsd->asd.match.device_name.name, devname))
+ return imxsd;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Adds a subdev to the async subdev list. If np is non-NULL, the
+ * subdev is added with a V4L2_ASYNC_MATCH_FWNODE match type, otherwise
+ * with a V4L2_ASYNC_MATCH_DEVNAME match type using the dev_name of the
+ * given platform_device. This is called during driver load when
+ * forming the async subdev list.
+ */
+struct imx_media_subdev *
+imx_media_add_async_subdev(struct imx_media_dev *imxmd,
+ struct device_node *np,
+ struct platform_device *pdev)
+{
+ struct imx_media_subdev *imxsd;
+ struct v4l2_async_subdev *asd;
+ const char *devname = NULL;
+ int sd_idx;
+
+ mutex_lock(&imxmd->mutex);
+
+ if (pdev)
+ devname = dev_name(&pdev->dev);
+
+ /* return NULL if this subdev already added */
+ if (imx_media_find_async_subdev(imxmd, np, devname)) {
+ dev_dbg(imxmd->md.dev, "%s: already added %s\n",
+ __func__, np ? np->name : devname);
+ imxsd = NULL;
+ goto out;
+ }
+
+ sd_idx = imxmd->subdev_notifier.num_subdevs;
+ if (sd_idx >= IMX_MEDIA_MAX_SUBDEVS) {
+ dev_err(imxmd->md.dev, "%s: too many subdevs! can't add %s\n",
+ __func__, np ? np->name : devname);
+ imxsd = ERR_PTR(-ENOSPC);
+ goto out;
+ }
+
+ imxsd = &imxmd->subdev[sd_idx];
+
+ asd = &imxsd->asd;
+ if (np) {
+ asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
+ asd->match.fwnode.fwnode = of_fwnode_handle(np);
+ } else {
+ asd->match_type = V4L2_ASYNC_MATCH_DEVNAME;
+ strncpy(imxsd->devname, devname, sizeof(imxsd->devname));
+ asd->match.device_name.name = imxsd->devname;
+ imxsd->pdev = pdev;
+ }
+
+ imxmd->async_ptrs[sd_idx] = asd;
+ imxmd->subdev_notifier.num_subdevs++;
+
+ dev_dbg(imxmd->md.dev, "%s: added %s, match type %s\n",
+ __func__, np ? np->name : devname, np ? "FWNODE" : "DEVNAME");
+
+out:
+ mutex_unlock(&imxmd->mutex);
+ return imxsd;
+}
+
+/*
+ * Adds an imx-media link to a subdev pad's link list. This is called
+ * during driver load when forming the links between subdevs.
+ *
+ * @pad: the local pad
+ * @remote_node: the device node of the remote subdev
+ * @remote_devname: the device name of the remote subdev
+ * @local_pad: local pad index
+ * @remote_pad: remote pad index
+ */
+int imx_media_add_pad_link(struct imx_media_dev *imxmd,
+ struct imx_media_pad *pad,
+ struct device_node *remote_node,
+ const char *remote_devname,
+ int local_pad, int remote_pad)
+{
+ struct imx_media_link *link;
+ int link_idx, ret = 0;
+
+ mutex_lock(&imxmd->mutex);
+
+ link_idx = pad->num_links;
+ if (link_idx >= IMX_MEDIA_MAX_LINKS) {
+ dev_err(imxmd->md.dev, "%s: too many links!\n", __func__);
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ link = &pad->link[link_idx];
+
+ link->remote_sd_node = remote_node;
+ if (remote_devname)
+ strncpy(link->remote_devname, remote_devname,
+ sizeof(link->remote_devname));
+
+ link->local_pad = local_pad;
+ link->remote_pad = remote_pad;
+
+ pad->num_links++;
+out:
+ mutex_unlock(&imxmd->mutex);
+ return ret;
+}
+
+/*
+ * get IPU from this CSI and add it to the list of IPUs
+ * the media driver will control.
+ */
+static int imx_media_get_ipu(struct imx_media_dev *imxmd,
+ struct v4l2_subdev *csi_sd)
+{
+ struct ipu_soc *ipu;
+ int ipu_id;
+
+ ipu = dev_get_drvdata(csi_sd->dev->parent);
+ if (!ipu) {
+ v4l2_err(&imxmd->v4l2_dev,
+ "CSI %s has no parent IPU!\n", csi_sd->name);
+ return -ENODEV;
+ }
+
+ ipu_id = ipu_get_num(ipu);
+ if (ipu_id > 1) {
+ v4l2_err(&imxmd->v4l2_dev, "invalid IPU id %d!\n", ipu_id);
+ return -ENODEV;
+ }
+
+ if (!imxmd->ipu[ipu_id])
+ imxmd->ipu[ipu_id] = ipu;
+
+ return 0;
+}
+
+/* async subdev bound notifier */
+static int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct imx_media_dev *imxmd = notifier2dev(notifier);
+ struct device_node *np = to_of_node(sd->fwnode);
+ struct imx_media_subdev *imxsd;
+ int ret = 0;
+
+ mutex_lock(&imxmd->mutex);
+
+ imxsd = imx_media_find_async_subdev(imxmd, np, dev_name(sd->dev));
+ if (!imxsd) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (sd->grp_id & IMX_MEDIA_GRP_ID_CSI) {
+ ret = imx_media_get_ipu(imxmd, sd);
+ if (ret)
+ goto out_unlock;
+ } else if (sd->entity.function == MEDIA_ENT_F_VID_MUX) {
+ /* this is a video mux */
+ sd->grp_id = IMX_MEDIA_GRP_ID_VIDMUX;
+ } else if (imxsd->num_sink_pads == 0) {
+ /*
+ * this is an original source of video frames; it
+ * could be a camera sensor, an analog decoder, or
+ * a bridge device (HDMI -> MIPI CSI-2 for example).
+ * This group ID is used to locate the entity that
+ * is the original source of video in a pipeline.
+ */
+ sd->grp_id = IMX_MEDIA_GRP_ID_SENSOR;
+ }
+
+ /* attach the subdev */
+ imxsd->sd = sd;
+out:
+ if (ret)
+ v4l2_warn(&imxmd->v4l2_dev,
+ "Received unknown subdev %s\n", sd->name);
+ else
+ v4l2_info(&imxmd->v4l2_dev,
+ "Registered subdev %s\n", sd->name);
+
+out_unlock:
+ mutex_unlock(&imxmd->mutex);
+ return ret;
+}
+
+/*
+ * Create a single source->sink media link given a subdev and a single
+ * link from one of its source pads. Called after all subdevs have
+ * registered.
+ */
+static int imx_media_create_link(struct imx_media_dev *imxmd,
+ struct imx_media_subdev *src,
+ struct imx_media_link *link)
+{
+ struct imx_media_subdev *sink;
+ u16 source_pad, sink_pad;
+ int ret;
+
+ sink = imx_media_find_async_subdev(imxmd, link->remote_sd_node,
+ link->remote_devname);
+ if (!sink) {
+ v4l2_warn(&imxmd->v4l2_dev, "%s: no sink for %s:%d\n",
+ __func__, src->sd->name, link->local_pad);
+ return 0;
+ }
+
+ source_pad = link->local_pad;
+ sink_pad = link->remote_pad;
+
+ v4l2_info(&imxmd->v4l2_dev, "%s: %s:%d -> %s:%d\n", __func__,
+ src->sd->name, source_pad, sink->sd->name, sink_pad);
+
+ ret = media_create_pad_link(&src->sd->entity, source_pad,
+ &sink->sd->entity, sink_pad, 0);
+ if (ret)
+ v4l2_err(&imxmd->v4l2_dev,
+ "create_pad_link failed: %d\n", ret);
+
+ return ret;
+}
+
+/*
+ * Create the media links from the link lists of all imx-media pads.
+ * Called after all subdevs have registered.
+ */
+static int imx_media_create_links(struct imx_media_dev *imxmd)
+{
+ struct imx_media_subdev *imxsd;
+ struct imx_media_link *link;
+ struct imx_media_pad *pad;
+ int num_pads, i, j, k;
+ int ret = 0;
+
+ for (i = 0; i < imxmd->num_subdevs; i++) {
+ imxsd = &imxmd->subdev[i];
+ num_pads = imxsd->num_sink_pads + imxsd->num_src_pads;
+
+ for (j = 0; j < num_pads; j++) {
+ pad = &imxsd->pad[j];
+
+ /* only create the source->sink links */
+ if (!(pad->pad.flags & MEDIA_PAD_FL_SOURCE))
+ continue;
+
+ for (k = 0; k < pad->num_links; k++) {
+ link = &pad->link[k];
+
+ ret = imx_media_create_link(imxmd, imxsd, link);
+ if (ret)
+ goto out;
+ }
+ }
+ }
+
+out:
+ return ret;
+}
+
+/*
+ * Adds the given video device to the given imx-media source pad's vdev
+ * list, then continues upstream from the pad entity's sink pads.
+ */
+static int imx_media_add_vdev_to_pad(struct imx_media_dev *imxmd,
+ struct imx_media_video_dev *vdev,
+ struct media_pad *srcpad)
+{
+ struct media_entity *entity = srcpad->entity;
+ struct imx_media_subdev *imxsd;
+ struct imx_media_pad *imxpad;
+ struct media_link *link;
+ struct v4l2_subdev *sd;
+ int i, vdev_idx, ret;
+
+ /* skip this entity if not a v4l2_subdev */
+ if (!is_media_entity_v4l2_subdev(entity))
+ return 0;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+ imxsd = imx_media_find_subdev_by_sd(imxmd, sd);
+ if (IS_ERR(imxsd))
+ return PTR_ERR(imxsd);
+
+ imxpad = &imxsd->pad[srcpad->index];
+ vdev_idx = imxpad->num_vdevs;
+
+ /* just return if we've been here before */
+ for (i = 0; i < vdev_idx; i++)
+ if (vdev == imxpad->vdev[i])
+ return 0;
+
+ if (vdev_idx >= IMX_MEDIA_MAX_VDEVS) {
+ dev_err(imxmd->md.dev, "can't add %s to pad %s:%u\n",
+ vdev->vfd->entity.name, entity->name, srcpad->index);
+ return -ENOSPC;
+ }
+
+ dev_dbg(imxmd->md.dev, "adding %s to pad %s:%u\n",
+ vdev->vfd->entity.name, entity->name, srcpad->index);
+ imxpad->vdev[vdev_idx] = vdev;
+ imxpad->num_vdevs++;
+
+ /* move upstream from this entity's sink pads */
+ for (i = 0; i < entity->num_pads; i++) {
+ struct media_pad *pad = &entity->pads[i];
+
+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ list_for_each_entry(link, &entity->links, list) {
+ if (link->sink != pad)
+ continue;
+ ret = imx_media_add_vdev_to_pad(imxmd, vdev,
+ link->source);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* form the vdev lists in all imx-media source pads */
+static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd)
+{
+ struct imx_media_video_dev *vdev;
+ struct media_link *link;
+ int i, ret = 0;
+
+ for (i = 0; i < imxmd->num_vdevs; i++) {
+ vdev = imxmd->vdev[i];
+ link = list_first_entry(&vdev->vfd->entity.links,
+ struct media_link, list);
+ ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/* async subdev complete notifier */
+static int imx_media_probe_complete(struct v4l2_async_notifier *notifier)
+{
+ struct imx_media_dev *imxmd = notifier2dev(notifier);
+ int i, ret;
+
+ mutex_lock(&imxmd->mutex);
+
+ /* make sure all subdevs were bound */
+ for (i = 0; i < imxmd->num_subdevs; i++) {
+ if (!imxmd->subdev[i].sd) {
+ v4l2_err(&imxmd->v4l2_dev, "unbound subdev!\n");
+ ret = -ENODEV;
+ goto unlock;
+ }
+ }
+
+ ret = imx_media_create_links(imxmd);
+ if (ret)
+ goto unlock;
+
+ ret = imx_media_create_pad_vdev_lists(imxmd);
+ if (ret)
+ goto unlock;
+
+ ret = v4l2_device_register_subdev_nodes(&imxmd->v4l2_dev);
+unlock:
+ mutex_unlock(&imxmd->mutex);
+ if (ret)
+ return ret;
+
+ return media_device_register(&imxmd->md);
+}
+
+/*
+ * Adds controls to a video device from a subdev entity, then
+ * continues upstream from the entity's sink pads.
+ */
+static int imx_media_inherit_controls(struct imx_media_dev *imxmd,
+ struct video_device *vfd,
+ struct media_entity *entity)
+{
+ int i, ret = 0;
+
+ if (is_media_entity_v4l2_subdev(entity)) {
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+
+ dev_dbg(imxmd->md.dev,
+ "adding controls to %s from %s\n",
+ vfd->entity.name, sd->entity.name);
+
+ ret = v4l2_ctrl_add_handler(vfd->ctrl_handler,
+ sd->ctrl_handler,
+ NULL);
+ if (ret)
+ return ret;
+ }
+
+ /* move upstream */
+ for (i = 0; i < entity->num_pads; i++) {
+ struct media_pad *pad, *spad = &entity->pads[i];
+
+ if (!(spad->flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ pad = media_entity_remote_pad(spad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ continue;
+
+ ret = imx_media_inherit_controls(imxmd, vfd, pad->entity);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int imx_media_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
+{
+ struct media_entity *source = link->source->entity;
+ struct imx_media_subdev *imxsd;
+ struct imx_media_pad *imxpad;
+ struct imx_media_dev *imxmd;
+ struct video_device *vfd;
+ struct v4l2_subdev *sd;
+ int i, pad_idx, ret;
+
+ ret = v4l2_pipeline_link_notify(link, flags, notification);
+ if (ret)
+ return ret;
+
+ /* don't bother if source is not a subdev */
+ if (!is_media_entity_v4l2_subdev(source))
+ return 0;
+
+ sd = media_entity_to_v4l2_subdev(source);
+ pad_idx = link->source->index;
+
+ imxmd = dev_get_drvdata(sd->v4l2_dev->dev);
+
+ imxsd = imx_media_find_subdev_by_sd(imxmd, sd);
+ if (IS_ERR(imxsd))
+ return PTR_ERR(imxsd);
+ imxpad = &imxsd->pad[pad_idx];
+
+ /*
+ * Before disabling a link, reset controls for all video
+ * devices reachable from this link.
+ *
+ * After enabling a link, refresh controls for all video
+ * devices reachable from this link.
+ */
+ if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
+ !(flags & MEDIA_LNK_FL_ENABLED)) {
+ for (i = 0; i < imxpad->num_vdevs; i++) {
+ vfd = imxpad->vdev[i]->vfd;
+ dev_dbg(imxmd->md.dev,
+ "reset controls for %s\n",
+ vfd->entity.name);
+ v4l2_ctrl_handler_free(vfd->ctrl_handler);
+ v4l2_ctrl_handler_init(vfd->ctrl_handler, 0);
+ }
+ } else if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+ (link->flags & MEDIA_LNK_FL_ENABLED)) {
+ for (i = 0; i < imxpad->num_vdevs; i++) {
+ vfd = imxpad->vdev[i]->vfd;
+ dev_dbg(imxmd->md.dev,
+ "refresh controls for %s\n",
+ vfd->entity.name);
+ ret = imx_media_inherit_controls(imxmd, vfd,
+ &vfd->entity);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static const struct media_device_ops imx_media_md_ops = {
+ .link_notify = imx_media_link_notify,
+};
+
+static int imx_media_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct imx_media_subdev *csi[4] = {0};
+ struct imx_media_dev *imxmd;
+ int ret;
+
+ imxmd = devm_kzalloc(dev, sizeof(*imxmd), GFP_KERNEL);
+ if (!imxmd)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, imxmd);
+
+ strlcpy(imxmd->md.model, "imx-media", sizeof(imxmd->md.model));
+ imxmd->md.ops = &imx_media_md_ops;
+ imxmd->md.dev = dev;
+
+ mutex_init(&imxmd->mutex);
+
+ imxmd->v4l2_dev.mdev = &imxmd->md;
+ strlcpy(imxmd->v4l2_dev.name, "imx-media",
+ sizeof(imxmd->v4l2_dev.name));
+
+ media_device_init(&imxmd->md);
+
+ ret = v4l2_device_register(dev, &imxmd->v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(&imxmd->v4l2_dev,
+ "Failed to register v4l2_device: %d\n", ret);
+ goto cleanup;
+ }
+
+ dev_set_drvdata(imxmd->v4l2_dev.dev, imxmd);
+
+ ret = imx_media_of_parse(imxmd, &csi, node);
+ if (ret) {
+ v4l2_err(&imxmd->v4l2_dev,
+ "imx_media_of_parse failed with %d\n", ret);
+ goto unreg_dev;
+ }
+
+ ret = imx_media_add_internal_subdevs(imxmd, csi);
+ if (ret) {
+ v4l2_err(&imxmd->v4l2_dev,
+ "add_internal_subdevs failed with %d\n", ret);
+ goto unreg_dev;
+ }
+
+ /* no subdevs? just bail */
+ imxmd->num_subdevs = imxmd->subdev_notifier.num_subdevs;
+ if (imxmd->num_subdevs == 0) {
+ ret = -ENODEV;
+ goto unreg_dev;
+ }
+
+ /* prepare the async subdev notifier and register it */
+ imxmd->subdev_notifier.subdevs = imxmd->async_ptrs;
+ imxmd->subdev_notifier.bound = imx_media_subdev_bound;
+ imxmd->subdev_notifier.complete = imx_media_probe_complete;
+ ret = v4l2_async_notifier_register(&imxmd->v4l2_dev,
+ &imxmd->subdev_notifier);
+ if (ret) {
+ v4l2_err(&imxmd->v4l2_dev,
+ "v4l2_async_notifier_register failed with %d\n", ret);
+ goto del_int;
+ }
+
+ return 0;
+
+del_int:
+ imx_media_remove_internal_subdevs(imxmd);
+unreg_dev:
+ v4l2_device_unregister(&imxmd->v4l2_dev);
+cleanup:
+ media_device_cleanup(&imxmd->md);
+ return ret;
+}
+
+static int imx_media_remove(struct platform_device *pdev)
+{
+ struct imx_media_dev *imxmd =
+ (struct imx_media_dev *)platform_get_drvdata(pdev);
+
+ v4l2_info(&imxmd->v4l2_dev, "Removing imx-media\n");
+
+ v4l2_async_notifier_unregister(&imxmd->subdev_notifier);
+ imx_media_remove_internal_subdevs(imxmd);
+ v4l2_device_unregister(&imxmd->v4l2_dev);
+ media_device_unregister(&imxmd->md);
+ media_device_cleanup(&imxmd->md);
+
+ return 0;
+}
+
+static const struct of_device_id imx_media_dt_ids[] = {
+ { .compatible = "fsl,imx-capture-subsystem" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_media_dt_ids);
+
+static struct platform_driver imx_media_pdrv = {
+ .probe = imx_media_probe,
+ .remove = imx_media_remove,
+ .driver = {
+ .name = "imx-media",
+ .of_match_table = imx_media_dt_ids,
+ },
+};
+
+module_platform_driver(imx_media_pdrv);
+
+MODULE_DESCRIPTION("i.MX5/6 v4l2 media controller driver");
+MODULE_AUTHOR("Steve Longerbeam <[email protected]>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/imx/imx-media-fim.c b/drivers/staging/media/imx/imx-media-fim.c
new file mode 100644
index 000000000000..47275ef803f3
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-fim.c
@@ -0,0 +1,494 @@
+/*
+ * Frame Interval Monitor.
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+#include <media/imx.h>
+#include "imx-media.h"
+
+enum {
+ FIM_CL_ENABLE = 0,
+ FIM_CL_NUM,
+ FIM_CL_TOLERANCE_MIN,
+ FIM_CL_TOLERANCE_MAX,
+ FIM_CL_NUM_SKIP,
+ FIM_NUM_CONTROLS,
+};
+
+enum {
+ FIM_CL_ICAP_EDGE = 0,
+ FIM_CL_ICAP_CHANNEL,
+ FIM_NUM_ICAP_CONTROLS,
+};
+
+#define FIM_CL_ENABLE_DEF 0 /* FIM disabled by default */
+#define FIM_CL_NUM_DEF 8 /* average 8 frames */
+#define FIM_CL_NUM_SKIP_DEF 2 /* skip 2 frames after restart */
+#define FIM_CL_TOLERANCE_MIN_DEF 50 /* usec */
+#define FIM_CL_TOLERANCE_MAX_DEF 0 /* no max tolerance (unbounded) */
+
+struct imx_media_fim {
+ struct imx_media_dev *md;
+
+ /* the owning subdev of this fim instance */
+ struct v4l2_subdev *sd;
+
+ /* FIM's control handler */
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* control clusters */
+ struct v4l2_ctrl *ctrl[FIM_NUM_CONTROLS];
+ struct v4l2_ctrl *icap_ctrl[FIM_NUM_ICAP_CONTROLS];
+
+ spinlock_t lock; /* protect control values */
+
+ /* current control values */
+ bool enabled;
+ int num_avg;
+ int num_skip;
+ unsigned long tolerance_min; /* usec */
+ unsigned long tolerance_max; /* usec */
+ /* input capture method of measuring FI */
+ int icap_channel;
+ int icap_flags;
+
+ int counter;
+ struct timespec last_ts;
+ unsigned long sum; /* usec */
+ unsigned long nominal; /* usec */
+
+ struct completion icap_first_event;
+ bool stream_on;
+};
+
+#define icap_enabled(fim) ((fim)->icap_flags != IRQ_TYPE_NONE)
+
+static void update_fim_nominal(struct imx_media_fim *fim,
+ const struct v4l2_fract *fi)
+{
+ if (fi->denominator == 0) {
+ dev_dbg(fim->sd->dev, "no frame interval, FIM disabled\n");
+ fim->enabled = false;
+ return;
+ }
+
+ fim->nominal = DIV_ROUND_CLOSEST_ULL(1000000ULL * (u64)fi->numerator,
+ fi->denominator);
+
+ dev_dbg(fim->sd->dev, "FI=%lu usec\n", fim->nominal);
+}
+
+static void reset_fim(struct imx_media_fim *fim, bool curval)
+{
+ struct v4l2_ctrl *icap_chan = fim->icap_ctrl[FIM_CL_ICAP_CHANNEL];
+ struct v4l2_ctrl *icap_edge = fim->icap_ctrl[FIM_CL_ICAP_EDGE];
+ struct v4l2_ctrl *en = fim->ctrl[FIM_CL_ENABLE];
+ struct v4l2_ctrl *num = fim->ctrl[FIM_CL_NUM];
+ struct v4l2_ctrl *skip = fim->ctrl[FIM_CL_NUM_SKIP];
+ struct v4l2_ctrl *tol_min = fim->ctrl[FIM_CL_TOLERANCE_MIN];
+ struct v4l2_ctrl *tol_max = fim->ctrl[FIM_CL_TOLERANCE_MAX];
+
+ if (curval) {
+ fim->enabled = en->cur.val;
+ fim->icap_flags = icap_edge->cur.val;
+ fim->icap_channel = icap_chan->cur.val;
+ fim->num_avg = num->cur.val;
+ fim->num_skip = skip->cur.val;
+ fim->tolerance_min = tol_min->cur.val;
+ fim->tolerance_max = tol_max->cur.val;
+ } else {
+ fim->enabled = en->val;
+ fim->icap_flags = icap_edge->val;
+ fim->icap_channel = icap_chan->val;
+ fim->num_avg = num->val;
+ fim->num_skip = skip->val;
+ fim->tolerance_min = tol_min->val;
+ fim->tolerance_max = tol_max->val;
+ }
+
+ /* disable tolerance range if max <= min */
+ if (fim->tolerance_max <= fim->tolerance_min)
+ fim->tolerance_max = 0;
+
+ /* num_skip must be >= 1 if input capture not used */
+ if (!icap_enabled(fim))
+ fim->num_skip = max_t(int, fim->num_skip, 1);
+
+ fim->counter = -fim->num_skip;
+ fim->sum = 0;
+}
+
+static void send_fim_event(struct imx_media_fim *fim, unsigned long error)
+{
+ static const struct v4l2_event ev = {
+ .type = V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR,
+ };
+
+ v4l2_subdev_notify_event(fim->sd, &ev);
+}
+
+/*
+ * Monitor an averaged frame interval. If the average deviates too much
+ * from the nominal frame rate, send the frame interval error event. The
+ * frame intervals are averaged in order to quiet noise from
+ * (presumably random) interrupt latency.
+ */
+static void frame_interval_monitor(struct imx_media_fim *fim,
+ struct timespec *ts)
+{
+ unsigned long interval, error, error_avg;
+ bool send_event = false;
+ struct timespec diff;
+
+ if (!fim->enabled || ++fim->counter <= 0)
+ goto out_update_ts;
+
+ diff = timespec_sub(*ts, fim->last_ts);
+ interval = diff.tv_sec * 1000 * 1000 + diff.tv_nsec / 1000;
+ error = abs(interval - fim->nominal);
+
+ if (fim->tolerance_max && error >= fim->tolerance_max) {
+ dev_dbg(fim->sd->dev,
+ "FIM: %lu ignored, out of tolerance bounds\n",
+ error);
+ fim->counter--;
+ goto out_update_ts;
+ }
+
+ fim->sum += error;
+
+ if (fim->counter == fim->num_avg) {
+ error_avg = DIV_ROUND_CLOSEST(fim->sum, fim->num_avg);
+
+ if (error_avg > fim->tolerance_min)
+ send_event = true;
+
+ dev_dbg(fim->sd->dev, "FIM: error: %lu usec%s\n",
+ error_avg, send_event ? " (!!!)" : "");
+
+ fim->counter = 0;
+ fim->sum = 0;
+ }
+
+out_update_ts:
+ fim->last_ts = *ts;
+ if (send_event)
+ send_fim_event(fim, error_avg);
+}
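As a concrete illustration of the averaging above: with the default controls (num_avg = 8, tolerance_min = 50 usec, no max tolerance) and a nominal 30 fps stream, a minimal standalone sketch with made-up interval samples would be:

	static unsigned long fim_example(void)
	{
		/* nominal interval at 30 fps, as computed by update_fim_nominal() */
		unsigned long nominal = 1000000UL / 30;		/* ~33333 usec */
		/* eight measured intervals, invented purely for illustration */
		unsigned long samples[8] = { 33400, 33250, 33500, 33300,
					     33420, 33280, 33450, 33380 };
		unsigned long sum = 0;
		int i;

		for (i = 0; i < 8; i++)
			sum += (samples[i] > nominal) ? samples[i] - nominal
						      : nominal - samples[i];

		/*
		 * sum is 654 here, so the rounded average error is 82 usec.
		 * That exceeds the default tolerance_min of 50 usec, so
		 * frame_interval_monitor() would send the
		 * V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR event at this point.
		 */
		return (sum + 8 / 2) / 8;
	}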
+
+#ifdef CONFIG_IMX_GPT_ICAP
+/*
+ * Input Capture method of measuring frame intervals. Not subject
+ * to interrupt latency.
+ */
+static void fim_input_capture_handler(int channel, void *dev_id,
+ struct timespec *ts)
+{
+ struct imx_media_fim *fim = dev_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fim->lock, flags);
+
+ frame_interval_monitor(fim, ts);
+
+ if (!completion_done(&fim->icap_first_event))
+ complete(&fim->icap_first_event);
+
+ spin_unlock_irqrestore(&fim->lock, flags);
+}
+
+static int fim_request_input_capture(struct imx_media_fim *fim)
+{
+ init_completion(&fim->icap_first_event);
+
+ return mxc_request_input_capture(fim->icap_channel,
+ fim_input_capture_handler,
+ fim->icap_flags, fim);
+}
+
+static void fim_free_input_capture(struct imx_media_fim *fim)
+{
+ mxc_free_input_capture(fim->icap_channel, fim);
+}
+
+#else /* CONFIG_IMX_GPT_ICAP */
+
+static int fim_request_input_capture(struct imx_media_fim *fim)
+{
+ return 0;
+}
+
+static void fim_free_input_capture(struct imx_media_fim *fim)
+{
+}
+
+#endif /* CONFIG_IMX_GPT_ICAP */
+
+/*
+ * In case we are monitoring the first frame interval after streamon
+ * (when fim->num_skip = 0), we need a valid fim->last_ts before we
+ * can begin. This only applies to the input capture method. It is not
+ * possible to accurately measure the first FI after streamon using the
+ * EOF method, so fim->num_skip is forced to a minimum of 1 in that case,
+ * which makes this function a no-op when the EOF method is used.
+ */
+static void fim_acquire_first_ts(struct imx_media_fim *fim)
+{
+ unsigned long ret;
+
+ if (!fim->enabled || fim->num_skip > 0)
+ return;
+
+ ret = wait_for_completion_timeout(
+ &fim->icap_first_event,
+ msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
+ if (ret == 0)
+ v4l2_warn(fim->sd, "wait first icap event timeout\n");
+}
+
+/* FIM Controls */
+static int fim_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imx_media_fim *fim = container_of(ctrl->handler,
+ struct imx_media_fim,
+ ctrl_handler);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&fim->lock, flags);
+
+ switch (ctrl->id) {
+ case V4L2_CID_IMX_FIM_ENABLE:
+ break;
+ case V4L2_CID_IMX_FIM_ICAP_EDGE:
+ if (fim->stream_on)
+ ret = -EBUSY;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (!ret)
+ reset_fim(fim, false);
+
+ spin_unlock_irqrestore(&fim->lock, flags);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops fim_ctrl_ops = {
+ .s_ctrl = fim_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config fim_ctrl[] = {
+ [FIM_CL_ENABLE] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_ENABLE,
+ .name = "FIM Enable",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .def = FIM_CL_ENABLE_DEF,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ },
+ [FIM_CL_NUM] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_NUM,
+ .name = "FIM Num Average",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = FIM_CL_NUM_DEF,
+ .min = 1, /* no averaging */
+ .max = 64, /* average 64 frames */
+ .step = 1,
+ },
+ [FIM_CL_TOLERANCE_MIN] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_TOLERANCE_MIN,
+ .name = "FIM Tolerance Min",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = FIM_CL_TOLERANCE_MIN_DEF,
+ .min = 2,
+ .max = 200,
+ .step = 1,
+ },
+ [FIM_CL_TOLERANCE_MAX] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_TOLERANCE_MAX,
+ .name = "FIM Tolerance Max",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = FIM_CL_TOLERANCE_MAX_DEF,
+ .min = 0,
+ .max = 500,
+ .step = 1,
+ },
+ [FIM_CL_NUM_SKIP] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_NUM_SKIP,
+ .name = "FIM Num Skip",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = FIM_CL_NUM_SKIP_DEF,
+ .min = 0, /* skip no frames */
+ .max = 256, /* skip 256 frames */
+ .step = 1,
+ },
+};
+
+static const struct v4l2_ctrl_config fim_icap_ctrl[] = {
+ [FIM_CL_ICAP_EDGE] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_ICAP_EDGE,
+ .name = "FIM Input Capture Edge",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = IRQ_TYPE_NONE, /* input capture disabled by default */
+ .min = IRQ_TYPE_NONE,
+ .max = IRQ_TYPE_EDGE_BOTH,
+ .step = 1,
+ },
+ [FIM_CL_ICAP_CHANNEL] = {
+ .ops = &fim_ctrl_ops,
+ .id = V4L2_CID_IMX_FIM_ICAP_CHANNEL,
+ .name = "FIM Input Capture Channel",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .def = 0,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ },
+};
+
+static int init_fim_controls(struct imx_media_fim *fim)
+{
+ struct v4l2_ctrl_handler *hdlr = &fim->ctrl_handler;
+ int i, ret;
+
+ v4l2_ctrl_handler_init(hdlr, FIM_NUM_CONTROLS + FIM_NUM_ICAP_CONTROLS);
+
+ for (i = 0; i < FIM_NUM_CONTROLS; i++)
+ fim->ctrl[i] = v4l2_ctrl_new_custom(hdlr,
+ &fim_ctrl[i],
+ NULL);
+ for (i = 0; i < FIM_NUM_ICAP_CONTROLS; i++)
+ fim->icap_ctrl[i] = v4l2_ctrl_new_custom(hdlr,
+ &fim_icap_ctrl[i],
+ NULL);
+ if (hdlr->error) {
+ ret = hdlr->error;
+ goto err_free;
+ }
+
+ v4l2_ctrl_cluster(FIM_NUM_CONTROLS, fim->ctrl);
+ v4l2_ctrl_cluster(FIM_NUM_ICAP_CONTROLS, fim->icap_ctrl);
+
+ return 0;
+err_free:
+ v4l2_ctrl_handler_free(hdlr);
+ return ret;
+}
+
+/*
+ * Monitor frame intervals via EOF interrupt. This method is
+ * subject to uncertainty errors introduced by interrupt latency.
+ *
+ * This is a noop if the Input Capture method is being used, since
+ * the frame_interval_monitor() is called by the input capture event
+ * callback handler in that case.
+ */
+void imx_media_fim_eof_monitor(struct imx_media_fim *fim, struct timespec *ts)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fim->lock, flags);
+
+ if (!icap_enabled(fim))
+ frame_interval_monitor(fim, ts);
+
+ spin_unlock_irqrestore(&fim->lock, flags);
+}
+EXPORT_SYMBOL_GPL(imx_media_fim_eof_monitor);
+
+/* Called by the subdev in its s_stream callback */
+int imx_media_fim_set_stream(struct imx_media_fim *fim,
+ const struct v4l2_fract *fi,
+ bool on)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ v4l2_ctrl_lock(fim->ctrl[FIM_CL_ENABLE]);
+
+ if (fim->stream_on == on)
+ goto out;
+
+ if (on) {
+ spin_lock_irqsave(&fim->lock, flags);
+ reset_fim(fim, true);
+ update_fim_nominal(fim, fi);
+ spin_unlock_irqrestore(&fim->lock, flags);
+
+ if (icap_enabled(fim)) {
+ ret = fim_request_input_capture(fim);
+ if (ret)
+ goto out;
+ fim_acquire_first_ts(fim);
+ }
+ } else {
+ if (icap_enabled(fim))
+ fim_free_input_capture(fim);
+ }
+
+ fim->stream_on = on;
+out:
+ v4l2_ctrl_unlock(fim->ctrl[FIM_CL_ENABLE]);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(imx_media_fim_set_stream);
+
+int imx_media_fim_add_controls(struct imx_media_fim *fim)
+{
+ /* add the FIM controls to the calling subdev ctrl handler */
+ return v4l2_ctrl_add_handler(fim->sd->ctrl_handler,
+ &fim->ctrl_handler, NULL);
+}
+EXPORT_SYMBOL_GPL(imx_media_fim_add_controls);
+
+/* Called by the subdev in its subdev registered callback */
+struct imx_media_fim *imx_media_fim_init(struct v4l2_subdev *sd)
+{
+ struct imx_media_fim *fim;
+ int ret;
+
+ fim = devm_kzalloc(sd->dev, sizeof(*fim), GFP_KERNEL);
+ if (!fim)
+ return ERR_PTR(-ENOMEM);
+
+ /* get media device */
+ fim->md = dev_get_drvdata(sd->v4l2_dev->dev);
+ fim->sd = sd;
+
+ spin_lock_init(&fim->lock);
+
+ ret = init_fim_controls(fim);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return fim;
+}
+EXPORT_SYMBOL_GPL(imx_media_fim_init);
+
+void imx_media_fim_free(struct imx_media_fim *fim)
+{
+ v4l2_ctrl_handler_free(&fim->ctrl_handler);
+}
+EXPORT_SYMBOL_GPL(imx_media_fim_free);
diff --git a/drivers/staging/media/imx/imx-media-internal-sd.c b/drivers/staging/media/imx/imx-media-internal-sd.c
new file mode 100644
index 000000000000..cdfbf40dfcbe
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-internal-sd.c
@@ -0,0 +1,349 @@
+/*
+ * Media driver for Freescale i.MX5/6 SOC
+ *
+ * Adds the internal subdevices and the media links between them.
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/platform_device.h>
+#include "imx-media.h"
+
+enum isd_enum {
+ isd_csi0 = 0,
+ isd_csi1,
+ isd_vdic,
+ isd_ic_prp,
+ isd_ic_prpenc,
+ isd_ic_prpvf,
+ num_isd,
+};
+
+static const struct internal_subdev_id {
+ enum isd_enum index;
+ const char *name;
+ u32 grp_id;
+} isd_id[num_isd] = {
+ [isd_csi0] = {
+ .index = isd_csi0,
+ .grp_id = IMX_MEDIA_GRP_ID_CSI0,
+ .name = "imx-ipuv3-csi",
+ },
+ [isd_csi1] = {
+ .index = isd_csi1,
+ .grp_id = IMX_MEDIA_GRP_ID_CSI1,
+ .name = "imx-ipuv3-csi",
+ },
+ [isd_vdic] = {
+ .index = isd_vdic,
+ .grp_id = IMX_MEDIA_GRP_ID_VDIC,
+ .name = "imx-ipuv3-vdic",
+ },
+ [isd_ic_prp] = {
+ .index = isd_ic_prp,
+ .grp_id = IMX_MEDIA_GRP_ID_IC_PRP,
+ .name = "imx-ipuv3-ic",
+ },
+ [isd_ic_prpenc] = {
+ .index = isd_ic_prpenc,
+ .grp_id = IMX_MEDIA_GRP_ID_IC_PRPENC,
+ .name = "imx-ipuv3-ic",
+ },
+ [isd_ic_prpvf] = {
+ .index = isd_ic_prpvf,
+ .grp_id = IMX_MEDIA_GRP_ID_IC_PRPVF,
+ .name = "imx-ipuv3-ic",
+ },
+};
+
+struct internal_link {
+ const struct internal_subdev_id *remote_id;
+ int remote_pad;
+};
+
+struct internal_pad {
+ bool devnode; /* does this pad link to a device node */
+ struct internal_link link[IMX_MEDIA_MAX_LINKS];
+};
+
+static const struct internal_subdev {
+ const struct internal_subdev_id *id;
+ struct internal_pad pad[IMX_MEDIA_MAX_PADS];
+ int num_sink_pads;
+ int num_src_pads;
+} internal_subdev[num_isd] = {
+ [isd_csi0] = {
+ .id = &isd_id[isd_csi0],
+ .num_sink_pads = CSI_NUM_SINK_PADS,
+ .num_src_pads = CSI_NUM_SRC_PADS,
+ .pad[CSI_SRC_PAD_DIRECT] = {
+ .link = {
+ {
+ .remote_id = &isd_id[isd_ic_prp],
+ .remote_pad = PRP_SINK_PAD,
+ }, {
+ .remote_id = &isd_id[isd_vdic],
+ .remote_pad = VDIC_SINK_PAD_DIRECT,
+ },
+ },
+ },
+ .pad[CSI_SRC_PAD_IDMAC] = {
+ .devnode = true,
+ },
+ },
+
+ [isd_csi1] = {
+ .id = &isd_id[isd_csi1],
+ .num_sink_pads = CSI_NUM_SINK_PADS,
+ .num_src_pads = CSI_NUM_SRC_PADS,
+ .pad[CSI_SRC_PAD_DIRECT] = {
+ .link = {
+ {
+ .remote_id = &isd_id[isd_ic_prp],
+ .remote_pad = PRP_SINK_PAD,
+ }, {
+ .remote_id = &isd_id[isd_vdic],
+ .remote_pad = VDIC_SINK_PAD_DIRECT,
+ },
+ },
+ },
+ .pad[CSI_SRC_PAD_IDMAC] = {
+ .devnode = true,
+ },
+ },
+
+ [isd_vdic] = {
+ .id = &isd_id[isd_vdic],
+ .num_sink_pads = VDIC_NUM_SINK_PADS,
+ .num_src_pads = VDIC_NUM_SRC_PADS,
+ .pad[VDIC_SINK_PAD_IDMAC] = {
+ .devnode = true,
+ },
+ .pad[VDIC_SRC_PAD_DIRECT] = {
+ .link = {
+ {
+ .remote_id = &isd_id[isd_ic_prp],
+ .remote_pad = PRP_SINK_PAD,
+ },
+ },
+ },
+ },
+
+ [isd_ic_prp] = {
+ .id = &isd_id[isd_ic_prp],
+ .num_sink_pads = PRP_NUM_SINK_PADS,
+ .num_src_pads = PRP_NUM_SRC_PADS,
+ .pad[PRP_SRC_PAD_PRPENC] = {
+ .link = {
+ {
+ .remote_id = &isd_id[isd_ic_prpenc],
+ .remote_pad = 0,
+ },
+ },
+ },
+ .pad[PRP_SRC_PAD_PRPVF] = {
+ .link = {
+ {
+ .remote_id = &isd_id[isd_ic_prpvf],
+ .remote_pad = 0,
+ },
+ },
+ },
+ },
+
+ [isd_ic_prpenc] = {
+ .id = &isd_id[isd_ic_prpenc],
+ .num_sink_pads = PRPENCVF_NUM_SINK_PADS,
+ .num_src_pads = PRPENCVF_NUM_SRC_PADS,
+ .pad[PRPENCVF_SRC_PAD] = {
+ .devnode = true,
+ },
+ },
+
+ [isd_ic_prpvf] = {
+ .id = &isd_id[isd_ic_prpvf],
+ .num_sink_pads = PRPENCVF_NUM_SINK_PADS,
+ .num_src_pads = PRPENCVF_NUM_SRC_PADS,
+ .pad[PRPENCVF_SRC_PAD] = {
+ .devnode = true,
+ },
+ },
+};
+
+/* form a device name given a group id and ipu id */
+static inline void isd_id_to_devname(char *devname, int sz,
+ const struct internal_subdev_id *id,
+ int ipu_id)
+{
+ int pdev_id = ipu_id * num_isd + id->index;
+
+ snprintf(devname, sz, "%s.%d", id->name, pdev_id);
+}
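A quick illustration of the naming scheme: with num_isd equal to 6, the PRP VF task of the second IPU (ipu_id 1, array index isd_ic_prpvf = 5) gets platform device id 1 * 6 + 5 = 11, so:

	char devname[32];

	isd_id_to_devname(devname, sizeof(devname), &isd_id[isd_ic_prpvf], 1);
	/* devname is now "imx-ipuv3-ic.11" */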
+
+/* adds the links from given internal subdev */
+static int add_internal_links(struct imx_media_dev *imxmd,
+ const struct internal_subdev *isd,
+ struct imx_media_subdev *imxsd,
+ int ipu_id)
+{
+ int i, num_pads, ret;
+
+ num_pads = isd->num_sink_pads + isd->num_src_pads;
+
+ for (i = 0; i < num_pads; i++) {
+ const struct internal_pad *intpad = &isd->pad[i];
+ struct imx_media_pad *pad = &imxsd->pad[i];
+ int j;
+
+ /* init the pad flags for this internal subdev */
+ pad->pad.flags = (i < isd->num_sink_pads) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ /* export devnode pad flag to the subdevs */
+ pad->devnode = intpad->devnode;
+
+ for (j = 0; ; j++) {
+ const struct internal_link *link;
+ char remote_devname[32];
+
+ link = &intpad->link[j];
+
+ if (!link->remote_id)
+ break;
+
+ isd_id_to_devname(remote_devname,
+ sizeof(remote_devname),
+ link->remote_id, ipu_id);
+
+ ret = imx_media_add_pad_link(imxmd, pad,
+ NULL, remote_devname,
+ i, link->remote_pad);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* register an internal subdev as a platform device */
+static struct imx_media_subdev *
+add_internal_subdev(struct imx_media_dev *imxmd,
+ const struct internal_subdev *isd,
+ int ipu_id)
+{
+ struct imx_media_internal_sd_platformdata pdata;
+ struct platform_device_info pdevinfo = {0};
+ struct imx_media_subdev *imxsd;
+ struct platform_device *pdev;
+
+ pdata.grp_id = isd->id->grp_id;
+
+ /* the id of IPU this subdev will control */
+ pdata.ipu_id = ipu_id;
+
+ /* create subdev name */
+ imx_media_grp_id_to_sd_name(pdata.sd_name, sizeof(pdata.sd_name),
+ pdata.grp_id, ipu_id);
+
+ pdevinfo.name = isd->id->name;
+ pdevinfo.id = ipu_id * num_isd + isd->id->index;
+ pdevinfo.parent = imxmd->md.dev;
+ pdevinfo.data = &pdata;
+ pdevinfo.size_data = sizeof(pdata);
+ pdevinfo.dma_mask = DMA_BIT_MASK(32);
+
+ pdev = platform_device_register_full(&pdevinfo);
+ if (IS_ERR(pdev))
+ return ERR_CAST(pdev);
+
+ imxsd = imx_media_add_async_subdev(imxmd, NULL, pdev);
+ if (IS_ERR(imxsd))
+ return imxsd;
+
+ imxsd->num_sink_pads = isd->num_sink_pads;
+ imxsd->num_src_pads = isd->num_src_pads;
+
+ return imxsd;
+}
+
+/* adds the internal subdevs in one ipu */
+static int add_ipu_internal_subdevs(struct imx_media_dev *imxmd,
+ struct imx_media_subdev *csi0,
+ struct imx_media_subdev *csi1,
+ int ipu_id)
+{
+ enum isd_enum i;
+ int ret;
+
+ for (i = 0; i < num_isd; i++) {
+ const struct internal_subdev *isd = &internal_subdev[i];
+ struct imx_media_subdev *imxsd;
+
+ /*
+ * The CSIs are represented in the device tree, so those
+ * devices have already been added to the async subdev list
+ * by of_parse_subdev(); we are handed them here as csi0
+ * and csi1.
+ */
+ switch (isd->id->grp_id) {
+ case IMX_MEDIA_GRP_ID_CSI0:
+ imxsd = csi0;
+ break;
+ case IMX_MEDIA_GRP_ID_CSI1:
+ imxsd = csi1;
+ break;
+ default:
+ imxsd = add_internal_subdev(imxmd, isd, ipu_id);
+ break;
+ }
+
+ if (IS_ERR(imxsd))
+ return PTR_ERR(imxsd);
+
+ /* add the links from this subdev */
+ if (imxsd) {
+ ret = add_internal_links(imxmd, isd, imxsd, ipu_id);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int imx_media_add_internal_subdevs(struct imx_media_dev *imxmd,
+ struct imx_media_subdev *csi[4])
+{
+ int ret;
+
+ ret = add_ipu_internal_subdevs(imxmd, csi[0], csi[1], 0);
+ if (ret)
+ goto remove;
+
+ ret = add_ipu_internal_subdevs(imxmd, csi[2], csi[3], 1);
+ if (ret)
+ goto remove;
+
+ return 0;
+
+remove:
+ imx_media_remove_internal_subdevs(imxmd);
+ return ret;
+}
+
+void imx_media_remove_internal_subdevs(struct imx_media_dev *imxmd)
+{
+ struct imx_media_subdev *imxsd;
+ int i;
+
+ for (i = 0; i < imxmd->subdev_notifier.num_subdevs; i++) {
+ imxsd = &imxmd->subdev[i];
+ if (!imxsd->pdev)
+ continue;
+ platform_device_unregister(imxsd->pdev);
+ }
+}
diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
new file mode 100644
index 000000000000..b026fe66467c
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-of.c
@@ -0,0 +1,270 @@
+/*
+ * Media driver for Freescale i.MX5/6 SOC
+ *
+ * Open Firmware parsing.
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/of_platform.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+#include <linux/of_graph.h>
+#include <video/imx-ipu-v3.h>
+#include "imx-media.h"
+
+static int of_add_pad_link(struct imx_media_dev *imxmd,
+ struct imx_media_pad *pad,
+ struct device_node *local_sd_node,
+ struct device_node *remote_sd_node,
+ int local_pad, int remote_pad)
+{
+ dev_dbg(imxmd->md.dev, "%s: adding %s:%d -> %s:%d\n", __func__,
+ local_sd_node->name, local_pad,
+ remote_sd_node->name, remote_pad);
+
+ return imx_media_add_pad_link(imxmd, pad, remote_sd_node, NULL,
+ local_pad, remote_pad);
+}
+
+static void of_parse_sensor(struct imx_media_dev *imxmd,
+ struct imx_media_subdev *sensor,
+ struct device_node *sensor_np)
+{
+ struct device_node *endpoint;
+
+ endpoint = of_graph_get_next_endpoint(sensor_np, NULL);
+ if (endpoint) {
+ v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint),
+ &sensor->sensor_ep);
+ of_node_put(endpoint);
+ }
+}
+
+static int of_get_port_count(const struct device_node *np)
+{
+ struct device_node *ports, *child;
+ int num = 0;
+
+ /* check if this node has a ports subnode */
+ ports = of_get_child_by_name(np, "ports");
+ if (ports)
+ np = ports;
+
+ for_each_child_of_node(np, child)
+ if (of_node_cmp(child->name, "port") == 0)
+ num++;
+
+ of_node_put(ports);
+ return num;
+}
+
+/*
+ * find the remote device node and remote port id (remote pad #)
+ * given local endpoint node
+ */
+static void of_get_remote_pad(struct device_node *epnode,
+ struct device_node **remote_node,
+ int *remote_pad)
+{
+ struct device_node *rp, *rpp;
+ struct device_node *remote;
+
+ rp = of_graph_get_remote_port(epnode);
+ rpp = of_graph_get_remote_port_parent(epnode);
+
+ if (of_device_is_compatible(rpp, "fsl,imx6q-ipu")) {
+ /* the remote is one of the CSI ports */
+ remote = rp;
+ *remote_pad = 0;
+ of_node_put(rpp);
+ } else {
+ remote = rpp;
+ if (of_property_read_u32(rp, "reg", remote_pad))
+ *remote_pad = 0;
+ of_node_put(rp);
+ }
+
+ if (!of_device_is_available(remote)) {
+ of_node_put(remote);
+ *remote_node = NULL;
+ } else {
+ *remote_node = remote;
+ }
+}
+
+static struct imx_media_subdev *
+of_parse_subdev(struct imx_media_dev *imxmd, struct device_node *sd_np,
+ bool is_csi_port)
+{
+ struct imx_media_subdev *imxsd;
+ int i, num_pads, ret;
+
+ if (!of_device_is_available(sd_np)) {
+ dev_dbg(imxmd->md.dev, "%s: %s not enabled\n", __func__,
+ sd_np->name);
+ return NULL;
+ }
+
+ /* register this subdev with async notifier */
+ imxsd = imx_media_add_async_subdev(imxmd, sd_np, NULL);
+ if (IS_ERR_OR_NULL(imxsd))
+ return imxsd;
+
+ if (is_csi_port) {
+ /*
+ * the ipu-csi has one sink port and two source ports.
+ * The source ports are not represented in the device tree,
+ * but are described by the internal pads and links later.
+ */
+ num_pads = CSI_NUM_PADS;
+ imxsd->num_sink_pads = CSI_NUM_SINK_PADS;
+ } else if (of_device_is_compatible(sd_np, "fsl,imx6-mipi-csi2")) {
+ num_pads = of_get_port_count(sd_np);
+ /* the mipi csi2 receiver has only one sink port */
+ imxsd->num_sink_pads = 1;
+ } else if (of_device_is_compatible(sd_np, "video-mux")) {
+ num_pads = of_get_port_count(sd_np);
+ /* for the video mux, all but the last port are sinks */
+ imxsd->num_sink_pads = num_pads - 1;
+ } else {
+ num_pads = of_get_port_count(sd_np);
+ if (num_pads != 1) {
+ dev_warn(imxmd->md.dev,
+ "%s: unknown device %s with %d ports\n",
+ __func__, sd_np->name, num_pads);
+ return NULL;
+ }
+
+ /*
+ * we reached this node through its single source port,
+ * so there are no sink pads.
+ */
+ imxsd->num_sink_pads = 0;
+ }
+
+ if (imxsd->num_sink_pads >= num_pads)
+ return ERR_PTR(-EINVAL);
+
+ imxsd->num_src_pads = num_pads - imxsd->num_sink_pads;
+
+ dev_dbg(imxmd->md.dev, "%s: %s has %d pads (%d sink, %d src)\n",
+ __func__, sd_np->name, num_pads,
+ imxsd->num_sink_pads, imxsd->num_src_pads);
+
+ /*
+ * With no sink, this subdev node is the original source
+ * of video; parse its media bus config for use by the pipeline.
+ */
+ if (imxsd->num_sink_pads == 0)
+ of_parse_sensor(imxmd, imxsd, sd_np);
+
+ for (i = 0; i < num_pads; i++) {
+ struct device_node *epnode = NULL, *port, *remote_np;
+ struct imx_media_subdev *remote_imxsd;
+ struct imx_media_pad *pad;
+ int remote_pad;
+
+ /* init this pad */
+ pad = &imxsd->pad[i];
+ pad->pad.flags = (i < imxsd->num_sink_pads) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+
+ if (is_csi_port)
+ port = (i < imxsd->num_sink_pads) ? sd_np : NULL;
+ else
+ port = of_graph_get_port_by_id(sd_np, i);
+ if (!port)
+ continue;
+
+ for_each_child_of_node(port, epnode) {
+ of_get_remote_pad(epnode, &remote_np, &remote_pad);
+ if (!remote_np)
+ continue;
+
+ ret = of_add_pad_link(imxmd, pad, sd_np, remote_np,
+ i, remote_pad);
+ if (ret) {
+ imxsd = ERR_PTR(ret);
+ break;
+ }
+
+ if (i < imxsd->num_sink_pads) {
+ /* follow sink endpoints upstream */
+ remote_imxsd = of_parse_subdev(imxmd,
+ remote_np,
+ false);
+ if (IS_ERR(remote_imxsd)) {
+ imxsd = remote_imxsd;
+ break;
+ }
+ }
+
+ of_node_put(remote_np);
+ }
+
+ if (port != sd_np)
+ of_node_put(port);
+ if (IS_ERR(imxsd)) {
+ of_node_put(remote_np);
+ of_node_put(epnode);
+ break;
+ }
+ }
+
+ return imxsd;
+}
+
+int imx_media_of_parse(struct imx_media_dev *imxmd,
+ struct imx_media_subdev *(*csi)[4],
+ struct device_node *np)
+{
+ struct imx_media_subdev *lcsi;
+ struct device_node *csi_np;
+ u32 ipu_id, csi_id;
+ int i, ret;
+
+ for (i = 0; ; i++) {
+ csi_np = of_parse_phandle(np, "ports", i);
+ if (!csi_np)
+ break;
+
+ lcsi = of_parse_subdev(imxmd, csi_np, true);
+ if (IS_ERR(lcsi)) {
+ ret = PTR_ERR(lcsi);
+ goto err_put;
+ }
+
+ ret = of_property_read_u32(csi_np, "reg", &csi_id);
+ if (ret) {
+ dev_err(imxmd->md.dev,
+ "%s: csi port missing reg property!\n",
+ __func__);
+ goto err_put;
+ }
+
+ ipu_id = of_alias_get_id(csi_np->parent, "ipu");
+ of_node_put(csi_np);
+
+ if (ipu_id > 1 || csi_id > 1) {
+ dev_err(imxmd->md.dev,
+ "%s: invalid ipu/csi id (%u/%u)\n",
+ __func__, ipu_id, csi_id);
+ return -EINVAL;
+ }
+
+ (*csi)[ipu_id * 2 + csi_id] = lcsi;
+ }
+
+ return 0;
+err_put:
+ of_node_put(csi_np);
+ return ret;
+}
diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c
new file mode 100644
index 000000000000..59523872a886
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-utils.c
@@ -0,0 +1,896 @@
+/*
+ * V4L2 Media Controller Driver for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/module.h>
+#include "imx-media.h"
+
+/*
+ * List of supported pixel formats for the subdevs.
+ *
+ * In all of these tables, the non-mbus formats (with no
+ * mbus codes) must all fall at the end of each table.
+ */
+
+static const struct imx_media_pixfmt yuv_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .codes = {
+ MEDIA_BUS_FMT_UYVY8_2X8,
+ MEDIA_BUS_FMT_UYVY8_1X16
+ },
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codes = {
+ MEDIA_BUS_FMT_YUYV8_2X8,
+ MEDIA_BUS_FMT_YUYV8_1X16
+ },
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 16,
+ },
+ /***
+ * non-mbus YUV formats start here. NOTE! when adding non-mbus
+ * formats, NUM_NON_MBUS_YUV_FORMATS must be updated below.
+ ***/
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 12,
+ .planar = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 12,
+ .planar = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUV422P,
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 16,
+ .planar = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 12,
+ .planar = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 16,
+ .planar = true,
+ },
+};
+
+#define NUM_NON_MBUS_YUV_FORMATS 5
+#define NUM_YUV_FORMATS ARRAY_SIZE(yuv_formats)
+#define NUM_MBUS_YUV_FORMATS (NUM_YUV_FORMATS - NUM_NON_MBUS_YUV_FORMATS)
+
+static const struct imx_media_pixfmt rgb_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .codes = {MEDIA_BUS_FMT_RGB565_2X8_LE},
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .codes = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_RGB888_2X12_LE
+ },
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 24,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .codes = {MEDIA_BUS_FMT_ARGB8888_1X32},
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ .ipufmt = true,
+ },
+ /*** raw bayer formats start here ***/
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .codes = {MEDIA_BUS_FMT_SBGGR8_1X8},
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 8,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .codes = {MEDIA_BUS_FMT_SGBRG8_1X8},
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 8,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .codes = {MEDIA_BUS_FMT_SGRBG8_1X8},
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 8,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .codes = {MEDIA_BUS_FMT_SRGGB8_1X8},
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 8,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SBGGR16,
+ .codes = {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SBGGR14_1X14,
+ MEDIA_BUS_FMT_SBGGR16_1X16
+ },
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGBRG16,
+ .codes = {
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGBRG14_1X14,
+ MEDIA_BUS_FMT_SGBRG16_1X16,
+ },
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SGRBG16,
+ .codes = {
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SGRBG14_1X14,
+ MEDIA_BUS_FMT_SGRBG16_1X16,
+ },
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .bayer = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_SRGGB16,
+ .codes = {
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SRGGB14_1X14,
+ MEDIA_BUS_FMT_SRGGB16_1X16,
+ },
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 16,
+ .bayer = true,
+ },
+ /***
+ * non-mbus RGB formats start here. NOTE! when adding non-mbus
+ * formats, NUM_NON_MBUS_RGB_FORMATS must be updated below.
+ ***/
+ {
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 24,
+ }, {
+ .fourcc = V4L2_PIX_FMT_BGR32,
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ },
+};
+
+#define NUM_NON_MBUS_RGB_FORMATS 2
+#define NUM_RGB_FORMATS ARRAY_SIZE(rgb_formats)
+#define NUM_MBUS_RGB_FORMATS (NUM_RGB_FORMATS - NUM_NON_MBUS_RGB_FORMATS)
+
+static const struct imx_media_pixfmt ipu_yuv_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUV32,
+ .codes = {MEDIA_BUS_FMT_AYUV8_1X32},
+ .cs = IPUV3_COLORSPACE_YUV,
+ .bpp = 32,
+ .ipufmt = true,
+ },
+};
+
+#define NUM_IPU_YUV_FORMATS ARRAY_SIZE(ipu_yuv_formats)
+
+static const struct imx_media_pixfmt ipu_rgb_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .codes = {MEDIA_BUS_FMT_ARGB8888_1X32},
+ .cs = IPUV3_COLORSPACE_RGB,
+ .bpp = 32,
+ .ipufmt = true,
+ },
+};
+
+#define NUM_IPU_RGB_FORMATS ARRAY_SIZE(ipu_rgb_formats)
+
+static void init_mbus_colorimetry(struct v4l2_mbus_framefmt *mbus,
+ const struct imx_media_pixfmt *fmt)
+{
+ mbus->colorspace = (fmt->cs == IPUV3_COLORSPACE_RGB) ?
+ V4L2_COLORSPACE_SRGB : V4L2_COLORSPACE_SMPTE170M;
+ mbus->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(mbus->colorspace);
+ mbus->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(mbus->colorspace);
+ mbus->quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(fmt->cs == IPUV3_COLORSPACE_RGB,
+ mbus->colorspace,
+ mbus->ycbcr_enc);
+}
+
+static const struct imx_media_pixfmt *find_format(u32 fourcc,
+ u32 code,
+ enum codespace_sel cs_sel,
+ bool allow_non_mbus,
+ bool allow_bayer)
+{
+ const struct imx_media_pixfmt *array, *fmt, *ret = NULL;
+ u32 array_size;
+ int i, j;
+
+ switch (cs_sel) {
+ case CS_SEL_YUV:
+ array_size = NUM_YUV_FORMATS;
+ array = yuv_formats;
+ break;
+ case CS_SEL_RGB:
+ array_size = NUM_RGB_FORMATS;
+ array = rgb_formats;
+ break;
+ case CS_SEL_ANY:
+ array_size = NUM_YUV_FORMATS + NUM_RGB_FORMATS;
+ array = yuv_formats;
+ break;
+ default:
+ return NULL;
+ }
+
+ for (i = 0; i < array_size; i++) {
+ if (cs_sel == CS_SEL_ANY && i >= NUM_YUV_FORMATS)
+ fmt = &rgb_formats[i - NUM_YUV_FORMATS];
+ else
+ fmt = &array[i];
+
+ if ((!allow_non_mbus && fmt->codes[0] == 0) ||
+ (!allow_bayer && fmt->bayer))
+ continue;
+
+ if (fourcc && fmt->fourcc == fourcc) {
+ ret = fmt;
+ goto out;
+ }
+
+ for (j = 0; code && fmt->codes[j]; j++) {
+ if (code == fmt->codes[j]) {
+ ret = fmt;
+ goto out;
+ }
+ }
+ }
+
+out:
+ return ret;
+}
+
+static int enum_format(u32 *fourcc, u32 *code, u32 index,
+ enum codespace_sel cs_sel,
+ bool allow_non_mbus,
+ bool allow_bayer)
+{
+ const struct imx_media_pixfmt *fmt;
+ u32 mbus_yuv_sz = NUM_MBUS_YUV_FORMATS;
+ u32 mbus_rgb_sz = NUM_MBUS_RGB_FORMATS;
+ u32 yuv_sz = NUM_YUV_FORMATS;
+ u32 rgb_sz = NUM_RGB_FORMATS;
+
+ switch (cs_sel) {
+ case CS_SEL_YUV:
+ if (index >= yuv_sz ||
+ (!allow_non_mbus && index >= mbus_yuv_sz))
+ return -EINVAL;
+ fmt = &yuv_formats[index];
+ break;
+ case CS_SEL_RGB:
+ if (index >= rgb_sz ||
+ (!allow_non_mbus && index >= mbus_rgb_sz))
+ return -EINVAL;
+ fmt = &rgb_formats[index];
+ if (!allow_bayer && fmt->bayer)
+ return -EINVAL;
+ break;
+ case CS_SEL_ANY:
+ if (!allow_non_mbus) {
+ if (index >= mbus_yuv_sz) {
+ index -= mbus_yuv_sz;
+ if (index >= mbus_rgb_sz)
+ return -EINVAL;
+ fmt = &rgb_formats[index];
+ if (!allow_bayer && fmt->bayer)
+ return -EINVAL;
+ } else {
+ fmt = &yuv_formats[index];
+ }
+ } else {
+ if (index >= yuv_sz + rgb_sz)
+ return -EINVAL;
+ if (index >= yuv_sz) {
+ fmt = &rgb_formats[index - yuv_sz];
+ if (!allow_bayer && fmt->bayer)
+ return -EINVAL;
+ } else {
+ fmt = &yuv_formats[index];
+ }
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (fourcc)
+ *fourcc = fmt->fourcc;
+ if (code)
+ *code = fmt->codes[0];
+
+ return 0;
+}
+
+const struct imx_media_pixfmt *
+imx_media_find_format(u32 fourcc, enum codespace_sel cs_sel, bool allow_bayer)
+{
+ return find_format(fourcc, 0, cs_sel, true, allow_bayer);
+}
+EXPORT_SYMBOL_GPL(imx_media_find_format);
+
+int imx_media_enum_format(u32 *fourcc, u32 index, enum codespace_sel cs_sel)
+{
+ return enum_format(fourcc, NULL, index, cs_sel, true, false);
+}
+EXPORT_SYMBOL_GPL(imx_media_enum_format);
+
+const struct imx_media_pixfmt *
+imx_media_find_mbus_format(u32 code, enum codespace_sel cs_sel,
+ bool allow_bayer)
+{
+ return find_format(0, code, cs_sel, false, allow_bayer);
+}
+EXPORT_SYMBOL_GPL(imx_media_find_mbus_format);
+
+int imx_media_enum_mbus_format(u32 *code, u32 index, enum codespace_sel cs_sel,
+ bool allow_bayer)
+{
+ return enum_format(NULL, code, index, cs_sel, false, allow_bayer);
+}
+EXPORT_SYMBOL_GPL(imx_media_enum_mbus_format);
+
+const struct imx_media_pixfmt *
+imx_media_find_ipu_format(u32 code, enum codespace_sel cs_sel)
+{
+ const struct imx_media_pixfmt *array, *fmt, *ret = NULL;
+ u32 array_size;
+ int i, j;
+
+ switch (cs_sel) {
+ case CS_SEL_YUV:
+ array_size = NUM_IPU_YUV_FORMATS;
+ array = ipu_yuv_formats;
+ break;
+ case CS_SEL_RGB:
+ array_size = NUM_IPU_RGB_FORMATS;
+ array = ipu_rgb_formats;
+ break;
+ case CS_SEL_ANY:
+ array_size = NUM_IPU_YUV_FORMATS + NUM_IPU_RGB_FORMATS;
+ array = ipu_yuv_formats;
+ break;
+ default:
+ return NULL;
+ }
+
+ for (i = 0; i < array_size; i++) {
+ if (cs_sel == CS_SEL_ANY && i >= NUM_IPU_YUV_FORMATS)
+ fmt = &ipu_rgb_formats[i - NUM_IPU_YUV_FORMATS];
+ else
+ fmt = &array[i];
+
+ for (j = 0; code && fmt->codes[j]; j++) {
+ if (code == fmt->codes[j]) {
+ ret = fmt;
+ goto out;
+ }
+ }
+ }
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(imx_media_find_ipu_format);
+
+int imx_media_enum_ipu_format(u32 *code, u32 index, enum codespace_sel cs_sel)
+{
+ switch (cs_sel) {
+ case CS_SEL_YUV:
+ if (index >= NUM_IPU_YUV_FORMATS)
+ return -EINVAL;
+ *code = ipu_yuv_formats[index].codes[0];
+ break;
+ case CS_SEL_RGB:
+ if (index >= NUM_IPU_RGB_FORMATS)
+ return -EINVAL;
+ *code = ipu_rgb_formats[index].codes[0];
+ break;
+ case CS_SEL_ANY:
+ if (index >= NUM_IPU_YUV_FORMATS + NUM_IPU_RGB_FORMATS)
+ return -EINVAL;
+ if (index >= NUM_IPU_YUV_FORMATS) {
+ index -= NUM_IPU_YUV_FORMATS;
+ *code = ipu_rgb_formats[index].codes[0];
+ } else {
+ *code = ipu_yuv_formats[index].codes[0];
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_enum_ipu_format);
+
+int imx_media_init_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
+ u32 width, u32 height, u32 code, u32 field,
+ const struct imx_media_pixfmt **cc)
+{
+ const struct imx_media_pixfmt *lcc;
+
+ mbus->width = width;
+ mbus->height = height;
+ mbus->field = field;
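+ /* if no code was given, default to the first YUV mbus format */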
+ if (code == 0)
+ imx_media_enum_mbus_format(&code, 0, CS_SEL_YUV, false);
+ lcc = imx_media_find_mbus_format(code, CS_SEL_ANY, false);
+ if (!lcc) {
+ lcc = imx_media_find_ipu_format(code, CS_SEL_ANY);
+ if (!lcc)
+ return -EINVAL;
+ }
+
+ mbus->code = code;
+ init_mbus_colorimetry(mbus, lcc);
+ if (cc)
+ *cc = lcc;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_init_mbus_fmt);
+
+/*
+ * Check whether the field and colorimetry parameters in tryfmt are
+ * uninitialized and, if so, fill them with the values from fmt. If
+ * tryfmt->colorspace has been initialized, the remaining default
+ * colorimetry parameters are derived from tryfmt->colorspace instead.
+ *
+ * tryfmt->code must be set on entry.
+ *
+ * If this format is destined to be routed through the Image Converter,
+ * quantization and Y'CbCr encoding must be fixed. The IC expects and
+ * produces fixed quantization and Y'CbCr encoding at its input and output
+ * (full range for RGB, limited range for YUV, and V4L2_YCBCR_ENC_601).
+ */
+void imx_media_fill_default_mbus_fields(struct v4l2_mbus_framefmt *tryfmt,
+ struct v4l2_mbus_framefmt *fmt,
+ bool ic_route)
+{
+ const struct imx_media_pixfmt *cc;
+ bool is_rgb = false;
+
+ cc = imx_media_find_mbus_format(tryfmt->code, CS_SEL_ANY, true);
+ if (!cc)
+ cc = imx_media_find_ipu_format(tryfmt->code, CS_SEL_ANY);
+ if (cc && cc->cs != IPUV3_COLORSPACE_YUV)
+ is_rgb = true;
+
+ /* fill field if necessary */
+ if (tryfmt->field == V4L2_FIELD_ANY)
+ tryfmt->field = fmt->field;
+
+ /* fill colorimetry if necessary */
+ if (tryfmt->colorspace == V4L2_COLORSPACE_DEFAULT) {
+ tryfmt->colorspace = fmt->colorspace;
+ tryfmt->xfer_func = fmt->xfer_func;
+ tryfmt->ycbcr_enc = fmt->ycbcr_enc;
+ tryfmt->quantization = fmt->quantization;
+ } else {
+ if (tryfmt->xfer_func == V4L2_XFER_FUNC_DEFAULT) {
+ tryfmt->xfer_func =
+ V4L2_MAP_XFER_FUNC_DEFAULT(tryfmt->colorspace);
+ }
+ if (tryfmt->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT) {
+ tryfmt->ycbcr_enc =
+ V4L2_MAP_YCBCR_ENC_DEFAULT(tryfmt->colorspace);
+ }
+ if (tryfmt->quantization == V4L2_QUANTIZATION_DEFAULT) {
+ tryfmt->quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(
+ is_rgb, tryfmt->colorspace,
+ tryfmt->ycbcr_enc);
+ }
+ }
+
+ if (ic_route) {
+ tryfmt->quantization = is_rgb ?
+ V4L2_QUANTIZATION_FULL_RANGE :
+ V4L2_QUANTIZATION_LIM_RANGE;
+ tryfmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ }
+}
+EXPORT_SYMBOL_GPL(imx_media_fill_default_mbus_fields);
+
+int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
+ struct v4l2_mbus_framefmt *mbus,
+ const struct imx_media_pixfmt *cc)
+{
+ u32 stride;
+
+ if (!cc) {
+ cc = imx_media_find_ipu_format(mbus->code, CS_SEL_ANY);
+ if (!cc)
+ cc = imx_media_find_mbus_format(mbus->code, CS_SEL_ANY,
+ true);
+ if (!cc)
+ return -EINVAL;
+ }
+
+ /*
+ * TODO: the IPU currently does not support the AYUV32 format,
+ * so until it does, convert to a supported YUV format.
+ */
+ if (cc->ipufmt && cc->cs == IPUV3_COLORSPACE_YUV) {
+ u32 code;
+
+ imx_media_enum_mbus_format(&code, 0, CS_SEL_YUV, false);
+ cc = imx_media_find_mbus_format(code, CS_SEL_YUV, false);
+ }
+
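+ /* planar formats use the luma plane width as the line stride */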
+ stride = cc->planar ? mbus->width : (mbus->width * cc->bpp) >> 3;
+
+ pix->width = mbus->width;
+ pix->height = mbus->height;
+ pix->pixelformat = cc->fourcc;
+ pix->colorspace = mbus->colorspace;
+ pix->xfer_func = mbus->xfer_func;
+ pix->ycbcr_enc = mbus->ycbcr_enc;
+ pix->quantization = mbus->quantization;
+ pix->field = mbus->field;
+ pix->bytesperline = stride;
+ pix->sizeimage = (pix->width * pix->height * cc->bpp) >> 3;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_mbus_fmt_to_pix_fmt);
+
+int imx_media_mbus_fmt_to_ipu_image(struct ipu_image *image,
+ struct v4l2_mbus_framefmt *mbus)
+{
+ int ret;
+
+ memset(image, 0, sizeof(*image));
+
+ ret = imx_media_mbus_fmt_to_pix_fmt(&image->pix, mbus, NULL);
+ if (ret)
+ return ret;
+
+ image->rect.width = mbus->width;
+ image->rect.height = mbus->height;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_mbus_fmt_to_ipu_image);
+
+int imx_media_ipu_image_to_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
+ struct ipu_image *image)
+{
+ const struct imx_media_pixfmt *fmt;
+
+ fmt = imx_media_find_format(image->pix.pixelformat, CS_SEL_ANY, true);
+ if (!fmt)
+ return -EINVAL;
+
+ memset(mbus, 0, sizeof(*mbus));
+ mbus->width = image->pix.width;
+ mbus->height = image->pix.height;
+ mbus->code = fmt->codes[0];
+ mbus->field = image->pix.field;
+ mbus->colorspace = image->pix.colorspace;
+ mbus->xfer_func = image->pix.xfer_func;
+ mbus->ycbcr_enc = image->pix.ycbcr_enc;
+ mbus->quantization = image->pix.quantization;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_ipu_image_to_mbus_fmt);
+
+void imx_media_free_dma_buf(struct imx_media_dev *imxmd,
+ struct imx_media_dma_buf *buf)
+{
+ if (buf->virt)
+ dma_free_coherent(imxmd->md.dev, buf->len,
+ buf->virt, buf->phys);
+
+ buf->virt = NULL;
+ buf->phys = 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_free_dma_buf);
+
+int imx_media_alloc_dma_buf(struct imx_media_dev *imxmd,
+ struct imx_media_dma_buf *buf,
+ int size)
+{
+ imx_media_free_dma_buf(imxmd, buf);
+
+ buf->len = PAGE_ALIGN(size);
+ buf->virt = dma_alloc_coherent(imxmd->md.dev, buf->len, &buf->phys,
+ GFP_DMA | GFP_KERNEL);
+ if (!buf->virt) {
+ dev_err(imxmd->md.dev, "failed to alloc dma buffer\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(imx_media_alloc_dma_buf);
+
+/* form a subdev name given a group id and ipu id */
+void imx_media_grp_id_to_sd_name(char *sd_name, int sz, u32 grp_id, int ipu_id)
+{
+ int id;
+
+ switch (grp_id) {
+ case IMX_MEDIA_GRP_ID_CSI0 ... IMX_MEDIA_GRP_ID_CSI1:
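+ /* CSI group ids encode the CSI index + 1 above IMX_MEDIA_GRP_ID_CSI_BIT */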
+ id = (grp_id >> IMX_MEDIA_GRP_ID_CSI_BIT) - 1;
+ snprintf(sd_name, sz, "ipu%d_csi%d", ipu_id + 1, id);
+ break;
+ case IMX_MEDIA_GRP_ID_VDIC:
+ snprintf(sd_name, sz, "ipu%d_vdic", ipu_id + 1);
+ break;
+ case IMX_MEDIA_GRP_ID_IC_PRP:
+ snprintf(sd_name, sz, "ipu%d_ic_prp", ipu_id + 1);
+ break;
+ case IMX_MEDIA_GRP_ID_IC_PRPENC:
+ snprintf(sd_name, sz, "ipu%d_ic_prpenc", ipu_id + 1);
+ break;
+ case IMX_MEDIA_GRP_ID_IC_PRPVF:
+ snprintf(sd_name, sz, "ipu%d_ic_prpvf", ipu_id + 1);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(imx_media_grp_id_to_sd_name);
+
+struct imx_media_subdev *
+imx_media_find_subdev_by_sd(struct imx_media_dev *imxmd,
+ struct v4l2_subdev *sd)
+{
+ struct imx_media_subdev *imxsd;
+ int i;
+
+ for (i = 0; i < imxmd->num_subdevs; i++) {
+ imxsd = &imxmd->subdev[i];
+ if (sd == imxsd->sd)
+ return imxsd;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(imx_media_find_subdev_by_sd);
+
+struct imx_media_subdev *
+imx_media_find_subdev_by_id(struct imx_media_dev *imxmd, u32 grp_id)
+{
+ struct imx_media_subdev *imxsd;
+ int i;
+
+ for (i = 0; i < imxmd->num_subdevs; i++) {
+ imxsd = &imxmd->subdev[i];
+ if (imxsd->sd && imxsd->sd->grp_id == grp_id)
+ return imxsd;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+EXPORT_SYMBOL_GPL(imx_media_find_subdev_by_id);
+
+/*
+ * Adds a video device to the master video device list. This is called by
+ * an async subdev that owns a video device when it is registered.
+ */
+int imx_media_add_video_device(struct imx_media_dev *imxmd,
+ struct imx_media_video_dev *vdev)
+{
+ int vdev_idx, ret = 0;
+
+ mutex_lock(&imxmd->mutex);
+
+ vdev_idx = imxmd->num_vdevs;
+ if (vdev_idx >= IMX_MEDIA_MAX_VDEVS) {
+ dev_err(imxmd->md.dev,
+ "%s: too many video devices! can't add %s\n",
+ __func__, vdev->vfd->name);
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ imxmd->vdev[vdev_idx] = vdev;
+ imxmd->num_vdevs++;
+out:
+ mutex_unlock(&imxmd->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(imx_media_add_video_device);
+
+/*
+ * Search upstream or downstream for a subdev in the current pipeline
+ * with the given grp_id, starting from start_entity. Returns the
+ * source/sink pad of that subdev through which it was reached. Must be
+ * called with mdev->graph_mutex held.
+ */
+static struct media_pad *
+find_pipeline_pad(struct imx_media_dev *imxmd,
+ struct media_entity *start_entity,
+ u32 grp_id, bool upstream)
+{
+ struct media_entity *me = start_entity;
+ struct media_pad *pad = NULL;
+ struct v4l2_subdev *sd;
+ int i;
+
+ for (i = 0; i < me->num_pads; i++) {
+ struct media_pad *spad = &me->pads[i];
+
+ if ((upstream && !(spad->flags & MEDIA_PAD_FL_SINK)) ||
+ (!upstream && !(spad->flags & MEDIA_PAD_FL_SOURCE)))
+ continue;
+
+ pad = media_entity_remote_pad(spad);
+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
+ continue;
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ if (sd->grp_id & grp_id)
+ return pad;
+
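+ /* not a match, continue the search from the remote entity */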
+ return find_pipeline_pad(imxmd, pad->entity, grp_id, upstream);
+ }
+
+ return NULL;
+}
+
+/*
+ * Search upstream for a subdev in the current pipeline with
+ * given grp_id. Must be called with mdev->graph_mutex held.
+ */
+static struct v4l2_subdev *
+find_upstream_subdev(struct imx_media_dev *imxmd,
+ struct media_entity *start_entity,
+ u32 grp_id)
+{
+ struct v4l2_subdev *sd;
+ struct media_pad *pad;
+
+ if (is_media_entity_v4l2_subdev(start_entity)) {
+ sd = media_entity_to_v4l2_subdev(start_entity);
+ if (sd->grp_id & grp_id)
+ return sd;
+ }
+
+ pad = find_pipeline_pad(imxmd, start_entity, grp_id, true);
+
+ return pad ? media_entity_to_v4l2_subdev(pad->entity) : NULL;
+}
+
+
+/*
+ * Find the upstream mipi-csi2 virtual channel reached from the given
+ * start entity in the current pipeline.
+ * Must be called with mdev->graph_mutex held.
+ */
+int imx_media_find_mipi_csi2_channel(struct imx_media_dev *imxmd,
+ struct media_entity *start_entity)
+{
+ struct media_pad *pad;
+ int ret = -EPIPE;
+
+ pad = find_pipeline_pad(imxmd, start_entity, IMX_MEDIA_GRP_ID_CSI2,
+ true);
+ if (pad) {
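+ /* CSI-2 source pads 1..4 carry virtual channels 0..3 */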
+ ret = pad->index - 1;
+ dev_dbg(imxmd->md.dev, "found vc%d from %s\n",
+ ret, start_entity->name);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(imx_media_find_mipi_csi2_channel);
+
+/*
+ * Find a subdev reached upstream from the given start entity in
+ * the current pipeline.
+ * Must be called with mdev->graph_mutex held.
+ */
+struct imx_media_subdev *
+imx_media_find_upstream_subdev(struct imx_media_dev *imxmd,
+ struct media_entity *start_entity,
+ u32 grp_id)
+{
+ struct v4l2_subdev *sd;
+
+ sd = find_upstream_subdev(imxmd, start_entity, grp_id);
+ if (!sd)
+ return ERR_PTR(-ENODEV);
+
+ return imx_media_find_subdev_by_sd(imxmd, sd);
+}
+EXPORT_SYMBOL_GPL(imx_media_find_upstream_subdev);
+
+struct imx_media_subdev *
+__imx_media_find_sensor(struct imx_media_dev *imxmd,
+ struct media_entity *start_entity)
+{
+ return imx_media_find_upstream_subdev(imxmd, start_entity,
+ IMX_MEDIA_GRP_ID_SENSOR);
+}
+EXPORT_SYMBOL_GPL(__imx_media_find_sensor);
+
+struct imx_media_subdev *
+imx_media_find_sensor(struct imx_media_dev *imxmd,
+ struct media_entity *start_entity)
+{
+ struct imx_media_subdev *sensor;
+
+ mutex_lock(&imxmd->md.graph_mutex);
+ sensor = __imx_media_find_sensor(imxmd, start_entity);
+ mutex_unlock(&imxmd->md.graph_mutex);
+
+ return sensor;
+}
+EXPORT_SYMBOL_GPL(imx_media_find_sensor);
+
+/*
+ * Turn current pipeline streaming on/off starting from entity.
+ */
+int imx_media_pipeline_set_stream(struct imx_media_dev *imxmd,
+ struct media_entity *entity,
+ bool on)
+{
+ struct v4l2_subdev *sd;
+ int ret = 0;
+
+ if (!is_media_entity_v4l2_subdev(entity))
+ return -EINVAL;
+ sd = media_entity_to_v4l2_subdev(entity);
+
+ mutex_lock(&imxmd->md.graph_mutex);
+
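+ /*
+ * when starting, mark the pipeline before streaming on,
+ * and undo it if streaming fails
+ */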
+ if (on) {
+ ret = __media_pipeline_start(entity, &imxmd->pipe);
+ if (ret)
+ goto out;
+ ret = v4l2_subdev_call(sd, video, s_stream, 1);
+ if (ret)
+ __media_pipeline_stop(entity);
+ } else {
+ v4l2_subdev_call(sd, video, s_stream, 0);
+ if (entity->pipe)
+ __media_pipeline_stop(entity);
+ }
+
+out:
+ mutex_unlock(&imxmd->md.graph_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(imx_media_pipeline_set_stream);
+
+MODULE_DESCRIPTION("i.MX5/6 v4l2 media controller driver");
+MODULE_AUTHOR("Steve Longerbeam <[email protected]>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/imx/imx-media-vdic.c b/drivers/staging/media/imx/imx-media-vdic.c
new file mode 100644
index 000000000000..7eabdc4aa79f
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media-vdic.c
@@ -0,0 +1,1009 @@
+/*
+ * V4L2 Deinterlacer Subdev for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2017 Mentor Graphics Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/imx.h>
+#include "imx-media.h"
+
+/*
+ * This subdev implements two different video pipelines:
+ *
+ * CSI -> VDIC
+ *
+ * In this pipeline, the CSI sends a single interlaced field F(n-1)
+ * directly to the VDIC (and optionally the following field F(n)
+ * can be sent to memory via IDMAC channel 13). This pipeline only works
+ * in VDIC's high motion mode, which only requires a single field for
+ * processing. The other motion modes (low and medium) require three
+ * fields, so this pipeline does not work in those modes. Also, it is
+ * not clear how this pipeline can deal with the various field orders
+ * (sequential BT/TB, interlaced BT/TB).
+ *
+ * MEM -> CH8,9,10 -> VDIC
+ *
+ * In this pipeline, previous field F(n-1), current field F(n), and next
+ * field F(n+1) are transferred to the VDIC via IDMAC channels 8,9,10.
+ * These memory buffers can come from a video output or mem2mem device.
+ * All motion modes are supported by this pipeline.
+ *
+ * The "direct" CSI->VDIC pipeline requires no DMA, but it can only be
+ * used in high motion mode.
+ */
+
+struct vdic_priv;
+
+struct vdic_pipeline_ops {
+ int (*setup)(struct vdic_priv *priv);
+ void (*start)(struct vdic_priv *priv);
+ void (*stop)(struct vdic_priv *priv);
+ void (*disable)(struct vdic_priv *priv);
+};
+
+/*
+ * Min/max supported width and height.
+ */
+#define MIN_W 176
+#define MIN_H 144
+#define MAX_W_VDIC 968
+#define MAX_H_VDIC 2048
+#define W_ALIGN 4 /* multiple of 16 pixels */
+#define H_ALIGN 1 /* multiple of 2 lines */
+#define S_ALIGN 1 /* multiple of 2 */
+
+struct vdic_priv {
+ struct device *dev;
+ struct ipu_soc *ipu;
+ struct imx_media_dev *md;
+ struct v4l2_subdev sd;
+ struct media_pad pad[VDIC_NUM_PADS];
+ int ipu_id;
+
+ /* lock to protect all members below */
+ struct mutex lock;
+
+ /* IPU units we require */
+ struct ipu_vdi *vdi;
+
+ int active_input_pad;
+
+ struct ipuv3_channel *vdi_in_ch_p; /* F(n-1) transfer channel */
+ struct ipuv3_channel *vdi_in_ch; /* F(n) transfer channel */
+ struct ipuv3_channel *vdi_in_ch_n; /* F(n+1) transfer channel */
+
+ /* pipeline operations */
+ struct vdic_pipeline_ops *ops;
+
+ /* current and previous input buffers for the indirect path */
+ struct imx_media_buffer *curr_in_buf;
+ struct imx_media_buffer *prev_in_buf;
+
+ /*
+ * translated field type, input line stride, and field size
+ * for indirect path
+ */
+ u32 fieldtype;
+ u32 in_stride;
+ u32 field_size;
+
+ /* the source (a video device or subdev) */
+ struct media_entity *src;
+ /* the sink that will receive the progressive out buffers */
+ struct v4l2_subdev *sink_sd;
+
+ struct v4l2_mbus_framefmt format_mbus[VDIC_NUM_PADS];
+ const struct imx_media_pixfmt *cc[VDIC_NUM_PADS];
+ struct v4l2_fract frame_interval[VDIC_NUM_PADS];
+
+ /* the video device at IDMAC input pad */
+ struct imx_media_video_dev *vdev;
+
+ bool csi_direct; /* using direct CSI->VDIC->IC pipeline */
+
+ /* motion select control */
+ struct v4l2_ctrl_handler ctrl_hdlr;
+ enum ipu_motion_sel motion;
+
+ int stream_count;
+};
+
+static void vdic_put_ipu_resources(struct vdic_priv *priv)
+{
+ if (!IS_ERR_OR_NULL(priv->vdi_in_ch_p))
+ ipu_idmac_put(priv->vdi_in_ch_p);
+ priv->vdi_in_ch_p = NULL;
+
+ if (!IS_ERR_OR_NULL(priv->vdi_in_ch))
+ ipu_idmac_put(priv->vdi_in_ch);
+ priv->vdi_in_ch = NULL;
+
+ if (!IS_ERR_OR_NULL(priv->vdi_in_ch_n))
+ ipu_idmac_put(priv->vdi_in_ch_n);
+ priv->vdi_in_ch_n = NULL;
+
+ if (!IS_ERR_OR_NULL(priv->vdi))
+ ipu_vdi_put(priv->vdi);
+ priv->vdi = NULL;
+}
+
+static int vdic_get_ipu_resources(struct vdic_priv *priv)
+{
+ int ret, err_chan;
+
+ priv->ipu = priv->md->ipu[priv->ipu_id];
+
+ priv->vdi = ipu_vdi_get(priv->ipu);
+ if (IS_ERR(priv->vdi)) {
+ v4l2_err(&priv->sd, "failed to get VDIC\n");
+ ret = PTR_ERR(priv->vdi);
+ goto out;
+ }
+
+ if (!priv->csi_direct) {
+ priv->vdi_in_ch_p = ipu_idmac_get(priv->ipu,
+ IPUV3_CHANNEL_MEM_VDI_PREV);
+ if (IS_ERR(priv->vdi_in_ch_p)) {
+ err_chan = IPUV3_CHANNEL_MEM_VDI_PREV;
+ ret = PTR_ERR(priv->vdi_in_ch_p);
+ goto out_err_chan;
+ }
+
+ priv->vdi_in_ch = ipu_idmac_get(priv->ipu,
+ IPUV3_CHANNEL_MEM_VDI_CUR);
+ if (IS_ERR(priv->vdi_in_ch)) {
+ err_chan = IPUV3_CHANNEL_MEM_VDI_CUR;
+ ret = PTR_ERR(priv->vdi_in_ch);
+ goto out_err_chan;
+ }
+
+ priv->vdi_in_ch_n = ipu_idmac_get(priv->ipu,
+ IPUV3_CHANNEL_MEM_VDI_NEXT);
+ if (IS_ERR(priv->vdi_in_ch_n)) {
+ err_chan = IPUV3_CHANNEL_MEM_VDI_NEXT;
+ ret = PTR_ERR(priv->vdi_in_ch_n);
+ goto out_err_chan;
+ }
+ }
+
+ return 0;
+
+out_err_chan:
+ v4l2_err(&priv->sd, "could not get IDMAC channel %u\n", err_chan);
+out:
+ vdic_put_ipu_resources(priv);
+ return ret;
+}
+
+/*
+ * This function is currently unused, but will be called when the
+ * output/mem2mem device at the IDMAC input pad sends us a new
+ * buffer. It kicks off the IDMAC read channels to bring in the
+ * buffer fields from memory and begin the conversions.
+ */
+static void __maybe_unused prepare_vdi_in_buffers(struct vdic_priv *priv,
+ struct imx_media_buffer *curr)
+{
+ dma_addr_t prev_phys, curr_phys, next_phys;
+ struct imx_media_buffer *prev;
+ struct vb2_buffer *curr_vb, *prev_vb;
+ u32 fs = priv->field_size;
+ u32 is = priv->in_stride;
+
+ /* current input buffer is now previous */
+ priv->prev_in_buf = priv->curr_in_buf;
+ priv->curr_in_buf = curr;
+ prev = priv->prev_in_buf ? priv->prev_in_buf : curr;
+
+ prev_vb = &prev->vbuf.vb2_buf;
+ curr_vb = &curr->vbuf.vb2_buf;
+
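+ /*
+ * locate the previous, current and next fields: sequential
+ * formats are offset by the field size, interlaced formats
+ * by one line stride
+ */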
+ switch (priv->fieldtype) {
+ case V4L2_FIELD_SEQ_TB:
+ prev_phys = vb2_dma_contig_plane_dma_addr(prev_vb, 0);
+ curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + fs;
+ next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
+ break;
+ case V4L2_FIELD_SEQ_BT:
+ prev_phys = vb2_dma_contig_plane_dma_addr(prev_vb, 0) + fs;
+ curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
+ next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + fs;
+ break;
+ case V4L2_FIELD_INTERLACED_BT:
+ prev_phys = vb2_dma_contig_plane_dma_addr(prev_vb, 0) + is;
+ curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
+ next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + is;
+ break;
+ default:
+ /* assume V4L2_FIELD_INTERLACED_TB */
+ prev_phys = vb2_dma_contig_plane_dma_addr(prev_vb, 0);
+ curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + is;
+ next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
+ break;
+ }
+
+ ipu_cpmem_set_buffer(priv->vdi_in_ch_p, 0, prev_phys);
+ ipu_cpmem_set_buffer(priv->vdi_in_ch, 0, curr_phys);
+ ipu_cpmem_set_buffer(priv->vdi_in_ch_n, 0, next_phys);
+
+ ipu_idmac_select_buffer(priv->vdi_in_ch_p, 0);
+ ipu_idmac_select_buffer(priv->vdi_in_ch, 0);
+ ipu_idmac_select_buffer(priv->vdi_in_ch_n, 0);
+}
+
+static int setup_vdi_channel(struct vdic_priv *priv,
+ struct ipuv3_channel *channel,
+ dma_addr_t phys0, dma_addr_t phys1)
+{
+ struct imx_media_video_dev *vdev = priv->vdev;
+ unsigned int burst_size;
+ struct ipu_image image;
+ int ret;
+
+ ipu_cpmem_zero(channel);
+
+ memset(&image, 0, sizeof(image));
+ image.pix = vdev->fmt.fmt.pix;
+ /* one field to VDIC channels */
+ image.pix.height /= 2;
+ image.rect.width = image.pix.width;
+ image.rect.height = image.pix.height;
+ image.phys0 = phys0;
+ image.phys1 = phys1;
+
+ ret = ipu_cpmem_set_image(channel, &image);
+ if (ret)
+ return ret;
+
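+ /* use a burst of 16 pixels when the width is a multiple of 16, else 8 */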
+ burst_size = (image.pix.width & 0xf) ? 8 : 16;
+ ipu_cpmem_set_burstsize(channel, burst_size);
+
+ ipu_cpmem_set_axi_id(channel, 1);
+
+ ipu_idmac_set_double_buffer(channel, false);
+
+ return 0;
+}
+
+static int vdic_setup_direct(struct vdic_priv *priv)
+{
+ /* set VDIC to receive from CSI for direct path */
+ ipu_fsu_link(priv->ipu, IPUV3_CHANNEL_CSI_DIRECT,
+ IPUV3_CHANNEL_CSI_VDI_PREV);
+
+ return 0;
+}
+
+static void vdic_start_direct(struct vdic_priv *priv)
+{
+}
+
+static void vdic_stop_direct(struct vdic_priv *priv)
+{
+}
+
+static void vdic_disable_direct(struct vdic_priv *priv)
+{
+ ipu_fsu_unlink(priv->ipu, IPUV3_CHANNEL_CSI_DIRECT,
+ IPUV3_CHANNEL_CSI_VDI_PREV);
+}
+
+static int vdic_setup_indirect(struct vdic_priv *priv)
+{
+ struct v4l2_mbus_framefmt *infmt;
+ const struct imx_media_pixfmt *incc;
+ int in_size, ret;
+
+ infmt = &priv->format_mbus[VDIC_SINK_PAD_IDMAC];
+ incc = priv->cc[VDIC_SINK_PAD_IDMAC];
+
+ in_size = (infmt->width * incc->bpp * infmt->height) >> 3;
+
+ /* 1/2 full image size */
+ priv->field_size = in_size / 2;
+ priv->in_stride = incc->planar ?
+ infmt->width : (infmt->width * incc->bpp) >> 3;
+
+ priv->prev_in_buf = NULL;
+ priv->curr_in_buf = NULL;
+
+ priv->fieldtype = infmt->field;
+
+ /* init the vdi-in channels */
+ ret = setup_vdi_channel(priv, priv->vdi_in_ch_p, 0, 0);
+ if (ret)
+ return ret;
+ ret = setup_vdi_channel(priv, priv->vdi_in_ch, 0, 0);
+ if (ret)
+ return ret;
+ return setup_vdi_channel(priv, priv->vdi_in_ch_n, 0, 0);
+}
+
+static void vdic_start_indirect(struct vdic_priv *priv)
+{
+ /* enable the channels */
+ ipu_idmac_enable_channel(priv->vdi_in_ch_p);
+ ipu_idmac_enable_channel(priv->vdi_in_ch);
+ ipu_idmac_enable_channel(priv->vdi_in_ch_n);
+}
+
+static void vdic_stop_indirect(struct vdic_priv *priv)
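+ /* for CS_SEL_ANY, walk the YUV table followed by the RGB table */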
+{
+ /* disable channels */
+ ipu_idmac_disable_channel(priv->vdi_in_ch_p);
+ ipu_idmac_disable_channel(priv->vdi_in_ch);
+ ipu_idmac_disable_channel(priv->vdi_in_ch_n);
+}
+
+static void vdic_disable_indirect(struct vdic_priv *priv)
+{
+}
+
+static struct vdic_pipeline_ops direct_ops = {
+ .setup = vdic_setup_direct,
+ .start = vdic_start_direct,
+ .stop = vdic_stop_direct,
+ .disable = vdic_disable_direct,
+};
+
+static struct vdic_pipeline_ops indirect_ops = {
+ .setup = vdic_setup_indirect,
+ .start = vdic_start_indirect,
+ .stop = vdic_stop_indirect,
+ .disable = vdic_disable_indirect,
+};
+
+static int vdic_start(struct vdic_priv *priv)
+{
+ struct v4l2_mbus_framefmt *infmt;
+ int ret;
+
+ infmt = &priv->format_mbus[priv->active_input_pad];
+
+ priv->ops = priv->csi_direct ? &direct_ops : &indirect_ops;
+
+ ret = vdic_get_ipu_resources(priv);
+ if (ret)
+ return ret;
+
+ /*
+ * init the VDIC.
+ *
+ * note we don't give infmt->code to ipu_vdi_setup(). The VDIC
+ * only supports 4:2:2 or 4:2:0, and this subdev will only
+ * negotiate 4:2:2 at its sink pads.
+ */
+ ipu_vdi_setup(priv->vdi, MEDIA_BUS_FMT_UYVY8_2X8,
+ infmt->width, infmt->height);
+ ipu_vdi_set_field_order(priv->vdi, V4L2_STD_UNKNOWN, infmt->field);
+ ipu_vdi_set_motion(priv->vdi, priv->motion);
+
+ ret = priv->ops->setup(priv);
+ if (ret)
+ goto out_put_ipu;
+
+ ipu_vdi_enable(priv->vdi);
+
+ priv->ops->start(priv);
+
+ return 0;
+
+out_put_ipu:
+ vdic_put_ipu_resources(priv);
+ return ret;
+}
+
+static void vdic_stop(struct vdic_priv *priv)
+{
+ priv->ops->stop(priv);
+ ipu_vdi_disable(priv->vdi);
+ priv->ops->disable(priv);
+
+ vdic_put_ipu_resources(priv);
+}
+
+/*
+ * V4L2 subdev operations.
+ */
+
+static int vdic_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vdic_priv *priv = container_of(ctrl->handler,
+ struct vdic_priv, ctrl_hdlr);
+ enum ipu_motion_sel motion;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ switch (ctrl->id) {
+ case V4L2_CID_DEINTERLACING_MODE:
+ motion = ctrl->val;
+ if (motion != priv->motion) {
+ /* can't change motion control mid-streaming */
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+ priv->motion = motion;
+ }
+ break;
+ default:
+ v4l2_err(&priv->sd, "Invalid control\n");
+ ret = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops vdic_ctrl_ops = {
+ .s_ctrl = vdic_s_ctrl,
+};
+
+static const char * const vdic_ctrl_motion_menu[] = {
+ "No Motion Compensation",
+ "Low Motion",
+ "Medium Motion",
+ "High Motion",
+};
+
+static int vdic_init_controls(struct vdic_priv *priv)
+{
+ struct v4l2_ctrl_handler *hdlr = &priv->ctrl_hdlr;
+ int ret;
+
+ v4l2_ctrl_handler_init(hdlr, 1);
+
+ v4l2_ctrl_new_std_menu_items(hdlr, &vdic_ctrl_ops,
+ V4L2_CID_DEINTERLACING_MODE,
+ HIGH_MOTION, 0, HIGH_MOTION,
+ vdic_ctrl_motion_menu);
+
+ priv->sd.ctrl_handler = hdlr;
+
+ if (hdlr->error) {
+ ret = hdlr->error;
+ goto out_free;
+ }
+
+ v4l2_ctrl_handler_setup(hdlr);
+ return 0;
+
+out_free:
+ v4l2_ctrl_handler_free(hdlr);
+ return ret;
+}
+
+static int vdic_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *src_sd = NULL;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ if (!priv->src || !priv->sink_sd) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ if (priv->csi_direct)
+ src_sd = media_entity_to_v4l2_subdev(priv->src);
+
+ /*
+ * enable/disable streaming only if stream_count is
+ * going from 0 to 1 / 1 to 0.
+ */
+ if (priv->stream_count != !enable)
+ goto update_count;
+
+ dev_dbg(priv->dev, "stream %s\n", enable ? "ON" : "OFF");
+
+ if (enable)
+ ret = vdic_start(priv);
+ else
+ vdic_stop(priv);
+ if (ret)
+ goto out;
+
+ if (src_sd) {
+ /* start/stop upstream */
+ ret = v4l2_subdev_call(src_sd, video, s_stream, enable);
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+ if (ret) {
+ if (enable)
+ vdic_stop(priv);
+ goto out;
+ }
+ }
+
+update_count:
+ priv->stream_count += enable ? 1 : -1;
+ if (priv->stream_count < 0)
+ priv->stream_count = 0;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+__vdic_get_fmt(struct vdic_priv *priv, struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&priv->sd, cfg, pad);
+ else
+ return &priv->format_mbus[pad];
+}
+
+static int vdic_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad >= VDIC_NUM_PADS)
+ return -EINVAL;
+
+ return imx_media_enum_ipu_format(&code->code, code->index, CS_SEL_YUV);
+}
+
+static int vdic_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= VDIC_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ fmt = __vdic_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ sdformat->format = *fmt;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static void vdic_try_fmt(struct vdic_priv *priv,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat,
+ const struct imx_media_pixfmt **cc)
+{
+ struct v4l2_mbus_framefmt *infmt;
+
+ *cc = imx_media_find_ipu_format(sdformat->format.code, CS_SEL_YUV);
+ if (!*cc) {
+ u32 code;
+
+ imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
+ *cc = imx_media_find_ipu_format(code, CS_SEL_YUV);
+ sdformat->format.code = (*cc)->codes[0];
+ }
+
+ infmt = __vdic_get_fmt(priv, cfg, priv->active_input_pad,
+ sdformat->which);
+
+ switch (sdformat->pad) {
+ case VDIC_SRC_PAD_DIRECT:
+ sdformat->format = *infmt;
+ /* output is always progressive! */
+ sdformat->format.field = V4L2_FIELD_NONE;
+ break;
+ case VDIC_SINK_PAD_DIRECT:
+ case VDIC_SINK_PAD_IDMAC:
+ v4l_bound_align_image(&sdformat->format.width,
+ MIN_W, MAX_W_VDIC, W_ALIGN,
+ &sdformat->format.height,
+ MIN_H, MAX_H_VDIC, H_ALIGN, S_ALIGN);
+
+ imx_media_fill_default_mbus_fields(&sdformat->format, infmt,
+ true);
+
+ /* input must be interlaced! Choose SEQ_TB if not */
+ if (!V4L2_FIELD_HAS_BOTH(sdformat->format.field))
+ sdformat->format.field = V4L2_FIELD_SEQ_TB;
+ break;
+ }
+}
+
+static int vdic_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ const struct imx_media_pixfmt *cc;
+ struct v4l2_mbus_framefmt *fmt;
+ int ret = 0;
+
+ if (sdformat->pad >= VDIC_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ vdic_try_fmt(priv, cfg, sdformat, &cc);
+
+ fmt = __vdic_get_fmt(priv, cfg, sdformat->pad, sdformat->which);
+ *fmt = sdformat->format;
+
+ /* propagate format to source pad */
+ if (sdformat->pad == VDIC_SINK_PAD_DIRECT ||
+ sdformat->pad == VDIC_SINK_PAD_IDMAC) {
+ const struct imx_media_pixfmt *outcc;
+ struct v4l2_mbus_framefmt *outfmt;
+ struct v4l2_subdev_format format;
+
+ format.pad = VDIC_SRC_PAD_DIRECT;
+ format.which = sdformat->which;
+ format.format = sdformat->format;
+ vdic_try_fmt(priv, cfg, &format, &outcc);
+
+ outfmt = __vdic_get_fmt(priv, cfg, VDIC_SRC_PAD_DIRECT,
+ sdformat->which);
+ *outfmt = format.format;
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[VDIC_SRC_PAD_DIRECT] = outcc;
+ }
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ priv->cc[sdformat->pad] = cc;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int vdic_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_subdev *remote_sd;
+ int ret = 0;
+
+ dev_dbg(priv->dev, "link setup %s -> %s\n", remote->entity->name,
+ local->entity->name);
+
+ mutex_lock(&priv->lock);
+
+ if (local->flags & MEDIA_PAD_FL_SOURCE) {
+ if (!is_media_entity_v4l2_subdev(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->sink_sd) {
+ ret = -EBUSY;
+ goto out;
+ }
+ priv->sink_sd = remote_sd;
+ } else {
+ priv->sink_sd = NULL;
+ }
+
+ goto out;
+ }
+
+ /* this is a sink pad */
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (priv->src) {
+ ret = -EBUSY;
+ goto out;
+ }
+ } else {
+ priv->src = NULL;
+ goto out;
+ }
+
+ if (local->index == VDIC_SINK_PAD_IDMAC) {
+ struct imx_media_video_dev *vdev = priv->vdev;
+
+ if (!is_media_entity_v4l2_video_device(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!vdev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ priv->csi_direct = false;
+ } else {
+ if (!is_media_entity_v4l2_subdev(remote->entity)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ /* direct pad must connect to a CSI */
+ if (!(remote_sd->grp_id & IMX_MEDIA_GRP_ID_CSI) ||
+ remote->index != CSI_SRC_PAD_DIRECT) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->csi_direct = true;
+ }
+
+ priv->src = remote->entity;
+ /* record which input pad is now active */
+ priv->active_input_pad = local->index;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int vdic_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ int ret;
+
+ ret = v4l2_subdev_link_validate_default(sd, link,
+ source_fmt, sink_fmt);
+ if (ret)
+ return ret;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->csi_direct && priv->motion != HIGH_MOTION) {
+ v4l2_err(&priv->sd,
+ "direct CSI pipeline requires high motion\n");
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int vdic_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+
+ if (fi->pad >= VDIC_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
+
+ fi->interval = priv->frame_interval[fi->pad];
+
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int vdic_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ struct v4l2_fract *input_fi, *output_fi;
+ int ret = 0;
+
+ mutex_lock(&priv->lock);
+
+ input_fi = &priv->frame_interval[priv->active_input_pad];
+ output_fi = &priv->frame_interval[VDIC_SRC_PAD_DIRECT];
+
+ switch (fi->pad) {
+ case VDIC_SINK_PAD_DIRECT:
+ case VDIC_SINK_PAD_IDMAC:
+ /*
+ * No limits on the input frame interval; reset the output
+ * interval to match (doubled for the direct CSI path).
+ */
+ *output_fi = fi->interval;
+ if (priv->csi_direct)
+ output_fi->denominator *= 2;
+ break;
+ case VDIC_SRC_PAD_DIRECT:
+ /*
+ * frame rate at output pad is double input
+ * rate when using direct CSI->VDIC pipeline.
+ *
+ * TODO: implement VDIC frame skipping
+ */
+ fi->interval = *input_fi;
+ if (priv->csi_direct)
+ fi->interval.denominator *= 2;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ priv->frame_interval[fi->pad] = fi->interval;
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+/*
+ * retrieve our pads parsed from the OF graph by the media device
+ */
+static int vdic_registered(struct v4l2_subdev *sd)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+ int i, ret;
+ u32 code;
+
+ /* get media device */
+ priv->md = dev_get_drvdata(sd->v4l2_dev->dev);
+
+ for (i = 0; i < VDIC_NUM_PADS; i++) {
+ priv->pad[i].flags = (i == VDIC_SRC_PAD_DIRECT) ?
+ MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
+
+ code = 0;
+ if (i != VDIC_SINK_PAD_IDMAC)
+ imx_media_enum_ipu_format(&code, 0, CS_SEL_YUV);
+
+ /* set a default mbus format */
+ ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
+ 640, 480, code, V4L2_FIELD_NONE,
+ &priv->cc[i]);
+ if (ret)
+ return ret;
+
+ /* init default frame interval */
+ priv->frame_interval[i].numerator = 1;
+ priv->frame_interval[i].denominator = 30;
+ if (i == VDIC_SRC_PAD_DIRECT)
+ priv->frame_interval[i].denominator *= 2;
+ }
+
+ priv->active_input_pad = VDIC_SINK_PAD_DIRECT;
+
+ ret = vdic_init_controls(priv);
+ if (ret)
+ return ret;
+
+ ret = media_entity_pads_init(&sd->entity, VDIC_NUM_PADS, priv->pad);
+ if (ret)
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+
+ return ret;
+}
+
+static void vdic_unregistered(struct v4l2_subdev *sd)
+{
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+
+ v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
+}
+
+static const struct v4l2_subdev_pad_ops vdic_pad_ops = {
+ .enum_mbus_code = vdic_enum_mbus_code,
+ .get_fmt = vdic_get_fmt,
+ .set_fmt = vdic_set_fmt,
+ .link_validate = vdic_link_validate,
+};
+
+static const struct v4l2_subdev_video_ops vdic_video_ops = {
+ .g_frame_interval = vdic_g_frame_interval,
+ .s_frame_interval = vdic_s_frame_interval,
+ .s_stream = vdic_s_stream,
+};
+
+static const struct media_entity_operations vdic_entity_ops = {
+ .link_setup = vdic_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_ops vdic_subdev_ops = {
+ .video = &vdic_video_ops,
+ .pad = &vdic_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops vdic_internal_ops = {
+ .registered = vdic_registered,
+ .unregistered = vdic_unregistered,
+};
+
+static int imx_vdic_probe(struct platform_device *pdev)
+{
+ struct imx_media_internal_sd_platformdata *pdata;
+ struct vdic_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, &priv->sd);
+ priv->dev = &pdev->dev;
+
+ pdata = priv->dev->platform_data;
+ priv->ipu_id = pdata->ipu_id;
+
+ v4l2_subdev_init(&priv->sd, &vdic_subdev_ops);
+ v4l2_set_subdevdata(&priv->sd, priv);
+ priv->sd.internal_ops = &vdic_internal_ops;
+ priv->sd.entity.ops = &vdic_entity_ops;
+ priv->sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+ priv->sd.dev = &pdev->dev;
+ priv->sd.owner = THIS_MODULE;
+ priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ /* get our group id */
+ priv->sd.grp_id = pdata->grp_id;
+ strncpy(priv->sd.name, pdata->sd_name, sizeof(priv->sd.name));
+
+ mutex_init(&priv->lock);
+
+ ret = v4l2_async_register_subdev(&priv->sd);
+ if (ret)
+ goto free;
+
+ return 0;
+free:
+ mutex_destroy(&priv->lock);
+ return ret;
+}
+
+static int imx_vdic_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct vdic_priv *priv = v4l2_get_subdevdata(sd);
+
+ v4l2_info(sd, "Removing\n");
+
+ v4l2_async_unregister_subdev(sd);
+ mutex_destroy(&priv->lock);
+ media_entity_cleanup(&sd->entity);
+
+ return 0;
+}
+
+static const struct platform_device_id imx_vdic_ids[] = {
+ { .name = "imx-ipuv3-vdic" },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, imx_vdic_ids);
+
+static struct platform_driver imx_vdic_driver = {
+ .probe = imx_vdic_probe,
+ .remove = imx_vdic_remove,
+ .id_table = imx_vdic_ids,
+ .driver = {
+ .name = "imx-ipuv3-vdic",
+ },
+};
+module_platform_driver(imx_vdic_driver);
+
+MODULE_DESCRIPTION("i.MX VDIC subdev driver");
+MODULE_AUTHOR("Steve Longerbeam <[email protected]>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx-ipuv3-vdic");
diff --git a/drivers/staging/media/imx/imx-media.h b/drivers/staging/media/imx/imx-media.h
new file mode 100644
index 000000000000..d409170632bd
--- /dev/null
+++ b/drivers/staging/media/imx/imx-media.h
@@ -0,0 +1,325 @@
+/*
+ * V4L2 Media Controller Driver for Freescale i.MX5/6 SOC
+ *
+ * Copyright (c) 2016 Mentor Graphics Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _IMX_MEDIA_H
+#define _IMX_MEDIA_H
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+#include <video/imx-ipu-v3.h>
+
+/*
+ * This is somewhat arbitrary, but we need at least:
+ * - 4 video devices per IPU
+ * - 3 IC subdevs per IPU
+ * - 1 VDIC subdev per IPU
+ * - 2 CSI subdevs per IPU
+ * - 1 mipi-csi2 receiver subdev
+ * - 2 video-mux subdevs
+ * - 2 camera sensor subdevs per IPU (1 parallel, 1 mipi-csi2)
+ *
+ */
+/* max video devices */
+#define IMX_MEDIA_MAX_VDEVS 8
+/* max subdevices */
+#define IMX_MEDIA_MAX_SUBDEVS 32
+/* max pads per subdev */
+#define IMX_MEDIA_MAX_PADS 16
+/* max links per pad */
+#define IMX_MEDIA_MAX_LINKS 8
+
+/*
+ * Pad definitions for the subdevs with multiple source or
+ * sink pads
+ */
+
+/* ipu_csi */
+enum {
+ CSI_SINK_PAD = 0,
+ CSI_SRC_PAD_DIRECT,
+ CSI_SRC_PAD_IDMAC,
+ CSI_NUM_PADS,
+};
+
+#define CSI_NUM_SINK_PADS 1
+#define CSI_NUM_SRC_PADS 2
+
+/* ipu_vdic */
+enum {
+ VDIC_SINK_PAD_DIRECT = 0,
+ VDIC_SINK_PAD_IDMAC,
+ VDIC_SRC_PAD_DIRECT,
+ VDIC_NUM_PADS,
+};
+
+#define VDIC_NUM_SINK_PADS 2
+#define VDIC_NUM_SRC_PADS 1
+
+/* ipu_ic_prp */
+enum {
+ PRP_SINK_PAD = 0,
+ PRP_SRC_PAD_PRPENC,
+ PRP_SRC_PAD_PRPVF,
+ PRP_NUM_PADS,
+};
+
+#define PRP_NUM_SINK_PADS 1
+#define PRP_NUM_SRC_PADS 2
+
+/* ipu_ic_prpencvf */
+enum {
+ PRPENCVF_SINK_PAD = 0,
+ PRPENCVF_SRC_PAD,
+ PRPENCVF_NUM_PADS,
+};
+
+#define PRPENCVF_NUM_SINK_PADS 1
+#define PRPENCVF_NUM_SRC_PADS 1
+
+/* How long to wait (in msec) for EOF interrupts in the buffer-capture subdevs */
+#define IMX_MEDIA_EOF_TIMEOUT 1000
+
+struct imx_media_pixfmt {
+ u32 fourcc;
+ u32 codes[4];
+ int bpp; /* total bpp */
+ enum ipu_color_space cs;
+ bool planar; /* is a planar format */
+ bool bayer; /* is a raw bayer format */
+ bool ipufmt; /* is one of the IPU internal formats */
+};
+
+struct imx_media_buffer {
+ struct vb2_v4l2_buffer vbuf; /* v4l buffer must be first */
+ struct list_head list;
+};
+
+struct imx_media_video_dev {
+ struct video_device *vfd;
+
+ /* the user format */
+ struct v4l2_format fmt;
+ const struct imx_media_pixfmt *cc;
+};
+
+static inline struct imx_media_buffer *to_imx_media_vb(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ return container_of(vbuf, struct imx_media_buffer, vbuf);
+}
+
+struct imx_media_link {
+ struct device_node *remote_sd_node;
+ char remote_devname[32];
+ int local_pad;
+ int remote_pad;
+};
+
+struct imx_media_pad {
+ struct media_pad pad;
+ struct imx_media_link link[IMX_MEDIA_MAX_LINKS];
+ bool devnode; /* does this pad link to a device node */
+ int num_links;
+
+ /*
+ * list of video devices that can be reached from this pad,
+ * list is only valid for source pads.
+ */
+ struct imx_media_video_dev *vdev[IMX_MEDIA_MAX_VDEVS];
+ int num_vdevs;
+};
+
+struct imx_media_internal_sd_platformdata {
+ char sd_name[V4L2_SUBDEV_NAME_SIZE];
+ u32 grp_id;
+ int ipu_id;
+};
+
+struct imx_media_subdev {
+ struct v4l2_async_subdev asd;
+ struct v4l2_subdev *sd; /* set when bound */
+
+ struct imx_media_pad pad[IMX_MEDIA_MAX_PADS];
+ int num_sink_pads;
+ int num_src_pads;
+
+ /* the platform device if this is an internal subdev */
+ struct platform_device *pdev;
+ /* the devname is needed for async devname match */
+ char devname[32];
+
+ /* if this is a sensor */
+ struct v4l2_fwnode_endpoint sensor_ep;
+};
+
+struct imx_media_dev {
+ struct media_device md;
+ struct v4l2_device v4l2_dev;
+
+ /* the pipeline object */
+ struct media_pipeline pipe;
+
+ struct mutex mutex; /* protect elements below */
+
+ /* master subdevice list */
+ struct imx_media_subdev subdev[IMX_MEDIA_MAX_SUBDEVS];
+ int num_subdevs;
+
+ /* master video device list */
+ struct imx_media_video_dev *vdev[IMX_MEDIA_MAX_VDEVS];
+ int num_vdevs;
+
+ /* IPUs this media driver controls, valid after subdevs are bound */
+ struct ipu_soc *ipu[2];
+
+ /* for async subdev registration */
+ struct v4l2_async_subdev *async_ptrs[IMX_MEDIA_MAX_SUBDEVS];
+ struct v4l2_async_notifier subdev_notifier;
+};
+
+enum codespace_sel {
+ CS_SEL_YUV = 0,
+ CS_SEL_RGB,
+ CS_SEL_ANY,
+};
+
+const struct imx_media_pixfmt *
+imx_media_find_format(u32 fourcc, enum codespace_sel cs_sel, bool allow_bayer);
+int imx_media_enum_format(u32 *fourcc, u32 index, enum codespace_sel cs_sel);
+const struct imx_media_pixfmt *
+imx_media_find_mbus_format(u32 code, enum codespace_sel cs_sel,
+ bool allow_bayer);
+int imx_media_enum_mbus_format(u32 *code, u32 index, enum codespace_sel cs_sel,
+ bool allow_bayer);
+const struct imx_media_pixfmt *
+imx_media_find_ipu_format(u32 code, enum codespace_sel cs_sel);
+int imx_media_enum_ipu_format(u32 *code, u32 index, enum codespace_sel cs_sel);
+
+int imx_media_init_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
+ u32 width, u32 height, u32 code, u32 field,
+ const struct imx_media_pixfmt **cc);
+void imx_media_fill_default_mbus_fields(struct v4l2_mbus_framefmt *tryfmt,
+ struct v4l2_mbus_framefmt *fmt,
+ bool ic_route);
+int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
+ struct v4l2_mbus_framefmt *mbus,
+ const struct imx_media_pixfmt *cc);
+int imx_media_mbus_fmt_to_ipu_image(struct ipu_image *image,
+ struct v4l2_mbus_framefmt *mbus);
+int imx_media_ipu_image_to_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
+ struct ipu_image *image);
+
+struct imx_media_subdev *
+imx_media_find_async_subdev(struct imx_media_dev *imxmd,
+ struct device_node *np,
+ const char *devname);
+struct imx_media_subdev *
+imx_media_add_async_subdev(struct imx_media_dev *imxmd,
+ struct device_node *np,
+ struct platform_device *pdev);
+int imx_media_add_pad_link(struct imx_media_dev *imxmd,
+ struct imx_media_pad *pad,
+ struct device_node *remote_node,
+ const char *remote_devname,
+ int local_pad, int remote_pad);
+
+void imx_media_grp_id_to_sd_name(char *sd_name, int sz,
+ u32 grp_id, int ipu_id);
+
+int imx_media_add_internal_subdevs(struct imx_media_dev *imxmd,
+ struct imx_media_subdev *csi[4]);
+void imx_media_remove_internal_subdevs(struct imx_media_dev *imxmd);
+
+struct imx_media_subdev *
+imx_media_find_subdev_by_sd(struct imx_media_dev *imxmd,
+ struct v4l2_subdev *sd);
+struct imx_media_subdev *
+imx_media_find_subdev_by_id(struct imx_media_dev *imxmd,
+ u32 grp_id);
+int imx_media_add_video_device(struct imx_media_dev *imxmd,
+ struct imx_media_video_dev *vdev);
+int imx_media_find_mipi_csi2_channel(struct imx_media_dev *imxmd,
+ struct media_entity *start_entity);
+struct imx_media_subdev *
+imx_media_find_upstream_subdev(struct imx_media_dev *imxmd,
+ struct media_entity *start_entity,
+ u32 grp_id);
+struct imx_media_subdev *
+__imx_media_find_sensor(struct imx_media_dev *imxmd,
+ struct media_entity *start_entity);
+struct imx_media_subdev *
+imx_media_find_sensor(struct imx_media_dev *imxmd,
+ struct media_entity *start_entity);
+
+struct imx_media_dma_buf {
+ void *virt;
+ dma_addr_t phys;
+ unsigned long len;
+};
+
+void imx_media_free_dma_buf(struct imx_media_dev *imxmd,
+ struct imx_media_dma_buf *buf);
+int imx_media_alloc_dma_buf(struct imx_media_dev *imxmd,
+ struct imx_media_dma_buf *buf,
+ int size);
+
+int imx_media_pipeline_set_stream(struct imx_media_dev *imxmd,
+ struct media_entity *entity,
+ bool on);
+
+/* imx-media-fim.c */
+struct imx_media_fim;
+void imx_media_fim_eof_monitor(struct imx_media_fim *fim, struct timespec *ts);
+int imx_media_fim_set_stream(struct imx_media_fim *fim,
+ const struct v4l2_fract *frame_interval,
+ bool on);
+int imx_media_fim_add_controls(struct imx_media_fim *fim);
+struct imx_media_fim *imx_media_fim_init(struct v4l2_subdev *sd);
+void imx_media_fim_free(struct imx_media_fim *fim);
+
+/* imx-media-of.c */
+struct imx_media_subdev *
+imx_media_of_find_subdev(struct imx_media_dev *imxmd,
+ struct device_node *np,
+ const char *name);
+int imx_media_of_parse(struct imx_media_dev *dev,
+ struct imx_media_subdev *(*csi)[4],
+ struct device_node *np);
+
+/* imx-media-capture.c */
+struct imx_media_video_dev *
+imx_media_capture_device_init(struct v4l2_subdev *src_sd, int pad);
+void imx_media_capture_device_remove(struct imx_media_video_dev *vdev);
+int imx_media_capture_device_register(struct imx_media_video_dev *vdev);
+void imx_media_capture_device_unregister(struct imx_media_video_dev *vdev);
+struct imx_media_buffer *
+imx_media_capture_device_next_buf(struct imx_media_video_dev *vdev);
+void imx_media_capture_device_set_format(struct imx_media_video_dev *vdev,
+ struct v4l2_pix_format *pix);
+void imx_media_capture_device_error(struct imx_media_video_dev *vdev);
+
+/* subdev group ids */
+#define IMX_MEDIA_GRP_ID_SENSOR (1 << 8)
+#define IMX_MEDIA_GRP_ID_VIDMUX (1 << 9)
+#define IMX_MEDIA_GRP_ID_CSI2 (1 << 10)
+#define IMX_MEDIA_GRP_ID_CSI_BIT 11
+#define IMX_MEDIA_GRP_ID_CSI (0x3 << IMX_MEDIA_GRP_ID_CSI_BIT)
+#define IMX_MEDIA_GRP_ID_CSI0 (1 << IMX_MEDIA_GRP_ID_CSI_BIT)
+#define IMX_MEDIA_GRP_ID_CSI1 (2 << IMX_MEDIA_GRP_ID_CSI_BIT)
+#define IMX_MEDIA_GRP_ID_VDIC (1 << 13)
+#define IMX_MEDIA_GRP_ID_IC_PRP (1 << 14)
+#define IMX_MEDIA_GRP_ID_IC_PRPENC (1 << 15)
+#define IMX_MEDIA_GRP_ID_IC_PRPVF (1 << 16)
+
+#endif
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
new file mode 100644
index 000000000000..5061f3f524fd
--- /dev/null
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -0,0 +1,698 @@
+/*
+ * MIPI CSI-2 Receiver Subdev for Freescale i.MX6 SOC.
+ *
+ * Copyright (c) 2012-2017 Mentor Graphics Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include "imx-media.h"
+
+/*
+ * There must be 5 pads: 1 input pad from the sensor, and
+ * the 4 virtual channel output pads.
+ */
+#define CSI2_SINK_PAD 0
+#define CSI2_NUM_SINK_PADS 1
+#define CSI2_NUM_SRC_PADS 4
+#define CSI2_NUM_PADS 5
+
+/*
+ * The default maximum bit-rate per lane in Mbps, if the
+ * source subdev does not provide V4L2_CID_LINK_FREQ.
+ */
+#define CSI2_DEFAULT_MAX_MBPS 849
+
+struct csi2_dev {
+ struct device *dev;
+ struct v4l2_subdev sd;
+ struct media_pad pad[CSI2_NUM_PADS];
+ struct clk *dphy_clk;
+ struct clk *pllref_clk;
+ struct clk *pix_clk; /* what is this? */
+ void __iomem *base;
+ struct v4l2_fwnode_bus_mipi_csi2 bus;
+
+ /* lock to protect all members below */
+ struct mutex lock;
+
+ struct v4l2_mbus_framefmt format_mbus;
+
+ int stream_count;
+ struct v4l2_subdev *src_sd;
+ bool sink_linked[CSI2_NUM_SRC_PADS];
+};
+
+#define DEVICE_NAME "imx6-mipi-csi2"
+
+/* Register offsets */
+#define CSI2_VERSION 0x000
+#define CSI2_N_LANES 0x004
+#define CSI2_PHY_SHUTDOWNZ 0x008
+#define CSI2_DPHY_RSTZ 0x00c
+#define CSI2_RESETN 0x010
+#define CSI2_PHY_STATE 0x014
+#define PHY_STOPSTATEDATA_BIT 4
+#define PHY_STOPSTATEDATA(n) BIT(PHY_STOPSTATEDATA_BIT + (n))
+#define PHY_RXCLKACTIVEHS BIT(8)
+#define PHY_RXULPSCLKNOT BIT(9)
+#define PHY_STOPSTATECLK BIT(10)
+#define CSI2_DATA_IDS_1 0x018
+#define CSI2_DATA_IDS_2 0x01c
+#define CSI2_ERR1 0x020
+#define CSI2_ERR2 0x024
+#define CSI2_MSK1 0x028
+#define CSI2_MSK2 0x02c
+#define CSI2_PHY_TST_CTRL0 0x030
+#define PHY_TESTCLR BIT(0)
+#define PHY_TESTCLK BIT(1)
+#define CSI2_PHY_TST_CTRL1 0x034
+#define PHY_TESTEN BIT(16)
+/*
+ * i.MX CSI2IPU Gasket registers follow. The CSI2IPU gasket is
+ * not part of the MIPI CSI-2 core, but its registers fall in the
+ * same register map range.
+ */
+#define CSI2IPU_GASKET 0xf00
+#define CSI2IPU_YUV422_YUYV BIT(2)
+
+static inline struct csi2_dev *sd_to_dev(struct v4l2_subdev *sdev)
+{
+ return container_of(sdev, struct csi2_dev, sd);
+}
+
+/*
+ * The required sequence of MIPI CSI-2 startup as specified in the i.MX6
+ * reference manual is as follows:
+ *
+ * 1. Deassert presetn signal (global reset).
+ * It's not clear what this "global reset" signal is (maybe APB
+ * global reset), but in any case this step would probably be
+ * carried out during driver load in csi2_probe().
+ *
+ * 2. Configure MIPI Camera Sensor to put all Tx lanes in LP-11 state.
+ * This must be carried out by the MIPI sensor's s_power(ON) subdev
+ * op.
+ *
+ * 3. D-PHY initialization.
+ * 4. CSI2 Controller programming (Set N_LANES, deassert PHY_SHUTDOWNZ,
+ * deassert PHY_RSTZ, deassert CSI2_RESETN).
+ * 5. Read the PHY status register (PHY_STATE) to confirm that all data and
+ * clock lanes of the D-PHY are in LP-11 state.
+ * 6. Configure the MIPI Camera Sensor to start transmitting a clock on the
+ * D-PHY clock lane.
+ * 7. CSI2 Controller programming - Read the PHY status register (PHY_STATE)
+ * to confirm that the D-PHY is receiving a clock on the D-PHY clock lane.
+ *
+ * All steps 3 through 7 are carried out by csi2_s_stream(ON) here. Step
+ * 6 is accomplished by calling the source subdev's s_stream(ON) between
+ * steps 5 and 7.
+ */
+
+static void csi2_enable(struct csi2_dev *csi2, bool enable)
+{
+ if (enable) {
+ writel(0x1, csi2->base + CSI2_PHY_SHUTDOWNZ);
+ writel(0x1, csi2->base + CSI2_DPHY_RSTZ);
+ writel(0x1, csi2->base + CSI2_RESETN);
+ } else {
+ writel(0x0, csi2->base + CSI2_PHY_SHUTDOWNZ);
+ writel(0x0, csi2->base + CSI2_DPHY_RSTZ);
+ writel(0x0, csi2->base + CSI2_RESETN);
+ }
+}
+
+static void csi2_set_lanes(struct csi2_dev *csi2)
+{
+ int lanes = csi2->bus.num_data_lanes;
+
+ writel(lanes - 1, csi2->base + CSI2_N_LANES);
+}
+
+static void dw_mipi_csi2_phy_write(struct csi2_dev *csi2,
+ u32 test_code, u32 test_data)
+{
+ /* Clear PHY test interface */
+ writel(PHY_TESTCLR, csi2->base + CSI2_PHY_TST_CTRL0);
+ writel(0x0, csi2->base + CSI2_PHY_TST_CTRL1);
+ writel(0x0, csi2->base + CSI2_PHY_TST_CTRL0);
+
+ /* Raise test interface strobe signal */
+ writel(PHY_TESTCLK, csi2->base + CSI2_PHY_TST_CTRL0);
+
+ /* Configure address write on falling edge and lower strobe signal */
+ writel(PHY_TESTEN | test_code, csi2->base + CSI2_PHY_TST_CTRL1);
+ writel(0x0, csi2->base + CSI2_PHY_TST_CTRL0);
+
+ /* Configure data write on rising edge and raise strobe signal */
+ writel(test_data, csi2->base + CSI2_PHY_TST_CTRL1);
+ writel(PHY_TESTCLK, csi2->base + CSI2_PHY_TST_CTRL0);
+
+ /* Clear strobe signal */
+ writel(0x0, csi2->base + CSI2_PHY_TST_CTRL0);
+}
+
+/*
+ * This table is based on the table documented at
+ * https://community.nxp.com/docs/DOC-94312. It assumes
+ * a 27MHz D-PHY pll reference clock.
+ */
+static const struct {
+ u32 max_mbps;
+ u32 hsfreqrange_sel;
+} hsfreq_map[] = {
+ { 90, 0x00}, {100, 0x20}, {110, 0x40}, {125, 0x02},
+ {140, 0x22}, {150, 0x42}, {160, 0x04}, {180, 0x24},
+ {200, 0x44}, {210, 0x06}, {240, 0x26}, {250, 0x46},
+ {270, 0x08}, {300, 0x28}, {330, 0x48}, {360, 0x2a},
+ {400, 0x4a}, {450, 0x0c}, {500, 0x2c}, {550, 0x0e},
+ {600, 0x2e}, {650, 0x10}, {700, 0x30}, {750, 0x12},
+ {800, 0x32}, {850, 0x14}, {900, 0x34}, {950, 0x54},
+ {1000, 0x74},
+};
+
+static int max_mbps_to_hsfreqrange_sel(u32 max_mbps)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hsfreq_map); i++)
+ if (hsfreq_map[i].max_mbps > max_mbps)
+ return hsfreq_map[i].hsfreqrange_sel;
+
+ return -EINVAL;
+}
+
+static int csi2_dphy_init(struct csi2_dev *csi2)
+{
+ struct v4l2_ctrl *ctrl;
+ u32 mbps_per_lane;
+ int sel;
+
+ ctrl = v4l2_ctrl_find(csi2->src_sd->ctrl_handler,
+ V4L2_CID_LINK_FREQ);
+ if (!ctrl)
+ mbps_per_lane = CSI2_DEFAULT_MAX_MBPS;
+ else
+ mbps_per_lane = DIV_ROUND_UP_ULL(2 * ctrl->qmenu_int[ctrl->val],
+ USEC_PER_SEC);
+
+ sel = max_mbps_to_hsfreqrange_sel(mbps_per_lane);
+ if (sel < 0)
+ return sel;
+
+ dw_mipi_csi2_phy_write(csi2, 0x44, sel);
+
+ return 0;
+}
+
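A worked illustration of the rate selection above, assuming a hypothetical 396 MHz V4L2_CID_LINK_FREQ: csi2_dphy_init() derives 2 * 396000000 / 1000000 = 792 Mbps per lane, and max_mbps_to_hsfreqrange_sel() picks the first table entry whose max_mbps exceeds that value, {800, 0x32}, which is then programmed through D-PHY test code 0x44. A minimal standalone sketch of the same arithmetic (table excerpt copied from hsfreq_map; the link frequency is a made-up example):

/* Standalone sketch of the hsfreqrange selection done by csi2_dphy_init().
 * The 396 MHz link frequency is a hypothetical example value.
 */
#include <stdio.h>

static const struct { unsigned int max_mbps, sel; } map[] = {
	{ 750, 0x12 }, { 800, 0x32 }, { 850, 0x14 },	/* excerpt of hsfreq_map */
};

int main(void)
{
	unsigned long long link_freq = 396000000ULL;	/* V4L2_CID_LINK_FREQ, Hz */
	unsigned int mbps = (2 * link_freq + 999999) / 1000000; /* DIV_ROUND_UP */
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		if (map[i].max_mbps > mbps) {
			printf("%u Mbps/lane -> hsfreqrange_sel 0x%02x\n",
			       mbps, map[i].sel);
			return 0;
		}
	}
	return 1;	/* no match: csi2_dphy_init() would return -EINVAL */
}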
+/*
+ * Waits for the ultra-low-power state on the D-PHY clock lane. This is
+ * currently unused and may not be needed at all, but it is kept around
+ * just in case.
+ */
+static int __maybe_unused csi2_dphy_wait_ulp(struct csi2_dev *csi2)
+{
+ u32 reg;
+ int ret;
+
+ /* wait for ULP on clock lane */
+ ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
+ !(reg & PHY_RXULPSCLKNOT), 0, 500000);
+ if (ret) {
+ v4l2_err(&csi2->sd, "ULP timeout, phy_state = 0x%08x\n", reg);
+ return ret;
+ }
+
+ /* wait until no errors on bus */
+ ret = readl_poll_timeout(csi2->base + CSI2_ERR1, reg,
+ reg == 0x0, 0, 500000);
+ if (ret) {
+ v4l2_err(&csi2->sd, "stable bus timeout, err1 = 0x%08x\n", reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Waits for low-power LP-11 state on data and clock lanes. */
+static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
+{
+ u32 mask, reg;
+ int ret;
+
+ /* expect a stop-state bit for each active data lane plus the clock lane */
+ mask = PHY_STOPSTATECLK |
+ (((1 << csi2->bus.num_data_lanes) - 1) << PHY_STOPSTATEDATA_BIT);
+
+ ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
+ (reg & mask) == mask, 0, 500000);
+ if (ret) {
+ v4l2_err(&csi2->sd, "LP-11 timeout, phy_state = 0x%08x\n", reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Waits for an active clock on the clock lane. */
+static int csi2_dphy_wait_clock_lane(struct csi2_dev *csi2)
+{
+ u32 reg;
+ int ret;
+
+ ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
+ (reg & PHY_RXCLKACTIVEHS), 0, 500000);
+ if (ret) {
+ v4l2_err(&csi2->sd, "clock lane timeout, phy_state = 0x%08x\n",
+ reg);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Setup the i.MX CSI2IPU Gasket */
+static void csi2ipu_gasket_init(struct csi2_dev *csi2)
+{
+ u32 reg = 0;
+
+ switch (csi2->format_mbus.code) {
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ reg = CSI2IPU_YUV422_YUYV;
+ break;
+ default:
+ break;
+ }
+
+ writel(reg, csi2->base + CSI2IPU_GASKET);
+}
+
+static int csi2_start(struct csi2_dev *csi2)
+{
+ int ret;
+
+ ret = clk_prepare_enable(csi2->pix_clk);
+ if (ret)
+ return ret;
+
+ /* setup the gasket */
+ csi2ipu_gasket_init(csi2);
+
+ /* Step 3 */
+ ret = csi2_dphy_init(csi2);
+ if (ret)
+ goto err_disable_clk;
+
+ /* Step 4 */
+ csi2_set_lanes(csi2);
+ csi2_enable(csi2, true);
+
+ /* Step 5 */
+ ret = csi2_dphy_wait_stopstate(csi2);
+ if (ret)
+ goto err_assert_reset;
+
+ /* Step 6 */
+ ret = v4l2_subdev_call(csi2->src_sd, video, s_stream, 1);
+ ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
+ if (ret)
+ goto err_assert_reset;
+
+ /* Step 7 */
+ ret = csi2_dphy_wait_clock_lane(csi2);
+ if (ret)
+ goto err_stop_upstream;
+
+ return 0;
+
+err_stop_upstream:
+ v4l2_subdev_call(csi2->src_sd, video, s_stream, 0);
+err_assert_reset:
+ csi2_enable(csi2, false);
+err_disable_clk:
+ clk_disable_unprepare(csi2->pix_clk);
+ return ret;
+}
+
+static void csi2_stop(struct csi2_dev *csi2)
+{
+ /* stop upstream */
+ v4l2_subdev_call(csi2->src_sd, video, s_stream, 0);
+
+ csi2_enable(csi2, false);
+ clk_disable_unprepare(csi2->pix_clk);
+}
+
+/*
+ * V4L2 subdev operations.
+ */
+
+static int csi2_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+ int i, ret = 0;
+
+ mutex_lock(&csi2->lock);
+
+ if (!csi2->src_sd) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ for (i = 0; i < CSI2_NUM_SRC_PADS; i++) {
+ if (csi2->sink_linked[i])
+ break;
+ }
+ if (i >= CSI2_NUM_SRC_PADS) {
+ ret = -EPIPE;
+ goto out;
+ }
+
+ /*
+ * enable/disable streaming only if stream_count is
+ * going from 0 to 1 / 1 to 0.
+ */
+ if (csi2->stream_count != !enable)
+ goto update_count;
+
+ dev_dbg(csi2->dev, "stream %s\n", enable ? "ON" : "OFF");
+ if (enable)
+ ret = csi2_start(csi2);
+ else
+ csi2_stop(csi2);
+ if (ret)
+ goto out;
+
+update_count:
+ csi2->stream_count += enable ? 1 : -1;
+ if (csi2->stream_count < 0)
+ csi2->stream_count = 0;
+out:
+ mutex_unlock(&csi2->lock);
+ return ret;
+}
+
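The stream_count bookkeeping in csi2_s_stream() means the hardware is only started on the first enable and stopped on the last disable; intermediate calls merely adjust the counter. A toy standalone model of that gating, with a call sequence chosen purely for illustration:

/* Toy model of the csi2_s_stream() counting logic: touch the hardware
 * only on the 0 -> 1 and 1 -> 0 transitions of the stream count.
 */
#include <stdio.h>

static int stream_count;

static void s_stream(int enable)
{
	if (stream_count == (enable ? 0 : 1))
		printf("hardware %s\n", enable ? "started" : "stopped");

	stream_count += enable ? 1 : -1;
	if (stream_count < 0)
		stream_count = 0;
}

int main(void)
{
	s_stream(1);	/* 0 -> 1: starts the hardware */
	s_stream(1);	/* 1 -> 2: counter only        */
	s_stream(0);	/* 2 -> 1: counter only        */
	s_stream(0);	/* 1 -> 0: stops the hardware  */
	return 0;
}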
+static int csi2_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+ struct v4l2_subdev *remote_sd;
+ int ret = 0;
+
+ dev_dbg(csi2->dev, "link setup %s -> %s", remote->entity->name,
+ local->entity->name);
+
+ remote_sd = media_entity_to_v4l2_subdev(remote->entity);
+
+ mutex_lock(&csi2->lock);
+
+ if (local->flags & MEDIA_PAD_FL_SOURCE) {
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (csi2->sink_linked[local->index - 1]) {
+ ret = -EBUSY;
+ goto out;
+ }
+ csi2->sink_linked[local->index - 1] = true;
+ } else {
+ csi2->sink_linked[local->index - 1] = false;
+ }
+ } else {
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (csi2->src_sd) {
+ ret = -EBUSY;
+ goto out;
+ }
+ csi2->src_sd = remote_sd;
+ } else {
+ csi2->src_sd = NULL;
+ }
+ }
+
+out:
+ mutex_unlock(&csi2->lock);
+ return ret;
+}
+
+static int csi2_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+ struct v4l2_mbus_framefmt *fmt;
+
+ mutex_lock(&csi2->lock);
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt = v4l2_subdev_get_try_format(&csi2->sd, cfg,
+ sdformat->pad);
+ else
+ fmt = &csi2->format_mbus;
+
+ sdformat->format = *fmt;
+
+ mutex_unlock(&csi2->lock);
+
+ return 0;
+}
+
+static int csi2_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *sdformat)
+{
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+ int ret = 0;
+
+ if (sdformat->pad >= CSI2_NUM_PADS)
+ return -EINVAL;
+
+ mutex_lock(&csi2->lock);
+
+ if (csi2->stream_count > 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Output pads mirror active input pad, no limits on input pads */
+ if (sdformat->pad != CSI2_SINK_PAD)
+ sdformat->format = csi2->format_mbus;
+
+ if (sdformat->which == V4L2_SUBDEV_FORMAT_TRY)
+ cfg->try_fmt = sdformat->format;
+ else
+ csi2->format_mbus = sdformat->format;
+out:
+ mutex_unlock(&csi2->lock);
+ return ret;
+}
+
+/*
+ * retrieve our pads parsed from the OF graph by the media device
+ */
+static int csi2_registered(struct v4l2_subdev *sd)
+{
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+ int i, ret;
+
+ for (i = 0; i < CSI2_NUM_PADS; i++) {
+ csi2->pad[i].flags = (i == CSI2_SINK_PAD) ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
+
+ /* set a default mbus format */
+ ret = imx_media_init_mbus_fmt(&csi2->format_mbus,
+ 640, 480, 0, V4L2_FIELD_NONE, NULL);
+ if (ret)
+ return ret;
+
+ return media_entity_pads_init(&sd->entity, CSI2_NUM_PADS, csi2->pad);
+}
+
+static const struct media_entity_operations csi2_entity_ops = {
+ .link_setup = csi2_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_video_ops csi2_video_ops = {
+ .s_stream = csi2_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
+ .get_fmt = csi2_get_fmt,
+ .set_fmt = csi2_set_fmt,
+};
+
+static const struct v4l2_subdev_ops csi2_subdev_ops = {
+ .video = &csi2_video_ops,
+ .pad = &csi2_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops csi2_internal_ops = {
+ .registered = csi2_registered,
+};
+
+static int csi2_parse_endpoints(struct csi2_dev *csi2)
+{
+ struct device_node *node = csi2->dev->of_node;
+ struct device_node *epnode;
+ struct v4l2_fwnode_endpoint ep;
+
+ epnode = of_graph_get_endpoint_by_regs(node, 0, -1);
+ if (!epnode) {
+ v4l2_err(&csi2->sd, "failed to get sink endpoint node\n");
+ return -EINVAL;
+ }
+
+ v4l2_fwnode_endpoint_parse(of_fwnode_handle(epnode), &ep);
+ of_node_put(epnode);
+
+ if (ep.bus_type != V4L2_MBUS_CSI2) {
+ v4l2_err(&csi2->sd, "invalid bus type, must be MIPI CSI2\n");
+ return -EINVAL;
+ }
+
+ csi2->bus = ep.bus.mipi_csi2;
+
+ dev_dbg(csi2->dev, "data lanes: %d\n", csi2->bus.num_data_lanes);
+ dev_dbg(csi2->dev, "flags: 0x%08x\n", csi2->bus.flags);
+ return 0;
+}
+
+static int csi2_probe(struct platform_device *pdev)
+{
+ struct csi2_dev *csi2;
+ struct resource *res;
+ int ret;
+
+ csi2 = devm_kzalloc(&pdev->dev, sizeof(*csi2), GFP_KERNEL);
+ if (!csi2)
+ return -ENOMEM;
+
+ csi2->dev = &pdev->dev;
+
+ v4l2_subdev_init(&csi2->sd, &csi2_subdev_ops);
+ v4l2_set_subdevdata(&csi2->sd, &pdev->dev);
+ csi2->sd.internal_ops = &csi2_internal_ops;
+ csi2->sd.entity.ops = &csi2_entity_ops;
+ csi2->sd.dev = &pdev->dev;
+ csi2->sd.owner = THIS_MODULE;
+ csi2->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ strcpy(csi2->sd.name, DEVICE_NAME);
+ csi2->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ csi2->sd.grp_id = IMX_MEDIA_GRP_ID_CSI2;
+
+ ret = csi2_parse_endpoints(csi2);
+ if (ret)
+ return ret;
+
+ csi2->pllref_clk = devm_clk_get(&pdev->dev, "ref");
+ if (IS_ERR(csi2->pllref_clk)) {
+ v4l2_err(&csi2->sd, "failed to get pll reference clock\n");
+ ret = PTR_ERR(csi2->pllref_clk);
+ return ret;
+ }
+
+ csi2->dphy_clk = devm_clk_get(&pdev->dev, "dphy");
+ if (IS_ERR(csi2->dphy_clk)) {
+ v4l2_err(&csi2->sd, "failed to get dphy clock\n");
+ ret = PTR_ERR(csi2->dphy_clk);
+ return ret;
+ }
+
+ csi2->pix_clk = devm_clk_get(&pdev->dev, "pix");
+ if (IS_ERR(csi2->pix_clk)) {
+ v4l2_err(&csi2->sd, "failed to get pixel clock\n");
+ ret = PTR_ERR(csi2->pix_clk);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ v4l2_err(&csi2->sd, "failed to get platform resources\n");
+ return -ENODEV;
+ }
+
+ csi2->base = devm_ioremap(&pdev->dev, res->start, PAGE_SIZE);
+ if (!csi2->base) {
+ v4l2_err(&csi2->sd, "failed to map CSI-2 registers\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&csi2->lock);
+
+ ret = clk_prepare_enable(csi2->pllref_clk);
+ if (ret) {
+ v4l2_err(&csi2->sd, "failed to enable pllref_clk\n");
+ goto rmmutex;
+ }
+
+ ret = clk_prepare_enable(csi2->dphy_clk);
+ if (ret) {
+ v4l2_err(&csi2->sd, "failed to enable dphy_clk\n");
+ goto pllref_off;
+ }
+
+ platform_set_drvdata(pdev, &csi2->sd);
+
+ ret = v4l2_async_register_subdev(&csi2->sd);
+ if (ret)
+ goto dphy_off;
+
+ return 0;
+
+dphy_off:
+ clk_disable_unprepare(csi2->dphy_clk);
+pllref_off:
+ clk_disable_unprepare(csi2->pllref_clk);
+rmmutex:
+ mutex_destroy(&csi2->lock);
+ return ret;
+}
+
+static int csi2_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct csi2_dev *csi2 = sd_to_dev(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ clk_disable_unprepare(csi2->dphy_clk);
+ clk_disable_unprepare(csi2->pllref_clk);
+ mutex_destroy(&csi2->lock);
+ media_entity_cleanup(&sd->entity);
+
+ return 0;
+}
+
+static const struct of_device_id csi2_dt_ids[] = {
+ { .compatible = "fsl,imx6-mipi-csi2", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, csi2_dt_ids);
+
+static struct platform_driver csi2_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = csi2_dt_ids,
+ },
+ .probe = csi2_probe,
+ .remove = csi2_remove,
+};
+
+module_platform_driver(csi2_driver);
+
+MODULE_DESCRIPTION("i.MX5/6 MIPI CSI-2 Receiver driver");
+MODULE_AUTHOR("Steve Longerbeam <[email protected]>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/lirc/TODO b/drivers/staging/media/lirc/TODO
index cbea5d84fed3..a97800a8e127 100644
--- a/drivers/staging/media/lirc/TODO
+++ b/drivers/staging/media/lirc/TODO
@@ -1,13 +1,36 @@
-- All drivers should either be ported to ir-core, or dropped entirely
- (see drivers/media/IR/mceusb.c vs. lirc_mceusb.c in lirc cvs for an
- example of a previously completed port).
-
-- lirc_bt829 uses registers on a Mach64 VT, which has a separate kernel
- framebuffer driver (atyfb) and userland X driver (mach64). It can't
- simply be converted to a normal PCI driver, but ideally it should be
- coordinated with the other drivers.
-
-Please send patches to:
-Jarod Wilson <[email protected]>
-Greg Kroah-Hartman <[email protected]>
+1. Both ir-kbd-i2c and lirc_zilog provide support for RX events for
+the chips supported by lirc_zilog. Before moving lirc_zilog out of staging:
+
+a. ir-kbd-i2c needs a module parameter added to allow the user to tell
+ ir-kbd-i2c to ignore Z8 IR units.
+
+b. lirc_zilog should provide Rx key presses to the rc core like ir-kbd-i2c
+ does.
+
+
+2. lirc_zilog module ref-counting needs examination. It has not been
+verified that cdev and lirc_dev will take the proper module references on
+lirc_zilog to prevent removal of lirc_zilog when the /dev/lircN device node
+is open.
+
+(The good news is ref-counting of lirc_zilog internal structures appears to be
+complete. Testing has shown the cx18 module can be unloaded out from under
+irw + lircd + lirc_dev, with the /dev/lirc0 device node open, with no adverse
+effects. The cx18 module could then be reloaded and irw properly began
+receiving button presses again and ir_send worked without error.)
+
+
+3. Bridge drivers, if able, should provide a chip reset() callback
+to lirc_zilog via struct IR_i2c_init_data. cx18 and ivtv already have routines
+to perform Z8 chip resets via GPIO manipulations. This would allow lirc_zilog
+to bring the chip back to normal when it hangs, in the same places the
+original lirc_pvr150 driver code does. This is not strictly needed, so it
+is not required to move lirc_zilog out of staging.
+
+Note: Both lirc_zilog and ir-kbd-i2c support the Zilog Z8 for IR, as programmed
+and installed on Hauppauge products. When working on either module, developers
+must consider at least the following bridge drivers which mention an IR Rx unit
+at address 0x71 (indicative of a Z8):
+
+ ivtv cx18 hdpvr pvrusb2 bt8xx cx88 saa7134
diff --git a/drivers/staging/media/lirc/TODO.lirc_zilog b/drivers/staging/media/lirc/TODO.lirc_zilog
deleted file mode 100644
index a97800a8e127..000000000000
--- a/drivers/staging/media/lirc/TODO.lirc_zilog
+++ /dev/null
@@ -1,36 +0,0 @@
-1. Both ir-kbd-i2c and lirc_zilog provide support for RX events for
-the chips supported by lirc_zilog. Before moving lirc_zilog out of staging:
-
-a. ir-kbd-i2c needs a module parameter added to allow the user to tell
- ir-kbd-i2c to ignore Z8 IR units.
-
-b. lirc_zilog should provide Rx key presses to the rc core like ir-kbd-i2c
- does.
-
-
-2. lirc_zilog module ref-counting need examination. It has not been
-verified that cdev and lirc_dev will take the proper module references on
-lirc_zilog to prevent removal of lirc_zilog when the /dev/lircN device node
-is open.
-
-(The good news is ref-counting of lirc_zilog internal structures appears to be
-complete. Testing has shown the cx18 module can be unloaded out from under
-irw + lircd + lirc_dev, with the /dev/lirc0 device node open, with no adverse
-effects. The cx18 module could then be reloaded and irw properly began
-receiving button presses again and ir_send worked without error.)
-
-
-3. Bridge drivers, if able, should provide a chip reset() callback
-to lirc_zilog via struct IR_i2c_init_data. cx18 and ivtv already have routines
-to perform Z8 chip resets via GPIO manipulations. This would allow lirc_zilog
-to bring the chip back to normal when it hangs, in the same places the
-original lirc_pvr150 driver code does. This is not strictly needed, so it
-is not required to move lirc_zilog out of staging.
-
-Note: Both lirc_zilog and ir-kbd-i2c support the Zilog Z8 for IR, as programmed
-and installed on Hauppauge products. When working on either module, developers
-must consider at least the following bridge drivers which mention an IR Rx unit
-at address 0x71 (indicative of a Z8):
-
- ivtv cx18 hdpvr pvrusb2 bt8xx cx88 saa7134
-
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 8ce1db04414a..015e41bd036e 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -156,7 +156,6 @@ static struct mutex tx_data_lock;
/* module parameters */
static bool debug; /* debug output */
static bool tx_only; /* only handle the IR Tx function */
-static int minor = -1; /* minor number */
/* struct IR reference counting */
@@ -184,10 +183,11 @@ static void release_ir_device(struct kref *ref)
* ir->open_count == 0 - happens on final close()
* ir_lock, tx_ref_lock, rx_ref_lock, all released
*/
- if (ir->l.minor >= 0 && ir->l.minor < MAX_IRCTL_DEVICES) {
+ if (ir->l.minor >= 0) {
lirc_unregister_driver(ir->l.minor);
- ir->l.minor = MAX_IRCTL_DEVICES;
+ ir->l.minor = -1;
}
+
if (kfifo_initialized(&ir->rbuf.fifo))
lirc_buffer_free(&ir->rbuf);
list_del(&ir->list);
@@ -215,7 +215,7 @@ static struct IR_rx *get_ir_rx(struct IR *ir)
spin_lock(&ir->rx_ref_lock);
rx = ir->rx;
- if (rx != NULL)
+ if (rx)
kref_get(&rx->ref);
spin_unlock(&ir->rx_ref_lock);
return rx;
@@ -277,7 +277,7 @@ static struct IR_tx *get_ir_tx(struct IR *ir)
spin_lock(&ir->tx_ref_lock);
tx = ir->tx;
- if (tx != NULL)
+ if (tx)
kref_get(&tx->ref);
spin_unlock(&ir->tx_ref_lock);
return tx;
@@ -327,12 +327,12 @@ static int add_to_buf(struct IR *ir)
}
rx = get_ir_rx(ir);
- if (rx == NULL)
+ if (!rx)
return -ENXIO;
/* Ensure our rx->c i2c_client remains valid for the duration */
mutex_lock(&rx->client_lock);
- if (rx->c == NULL) {
+ if (!rx->c) {
mutex_unlock(&rx->client_lock);
put_ir_rx(rx, false);
return -ENXIO;
@@ -388,7 +388,7 @@ static int add_to_buf(struct IR *ir)
break;
}
schedule_timeout((100 * HZ + 999) / 1000);
- if (tx != NULL)
+ if (tx)
tx->need_boot = 1;
++failures;
@@ -444,7 +444,7 @@ static int add_to_buf(struct IR *ir)
} while (!lirc_buffer_full(rbuf));
mutex_unlock(&rx->client_lock);
- if (tx != NULL)
+ if (tx)
put_ir_tx(tx, false);
put_ir_rx(rx, false);
return ret;
@@ -472,7 +472,7 @@ static int lirc_thread(void *arg)
/* if device not opened, we can sleep half a second */
if (atomic_read(&ir->open_count) == 0) {
- schedule_timeout(HZ/2);
+ schedule_timeout(HZ / 2);
continue;
}
@@ -497,18 +497,9 @@ static int lirc_thread(void *arg)
return 0;
}
-static int set_use_inc(void *data)
-{
- return 0;
-}
-
-static void set_use_dec(void *data)
-{
-}
-
/* safe read of a uint32 (always network byte order) */
static int read_uint32(unsigned char **data,
- unsigned char *endp, unsigned int *val)
+ unsigned char *endp, unsigned int *val)
{
if (*data + 4 > endp)
return 0;
@@ -520,7 +511,7 @@ static int read_uint32(unsigned char **data,
/* safe read of a uint8 */
static int read_uint8(unsigned char **data,
- unsigned char *endp, unsigned char *val)
+ unsigned char *endp, unsigned char *val)
{
if (*data + 1 > endp)
return 0;
@@ -530,7 +521,7 @@ static int read_uint8(unsigned char **data,
/* safe skipping of N bytes */
static int skip(unsigned char **data,
- unsigned char *endp, unsigned int distance)
+ unsigned char *endp, unsigned int distance)
{
if (*data + distance > endp)
return 0;
@@ -540,7 +531,7 @@ static int skip(unsigned char **data,
/* decompress key data into the given buffer */
static int get_key_data(unsigned char *buf,
- unsigned int codeset, unsigned int key)
+ unsigned int codeset, unsigned int key)
{
unsigned char *data, *endp, *diffs, *key_block;
unsigned char keys, ndiffs, id;
@@ -554,9 +545,9 @@ static int get_key_data(unsigned char *buf,
if (!read_uint32(&data, tx_data->endp, &i))
goto corrupt;
- if (i == codeset)
+ if (i == codeset) {
break;
- else if (codeset > i) {
+ } else if (codeset > i) {
base = pos + 1;
--lim;
}
@@ -772,7 +763,7 @@ static int fw_load(struct IR_tx *tx)
/* Parse the file */
tx_data = vmalloc(sizeof(*tx_data));
- if (tx_data == NULL) {
+ if (!tx_data) {
release_firmware(fw_entry);
ret = -ENOMEM;
goto out;
@@ -781,7 +772,7 @@ static int fw_load(struct IR_tx *tx)
/* Copy the data so hotplug doesn't get confused and timeout */
tx_data->datap = vmalloc(fw_entry->size);
- if (tx_data->datap == NULL) {
+ if (!tx_data->datap) {
release_firmware(fw_entry);
vfree(tx_data);
ret = -ENOMEM;
@@ -810,7 +801,7 @@ static int fw_load(struct IR_tx *tx)
goto corrupt;
if (!read_uint32(&data, tx_data->endp,
- &tx_data->num_code_sets))
+ &tx_data->num_code_sets))
goto corrupt;
dev_dbg(tx->ir->l.dev, "%u IR blaster codesets loaded\n",
@@ -818,7 +809,7 @@ static int fw_load(struct IR_tx *tx)
tx_data->code_sets = vmalloc(
tx_data->num_code_sets * sizeof(char *));
- if (tx_data->code_sets == NULL) {
+ if (!tx_data->code_sets) {
fw_unload_locked();
ret = -ENOMEM;
goto out;
@@ -866,12 +857,12 @@ static int fw_load(struct IR_tx *tx)
* global fixed
*/
if (!skip(&data, tx_data->endp,
- 1 + TX_BLOCK_SIZE - num_global_fixed))
+ 1 + TX_BLOCK_SIZE - num_global_fixed))
goto corrupt;
/* Then we have keys-1 blocks of key id+diffs */
if (!skip(&data, tx_data->endp,
- (ndiffs + 1) * (keys - 1)))
+ (ndiffs + 1) * (keys - 1)))
goto corrupt;
}
ret = 0;
@@ -905,7 +896,7 @@ static ssize_t read(struct file *filep, char __user *outbuf, size_t n,
}
rx = get_ir_rx(ir);
- if (rx == NULL)
+ if (!rx)
return -ENXIO;
/*
@@ -990,8 +981,9 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
"failed to get data for code %u, key %u -- check lircd.conf entries\n",
code, key);
return ret;
- } else if (ret != 0)
+ } else if (ret != 0) {
return ret;
+ }
/* Send the data block */
ret = send_data_block(tx, data_block);
@@ -1065,7 +1057,7 @@ static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
break;
dev_dbg(tx->ir->l.dev,
"NAK expected: i2c_master_send failed with %d (try %d)\n",
- ret, i+1);
+ ret, i + 1);
}
if (ret != 1) {
dev_err(tx->ir->l.dev,
@@ -1111,12 +1103,12 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
/* Get a struct IR_tx reference */
tx = get_ir_tx(ir);
- if (tx == NULL)
+ if (!tx)
return -ENXIO;
/* Ensure our tx->c i2c_client remains valid for the duration */
mutex_lock(&tx->client_lock);
- if (tx->c == NULL) {
+ if (!tx->c) {
mutex_unlock(&tx->client_lock);
put_ir_tx(tx, false);
return -ENXIO;
@@ -1188,8 +1180,9 @@ static ssize_t write(struct file *filep, const char __user *buf, size_t n,
schedule_timeout((100 * HZ + 999) / 1000);
tx->need_boot = 1;
++failures;
- } else
+ } else {
i += sizeof(int);
+ }
}
/* Release i2c bus */
@@ -1212,15 +1205,15 @@ static unsigned int poll(struct file *filep, poll_table *wait)
struct lirc_buffer *rbuf = ir->l.rbuf;
unsigned int ret;
- dev_dbg(ir->l.dev, "poll called\n");
+ dev_dbg(ir->l.dev, "%s called\n", __func__);
rx = get_ir_rx(ir);
- if (rx == NULL) {
+ if (!rx) {
/*
* Revisit this, if our poll function ever reports writeable
* status for Tx
*/
- dev_dbg(ir->l.dev, "poll result = POLLERR\n");
+ dev_dbg(ir->l.dev, "%s result = POLLERR\n", __func__);
return POLLERR;
}
@@ -1231,9 +1224,9 @@ static unsigned int poll(struct file *filep, poll_table *wait)
poll_wait(filep, &rbuf->wait_poll, wait);
/* Indicate what ops could happen immediately without blocking */
- ret = lirc_buffer_empty(rbuf) ? 0 : (POLLIN|POLLRDNORM);
+ ret = lirc_buffer_empty(rbuf) ? 0 : (POLLIN | POLLRDNORM);
- dev_dbg(ir->l.dev, "poll result = %s\n",
+ dev_dbg(ir->l.dev, "%s result = %s\n", __func__,
ret ? "POLLIN|POLLRDNORM" : "none");
return ret;
}
@@ -1255,15 +1248,15 @@ static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
result = put_user(features, uptr);
break;
case LIRC_GET_REC_MODE:
- if (!(features&LIRC_CAN_REC_MASK))
+ if (!(features & LIRC_CAN_REC_MASK))
return -ENOSYS;
result = put_user(LIRC_REC2MODE
- (features&LIRC_CAN_REC_MASK),
+ (features & LIRC_CAN_REC_MASK),
uptr);
break;
case LIRC_SET_REC_MODE:
- if (!(features&LIRC_CAN_REC_MASK))
+ if (!(features & LIRC_CAN_REC_MASK))
return -ENOSYS;
result = get_user(mode, uptr);
@@ -1271,13 +1264,13 @@ static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
result = -EINVAL;
break;
case LIRC_GET_SEND_MODE:
- if (!(features&LIRC_CAN_SEND_MASK))
+ if (!(features & LIRC_CAN_SEND_MASK))
return -ENOSYS;
result = put_user(LIRC_MODE_PULSE, uptr);
break;
case LIRC_SET_SEND_MODE:
- if (!(features&LIRC_CAN_SEND_MASK))
+ if (!(features & LIRC_CAN_SEND_MASK))
return -ENOSYS;
result = get_user(mode, uptr);
@@ -1322,7 +1315,7 @@ static int open(struct inode *node, struct file *filep)
/* find our IR struct */
ir = get_ir_device_by_minor(minor);
- if (ir == NULL)
+ if (!ir)
return -ENODEV;
atomic_inc(&ir->open_count);
@@ -1340,8 +1333,9 @@ static int close(struct inode *node, struct file *filep)
/* find our IR struct */
struct IR *ir = filep->private_data;
- if (ir == NULL) {
- pr_err("ir: close: no private_data attached to the file!\n");
+ if (!ir) {
+ pr_err("ir: %s: no private_data attached to the file!\n",
+ __func__);
return -ENODEV;
}
@@ -1394,10 +1388,7 @@ static struct lirc_driver lirc_template = {
.minor = -1,
.code_length = 13,
.buffer_size = BUFLEN / 2,
- .sample_rate = 0, /* tell lirc_dev to not start its own kthread */
.chunk_size = 2,
- .set_use_inc = set_use_inc,
- .set_use_dec = set_use_dec,
.fops = &lirc_fops,
.owner = THIS_MODULE,
};
@@ -1407,7 +1398,7 @@ static int ir_remove(struct i2c_client *client)
if (strncmp("ir_tx_z8", client->name, 8) == 0) {
struct IR_tx *tx = i2c_get_clientdata(client);
- if (tx != NULL) {
+ if (tx) {
mutex_lock(&tx->client_lock);
tx->c = NULL;
mutex_unlock(&tx->client_lock);
@@ -1416,7 +1407,7 @@ static int ir_remove(struct i2c_client *client)
} else if (strncmp("ir_rx_z8", client->name, 8) == 0) {
struct IR_rx *rx = i2c_get_clientdata(client);
- if (rx != NULL) {
+ if (rx) {
mutex_lock(&rx->client_lock);
rx->c = NULL;
mutex_unlock(&rx->client_lock);
@@ -1426,7 +1417,6 @@ static int ir_remove(struct i2c_client *client)
return 0;
}
-
/* ir_devices_lock must be held */
static struct IR *get_ir_device_by_adapter(struct i2c_adapter *adapter)
{
@@ -1467,14 +1457,14 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
return -ENXIO;
pr_info("probing IR %s on %s (i2c-%d)\n",
- tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
+ tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
mutex_lock(&ir_devices_lock);
/* Use a single struct IR instance for both the Rx and Tx functions */
ir = get_ir_device_by_adapter(adap);
- if (ir == NULL) {
- ir = kzalloc(sizeof(struct IR), GFP_KERNEL);
+ if (!ir) {
+ ir = kzalloc(sizeof(*ir), GFP_KERNEL);
if (!ir) {
ret = -ENOMEM;
goto out_no_ir;
@@ -1514,7 +1504,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
rx = get_ir_rx(ir);
/* Set up a struct IR_tx instance */
- tx = kzalloc(sizeof(struct IR_tx), GFP_KERNEL);
+ tx = kzalloc(sizeof(*tx), GFP_KERNEL);
if (!tx) {
ret = -ENOMEM;
goto out_put_xx;
@@ -1547,7 +1537,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
fw_load(tx);
/* Proceed only if the Rx client is also ready or not needed */
- if (rx == NULL && !tx_only) {
+ if (!rx && !tx_only) {
dev_info(tx->ir->l.dev,
"probe of IR Tx on %s (i2c-%d) done. Waiting on IR Rx.\n",
adap->name, adap->nr);
@@ -1558,7 +1548,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
tx = get_ir_tx(ir);
/* Set up a struct IR_rx instance */
- rx = kzalloc(sizeof(struct IR_rx), GFP_KERNEL);
+ rx = kzalloc(sizeof(*rx), GFP_KERNEL);
if (!rx) {
ret = -ENOMEM;
goto out_put_xx;
@@ -1601,20 +1591,19 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
/* Proceed only if the Tx client is also ready */
- if (tx == NULL) {
+ if (!tx) {
pr_info("probe of IR Rx on %s (i2c-%d) done. Waiting on IR Tx.\n",
- adap->name, adap->nr);
+ adap->name, adap->nr);
goto out_ok;
}
}
/* register with lirc */
- ir->l.minor = minor; /* module option: user requested minor number */
ir->l.minor = lirc_register_driver(&ir->l);
- if (ir->l.minor < 0 || ir->l.minor >= MAX_IRCTL_DEVICES) {
+ if (ir->l.minor < 0) {
dev_err(tx->ir->l.dev,
- "%s: \"minor\" must be between 0 and %d (%d)!\n",
- __func__, MAX_IRCTL_DEVICES-1, ir->l.minor);
+ "%s: lirc_register_driver() failed: %i\n",
+ __func__, ir->l.minor);
ret = -EBADRQC;
goto out_put_xx;
}
@@ -1623,9 +1612,9 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
adap->name, adap->nr, ir->l.minor);
out_ok:
- if (rx != NULL)
+ if (rx)
put_ir_rx(rx, true);
- if (tx != NULL)
+ if (tx)
put_ir_tx(tx, true);
put_ir_device(ir, true);
dev_info(ir->l.dev,
@@ -1635,10 +1624,10 @@ out_ok:
return 0;
out_put_xx:
- if (rx != NULL)
+ if (rx)
put_ir_rx(rx, true);
out_put_tx:
- if (tx != NULL)
+ if (tx)
put_ir_tx(tx, true);
out_put_ir:
put_ir_device(ir, true);
@@ -1686,9 +1675,6 @@ MODULE_LICENSE("GPL");
/* for compat with old name, which isn't all that accurate anymore */
MODULE_ALIAS("lirc_pvr150");
-module_param(minor, int, 0444);
-MODULE_PARM_DESC(minor, "Preferred minor device number");
-
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Enable debugging messages");
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
index e8cf0b97bf02..3637ddf909a4 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
@@ -353,9 +353,8 @@ static int snd_bcm2835_pcm_ack(struct snd_pcm_substream *substream)
struct snd_pcm_indirect *pcm_indirect = &alsa_stream->pcm_indirect;
pcm_indirect->hw_queue_size = runtime->hw.buffer_bytes_max;
- snd_pcm_indirect_playback_transfer(substream, pcm_indirect,
- snd_bcm2835_pcm_transfer);
- return 0;
+ return snd_pcm_indirect_playback_transfer(substream, pcm_indirect,
+ snd_bcm2835_pcm_transfer);
}
/* trigger callback */
diff --git a/drivers/thermal/intel_bxt_pmic_thermal.c b/drivers/thermal/intel_bxt_pmic_thermal.c
index 0f19a393ddd8..ef6b32242ccb 100644
--- a/drivers/thermal/intel_bxt_pmic_thermal.c
+++ b/drivers/thermal/intel_bxt_pmic_thermal.c
@@ -241,7 +241,7 @@ static int pmic_thermal_probe(struct platform_device *pdev)
}
regmap = pmic->regmap;
- regmap_irq_chip = pmic->irq_chip_data_level2;
+ regmap_irq_chip = pmic->irq_chip_data;
pmic_irq_count = 0;
while ((irq = platform_get_irq(pdev, pmic_irq_count)) != -ENXIO) {
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index a309bcffffcd..b5c98e5bf524 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -9,10 +9,13 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*/
+#include <linux/acpi.h>
+#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/property.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
@@ -60,8 +63,50 @@
#define UART_EXAR_MPIOSEL_15_8 0x99 /* MPIOSEL[15:8] */
#define UART_EXAR_MPIOOD_15_8 0x9a /* MPIOOD[15:8] */
+#define UART_EXAR_RS485_DLY(x) ((x) << 4)
+
+/*
+ * IOT2040 MPIO wiring semantics:
+ *
+ * MPIO Port Function
+ * ---- ---- --------
+ * 0 2 Mode bit 0
+ * 1 2 Mode bit 1
+ * 2 2 Terminate bus
+ * 3 - <reserved>
+ * 4 3 Mode bit 0
+ * 5 3 Mode bit 1
+ * 6 3 Terminate bus
+ * 7 - <reserved>
+ * 8 2 Enable
+ * 9 3 Enable
+ * 10 - Red LED
+ * 11..15 - <unused>
+ */
+
+/* IOT2040 MPIOs 0..7 */
+#define IOT2040_UART_MODE_RS232 0x01
+#define IOT2040_UART_MODE_RS485 0x02
+#define IOT2040_UART_MODE_RS422 0x03
+#define IOT2040_UART_TERMINATE_BUS 0x04
+
+#define IOT2040_UART1_MASK 0x0f
+#define IOT2040_UART2_SHIFT 4
+
+#define IOT2040_UARTS_DEFAULT_MODE 0x11 /* both RS232 */
+#define IOT2040_UARTS_GPIO_LO_MODE 0x88 /* reserved pins as input */
+
+/* IOT2040 MPIOs 8..15 */
+#define IOT2040_UARTS_ENABLE 0x03
+#define IOT2040_UARTS_GPIO_HI_MODE 0xF8 /* enable & LED as outputs */
+
struct exar8250;
+struct exar8250_platform {
+ int (*rs485_config)(struct uart_port *, struct serial_rs485 *);
+ int (*register_gpio)(struct pci_dev *, struct uart_8250_port *);
+};
+
/**
* struct exar8250_board - board information
* @num_ports: number of serial ports
@@ -194,7 +239,8 @@ static void setup_gpio(struct pci_dev *pcidev, u8 __iomem *p)
}
static void *
-xr17v35x_register_gpio(struct pci_dev *pcidev)
+__xr17v35x_register_gpio(struct pci_dev *pcidev,
+ const struct property_entry *properties)
{
struct platform_device *pdev;
@@ -202,8 +248,11 @@ xr17v35x_register_gpio(struct pci_dev *pcidev)
if (!pdev)
return NULL;
- platform_set_drvdata(pdev, pcidev);
- if (platform_device_add(pdev) < 0) {
+ pdev->dev.parent = &pcidev->dev;
+ ACPI_COMPANION_SET(&pdev->dev, ACPI_COMPANION(&pcidev->dev));
+
+ if (platform_device_add_properties(pdev, properties) < 0 ||
+ platform_device_add(pdev) < 0) {
platform_device_put(pdev);
return NULL;
}
@@ -211,17 +260,131 @@ xr17v35x_register_gpio(struct pci_dev *pcidev)
return pdev;
}
+static const struct property_entry exar_gpio_properties[] = {
+ PROPERTY_ENTRY_U32("linux,first-pin", 0),
+ PROPERTY_ENTRY_U32("ngpios", 16),
+ { }
+};
+
+static int xr17v35x_register_gpio(struct pci_dev *pcidev,
+ struct uart_8250_port *port)
+{
+ if (pcidev->vendor == PCI_VENDOR_ID_EXAR)
+ port->port.private_data =
+ __xr17v35x_register_gpio(pcidev, exar_gpio_properties);
+
+ return 0;
+}
+
+static const struct exar8250_platform exar8250_default_platform = {
+ .register_gpio = xr17v35x_register_gpio,
+};
+
+static int iot2040_rs485_config(struct uart_port *port,
+ struct serial_rs485 *rs485)
+{
+ bool is_rs485 = !!(rs485->flags & SER_RS485_ENABLED);
+ u8 __iomem *p = port->membase;
+ u8 mask = IOT2040_UART1_MASK;
+ u8 mode, value;
+
+ if (is_rs485) {
+ if (rs485->flags & SER_RS485_RX_DURING_TX)
+ mode = IOT2040_UART_MODE_RS422;
+ else
+ mode = IOT2040_UART_MODE_RS485;
+
+ if (rs485->flags & SER_RS485_TERMINATE_BUS)
+ mode |= IOT2040_UART_TERMINATE_BUS;
+ } else {
+ mode = IOT2040_UART_MODE_RS232;
+ }
+
+ if (port->line == 3) {
+ mask <<= IOT2040_UART2_SHIFT;
+ mode <<= IOT2040_UART2_SHIFT;
+ }
+
+ value = readb(p + UART_EXAR_MPIOLVL_7_0);
+ value &= ~mask;
+ value |= mode;
+ writeb(value, p + UART_EXAR_MPIOLVL_7_0);
+
+ value = readb(p + UART_EXAR_FCTR);
+ if (is_rs485)
+ value |= UART_FCTR_EXAR_485;
+ else
+ value &= ~UART_FCTR_EXAR_485;
+ writeb(value, p + UART_EXAR_FCTR);
+
+ if (is_rs485)
+ writeb(UART_EXAR_RS485_DLY(4), p + UART_MSR);
+
+ port->rs485 = *rs485;
+
+ return 0;
+}
+
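To make the bit arithmetic in iot2040_rs485_config() concrete: selecting RS-485 with bus termination on the second port (port->line == 3) ORs IOT2040_UART_MODE_RS485 with IOT2040_UART_TERMINATE_BUS to get 0x06, shifts both mask and mode left by IOT2040_UART2_SHIFT, and therefore rewrites only bits 7:4 of MPIOLVL[7:0]. A small standalone sketch of that computation (constants copied from the defines above; the starting register value 0x11 is a hypothetical both-ports-RS232 state):

/* Sketch of the MPIO value iot2040_rs485_config() computes for the
 * second UART in RS-485 mode with the bus terminator switched in.
 */
#include <stdio.h>

#define MODE_RS485	0x02
#define TERMINATE_BUS	0x04
#define UART1_MASK	0x0f
#define UART2_SHIFT	4

int main(void)
{
	unsigned char value = 0x11;	/* hypothetical: both ports in RS-232 */
	unsigned char mask = UART1_MASK << UART2_SHIFT;
	unsigned char mode = (MODE_RS485 | TERMINATE_BUS) << UART2_SHIFT;

	value = (value & ~mask) | mode;
	printf("MPIOLVL[7:0] = 0x%02x\n", value);	/* prints 0x61 */
	return 0;
}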
+static const struct property_entry iot2040_gpio_properties[] = {
+ PROPERTY_ENTRY_U32("linux,first-pin", 10),
+ PROPERTY_ENTRY_U32("ngpios", 1),
+ { }
+};
+
+static int iot2040_register_gpio(struct pci_dev *pcidev,
+ struct uart_8250_port *port)
+{
+ u8 __iomem *p = port->port.membase;
+
+ writeb(IOT2040_UARTS_DEFAULT_MODE, p + UART_EXAR_MPIOLVL_7_0);
+ writeb(IOT2040_UARTS_GPIO_LO_MODE, p + UART_EXAR_MPIOSEL_7_0);
+ writeb(IOT2040_UARTS_ENABLE, p + UART_EXAR_MPIOLVL_15_8);
+ writeb(IOT2040_UARTS_GPIO_HI_MODE, p + UART_EXAR_MPIOSEL_15_8);
+
+ port->port.private_data =
+ __xr17v35x_register_gpio(pcidev, iot2040_gpio_properties);
+
+ return 0;
+}
+
+static const struct exar8250_platform iot2040_platform = {
+ .rs485_config = iot2040_rs485_config,
+ .register_gpio = iot2040_register_gpio,
+};
+
+static const struct dmi_system_id exar_platforms[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
+ DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
+ "6ES7647-0AA00-1YA2"),
+ },
+ .driver_data = (void *)&iot2040_platform,
+ },
+ {}
+};
+
static int
pci_xr17v35x_setup(struct exar8250 *priv, struct pci_dev *pcidev,
struct uart_8250_port *port, int idx)
{
const struct exar8250_board *board = priv->board;
+ const struct exar8250_platform *platform;
+ const struct dmi_system_id *dmi_match;
unsigned int offset = idx * 0x400;
unsigned int baud = 7812500;
u8 __iomem *p;
int ret;
+ dmi_match = dmi_first_match(exar_platforms);
+ if (dmi_match)
+ platform = dmi_match->driver_data;
+ else
+ platform = &exar8250_default_platform;
+
port->port.uartclk = baud * 16;
+ port->port.rs485_config = platform->rs485_config;
+
/*
* Setup the uart clock for the devices on expansion slot to
* half the clock speed of the main chip (which is 125MHz)
@@ -244,10 +407,10 @@ pci_xr17v35x_setup(struct exar8250 *priv, struct pci_dev *pcidev,
/* Setup Multipurpose Input/Output pins. */
setup_gpio(pcidev, p);
- port->port.private_data = xr17v35x_register_gpio(pcidev);
+ ret = platform->register_gpio(pcidev, port);
}
- return 0;
+ return ret;
}
static void pci_xr17v35x_exit(struct pci_dev *pcidev)
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 0e7d0e81a7cb..ebe27595c4af 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1966,27 +1966,21 @@ static int proc_disconnectsignal_compat(struct usb_dev_state *ps, void __user *a
static int get_urb32(struct usbdevfs_urb *kurb,
struct usbdevfs_urb32 __user *uurb)
{
- __u32 uptr;
- if (!access_ok(VERIFY_READ, uurb, sizeof(*uurb)) ||
- __get_user(kurb->type, &uurb->type) ||
- __get_user(kurb->endpoint, &uurb->endpoint) ||
- __get_user(kurb->status, &uurb->status) ||
- __get_user(kurb->flags, &uurb->flags) ||
- __get_user(kurb->buffer_length, &uurb->buffer_length) ||
- __get_user(kurb->actual_length, &uurb->actual_length) ||
- __get_user(kurb->start_frame, &uurb->start_frame) ||
- __get_user(kurb->number_of_packets, &uurb->number_of_packets) ||
- __get_user(kurb->error_count, &uurb->error_count) ||
- __get_user(kurb->signr, &uurb->signr))
+ struct usbdevfs_urb32 urb32;
+ if (copy_from_user(&urb32, uurb, sizeof(*uurb)))
return -EFAULT;
-
- if (__get_user(uptr, &uurb->buffer))
- return -EFAULT;
- kurb->buffer = compat_ptr(uptr);
- if (__get_user(uptr, &uurb->usercontext))
- return -EFAULT;
- kurb->usercontext = compat_ptr(uptr);
-
+ kurb->type = urb32.type;
+ kurb->endpoint = urb32.endpoint;
+ kurb->status = urb32.status;
+ kurb->flags = urb32.flags;
+ kurb->buffer = compat_ptr(urb32.buffer);
+ kurb->buffer_length = urb32.buffer_length;
+ kurb->actual_length = urb32.actual_length;
+ kurb->start_frame = urb32.start_frame;
+ kurb->number_of_packets = urb32.number_of_packets;
+ kurb->error_count = urb32.error_count;
+ kurb->signr = urb32.signr;
+ kurb->usercontext = compat_ptr(urb32.usercontext);
return 0;
}
@@ -2198,18 +2192,14 @@ static int proc_ioctl_default(struct usb_dev_state *ps, void __user *arg)
#ifdef CONFIG_COMPAT
static int proc_ioctl_compat(struct usb_dev_state *ps, compat_uptr_t arg)
{
- struct usbdevfs_ioctl32 __user *uioc;
+ struct usbdevfs_ioctl32 ioc32;
struct usbdevfs_ioctl ctrl;
- u32 udata;
- uioc = compat_ptr((long)arg);
- if (!access_ok(VERIFY_READ, uioc, sizeof(*uioc)) ||
- __get_user(ctrl.ifno, &uioc->ifno) ||
- __get_user(ctrl.ioctl_code, &uioc->ioctl_code) ||
- __get_user(udata, &uioc->data))
+ if (copy_from_user(&ioc32, compat_ptr(arg), sizeof(ioc32)))
return -EFAULT;
- ctrl.data = compat_ptr(udata);
-
+ ctrl.ifno = ioc32.ifno;
+ ctrl.ioctl_code = ioc32.ioctl_code;
+ ctrl.data = compat_ptr(ioc32.data);
return proc_ioctl(ps, &ctrl);
}
#endif
diff --git a/drivers/usb/gadget/function/u_uac1_legacy.c b/drivers/usb/gadget/function/u_uac1_legacy.c
index 8aa76b4dc117..fa4684a1c54c 100644
--- a/drivers/usb/gadget/function/u_uac1_legacy.c
+++ b/drivers/usb/gadget/function/u_uac1_legacy.c
@@ -157,7 +157,6 @@ size_t u_audio_playback(struct gaudio *card, void *buf, size_t count)
struct gaudio_snd_dev *snd = &card->playback;
struct snd_pcm_substream *substream = snd->substream;
struct snd_pcm_runtime *runtime = substream->runtime;
- mm_segment_t old_fs;
ssize_t result;
snd_pcm_sframes_t frames;
@@ -174,15 +173,11 @@ try_again:
}
frames = bytes_to_frames(runtime, count);
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- result = snd_pcm_lib_write(snd->substream, (void __user *)buf, frames);
+ result = snd_pcm_kernel_write(snd->substream, buf, frames);
if (result != frames) {
ERROR(card, "Playback error: %d\n", (int)result);
- set_fs(old_fs);
goto try_again;
}
- set_fs(old_fs);
return 0;
}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0dde49c35dd2..1adae9eab831 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1461,6 +1461,9 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
t2 |= PORT_WKOC_E | PORT_WKCONN_E;
t2 &= ~PORT_WKDISC_E;
}
+ if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
+ (hcd->speed < HCD_USB3))
+ t2 &= ~PORT_WAKE_BITS;
} else
t2 &= ~PORT_WAKE_BITS;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 783e6687bf4a..53882e2babbb 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -54,6 +54,11 @@
#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
+
static const char hcd_name[] = "xhci_hcd";
static struct hc_driver __read_mostly xhci_pci_hc_driver;
@@ -135,6 +140,13 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_AMD)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
+ ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
+ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) ||
+ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) ||
+ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
+ xhci->quirks |= XHCI_U2_DISABLE_WAKE;
+
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
xhci->quirks |= XHCI_LPM_SUPPORT;
xhci->quirks |= XHCI_INTEL_HOST;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 650a2d9d4aec..3c6da1f93c84 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1819,6 +1819,7 @@ struct xhci_hcd {
/* For controller with a broken Port Disable implementation */
#define XHCI_BROKEN_PORT_PED (1 << 25)
#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
+#define XHCI_U2_DISABLE_WAKE (1 << 27)
unsigned int num_active_eps;
unsigned int limit_active_eps;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 3bf61acfc26b..ebe51f11105d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1877,6 +1877,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&four_g_w100_blacklist
},
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 4ac137d070fb..ebc0beea69d6 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -158,6 +158,7 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
+ {DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */
{DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
{DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
{DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
diff --git a/drivers/usb/typec/typec_wcove.c b/drivers/usb/typec/typec_wcove.c
index c2ce25289027..e9c4e784a9cb 100644
--- a/drivers/usb/typec/typec_wcove.c
+++ b/drivers/usb/typec/typec_wcove.c
@@ -303,7 +303,7 @@ static int wcove_typec_probe(struct platform_device *pdev)
wcove->dev = &pdev->dev;
wcove->regmap = pmic->regmap;
- ret = regmap_irq_get_virq(pmic->irq_chip_data_level2,
+ ret = regmap_irq_get_virq(pmic->irq_chip_data_chgr,
platform_get_irq(pdev, 0));
if (ret < 0)
return ret;
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 510e559c060e..e7315bf14d60 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -18,7 +18,7 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
-#include <linux/i2c/adp8860.h>
+#include <linux/platform_data/adp8860.h>
#define ADP8860_EXT_FEATURES
#define ADP8860_USE_LEDS
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 21acac90fd77..058d1def2d1f 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -18,7 +18,7 @@
#include <linux/workqueue.h>
#include <linux/slab.h>
-#include <linux/i2c/adp8870.h>
+#include <linux/platform_data/adp8870.h>
#define ADP8870_EXT_FEATURES
#define ADP8870_USE_LEDS
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 288318ad21dd..8049e7656daa 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -134,7 +134,7 @@ static ssize_t bl_power_store(struct device *dev, struct device_attribute *attr,
{
int rc;
struct backlight_device *bd = to_backlight_device(dev);
- unsigned long power;
+ unsigned long power, old_power;
rc = kstrtoul(buf, 0, &power);
if (rc)
@@ -145,10 +145,16 @@ static ssize_t bl_power_store(struct device *dev, struct device_attribute *attr,
if (bd->ops) {
pr_debug("set power to %lu\n", power);
if (bd->props.power != power) {
+ old_power = bd->props.power;
bd->props.power = power;
- backlight_update_status(bd);
+ rc = backlight_update_status(bd);
+ if (rc)
+ bd->props.power = old_power;
+ else
+ rc = count;
+ } else {
+ rc = count;
}
- rc = count;
}
mutex_unlock(&bd->ops_lock);
@@ -176,8 +182,7 @@ int backlight_device_set_brightness(struct backlight_device *bd,
else {
pr_debug("set brightness to %lu\n", brightness);
bd->props.brightness = brightness;
- backlight_update_status(bd);
- rc = 0;
+ rc = backlight_update_status(bd);
}
}
mutex_unlock(&bd->ops_lock);
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index 35df2c1a8a63..8de42f617d16 100644
--- a/drivers/video/fbdev/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
@@ -532,10 +532,6 @@ failed:
clk_disable_unprepare(fbdev->lcdclk);
clk_put(fbdev->lcdclk);
}
- if (fbdev->fb_mem) {
- dma_free_noncoherent(&dev->dev, fbdev->fb_len, fbdev->fb_mem,
- fbdev->fb_phys);
- }
if (fbdev->info.cmap.len != 0) {
fb_dealloc_cmap(&fbdev->info.cmap);
}
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 6c2b2ca4a909..5f04b4096c42 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1694,9 +1694,10 @@ static int au1200fb_drv_probe(struct platform_device *dev)
/* Allocate the framebuffer to the maximum screen size */
fbdev->fb_len = (win->w[plane].xres * win->w[plane].yres * bpp) / 8;
- fbdev->fb_mem = dmam_alloc_noncoherent(&dev->dev,
+ fbdev->fb_mem = dmam_alloc_attrs(&dev->dev,
PAGE_ALIGN(fbdev->fb_len),
- &fbdev->fb_phys, GFP_KERNEL);
+ &fbdev->fb_phys, GFP_KERNEL,
+ DMA_ATTR_NON_CONSISTENT);
if (!fbdev->fb_mem) {
print_err("fail to allocate frambuffer (size: %dK))",
fbdev->fb_len / 1024);
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 069fe7960df1..5324358f110f 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1331,22 +1331,13 @@ static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
- mm_segment_t old_fs;
struct fb_fix_screeninfo fix;
- struct fb_fix_screeninfo32 __user *fix32;
- int err;
-
- fix32 = compat_ptr(arg);
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- err = do_fb_ioctl(info, cmd, (unsigned long) &fix);
- set_fs(old_fs);
- if (!err)
- err = do_fscreeninfo_to_user(&fix, fix32);
-
- return err;
+ if (!lock_fb_info(info))
+ return -ENODEV;
+ fix = info->fix;
+ unlock_fb_info(info);
+ return do_fscreeninfo_to_user(&fix, compat_ptr(arg));
}
static long fb_compat_ioctl(struct file *file, unsigned int cmd,
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index b827a8113e26..ff01bed7112f 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -408,7 +408,7 @@ static void efifb_fixup_resources(struct pci_dev *dev)
if (!base)
return;
- for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
struct resource *res = &dev->resource[i];
if (!(res->flags & IORESOURCE_MEM))
diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c
index 16f16f5e1a4b..9230db9ea94b 100644
--- a/drivers/video/fbdev/hpfb.c
+++ b/drivers/video/fbdev/hpfb.c
@@ -377,7 +377,6 @@ static struct dio_driver hpfb_driver = {
int __init hpfb_init(void)
{
unsigned int sid;
- mm_segment_t fs;
unsigned char i;
int err;
@@ -402,10 +401,7 @@ int __init hpfb_init(void)
if (err)
return err;
- fs = get_fs();
- set_fs(KERNEL_DS);
- err = get_user(i, (unsigned char *)INTFBVADDR + DIO_IDOFF);
- set_fs(fs);
+ err = probe_kernel_read(&i, (unsigned char *)INTFBVADDR + DIO_IDOFF, 1);
if (!err && (i == DIO_ID_FBUFFER) && topcat_sid_ok(sid = DIO_SECID(INTFBVADDR))) {
if (!request_mem_region(INTFBPADDR, DIO_DEVSIZE, "Internal Topcat"))
diff --git a/drivers/video/fbdev/jz4740_fb.c b/drivers/video/fbdev/jz4740_fb.c
index 87790e9644d0..b57df83fdbd3 100644
--- a/drivers/video/fbdev/jz4740_fb.c
+++ b/drivers/video/fbdev/jz4740_fb.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -27,7 +28,6 @@
#include <linux/dma-mapping.h>
#include <asm/mach-jz4740/jz4740_fb.h>
-#include <asm/mach-jz4740/gpio.h>
#define JZ_REG_LCD_CFG 0x00
#define JZ_REG_LCD_VSYNC 0x04
@@ -146,93 +146,6 @@ static const struct fb_fix_screeninfo jzfb_fix = {
.accel = FB_ACCEL_NONE,
};
-static const struct jz_gpio_bulk_request jz_lcd_ctrl_pins[] = {
- JZ_GPIO_BULK_PIN(LCD_PCLK),
- JZ_GPIO_BULK_PIN(LCD_HSYNC),
- JZ_GPIO_BULK_PIN(LCD_VSYNC),
- JZ_GPIO_BULK_PIN(LCD_DE),
- JZ_GPIO_BULK_PIN(LCD_PS),
- JZ_GPIO_BULK_PIN(LCD_REV),
- JZ_GPIO_BULK_PIN(LCD_CLS),
- JZ_GPIO_BULK_PIN(LCD_SPL),
-};
-
-static const struct jz_gpio_bulk_request jz_lcd_data_pins[] = {
- JZ_GPIO_BULK_PIN(LCD_DATA0),
- JZ_GPIO_BULK_PIN(LCD_DATA1),
- JZ_GPIO_BULK_PIN(LCD_DATA2),
- JZ_GPIO_BULK_PIN(LCD_DATA3),
- JZ_GPIO_BULK_PIN(LCD_DATA4),
- JZ_GPIO_BULK_PIN(LCD_DATA5),
- JZ_GPIO_BULK_PIN(LCD_DATA6),
- JZ_GPIO_BULK_PIN(LCD_DATA7),
- JZ_GPIO_BULK_PIN(LCD_DATA8),
- JZ_GPIO_BULK_PIN(LCD_DATA9),
- JZ_GPIO_BULK_PIN(LCD_DATA10),
- JZ_GPIO_BULK_PIN(LCD_DATA11),
- JZ_GPIO_BULK_PIN(LCD_DATA12),
- JZ_GPIO_BULK_PIN(LCD_DATA13),
- JZ_GPIO_BULK_PIN(LCD_DATA14),
- JZ_GPIO_BULK_PIN(LCD_DATA15),
- JZ_GPIO_BULK_PIN(LCD_DATA16),
- JZ_GPIO_BULK_PIN(LCD_DATA17),
-};
-
-static unsigned int jzfb_num_ctrl_pins(struct jzfb *jzfb)
-{
- unsigned int num;
-
- switch (jzfb->pdata->lcd_type) {
- case JZ_LCD_TYPE_GENERIC_16_BIT:
- num = 4;
- break;
- case JZ_LCD_TYPE_GENERIC_18_BIT:
- num = 4;
- break;
- case JZ_LCD_TYPE_8BIT_SERIAL:
- num = 3;
- break;
- case JZ_LCD_TYPE_SPECIAL_TFT_1:
- case JZ_LCD_TYPE_SPECIAL_TFT_2:
- case JZ_LCD_TYPE_SPECIAL_TFT_3:
- num = 8;
- break;
- default:
- num = 0;
- break;
- }
- return num;
-}
-
-static unsigned int jzfb_num_data_pins(struct jzfb *jzfb)
-{
- unsigned int num;
-
- switch (jzfb->pdata->lcd_type) {
- case JZ_LCD_TYPE_GENERIC_16_BIT:
- num = 16;
- break;
- case JZ_LCD_TYPE_GENERIC_18_BIT:
- num = 18;
- break;
- case JZ_LCD_TYPE_8BIT_SERIAL:
- num = 8;
- break;
- case JZ_LCD_TYPE_SPECIAL_TFT_1:
- case JZ_LCD_TYPE_SPECIAL_TFT_2:
- case JZ_LCD_TYPE_SPECIAL_TFT_3:
- if (jzfb->pdata->bpp == 18)
- num = 18;
- else
- num = 16;
- break;
- default:
- num = 0;
- break;
- }
- return num;
-}
-
/* Based on CNVT_TOHW macro from skeletonfb.c */
static inline uint32_t jzfb_convert_color_to_hw(unsigned val,
struct fb_bitfield *bf)
@@ -487,8 +400,7 @@ static void jzfb_enable(struct jzfb *jzfb)
clk_prepare_enable(jzfb->ldclk);
- jz_gpio_bulk_resume(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
- jz_gpio_bulk_resume(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
+ pinctrl_pm_select_default_state(&jzfb->pdev->dev);
writel(0, jzfb->base + JZ_REG_LCD_STATE);
@@ -511,8 +423,7 @@ static void jzfb_disable(struct jzfb *jzfb)
ctrl = readl(jzfb->base + JZ_REG_LCD_STATE);
} while (!(ctrl & JZ_LCD_STATE_DISABLED));
- jz_gpio_bulk_suspend(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
- jz_gpio_bulk_suspend(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
+ pinctrl_pm_select_sleep_state(&jzfb->pdev->dev);
clk_disable_unprepare(jzfb->ldclk);
}
@@ -701,9 +612,6 @@ static int jzfb_probe(struct platform_device *pdev)
fb->mode = NULL;
jzfb_set_par(fb);
- jz_gpio_bulk_request(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
- jz_gpio_bulk_request(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
-
ret = register_framebuffer(fb);
if (ret) {
dev_err(&pdev->dev, "Failed to register framebuffer: %d\n", ret);
@@ -715,9 +623,6 @@ static int jzfb_probe(struct platform_device *pdev)
return 0;
err_free_devmem:
- jz_gpio_bulk_free(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
- jz_gpio_bulk_free(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
-
fb_dealloc_cmap(&fb->cmap);
jzfb_free_devmem(jzfb);
err_framebuffer_release:
@@ -731,9 +636,6 @@ static int jzfb_remove(struct platform_device *pdev)
jzfb_blank(FB_BLANK_POWERDOWN, jzfb->fb);
- jz_gpio_bulk_free(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb));
- jz_gpio_bulk_free(jz_lcd_data_pins, jzfb_num_data_pins(jzfb));
-
fb_dealloc_cmap(&jzfb->fb->cmap);
jzfb_free_devmem(jzfb);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 8b9049dac094..e6e31a16f68f 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1688,7 +1688,7 @@ config MEN_A21_WDT
config WATCHDOG_RTAS
tristate "RTAS watchdog"
- depends on PPC_RTAS || (PPC64 && COMPILE_TEST)
+ depends on PPC_RTAS
help
This driver adds watchdog support for the RTAS watchdog.
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 2e567d8433b3..b241bfa529ce 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1303,10 +1303,9 @@ void rebind_evtchn_irq(int evtchn, int irq)
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
-static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
+int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu)
{
struct evtchn_bind_vcpu bind_vcpu;
- int evtchn = evtchn_from_irq(irq);
int masked;
if (!VALID_EVTCHN(evtchn))
@@ -1338,12 +1337,13 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
return 0;
}
+EXPORT_SYMBOL_GPL(xen_rebind_evtchn_to_cpu);
static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
bool force)
{
unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
- int ret = rebind_irq_to_cpu(data->irq, tcpu);
+ int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
if (!ret)
irq_data_update_effective_affinity(data, cpumask_of(tcpu));
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 10f1ef582659..9729a64ea1a9 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -421,6 +421,36 @@ static void evtchn_unbind_from_user(struct per_user_data *u,
del_evtchn(u, evtchn);
}
+static DEFINE_PER_CPU(int, bind_last_selected_cpu);
+
+static void evtchn_bind_interdom_next_vcpu(int evtchn)
+{
+ unsigned int selected_cpu, irq;
+ struct irq_desc *desc;
+ unsigned long flags;
+
+ irq = irq_from_evtchn(evtchn);
+ desc = irq_to_desc(irq);
+
+ if (!desc)
+ return;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ selected_cpu = this_cpu_read(bind_last_selected_cpu);
+ selected_cpu = cpumask_next_and(selected_cpu,
+ desc->irq_common_data.affinity, cpu_online_mask);
+
+ if (unlikely(selected_cpu >= nr_cpu_ids))
+ selected_cpu = cpumask_first_and(desc->irq_common_data.affinity,
+ cpu_online_mask);
+
+ this_cpu_write(bind_last_selected_cpu, selected_cpu);
+
+ /* unmask expects irqs to be disabled */
+ xen_rebind_evtchn_to_cpu(evtchn, selected_cpu);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
static long evtchn_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -478,8 +508,10 @@ static long evtchn_ioctl(struct file *file,
break;
rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
- if (rc == 0)
+ if (rc == 0) {
rc = bind_interdomain.local_port;
+ evtchn_bind_interdom_next_vcpu(rc);
+ }
break;
}
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 9e35032351a0..c425d03d37d2 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -278,8 +278,16 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
err = xenbus_transaction_start(&xbt);
if (err)
return;
- if (xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key) < 0) {
- pr_err("Unable to read sysrq code in control/sysrq\n");
+ err = xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key);
+ if (err < 0) {
+ /*
+ * The Xenstore watch fires directly after registering it and
+ * after a suspend/resume cycle. So ENOENT is not an error, but
+ * might happen in those cases.
+ */
+ if (err != -ENOENT)
+ pr_err("Error %d reading sysrq code in control/sysrq\n",
+ err);
xenbus_transaction_end(xbt, 1);
return;
}
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 8dab0d3dc172..82fc54f8eb77 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -67,6 +67,8 @@ static unsigned long dma_alloc_coherent_mask(struct device *dev,
}
#endif
+#define XEN_SWIOTLB_ERROR_CODE (~(dma_addr_t)0x0)
+
static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
@@ -295,7 +297,8 @@ error:
free_pages((unsigned long)xen_io_tlb_start, order);
return rc;
}
-void *
+
+static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
unsigned long attrs)
@@ -346,9 +349,8 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
memset(ret, 0, size);
return ret;
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
-void
+static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dev_addr, unsigned long attrs)
{
@@ -369,8 +371,6 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
-
/*
* Map a single buffer of the indicated size for DMA in streaming mode. The
@@ -379,7 +379,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
* Once the device is given the dma address, the device owns this memory until
* either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
*/
-dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
+static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
@@ -412,7 +412,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
attrs);
if (map == SWIOTLB_MAP_ERROR)
- return DMA_ERROR_CODE;
+ return XEN_SWIOTLB_ERROR_CODE;
dev_addr = xen_phys_to_bus(map);
xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
@@ -427,9 +427,8 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
attrs |= DMA_ATTR_SKIP_CPU_SYNC;
swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
- return DMA_ERROR_CODE;
+ return XEN_SWIOTLB_ERROR_CODE;
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
@@ -467,13 +466,12 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
dma_mark_clean(phys_to_virt(paddr), size);
}
-void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);
/*
* Make physical memory consistent for a single streaming mode DMA translation
@@ -516,7 +514,6 @@ xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
{
xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);
void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
@@ -524,7 +521,25 @@ xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
{
xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);
+
+/*
+ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
+ * concerning calls here are the same as for swiotlb_unmap_page() above.
+ */
+static void
+xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(dir == DMA_NONE);
+
+ for_each_sg(sgl, sg, nelems, i)
+ xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
+
+}
/*
* Map a set of buffers described by scatterlist in streaming mode for DMA.
@@ -542,7 +557,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);
* Device ownership issues as mentioned above for xen_swiotlb_map_page are the
* same here.
*/
-int
+static int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
unsigned long attrs)
@@ -599,27 +614,6 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
}
return nelems;
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);
-
-/*
- * Unmap a set of streaming mode DMA translations. Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_page() above.
- */
-void
-xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(dir == DMA_NONE);
-
- for_each_sg(sgl, sg, nelems, i)
- xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
-
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);
/*
* Make physical memory consistent for a set of streaming mode DMA translations
@@ -641,21 +635,19 @@ xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
sg_dma_len(sg), dir, target);
}
-void
+static void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);
-void
+static void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
/*
* Return whether the given device DMA address mask can be supported
@@ -663,31 +655,18 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
* during bus mastering, then you would pass 0x00ffffff as the mask to
* this function.
*/
-int
+static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);
-
-int
-xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
-{
- if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
- return -EIO;
-
- *dev->dma_mask = dma_mask;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
/*
* Create userspace mapping for the DMA-coherent memory.
* This function should be called with the pages from the current domain only,
* passing pages mapped from other domains would lead to memory corruption.
*/
-int
+static int
xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
@@ -699,13 +678,12 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
#endif
return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
/*
* This function should be called with the pages from the current domain only,
* passing pages mapped from other domains would lead to memory corruption.
*/
-int
+static int
xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size,
unsigned long attrs)
@@ -727,4 +705,25 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
#endif
return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
}
-EXPORT_SYMBOL_GPL(xen_swiotlb_get_sgtable);
+
+static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == XEN_SWIOTLB_ERROR_CODE;
+}
+
+const struct dma_map_ops xen_swiotlb_dma_ops = {
+ .alloc = xen_swiotlb_alloc_coherent,
+ .free = xen_swiotlb_free_coherent,
+ .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = xen_swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
+ .map_sg = xen_swiotlb_map_sg_attrs,
+ .unmap_sg = xen_swiotlb_unmap_sg_attrs,
+ .map_page = xen_swiotlb_map_page,
+ .unmap_page = xen_swiotlb_unmap_page,
+ .dma_supported = xen_swiotlb_dma_supported,
+ .mmap = xen_swiotlb_dma_mmap,
+ .get_sgtable = xen_swiotlb_get_sgtable,
+ .mapping_error = xen_swiotlb_mapping_error,
+};
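With the Xen swiotlb entry points now static and map failures reported through the new .mapping_error hook, callers are expected to reach this code only via the generic DMA API. A minimal sketch of that driver-side usage (not part of this diff; example_map_one, dev and page are hypothetical):

#include <linux/dma-mapping.h>

/* hypothetical helper inside a Xen-backed driver */
static int example_map_one(struct device *dev, struct page *page)
{
	dma_addr_t addr;

	addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))	/* dispatches to xen_swiotlb_mapping_error() */
		return -ENOMEM;

	/* ... program the device with addr and wait for it to finish ... */

	dma_unmap_page(dev, addr, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}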
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
index 84106f9c456c..9d314bba7c4e 100644
--- a/drivers/xen/sys-hypervisor.c
+++ b/drivers/xen/sys-hypervisor.c
@@ -50,6 +50,35 @@ static int __init xen_sysfs_type_init(void)
return sysfs_create_file(hypervisor_kobj, &type_attr.attr);
}
+static ssize_t guest_type_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ const char *type;
+
+ switch (xen_domain_type) {
+ case XEN_NATIVE:
+ /* ARM only. */
+ type = "Xen";
+ break;
+ case XEN_PV_DOMAIN:
+ type = "PV";
+ break;
+ case XEN_HVM_DOMAIN:
+ type = xen_pvh_domain() ? "PVH" : "HVM";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sprintf(buffer, "%s\n", type);
+}
+
+HYPERVISOR_ATTR_RO(guest_type);
+
+static int __init xen_sysfs_guest_type_init(void)
+{
+ return sysfs_create_file(hypervisor_kobj, &guest_type_attr.attr);
+}
+
/* xen version attributes */
static ssize_t major_show(struct hyp_sysfs_attr *attr, char *buffer)
{
@@ -327,12 +356,40 @@ static ssize_t features_show(struct hyp_sysfs_attr *attr, char *buffer)
HYPERVISOR_ATTR_RO(features);
+static ssize_t buildid_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ ssize_t ret;
+ struct xen_build_id *buildid;
+
+ ret = HYPERVISOR_xen_version(XENVER_build_id, NULL);
+ if (ret < 0) {
+ if (ret == -EPERM)
+ ret = sprintf(buffer, "<denied>");
+ return ret;
+ }
+
+ buildid = kmalloc(sizeof(*buildid) + ret, GFP_KERNEL);
+ if (!buildid)
+ return -ENOMEM;
+
+ buildid->len = ret;
+ ret = HYPERVISOR_xen_version(XENVER_build_id, buildid);
+ if (ret > 0)
+ ret = sprintf(buffer, "%s", buildid->buf);
+ kfree(buildid);
+
+ return ret;
+}
+
+HYPERVISOR_ATTR_RO(buildid);
+
static struct attribute *xen_properties_attrs[] = {
&capabilities_attr.attr,
&changeset_attr.attr,
&virtual_start_attr.attr,
&pagesize_attr.attr,
&features_attr.attr,
+ &buildid_attr.attr,
NULL
};
@@ -471,6 +528,9 @@ static int __init hyper_sysfs_init(void)
ret = xen_sysfs_type_init();
if (ret)
goto out;
+ ret = xen_sysfs_guest_type_init();
+ if (ret)
+ goto guest_type_out;
ret = xen_sysfs_version_init();
if (ret)
goto version_out;
@@ -502,6 +562,8 @@ uuid_out:
comp_out:
sysfs_remove_group(hypervisor_kobj, &version_group);
version_out:
+ sysfs_remove_file(hypervisor_kobj, &guest_type_attr.attr);
+guest_type_out:
sysfs_remove_file(hypervisor_kobj, &type_attr.attr);
out:
return ret;
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index 856ada5d39c9..5b081a01779d 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -299,17 +299,7 @@ static int process_msg(void)
mutex_lock(&xb_write_mutex);
list_for_each_entry(req, &xs_reply_list, list) {
if (req->msg.req_id == state.msg.req_id) {
- if (req->state == xb_req_state_wait_reply) {
- req->msg.type = state.msg.type;
- req->msg.len = state.msg.len;
- req->body = state.body;
- req->state = xb_req_state_got_reply;
- list_del(&req->list);
- req->cb(req);
- } else {
- list_del(&req->list);
- kfree(req);
- }
+ list_del(&req->list);
err = 0;
break;
}
@@ -317,6 +307,15 @@ static int process_msg(void)
mutex_unlock(&xb_write_mutex);
if (err)
goto out;
+
+ if (req->state == xb_req_state_wait_reply) {
+ req->msg.type = state.msg.type;
+ req->msg.len = state.msg.len;
+ req->body = state.body;
+ req->state = xb_req_state_got_reply;
+ req->cb(req);
+ } else
+ kfree(req);
}
mutex_unlock(&xs_response_mutex);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index a7df151f8aba..9941dc8342df 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -632,7 +632,7 @@ int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
struct block_device *bdev = I_BDEV(bd_inode);
int error;
- error = filemap_write_and_wait_range(filp->f_mapping, start, end);
+ error = file_write_and_wait_range(filp, start, end);
if (error)
return error;
@@ -1751,6 +1751,7 @@ static int blkdev_open(struct inode * inode, struct file * filp)
return -ENOMEM;
filp->f_mapping = bdev->bd_inode->i_mapping;
+ filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
return blkdev_get(bdev, filp->f_mode, filp);
}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5abcbdc743fa..086dcbadce09 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1256,9 +1256,9 @@ void clean_tree_block(struct btrfs_fs_info *fs_info,
btrfs_assert_tree_locked(buf);
if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
- __percpu_counter_add(&fs_info->dirty_metadata_bytes,
- -buf->len,
- fs_info->dirty_metadata_batch);
+ percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
+ -buf->len,
+ fs_info->dirty_metadata_batch);
/* ugh, clear_extent_buffer_dirty needs to lock the page */
btrfs_set_lock_blocking(buf);
clear_extent_buffer_dirty(buf);
@@ -4047,9 +4047,9 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
buf->start, transid, fs_info->generation);
was_dirty = set_extent_buffer_dirty(buf);
if (!was_dirty)
- __percpu_counter_add(&fs_info->dirty_metadata_bytes,
- buf->len,
- fs_info->dirty_metadata_batch);
+ percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
+ buf->len,
+ fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
btrfs_print_leaf(fs_info, buf);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7a18b5762ac9..556484cf5d93 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3577,9 +3577,9 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
spin_unlock(&eb->refs_lock);
btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
- __percpu_counter_add(&fs_info->dirty_metadata_bytes,
- -eb->len,
- fs_info->dirty_metadata_batch);
+ percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
+ -eb->len,
+ fs_info->dirty_metadata_batch);
ret = 1;
} else {
spin_unlock(&eb->refs_lock);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 24338702ea5b..a85d7903fbdd 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2032,7 +2032,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
struct btrfs_log_ctx ctx;
- int ret = 0;
+ int ret = 0, err;
bool full_sync = 0;
u64 len;
@@ -2051,7 +2051,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
*/
ret = start_ordered_ops(inode, start, end);
if (ret)
- return ret;
+ goto out;
inode_lock(inode);
atomic_inc(&root->log_batch);
@@ -2156,10 +2156,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* An ordered extent might have started before and completed
* already with io errors, in which case the inode was not
* updated and we end up here. So check the inode's mapping
- * flags for any errors that might have happened while doing
- * writeback of file data.
+ * for any errors that might have happened since we last
+ * called fsync.
*/
- ret = filemap_check_errors(inode->i_mapping);
+ ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
inode_unlock(inode);
goto out;
}
@@ -2248,6 +2248,9 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
ret = btrfs_end_transaction(trans);
}
out:
+ err = file_check_and_advance_wb_err(file);
+ if (!ret)
+ ret = err;
return ret > 0 ? -EIO : ret;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8d050314591c..06dea7c89bbd 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1763,8 +1763,8 @@ static void btrfs_set_bit_hook(void *private_data,
if (btrfs_is_testing(fs_info))
return;
- __percpu_counter_add(&fs_info->delalloc_bytes, len,
- fs_info->delalloc_batch);
+ percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
+ fs_info->delalloc_batch);
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->delalloc_bytes += len;
if (*bits & EXTENT_DEFRAG)
@@ -1838,8 +1838,8 @@ static void btrfs_clear_bit_hook(void *private_data,
&inode->vfs_inode,
state->start, len);
- __percpu_counter_add(&fs_info->delalloc_bytes, -len,
- fs_info->delalloc_batch);
+ percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
+ fs_info->delalloc_batch);
spin_lock(&inode->lock);
inode->delalloc_bytes -= len;
if (do_list && inode->delalloc_bytes == 0 &&
diff --git a/fs/buffer.c b/fs/buffer.c
index 5c2cba8d2387..5234b15377c2 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -178,7 +178,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
set_buffer_uptodate(bh);
} else {
buffer_io_error(bh, ", lost sync page write");
- set_buffer_write_io_error(bh);
+ mark_buffer_write_io_error(bh);
clear_buffer_uptodate(bh);
}
unlock_buffer(bh);
@@ -352,8 +352,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
set_buffer_uptodate(bh);
} else {
buffer_io_error(bh, ", lost async page write");
- mapping_set_error(page->mapping, -EIO);
- set_buffer_write_io_error(bh);
+ mark_buffer_write_io_error(bh);
clear_buffer_uptodate(bh);
SetPageError(page);
}
@@ -481,8 +480,6 @@ static void __remove_assoc_queue(struct buffer_head *bh)
{
list_del_init(&bh->b_assoc_buffers);
WARN_ON(!bh->b_assoc_map);
- if (buffer_write_io_error(bh))
- set_bit(AS_EIO, &bh->b_assoc_map->flags);
bh->b_assoc_map = NULL;
}
@@ -1181,6 +1178,17 @@ void mark_buffer_dirty(struct buffer_head *bh)
}
EXPORT_SYMBOL(mark_buffer_dirty);
+void mark_buffer_write_io_error(struct buffer_head *bh)
+{
+ set_buffer_write_io_error(bh);
+ /* FIXME: do we need to set this in both places? */
+ if (bh->b_page && bh->b_page->mapping)
+ mapping_set_error(bh->b_page->mapping, -EIO);
+ if (bh->b_assoc_map)
+ mapping_set_error(bh->b_assoc_map, -EIO);
+}
+EXPORT_SYMBOL(mark_buffer_write_io_error);
+
/*
* Decrement a buffer_head's reference count. If all buffers against a page
* have zero reference count, are clean and unlocked, and if the page is clean
@@ -3282,8 +3290,6 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
bh = head;
do {
- if (buffer_write_io_error(bh) && page->mapping)
- mapping_set_error(page->mapping, -EIO);
if (buffer_busy(bh))
goto failed;
bh = bh->b_this_page;
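mark_buffer_write_io_error() added above folds the old set_buffer_write_io_error() plus mapping_set_error() pair into one call, as the end_buffer_write_sync() and end_buffer_async_write() hunks show. A filesystem-private completion handler would follow the same shape; a minimal sketch under that assumption (examplefs_end_write is a hypothetical name, not from this diff):

#include <linux/buffer_head.h>

/* hypothetical write-completion handler for an imaginary filesystem */
static void examplefs_end_write(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* records the failure in bh, its page mapping and b_assoc_map */
		mark_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}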
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 034f00f21390..afeefe79c25e 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -146,6 +146,15 @@ config CIFS_DEBUG2
option can be turned off unless you are debugging
cifs problems. If unsure, say N.
+config CIFS_DEBUG_DUMP_KEYS
+ bool "Dump encryption keys for offline decryption (Unsafe)"
+ depends on CIFS_DEBUG && CIFS_SMB2
+ help
+ Enabling this will dump the encryption and decryption keys
+ used to communicate on an encrypted share connection on the
+ console. This allows Wireshark to decrypt and dissect
+ encrypted network captures. Enable this carefully.
+
config CIFS_DFS_UPCALL
bool "DFS feature support"
depends on CIFS && KEYS
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index a0b3e7d1be48..e0445e2075b2 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -79,6 +79,10 @@ convert_sfu_char(const __u16 src_char, char *target)
static bool
convert_sfm_char(const __u16 src_char, char *target)
{
+ if (src_char >= 0xF001 && src_char <= 0xF01F) {
+ *target = src_char - 0xF000;
+ return true;
+ }
switch (src_char) {
case SFM_COLON:
*target = ':';
@@ -417,6 +421,10 @@ static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
{
__le16 dest_char;
+ if (src_char >= 0x01 && src_char <= 0x1F) {
+ dest_char = cpu_to_le16(src_char + 0xF000);
+ return dest_char;
+ }
switch (src_char) {
case ':':
dest_char = cpu_to_le16(SFM_COLON);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index fcef70602b27..bc09df6b473a 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2234,14 +2234,16 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
set_page_writeback(page);
retry_write:
rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
- if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
- goto retry_write;
- else if (rc == -EAGAIN)
+ if (rc == -EAGAIN) {
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ goto retry_write;
redirty_page_for_writepage(wbc, page);
- else if (rc != 0)
+ } else if (rc != 0) {
SetPageError(page);
- else
+ mapping_set_error(page->mapping, rc);
+ } else {
SetPageUptodate(page);
+ }
end_page_writeback(page);
put_page(page);
free_xid(xid);
@@ -2810,12 +2812,12 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
ssize_t rc;
+ inode_lock(inode);
/*
* We need to hold the sem to be sure nobody modifies lock list
* with a brlock that prevents writing.
*/
down_read(&cinode->lock_sem);
- inode_lock(inode);
rc = generic_write_checks(iocb, from);
if (rc <= 0)
@@ -2828,11 +2830,11 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
else
rc = -EACCES;
out:
+ up_read(&cinode->lock_sem);
inode_unlock(inode);
if (rc > 0)
rc = generic_write_sync(iocb, rc);
- up_read(&cinode->lock_sem);
return rc;
}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 7e48561abd29..ccbb397debbc 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1288,6 +1288,108 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
}
+#ifdef CONFIG_CIFS_ACL
+static struct cifs_ntsd *
+get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
+ const struct cifs_fid *cifsfid, u32 *pacllen)
+{
+ struct cifs_ntsd *pntsd = NULL;
+ unsigned int xid;
+ int rc = -EOPNOTSUPP;
+ struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+
+ if (IS_ERR(tlink))
+ return ERR_CAST(tlink);
+
+ xid = get_xid();
+ cifs_dbg(FYI, "trying to get acl\n");
+
+ rc = SMB2_query_acl(xid, tlink_tcon(tlink), cifsfid->persistent_fid,
+ cifsfid->volatile_fid, (void **)&pntsd, pacllen);
+ free_xid(xid);
+
+ cifs_put_tlink(tlink);
+
+ cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
+ if (rc)
+ return ERR_PTR(rc);
+ return pntsd;
+
+}
+
+static struct cifs_ntsd *
+get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
+ const char *path, u32 *pacllen)
+{
+ struct cifs_ntsd *pntsd = NULL;
+ u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+ unsigned int xid;
+ int rc;
+ struct cifs_tcon *tcon;
+ struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+ struct cifs_fid fid;
+ struct cifs_open_parms oparms;
+ __le16 *utf16_path;
+
+ cifs_dbg(FYI, "get smb3 acl for path %s\n", path);
+ if (IS_ERR(tlink))
+ return ERR_CAST(tlink);
+
+ tcon = tlink_tcon(tlink);
+ xid = get_xid();
+
+ if (backup_cred(cifs_sb))
+ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+ else
+ oparms.create_options = 0;
+
+ utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
+ if (!utf16_path)
+ return ERR_PTR(-ENOMEM);
+
+ oparms.tcon = tcon;
+ oparms.desired_access = READ_CONTROL;
+ oparms.disposition = FILE_OPEN;
+ oparms.fid = &fid;
+ oparms.reconnect = false;
+
+ rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
+ kfree(utf16_path);
+ if (!rc) {
+ rc = SMB2_query_acl(xid, tlink_tcon(tlink), fid.persistent_fid,
+ fid.volatile_fid, (void **)&pntsd, pacllen);
+ SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+ }
+
+ cifs_put_tlink(tlink);
+ free_xid(xid);
+
+ cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
+ if (rc)
+ return ERR_PTR(rc);
+ return pntsd;
+}
+
+/* Retrieve an ACL from the server */
+static struct cifs_ntsd *
+get_smb2_acl(struct cifs_sb_info *cifs_sb,
+ struct inode *inode, const char *path,
+ u32 *pacllen)
+{
+ struct cifs_ntsd *pntsd = NULL;
+ struct cifsFileInfo *open_file = NULL;
+
+ if (inode)
+ open_file = find_readable_file(CIFS_I(inode), true);
+ if (!open_file)
+ return get_smb2_acl_by_path(cifs_sb, path, pacllen);
+
+ pntsd = get_smb2_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
+ cifsFileInfo_put(open_file);
+ return pntsd;
+}
+#endif
+
static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
loff_t offset, loff_t len, bool keep_size)
{
@@ -2393,6 +2495,11 @@ struct smb_version_operations smb20_operations = {
.dir_needs_close = smb2_dir_needs_close,
.get_dfs_refer = smb2_get_dfs_refer,
.select_sectype = smb2_select_sectype,
+#ifdef CONFIG_CIFS_ACL
+ .get_acl = get_smb2_acl,
+ .get_acl_by_fid = get_smb2_acl_by_fid,
+/* .set_acl = set_smb3_acl, */
+#endif /* CIFS_ACL */
};
struct smb_version_operations smb21_operations = {
@@ -2477,6 +2584,11 @@ struct smb_version_operations smb21_operations = {
.enum_snapshots = smb3_enum_snapshots,
.get_dfs_refer = smb2_get_dfs_refer,
.select_sectype = smb2_select_sectype,
+#ifdef CONFIG_CIFS_ACL
+ .get_acl = get_smb2_acl,
+ .get_acl_by_fid = get_smb2_acl_by_fid,
+/* .set_acl = set_smb3_acl, */
+#endif /* CIFS_ACL */
};
struct smb_version_operations smb30_operations = {
@@ -2571,6 +2683,11 @@ struct smb_version_operations smb30_operations = {
.receive_transform = smb3_receive_transform,
.get_dfs_refer = smb2_get_dfs_refer,
.select_sectype = smb2_select_sectype,
+#ifdef CONFIG_CIFS_ACL
+ .get_acl = get_smb2_acl,
+ .get_acl_by_fid = get_smb2_acl_by_fid,
+/* .set_acl = set_smb3_acl, */
+#endif /* CIFS_ACL */
};
#ifdef CONFIG_CIFS_SMB311
@@ -2753,7 +2870,7 @@ struct smb_version_values smb302_values = {
struct smb_version_values smb311_values = {
.version_string = SMB311_VERSION_STRING,
.protocol_id = SMB311_PROT_ID,
- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES,
+ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
.large_lock_type = 0,
.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
.shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index e4afdaae743f..4938e8b6d32f 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2081,8 +2081,9 @@ validate_and_copy_buf(unsigned int offset, unsigned int buffer_length,
static int
query_info(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid, u8 info_class,
- size_t output_len, size_t min_len, void *data)
+ u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type,
+ u32 additional_info, size_t output_len, size_t min_len, void **data,
+ u32 *dlen)
{
struct smb2_query_info_req *req;
struct smb2_query_info_rsp *rsp = NULL;
@@ -2108,10 +2109,11 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
if (encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- req->InfoType = SMB2_O_INFO_FILE;
+ req->InfoType = info_type;
req->FileInfoClass = info_class;
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
+ req->AdditionalInformation = cpu_to_le32(additional_info);
/* 4 for rfc1002 length field and 1 for Buffer */
req->InputBufferOffset =
cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
@@ -2130,24 +2132,51 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
goto qinf_exit;
}
+ if (dlen) {
+ *dlen = le32_to_cpu(rsp->OutputBufferLength);
+ if (!*data) {
+ *data = kmalloc(*dlen, GFP_KERNEL);
+ if (!*data) {
+ cifs_dbg(VFS,
+ "Error %d allocating memory for acl\n",
+ rc);
+ *dlen = 0;
+ goto qinf_exit;
+ }
+ }
+ }
+
rc = validate_and_copy_buf(le16_to_cpu(rsp->OutputBufferOffset),
le32_to_cpu(rsp->OutputBufferLength),
- &rsp->hdr, min_len, data);
+ &rsp->hdr, min_len, *data);
qinf_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
}
+int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data)
+{
+ return query_info(xid, tcon, persistent_fid, volatile_fid,
+ FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0,
+ sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
+ sizeof(struct smb2_file_all_info), (void **)&data,
+ NULL);
+}
+
int
-SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
+SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
- struct smb2_file_all_info *data)
+ void **data, u32 *plen)
{
+ __u32 additional_info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO;
+ *plen = 0;
+
return query_info(xid, tcon, persistent_fid, volatile_fid,
- FILE_ALL_INFORMATION,
- sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
- sizeof(struct smb2_file_all_info), data);
+ 0, SMB2_O_INFO_SECURITY, additional_info,
+ SMB2_MAX_BUFFER_SIZE,
+ sizeof(struct smb2_file_all_info), data, plen);
}
int
@@ -2155,9 +2184,10 @@ SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
{
return query_info(xid, tcon, persistent_fid, volatile_fid,
- FILE_INTERNAL_INFORMATION,
+ FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0,
+ sizeof(struct smb2_file_internal_info),
sizeof(struct smb2_file_internal_info),
- sizeof(struct smb2_file_internal_info), uniqueid);
+ (void **)&uniqueid, NULL);
}
/*
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 6853454fc871..3595cd755147 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -135,6 +135,9 @@ extern int SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon,
extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
struct smb2_file_all_info *data);
+extern int SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_file_id, u64 volatile_file_id,
+ void **data, unsigned int *plen);
extern int SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
__le64 *uniqueid);
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index c69ec96e92ac..67367cf1f8cd 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -335,9 +335,31 @@ generate_smb3signingkey(struct cifs_ses *ses,
if (rc)
return rc;
- return generate_key(ses, ptriplet->decryption.label,
- ptriplet->decryption.context,
- ses->smb3decryptionkey, SMB3_SIGN_KEY_SIZE);
+ rc = generate_key(ses, ptriplet->decryption.label,
+ ptriplet->decryption.context,
+ ses->smb3decryptionkey, SMB3_SIGN_KEY_SIZE);
+
+ if (rc)
+ return rc;
+
+#ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
+ cifs_dbg(VFS, "%s: dumping generated AES session keys\n", __func__);
+ /*
+ * The session id is opaque in terms of endianness, so we can't
+ * print it as a long long. We dump it as we got it on the wire.
+ */
+ cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid),
+ &ses->Suid);
+ cifs_dbg(VFS, "Session Key %*ph\n",
+ SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
+ cifs_dbg(VFS, "Signing Key %*ph\n",
+ SMB3_SIGN_KEY_SIZE, ses->smb3signingkey);
+ cifs_dbg(VFS, "ServerIn Key %*ph\n",
+ SMB3_SIGN_KEY_SIZE, ses->smb3encryptionkey);
+ cifs_dbg(VFS, "ServerOut Key %*ph\n",
+ SMB3_SIGN_KEY_SIZE, ses->smb3decryptionkey);
+#endif
+ return rc;
}
int
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 47a125ece11e..7efbab013957 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -536,11 +536,14 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
list_add_tail(&mid->qhead, &server->pending_mid_q);
spin_unlock(&GlobalMid_Lock);
-
+ /*
+ * Need to store the time in mid before calling I/O. For call_async,
+ * I/O response may come back and free the mid entry on another thread.
+ */
+ cifs_save_when_sent(mid);
cifs_in_send_inc(server);
rc = smb_send_rqst(server, rqst, flags);
cifs_in_send_dec(server);
- cifs_save_when_sent(mid);
if (rc < 0) {
server->sequence_number -= 2;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 112b3e1e20e3..2dd4a7af7dd7 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -739,23 +739,22 @@ static int do_i2c_smbus_ioctl(struct file *file,
unsigned int cmd, struct i2c_smbus_ioctl_data32 __user *udata)
{
struct i2c_smbus_ioctl_data __user *tdata;
- compat_caddr_t datap;
+ union {
+ /* beginnings of those have identical layouts */
+ struct i2c_smbus_ioctl_data32 data32;
+ struct i2c_smbus_ioctl_data data;
+ } v;
tdata = compat_alloc_user_space(sizeof(*tdata));
if (tdata == NULL)
return -ENOMEM;
- if (!access_ok(VERIFY_WRITE, tdata, sizeof(*tdata)))
- return -EFAULT;
- if (!access_ok(VERIFY_READ, udata, sizeof(*udata)))
+ memset(&v, 0, sizeof(v));
+ if (copy_from_user(&v.data32, udata, sizeof(v.data32)))
return -EFAULT;
+ v.data.data = compat_ptr(v.data32.data);
- if (__copy_in_user(&tdata->read_write, &udata->read_write, 2 * sizeof(u8)))
- return -EFAULT;
- if (__copy_in_user(&tdata->size, &udata->size, 2 * sizeof(u32)))
- return -EFAULT;
- if (__get_user(datap, &udata->data) ||
- __put_user(compat_ptr(datap), &tdata->data))
+ if (copy_to_user(tdata, &v.data, sizeof(v.data)))
return -EFAULT;
return do_ioctl(file, cmd, (unsigned long)tdata);
diff --git a/fs/dax.c b/fs/dax.c
index 33d05aa02aad..306c2b603fb8 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -25,7 +25,6 @@
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
-#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
@@ -784,7 +783,7 @@ static int dax_writeback_one(struct block_device *bdev,
}
dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
- wb_cache_pmem(kaddr, size);
+ dax_flush(dax_dev, pgoff, kaddr, size);
/*
* After we have flushed the cache, we can clear the dirty tag. There
* cannot be new dirty data in the pfn after the flush has completed as
@@ -856,8 +855,10 @@ int dax_writeback_mapping_range(struct address_space *mapping,
ret = dax_writeback_one(bdev, dax_dev, mapping,
indices[i], pvec.pages[i]);
- if (ret < 0)
+ if (ret < 0) {
+ mapping_set_error(mapping, ret);
goto out;
+ }
}
start_index = indices[pvec.nr - 1] + 1;
}
@@ -976,7 +977,8 @@ int __dax_zero_page_range(struct block_device *bdev,
dax_read_unlock(id);
return rc;
}
- clear_pmem(kaddr + offset, size);
+ memset(kaddr + offset, 0, size);
+ dax_flush(dax_dev, pgoff, kaddr + offset, size);
dax_read_unlock(id);
}
return 0;
@@ -1055,7 +1057,8 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
map_len = end - pos;
if (iov_iter_rw(iter) == WRITE)
- map_len = copy_from_iter_pmem(kaddr, map_len, iter);
+ map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
+ map_len, iter);
else
map_len = copy_to_iter(kaddr, map_len, iter);
if (map_len <= 0) {
@@ -1213,7 +1216,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf,
case IOMAP_MAPPED:
if (iomap.flags & IOMAP_F_NEW) {
count_vm_event(PGMAJFAULT);
- mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
+ count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
major = VM_FAULT_MAJOR;
}
error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev,
diff --git a/fs/dcache.c b/fs/dcache.c
index a9f995f6859e..7ece68d0d4db 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -277,6 +277,33 @@ static inline int dname_external(const struct dentry *dentry)
return dentry->d_name.name != dentry->d_iname;
}
+void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
+{
+ spin_lock(&dentry->d_lock);
+ if (unlikely(dname_external(dentry))) {
+ struct external_name *p = external_name(dentry);
+ atomic_inc(&p->u.count);
+ spin_unlock(&dentry->d_lock);
+ name->name = p->name;
+ } else {
+ memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
+ spin_unlock(&dentry->d_lock);
+ name->name = name->inline_name;
+ }
+}
+EXPORT_SYMBOL(take_dentry_name_snapshot);
+
+void release_dentry_name_snapshot(struct name_snapshot *name)
+{
+ if (unlikely(name->name != name->inline_name)) {
+ struct external_name *p;
+ p = container_of(name->name, struct external_name, name[0]);
+ if (unlikely(atomic_dec_and_test(&p->u.count)))
+ kfree_rcu(p, u.head);
+ }
+}
+EXPORT_SYMBOL(release_dentry_name_snapshot);
+
static inline void __d_set_inode_and_type(struct dentry *dentry,
struct inode *inode,
unsigned type_flags)
@@ -3546,8 +3573,6 @@ __setup("dhash_entries=", set_dhash_entries);
static void __init dcache_init_early(void)
{
- unsigned int loop;
-
/* If hashes are distributed across NUMA nodes, defer
* hash allocation until vmalloc space is available.
*/
@@ -3559,24 +3584,19 @@ static void __init dcache_init_early(void)
sizeof(struct hlist_bl_head),
dhash_entries,
13,
- HASH_EARLY,
+ HASH_EARLY | HASH_ZERO,
&d_hash_shift,
&d_hash_mask,
0,
0);
-
- for (loop = 0; loop < (1U << d_hash_shift); loop++)
- INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
static void __init dcache_init(void)
{
- unsigned int loop;
-
- /*
+ /*
* A constructor could be added for stable state like the lists,
* but it is probably not worth it because of the cache nature
- * of the dcache.
+ * of the dcache.
*/
dentry_cache = KMEM_CACHE(dentry,
SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT);
@@ -3590,14 +3610,11 @@ static void __init dcache_init(void)
sizeof(struct hlist_bl_head),
dhash_entries,
13,
- 0,
+ HASH_ZERO,
&d_hash_shift,
&d_hash_mask,
0,
0);
-
- for (loop = 0; loop < (1U << d_hash_shift); loop++)
- INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
/* SLAB cache for __getname() consumers */
@@ -3608,6 +3625,11 @@ EXPORT_SYMBOL(d_genocide);
void __init vfs_caches_init_early(void)
{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
+ INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
+
dcache_init_early();
inode_init_early();
}
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 77440e4aa9d4..a0e4e2f7e0be 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -766,7 +766,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
{
int error;
struct dentry *dentry = NULL, *trap;
- const char *old_name;
+ struct name_snapshot old_name;
trap = lock_rename(new_dir, old_dir);
/* Source or destination directories don't exist? */
@@ -781,19 +781,19 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry))
goto exit;
- old_name = fsnotify_oldname_init(old_dentry->d_name.name);
+ take_dentry_name_snapshot(&old_name, old_dentry);
error = simple_rename(d_inode(old_dir), old_dentry, d_inode(new_dir),
dentry, 0);
if (error) {
- fsnotify_oldname_free(old_name);
+ release_dentry_name_snapshot(&old_name);
goto exit;
}
d_move(old_dentry, dentry);
- fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name,
+ fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name.name,
d_is_dir(old_dentry),
NULL, old_dentry);
- fsnotify_oldname_free(old_name);
+ release_dentry_name_snapshot(&old_name);
unlock_rename(new_dir, old_dir);
dput(dentry);
return old_dentry;
diff --git a/fs/exec.c b/fs/exec.c
index 904199086490..62175cbcc801 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -220,8 +220,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
if (write) {
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
- unsigned long ptr_size;
- struct rlimit *rlim;
+ unsigned long ptr_size, limit;
/*
* Since the stack will hold pointers to the strings, we
@@ -250,14 +249,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
return page;
/*
- * Limit to 1/4-th the stack size for the argv+env strings.
+ * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
+ * (whichever is smaller) for the argv+env strings.
* This ensures that:
* - the remaining binfmt code will not run out of stack space,
* - the program will have a reasonable amount of stack left
* to work from.
*/
- rlim = current->signal->rlim;
- if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4)
+ limit = _STK_LIM / 4 * 3;
+ limit = min(limit, rlimit(RLIMIT_STACK) / 4);
+ if (size > limit)
goto fail;
}
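A worked example of the new clamp, not part of the patch: _STK_LIM is 8 MiB, so with the common 8 MiB RLIMIT_STACK default the argv+env budget is min(3/4 * 8 MiB, 8 MiB / 4) = min(6 MiB, 2 MiB) = 2 MiB, the same as before; raising RLIMIT_STACK to 1 GiB now yields min(6 MiB, 256 MiB) = 6 MiB instead of 256 MiB, which is the point of capping against _STK_LIM.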
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index 8eeb694332fe..98233a97b7b8 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -72,7 +72,7 @@ static int exofs_commit_chunk(struct page *page, loff_t pos, unsigned len)
set_page_dirty(page);
if (IS_DIRSYNC(dir))
- err = write_one_page(page, 1);
+ err = write_one_page(page);
else
unlock_page(page);
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index d9650c9508e4..e2709695b177 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -100,7 +100,7 @@ static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len)
}
if (IS_DIRSYNC(dir)) {
- err = write_one_page(page, 1);
+ err = write_one_page(page);
if (!err)
err = sync_inode_metadata(dir, 1);
} else {
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index b21891a6bfca..d34d32bdc944 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -174,15 +174,12 @@ int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
int ret;
struct super_block *sb = file->f_mapping->host->i_sb;
- struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
ret = generic_file_fsync(file, start, end, datasync);
- if (ret == -EIO || test_and_clear_bit(AS_EIO, &mapping->flags)) {
+ if (ret == -EIO)
/* We don't really know where the IO error happened... */
ext2_error(sb, __func__,
"detected IO error when writing metadata buffers");
- ret = -EIO;
- }
return ret;
}
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 9d549608fd30..aae2c3971cef 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -124,7 +124,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
goto out;
}
- ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ ret = file_write_and_wait_range(file, start, end);
if (ret)
return ret;
/*
diff --git a/fs/fcntl.c b/fs/fcntl.c
index b6bd89628025..3b01b646e528 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -520,50 +520,50 @@ out:
#ifdef CONFIG_COMPAT
/* careful - don't use anywhere else */
-#define copy_flock_fields(from, to) \
- (to).l_type = (from).l_type; \
- (to).l_whence = (from).l_whence; \
- (to).l_start = (from).l_start; \
- (to).l_len = (from).l_len; \
- (to).l_pid = (from).l_pid;
-
-static int get_compat_flock(struct flock *kfl, struct compat_flock __user *ufl)
+#define copy_flock_fields(dst, src) \
+ (dst)->l_type = (src)->l_type; \
+ (dst)->l_whence = (src)->l_whence; \
+ (dst)->l_start = (src)->l_start; \
+ (dst)->l_len = (src)->l_len; \
+ (dst)->l_pid = (src)->l_pid;
+
+static int get_compat_flock(struct flock *kfl, const struct compat_flock __user *ufl)
{
struct compat_flock fl;
if (copy_from_user(&fl, ufl, sizeof(struct compat_flock)))
return -EFAULT;
- copy_flock_fields(*kfl, fl);
+ copy_flock_fields(kfl, &fl);
return 0;
}
-static int get_compat_flock64(struct flock *kfl, struct compat_flock64 __user *ufl)
+static int get_compat_flock64(struct flock *kfl, const struct compat_flock64 __user *ufl)
{
struct compat_flock64 fl;
if (copy_from_user(&fl, ufl, sizeof(struct compat_flock64)))
return -EFAULT;
- copy_flock_fields(*kfl, fl);
+ copy_flock_fields(kfl, &fl);
return 0;
}
-static int put_compat_flock(struct flock *kfl, struct compat_flock __user *ufl)
+static int put_compat_flock(const struct flock *kfl, struct compat_flock __user *ufl)
{
struct compat_flock fl;
memset(&fl, 0, sizeof(struct compat_flock));
- copy_flock_fields(fl, *kfl);
+ copy_flock_fields(&fl, kfl);
if (copy_to_user(ufl, &fl, sizeof(struct compat_flock)))
return -EFAULT;
return 0;
}
-static int put_compat_flock64(struct flock *kfl, struct compat_flock64 __user *ufl)
+static int put_compat_flock64(const struct flock *kfl, struct compat_flock64 __user *ufl)
{
struct compat_flock64 fl;
memset(&fl, 0, sizeof(struct compat_flock64));
- copy_flock_fields(fl, *kfl);
+ copy_flock_fields(&fl, kfl);
if (copy_to_user(ufl, &fl, sizeof(struct compat_flock64)))
return -EFAULT;
return 0;
diff --git a/fs/file.c b/fs/file.c
index 1c2972e3a405..1fc7fbbb4510 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -30,21 +30,6 @@ unsigned int sysctl_nr_open_min = BITS_PER_LONG;
unsigned int sysctl_nr_open_max =
__const_min(INT_MAX, ~(size_t)0/sizeof(void *)) & -BITS_PER_LONG;
-static void *alloc_fdmem(size_t size)
-{
- /*
- * Very large allocations can stress page reclaim, so fall back to
- * vmalloc() if the allocation size will be considered "large" by the VM.
- */
- if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
- void *data = kmalloc(size, GFP_KERNEL_ACCOUNT |
- __GFP_NOWARN | __GFP_NORETRY);
- if (data != NULL)
- return data;
- }
- return __vmalloc(size, GFP_KERNEL_ACCOUNT, PAGE_KERNEL);
-}
-
static void __free_fdtable(struct fdtable *fdt)
{
kvfree(fdt->fd);
@@ -131,13 +116,14 @@ static struct fdtable * alloc_fdtable(unsigned int nr)
if (!fdt)
goto out;
fdt->max_fds = nr;
- data = alloc_fdmem(nr * sizeof(struct file *));
+ data = kvmalloc_array(nr, sizeof(struct file *), GFP_KERNEL_ACCOUNT);
if (!data)
goto out_fdt;
fdt->fd = data;
- data = alloc_fdmem(max_t(size_t,
- 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES));
+ data = kvmalloc(max_t(size_t,
+ 2 * nr / BITS_PER_BYTE + BITBIT_SIZE(nr), L1_CACHE_BYTES),
+ GFP_KERNEL_ACCOUNT);
if (!data)
goto out_arr;
fdt->open_fds = data;
diff --git a/fs/file_table.c b/fs/file_table.c
index 954d510b765a..72e861a35a7f 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -168,6 +168,7 @@ struct file *alloc_file(const struct path *path, fmode_t mode,
file->f_path = *path;
file->f_inode = path->dentry->d_inode;
file->f_mapping = path->dentry->d_inode->i_mapping;
+ file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
if ((mode & FMODE_READ) &&
likely(fop->read || fop->read_iter))
mode |= FMODE_CAN_READ;
diff --git a/fs/filesystems.c b/fs/filesystems.c
index cac75547d35c..8b99955e3504 100644
--- a/fs/filesystems.c
+++ b/fs/filesystems.c
@@ -275,8 +275,10 @@ struct file_system_type *get_fs_type(const char *name)
int len = dot ? dot - name : strlen(name);
fs = __get_fs_type(name, len);
- if (!fs && (request_module("fs-%.*s", len, name) == 0))
+ if (!fs && (request_module("fs-%.*s", len, name) == 0)) {
fs = __get_fs_type(name, len);
+ WARN_ONCE(!fs, "request_module fs-%.*s succeeded, but still no fs?\n", len, name);
+ }
if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
put_filesystem(fs);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 6cd71c50b8bd..c38ab6c81898 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -80,9 +80,9 @@ static struct rhashtable_params ht_parms = {
static struct rhashtable gl_hash_table;
-void gfs2_glock_free(struct gfs2_glock *gl)
+static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
if (gl->gl_ops->go_flags & GLOF_ASPACE) {
kmem_cache_free(gfs2_glock_aspace_cachep, gl);
@@ -90,6 +90,13 @@ void gfs2_glock_free(struct gfs2_glock *gl)
kfree(gl->gl_lksb.sb_lvbptr);
kmem_cache_free(gfs2_glock_cachep, gl);
}
+}
+
+void gfs2_glock_free(struct gfs2_glock *gl)
+{
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+ call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_glock_wait);
}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 790e73984288..73fce76e67ee 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -373,6 +373,7 @@ struct gfs2_glock {
loff_t end;
} gl_vm;
};
+ struct rcu_head gl_rcu;
struct rhash_head gl_node;
};
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index e5259cd92ea4..3010f9edd177 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -180,7 +180,7 @@ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
bh = bh->b_this_page;
do {
if (error)
- set_buffer_write_io_error(bh);
+ mark_buffer_write_io_error(bh);
unlock_buffer(bh);
next = bh->b_this_page;
size -= bh->b_size;
diff --git a/fs/inode.c b/fs/inode.c
index ab3b9a795c0b..50370599e371 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1915,8 +1915,6 @@ __setup("ihash_entries=", set_ihash_entries);
*/
void __init inode_init_early(void)
{
- unsigned int loop;
-
/* If hashes are distributed across NUMA nodes, defer
* hash allocation until vmalloc space is available.
*/
@@ -1928,20 +1926,15 @@ void __init inode_init_early(void)
sizeof(struct hlist_head),
ihash_entries,
14,
- HASH_EARLY,
+ HASH_EARLY | HASH_ZERO,
&i_hash_shift,
&i_hash_mask,
0,
0);
-
- for (loop = 0; loop < (1U << i_hash_shift); loop++)
- INIT_HLIST_HEAD(&inode_hashtable[loop]);
}
void __init inode_init(void)
{
- unsigned int loop;
-
/* inode slab cache */
inode_cachep = kmem_cache_create("inode_cache",
sizeof(struct inode),
@@ -1959,14 +1952,11 @@ void __init inode_init(void)
sizeof(struct hlist_head),
ihash_entries,
14,
- 0,
+ HASH_ZERO,
&i_hash_shift,
&i_hash_mask,
0,
0);
-
- for (loop = 0; loop < (1U << i_hash_shift); loop++)
- INIT_HLIST_HEAD(&inode_hashtable[loop]);
}
void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
@@ -2024,7 +2014,7 @@ bool inode_owner_or_capable(const struct inode *inode)
return true;
ns = current_user_ns();
- if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
+ if (kuid_has_mapping(ns, inode->i_uid) && ns_capable(ns, CAP_FOWNER))
return true;
return false;
}
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index b6b194ec1b4f..3c1c31321d9b 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -263,18 +263,10 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
continue;
jinode->i_flags |= JI_COMMIT_RUNNING;
spin_unlock(&journal->j_list_lock);
- err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
- if (err) {
- /*
- * Because AS_EIO is cleared by
- * filemap_fdatawait_range(), set it again so
- * that user process can get -EIO from fsync().
- */
- mapping_set_error(jinode->i_vfs_inode->i_mapping, -EIO);
-
- if (!ret)
- ret = err;
- }
+ err = filemap_fdatawait_keep_errors(
+ jinode->i_vfs_inode->i_mapping);
+ if (!ret)
+ ret = err;
spin_lock(&journal->j_list_lock);
jinode->i_flags &= ~JI_COMMIT_RUNNING;
smp_mb();
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index ce93db3aef3c..65120a471729 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -664,6 +664,7 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
INCREMENT(mpStat.pagealloc);
mp = alloc_metapage(GFP_NOFS);
mp->page = page;
+ mp->sb = inode->i_sb;
mp->flag = 0;
mp->xflag = COMMIT_PAGE;
mp->count = 1;
@@ -711,7 +712,8 @@ void force_metapage(struct metapage *mp)
get_page(page);
lock_page(page);
set_page_dirty(page);
- write_one_page(page, 1);
+ if (write_one_page(page))
+ jfs_error(mp->sb, "write_one_page() failed\n");
clear_bit(META_forcewrite, &mp->flag);
put_page(page);
}
@@ -756,7 +758,8 @@ void release_metapage(struct metapage * mp)
set_page_dirty(page);
if (test_bit(META_sync, &mp->flag)) {
clear_bit(META_sync, &mp->flag);
- write_one_page(page, 1);
+ if (write_one_page(page))
+ jfs_error(mp->sb, "write_one_page() failed\n");
lock_page(page); /* write_one_page unlocks the page */
}
} else if (mp->lsn) /* discard_metapage doesn't remove it */
diff --git a/fs/jfs/jfs_metapage.h b/fs/jfs/jfs_metapage.h
index a869fb4a20d6..8b0ee514eb84 100644
--- a/fs/jfs/jfs_metapage.h
+++ b/fs/jfs/jfs_metapage.h
@@ -38,6 +38,7 @@ struct metapage {
/* implementation */
struct page *page;
+ struct super_block *sb;
unsigned int logical_size;
/* Journal management */
diff --git a/fs/libfs.c b/fs/libfs.c
index a04395334bb1..3aabe553fc45 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -974,7 +974,7 @@ int __generic_file_fsync(struct file *file, loff_t start, loff_t end,
int err;
int ret;
- err = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ err = file_write_and_wait_range(file, start, end);
if (err)
return err;
@@ -991,6 +991,10 @@ int __generic_file_fsync(struct file *file, loff_t start, loff_t end,
out:
inode_unlock(inode);
+ /* check and advance again to catch errors after syncing out buffers */
+ err = file_check_and_advance_wb_err(file);
+ if (ret == 0)
+ ret = err;
return ret;
}
EXPORT_SYMBOL(__generic_file_fsync);
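Several hunks in this series (block_dev, btrfs, ext4 and __generic_file_fsync above) converge on the same errseq_t-based fsync shape: f_wb_err is sampled when the file is opened, data is written back through the struct file, and any writeback error seen since the sample is consumed with file_check_and_advance_wb_err(). A minimal sketch of that shape for a hypothetical filesystem (examplefs_fsync is not from this diff):

#include <linux/fs.h>

/* hypothetical ->fsync for an imaginary filesystem */
static int examplefs_fsync(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	struct inode *inode = file_inode(file);
	int ret, err;

	/* write back data, reporting errors seen since f_wb_err was sampled */
	ret = file_write_and_wait_range(file, start, end);
	if (ret)
		return ret;

	inode_lock(inode);
	/* ... flush this filesystem's metadata for inode here ... */
	inode_unlock(inode);

	/* pick up anything that failed while the metadata flush ran */
	err = file_check_and_advance_wb_err(file);
	if (ret == 0)
		ret = err;
	return ret;
}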
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index 7edc9b395700..baa9721f1299 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -57,7 +57,7 @@ static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
mark_inode_dirty(dir);
}
if (IS_DIRSYNC(dir))
- err = write_one_page(page, 1);
+ err = write_one_page(page);
else
unlock_page(page);
return err;
diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c
index 4c57c9af6946..2d1ca08870f7 100644
--- a/fs/minix/itree_common.c
+++ b/fs/minix/itree_common.c
@@ -142,7 +142,7 @@ changed:
return -EAGAIN;
}
-static inline int get_block(struct inode * inode, sector_t block,
+static int get_block(struct inode * inode, sector_t block,
struct buffer_head *bh, int create)
{
int err = -EIO;
diff --git a/fs/namei.c b/fs/namei.c
index 8bacc390c51e..e0b46eb0e212 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1008,7 +1008,7 @@ static int may_linkat(struct path *link)
/* Source inode owner (or CAP_FOWNER) can hardlink all they like,
* otherwise, it must be a safe source.
*/
- if (inode_owner_or_capable(inode) || safe_hardlink_source(inode))
+ if (safe_hardlink_source(inode) || inode_owner_or_capable(inode))
return 0;
audit_log_link_denied("linkat", link);
@@ -4363,11 +4363,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
{
int error;
bool is_dir = d_is_dir(old_dentry);
- const unsigned char *old_name;
struct inode *source = old_dentry->d_inode;
struct inode *target = new_dentry->d_inode;
bool new_is_dir = false;
unsigned max_links = new_dir->i_sb->s_max_links;
+ struct name_snapshot old_name;
if (source == target)
return 0;
@@ -4414,7 +4414,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (error)
return error;
- old_name = fsnotify_oldname_init(old_dentry->d_name.name);
+ take_dentry_name_snapshot(&old_name, old_dentry);
dget(new_dentry);
if (!is_dir || (flags & RENAME_EXCHANGE))
lock_two_nondirectories(source, target);
@@ -4469,14 +4469,14 @@ out:
inode_unlock(target);
dput(new_dentry);
if (!error) {
- fsnotify_move(old_dir, new_dir, old_name, is_dir,
+ fsnotify_move(old_dir, new_dir, old_name.name, is_dir,
!(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry);
if (flags & RENAME_EXCHANGE) {
fsnotify_move(new_dir, old_dir, old_dentry->d_name.name,
new_is_dir, NULL, new_dentry);
}
}
- fsnotify_oldname_free(old_name);
+ release_dentry_name_snapshot(&old_name);
return error;
}
diff --git a/fs/namespace.c b/fs/namespace.c
index f70914a859a4..81f934b5d571 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -3239,7 +3239,6 @@ static void __init init_mount_tree(void)
void __init mnt_init(void)
{
- unsigned u;
int err;
mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
@@ -3248,22 +3247,17 @@ void __init mnt_init(void)
mount_hashtable = alloc_large_system_hash("Mount-cache",
sizeof(struct hlist_head),
mhash_entries, 19,
- 0,
+ HASH_ZERO,
&m_hash_shift, &m_hash_mask, 0, 0);
mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
sizeof(struct hlist_head),
mphash_entries, 19,
- 0,
+ HASH_ZERO,
&mp_hash_shift, &mp_hash_mask, 0, 0);
if (!mount_hashtable || !mountpoint_hashtable)
panic("Failed to allocate mount hash table\n");
- for (u = 0; u <= m_hash_mask; u++)
- INIT_HLIST_HEAD(&mount_hashtable[u]);
- for (u = 0; u <= mp_hash_mask; u++)
- INIT_HLIST_HEAD(&mountpoint_hashtable[u]);
-
kernfs_init();
err = sysfs_init();
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
index 0c3905e0542e..6719c0be674d 100644
--- a/fs/ncpfs/mmap.c
+++ b/fs/ncpfs/mmap.c
@@ -89,7 +89,7 @@ static int ncp_file_mmap_fault(struct vm_fault *vmf)
* -- nyc
*/
count_vm_event(PGMAJFAULT);
- mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
+ count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
return VM_FAULT_MAJOR;
}
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 01a9f0f007d4..0c4583b61717 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -161,16 +161,20 @@ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask
if (unlikely(!fsnotify_inode_watches_children(p_inode)))
__fsnotify_update_child_dentry_flags(p_inode);
else if (p_inode->i_fsnotify_mask & mask) {
+ struct name_snapshot name;
+
/* we are notifying a parent so come up with the new mask which
* specifies these are events which came from a child. */
mask |= FS_EVENT_ON_CHILD;
+ take_dentry_name_snapshot(&name, dentry);
if (path)
ret = fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH,
- dentry->d_name.name, 0);
+ name.name, 0);
else
ret = fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
- dentry->d_name.name, 0);
+ name.name, 0);
+ release_dentry_name_snapshot(&name);
}
dput(parent);
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index 564c504d6efd..74a21f6695c8 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -426,6 +426,7 @@ static int sc_fop_release(struct inode *inode, struct file *file)
struct o2net_sock_container *dummy_sc = sd->dbg_sock;
o2net_debug_del_sc(dummy_sc);
+ kfree(dummy_sc);
return seq_release_private(inode, file);
}
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 382401d3e88f..1a1e0078ab38 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -136,7 +136,7 @@ struct inode *ocfs2_ilookup(struct super_block *sb, u64 blkno)
struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
int sysfile_type)
{
- int rc = 0;
+ int rc = -ESTALE;
struct inode *inode = NULL;
struct super_block *sb = osb->sb;
struct ocfs2_find_inode_args args;
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 44d178b8d1aa..5bb4a89f9045 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -25,6 +25,8 @@
#ifndef _OCFS2_FS_H
#define _OCFS2_FS_H
+#include <linux/magic.h>
+
/* Version */
#define OCFS2_MAJOR_REV_LEVEL 0
#define OCFS2_MINOR_REV_LEVEL 90
@@ -56,9 +58,6 @@
#define OCFS2_MIN_BLOCKSIZE 512
#define OCFS2_MAX_BLOCKSIZE OCFS2_MIN_CLUSTERSIZE
-/* Filesystem magic number */
-#define OCFS2_SUPER_MAGIC 0x7461636f
-
/* Object signatures */
#define OCFS2_SUPER_BLOCK_SIGNATURE "OCFSV2"
#define OCFS2_INODE_SIGNATURE "INODE01"
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 820359096c7a..d6c350ba25b9 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -631,7 +631,7 @@ static struct attribute *ocfs2_attrs[] = {
NULL,
};
-static struct attribute_group ocfs2_attr_group = {
+static const struct attribute_group ocfs2_attr_group = {
.attrs = ocfs2_attrs,
};
diff --git a/fs/open.c b/fs/open.c
index 3fe0c4aa7d27..35bb784763a4 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -707,6 +707,9 @@ static int do_dentry_open(struct file *f,
f->f_inode = inode;
f->f_mapping = inode->i_mapping;
+ /* Ensure that we skip any errors that predate opening of the file */
+ f->f_wb_err = filemap_sample_wb_err(f->f_mapping);
+
if (unlikely(f->f_flags & O_PATH)) {
f->f_mode = FMODE_PATH;
f->f_op = &empty_fops;
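
The f_wb_err sample taken in do_dentry_open() is one half of the errseq_t writeback-error scheme this series introduces: each struct file records the error sequence it has already observed, and a later fsync reports only errors newer than that sample (the xfs_file_fsync hunk further down switches to file_write_and_wait_range() for the same reason). A rough sketch of the two sides, assuming the file_check_and_advance_wb_err() helper from the same series:

	/* open: remember which writeback errors predate this file */
	f->f_wb_err = filemap_sample_wb_err(f->f_mapping);

	/* fsync: report only errors that happened after the sample */
	err = file_check_and_advance_wb_err(file);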
diff --git a/fs/read_write.c b/fs/read_write.c
index a2cbc8303dae..0cc7033aa413 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -925,12 +925,10 @@ static ssize_t do_iter_write(struct file *file, struct iov_iter *iter,
if (ret < 0)
return ret;
- file_start_write(file);
if (file->f_op->write_iter)
ret = do_iter_readv_writev(file, iter, pos, WRITE, flags);
else
ret = do_loop_readv_writev(file, iter, pos, WRITE, flags);
- file_end_write(file);
if (ret > 0)
fsnotify_modify(file);
return ret;
@@ -973,7 +971,9 @@ ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
ret = import_iovec(WRITE, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
if (ret >= 0) {
+ file_start_write(file);
ret = do_iter_write(file, &iter, pos, flags);
+ file_end_write(file);
kfree(iov);
}
return ret;
@@ -1241,7 +1241,9 @@ static size_t compat_writev(struct file *file,
ret = compat_import_iovec(WRITE, vec, vlen, UIO_FASTIOV, &iov, &iter);
if (ret >= 0) {
+ file_start_write(file);
ret = do_iter_write(file, &iter, pos, flags);
+ file_end_write(file);
kfree(iov);
}
if (ret > 0)
diff --git a/fs/select.c b/fs/select.c
index 5b524a977d91..9d5f15ed87fe 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -1161,59 +1161,25 @@ static
int compat_get_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
unsigned long *fdset)
{
- nr = DIV_ROUND_UP(nr, __COMPAT_NFDBITS);
if (ufdset) {
- unsigned long odd;
-
- if (!access_ok(VERIFY_WRITE, ufdset, nr*sizeof(compat_ulong_t)))
- return -EFAULT;
-
- odd = nr & 1UL;
- nr &= ~1UL;
- while (nr) {
- unsigned long h, l;
- if (__get_user(l, ufdset) || __get_user(h, ufdset+1))
- return -EFAULT;
- ufdset += 2;
- *fdset++ = h << 32 | l;
- nr -= 2;
- }
- if (odd && __get_user(*fdset, ufdset))
- return -EFAULT;
+ return compat_get_bitmap(fdset, ufdset, nr);
} else {
/* Tricky, must clear full unsigned long in the
- * kernel fdset at the end, this makes sure that
+ * kernel fdset at the end, ALIGN makes sure that
* actually happens.
*/
- memset(fdset, 0, ((nr + 1) & ~1)*sizeof(compat_ulong_t));
+ memset(fdset, 0, ALIGN(nr, BITS_PER_LONG));
+ return 0;
}
- return 0;
}
static
int compat_set_fd_set(unsigned long nr, compat_ulong_t __user *ufdset,
unsigned long *fdset)
{
- unsigned long odd;
- nr = DIV_ROUND_UP(nr, __COMPAT_NFDBITS);
-
if (!ufdset)
return 0;
-
- odd = nr & 1UL;
- nr &= ~1UL;
- while (nr) {
- unsigned long h, l;
- l = *fdset++;
- h = l >> 32;
- if (__put_user(l, ufdset) || __put_user(h, ufdset+1))
- return -EFAULT;
- ufdset += 2;
- nr -= 2;
- }
- if (odd && __put_user(*fdset, ufdset))
- return -EFAULT;
- return 0;
+ return compat_put_bitmap(ufdset, fdset, nr);
}
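
On a 64-bit kernel, compat_get_bitmap()/compat_put_bitmap() perform the same 32-to-64-bit word packing that the removed open-coded loops did, plus the access checks. A tiny worked example of that packing (illustrative only):

	u32 l = 0x11111111, h = 0x22222222;
	u64 packed = ((u64)h << 32) | l;	/* 0x2222222211111111 */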
diff --git a/fs/statfs.c b/fs/statfs.c
index 41a6a82da5e2..fab9b6a3c116 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -38,6 +38,8 @@ static int flags_by_sb(int s_flags)
flags |= ST_SYNCHRONOUS;
if (s_flags & MS_MANDLOCK)
flags |= ST_MANDLOCK;
+ if (s_flags & MS_RDONLY)
+ flags |= ST_RDONLY;
return flags;
}
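
With this hunk, ST_RDONLY is also derived from the superblock's MS_RDONLY flag, complementing the per-mount read-only flag that was already reported, so a filesystem mounted read-only at the superblock level shows up as read-only in statfs()'s f_flags. A small userspace sketch of the visible effect, via POSIX statvfs(), whose f_flag is normally filled from the same field:

	#include <stdio.h>
	#include <sys/statvfs.h>

	int main(void)
	{
		struct statvfs st;

		if (statvfs("/", &st) != 0) {
			perror("statvfs");
			return 1;
		}
		printf("/ is mounted %s\n",
		       (st.f_flag & ST_RDONLY) ? "read-only" : "read-write");
		return 0;
	}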
diff --git a/fs/sync.c b/fs/sync.c
index 11ba023434b1..2a54c1f22035 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -192,7 +192,7 @@ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync)
spin_unlock(&inode->i_lock);
mark_inode_dirty_sync(inode);
}
- return call_fsync(file, start, end, datasync);
+ return file->f_op->fsync(file, start, end, datasync);
}
EXPORT_SYMBOL(vfs_fsync_range);
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c
index 5bdae85ceef7..f5191cb2c947 100644
--- a/fs/sysv/dir.c
+++ b/fs/sysv/dir.c
@@ -45,7 +45,7 @@ static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
mark_inode_dirty(dir);
}
if (IS_DIRSYNC(dir))
- err = write_one_page(page, 1);
+ err = write_one_page(page);
else
unlock_page(page);
return err;
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index de01b8f2aa78..48609f1d9580 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -53,7 +53,7 @@ static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
mark_inode_dirty(dir);
}
if (IS_DIRSYNC(dir))
- err = write_one_page(page, 1);
+ err = write_one_page(page);
else
unlock_page(page);
return err;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 6148ccd6cccf..cadcd12a3d35 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -214,6 +214,7 @@ static inline struct uffd_msg userfault_msg(unsigned long address,
* hugepmd ranges.
*/
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
+ struct vm_area_struct *vma,
unsigned long address,
unsigned long flags,
unsigned long reason)
@@ -224,7 +225,7 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
- pte = huge_pte_offset(mm, address);
+ pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
if (!pte)
goto out;
@@ -243,6 +244,7 @@ out:
}
#else
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
+ struct vm_area_struct *vma,
unsigned long address,
unsigned long flags,
unsigned long reason)
@@ -448,7 +450,8 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
reason);
else
- must_wait = userfaultfd_huge_must_wait(ctx, vmf->address,
+ must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
+ vmf->address,
vmf->flags, reason);
up_read(&mm->mmap_sem);
@@ -1114,11 +1117,6 @@ static ssize_t userfaultfd_read(struct file *file, char __user *buf,
static void __wake_userfault(struct userfaultfd_ctx *ctx,
struct userfaultfd_wake_range *range)
{
- unsigned long start, end;
-
- start = range->start;
- end = range->start + range->len;
-
spin_lock(&ctx->fault_pending_wqh.lock);
/* wake all in the range and autoremove */
if (waitqueue_active(&ctx->fault_pending_wqh))
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 17f27a2fb5e2..51dfae5576a4 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -140,7 +140,7 @@ xfs_file_fsync(
trace_xfs_file_fsync(ip);
- error = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ error = file_write_and_wait_range(file, start, end);
if (error)
return error;
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index d249546da15e..43d07f9c4e9e 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1211,7 +1211,7 @@ xfs_mod_icount(
struct xfs_mount *mp,
int64_t delta)
{
- __percpu_counter_add(&mp->m_icount, delta, XFS_ICOUNT_BATCH);
+ percpu_counter_add_batch(&mp->m_icount, delta, XFS_ICOUNT_BATCH);
if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) {
ASSERT(0);
percpu_counter_add(&mp->m_icount, -delta);
@@ -1290,7 +1290,7 @@ xfs_mod_fdblocks(
else
batch = XFS_FDBLOCKS_BATCH;
- __percpu_counter_add(&mp->m_fdblocks, delta, batch);
+ percpu_counter_add_batch(&mp->m_fdblocks, delta, batch);
if (__percpu_counter_compare(&mp->m_fdblocks, mp->m_alloc_set_aside,
XFS_FDBLOCKS_BATCH) >= 0) {
/* we had space! */
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 99b490b4d05a..540354f94f83 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -31,10 +31,12 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
return pte_modify(pte, newprot);
}
+#ifndef huge_pte_clear
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
+ pte_t *ptep, unsigned long sz)
{
pte_clear(mm, addr, ptep);
}
+#endif
#endif /* _ASM_GENERIC_HUGETLB_H */
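
Wrapping the generic huge_pte_clear() in #ifndef turns it into an overridable default: an architecture that needs different behaviour (which is what the new page-size argument makes possible) provides the function plus a same-named macro before this header is pulled in. A sketch of the override side, with hypothetical arch-specific contents:

	/* in some arch/<arch>/include/asm/hugetlb.h -- illustrative only */
	#define huge_pte_clear huge_pte_clear
	static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
					  pte_t *ptep, unsigned long sz)
	{
		/* clear one or more entries depending on sz, e.g. contiguous PTEs */
	}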
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index bbe4bb438e39..723e81a6c162 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -200,11 +200,6 @@ static inline long strnlen_user(const char __user *src, long n)
return __strnlen_user(src, n);
}
-static inline long strlen_user(const char __user *src)
-{
- return strnlen_user(src, 32767);
-}
-
/*
* Zero Userspace
*/
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 0d64658a224f..da0be9a8d1de 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -125,9 +125,9 @@
VMLINUX_SYMBOL(__start_ftrace_events) = .; \
KEEP(*(_ftrace_events)) \
VMLINUX_SYMBOL(__stop_ftrace_events) = .; \
- VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .; \
- KEEP(*(_ftrace_enum_map)) \
- VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
+ VMLINUX_SYMBOL(__start_ftrace_eval_maps) = .; \
+ KEEP(*(_ftrace_eval_map)) \
+ VMLINUX_SYMBOL(__stop_ftrace_eval_maps) = .;
#else
#define FTRACE_EVENTS()
#endif
@@ -594,6 +594,7 @@
#define SBSS(sbss_align) \
. = ALIGN(sbss_align); \
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
+ *(.dynsbss) \
*(.sbss) \
*(.scommon) \
}
@@ -640,11 +641,22 @@
.debug_str 0 : { *(.debug_str) } \
.debug_loc 0 : { *(.debug_loc) } \
.debug_macinfo 0 : { *(.debug_macinfo) } \
+ .debug_pubtypes 0 : { *(.debug_pubtypes) } \
+ /* DWARF 3 */ \
+ .debug_ranges 0 : { *(.debug_ranges) } \
/* SGI/MIPS DWARF 2 extensions */ \
.debug_weaknames 0 : { *(.debug_weaknames) } \
.debug_funcnames 0 : { *(.debug_funcnames) } \
.debug_typenames 0 : { *(.debug_typenames) } \
.debug_varnames 0 : { *(.debug_varnames) } \
+ /* GNU DWARF 2 extensions */ \
+ .debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) } \
+ .debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) } \
+ /* DWARF 4 */ \
+ .debug_types 0 : { *(.debug_types) } \
+ /* DWARF 5 */ \
+ .debug_macro 0 : { *(.debug_macro) } \
+ .debug_addr 0 : { *(.debug_addr) }
/* Stabs debugging sections. */
#define STABS_DEBUG \
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h
index ee03b3c44b3b..add42809642a 100644
--- a/include/drm/drm_ioctl.h
+++ b/include/drm/drm_ioctl.h
@@ -172,6 +172,7 @@ struct drm_ioctl_desc {
int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
long drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+long drm_ioctl_kernel(struct file *, drm_ioctl_t, void *, u32);
#ifdef CONFIG_COMPAT
long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#else
diff --git a/include/dt-bindings/clock/cortina,gemini-clock.h b/include/dt-bindings/clock/cortina,gemini-clock.h
new file mode 100644
index 000000000000..acf5cd550b0c
--- /dev/null
+++ b/include/dt-bindings/clock/cortina,gemini-clock.h
@@ -0,0 +1,29 @@
+#ifndef DT_BINDINGS_CORTINA_GEMINI_CLOCK_H
+#define DT_BINDINGS_CORTINA_GEMINI_CLOCK_H
+
+/* RTC, AHB, APB, CPU, PCI, TVC, UART clocks and 13 gates */
+#define GEMINI_NUM_CLKS 20
+
+#define GEMINI_CLK_RTC 0
+#define GEMINI_CLK_AHB 1
+#define GEMINI_CLK_APB 2
+#define GEMINI_CLK_CPU 3
+#define GEMINI_CLK_PCI 4
+#define GEMINI_CLK_TVC 5
+#define GEMINI_CLK_UART 6
+#define GEMINI_CLK_GATES 7
+#define GEMINI_CLK_GATE_SECURITY 7
+#define GEMINI_CLK_GATE_GMAC0 8
+#define GEMINI_CLK_GATE_GMAC1 9
+#define GEMINI_CLK_GATE_SATA0 10
+#define GEMINI_CLK_GATE_SATA1 11
+#define GEMINI_CLK_GATE_USB0 12
+#define GEMINI_CLK_GATE_USB1 13
+#define GEMINI_CLK_GATE_IDE 14
+#define GEMINI_CLK_GATE_PCI 15
+#define GEMINI_CLK_GATE_DDR 16
+#define GEMINI_CLK_GATE_FLASH 17
+#define GEMINI_CLK_GATE_TVC 18
+#define GEMINI_CLK_GATE_BOOT 19
+
+#endif /* DT_BINDINGS_CORTINA_GEMINI_CLOCK_H */
diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h
index 6fd21c291416..2740ae0424a9 100644
--- a/include/dt-bindings/clock/exynos5420.h
+++ b/include/dt-bindings/clock/exynos5420.h
@@ -217,6 +217,9 @@
#define CLK_MOUT_MCLK_CDREX 654
#define CLK_MOUT_BPLL 655
#define CLK_MOUT_MX_MSPLL_CCORE 656
+#define CLK_MOUT_EPLL 657
+#define CLK_MOUT_MAU_EPLL 658
+#define CLK_MOUT_USER_MAU_EPLL 659
/* divider clocks */
#define CLK_DOUT_PIXEL 768
diff --git a/include/dt-bindings/clock/hi3660-clock.h b/include/dt-bindings/clock/hi3660-clock.h
index 1c00b7fe296f..adb768d447a5 100644
--- a/include/dt-bindings/clock/hi3660-clock.h
+++ b/include/dt-bindings/clock/hi3660-clock.h
@@ -154,6 +154,23 @@
#define HI3660_CLK_DIV_UFSPERI 137
#define HI3660_CLK_DIV_AOMM 138
#define HI3660_CLK_DIV_IOPERI 139
+#define HI3660_VENC_VOLT_HOLD 140
+#define HI3660_PERI_VOLT_HOLD 141
+#define HI3660_CLK_GATE_VENC 142
+#define HI3660_CLK_GATE_VDEC 143
+#define HI3660_CLK_ANDGT_VENC 144
+#define HI3660_CLK_ANDGT_VDEC 145
+#define HI3660_CLK_MUX_VENC 146
+#define HI3660_CLK_MUX_VDEC 147
+#define HI3660_CLK_DIV_VENC 148
+#define HI3660_CLK_DIV_VDEC 149
+#define HI3660_CLK_FAC_ISP_SNCLK 150
+#define HI3660_CLK_GATE_ISP_SNCLK0 151
+#define HI3660_CLK_GATE_ISP_SNCLK1 152
+#define HI3660_CLK_GATE_ISP_SNCLK2 153
+#define HI3660_CLK_ANGT_ISP_SNCLK 154
+#define HI3660_CLK_MUX_ISP_SNCLK 155
+#define HI3660_CLK_DIV_ISP_SNCLK 156
/* clk in pmuctrl */
#define HI3660_GATE_ABB_192 0
diff --git a/include/dt-bindings/clock/hi6220-clock.h b/include/dt-bindings/clock/hi6220-clock.h
index b8ba665aab7b..409cc02cd844 100644
--- a/include/dt-bindings/clock/hi6220-clock.h
+++ b/include/dt-bindings/clock/hi6220-clock.h
@@ -174,4 +174,8 @@
#define HI6220_DDRC_AXI1 7
#define HI6220_POWER_NR_CLKS 8
+
+/* clk in Hi6220 acpu sctrl */
+#define HI6220_ACPU_SFT_AT_S 0
+
#endif
diff --git a/include/dt-bindings/clock/histb-clock.h b/include/dt-bindings/clock/histb-clock.h
index 181c0f070f7c..067f5e501b0c 100644
--- a/include/dt-bindings/clock/histb-clock.h
+++ b/include/dt-bindings/clock/histb-clock.h
@@ -53,7 +53,14 @@
#define HISTB_ETH1_MAC_CLK 31
#define HISTB_ETH1_MACIF_CLK 32
#define HISTB_COMBPHY1_CLK 33
-
+#define HISTB_USB2_BUS_CLK 34
+#define HISTB_USB2_PHY_CLK 35
+#define HISTB_USB2_UTMI_CLK 36
+#define HISTB_USB2_12M_CLK 37
+#define HISTB_USB2_48M_CLK 38
+#define HISTB_USB2_OTG_UTMI_CLK 39
+#define HISTB_USB2_PHY1_REF_CLK 40
+#define HISTB_USB2_PHY2_REF_CLK 41
/* clocks provided by mcu CRG */
#define HISTB_MCE_CLK 1
diff --git a/include/dt-bindings/clock/imx7d-clock.h b/include/dt-bindings/clock/imx7d-clock.h
index a7a1a50f33ef..de62a83b6c80 100644
--- a/include/dt-bindings/clock/imx7d-clock.h
+++ b/include/dt-bindings/clock/imx7d-clock.h
@@ -450,5 +450,7 @@
#define IMX7D_CLK_ARM 437
#define IMX7D_CKIL 438
#define IMX7D_OCOTP_CLK 439
-#define IMX7D_CLK_END 440
+#define IMX7D_NAND_RAWNAND_CLK 440
+#define IMX7D_NAND_USDHC_BUS_RAWNAND_CLK 441
+#define IMX7D_CLK_END 442
#endif /* __DT_BINDINGS_CLOCK_IMX7D_H */
diff --git a/include/dt-bindings/clock/mt2701-clk.h b/include/dt-bindings/clock/mt2701-clk.h
index 2062c67e2e51..551f7600ab58 100644
--- a/include/dt-bindings/clock/mt2701-clk.h
+++ b/include/dt-bindings/clock/mt2701-clk.h
@@ -221,7 +221,8 @@
#define CLK_INFRA_PMICWRAP 17
#define CLK_INFRA_DDCCI 18
#define CLK_INFRA_CLK_13M 19
-#define CLK_INFRA_NR 20
+#define CLK_INFRA_CPUSEL 20
+#define CLK_INFRA_NR 21
/* PERICFG */
diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h
index 6094bf7e50ab..8aea623dd518 100644
--- a/include/dt-bindings/clock/mt8173-clk.h
+++ b/include/dt-bindings/clock/mt8173-clk.h
@@ -193,7 +193,9 @@
#define CLK_INFRA_PMICSPI 10
#define CLK_INFRA_PMICWRAP 11
#define CLK_INFRA_CLK_13M 12
-#define CLK_INFRA_NR_CLK 13
+#define CLK_INFRA_CA53SEL 13
+#define CLK_INFRA_CA57SEL 14
+#define CLK_INFRA_NR_CLK 15
/* PERI_SYS */
diff --git a/include/dt-bindings/clock/omap4.h b/include/dt-bindings/clock/omap4.h
new file mode 100644
index 000000000000..e86c758e50ce
--- /dev/null
+++ b/include/dt-bindings/clock/omap4.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2017 Texas Instruments, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DT_BINDINGS_CLK_OMAP4_H
+#define __DT_BINDINGS_CLK_OMAP4_H
+
+#define OMAP4_CLKCTRL_OFFSET 0x20
+#define OMAP4_CLKCTRL_INDEX(offset) ((offset) - OMAP4_CLKCTRL_OFFSET)
+
+/* mpuss clocks */
+#define OMAP4_MPU_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20)
+
+/* tesla clocks */
+#define OMAP4_DSP_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20)
+
+/* abe clocks */
+#define OMAP4_L4_ABE_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20)
+#define OMAP4_AESS_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28)
+#define OMAP4_MCPDM_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30)
+#define OMAP4_DMIC_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38)
+#define OMAP4_MCASP_CLKCTRL OMAP4_CLKCTRL_INDEX(0x40)
+#define OMAP4_MCBSP1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x48)
+#define OMAP4_MCBSP2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x50)
+#define OMAP4_MCBSP3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x58)
+#define OMAP4_SLIMBUS1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x60)
+#define OMAP4_TIMER5_CLKCTRL OMAP4_CLKCTRL_INDEX(0x68)
+#define OMAP4_TIMER6_CLKCTRL OMAP4_CLKCTRL_INDEX(0x70)
+#define OMAP4_TIMER7_CLKCTRL OMAP4_CLKCTRL_INDEX(0x78)
+#define OMAP4_TIMER8_CLKCTRL OMAP4_CLKCTRL_INDEX(0x80)
+#define OMAP4_WD_TIMER3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x88)
+
+/* l4_ao clocks */
+#define OMAP4_SMARTREFLEX_MPU_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28)
+#define OMAP4_SMARTREFLEX_IVA_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30)
+#define OMAP4_SMARTREFLEX_CORE_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38)
+
+/* l3_1 clocks */
+#define OMAP4_L3_MAIN_1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20)
+
+/* l3_2 clocks */
+#define OMAP4_L3_MAIN_2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20)
+#define OMAP4_GPMC_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28)
+#define OMAP4_OCMC_RAM_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30)
+
+/* ducati clocks */
+#define OMAP4_IPU_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20)
+
+/* l3_dma clocks */
+#define OMAP4_DMA_SYSTEM_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20)
+
+/* l3_emif clocks */
+#define OMAP4_DMM_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20)
+#define OMAP4_EMIF1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30)
+#define OMAP4_EMIF2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38)
+
+/* d2d clocks */
+#define OMAP4_C2C_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20)
+
+/* l4_cfg clocks */
+#define OMAP4_L4_CFG_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20)
+#define OMAP4_SPINLOCK_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28)
+#define OMAP4_MAILBOX_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30)
+
+/* l3_instr clocks */
+#define OMAP4_L3_MAIN_3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20)
+#define OMAP4_L3_INSTR_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28)
+#define OMAP4_OCP_WP_NOC_CLKCTRL OMAP4_CLKCTRL_INDEX(0x40)
+
+/* ivahd clocks */
+#define OMAP4_IVA_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20)
+#define OMAP4_SL2IF_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28)
+
+/* iss clocks */
+#define OMAP4_ISS_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20)
+#define OMAP4_FDIF_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28)
+
+/* l3_dss clocks */
+#define OMAP4_DSS_CORE_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20)
+
+/* l3_gfx clocks */
+#define OMAP4_GPU_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20)
+
+/* l3_init clocks */
+#define OMAP4_MMC1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28)
+#define OMAP4_MMC2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30)
+#define OMAP4_HSI_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38)
+#define OMAP4_USB_HOST_HS_CLKCTRL OMAP4_CLKCTRL_INDEX(0x58)
+#define OMAP4_USB_OTG_HS_CLKCTRL OMAP4_CLKCTRL_INDEX(0x60)
+#define OMAP4_USB_TLL_HS_CLKCTRL OMAP4_CLKCTRL_INDEX(0x68)
+#define OMAP4_USB_HOST_FS_CLKCTRL OMAP4_CLKCTRL_INDEX(0xd0)
+#define OMAP4_OCP2SCP_USB_PHY_CLKCTRL OMAP4_CLKCTRL_INDEX(0xe0)
+
+/* l4_per clocks */
+#define OMAP4_TIMER10_CLKCTRL OMAP4_CLKCTRL_INDEX(0x28)
+#define OMAP4_TIMER11_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30)
+#define OMAP4_TIMER2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38)
+#define OMAP4_TIMER3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x40)
+#define OMAP4_TIMER4_CLKCTRL OMAP4_CLKCTRL_INDEX(0x48)
+#define OMAP4_TIMER9_CLKCTRL OMAP4_CLKCTRL_INDEX(0x50)
+#define OMAP4_ELM_CLKCTRL OMAP4_CLKCTRL_INDEX(0x58)
+#define OMAP4_GPIO2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x60)
+#define OMAP4_GPIO3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x68)
+#define OMAP4_GPIO4_CLKCTRL OMAP4_CLKCTRL_INDEX(0x70)
+#define OMAP4_GPIO5_CLKCTRL OMAP4_CLKCTRL_INDEX(0x78)
+#define OMAP4_GPIO6_CLKCTRL OMAP4_CLKCTRL_INDEX(0x80)
+#define OMAP4_HDQ1W_CLKCTRL OMAP4_CLKCTRL_INDEX(0x88)
+#define OMAP4_I2C1_CLKCTRL OMAP4_CLKCTRL_INDEX(0xa0)
+#define OMAP4_I2C2_CLKCTRL OMAP4_CLKCTRL_INDEX(0xa8)
+#define OMAP4_I2C3_CLKCTRL OMAP4_CLKCTRL_INDEX(0xb0)
+#define OMAP4_I2C4_CLKCTRL OMAP4_CLKCTRL_INDEX(0xb8)
+#define OMAP4_L4_PER_CLKCTRL OMAP4_CLKCTRL_INDEX(0xc0)
+#define OMAP4_MCBSP4_CLKCTRL OMAP4_CLKCTRL_INDEX(0xe0)
+#define OMAP4_MCSPI1_CLKCTRL OMAP4_CLKCTRL_INDEX(0xf0)
+#define OMAP4_MCSPI2_CLKCTRL OMAP4_CLKCTRL_INDEX(0xf8)
+#define OMAP4_MCSPI3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x100)
+#define OMAP4_MCSPI4_CLKCTRL OMAP4_CLKCTRL_INDEX(0x108)
+#define OMAP4_MMC3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x120)
+#define OMAP4_MMC4_CLKCTRL OMAP4_CLKCTRL_INDEX(0x128)
+#define OMAP4_SLIMBUS2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x138)
+#define OMAP4_UART1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x140)
+#define OMAP4_UART2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x148)
+#define OMAP4_UART3_CLKCTRL OMAP4_CLKCTRL_INDEX(0x150)
+#define OMAP4_UART4_CLKCTRL OMAP4_CLKCTRL_INDEX(0x158)
+#define OMAP4_MMC5_CLKCTRL OMAP4_CLKCTRL_INDEX(0x160)
+
+/* l4_wkup clocks */
+#define OMAP4_L4_WKUP_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20)
+#define OMAP4_WD_TIMER2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30)
+#define OMAP4_GPIO1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x38)
+#define OMAP4_TIMER1_CLKCTRL OMAP4_CLKCTRL_INDEX(0x40)
+#define OMAP4_COUNTER_32K_CLKCTRL OMAP4_CLKCTRL_INDEX(0x50)
+#define OMAP4_KBD_CLKCTRL OMAP4_CLKCTRL_INDEX(0x78)
+
+/* emu_sys clocks */
+#define OMAP4_DEBUGSS_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20)
+
+#endif
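
OMAP4_CLKCTRL_INDEX() simply subtracts the 0x20 base, so every value in this header is a clkctrl register offset relative to the first clkctrl register of its domain. A quick compile-time sanity check of that arithmetic (hypothetical, not part of the header):

	#include <dt-bindings/clock/omap4.h>

	_Static_assert(OMAP4_MPU_CLKCTRL  == 0x000, "0x20  - 0x20");
	_Static_assert(OMAP4_AESS_CLKCTRL == 0x008, "0x28  - 0x20");
	_Static_assert(OMAP4_MMC5_CLKCTRL == 0x140, "0x160 - 0x20");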
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq8074.h b/include/dt-bindings/clock/qcom,gcc-ipq8074.h
new file mode 100644
index 000000000000..370c83c3bccc
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_IPQ_GCC_8074_H
+#define _DT_BINDINGS_CLOCK_IPQ_GCC_8074_H
+
+#define GPLL0 0
+#define GPLL0_MAIN 1
+#define GCC_SLEEP_CLK_SRC 2
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 3
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 4
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 5
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 6
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 7
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 8
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 9
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 10
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 11
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 12
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 13
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 14
+#define BLSP1_UART1_APPS_CLK_SRC 15
+#define BLSP1_UART2_APPS_CLK_SRC 16
+#define BLSP1_UART3_APPS_CLK_SRC 17
+#define BLSP1_UART4_APPS_CLK_SRC 18
+#define BLSP1_UART5_APPS_CLK_SRC 19
+#define BLSP1_UART6_APPS_CLK_SRC 20
+#define GCC_BLSP1_AHB_CLK 21
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 22
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 23
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 24
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 25
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 26
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 27
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 28
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 29
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 30
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 31
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 32
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 33
+#define GCC_BLSP1_UART1_APPS_CLK 34
+#define GCC_BLSP1_UART2_APPS_CLK 35
+#define GCC_BLSP1_UART3_APPS_CLK 36
+#define GCC_BLSP1_UART4_APPS_CLK 37
+#define GCC_BLSP1_UART5_APPS_CLK 38
+#define GCC_BLSP1_UART6_APPS_CLK 39
+#define GCC_PRNG_AHB_CLK 40
+#define GCC_QPIC_AHB_CLK 41
+#define GCC_QPIC_CLK 42
+#define PCNOC_BFDCD_CLK_SRC 43
+
+#define GCC_BLSP1_BCR 0
+#define GCC_BLSP1_QUP1_BCR 1
+#define GCC_BLSP1_UART1_BCR 2
+#define GCC_BLSP1_QUP2_BCR 3
+#define GCC_BLSP1_UART2_BCR 4
+#define GCC_BLSP1_QUP3_BCR 5
+#define GCC_BLSP1_UART3_BCR 6
+#define GCC_BLSP1_QUP4_BCR 7
+#define GCC_BLSP1_UART4_BCR 8
+#define GCC_BLSP1_QUP5_BCR 9
+#define GCC_BLSP1_UART5_BCR 10
+#define GCC_BLSP1_QUP6_BCR 11
+#define GCC_BLSP1_UART6_BCR 12
+#define GCC_IMEM_BCR 13
+#define GCC_SMMU_BCR 14
+#define GCC_APSS_TCU_BCR 15
+#define GCC_SMMU_XPU_BCR 16
+#define GCC_PCNOC_TBU_BCR 17
+#define GCC_SMMU_CFG_BCR 18
+#define GCC_PRNG_BCR 19
+#define GCC_BOOT_ROM_BCR 20
+#define GCC_CRYPTO_BCR 21
+#define GCC_WCSS_BCR 22
+#define GCC_WCSS_Q6_BCR 23
+#define GCC_NSS_BCR 24
+#define GCC_SEC_CTRL_BCR 25
+#define GCC_ADSS_BCR 26
+#define GCC_DDRSS_BCR 27
+#define GCC_SYSTEM_NOC_BCR 28
+#define GCC_PCNOC_BCR 29
+#define GCC_TCSR_BCR 30
+#define GCC_QDSS_BCR 31
+#define GCC_DCD_BCR 32
+#define GCC_MSG_RAM_BCR 33
+#define GCC_MPM_BCR 34
+#define GCC_SPMI_BCR 35
+#define GCC_SPDM_BCR 36
+#define GCC_RBCPR_BCR 37
+#define GCC_RBCPR_MX_BCR 38
+#define GCC_TLMM_BCR 39
+#define GCC_RBCPR_WCSS_BCR 40
+#define GCC_USB0_PHY_BCR 41
+#define GCC_USB3PHY_0_PHY_BCR 42
+#define GCC_USB0_BCR 43
+#define GCC_USB1_PHY_BCR 44
+#define GCC_USB3PHY_1_PHY_BCR 45
+#define GCC_USB1_BCR 46
+#define GCC_QUSB2_0_PHY_BCR 47
+#define GCC_QUSB2_1_PHY_BCR 48
+#define GCC_SDCC1_BCR 49
+#define GCC_SDCC2_BCR 50
+#define GCC_SNOC_BUS_TIMEOUT0_BCR 51
+#define GCC_SNOC_BUS_TIMEOUT2_BCR 52
+#define GCC_SNOC_BUS_TIMEOUT3_BCR 53
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR 54
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR 55
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR 56
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR 57
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR 58
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR 59
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR 60
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR 61
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR 62
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR 63
+#define GCC_UNIPHY0_BCR 64
+#define GCC_UNIPHY1_BCR 65
+#define GCC_UNIPHY2_BCR 66
+#define GCC_CMN_12GPLL_BCR 67
+#define GCC_QPIC_BCR 68
+#define GCC_MDIO_BCR 69
+#define GCC_PCIE1_TBU_BCR 70
+#define GCC_WCSS_CORE_TBU_BCR 71
+#define GCC_WCSS_Q6_TBU_BCR 72
+#define GCC_USB0_TBU_BCR 73
+#define GCC_USB1_TBU_BCR 74
+#define GCC_PCIE0_TBU_BCR 75
+#define GCC_NSS_NOC_TBU_BCR 76
+#define GCC_PCIE0_BCR 77
+#define GCC_PCIE0_PHY_BCR 78
+#define GCC_PCIE0PHY_PHY_BCR 79
+#define GCC_PCIE0_LINK_DOWN_BCR 80
+#define GCC_PCIE1_BCR 81
+#define GCC_PCIE1_PHY_BCR 82
+#define GCC_PCIE1PHY_PHY_BCR 83
+#define GCC_PCIE1_LINK_DOWN_BCR 84
+#define GCC_DCC_BCR 85
+#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 86
+#define GCC_APC1_VOLTAGE_DROOP_DETECTOR_BCR 87
+#define GCC_SMMU_CATS_BCR 88
+
+#endif
diff --git a/include/dt-bindings/clock/r8a7790-cpg-mssr.h b/include/dt-bindings/clock/r8a7790-cpg-mssr.h
new file mode 100644
index 000000000000..1625b8bf3482
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7790-cpg-mssr.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_R8A7790_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A7790_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a7790 CPG Core Clocks */
+#define R8A7790_CLK_Z 0
+#define R8A7790_CLK_Z2 1
+#define R8A7790_CLK_ZG 2
+#define R8A7790_CLK_ZTR 3
+#define R8A7790_CLK_ZTRD2 4
+#define R8A7790_CLK_ZT 5
+#define R8A7790_CLK_ZX 6
+#define R8A7790_CLK_ZS 7
+#define R8A7790_CLK_HP 8
+#define R8A7790_CLK_I 9
+#define R8A7790_CLK_B 10
+#define R8A7790_CLK_LB 11
+#define R8A7790_CLK_P 12
+#define R8A7790_CLK_CL 13
+#define R8A7790_CLK_M2 14
+#define R8A7790_CLK_ADSP 15
+#define R8A7790_CLK_IMP 16
+#define R8A7790_CLK_ZB3 17
+#define R8A7790_CLK_ZB3D2 18
+#define R8A7790_CLK_DDR 19
+#define R8A7790_CLK_SDH 20
+#define R8A7790_CLK_SD0 21
+#define R8A7790_CLK_SD1 22
+#define R8A7790_CLK_SD2 23
+#define R8A7790_CLK_SD3 24
+#define R8A7790_CLK_MMC0 25
+#define R8A7790_CLK_MMC1 26
+#define R8A7790_CLK_MP 27
+#define R8A7790_CLK_SSP 28
+#define R8A7790_CLK_SSPRS 29
+#define R8A7790_CLK_QSPI 30
+#define R8A7790_CLK_CP 31
+#define R8A7790_CLK_RCAN 32
+#define R8A7790_CLK_R 33
+#define R8A7790_CLK_OSC 34
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7790_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a7791-cpg-mssr.h b/include/dt-bindings/clock/r8a7791-cpg-mssr.h
new file mode 100644
index 000000000000..e8823410c01c
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7791-cpg-mssr.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_R8A7791_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A7791_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a7791 CPG Core Clocks */
+#define R8A7791_CLK_Z 0
+#define R8A7791_CLK_ZG 1
+#define R8A7791_CLK_ZTR 2
+#define R8A7791_CLK_ZTRD2 3
+#define R8A7791_CLK_ZT 4
+#define R8A7791_CLK_ZX 5
+#define R8A7791_CLK_ZS 6
+#define R8A7791_CLK_HP 7
+#define R8A7791_CLK_I 8
+#define R8A7791_CLK_B 9
+#define R8A7791_CLK_LB 10
+#define R8A7791_CLK_P 11
+#define R8A7791_CLK_CL 12
+#define R8A7791_CLK_M2 13
+#define R8A7791_CLK_ADSP 14
+#define R8A7791_CLK_ZB3 15
+#define R8A7791_CLK_ZB3D2 16
+#define R8A7791_CLK_DDR 17
+#define R8A7791_CLK_SDH 18
+#define R8A7791_CLK_SD0 19
+#define R8A7791_CLK_SD2 20
+#define R8A7791_CLK_SD3 21
+#define R8A7791_CLK_MMC0 22
+#define R8A7791_CLK_MP 23
+#define R8A7791_CLK_SSP 24
+#define R8A7791_CLK_SSPRS 25
+#define R8A7791_CLK_QSPI 26
+#define R8A7791_CLK_CP 27
+#define R8A7791_CLK_RCAN 28
+#define R8A7791_CLK_R 29
+#define R8A7791_CLK_OSC 30
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7791_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a7792-cpg-mssr.h b/include/dt-bindings/clock/r8a7792-cpg-mssr.h
new file mode 100644
index 000000000000..72ce85cb2f94
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7792-cpg-mssr.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_R8A7792_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A7792_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a7792 CPG Core Clocks */
+#define R8A7792_CLK_Z 0
+#define R8A7792_CLK_ZG 1
+#define R8A7792_CLK_ZTR 2
+#define R8A7792_CLK_ZTRD2 3
+#define R8A7792_CLK_ZT 4
+#define R8A7792_CLK_ZX 5
+#define R8A7792_CLK_ZS 6
+#define R8A7792_CLK_HP 7
+#define R8A7792_CLK_I 8
+#define R8A7792_CLK_B 9
+#define R8A7792_CLK_LB 10
+#define R8A7792_CLK_P 11
+#define R8A7792_CLK_CL 12
+#define R8A7792_CLK_M2 13
+#define R8A7792_CLK_IMP 14
+#define R8A7792_CLK_ZB3 15
+#define R8A7792_CLK_ZB3D2 16
+#define R8A7792_CLK_DDR 17
+#define R8A7792_CLK_SD 18
+#define R8A7792_CLK_MP 19
+#define R8A7792_CLK_QSPI 20
+#define R8A7792_CLK_CP 21
+#define R8A7792_CLK_CPEX 22
+#define R8A7792_CLK_RCAN 23
+#define R8A7792_CLK_R 24
+#define R8A7792_CLK_OSC 25
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7792_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a7793-cpg-mssr.h b/include/dt-bindings/clock/r8a7793-cpg-mssr.h
new file mode 100644
index 000000000000..8809b0f62d61
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7793-cpg-mssr.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_R8A7793_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A7793_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a7793 CPG Core Clocks */
+#define R8A7793_CLK_Z 0
+#define R8A7793_CLK_ZG 1
+#define R8A7793_CLK_ZTR 2
+#define R8A7793_CLK_ZTRD2 3
+#define R8A7793_CLK_ZT 4
+#define R8A7793_CLK_ZX 5
+#define R8A7793_CLK_ZS 6
+#define R8A7793_CLK_HP 7
+#define R8A7793_CLK_I 8
+#define R8A7793_CLK_B 9
+#define R8A7793_CLK_LB 10
+#define R8A7793_CLK_P 11
+#define R8A7793_CLK_CL 12
+#define R8A7793_CLK_M2 13
+#define R8A7793_CLK_ADSP 14
+#define R8A7793_CLK_ZB3 15
+#define R8A7793_CLK_ZB3D2 16
+#define R8A7793_CLK_DDR 17
+#define R8A7793_CLK_SDH 18
+#define R8A7793_CLK_SD0 19
+#define R8A7793_CLK_SD2 20
+#define R8A7793_CLK_SD3 21
+#define R8A7793_CLK_MMC0 22
+#define R8A7793_CLK_MP 23
+#define R8A7793_CLK_SSP 24
+#define R8A7793_CLK_SSPRS 25
+#define R8A7793_CLK_QSPI 26
+#define R8A7793_CLK_CP 27
+#define R8A7793_CLK_RCAN 28
+#define R8A7793_CLK_R 29
+#define R8A7793_CLK_OSC 30
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7793_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a7794-cpg-mssr.h b/include/dt-bindings/clock/r8a7794-cpg-mssr.h
new file mode 100644
index 000000000000..9d720311ae3a
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7794-cpg-mssr.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 Renesas Electronics Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_R8A7794_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A7794_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a7794 CPG Core Clocks */
+#define R8A7794_CLK_Z2 0
+#define R8A7794_CLK_ZG 1
+#define R8A7794_CLK_ZTR 2
+#define R8A7794_CLK_ZTRD2 3
+#define R8A7794_CLK_ZT 4
+#define R8A7794_CLK_ZX 5
+#define R8A7794_CLK_ZS 6
+#define R8A7794_CLK_HP 7
+#define R8A7794_CLK_I 8
+#define R8A7794_CLK_B 9
+#define R8A7794_CLK_LB 10
+#define R8A7794_CLK_P 11
+#define R8A7794_CLK_CL 12
+#define R8A7794_CLK_CP 13
+#define R8A7794_CLK_M2 14
+#define R8A7794_CLK_ADSP 15
+#define R8A7794_CLK_ZB3 16
+#define R8A7794_CLK_ZB3D2 17
+#define R8A7794_CLK_DDR 18
+#define R8A7794_CLK_SDH 19
+#define R8A7794_CLK_SD0 20
+#define R8A7794_CLK_SD2 21
+#define R8A7794_CLK_SD3 22
+#define R8A7794_CLK_MMC0 23
+#define R8A7794_CLK_MP 24
+#define R8A7794_CLK_QSPI 25
+#define R8A7794_CLK_CPEX 26
+#define R8A7794_CLK_RCAN 27
+#define R8A7794_CLK_R 28
+#define R8A7794_CLK_OSC 29
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7794_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/rk3128-cru.h b/include/dt-bindings/clock/rk3128-cru.h
new file mode 100644
index 000000000000..92894f4306cf
--- /dev/null
+++ b/include/dt-bindings/clock/rk3128-cru.h
@@ -0,0 +1,282 @@
+/*
+ * Copyright (c) 2017 Rockchip Electronics Co. Ltd.
+ * Author: Elaine <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3128_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3128_H
+
+/* core clocks */
+#define PLL_APLL 1
+#define PLL_DPLL 2
+#define PLL_CPLL 3
+#define PLL_GPLL 4
+#define ARMCLK 5
+#define PLL_GPLL_DIV2 6
+#define PLL_GPLL_DIV3 7
+
+/* sclk gates (special clocks) */
+#define SCLK_SPI0 65
+#define SCLK_NANDC 67
+#define SCLK_SDMMC 68
+#define SCLK_SDIO 69
+#define SCLK_EMMC 71
+#define SCLK_UART0 77
+#define SCLK_UART1 78
+#define SCLK_UART2 79
+#define SCLK_I2S0 80
+#define SCLK_I2S1 81
+#define SCLK_SPDIF 83
+#define SCLK_TIMER0 85
+#define SCLK_TIMER1 86
+#define SCLK_TIMER2 87
+#define SCLK_TIMER3 88
+#define SCLK_TIMER4 89
+#define SCLK_TIMER5 90
+#define SCLK_SARADC 91
+#define SCLK_I2S_OUT 113
+#define SCLK_SDMMC_DRV 114
+#define SCLK_SDIO_DRV 115
+#define SCLK_EMMC_DRV 117
+#define SCLK_SDMMC_SAMPLE 118
+#define SCLK_SDIO_SAMPLE 119
+#define SCLK_EMMC_SAMPLE 121
+#define SCLK_VOP 122
+#define SCLK_MAC_SRC 124
+#define SCLK_MAC 126
+#define SCLK_MAC_REFOUT 127
+#define SCLK_MAC_REF 128
+#define SCLK_MAC_RX 129
+#define SCLK_MAC_TX 130
+#define SCLK_HEVC_CORE 134
+#define SCLK_RGA 135
+#define SCLK_CRYPTO 138
+#define SCLK_TSP 139
+#define SCLK_OTGPHY0 142
+#define SCLK_OTGPHY1 143
+#define SCLK_DDRC 144
+#define SCLK_PVTM_FUNC 145
+#define SCLK_PVTM_CORE 146
+#define SCLK_PVTM_GPU 147
+#define SCLK_MIPI_24M 148
+#define SCLK_PVTM 149
+#define SCLK_CIF_SRC 150
+#define SCLK_CIF_OUT_SRC 151
+#define SCLK_CIF_OUT 152
+#define SCLK_SFC 153
+#define SCLK_USB480M 154
+
+/* dclk gates */
+#define DCLK_VOP 190
+#define DCLK_EBC 191
+
+/* aclk gates */
+#define ACLK_VIO0 192
+#define ACLK_VIO1 193
+#define ACLK_DMAC 194
+#define ACLK_CPU 195
+#define ACLK_VEPU 196
+#define ACLK_VDPU 197
+#define ACLK_CIF 198
+#define ACLK_IEP 199
+#define ACLK_LCDC0 204
+#define ACLK_RGA 205
+#define ACLK_PERI 210
+#define ACLK_VOP 211
+#define ACLK_GMAC 212
+#define ACLK_GPU 213
+
+/* pclk gates */
+#define PCLK_SARADC 318
+#define PCLK_WDT 319
+#define PCLK_GPIO0 320
+#define PCLK_GPIO1 321
+#define PCLK_GPIO2 322
+#define PCLK_GPIO3 323
+#define PCLK_VIO_H2P 324
+#define PCLK_MIPI 325
+#define PCLK_EFUSE 326
+#define PCLK_HDMI 327
+#define PCLK_ACODEC 328
+#define PCLK_GRF 329
+#define PCLK_I2C0 332
+#define PCLK_I2C1 333
+#define PCLK_I2C2 334
+#define PCLK_I2C3 335
+#define PCLK_SPI0 338
+#define PCLK_UART0 341
+#define PCLK_UART1 342
+#define PCLK_UART2 343
+#define PCLK_TSADC 344
+#define PCLK_PWM 350
+#define PCLK_TIMER 353
+#define PCLK_CPU 354
+#define PCLK_PERI 363
+#define PCLK_GMAC 367
+#define PCLK_PMU_PRE 368
+#define PCLK_SIM_CARD 369
+
+/* hclk gates */
+#define HCLK_SPDIF 440
+#define HCLK_GPS 441
+#define HCLK_USBHOST 442
+#define HCLK_I2S_8CH 443
+#define HCLK_I2S_2CH 444
+#define HCLK_VOP 452
+#define HCLK_NANDC 453
+#define HCLK_SDMMC 456
+#define HCLK_SDIO 457
+#define HCLK_EMMC 459
+#define HCLK_CPU 460
+#define HCLK_VEPU 461
+#define HCLK_VDPU 462
+#define HCLK_LCDC0 463
+#define HCLK_EBC 465
+#define HCLK_VIO 466
+#define HCLK_RGA 467
+#define HCLK_IEP 468
+#define HCLK_VIO_H2P 469
+#define HCLK_CIF 470
+#define HCLK_HOST2 473
+#define HCLK_OTG 474
+#define HCLK_TSP 475
+#define HCLK_CRYPTO 476
+#define HCLK_PERI 478
+
+#define CLK_NR_CLKS (HCLK_PERI + 1)
+
+/* soft-reset indices */
+#define SRST_CORE0_PO 0
+#define SRST_CORE1_PO 1
+#define SRST_CORE2_PO 2
+#define SRST_CORE3_PO 3
+#define SRST_CORE0 4
+#define SRST_CORE1 5
+#define SRST_CORE2 6
+#define SRST_CORE3 7
+#define SRST_CORE0_DBG 8
+#define SRST_CORE1_DBG 9
+#define SRST_CORE2_DBG 10
+#define SRST_CORE3_DBG 11
+#define SRST_TOPDBG 12
+#define SRST_ACLK_CORE 13
+#define SRST_STRC_SYS_A 14
+#define SRST_L2C 15
+
+#define SRST_CPUSYS_H 18
+#define SRST_AHB2APBSYS_H 19
+#define SRST_SPDIF 20
+#define SRST_INTMEM 21
+#define SRST_ROM 22
+#define SRST_PERI_NIU 23
+#define SRST_I2S_2CH 24
+#define SRST_I2S_8CH 25
+#define SRST_GPU_PVTM 26
+#define SRST_FUNC_PVTM 27
+#define SRST_CORE_PVTM 29
+#define SRST_EFUSE_P 30
+#define SRST_ACODEC_P 31
+
+#define SRST_GPIO0 32
+#define SRST_GPIO1 33
+#define SRST_GPIO2 34
+#define SRST_GPIO3 35
+#define SRST_MIPIPHY_P 36
+#define SRST_UART0 39
+#define SRST_UART1 40
+#define SRST_UART2 41
+#define SRST_I2C0 43
+#define SRST_I2C1 44
+#define SRST_I2C2 45
+#define SRST_I2C3 46
+#define SRST_SFC 47
+
+#define SRST_PWM 48
+#define SRST_DAP_PO 50
+#define SRST_DAP 51
+#define SRST_DAP_SYS 52
+#define SRST_CRYPTO 53
+#define SRST_GRF 55
+#define SRST_GMAC 56
+#define SRST_PERIPH_SYS_A 57
+#define SRST_PERIPH_SYS_H 58
+#define SRST_PERIPH_SYS_P 59
+#define SRST_SMART_CARD 60
+#define SRST_CPU_PERI 61
+#define SRST_EMEM_PERI 62
+#define SRST_USB_PERI 63
+
+#define SRST_DMA 64
+#define SRST_GPS 67
+#define SRST_NANDC 68
+#define SRST_USBOTG0 69
+#define SRST_OTGC0 71
+#define SRST_USBOTG1 72
+#define SRST_OTGC1 74
+#define SRST_DDRMSCH 79
+
+#define SRST_SDMMC 81
+#define SRST_SDIO 82
+#define SRST_EMMC 83
+#define SRST_SPI 84
+#define SRST_WDT 86
+#define SRST_SARADC 87
+#define SRST_DDRPHY 88
+#define SRST_DDRPHY_P 89
+#define SRST_DDRCTRL 90
+#define SRST_DDRCTRL_P 91
+#define SRST_TSP 92
+#define SRST_TSP_CLKIN 93
+#define SRST_HOST0_ECHI 94
+
+#define SRST_HDMI_P 96
+#define SRST_VIO_ARBI_H 97
+#define SRST_VIO0_A 98
+#define SRST_VIO_BUS_H 99
+#define SRST_VOP_A 100
+#define SRST_VOP_H 101
+#define SRST_VOP_D 102
+#define SRST_UTMI0 103
+#define SRST_UTMI1 104
+#define SRST_USBPOR 105
+#define SRST_IEP_A 106
+#define SRST_IEP_H 107
+#define SRST_RGA_A 108
+#define SRST_RGA_H 109
+#define SRST_CIF0 110
+#define SRST_PMU 111
+
+#define SRST_VCODEC_A 112
+#define SRST_VCODEC_H 113
+#define SRST_VIO1_A 114
+#define SRST_HEVC_CORE 115
+#define SRST_VCODEC_NIU_A 116
+#define SRST_PMU_NIU_P 117
+#define SRST_LCDC0_S 119
+#define SRST_GPU 120
+#define SRST_GPU_NIU_A 122
+#define SRST_EBC_A 123
+#define SRST_EBC_H 124
+
+#define SRST_CORE_DBG 128
+#define SRST_DBG_P 129
+#define SRST_TIMER0 130
+#define SRST_TIMER1 131
+#define SRST_TIMER2 132
+#define SRST_TIMER3 133
+#define SRST_TIMER4 134
+#define SRST_TIMER5 135
+#define SRST_VIO_H2P 136
+#define SRST_VIO_MIPI_DSI 137
+
+#endif
diff --git a/include/dt-bindings/clock/sun5i-ccu.h b/include/dt-bindings/clock/sun5i-ccu.h
index aeb2e2f781fb..81f34d477aeb 100644
--- a/include/dt-bindings/clock/sun5i-ccu.h
+++ b/include/dt-bindings/clock/sun5i-ccu.h
@@ -19,6 +19,9 @@
#define CLK_HOSC 1
+#define CLK_PLL_VIDEO0_2X 9
+
+#define CLK_PLL_VIDEO1_2X 16
#define CLK_CPU 17
#define CLK_AHB_OTG 23
diff --git a/include/dt-bindings/clock/sun8i-a83t-ccu.h b/include/dt-bindings/clock/sun8i-a83t-ccu.h
new file mode 100644
index 000000000000..78af5085f630
--- /dev/null
+++ b/include/dt-bindings/clock/sun8i-a83t-ccu.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2017 Chen-Yu Tsai <[email protected]>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_SUN8I_A83T_CCU_H_
+#define _DT_BINDINGS_CLOCK_SUN8I_A83T_CCU_H_
+
+#define CLK_PLL_PERIPH 6
+
+#define CLK_PLL_DE 9
+
+#define CLK_C0CPUX 11
+#define CLK_C1CPUX 12
+
+#define CLK_BUS_MIPI_DSI 19
+#define CLK_BUS_SS 20
+#define CLK_BUS_DMA 21
+#define CLK_BUS_MMC0 22
+#define CLK_BUS_MMC1 23
+#define CLK_BUS_MMC2 24
+#define CLK_BUS_NAND 25
+#define CLK_BUS_DRAM 26
+#define CLK_BUS_EMAC 27
+#define CLK_BUS_HSTIMER 28
+#define CLK_BUS_SPI0 29
+#define CLK_BUS_SPI1 30
+#define CLK_BUS_OTG 31
+#define CLK_BUS_EHCI0 32
+#define CLK_BUS_EHCI1 33
+#define CLK_BUS_OHCI0 34
+
+#define CLK_BUS_VE 35
+#define CLK_BUS_TCON0 36
+#define CLK_BUS_TCON1 37
+#define CLK_BUS_CSI 38
+#define CLK_BUS_HDMI 39
+#define CLK_BUS_DE 40
+#define CLK_BUS_GPU 41
+#define CLK_BUS_MSGBOX 42
+#define CLK_BUS_SPINLOCK 43
+
+#define CLK_BUS_SPDIF 44
+#define CLK_BUS_PIO 45
+#define CLK_BUS_I2S0 46
+#define CLK_BUS_I2S1 47
+#define CLK_BUS_I2S2 48
+#define CLK_BUS_TDM 49
+
+#define CLK_BUS_I2C0 50
+#define CLK_BUS_I2C1 51
+#define CLK_BUS_I2C2 52
+#define CLK_BUS_UART0 53
+#define CLK_BUS_UART1 54
+#define CLK_BUS_UART2 55
+#define CLK_BUS_UART3 56
+#define CLK_BUS_UART4 57
+
+#define CLK_NAND 59
+#define CLK_MMC0 60
+#define CLK_MMC0_SAMPLE 61
+#define CLK_MMC0_OUTPUT 62
+#define CLK_MMC1 63
+#define CLK_MMC1_SAMPLE 64
+#define CLK_MMC1_OUTPUT 65
+#define CLK_MMC2 66
+#define CLK_MMC2_SAMPLE 67
+#define CLK_MMC2_OUTPUT 68
+#define CLK_SS 69
+#define CLK_SPI0 70
+#define CLK_SPI1 71
+#define CLK_I2S0 72
+#define CLK_I2S1 73
+#define CLK_I2S2 74
+#define CLK_TDM 75
+#define CLK_SPDIF 76
+#define CLK_USB_PHY0 77
+#define CLK_USB_PHY1 78
+#define CLK_USB_HSIC 79
+#define CLK_USB_HSIC_12M 80
+#define CLK_USB_OHCI0 81
+
+#define CLK_DRAM_VE 83
+#define CLK_DRAM_CSI 84
+
+#define CLK_TCON0 85
+#define CLK_TCON1 86
+#define CLK_CSI_MISC 87
+#define CLK_MIPI_CSI 88
+#define CLK_CSI_MCLK 89
+#define CLK_CSI_SCLK 90
+#define CLK_VE 91
+#define CLK_AVS 92
+#define CLK_HDMI 93
+#define CLK_HDMI_SLOW 94
+
+#define CLK_MIPI_DSI0 96
+#define CLK_MIPI_DSI1 97
+#define CLK_GPU_CORE 98
+#define CLK_GPU_MEMORY 99
+#define CLK_GPU_HYD 100
+
+#endif /* _DT_BINDINGS_CLOCK_SUN8I_A83T_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun8i-de2.h b/include/dt-bindings/clock/sun8i-de2.h
new file mode 100644
index 000000000000..3bed63b524aa
--- /dev/null
+++ b/include/dt-bindings/clock/sun8i-de2.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2016 Icenowy Zheng <[email protected]>
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_SUN8I_DE2_H_
+#define _DT_BINDINGS_CLOCK_SUN8I_DE2_H_
+
+#define CLK_BUS_MIXER0 0
+#define CLK_BUS_MIXER1 1
+#define CLK_BUS_WB 2
+
+#define CLK_MIXER0 6
+#define CLK_MIXER1 7
+#define CLK_WB 8
+
+#endif /* _DT_BINDINGS_CLOCK_SUN8I_DE2_H_ */
diff --git a/include/dt-bindings/clock/zx296718-clock.h b/include/dt-bindings/clock/zx296718-clock.h
index 822d52385080..092c9751a697 100644
--- a/include/dt-bindings/clock/zx296718-clock.h
+++ b/include/dt-bindings/clock/zx296718-clock.h
@@ -157,7 +157,11 @@
#define AUDIO_TDM_WCLK 17
#define AUDIO_TDM_PCLK 18
#define AUDIO_TS_PCLK 19
+#define I2S0_WCLK_MUX 20
+#define I2S1_WCLK_MUX 21
+#define I2S2_WCLK_MUX 22
+#define I2S3_WCLK_MUX 23
-#define AUDIO_NR_CLKS 20
+#define AUDIO_NR_CLKS 24
#endif
diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h
index b4f54da694eb..c5074584561d 100644
--- a/include/dt-bindings/gpio/gpio.h
+++ b/include/dt-bindings/gpio/gpio.h
@@ -28,4 +28,8 @@
#define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_DRAIN)
#define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_SOURCE)
+/* Bit 3 expresses GPIO suspend/resume persistence */
+#define GPIO_SLEEP_MAINTAIN_VALUE 0
+#define GPIO_SLEEP_MAY_LOOSE_VALUE 8
+
#endif
diff --git a/include/dt-bindings/reset/sun8i-a83t-ccu.h b/include/dt-bindings/reset/sun8i-a83t-ccu.h
new file mode 100644
index 000000000000..784f6e11664e
--- /dev/null
+++ b/include/dt-bindings/reset/sun8i-a83t-ccu.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2017 Chen-Yu Tsai <[email protected]>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DT_BINDINGS_RESET_SUN8I_A83T_CCU_H_
+#define _DT_BINDINGS_RESET_SUN8I_A83T_CCU_H_
+
+#define RST_USB_PHY0 0
+#define RST_USB_PHY1 1
+#define RST_USB_HSIC 2
+
+#define RST_DRAM 3
+#define RST_MBUS 4
+
+#define RST_BUS_MIPI_DSI 5
+#define RST_BUS_SS 6
+#define RST_BUS_DMA 7
+#define RST_BUS_MMC0 8
+#define RST_BUS_MMC1 9
+#define RST_BUS_MMC2 10
+#define RST_BUS_NAND 11
+#define RST_BUS_DRAM 12
+#define RST_BUS_EMAC 13
+#define RST_BUS_HSTIMER 14
+#define RST_BUS_SPI0 15
+#define RST_BUS_SPI1 16
+#define RST_BUS_OTG 17
+#define RST_BUS_EHCI0 18
+#define RST_BUS_EHCI1 19
+#define RST_BUS_OHCI0 20
+
+#define RST_BUS_VE 21
+#define RST_BUS_TCON0 22
+#define RST_BUS_TCON1 23
+#define RST_BUS_CSI 24
+#define RST_BUS_HDMI0 25
+#define RST_BUS_HDMI1 26
+#define RST_BUS_DE 27
+#define RST_BUS_GPU 28
+#define RST_BUS_MSGBOX 29
+#define RST_BUS_SPINLOCK 30
+
+#define RST_BUS_LVDS 31
+
+#define RST_BUS_SPDIF 32
+#define RST_BUS_I2S0 33
+#define RST_BUS_I2S1 34
+#define RST_BUS_I2S2 35
+#define RST_BUS_TDM 36
+
+#define RST_BUS_I2C0 37
+#define RST_BUS_I2C1 38
+#define RST_BUS_I2C2 39
+#define RST_BUS_UART0 40
+#define RST_BUS_UART1 41
+#define RST_BUS_UART2 42
+#define RST_BUS_UART3 43
+#define RST_BUS_UART4 44
+
+#endif /* _DT_BINDINGS_RESET_SUN8I_A83T_CCU_H_ */
diff --git a/include/dt-bindings/reset/sun8i-de2.h b/include/dt-bindings/reset/sun8i-de2.h
new file mode 100644
index 000000000000..9526017432f0
--- /dev/null
+++ b/include/dt-bindings/reset/sun8i-de2.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2016 Icenowy Zheng <[email protected]>
+ *
+ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ */
+
+#ifndef _DT_BINDINGS_RESET_SUN8I_DE2_H_
+#define _DT_BINDINGS_RESET_SUN8I_DE2_H_
+
+#define RST_MIXER0 0
+#define RST_MIXER1 1
+#define RST_WB 2
+
+#endif /* _DT_BINDINGS_RESET_SUN8I_DE2_H_ */
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 295584f31a4e..f0053f884b4a 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -57,9 +57,7 @@ struct arch_timer_cpu {
int kvm_timer_hyp_init(void);
int kvm_timer_enable(struct kvm_vcpu *vcpu);
-int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
- const struct kvm_irq_level *virt_irq,
- const struct kvm_irq_level *phys_irq);
+int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
@@ -70,6 +68,10 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value);
+int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
+int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
+int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
+
bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx);
void kvm_timer_schedule(struct kvm_vcpu *vcpu);
void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 1ab4633adf4f..f6e030617467 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -35,6 +35,7 @@ struct kvm_pmu {
int irq_num;
struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
bool ready;
+ bool created;
bool irq_level;
};
@@ -63,6 +64,7 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
+int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
#else
struct kvm_pmu {
};
@@ -112,6 +114,10 @@ static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
{
return -ENXIO;
}
+static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
#endif
#endif
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index ef718586321c..34dba516ef24 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -38,6 +38,10 @@
#define VGIC_MIN_LPI 8192
#define KVM_IRQCHIP_NUM_PINS (1020 - 32)
+#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)
+#define irq_is_spi(irq) ((irq) >= VGIC_NR_PRIVATE_IRQS && \
+ (irq) <= VGIC_MAX_SPI)
+
enum vgic_type {
VGIC_V2, /* Good ol' GICv2 */
VGIC_V3, /* New fancy GICv3 */
@@ -119,6 +123,9 @@ struct vgic_irq {
u8 source; /* GICv2 SGIs only */
u8 priority;
enum vgic_irq_config config; /* Level or edge */
+
+ void *owner; /* Opaque pointer to reserve an interrupt
+ for in-kernel devices. */
};
struct vgic_register_region;
@@ -285,6 +292,7 @@ struct vgic_cpu {
};
extern struct static_key_false vgic_v2_cpuif_trap;
+extern struct static_key_false vgic_v3_cpuif_trap;
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
void kvm_vgic_early_init(struct kvm *kvm);
@@ -298,9 +306,7 @@ int kvm_vgic_hyp_init(void);
void kvm_vgic_init_cpu_hardware(void);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
- bool level);
-int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, unsigned int intid,
- bool level);
+ bool level, void *owner);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
@@ -341,4 +347,6 @@ int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
*/
int kvm_vgic_setup_default_irq_routing(struct kvm *kvm);
+int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner);
+
#endif /* __KVM_ARM_VGIC_H */
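A minimal usage sketch (not part of this diff) of the owner-aware injection API declared above; the device pointer, the PPI number and the function names are illustrative only:

static int example_claim_ppi(struct kvm_vcpu *vcpu, void *dev)
{
        unsigned int intid = 27;        /* example PPI */

        if (!irq_is_ppi(intid))
                return -EINVAL;

        /* Reserve the line so only this device may inject on it. */
        return kvm_vgic_set_owner(vcpu, intid, dev);
}

static int example_set_line(struct kvm_vcpu *vcpu, void *dev, bool level)
{
        /* Injection must now pass the same owner token used at claim time. */
        return kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, 27, level, dev);
}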
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index cafdfb84ca28..99f96df83dd8 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -964,6 +964,8 @@ int devm_acpi_dev_add_driver_gpios(struct device *dev,
const struct acpi_gpio_mapping *gpios);
void devm_acpi_dev_remove_driver_gpios(struct device *dev);
+bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio);
int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index);
#else
static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
@@ -980,6 +982,11 @@ static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
}
static inline void devm_acpi_dev_remove_driver_gpios(struct device *dev) {}
+static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
+ struct acpi_resource_gpio **agpio)
+{
+ return false;
+}
static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
return -ENXIO;
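A hedged sketch of how a driver might use acpi_gpio_get_irq_resource() from inside an ACPI resource walk (for example one driven by acpi_dev_get_resources()); the callback name and the fields inspected are illustrative:

static int example_lookup_gpio_irq(struct acpi_resource *ares, void *data)
{
        struct acpi_resource_gpio *agpio;

        /* false means this resource is not an ACPI GpioInt descriptor */
        if (!acpi_gpio_get_irq_resource(ares, &agpio))
                return 1;

        /* agpio->pin_table[0], agpio->triggering, ... can be inspected here;
         * the return value convention is that of the walker being used. */
        return 1;
}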
diff --git a/include/linux/amba/pl080.h b/include/linux/amba/pl080.h
index 580b5323a717..ab036b6b1804 100644
--- a/include/linux/amba/pl080.h
+++ b/include/linux/amba/pl080.h
@@ -44,7 +44,14 @@
#define PL080_SYNC (0x34)
-/* Per channel configuration registers */
+/* The Faraday Technology FTDMAC020 variant registers */
+#define FTDMAC020_CH_BUSY (0x20)
+/* Identical to PL080_CONFIG */
+#define FTDMAC020_CSR (0x24)
+/* Identical to PL080_SYNC */
+#define FTDMAC020_SYNC (0x2C)
+#define FTDMAC020_REVISION (0x30)
+#define FTDMAC020_FEATURE (0x34)
/* Per channel configuration registers */
#define PL080_Cx_BASE(x) ((0x100 + (x * 0x20)))
@@ -55,13 +62,20 @@
#define PL080_CH_CONFIG (0x10)
#define PL080S_CH_CONTROL2 (0x10)
#define PL080S_CH_CONFIG (0x14)
-
-#define PL080_LLI_ADDR_MASK (0x3fffffff << 2)
+/* The Faraday FTDMAC020 derivative shuffles the registers around */
+#define FTDMAC020_CH_CSR (0x00)
+#define FTDMAC020_CH_CFG (0x04)
+#define FTDMAC020_CH_SRC_ADDR (0x08)
+#define FTDMAC020_CH_DST_ADDR (0x0C)
+#define FTDMAC020_CH_LLP (0x10)
+#define FTDMAC020_CH_SIZE (0x14)
+
+#define PL080_LLI_ADDR_MASK GENMASK(31, 2)
#define PL080_LLI_ADDR_SHIFT (2)
#define PL080_LLI_LM_AHB2 BIT(0)
#define PL080_CONTROL_TC_IRQ_EN BIT(31)
-#define PL080_CONTROL_PROT_MASK (0x7 << 28)
+#define PL080_CONTROL_PROT_MASK GENMASK(30, 28)
#define PL080_CONTROL_PROT_SHIFT (28)
#define PL080_CONTROL_PROT_CACHE BIT(30)
#define PL080_CONTROL_PROT_BUFF BIT(29)
@@ -70,16 +84,16 @@
#define PL080_CONTROL_SRC_INCR BIT(26)
#define PL080_CONTROL_DST_AHB2 BIT(25)
#define PL080_CONTROL_SRC_AHB2 BIT(24)
-#define PL080_CONTROL_DWIDTH_MASK (0x7 << 21)
+#define PL080_CONTROL_DWIDTH_MASK GENMASK(23, 21)
#define PL080_CONTROL_DWIDTH_SHIFT (21)
-#define PL080_CONTROL_SWIDTH_MASK (0x7 << 18)
+#define PL080_CONTROL_SWIDTH_MASK GENMASK(20, 18)
#define PL080_CONTROL_SWIDTH_SHIFT (18)
-#define PL080_CONTROL_DB_SIZE_MASK (0x7 << 15)
+#define PL080_CONTROL_DB_SIZE_MASK GENMASK(17, 15)
#define PL080_CONTROL_DB_SIZE_SHIFT (15)
-#define PL080_CONTROL_SB_SIZE_MASK (0x7 << 12)
+#define PL080_CONTROL_SB_SIZE_MASK GENMASK(14, 12)
#define PL080_CONTROL_SB_SIZE_SHIFT (12)
-#define PL080_CONTROL_TRANSFER_SIZE_MASK (0xfff << 0)
-#define PL080S_CONTROL_TRANSFER_SIZE_MASK (0x1ffffff << 0)
+#define PL080_CONTROL_TRANSFER_SIZE_MASK GENMASK(11, 0)
+#define PL080S_CONTROL_TRANSFER_SIZE_MASK GENMASK(24, 0)
#define PL080_CONTROL_TRANSFER_SIZE_SHIFT (0)
#define PL080_BSIZE_1 (0x0)
@@ -102,11 +116,11 @@
#define PL080_CONFIG_LOCK BIT(16)
#define PL080_CONFIG_TC_IRQ_MASK BIT(15)
#define PL080_CONFIG_ERR_IRQ_MASK BIT(14)
-#define PL080_CONFIG_FLOW_CONTROL_MASK (0x7 << 11)
+#define PL080_CONFIG_FLOW_CONTROL_MASK GENMASK(13, 11)
#define PL080_CONFIG_FLOW_CONTROL_SHIFT (11)
-#define PL080_CONFIG_DST_SEL_MASK (0xf << 6)
+#define PL080_CONFIG_DST_SEL_MASK GENMASK(9, 6)
#define PL080_CONFIG_DST_SEL_SHIFT (6)
-#define PL080_CONFIG_SRC_SEL_MASK (0xf << 1)
+#define PL080_CONFIG_SRC_SEL_MASK GENMASK(4, 1)
#define PL080_CONFIG_SRC_SEL_SHIFT (1)
#define PL080_CONFIG_ENABLE BIT(0)
@@ -119,6 +133,73 @@
#define PL080_FLOW_PER2MEM_PER (0x6)
#define PL080_FLOW_SRC2DST_SRC (0x7)
+#define FTDMAC020_CH_CSR_TC_MSK BIT(31)
+/* Later versions have a threshold in bits 24..26 */
+#define FTDMAC020_CH_CSR_FIFOTH_MSK GENMASK(26, 24)
+#define FTDMAC020_CH_CSR_FIFOTH_SHIFT (24)
+#define FTDMAC020_CH_CSR_CHPR1_MSK GENMASK(23, 22)
+#define FTDMAC020_CH_CSR_PROT3 BIT(21)
+#define FTDMAC020_CH_CSR_PROT2 BIT(20)
+#define FTDMAC020_CH_CSR_PROT1 BIT(19)
+#define FTDMAC020_CH_CSR_SRC_SIZE_MSK GENMASK(18, 16)
+#define FTDMAC020_CH_CSR_SRC_SIZE_SHIFT (16)
+#define FTDMAC020_CH_CSR_ABT BIT(15)
+#define FTDMAC020_CH_CSR_SRC_WIDTH_MSK GENMASK(13, 11)
+#define FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT (11)
+#define FTDMAC020_CH_CSR_DST_WIDTH_MSK GENMASK(10, 8)
+#define FTDMAC020_CH_CSR_DST_WIDTH_SHIFT (8)
+#define FTDMAC020_CH_CSR_MODE BIT(7)
+/* 00 = increase, 01 = decrease, 10 = fix */
+#define FTDMAC020_CH_CSR_SRCAD_CTL_MSK GENMASK(6, 5)
+#define FTDMAC020_CH_CSR_SRCAD_CTL_SHIFT (5)
+#define FTDMAC020_CH_CSR_DSTAD_CTL_MSK GENMASK(4, 3)
+#define FTDMAC020_CH_CSR_DSTAD_CTL_SHIFT (3)
+#define FTDMAC020_CH_CSR_SRC_SEL BIT(2)
+#define FTDMAC020_CH_CSR_DST_SEL BIT(1)
+#define FTDMAC020_CH_CSR_EN BIT(0)
+
+/* FIFO threshold setting */
+#define FTDMAC020_CH_CSR_FIFOTH_1 (0x0)
+#define FTDMAC020_CH_CSR_FIFOTH_2 (0x1)
+#define FTDMAC020_CH_CSR_FIFOTH_4 (0x2)
+#define FTDMAC020_CH_CSR_FIFOTH_8 (0x3)
+#define FTDMAC020_CH_CSR_FIFOTH_16 (0x4)
+/* The FTDMAC020 supports 64bit wide transfers */
+#define FTDMAC020_WIDTH_64BIT (0x3)
+/* Address can be increased, decreased or fixed */
+#define FTDMAC020_CH_CSR_SRCAD_CTL_INC (0x0)
+#define FTDMAC020_CH_CSR_SRCAD_CTL_DEC (0x1)
+#define FTDMAC020_CH_CSR_SRCAD_CTL_FIXED (0x2)
+
+#define FTDMAC020_CH_CFG_LLP_CNT_MASK GENMASK(19, 16)
+#define FTDMAC020_CH_CFG_LLP_CNT_SHIFT (16)
+#define FTDMAC020_CH_CFG_BUSY BIT(8)
+#define FTDMAC020_CH_CFG_INT_ABT_MASK BIT(2)
+#define FTDMAC020_CH_CFG_INT_ERR_MASK BIT(1)
+#define FTDMAC020_CH_CFG_INT_TC_MASK BIT(0)
+
+/* Inside the LLIs, the applicable CSR fields are mapped differently */
+#define FTDMAC020_LLI_TC_MSK BIT(28)
+#define FTDMAC020_LLI_SRC_WIDTH_MSK GENMASK(27, 25)
+#define FTDMAC020_LLI_SRC_WIDTH_SHIFT (25)
+#define FTDMAC020_LLI_DST_WIDTH_MSK GENMASK(24, 22)
+#define FTDMAC020_LLI_DST_WIDTH_SHIFT (22)
+#define FTDMAC020_LLI_SRCAD_CTL_MSK GENMASK(21, 20)
+#define FTDMAC020_LLI_SRCAD_CTL_SHIFT (20)
+#define FTDMAC020_LLI_DSTAD_CTL_MSK GENMASK(19, 18)
+#define FTDMAC020_LLI_DSTAD_CTL_SHIFT (18)
+#define FTDMAC020_LLI_SRC_SEL BIT(17)
+#define FTDMAC020_LLI_DST_SEL BIT(16)
+#define FTDMAC020_LLI_TRANSFER_SIZE_MASK GENMASK(11, 0)
+#define FTDMAC020_LLI_TRANSFER_SIZE_SHIFT (0)
+
+#define FTDMAC020_CFG_LLP_CNT_MASK GENMASK(19, 16)
+#define FTDMAC020_CFG_LLP_CNT_SHIFT (16)
+#define FTDMAC020_CFG_BUSY BIT(8)
+#define FTDMAC020_CFG_INT_ABT_MSK BIT(2)
+#define FTDMAC020_CFG_INT_ERR_MSK BIT(1)
+#define FTDMAC020_CFG_INT_TC_MSK BIT(0)
+
/* DMA linked list chain structure */
struct pl080_lli {
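An illustrative (non-driver) sketch of composing a channel CSR value from the FTDMAC020 fields defined above, assuming the usual 0/1/2 byte/half-word/word width encoding shared by the PL080 family:

static u32 example_ftdmac020_csr(void)
{
        u32 csr = 0;

        csr |= 2 << FTDMAC020_CH_CSR_SRC_WIDTH_SHIFT;   /* 32-bit source */
        csr |= 2 << FTDMAC020_CH_CSR_DST_WIDTH_SHIFT;   /* 32-bit destination */
        /* the 00/01/10 increase/decrease/fix encoding applies to both fields */
        csr |= FTDMAC020_CH_CSR_SRCAD_CTL_INC << FTDMAC020_CH_CSR_SRCAD_CTL_SHIFT;
        csr |= FTDMAC020_CH_CSR_SRCAD_CTL_INC << FTDMAC020_CH_CSR_DSTAD_CTL_SHIFT;
        csr |= FTDMAC020_CH_CSR_EN;

        return csr;
}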
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index 5308eae9ce35..79d1bcee738d 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -47,8 +47,6 @@ enum {
* devices with static assignments
* @muxval: a number usually used to poke into some mux register to
* mux in the signal to this channel
- * @cctl_memcpy: options for the channel control register for memcpy
- * *** not used for slave channels ***
* @addr: source/target address in physical memory for this DMA channel,
* can be the address of a FIFO register for burst requests for example.
* This can be left undefined if the PrimeCell API is used for configuring
@@ -63,12 +61,28 @@ struct pl08x_channel_data {
int min_signal;
int max_signal;
u32 muxval;
- u32 cctl_memcpy;
dma_addr_t addr;
bool single;
u8 periph_buses;
};
+enum pl08x_burst_size {
+ PL08X_BURST_SZ_1,
+ PL08X_BURST_SZ_4,
+ PL08X_BURST_SZ_8,
+ PL08X_BURST_SZ_16,
+ PL08X_BURST_SZ_32,
+ PL08X_BURST_SZ_64,
+ PL08X_BURST_SZ_128,
+ PL08X_BURST_SZ_256,
+};
+
+enum pl08x_bus_width {
+ PL08X_BUS_WIDTH_8_BITS,
+ PL08X_BUS_WIDTH_16_BITS,
+ PL08X_BUS_WIDTH_32_BITS,
+};
+
/**
* struct pl08x_platform_data - the platform configuration for the PL08x
* PrimeCells.
@@ -76,6 +90,11 @@ struct pl08x_channel_data {
* platform, all inclusive, including multiplexed channels. The available
* physical channels will be multiplexed around these signals as they are
* requested, just enumerate all possible channels.
+ * @num_slave_channels: number of elements in the slave channel array
+ * @memcpy_burst_size: the appropriate burst size for memcpy operations
+ * @memcpy_bus_width: memory bus width
+ * @memcpy_prot_buff: whether memcpy DMA is bufferable
+ * @memcpy_prot_cache: whether memcpy DMA is cacheable
* @get_xfer_signal: request a physical signal to be used for a DMA transfer
* immediately: if there is some multiplexing or similar blocking the use
* of the channel the transfer can be denied by returning less than zero,
@@ -90,7 +109,10 @@ struct pl08x_channel_data {
struct pl08x_platform_data {
struct pl08x_channel_data *slave_channels;
unsigned int num_slave_channels;
- struct pl08x_channel_data memcpy_channel;
+ enum pl08x_burst_size memcpy_burst_size;
+ enum pl08x_bus_width memcpy_bus_width;
+ bool memcpy_prot_buff;
+ bool memcpy_prot_cache;
int (*get_xfer_signal)(const struct pl08x_channel_data *);
void (*put_xfer_signal)(const struct pl08x_channel_data *, int);
u8 lli_buses;
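A hedged sketch of board platform data using the symbolic memcpy settings that replace the old cctl_memcpy word; the slave channel table is a placeholder assumed to be defined elsewhere:

static struct pl08x_platform_data example_pl08x_pdata = {
        .slave_channels         = example_slave_channels,      /* placeholder */
        .num_slave_channels     = ARRAY_SIZE(example_slave_channels),
        .memcpy_burst_size      = PL08X_BURST_SZ_256,
        .memcpy_bus_width       = PL08X_BUS_WIDTH_32_BITS,
        .memcpy_prot_buff       = true,
        .memcpy_prot_cache      = true,
};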
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 73fe18edfdaf..e65ae4b2ed48 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -336,11 +336,16 @@ enum {
/* READ_LOG_EXT pages */
ATA_LOG_DIRECTORY = 0x0,
ATA_LOG_SATA_NCQ = 0x10,
- ATA_LOG_NCQ_NON_DATA = 0x12,
- ATA_LOG_NCQ_SEND_RECV = 0x13,
- ATA_LOG_SATA_ID_DEV_DATA = 0x30,
+ ATA_LOG_NCQ_NON_DATA = 0x12,
+ ATA_LOG_NCQ_SEND_RECV = 0x13,
+ ATA_LOG_IDENTIFY_DEVICE = 0x30,
+
+ /* Identify device log pages: */
+ ATA_LOG_SECURITY = 0x06,
ATA_LOG_SATA_SETTINGS = 0x08,
ATA_LOG_ZONED_INFORMATION = 0x09,
+
+ /* Identify device SATA settings log:*/
ATA_LOG_DEVSLP_OFFSET = 0x30,
ATA_LOG_DEVSLP_SIZE = 0x08,
ATA_LOG_DEVSLP_MDAT = 0x00,
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 557d84063934..ace73f96eb1e 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -66,7 +66,7 @@ static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
static inline void __add_wb_stat(struct bdi_writeback *wb,
enum wb_stat_item item, s64 amount)
{
- __percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
+ percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}
static inline void __inc_wb_stat(struct bdi_writeback *wb,
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 01b62e7bac74..7104bea8dab1 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -518,7 +518,7 @@ static inline void blkg_stat_exit(struct blkg_stat *stat)
*/
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
- __percpu_counter_add(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
+ percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}
/**
@@ -597,14 +597,14 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
else
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
- __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
+ percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
if (op_is_sync(op))
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
else
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
- __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
+ percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}
/**
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 962164d36506..e223d91b6439 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -358,6 +358,7 @@ extern void *alloc_large_system_hash(const char *tablename,
#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
* shift passed via *_hash_shift */
+#define HASH_ZERO 0x00000004 /* Zero allocated hash table */
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
* sufficient vmalloc space.
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index bd029e52ef5e..e0abeba3ced7 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -149,6 +149,7 @@ void buffer_check_dirty_writeback(struct page *page,
*/
void mark_buffer_dirty(struct buffer_head *bh);
+void mark_buffer_write_io_error(struct buffer_head *bh);
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index ec47101cb1bf..09f4c7df1478 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -67,12 +67,21 @@ enum {
enum {
CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */
CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */
+
+ /*
+ * Consider namespaces as delegation boundaries. If this flag is
+ * set, controller specific interface files in a namespace root
+ * aren't writeable from inside the namespace.
+ */
+ CGRP_ROOT_NS_DELEGATE = (1 << 3),
};
/* cftype->flags */
enum {
CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
+ CFTYPE_NS_DELEGATABLE = (1 << 2), /* writeable beyond delegation boundaries */
+
CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */
@@ -166,6 +175,9 @@ struct css_set {
/* the default cgroup associated with this css_set */
struct cgroup *dfl_cgrp;
+ /* internal task count, protected by css_set_lock */
+ int nr_tasks;
+
/*
* Lists running through all tasks using this cgroup group.
* mg_tasks lists tasks which belong to this cset but are in the
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index a428aec36ace..c59c62571e4f 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -412,9 +412,10 @@ extern const struct clk_ops clk_divider_ro_ops;
unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
unsigned int val, const struct clk_div_table *table,
unsigned long flags);
-long divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate, const struct clk_div_table *table,
- u8 width, unsigned long flags);
+long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
+ unsigned long rate, unsigned long *prate,
+ const struct clk_div_table *table,
+ u8 width, unsigned long flags);
int divider_get_val(unsigned long rate, unsigned long parent_rate,
const struct clk_div_table *table, u8 width,
unsigned long flags);
@@ -757,6 +758,15 @@ static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
dst->core = src->core;
}
+static inline long divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate,
+ const struct clk_div_table *table,
+ u8 width, unsigned long flags)
+{
+ return divider_round_rate_parent(hw, clk_hw_get_parent(hw),
+ rate, prate, table, width, flags);
+}
+
/*
* FIXME clock api without lock protection
*/
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 024cd07870d0..91bd464f4c9b 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -77,6 +77,21 @@ struct clk_notifier_data {
unsigned long new_rate;
};
+/**
+ * struct clk_bulk_data - Data used for bulk clk operations.
+ *
+ * @id: clock consumer ID
+ * @clk: struct clk * to store the associated clock
+ *
+ * The CLK APIs provide a series of clk_bulk_() API calls as
+ * a convenience to consumers which require multiple clks. This
+ * structure is used to manage data for these calls.
+ */
+struct clk_bulk_data {
+ const char *id;
+ struct clk *clk;
+};
+
#ifdef CONFIG_COMMON_CLK
/**
@@ -185,12 +200,20 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q)
*/
#ifdef CONFIG_HAVE_CLK_PREPARE
int clk_prepare(struct clk *clk);
+int __must_check clk_bulk_prepare(int num_clks,
+ const struct clk_bulk_data *clks);
#else
static inline int clk_prepare(struct clk *clk)
{
might_sleep();
return 0;
}
+
+static inline int clk_bulk_prepare(int num_clks, struct clk_bulk_data *clks)
+{
+ might_sleep();
+ return 0;
+}
#endif
/**
@@ -204,11 +227,16 @@ static inline int clk_prepare(struct clk *clk)
*/
#ifdef CONFIG_HAVE_CLK_PREPARE
void clk_unprepare(struct clk *clk);
+void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks);
#else
static inline void clk_unprepare(struct clk *clk)
{
might_sleep();
}
+static inline void clk_bulk_unprepare(int num_clks, struct clk_bulk_data *clks)
+{
+ might_sleep();
+}
#endif
#ifdef CONFIG_HAVE_CLK
@@ -230,6 +258,44 @@ static inline void clk_unprepare(struct clk *clk)
struct clk *clk_get(struct device *dev, const char *id);
/**
+ * clk_bulk_get - lookup and obtain a number of references to clock producer.
+ * @dev: device for clock "consumer"
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table of consumer
+ *
+ * This helper function allows drivers to get several clk consumers in one
+ * operation. If any of the clks cannot be acquired then any clks
+ * that were obtained will be freed before returning to the caller.
+ *
+ * Returns 0 if all clocks specified in clk_bulk_data table are obtained
+ * successfully, or a negative errno otherwise.
+ * The implementation uses @dev and @clk_bulk_data.id to determine the
+ * clock consumer, and thereby the clock producer.
+ * The clock returned is stored in each @clk_bulk_data.clk field.
+ *
+ * Drivers must assume that the clock source is not enabled.
+ *
+ * clk_bulk_get should not be called from within interrupt context.
+ */
+int __must_check clk_bulk_get(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks);
+
+/**
+ * devm_clk_bulk_get - managed get multiple clk consumers
+ * @dev: device for clock "consumer"
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table of consumer
+ *
+ * Return 0 on success, an errno on failure.
+ *
+ * This helper function allows drivers to get several clk
+ * consumers in one operation with management; the clks will
+ * automatically be freed when the device is unbound.
+ */
+int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks);
+
+/**
* devm_clk_get - lookup and obtain a managed reference to a clock producer.
* @dev: device for clock "consumer"
* @id: clock consumer ID
@@ -279,6 +345,18 @@ struct clk *devm_get_clk_from_child(struct device *dev,
int clk_enable(struct clk *clk);
/**
+ * clk_bulk_enable - inform the system when the set of clks should be running.
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table of consumer
+ *
+ * May be called from atomic contexts.
+ *
+ * Returns success (0) or negative errno.
+ */
+int __must_check clk_bulk_enable(int num_clks,
+ const struct clk_bulk_data *clks);
+
+/**
* clk_disable - inform the system when the clock source is no longer required.
* @clk: clock source
*
@@ -295,6 +373,24 @@ int clk_enable(struct clk *clk);
void clk_disable(struct clk *clk);
/**
+ * clk_bulk_disable - inform the system when the set of clks is no
+ * longer required.
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table of consumer
+ *
+ * Inform the system that a set of clks is no longer required by
+ * a driver and may be shut down.
+ *
+ * May be called from atomic contexts.
+ *
+ * Implementation detail: if the set of clks is shared between
+ * multiple drivers, clk_bulk_enable() calls must be balanced by the
+ * same number of clk_bulk_disable() calls for the clock source to be
+ * disabled.
+ */
+void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks);
+
+/**
* clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
* This is only valid once the clock source has been enabled.
* @clk: clock source
@@ -314,6 +410,19 @@ unsigned long clk_get_rate(struct clk *clk);
void clk_put(struct clk *clk);
/**
+ * clk_bulk_put - "free" the clock source
+ * @num_clks: the number of clk_bulk_data
+ * @clks: the clk_bulk_data table of consumer
+ *
+ * Note: drivers must ensure that all clk_bulk_enable calls made on this
+ * clock source are balanced by clk_bulk_disable calls prior to calling
+ * this function.
+ *
+ * clk_bulk_put should not be called from within interrupt context.
+ */
+void clk_bulk_put(int num_clks, struct clk_bulk_data *clks);
+
+/**
* devm_clk_put - "free" a managed clock source
* @dev: device used to acquire the clock
* @clk: clock source acquired with devm_clk_get()
@@ -445,11 +554,23 @@ static inline struct clk *clk_get(struct device *dev, const char *id)
return NULL;
}
+static inline int clk_bulk_get(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks)
+{
+ return 0;
+}
+
static inline struct clk *devm_clk_get(struct device *dev, const char *id)
{
return NULL;
}
+static inline int devm_clk_bulk_get(struct device *dev, int num_clks,
+ struct clk_bulk_data *clks)
+{
+ return 0;
+}
+
static inline struct clk *devm_get_clk_from_child(struct device *dev,
struct device_node *np, const char *con_id)
{
@@ -458,6 +579,8 @@ static inline struct clk *devm_get_clk_from_child(struct device *dev,
static inline void clk_put(struct clk *clk) {}
+static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {}
+
static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
static inline int clk_enable(struct clk *clk)
@@ -465,8 +588,17 @@ static inline int clk_enable(struct clk *clk)
return 0;
}
+static inline int clk_bulk_enable(int num_clks, struct clk_bulk_data *clks)
+{
+ return 0;
+}
+
static inline void clk_disable(struct clk *clk) {}
+
+static inline void clk_bulk_disable(int num_clks,
+ struct clk_bulk_data *clks) {}
+
static inline unsigned long clk_get_rate(struct clk *clk)
{
return 0;
@@ -539,6 +671,10 @@ static inline struct clk *of_clk_get_by_name(struct device_node *np,
{
return ERR_PTR(-ENOENT);
}
+static inline struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
+{
+ return ERR_PTR(-ENOENT);
+}
#endif
#endif
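A minimal consumer sketch for the bulk clock helpers declared above; the clock names and the probe function are illustrative:

static struct clk_bulk_data example_clks[] = {
        { .id = "bus" },
        { .id = "mod" },
};

static int example_probe(struct device *dev)
{
        int ret;

        ret = devm_clk_bulk_get(dev, ARRAY_SIZE(example_clks), example_clks);
        if (ret)
                return ret;

        ret = clk_bulk_prepare(ARRAY_SIZE(example_clks), example_clks);
        if (ret)
                return ret;

        ret = clk_bulk_enable(ARRAY_SIZE(example_clks), example_clks);
        if (ret)
                clk_bulk_unprepare(ARRAY_SIZE(example_clks), example_clks);

        /* balance with clk_bulk_disable()/clk_bulk_unprepare() on teardown */
        return ret;
}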
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 2ed54020ace0..5a6a109b4a50 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -402,8 +402,7 @@ asmlinkage long compat_sys_wait4(compat_pid_t pid,
#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
-#define BITS_TO_COMPAT_LONGS(bits) \
- (((bits)+BITS_PER_COMPAT_LONG-1)/BITS_PER_COMPAT_LONG)
+#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
unsigned long bitmap_size);
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index d614c5ea1b5e..de179993e039 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -15,11 +15,3 @@
* with any version that can compile the kernel
*/
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
-/*
- * GCC does not warn about unused static inline functions for
- * -Wunused-function. This turns out to avoid the need for complex #ifdef
- * directives. Suppress the warning in clang as well.
- */
-#undef inline
-#define inline inline __attribute__((unused)) notrace
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 7deaae3dc87d..cd4bbe8242bd 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -66,18 +66,22 @@
/*
* Force always-inline if the user requests it so via the .config,
- * or if gcc is too old:
+ * or if gcc is too old.
+ * GCC does not warn about unused static inline functions for
+ * -Wunused-function. This turns out to avoid the need for complex #ifdef
+ * directives. Suppress the warning in clang as well by using "unused"
+ * function attribute, which is redundant but not harmful for gcc.
*/
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-#define inline inline __attribute__((always_inline)) notrace
-#define __inline__ __inline__ __attribute__((always_inline)) notrace
-#define __inline __inline __attribute__((always_inline)) notrace
+#define inline inline __attribute__((always_inline,unused)) notrace
+#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace
+#define __inline __inline __attribute__((always_inline,unused)) notrace
#else
/* A lot of inline functions can cause havoc with function tracing */
-#define inline inline notrace
-#define __inline__ __inline__ notrace
-#define __inline __inline notrace
+#define inline inline __attribute__((unused)) notrace
+#define __inline__ __inline__ __attribute__((unused)) notrace
+#define __inline __inline __attribute__((unused)) notrace
#endif
#define __always_inline inline __attribute__((always_inline))
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 541a197ba4a2..4090a42578a8 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -10,6 +10,11 @@
#define CRASH_CORE_NOTE_NAME_BYTES ALIGN(sizeof(CRASH_CORE_NOTE_NAME), 4)
#define CRASH_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4)
+/*
+ * The per-cpu notes area is a list of notes terminated by a "NULL"
+ * note header. For kdump, the code in vmcore.c runs in the context
+ * of the second kernel to combine them into one note.
+ */
#define CRASH_CORE_NOTE_BYTES ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
CRASH_CORE_NOTE_NAME_BYTES + \
CRASH_CORE_NOTE_DESC_BYTES)
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 5ec1f6c47716..8f39db7439c3 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -16,8 +16,15 @@ struct dax_operations {
*/
long (*direct_access)(struct dax_device *, pgoff_t, long,
void **, pfn_t *);
+ /* copy_from_iter: required operation for fs-dax direct-i/o */
+ size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
+ struct iov_iter *);
+ /* flush: optional driver-specific cache management after writes */
+ void (*flush)(struct dax_device *, pgoff_t, void *, size_t);
};
+extern struct attribute_group dax_attribute_group;
+
#if IS_ENABLED(CONFIG_DAX)
struct dax_device *dax_get_by_host(const char *host);
void put_dax(struct dax_device *dax_dev);
@@ -75,6 +82,11 @@ void kill_dax(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
void **kaddr, pfn_t *pfn);
+size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+ size_t bytes, struct iov_iter *i);
+void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+ size_t size);
+void dax_write_cache(struct dax_device *dax_dev, bool wc);
/*
* We use lowest available bit in exceptional entry for locking, one bit for
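A hedged sketch of a DAX provider filling in the two new operations; forwarding to copy_from_iter() is only the simplest possible choice, and example_direct_access is assumed to exist elsewhere:

static size_t example_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
                void *addr, size_t bytes, struct iov_iter *i)
{
        return copy_from_iter(addr, bytes, i);
}

static void example_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff,
                void *addr, size_t size)
{
        /* optional: write back CPU caches covering [addr, addr + size) */
}

static const struct dax_operations example_dax_ops = {
        .direct_access  = example_direct_access,        /* defined elsewhere */
        .copy_from_iter = example_copy_from_iter,
        .flush          = example_dax_flush,
};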
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index d2e38dc6172c..025727bf6797 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -591,5 +591,11 @@ static inline struct inode *d_real_inode(const struct dentry *dentry)
return d_backing_inode(d_real((struct dentry *) dentry, NULL, 0));
}
+struct name_snapshot {
+ const char *name;
+ char inline_name[DNAME_INLINE_LEN];
+};
+void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
+void release_dentry_name_snapshot(struct name_snapshot *);
#endif /* __LINUX_DCACHE_H */
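A small illustrative use of the name snapshot pair declared above, for code that must reference a dentry's name without racing against a concurrent rename:

static void example_log_dentry(struct dentry *dentry)
{
        struct name_snapshot name;

        take_dentry_name_snapshot(&name, dentry);
        pr_info("operating on %s\n", name.name);
        release_dentry_name_snapshot(&name);
}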
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 456da5017b32..1473455d0341 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -132,6 +132,10 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
*/
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn);
+typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i);
+typedef void (*dm_dax_flush_fn)(struct dm_target *ti, pgoff_t pgoff, void *addr,
+ size_t size);
#define PAGE_SECTORS (PAGE_SIZE / 512)
void dm_error(const char *message);
@@ -181,6 +185,8 @@ struct target_type {
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
dm_dax_direct_access_fn direct_access;
+ dm_dax_copy_from_iter_fn dax_copy_from_iter;
+ dm_dax_flush_fn dax_flush;
/* For internal device-mapper use. */
struct list_head list;
@@ -237,6 +243,12 @@ typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio)
#define DM_TARGET_PASSES_INTEGRITY 0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)
+/*
+ * Indicates that a target supports host-managed zoned block devices.
+ */
+#define DM_TARGET_ZONED_HM 0x00000040
+#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
+
struct dm_target {
struct dm_table *table;
struct target_type *type;
@@ -444,6 +456,8 @@ struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
+void dm_remap_zone_report(struct dm_target *ti, struct bio *bio,
+ sector_t start);
union map_info *dm_get_rq_mapinfo(struct request *rq);
struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
@@ -543,48 +557,41 @@ extern struct ratelimit_state dm_ratelimit_state;
#define dm_ratelimit() 0
#endif
-#define DMCRIT(f, arg...) \
- printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
-
-#define DMERR(f, arg...) \
- printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
-#define DMERR_LIMIT(f, arg...) \
- do { \
- if (dm_ratelimit()) \
- printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
- f "\n", ## arg); \
- } while (0)
-
-#define DMWARN(f, arg...) \
- printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
-#define DMWARN_LIMIT(f, arg...) \
- do { \
- if (dm_ratelimit()) \
- printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
- f "\n", ## arg); \
- } while (0)
-
-#define DMINFO(f, arg...) \
- printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
-#define DMINFO_LIMIT(f, arg...) \
- do { \
- if (dm_ratelimit()) \
- printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
- "\n", ## arg); \
- } while (0)
+#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"
+
+#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)
+
+#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
+#define DMERR_LIMIT(fmt, ...) \
+do { \
+ if (dm_ratelimit()) \
+ DMERR(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
+#define DMWARN_LIMIT(fmt, ...) \
+do { \
+ if (dm_ratelimit()) \
+ DMWARN(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
+#define DMINFO_LIMIT(fmt, ...) \
+do { \
+ if (dm_ratelimit()) \
+ DMINFO(fmt, ##__VA_ARGS__); \
+} while (0)
#ifdef CONFIG_DM_DEBUG
-# define DMDEBUG(f, arg...) \
- printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
-# define DMDEBUG_LIMIT(f, arg...) \
- do { \
- if (dm_ratelimit()) \
- printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
- "\n", ## arg); \
- } while (0)
+#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__)
+#define DMDEBUG_LIMIT(fmt, ...) \
+do { \
+ if (dm_ratelimit()) \
+ DMDEBUG(fmt, ##__VA_ARGS__); \
+} while (0)
#else
-# define DMDEBUG(f, arg...) do {} while (0)
-# define DMDEBUG_LIMIT(f, arg...) do {} while (0)
+#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
+#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
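The reworked logging macros still rely on each target supplying DM_MSG_PREFIX; a minimal illustration (the target name is made up):

/* targets conventionally define DM_MSG_PREFIX before including dm headers */
#define DM_MSG_PREFIX "example"
#include <linux/device-mapper.h>

static void example_report(int err)
{
        if (err)
                DMERR_LIMIT("I/O error %d", err);
        else
                DMDEBUG("all good");    /* no_printk() unless CONFIG_DM_DEBUG */
}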
diff --git a/include/linux/device.h b/include/linux/device.h
index 6baa1238f158..723cd54b94da 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -375,6 +375,7 @@ int subsys_virtual_register(struct bus_type *subsys,
* @suspend: Used to put the device to sleep mode, usually to a low power
* state.
* @resume: Used to bring the device from the sleep mode.
+ * @shutdown: Called at shut-down time to quiesce the device.
* @ns_type: Callbacks so sysfs can determine namespaces.
* @namespace: Namespace of the device belongs to this class.
* @pm: The default device power management operations of this class.
@@ -403,6 +404,7 @@ struct class {
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
+ int (*shutdown)(struct device *dev);
const struct kobj_ns_type_operations *ns_type;
const void *(*namespace)(struct device *dev);
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
index f486d636b82e..cfac8588ed56 100644
--- a/include/linux/dm-kcopyd.h
+++ b/include/linux/dm-kcopyd.h
@@ -20,6 +20,7 @@
#define DM_KCOPYD_MAX_REGIONS 8
#define DM_KCOPYD_IGNORE_ERROR 1
+#define DM_KCOPYD_WRITE_SEQ 2
struct dm_kcopyd_throttle {
unsigned throttle;
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 4f3eecedca2d..843ab866e0f4 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -127,7 +127,6 @@ struct dma_map_ops {
enum dma_data_direction dir);
int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
int (*dma_supported)(struct device *dev, u64 mask);
- int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
u64 (*get_required_mask)(struct device *dev);
#endif
@@ -546,15 +545,9 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
if (get_dma_ops(dev)->mapping_error)
return get_dma_ops(dev)->mapping_error(dev, dma_addr);
-
-#ifdef DMA_ERROR_CODE
- return dma_addr == DMA_ERROR_CODE;
-#else
return 0;
-#endif
}
-#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
@@ -565,16 +558,10 @@ static inline int dma_supported(struct device *dev, u64 mask)
return 1;
return ops->dma_supported(dev, mask);
}
-#endif
#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- if (ops->set_dma_mask)
- return ops->set_dma_mask(dev, mask);
-
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
*dev->dma_mask = mask;
@@ -747,10 +734,9 @@ extern void *dmam_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle);
-extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
-extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle);
+extern void *dmam_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
phys_addr_t phys_addr,
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h
index b63b25814d77..e166cac8e870 100644
--- a/include/linux/dma/dw.h
+++ b/include/linux/dma/dw.h
@@ -50,25 +50,4 @@ static inline int dw_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; }
static inline int dw_dma_remove(struct dw_dma_chip *chip) { return 0; }
#endif /* CONFIG_DW_DMAC_CORE */
-/* DMA API extensions */
-struct dw_desc;
-
-struct dw_cyclic_desc {
- struct dw_desc **desc;
- unsigned long periods;
- void (*period_callback)(void *param);
- void *period_callback_param;
-};
-
-struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
- dma_addr_t buf_addr, size_t buf_len, size_t period_len,
- enum dma_transfer_direction direction);
-void dw_dma_cyclic_free(struct dma_chan *chan);
-int dw_dma_cyclic_start(struct dma_chan *chan);
-void dw_dma_cyclic_stop(struct dma_chan *chan);
-
-dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
-
-dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
-
#endif /* _DMA_DW_H */
diff --git a/include/linux/errseq.h b/include/linux/errseq.h
new file mode 100644
index 000000000000..9e0d444ac88d
--- /dev/null
+++ b/include/linux/errseq.h
@@ -0,0 +1,19 @@
+#ifndef _LINUX_ERRSEQ_H
+#define _LINUX_ERRSEQ_H
+
+/* See lib/errseq.c for more info */
+
+typedef u32 errseq_t;
+
+errseq_t __errseq_set(errseq_t *eseq, int err);
+static inline void errseq_set(errseq_t *eseq, int err)
+{
+ /* Optimize for the common case of no error */
+ if (unlikely(err))
+ __errseq_set(eseq, err);
+}
+
+errseq_t errseq_sample(errseq_t *eseq);
+int errseq_check(errseq_t *eseq, errseq_t since);
+int errseq_check_and_advance(errseq_t *eseq, errseq_t *since);
+#endif
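A hedged sketch of the sample/record/check cycle the errseq_t cursor supports; the containing structure and function names are illustrative:

struct example_mapping {
        errseq_t wb_err;
};

static errseq_t example_open(struct example_mapping *m)
{
        /* remember where this observer starts watching from */
        return errseq_sample(&m->wb_err);
}

static void example_writeback_failed(struct example_mapping *m, int err)
{
        errseq_set(&m->wb_err, err);    /* cheap no-op when err == 0 */
}

static int example_fsync(struct example_mapping *m, errseq_t *since)
{
        /* reports an error at most once per observer and advances the cursor */
        return errseq_check_and_advance(&m->wb_err, since);
}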
diff --git a/include/linux/filter.h b/include/linux/filter.h
index f1fc9baa3509..bfef1e5734f8 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -16,13 +16,10 @@
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/cryptohash.h>
+#include <linux/set_memory.h>
#include <net/sch_generic.h>
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-#include <asm/set_memory.h>
-#endif
-
#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 818568c8e5ed..0cfa47125d52 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -32,6 +32,7 @@
#include <linux/workqueue.h>
#include <linux/delayed_call.h>
#include <linux/uuid.h>
+#include <linux/errseq.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
@@ -401,6 +402,7 @@ struct address_space {
gfp_t gfp_mask; /* implicit gfp mask for allocations */
struct list_head private_list; /* ditto */
void *private_data; /* ditto */
+ errseq_t wb_err;
} __attribute__((aligned(sizeof(long))));
/*
* On most architectures that alignment is already the case; but
@@ -879,6 +881,7 @@ struct file {
struct list_head f_tfile_llink;
#endif /* #ifdef CONFIG_EPOLL */
struct address_space *f_mapping;
+ errseq_t f_wb_err;
} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
struct file_handle {
@@ -1748,12 +1751,6 @@ static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
return file->f_op->mmap(file, vma);
}
-static inline int call_fsync(struct file *file, loff_t start, loff_t end,
- int datasync)
-{
- return file->f_op->fsync(file, start, end, datasync);
-}
-
ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
unsigned long nr_segs, unsigned long fast_segs,
struct iovec *fast_pointer,
@@ -2542,7 +2539,7 @@ extern int write_inode_now(struct inode *, int);
extern int filemap_fdatawrite(struct address_space *);
extern int filemap_flush(struct address_space *);
extern int filemap_fdatawait(struct address_space *);
-extern void filemap_fdatawait_keep_errors(struct address_space *);
+extern int filemap_fdatawait_keep_errors(struct address_space *mapping);
extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
loff_t lend);
extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
@@ -2556,6 +2553,62 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
loff_t start, loff_t end);
extern int filemap_check_errors(struct address_space *mapping);
+extern void __filemap_set_wb_err(struct address_space *mapping, int err);
+extern int __must_check file_check_and_advance_wb_err(struct file *file);
+extern int __must_check file_write_and_wait_range(struct file *file,
+ loff_t start, loff_t end);
+
+/**
+ * filemap_set_wb_err - set a writeback error on an address_space
+ * @mapping: mapping in which to set writeback error
+ * @err: error to be set in mapping
+ *
+ * When writeback fails in some way, we must record that error so that
+ * userspace can be informed when fsync and the like are called. We endeavor
+ * to report errors on any file that was open at the time of the error. Some
+ * internal callers also need to know when writeback errors have occurred.
+ *
+ * When a writeback error occurs, most filesystems will want to call
+ * filemap_set_wb_err to record the error in the mapping so that it will be
+ * automatically reported whenever fsync is called on the file.
+ *
+ * FIXME: mention FS_* flag here?
+ */
+static inline void filemap_set_wb_err(struct address_space *mapping, int err)
+{
+ /* Fastpath for common case of no error */
+ if (unlikely(err))
+ __filemap_set_wb_err(mapping, err);
+}
+
+/**
+ * filemap_check_wb_error - has an error occurred since the mark was sampled?
+ * @mapping: mapping to check for writeback errors
+ * @since: previously-sampled errseq_t
+ *
+ * Grab the errseq_t value from the mapping, and see if it has changed "since"
+ * the given value was sampled.
+ *
+ * If it has then report the latest error set, otherwise return 0.
+ */
+static inline int filemap_check_wb_err(struct address_space *mapping,
+ errseq_t since)
+{
+ return errseq_check(&mapping->wb_err, since);
+}
+
+/**
+ * filemap_sample_wb_err - sample the current errseq_t to test for later errors
+ * @mapping: mapping to be sampled
+ *
+ * Writeback errors are always reported relative to a particular sample point
+ * in the past. This function provides those sample points.
+ */
+static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
+{
+ return errseq_sample(&mapping->wb_err);
+}
+
extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
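A hedged sketch of an ->fsync implementation built on the helpers above; the metadata step is a placeholder and not prescribed by this header:

static int example_fsync(struct file *file, loff_t start, loff_t end,
                         int datasync)
{
        int ret, err2;

        /* write dirty pages in the range and wait for them */
        ret = file_write_and_wait_range(file, start, end);

        /* ... flush filesystem metadata here ... */

        /* pick up writeback errors recorded since this file last checked */
        err2 = file_check_and_advance_wb_err(file);
        return ret ? ret : err2;
}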
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index b43d3f5bd9ea..b78aa7ac77ce 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -293,35 +293,4 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
}
}
-#if defined(CONFIG_FSNOTIFY) /* notify helpers */
-
-/*
- * fsnotify_oldname_init - save off the old filename before we change it
- */
-static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
-{
- return kstrdup(name, GFP_KERNEL);
-}
-
-/*
- * fsnotify_oldname_free - free the name we got from fsnotify_oldname_init
- */
-static inline void fsnotify_oldname_free(const unsigned char *old_name)
-{
- kfree(old_name);
-}
-
-#else /* CONFIG_FSNOTIFY */
-
-static inline const char *fsnotify_oldname_init(const unsigned char *name)
-{
- return NULL;
-}
-
-static inline void fsnotify_oldname_free(const unsigned char *old_name)
-{
-}
-
-#endif /* CONFIG_FSNOTIFY */
-
#endif /* _LINUX_FS_NOTIFY_H */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 473f088aabea..5857390ac35a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -119,6 +119,8 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* for any of the functions that this ops will be registered for, then
* this ops will fail to register or set_filter_ip.
* PID - Is affected by set_ftrace_pid (allows filtering on those pids)
+ * RCU - Set when the ops can only be called when RCU is watching.
+ * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
*/
enum {
FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -137,6 +139,7 @@ enum {
FTRACE_OPS_FL_IPMODIFY = 1 << 13,
FTRACE_OPS_FL_PID = 1 << 14,
FTRACE_OPS_FL_RCU = 1 << 15,
+ FTRACE_OPS_FL_TRACE_ARRAY = 1 << 16,
};
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -445,7 +448,8 @@ enum {
FTRACE_ITER_PRINTALL = (1 << 2),
FTRACE_ITER_DO_PROBES = (1 << 3),
FTRACE_ITER_PROBE = (1 << 4),
- FTRACE_ITER_ENABLED = (1 << 5),
+ FTRACE_ITER_MOD = (1 << 5),
+ FTRACE_ITER_ENABLED = (1 << 6),
};
void arch_ftrace_update_code(int command);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index a89d37e8b387..4c6656f1fee7 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -432,14 +432,13 @@ static inline void arch_alloc_page(struct page *page, int order) { }
#endif
struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
- struct zonelist *zonelist, nodemask_t *nodemask);
+__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
+ nodemask_t *nodemask);
static inline struct page *
-__alloc_pages(gfp_t gfp_mask, unsigned int order,
- struct zonelist *zonelist)
+__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
{
- return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
+ return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
}
/*
@@ -452,7 +451,7 @@ __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
VM_WARN_ON(!node_online(nid));
- return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
+ return __alloc_pages(gfp_mask, order, nid);
}
/*
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 393582867afd..af20369ec8e7 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -213,6 +213,9 @@ bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset);
bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset);
bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset);
+/* Sleep persistence inquiry for drivers */
+bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset);
+
/* get driver data */
void *gpiochip_get_data(struct gpio_chip *chip);
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index f738d50cc17d..6e76b16fcade 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -9,6 +9,8 @@ enum gpio_lookup_flags {
GPIO_ACTIVE_LOW = (1 << 0),
GPIO_OPEN_DRAIN = (1 << 1),
GPIO_OPEN_SOURCE = (1 << 2),
+ GPIO_SLEEP_MAINTAIN_VALUE = (0 << 3),
+ GPIO_SLEEP_MAY_LOOSE_VALUE = (1 << 3),
};
/**
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a3762d49ba39..d3b3e8fcc717 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -113,6 +113,7 @@ extern unsigned long thp_get_unmapped_area(struct file *filp,
extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);
+bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
@@ -231,6 +232,12 @@ static inline void prep_transhuge_page(struct page *page) {}
#define thp_get_unmapped_area NULL
+static inline bool
+can_split_huge_page(struct page *page, int *pextra_pins)
+{
+ BUILD_BUG();
+ return false;
+}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index b857fc8cc2ec..46bfb702e7d6 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -14,6 +14,30 @@ struct ctl_table;
struct user_struct;
struct mmu_gather;
+#ifndef is_hugepd
+/*
+ * Some architectures requires a hugepage directory format that is
+ * required to support multiple hugepage sizes. For example
+ * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
+ * introduced the same on powerpc. This allows for a more flexible hugepage
+ * pagetable layout.
+ */
+typedef struct { unsigned long pd; } hugepd_t;
+#define is_hugepd(hugepd) (0)
+#define __hugepd(x) ((hugepd_t) { (x) })
+static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
+ unsigned pdshift, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ return 0;
+}
+#else
+extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
+ unsigned pdshift, unsigned long end,
+ int write, struct page **pages, int *nr);
+#endif
+
+
#ifdef CONFIG_HUGETLB_PAGE
#include <linux/mempolicy.h>
@@ -113,19 +137,27 @@ extern struct list_head huge_boot_pages;
pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write);
+struct page *follow_huge_pd(struct vm_area_struct *vma,
+ unsigned long address, hugepd_t hpd,
+ int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
pud_t *pud, int flags);
+struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
+ pgd_t *pgd, int flags);
+
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot);
+bool is_hugetlb_entry_migration(pte_t pte);
#else /* !CONFIG_HUGETLB_PAGE */
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
@@ -147,8 +179,10 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
static inline void hugetlb_show_meminfo(void)
{
}
+#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
#define follow_huge_pmd(mm, addr, pmd, flags) NULL
#define follow_huge_pud(mm, addr, pud, flags) NULL
+#define follow_huge_pgd(mm, addr, pgd, flags) NULL
#define prepare_hugepage_range(file, addr, len) (-EINVAL)
#define pmd_huge(x) 0
#define pud_huge(x) 0
@@ -157,7 +191,7 @@ static inline void hugetlb_show_meminfo(void)
#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
src_addr, pagep) ({ BUG(); 0; })
-#define huge_pte_offset(mm, address) 0
+#define huge_pte_offset(mm, address, sz) 0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
return 0;
@@ -217,29 +251,6 @@ static inline int pud_write(pud_t pud)
}
#endif
-#ifndef is_hugepd
-/*
- * Some architectures requires a hugepage directory format that is
- * required to support multiple hugepage sizes. For example
- * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
- * introduced the same on powerpc. This allows for a more flexible hugepage
- * pagetable layout.
- */
-typedef struct { unsigned long pd; } hugepd_t;
-#define is_hugepd(hugepd) (0)
-#define __hugepd(x) ((hugepd_t) { (x) })
-static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned pdshift, unsigned long end,
- int write, struct page **pages, int *nr)
-{
- return 0;
-}
-#else
-extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
- unsigned pdshift, unsigned long end,
- int write, struct page **pages, int *nr);
-#endif
-
#define HUGETLB_ANON_FILE "anon_hugepage"
enum {
@@ -466,7 +477,11 @@ extern int dissolve_free_huge_pages(unsigned long start_pfn,
static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
- return huge_page_shift(h) == PMD_SHIFT;
+ if ((huge_page_shift(h) == PMD_SHIFT) ||
+ (huge_page_shift(h) == PGDIR_SHIFT))
+ return true;
+ else
+ return false;
#else
return false;
#endif
@@ -501,6 +516,14 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
atomic_long_sub(l, &mm->hugetlb_usage);
}
+
+#ifndef set_huge_swap_pte_at
+static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned long sz)
+{
+ set_huge_pte_at(mm, addr, ptep, pte);
+}
+#endif
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
@@ -518,6 +541,11 @@ struct hstate {};
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
+static inline bool hstate_is_gigantic(struct hstate *h)
+{
+ return false;
+}
+
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
return 1;
@@ -545,6 +573,11 @@ static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
+
+static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned long sz)
+{
+}
#endif /* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index e09fc8290c2f..b7d7bbec74e0 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -744,7 +744,6 @@ struct vmbus_channel {
u32 ringbuffer_pagecount;
struct hv_ring_buffer_info outbound; /* send to parent */
struct hv_ring_buffer_info inbound; /* receive from parent */
- spinlock_t inbound_lock;
struct vmbus_close_msg close_msg;
diff --git a/include/linux/imx-media.h b/include/linux/imx-media.h
new file mode 100644
index 000000000000..77221ecad6fc
--- /dev/null
+++ b/include/linux/imx-media.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2014-2017 Mentor Graphics Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#ifndef __LINUX_IMX_MEDIA_H__
+#define __LINUX_IMX_MEDIA_H__
+
+/*
+ * events from the subdevs
+ */
+#define V4L2_EVENT_IMX_CLASS V4L2_EVENT_PRIVATE_START
+#define V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR (V4L2_EVENT_IMX_CLASS + 1)
+
+enum imx_ctrl_id {
+ V4L2_CID_IMX_FIM_ENABLE = (V4L2_CID_USER_IMX_BASE + 0),
+ V4L2_CID_IMX_FIM_NUM,
+ V4L2_CID_IMX_FIM_TOLERANCE_MIN,
+ V4L2_CID_IMX_FIM_TOLERANCE_MAX,
+ V4L2_CID_IMX_FIM_NUM_SKIP,
+ V4L2_CID_IMX_FIM_ICAP_EDGE,
+ V4L2_CID_IMX_FIM_ICAP_CHANNEL,
+};
+
+#endif
diff --git a/include/linux/input/sparse-keymap.h b/include/linux/input/sparse-keymap.h
index 52db62064c6e..c7346e33d958 100644
--- a/include/linux/input/sparse-keymap.h
+++ b/include/linux/input/sparse-keymap.h
@@ -51,7 +51,6 @@ struct key_entry *sparse_keymap_entry_from_keycode(struct input_dev *dev,
int sparse_keymap_setup(struct input_dev *dev,
const struct key_entry *keymap,
int (*setup)(struct input_dev *, struct key_entry *));
-void sparse_keymap_free(struct input_dev *dev);
void sparse_keymap_report_entry(struct input_dev *dev, const struct key_entry *ke,
unsigned int value, bool autorelease);
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 37f8e354f564..431e1d83e274 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -291,7 +291,7 @@ extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
-int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd);
+int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
#else /* CONFIG_SMP */
@@ -331,7 +331,7 @@ irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
}
static inline int
-irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
+irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
{
return maxvec;
}
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 1fa293a37f4a..6a1f87ff94e2 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -405,6 +405,7 @@
#define ICH_LR_PHYS_ID_SHIFT 32
#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
#define ICH_LR_PRIORITY_SHIFT 48
+#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT)
/* These are for GICv2 emulation only */
#define GICH_LR_VIRTUALID (0x3ffUL << 0)
@@ -416,6 +417,11 @@
#define ICH_HCR_EN (1 << 0)
#define ICH_HCR_UIE (1 << 1)
+#define ICH_HCR_TC (1 << 10)
+#define ICH_HCR_TALL0 (1 << 11)
+#define ICH_HCR_TALL1 (1 << 12)
+#define ICH_HCR_EOIcount_SHIFT 27
+#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT)
#define ICH_VMCR_ACK_CTL_SHIFT 2
#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT)
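
Usage sketch (editor's illustration, not part of this patch): decoding the newly added ICH_LR priority and ICH_HCR EOIcount fields; the helper names are hypothetical.

static inline u8 ich_lr_priority(u64 lr)
{
	/* Extract the 8-bit priority field using the mask added above */
	return (lr & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
}

static inline u32 ich_hcr_eoicount(u32 hcr)
{
	/* EOIcount occupies bits [31:27] of ICH_HCR */
	return (hcr & ICH_HCR_EOIcount_MASK) >> ICH_HCR_EOIcount_SHIFT;
}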
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index c9481ebcbc0c..65888418fb69 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -63,15 +63,6 @@
#define KEXEC_CORE_NOTE_NAME CRASH_CORE_NOTE_NAME
/*
- * The per-cpu notes area is a list of notes terminated by a "NULL"
- * note header. For kdump, the code in vmcore.c runs in the context
- * of the second kernel to combine them into one note.
- */
-#ifndef KEXEC_NOTE_BYTES
-#define KEXEC_NOTE_BYTES CRASH_CORE_NOTE_BYTES
-#endif
-
-/*
* This structure is used to hold the arguments that are used when loading
* kernel binaries.
*/
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 1c2a32829620..590343f6c1b1 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -22,6 +22,7 @@
#define __KMEMLEAK_H
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#ifdef CONFIG_DEBUG_KMEMLEAK
@@ -30,6 +31,8 @@ extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
gfp_t gfp) __ref;
extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
gfp_t gfp) __ref;
+extern void kmemleak_vmalloc(const struct vm_struct *area, size_t size,
+ gfp_t gfp) __ref;
extern void kmemleak_free(const void *ptr) __ref;
extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
@@ -81,6 +84,10 @@ static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
gfp_t gfp)
{
}
+static inline void kmemleak_vmalloc(const struct vm_struct *area, size_t size,
+ gfp_t gfp)
+{
+}
static inline void kmemleak_free(const void *ptr)
{
}
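
Usage sketch (editor's illustration, not part of this patch): a vmalloc-style allocator would register a freshly created area with the new hook so kmemleak tracks and scans it; with CONFIG_DEBUG_KMEMLEAK off this resolves to the empty stub above. The function name is hypothetical.

static inline void example_track_vm_area(const struct vm_struct *area,
					 size_t size, gfp_t gfp)
{
	/* No-op when kmemleak is compiled out */
	kmemleak_vmalloc(area, size, gfp);
}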
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 8c0664309815..0b50e7b35ed4 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -126,6 +126,13 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_PENDING_TIMER 2
#define KVM_REQ_UNHALT 3
+#define KVM_REQUEST_ARCH_BASE 8
+
+#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
+ BUILD_BUG_ON((unsigned)(nr) >= 32 - KVM_REQUEST_ARCH_BASE); \
+ (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
+})
+#define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -1098,6 +1105,11 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
set_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
}
+static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
+{
+ return READ_ONCE(vcpu->requests);
+}
+
static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
return test_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
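
Usage sketch (editor's illustration, not part of this patch): how an architecture might define a private request on top of KVM_ARCH_REQ() and use kvm_request_pending() as a cheap early-out before testing individual bits with kvm_check_request() (declared elsewhere in this header). KVM_REQ_EXAMPLE_FLUSH and the function are hypothetical.

/* Hypothetical arch-private request built on the new arch base */
#define KVM_REQ_EXAMPLE_FLUSH	KVM_ARCH_REQ(0)

static void example_vcpu_enter(struct kvm_vcpu *vcpu)
{
	/* Cheap early-out: skip bit tests when nothing is pending */
	if (!kvm_request_pending(vcpu))
		return;

	/* kvm_check_request() tests and clears the request bit */
	if (kvm_check_request(KVM_REQ_EXAMPLE_FLUSH, vcpu)) {
		/* perform the arch-specific flush here */
	}
}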
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 9e6633235ad7..55de3da58b1c 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -156,6 +156,7 @@ enum {
ATA_DFLAG_ACPI_PENDING = (1 << 5), /* ACPI resume action pending */
ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */
ATA_DFLAG_AN = (1 << 7), /* AN configured */
+ ATA_DFLAG_TRUSTED = (1 << 8), /* device supports trusted send/recv */
ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */
ATA_DFLAG_CFG_MASK = (1 << 12) - 1,
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 6c807017128d..f3d3e6af8838 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/types.h>
+#include <linux/uuid.h>
enum {
/* when a dimm supports both PMEM and BLK access a label is required */
@@ -54,6 +55,7 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm_bus_descriptor {
const struct attribute_group **attr_groups;
+ unsigned long bus_dsm_mask;
unsigned long cmd_mask;
struct module *module;
char *provider_name;
@@ -71,9 +73,14 @@ struct nd_cmd_desc {
};
struct nd_interleave_set {
- u64 cookie;
+ /* v1.1 definition of the interleave-set-cookie algorithm */
+ u64 cookie1;
+ /* v1.2 definition of the interleave-set-cookie algorithm */
+ u64 cookie2;
/* compatibility with initial buggy Linux implementation */
u64 altcookie;
+
+ guid_t type_guid;
};
struct nd_mapping_desc {
@@ -159,9 +166,11 @@ void *nd_region_provider_data(struct nd_region *nd_region);
void *nd_blk_region_provider_data(struct nd_blk_region *ndbr);
void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data);
struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr);
+unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr);
unsigned int nd_region_acquire_lane(struct nd_region *nd_region);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane);
u64 nd_fletcher64(void *addr, size_t len, bool le);
void nvdimm_flush(struct nd_region *nd_region);
int nvdimm_has_flush(struct nd_region *nd_region);
+int nvdimm_has_cache(struct nd_region *nd_region);
#endif /* __LIBNVDIMM_H__ */
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 8098695e5d8d..77d427974f57 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -57,10 +57,6 @@ struct memblock {
extern struct memblock memblock;
extern int memblock_debug;
-#ifdef CONFIG_MOVABLE_NODE
-/* If movable_node boot option specified */
-extern bool movable_node_enabled;
-#endif /* CONFIG_MOVABLE_NODE */
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
@@ -169,27 +165,11 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
i != (u64)ULLONG_MAX; \
__next_reserved_mem_region(&i, p_start, p_end))
-#ifdef CONFIG_MOVABLE_NODE
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
return m->flags & MEMBLOCK_HOTPLUG;
}
-static inline bool __init_memblock movable_node_is_enabled(void)
-{
- return movable_node_enabled;
-}
-#else
-static inline bool memblock_is_hotpluggable(struct memblock_region *m)
-{
- return false;
-}
-static inline bool movable_node_is_enabled(void)
-{
- return false;
-}
-#endif
-
static inline bool memblock_is_mirror(struct memblock_region *m)
{
return m->flags & MEMBLOCK_MIRROR;
@@ -296,7 +276,6 @@ phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
-#ifdef CONFIG_MOVABLE_NODE
/*
* Set the allocation direction to bottom-up or top-down.
*/
@@ -314,10 +293,6 @@ static inline bool memblock_bottom_up(void)
{
return memblock.bottom_up;
}
-#else
-static inline void __init memblock_set_bottom_up(bool enable) {}
-static inline bool memblock_bottom_up(void) { return false; }
-#endif
/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 899949bbb2f9..3914e3dd6168 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -26,7 +26,8 @@
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
-#include <linux/mmzone.h>
+#include <linux/mm.h>
+#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>
@@ -44,8 +45,6 @@ enum memcg_stat_item {
MEMCG_SOCK,
/* XXX: why are these zone and not node counters? */
MEMCG_KERNEL_STACK_KB,
- MEMCG_SLAB_RECLAIMABLE,
- MEMCG_SLAB_UNRECLAIMABLE,
MEMCG_NR_STAT,
};
@@ -100,11 +99,16 @@ struct mem_cgroup_reclaim_iter {
unsigned int generation;
};
+struct lruvec_stat {
+ long count[NR_VM_NODE_STAT_ITEMS];
+};
+
/*
* per-zone information in memory controller.
*/
struct mem_cgroup_per_node {
struct lruvec lruvec;
+ struct lruvec_stat __percpu *lruvec_stat;
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
@@ -357,6 +361,17 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
+static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
+{
+ struct mem_cgroup_per_node *mz;
+
+ if (mem_cgroup_disabled())
+ return NULL;
+
+ mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+ return mz->memcg;
+}
+
/**
* parent_mem_cgroup - find the accounting parent of a memcg
* @memcg: memcg whose parent to find
@@ -487,23 +502,18 @@ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
return val;
}
-static inline void mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx, int val)
+static inline void __mod_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx, int val)
{
if (!mem_cgroup_disabled())
- this_cpu_add(memcg->stat->count[idx], val);
+ __this_cpu_add(memcg->stat->count[idx], val);
}
-static inline void inc_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
-{
- mod_memcg_state(memcg, idx, 1);
-}
-
-static inline void dec_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+static inline void mod_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx, int val)
{
- mod_memcg_state(memcg, idx, -1);
+ if (!mem_cgroup_disabled())
+ this_cpu_add(memcg->stat->count[idx], val);
}
/**
@@ -523,6 +533,13 @@ static inline void dec_memcg_state(struct mem_cgroup *memcg,
*
* Kernel pages are an exception to this, since they'll never move.
*/
+static inline void __mod_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx, int val)
+{
+ if (page->mem_cgroup)
+ __mod_memcg_state(page->mem_cgroup, idx, val);
+}
+
static inline void mod_memcg_page_state(struct page *page,
enum memcg_stat_item idx, int val)
{
@@ -530,24 +547,99 @@ static inline void mod_memcg_page_state(struct page *page,
mod_memcg_state(page->mem_cgroup, idx, val);
}
-static inline void inc_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
+ enum node_stat_item idx)
{
- mod_memcg_page_state(page, idx, 1);
+ struct mem_cgroup_per_node *pn;
+ long val = 0;
+ int cpu;
+
+ if (mem_cgroup_disabled())
+ return node_page_state(lruvec_pgdat(lruvec), idx);
+
+ pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+ for_each_possible_cpu(cpu)
+ val += per_cpu(pn->lruvec_stat->count[idx], cpu);
+
+ if (val < 0)
+ val = 0;
+
+ return val;
}
-static inline void dec_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+static inline void __mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
{
- mod_memcg_page_state(page, idx, -1);
+ struct mem_cgroup_per_node *pn;
+
+ __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+ if (mem_cgroup_disabled())
+ return;
+ pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+ __mod_memcg_state(pn->memcg, idx, val);
+ __this_cpu_add(pn->lruvec_stat->count[idx], val);
+}
+
+static inline void mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+{
+ struct mem_cgroup_per_node *pn;
+
+ mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+ if (mem_cgroup_disabled())
+ return;
+ pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+ mod_memcg_state(pn->memcg, idx, val);
+ this_cpu_add(pn->lruvec_stat->count[idx], val);
+}
+
+static inline void __mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+{
+ struct mem_cgroup_per_node *pn;
+
+ __mod_node_page_state(page_pgdat(page), idx, val);
+ if (mem_cgroup_disabled() || !page->mem_cgroup)
+ return;
+ __mod_memcg_state(page->mem_cgroup, idx, val);
+ pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
+ __this_cpu_add(pn->lruvec_stat->count[idx], val);
+}
+
+static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+{
+ struct mem_cgroup_per_node *pn;
+
+ mod_node_page_state(page_pgdat(page), idx, val);
+ if (mem_cgroup_disabled() || !page->mem_cgroup)
+ return;
+ mod_memcg_state(page->mem_cgroup, idx, val);
+ pn = page->mem_cgroup->nodeinfo[page_to_nid(page)];
+ this_cpu_add(pn->lruvec_stat->count[idx], val);
}
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
unsigned long *total_scanned);
-static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
- enum vm_event_item idx)
+static inline void count_memcg_events(struct mem_cgroup *memcg,
+ enum vm_event_item idx,
+ unsigned long count)
+{
+ if (!mem_cgroup_disabled())
+ this_cpu_add(memcg->stat->events[idx], count);
+}
+
+static inline void count_memcg_page_event(struct page *page,
+ enum memcg_stat_item idx)
+{
+ if (page->mem_cgroup)
+ count_memcg_events(page->mem_cgroup, idx, 1);
+}
+
+static inline void count_memcg_event_mm(struct mm_struct *mm,
+ enum vm_event_item idx)
{
struct mem_cgroup *memcg;
@@ -556,8 +648,11 @@ static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
- if (likely(memcg))
+ if (likely(memcg)) {
this_cpu_inc(memcg->stat->events[idx]);
+ if (idx == OOM_KILL)
+ cgroup_file_notify(&memcg->events_file);
+ }
rcu_read_unlock();
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -675,6 +770,11 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
return NULL;
}
+static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
+{
+ return NULL;
+}
+
static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
return true;
@@ -745,19 +845,21 @@ static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
return 0;
}
-static inline void mod_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx,
- int nr)
+static inline void __mod_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx,
+ int nr)
{
}
-static inline void inc_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+static inline void mod_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx,
+ int nr)
{
}
-static inline void dec_memcg_state(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+static inline void __mod_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx,
+ int nr)
{
}
@@ -767,14 +869,34 @@ static inline void mod_memcg_page_state(struct page *page,
{
}
-static inline void inc_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
+ enum node_stat_item idx)
{
+ return node_page_state(lruvec_pgdat(lruvec), idx);
}
-static inline void dec_memcg_page_state(struct page *page,
- enum memcg_stat_item idx)
+static inline void __mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+{
+ __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+}
+
+static inline void mod_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx, int val)
+{
+ mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+}
+
+static inline void __mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
{
+ __mod_node_page_state(page_pgdat(page), idx, val);
+}
+
+static inline void mod_lruvec_page_state(struct page *page,
+ enum node_stat_item idx, int val)
+{
+ mod_node_page_state(page_pgdat(page), idx, val);
}
static inline
@@ -789,12 +911,119 @@ static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}
+static inline void count_memcg_events(struct mem_cgroup *memcg,
+ enum vm_event_item idx,
+ unsigned long count)
+{
+}
+
+static inline void count_memcg_page_event(struct page *page,
+ enum memcg_stat_item idx)
+{
+}
+
static inline
-void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
+void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */
+static inline void __inc_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
+{
+ __mod_memcg_state(memcg, idx, 1);
+}
+
+static inline void __dec_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
+{
+ __mod_memcg_state(memcg, idx, -1);
+}
+
+static inline void __inc_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx)
+{
+ __mod_memcg_page_state(page, idx, 1);
+}
+
+static inline void __dec_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx)
+{
+ __mod_memcg_page_state(page, idx, -1);
+}
+
+static inline void __inc_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx)
+{
+ __mod_lruvec_state(lruvec, idx, 1);
+}
+
+static inline void __dec_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx)
+{
+ __mod_lruvec_state(lruvec, idx, -1);
+}
+
+static inline void __inc_lruvec_page_state(struct page *page,
+ enum node_stat_item idx)
+{
+ __mod_lruvec_page_state(page, idx, 1);
+}
+
+static inline void __dec_lruvec_page_state(struct page *page,
+ enum node_stat_item idx)
+{
+ __mod_lruvec_page_state(page, idx, -1);
+}
+
+static inline void inc_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
+{
+ mod_memcg_state(memcg, idx, 1);
+}
+
+static inline void dec_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
+{
+ mod_memcg_state(memcg, idx, -1);
+}
+
+static inline void inc_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx)
+{
+ mod_memcg_page_state(page, idx, 1);
+}
+
+static inline void dec_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx)
+{
+ mod_memcg_page_state(page, idx, -1);
+}
+
+static inline void inc_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx)
+{
+ mod_lruvec_state(lruvec, idx, 1);
+}
+
+static inline void dec_lruvec_state(struct lruvec *lruvec,
+ enum node_stat_item idx)
+{
+ mod_lruvec_state(lruvec, idx, -1);
+}
+
+static inline void inc_lruvec_page_state(struct page *page,
+ enum node_stat_item idx)
+{
+ mod_lruvec_page_state(page, idx, 1);
+}
+
+static inline void dec_lruvec_page_state(struct page *page,
+ enum node_stat_item idx)
+{
+ mod_lruvec_page_state(page, idx, -1);
+}
+
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg);
@@ -886,19 +1115,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
return memcg ? memcg->kmemcg_id : -1;
}
-/**
- * memcg_kmem_update_page_stat - update kmem page state statistics
- * @page: the page
- * @idx: page state item to account
- * @val: number of pages (positive or negative)
- */
-static inline void memcg_kmem_update_page_stat(struct page *page,
- enum memcg_stat_item idx, int val)
-{
- if (memcg_kmem_enabled() && page->mem_cgroup)
- this_cpu_add(page->mem_cgroup->stat->count[idx], val);
-}
-
#else
#define for_each_memcg_cache_index(_idx) \
for (; NULL; )
@@ -921,10 +1137,6 @@ static inline void memcg_put_cache_ids(void)
{
}
-static inline void memcg_kmem_update_page_stat(struct page *page,
- enum memcg_stat_item idx, int val)
-{
-}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
#endif /* _LINUX_MEMCONTROL_H */
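
Usage sketch (editor's illustration, not part of this patch): a caller charging kernel pages with the new combined helper, which updates the node vmstat and, when memcg is enabled, the owning cgroup's per-node lruvec counter in one call. NR_SLAB_RECLAIMABLE is now a node_stat_item (see the mmzone.h hunk below); the function is hypothetical.

static void example_account_slab_page(struct page *page, int order, bool alloc)
{
	int nr = alloc ? (1 << order) : -(1 << order);

	/* One call covers node, memcg and lruvec accounting */
	mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr);
}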
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 134a2f69c21a..c8a5056a5ae0 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -14,6 +14,20 @@ struct memory_block;
struct resource;
#ifdef CONFIG_MEMORY_HOTPLUG
+/*
+ * Return the page for a valid pfn only if that page is online. Any pfn
+ * walker that relies on fully initialized page->flags (and the other
+ * struct page fields) should use this rather than pfn_valid && pfn_to_page.
+ */
+#define pfn_to_online_page(pfn) \
+({ \
+ struct page *___page = NULL; \
+ unsigned long ___nr = pfn_to_section_nr(pfn); \
+ \
+ if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\
+ ___page = pfn_to_page(pfn); \
+ ___page; \
+})
/*
* Types for free bootmem stored in page->lru.next. These have to be in
@@ -101,6 +115,12 @@ extern void __online_page_free(struct page *page);
extern int try_online_node(int nid);
extern bool memhp_auto_online;
+/* If movable_node boot option specified */
+extern bool movable_node_enabled;
+static inline bool movable_node_is_enabled(void)
+{
+ return movable_node_enabled;
+}
#ifdef CONFIG_MEMORY_HOTREMOVE
extern bool is_pageblock_removable_nolock(struct page *page);
@@ -109,9 +129,9 @@ extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages);
#endif /* CONFIG_MEMORY_HOTREMOVE */
-/* reasonably generic interface to expand the physical pages in a zone */
-extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
- unsigned long nr_pages);
+/* reasonably generic interface to expand the physical pages */
+extern int __add_pages(int nid, unsigned long start_pfn,
+ unsigned long nr_pages, bool want_memblock);
#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
@@ -203,6 +223,14 @@ extern void set_zone_contiguous(struct zone *zone);
extern void clear_zone_contiguous(struct zone *zone);
#else /* ! CONFIG_MEMORY_HOTPLUG */
+#define pfn_to_online_page(pfn) \
+({ \
+ struct page *___page = NULL; \
+ if (pfn_valid(pfn)) \
+ ___page = pfn_to_page(pfn); \
+ ___page; \
+ })
+
/*
* Stub functions for when hotplug is off
*/
@@ -244,6 +272,10 @@ static inline void put_online_mems(void) {}
static inline void mem_hotplug_begin(void) {}
static inline void mem_hotplug_done(void) {}
+static inline bool movable_node_is_enabled(void)
+{
+ return false;
+}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
#ifdef CONFIG_MEMORY_HOTREMOVE
@@ -274,18 +306,19 @@ extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *));
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource, bool online);
-extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
- bool for_device);
-extern int arch_add_memory(int nid, u64 start, u64 size, bool for_device);
+extern int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock);
+extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
+ unsigned long nr_pages);
extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern void remove_memory(int nid, u64 start, u64 size);
-extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn);
+extern int sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn);
extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
unsigned long map_offset);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
-extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
- enum zone_type target, int *zone_shift);
-
+extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages,
+ int online_type);
+extern struct zone *default_zone_for_pfn(int nid, unsigned long pfn,
+ unsigned long nr_pages);
#endif /* __LINUX_MEMORY_HOTPLUG_H */
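
Usage sketch (editor's illustration, not part of this patch): a pfn walker using pfn_to_online_page(), since pfn_valid() alone no longer guarantees an initialized, online struct page for hotpluggable sections. The function is hypothetical.

static unsigned long count_reserved_pages(unsigned long start_pfn,
					  unsigned long end_pfn)
{
	unsigned long pfn, nr = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		/* NULL if the pfn is invalid or its section is not online */
		struct page *page = pfn_to_online_page(pfn);

		if (page && PageReserved(page))
			nr++;
	}
	return nr;
}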
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5f4d8281832b..3a58b4be1b0c 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -142,11 +142,10 @@ bool vma_policy_mof(struct vm_area_struct *vma);
extern void numa_default_policy(void);
extern void numa_policy_init(void);
-extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
- enum mpol_rebind_step step);
+extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
-extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+extern int huge_node(struct vm_area_struct *vma,
unsigned long addr, gfp_t gfp_flags,
struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
@@ -260,8 +259,7 @@ static inline void numa_default_policy(void)
}
static inline void mpol_rebind_task(struct task_struct *tsk,
- const nodemask_t *new,
- enum mpol_rebind_step step)
+ const nodemask_t *new)
{
}
@@ -269,13 +267,13 @@ static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}
-static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+static inline int huge_node(struct vm_area_struct *vma,
unsigned long addr, gfp_t gfp_flags,
struct mempolicy **mpol, nodemask_t **nodemask)
{
*mpol = NULL;
*nodemask = NULL;
- return node_zonelist(0, gfp_flags);
+ return 0;
}
static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
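
Usage sketch (editor's illustration, not part of this patch): a caller converted from huge_zonelist() to huge_node(), which returns a node id plus the policy and nodemask instead of a zonelist. The nid-based __alloc_pages_nodemask() call and the function name are illustrative assumptions.

static struct page *example_alloc_huge(struct vm_area_struct *vma,
				       unsigned long addr, gfp_t gfp, int order)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct page *page;
	int nid;

	nid = huge_node(vma, addr, gfp, &mpol, &nodemask);
	page = __alloc_pages_nodemask(gfp, order, nid, nodemask);
	mpol_cond_put(mpol);
	return page;
}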
diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h
index 956caa0628f5..5aacdb017a9f 100644
--- a/include/linux/mfd/intel_soc_pmic.h
+++ b/include/linux/mfd/intel_soc_pmic.h
@@ -25,8 +25,11 @@ struct intel_soc_pmic {
int irq;
struct regmap *regmap;
struct regmap_irq_chip_data *irq_chip_data;
- struct regmap_irq_chip_data *irq_chip_data_level2;
struct regmap_irq_chip_data *irq_chip_data_tmu;
+ struct regmap_irq_chip_data *irq_chip_data_bcu;
+ struct regmap_irq_chip_data *irq_chip_data_adc;
+ struct regmap_irq_chip_data *irq_chip_data_chgr;
+ struct regmap_irq_chip_data *irq_chip_data_crit;
struct device *dev;
};
diff --git a/include/linux/mfd/lp87565.h b/include/linux/mfd/lp87565.h
new file mode 100644
index 000000000000..d0c91ba65525
--- /dev/null
+++ b/include/linux/mfd/lp87565.h
@@ -0,0 +1,270 @@
+/*
+ * Functions to access LP87565 power management chip.
+ *
+ * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ */
+
+#ifndef __LINUX_MFD_LP87565_H
+#define __LINUX_MFD_LP87565_H
+
+#include <linux/i2c.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+enum lp87565_device_type {
+ LP87565_DEVICE_TYPE_UNKNOWN = 0,
+ LP87565_DEVICE_TYPE_LP87565_Q1,
+};
+
+/* All register addresses */
+#define LP87565_REG_DEV_REV 0X00
+#define LP87565_REG_OTP_REV 0X01
+#define LP87565_REG_BUCK0_CTRL_1 0X02
+#define LP87565_REG_BUCK0_CTRL_2 0X03
+
+#define LP87565_REG_BUCK1_CTRL_1 0X04
+#define LP87565_REG_BUCK1_CTRL_2 0X05
+
+#define LP87565_REG_BUCK2_CTRL_1 0X06
+#define LP87565_REG_BUCK2_CTRL_2 0X07
+
+#define LP87565_REG_BUCK3_CTRL_1 0X08
+#define LP87565_REG_BUCK3_CTRL_2 0X09
+
+#define LP87565_REG_BUCK0_VOUT 0X0A
+#define LP87565_REG_BUCK0_FLOOR_VOUT 0X0B
+
+#define LP87565_REG_BUCK1_VOUT 0X0C
+#define LP87565_REG_BUCK1_FLOOR_VOUT 0X0D
+
+#define LP87565_REG_BUCK2_VOUT 0X0E
+#define LP87565_REG_BUCK2_FLOOR_VOUT 0X0F
+
+#define LP87565_REG_BUCK3_VOUT 0X10
+#define LP87565_REG_BUCK3_FLOOR_VOUT 0X11
+
+#define LP87565_REG_BUCK0_DELAY 0X12
+#define LP87565_REG_BUCK1_DELAY 0X13
+
+#define LP87565_REG_BUCK2_DELAY 0X14
+#define LP87565_REG_BUCK3_DELAY 0X15
+
+#define LP87565_REG_GPO2_DELAY 0X16
+#define LP87565_REG_GPO3_DELAY 0X17
+#define LP87565_REG_RESET 0X18
+#define LP87565_REG_CONFIG 0X19
+
+#define LP87565_REG_INT_TOP_1 0X1A
+#define LP87565_REG_INT_TOP_2 0X1B
+
+#define LP87565_REG_INT_BUCK_0_1 0X1C
+#define LP87565_REG_INT_BUCK_2_3 0X1D
+#define LP87565_REG_TOP_STAT 0X1E
+#define LP87565_REG_BUCK_0_1_STAT 0X1F
+#define LP87565_REG_BUCK_2_3_STAT 0x20
+
+#define LP87565_REG_TOP_MASK_1 0x21
+#define LP87565_REG_TOP_MASK_2 0x22
+
+#define LP87565_REG_BUCK_0_1_MASK 0x23
+#define LP87565_REG_BUCK_2_3_MASK 0x24
+#define LP87565_REG_SEL_I_LOAD 0x25
+
+#define LP87565_REG_I_LOAD_2 0x26
+#define LP87565_REG_I_LOAD_1 0x27
+
+#define LP87565_REG_PGOOD_CTRL1 0x28
+#define LP87565_REG_PGOOD_CTRL2 0x29
+#define LP87565_REG_PGOOD_FLT 0x2A
+#define LP87565_REG_PLL_CTRL 0x2B
+#define LP87565_REG_PIN_FUNCTION 0x2C
+#define LP87565_REG_GPIO_CONFIG 0x2D
+#define LP87565_REG_GPIO_IN 0x2E
+#define LP87565_REG_GPIO_OUT 0x2F
+
+#define LP87565_REG_MAX LP87565_REG_GPIO_OUT
+
+/* Register field definitions */
+#define LP87565_DEV_REV_DEV_ID 0xC0
+#define LP87565_DEV_REV_ALL_LAYER 0x30
+#define LP87565_DEV_REV_METAL_LAYER 0x0F
+
+#define LP87565_OTP_REV_OTP_ID 0xFF
+
+#define LP87565_BUCK_CTRL_1_EN BIT(7)
+#define LP87565_BUCK_CTRL_1_EN_PIN_CTRL BIT(6)
+#define LP87565_BUCK_CTRL_1_PIN_SELECT_EN 0x30
+
+#define LP87565_BUCK_CTRL_1_ROOF_FLOOR_EN BIT(3)
+#define LP87565_BUCK_CTRL_1_RDIS_EN BIT(2)
+#define LP87565_BUCK_CTRL_1_FPWM BIT(1)
+/* Bit0 is reserved for BUCK1 and BUCK3 and valid only for BUCK0 and BUCK2 */
+#define LP87565_BUCK_CTRL_1_FPWM_MP_0_2 BIT(0)
+
+#define LP87565_BUCK_CTRL_2_ILIM 0x38
+#define LP87565_BUCK_CTRL_2_SLEW_RATE 0x07
+
+#define LP87565_BUCK_VSET 0xFF
+#define LP87565_BUCK_FLOOR_VSET 0xFF
+
+#define LP87565_BUCK_SHUTDOWN_DELAY 0xF0
+#define LP87565_BUCK_STARTUP_DELAY 0x0F
+
+#define LP87565_GPIO_SHUTDOWN_DELAY 0xF0
+#define LP87565_GPIO_STARTUP_DELAY 0x0F
+
+#define LP87565_RESET_SW_RESET BIT(0)
+
+#define LP87565_CONFIG_DOUBLE_DELAY BIT(7)
+#define LP87565_CONFIG_CLKIN_PD BIT(6)
+#define LP87565_CONFIG_EN4_PD BIT(5)
+#define LP87565_CONFIG_EN3_PD BIT(4)
+#define LP87565_CONFIG_TDIE_WARN_LEVEL BIT(3)
+#define LP87565_CONFIG_EN2_PD BIT(2)
+#define LP87565_CONFIG_EN1_PD BIT(1)
+
+#define LP87565_INT_GPIO BIT(7)
+#define LP87565_INT_BUCK23 BIT(6)
+#define LP87565_INT_BUCK01 BIT(5)
+#define LP87565_NO_SYNC_CLK BIT(4)
+#define LP87565_TDIE_SD BIT(3)
+#define LP87565_TDIE_WARN BIT(2)
+#define LP87565_INT_OVP BIT(1)
+#define LP87565_I_LOAD_READY BIT(0)
+
+#define LP87565_INT_TOP2_RESET_REG BIT(0)
+
+#define LP87565_BUCK1_PG_INT BIT(6)
+#define LP87565_BUCK1_SC_INT BIT(5)
+#define LP87565_BUCK1_ILIM_INT BIT(4)
+#define LP87565_BUCK0_PG_INT BIT(2)
+#define LP87565_BUCK0_SC_INT BIT(1)
+#define LP87565_BUCK0_ILIM_INT BIT(0)
+
+#define LP87565_BUCK3_PG_INT BIT(6)
+#define LP87565_BUCK3_SC_INT BIT(5)
+#define LP87565_BUCK3_ILIM_INT BIT(4)
+#define LP87565_BUCK2_PG_INT BIT(2)
+#define LP87565_BUCK2_SC_INT BIT(1)
+#define LP87565_BUCK2_ILIM_INT BIT(0)
+
+#define LP87565_SYNC_CLK_STAT BIT(4)
+#define LP87565_TDIE_SD_STAT BIT(3)
+#define LP87565_TDIE_WARN_STAT BIT(2)
+#define LP87565_OVP_STAT BIT(1)
+
+#define LP87565_BUCK1_STAT BIT(7)
+#define LP87565_BUCK1_PG_STAT BIT(6)
+#define LP87565_BUCK1_ILIM_STAT BIT(4)
+#define LP87565_BUCK0_STAT BIT(3)
+#define LP87565_BUCK0_PG_STAT BIT(2)
+#define LP87565_BUCK0_ILIM_STAT BIT(0)
+
+#define LP87565_BUCK3_STAT BIT(7)
+#define LP87565_BUCK3_PG_STAT BIT(6)
+#define LP87565_BUCK3_ILIM_STAT BIT(4)
+#define LP87565_BUCK2_STAT BIT(3)
+#define LP87565_BUCK2_PG_STAT BIT(2)
+#define LP87565_BUCK2_ILIM_STAT BIT(0)
+
+#define LPL87565_GPIO_MASK BIT(7)
+#define LPL87565_SYNC_CLK_MASK BIT(4)
+#define LPL87565_TDIE_WARN_MASK BIT(2)
+#define LPL87565_I_LOAD_READY_MASK BIT(0)
+
+#define LPL87565_RESET_REG_MASK BIT(0)
+
+#define LPL87565_BUCK1_PG_MASK BIT(6)
+#define LPL87565_BUCK1_ILIM_MASK BIT(4)
+#define LPL87565_BUCK0_PG_MASK BIT(2)
+#define LPL87565_BUCK0_ILIM_MASK BIT(0)
+
+#define LPL87565_BUCK3_PG_MASK BIT(6)
+#define LPL87565_BUCK3_ILIM_MASK BIT(4)
+#define LPL87565_BUCK2_PG_MASK BIT(2)
+#define LPL87565_BUCK2_ILIM_MASK BIT(0)
+
+#define LP87565_LOAD_CURRENT_BUCK_SELECT 0x3
+
+#define LP87565_I_LOAD2_BUCK_LOAD_CURRENT 0x3
+#define LP87565_I_LOAD1_BUCK_LOAD_CURRENT 0xFF
+
+#define LP87565_PG3_SEL 0xC0
+#define LP87565_PG2_SEL 0x30
+#define LP87565_PG1_SEL 0x0C
+#define LP87565_PG0_SEL 0x03
+
+#define LP87565_HALF_DAY BIT(7)
+#define LP87565_EN_PG0_NINT BIT(6)
+#define LP87565_PGOOD_SET_DELAY BIT(5)
+#define LP87565_EN_PGFLT_STAT BIT(4)
+#define LP87565_PGOOD_WINDOW BIT(2)
+#define LP87565_PGOOD_OD BIT(1)
+#define LP87565_PGOOD_POL BIT(0)
+
+#define LP87565_PG3_FLT BIT(3)
+#define LP87565_PG2_FLT BIT(2)
+#define LP87565_PG1_FLT BIT(1)
+#define LP87565_PG0_FLT BIT(0)
+
+#define LP87565_PLL_MODE 0xC0
+#define LP87565_EXT_CLK_FREQ 0x1F
+
+#define LP87565_EN_SPREAD_SPEC BIT(7)
+#define LP87565_EN_PIN_CTRL_GPIO3 BIT(6)
+#define LP87565_EN_PIN_SELECT_GPIO3 BIT(5)
+#define LP87565_EN_PIN_CTRL_GPIO2 BIT(4)
+#define LP87565_EN_PIN_SELECT_GPIO2 BIT(3)
+#define LP87565_GPIO3_SEL BIT(2)
+#define LP87565_GPIO2_SEL BIT(1)
+#define LP87565_GPIO1_SEL BIT(0)
+
+#define LP87565_GOIO3_OD BIT(6)
+#define LP87565_GOIO2_OD BIT(5)
+#define LP87565_GOIO1_OD BIT(4)
+#define LP87565_GOIO3_DIR BIT(2)
+#define LP87565_GOIO2_DIR BIT(1)
+#define LP87565_GOIO1_DIR BIT(0)
+
+#define LP87565_GOIO3_IN BIT(2)
+#define LP87565_GOIO2_IN BIT(1)
+#define LP87565_GOIO1_IN BIT(0)
+
+#define LP87565_GOIO3_OUT BIT(2)
+#define LP87565_GOIO2_OUT BIT(1)
+#define LP87565_GOIO1_OUT BIT(0)
+
+/* Number of step-down converters available */
+#define LP87565_NUM_BUCK 6
+
+enum LP87565_regulator_id {
+ /* BUCKs */
+ LP87565_BUCK_0,
+ LP87565_BUCK_1,
+ LP87565_BUCK_2,
+ LP87565_BUCK_3,
+ LP87565_BUCK_10,
+ LP87565_BUCK_23,
+};
+
+/**
+ * struct LP87565 - state holder for the LP87565 driver
+ * @dev: struct device pointer for MFD device
+ * @rev: revision of the LP87565
+ * @dev_type: The device type, for example lp87565-q1
+ * @lock: lock guarding the data structure
+ * @regmap: register map of the LP87565 PMIC
+ *
+ * Device data may be used to access the LP87565 chip
+ */
+struct lp87565 {
+ struct device *dev;
+ u8 rev;
+ u8 dev_type;
+ struct regmap *regmap;
+};
+#endif /* __LINUX_MFD_LP87565_H */
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index 7eb7cbac0a9a..116816fb9110 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -850,6 +850,9 @@
#define rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0)
+#define RTS5227_DEVICE_ID 0x5227
+#define RTS_MAX_TIMES_FREQ_REDUCTION 8
+
struct rtsx_pcr;
struct pcr_handle {
@@ -957,6 +960,8 @@ struct rtsx_pcr {
int num_slots;
struct rtsx_slot *slots;
+
+ u8 dma_error_count;
};
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6f543a47fc92..46b9ac5e8569 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2171,7 +2171,7 @@ extern void filemap_map_pages(struct vm_fault *vmf,
extern int filemap_page_mkwrite(struct vm_fault *vmf);
/* mm/page-writeback.c */
-int write_one_page(struct page *page, int wait);
+int __must_check write_one_page(struct page *page);
void task_dirty_inc(struct task_struct *tsk);
/* readahead.c */
diff --git a/include/linux/mman.h b/include/linux/mman.h
index 634c4c51fe3a..c8367041fafd 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -22,7 +22,7 @@ unsigned long vm_memory_committed(void);
static inline void vm_acct_memory(long pages)
{
- __percpu_counter_add(&vm_committed_as, pages, vm_committed_as_batch);
+ percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch);
}
static inline void vm_unacct_memory(long pages)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ef6a13b7bd3e..7e8f100cb56d 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -125,8 +125,6 @@ enum zone_stat_item {
NR_ZONE_UNEVICTABLE,
NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */
NR_MLOCK, /* mlock()ed pages found and moved off LRU */
- NR_SLAB_RECLAIMABLE,
- NR_SLAB_UNRECLAIMABLE,
NR_PAGETABLE, /* used for pagetables */
NR_KERNEL_STACK_KB, /* measured in KiB */
/* Second 128 byte cacheline */
@@ -152,6 +150,8 @@ enum node_stat_item {
NR_INACTIVE_FILE, /* " " " " " */
NR_ACTIVE_FILE, /* " " " " " */
NR_UNEVICTABLE, /* " " " " " */
+ NR_SLAB_RECLAIMABLE,
+ NR_SLAB_UNRECLAIMABLE,
NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
WORKINGSET_REFAULT,
@@ -533,6 +533,22 @@ static inline bool zone_is_empty(struct zone *zone)
}
/*
+ * Return true if the [start_pfn, start_pfn + nr_pages) range has a
+ * non-empty intersection with the given zone.
+ */
+static inline bool zone_intersects(struct zone *zone,
+ unsigned long start_pfn, unsigned long nr_pages)
+{
+ if (zone_is_empty(zone))
+ return false;
+ if (start_pfn >= zone_end_pfn(zone) ||
+ start_pfn + nr_pages <= zone->zone_start_pfn)
+ return false;
+
+ return true;
+}
+
+/*
* The "priority" of VM scanning is how much of the queues we will scan in one
* go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
* queues ("queue_length >> 12") during an aging round.
@@ -772,7 +788,7 @@ enum memmap_context {
MEMMAP_EARLY,
MEMMAP_HOTPLUG,
};
-extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
+extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
unsigned long size);
extern void lruvec_init(struct lruvec *lruvec);
@@ -1144,9 +1160,10 @@ extern unsigned long usemap_size(void);
*/
#define SECTION_MARKED_PRESENT (1UL<<0)
#define SECTION_HAS_MEM_MAP (1UL<<1)
-#define SECTION_MAP_LAST_BIT (1UL<<2)
+#define SECTION_IS_ONLINE (1UL<<2)
+#define SECTION_MAP_LAST_BIT (1UL<<3)
#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
-#define SECTION_NID_SHIFT 2
+#define SECTION_NID_SHIFT 3
static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
@@ -1175,11 +1192,30 @@ static inline int valid_section_nr(unsigned long nr)
return valid_section(__nr_to_section(nr));
}
+static inline int online_section(struct mem_section *section)
+{
+ return (section && (section->section_mem_map & SECTION_IS_ONLINE));
+}
+
+static inline int online_section_nr(unsigned long nr)
+{
+ return online_section(__nr_to_section(nr));
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
+#ifdef CONFIG_MEMORY_HOTREMOVE
+void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
+#endif
+#endif
+
static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
return __nr_to_section(pfn_to_section_nr(pfn));
}
+extern int __highest_present_section_nr;
+
#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
@@ -1251,10 +1287,15 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
* pfn_valid() is meant to be able to tell if a given PFN has valid memmap
- * associated with it or not. In FLATMEM, it is expected that holes always
- * have valid memmap as long as there is valid PFNs either side of the hole.
- * In SPARSEMEM, it is assumed that a valid section has a memmap for the
- * entire section.
+ * associated with it or not. This means that a struct page exists for this
+ * pfn. The caller cannot assume the page is fully initialized in general.
+ * Hotpluggable pages might not have been onlined yet. pfn_to_online_page()
+ * will ensure the struct page is fully online and initialized. Special pages
+ * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly.
+ *
+ * In FLATMEM, it is expected that holes always have valid memmap as long as
+ * there are valid PFNs on either side of the hole. In SPARSEMEM, it is assumed
+ * that a valid section has a memmap for the entire section.
*
* However, an ARM, and maybe other embedded architectures in the future
* free memmap backing holes to save memory on the assumption the memmap is
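
Usage sketch (editor's illustration, not part of this patch): picking the first zone of a node that overlaps a hot-added pfn range with the new zone_intersects() helper, falling back to ZONE_NORMAL. The function is hypothetical.

static struct zone *example_pick_zone(pg_data_t *pgdat,
				      unsigned long start_pfn,
				      unsigned long nr_pages)
{
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = &pgdat->node_zones[i];

		if (zone_intersects(zone, start_pfn, nr_pages))
			return zone;
	}
	return &pgdat->node_zones[ZONE_NORMAL];
}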
diff --git a/include/linux/module.h b/include/linux/module.h
index 21f56393602f..8eb9a1e693e5 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -442,8 +442,8 @@ struct module {
#ifdef CONFIG_EVENT_TRACING
struct trace_event_call **trace_events;
unsigned int num_trace_events;
- struct trace_enum_map **trace_enums;
- unsigned int num_trace_enums;
+ struct trace_eval_map **trace_evals;
+ unsigned int num_trace_evals;
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
unsigned int num_ftrace_callsites;
diff --git a/include/linux/nd.h b/include/linux/nd.h
index 194b8e002ea7..5dc6b695437d 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -21,6 +21,15 @@ enum nvdimm_event {
NVDIMM_REVALIDATE_POISON,
};
+enum nvdimm_claim_class {
+ NVDIMM_CCLASS_NONE,
+ NVDIMM_CCLASS_BTT,
+ NVDIMM_CCLASS_BTT2,
+ NVDIMM_CCLASS_PFN,
+ NVDIMM_CCLASS_DAX,
+ NVDIMM_CCLASS_UNKNOWN,
+};
+
struct nd_device_driver {
struct device_driver drv;
unsigned long type;
@@ -41,12 +50,14 @@ static inline struct nd_device_driver *to_nd_device_driver(
* @force_raw: ignore other personalities for the namespace (e.g. btt)
* @dev: device model node
* @claim: when set, another personality has taken ownership of the namespace
+ * @claim_class: restrict claim type to a given class
* @rw_bytes: access the raw namespace capacity with byte-aligned transfers
*/
struct nd_namespace_common {
int force_raw;
struct device dev;
struct device *claim;
+ enum nvdimm_claim_class claim_class;
int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset,
void *buf, size_t size, int rw, unsigned long flags);
};
@@ -75,12 +86,14 @@ struct nd_namespace_io {
/**
* struct nd_namespace_pmem - namespace device for dimm-backed interleaved memory
* @nsio: device and system physical address range to drive
+ * @lbasize: logical sector size for the namespace in block-device-mode
* @alt_name: namespace name supplied in the dimm label
* @uuid: namespace name supplied in the dimm label
* @id: ida allocated id
*/
struct nd_namespace_pmem {
struct nd_namespace_io nsio;
+ unsigned long lbasize;
char *alt_name;
u8 *uuid;
int id;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e48ee2eaaa3e..779b23595596 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3284,6 +3284,7 @@ void __dev_notify_flags(struct net_device *, unsigned int old_flags,
int dev_change_name(struct net_device *, const char *);
int dev_set_alias(struct net_device *, const char *, size_t);
int dev_change_net_namespace(struct net_device *, struct net *, const char *);
+int __dev_set_mtu(struct net_device *, int);
int dev_set_mtu(struct net_device *, int);
void dev_set_group(struct net_device *, int);
int dev_set_mac_address(struct net_device *, struct sockaddr *);
diff --git a/include/linux/node.h b/include/linux/node.h
index 2115ad5d6f19..d1751beb462c 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -30,9 +30,38 @@ struct memory_block;
extern struct node *node_devices[];
typedef void (*node_registration_func_t)(struct node *);
+#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
+extern int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages);
+#else
+static inline int link_mem_sections(int nid, unsigned long start_pfn, unsigned long nr_pages)
+{
+ return 0;
+}
+#endif
+
extern void unregister_node(struct node *node);
#ifdef CONFIG_NUMA
-extern int register_one_node(int nid);
+/* Core of the node registration - only memory hotplug should use this */
+extern int __register_one_node(int nid);
+
+/* Registers an online node */
+static inline int register_one_node(int nid)
+{
+ int error = 0;
+
+ if (node_online(nid)) {
+ struct pglist_data *pgdat = NODE_DATA(nid);
+
+ error = __register_one_node(nid);
+ if (error)
+ return error;
+ /* link memory sections under this node */
+ error = link_mem_sections(nid, pgdat->node_start_pfn, pgdat->node_spanned_pages);
+ }
+
+ return error;
+}
+
extern void unregister_one_node(int nid);
extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
@@ -46,6 +75,10 @@ extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
node_registration_func_t unregister);
#endif
#else
+static inline int __register_one_node(int nid)
+{
+ return 0;
+}
static inline int register_one_node(int nid)
{
return 0;
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index f746e44d4046..cf0b91c3ec12 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -387,11 +387,7 @@ enum node_states {
#else
N_HIGH_MEMORY = N_NORMAL_MEMORY,
#endif
-#ifdef CONFIG_MOVABLE_NODE
N_MEMORY, /* The node has memory(regular, high, movable) */
-#else
- N_MEMORY = N_HIGH_MEMORY,
-#endif
N_CPU, /* The node has one or more cpus */
NR_NODE_STATES
};
diff --git a/include/linux/of.h b/include/linux/of.h
index 50fcdb54087f..fa089a2789a0 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -148,18 +148,28 @@ extern raw_spinlock_t devtree_lock;
#ifdef CONFIG_OF
void of_core_init(void);
-static inline bool is_of_node(struct fwnode_handle *fwnode)
+static inline bool is_of_node(const struct fwnode_handle *fwnode)
{
return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF;
}
-static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
-{
- return is_of_node(fwnode) ?
- container_of(fwnode, struct device_node, fwnode) : NULL;
-}
-
-#define of_fwnode_handle(node) (&(node)->fwnode)
+#define to_of_node(__fwnode) \
+ ({ \
+ typeof(__fwnode) __to_of_node_fwnode = (__fwnode); \
+ \
+ is_of_node(__to_of_node_fwnode) ? \
+ container_of(__to_of_node_fwnode, \
+ struct device_node, fwnode) : \
+ NULL; \
+ })
+
+#define of_fwnode_handle(node) \
+ ({ \
+ typeof(node) __of_fwnode_handle_node = (node); \
+ \
+ __of_fwnode_handle_node ? \
+ &__of_fwnode_handle_node->fwnode : NULL; \
+ })
static inline bool of_have_populated_dt(void)
{
@@ -533,12 +543,12 @@ static inline void of_core_init(void)
{
}
-static inline bool is_of_node(struct fwnode_handle *fwnode)
+static inline bool is_of_node(const struct fwnode_handle *fwnode)
{
return false;
}
-static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
+static inline struct device_node *to_of_node(const struct fwnode_handle *fwnode)
{
return NULL;
}
@@ -627,6 +637,12 @@ static inline int of_device_is_compatible(const struct device_node *device,
return 0;
}
+static inline int of_device_compatible_match(struct device_node *device,
+ const char *const *compat)
+{
+ return 0;
+}
+
static inline bool of_device_is_available(const struct device_node *device)
{
return false;
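
Usage sketch (editor's illustration, not part of this patch): a round trip through the macro-ized helpers; of_fwnode_handle() now tolerates a NULL node and to_of_node() accepts a const fwnode handle. The function is hypothetical.

static struct device_node *example_roundtrip(struct device_node *np)
{
	struct fwnode_handle *fwnode = of_fwnode_handle(np);	/* NULL-safe */

	if (!is_of_node(fwnode))
		return NULL;
	return to_of_node(fwnode);	/* back to the device_node */
}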
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 1dfbfd0d8040..013c5418aeec 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -31,9 +31,6 @@ extern void *of_fdt_get_property(const void *blob,
unsigned long node,
const char *name,
int *size);
-extern int of_fdt_is_compatible(const void *blob,
- unsigned long node,
- const char *compat);
extern bool of_fdt_is_big_endian(const void *blob,
unsigned long node);
extern int of_fdt_match(const void *blob, unsigned long node,
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 1e089d5a182b..ca10f43564de 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -31,6 +31,7 @@ enum of_gpio_flags {
OF_GPIO_ACTIVE_LOW = 0x1,
OF_GPIO_SINGLE_ENDED = 0x2,
OF_GPIO_OPEN_DRAIN = 0x4,
+ OF_GPIO_SLEEP_MAY_LOOSE_VALUE = 0x8,
};
#ifdef CONFIG_OF_GPIO
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
index abdb02eaef06..3e058f05ab04 100644
--- a/include/linux/of_graph.h
+++ b/include/linux/of_graph.h
@@ -43,11 +43,15 @@ struct of_endpoint {
#ifdef CONFIG_OF
int of_graph_parse_endpoint(const struct device_node *node,
struct of_endpoint *endpoint);
+int of_graph_get_endpoint_count(const struct device_node *np);
struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id);
struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
struct device_node *previous);
struct device_node *of_graph_get_endpoint_by_regs(
const struct device_node *parent, int port_reg, int reg);
+struct device_node *of_graph_get_remote_endpoint(
+ const struct device_node *node);
+struct device_node *of_graph_get_port_parent(struct device_node *node);
struct device_node *of_graph_get_remote_port_parent(
const struct device_node *node);
struct device_node *of_graph_get_remote_port(const struct device_node *node);
@@ -61,6 +65,11 @@ static inline int of_graph_parse_endpoint(const struct device_node *node,
return -ENOSYS;
}
+static inline int of_graph_get_endpoint_count(const struct device_node *np)
+{
+ return 0;
+}
+
static inline struct device_node *of_graph_get_port_by_id(
struct device_node *node, u32 id)
{
@@ -80,6 +89,18 @@ static inline struct device_node *of_graph_get_endpoint_by_regs(
return NULL;
}
+static inline struct device_node *of_graph_get_remote_endpoint(
+ const struct device_node *node)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_graph_get_port_parent(
+ struct device_node *node)
+{
+ return NULL;
+}
+
static inline struct device_node *of_graph_get_remote_port_parent(
const struct device_node *node)
{
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6b5818d6de32..d33e3280c8ad 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -326,11 +326,14 @@ PAGEFLAG_FALSE(HighMem)
#ifdef CONFIG_SWAP
static __always_inline int PageSwapCache(struct page *page)
{
+#ifdef CONFIG_THP_SWAP
+ page = compound_head(page);
+#endif
return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
}
-SETPAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND)
-CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND)
+SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
+CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache)
#endif
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e7bbd9d4dc6c..baa9344dcd10 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -28,14 +28,33 @@ enum mapping_flags {
AS_NO_WRITEBACK_TAGS = 5,
};
+/**
+ * mapping_set_error - record a writeback error in the address_space
+ * @mapping - the mapping in which an error should be set
+ * @error - the error to set in the mapping
+ *
+ * When writeback fails in some way, we must record that error so that
+ * userspace can be informed when fsync and the like are called. We endeavor
+ * to report errors on any file that was open at the time of the error. Some
+ * internal callers also need to know when writeback errors have occurred.
+ *
+ * When a writeback error occurs, most filesystems will want to call
+ * mapping_set_error to record the error in the mapping so that it can be
+ * reported when the application calls fsync(2).
+ */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
- if (unlikely(error)) {
- if (error == -ENOSPC)
- set_bit(AS_ENOSPC, &mapping->flags);
- else
- set_bit(AS_EIO, &mapping->flags);
- }
+ if (likely(!error))
+ return;
+
+ /* Record in wb_err for checkers using errseq_t based tracking */
+ filemap_set_wb_err(mapping, error);
+
+ /* Record it in flags for now, for legacy callers */
+ if (error == -ENOSPC)
+ set_bit(AS_ENOSPC, &mapping->flags);
+ else
+ set_bit(AS_EIO, &mapping->flags);
}
static inline void mapping_set_unevictable(struct address_space *mapping)
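
Usage sketch (editor's illustration, not part of this patch): a filesystem's writeback-completion path recording an I/O error so a later fsync(2) can report it, via both the legacy AS_EIO/AS_ENOSPC bits and the errseq_t-based wb_err that the helper above now sets. The function is hypothetical.

static void example_end_writeback(struct page *page, int err)
{
	if (err)
		mapping_set_error(page->mapping, err);
	end_page_writeback(page);
}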
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index 57e0b8250947..782fb8e0755f 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -7,6 +7,7 @@
int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
void pci_disable_pri(struct pci_dev *pdev);
+void pci_restore_pri_state(struct pci_dev *pdev);
int pci_reset_pri(struct pci_dev *pdev);
#else /* CONFIG_PCI_PRI */
@@ -20,6 +21,10 @@ static inline void pci_disable_pri(struct pci_dev *pdev)
{
}
+static inline void pci_restore_pri_state(struct pci_dev *pdev)
+{
+}
+
static inline int pci_reset_pri(struct pci_dev *pdev)
{
return -ENODEV;
@@ -31,6 +36,7 @@ static inline int pci_reset_pri(struct pci_dev *pdev)
int pci_enable_pasid(struct pci_dev *pdev, int features);
void pci_disable_pasid(struct pci_dev *pdev);
+void pci_restore_pasid_state(struct pci_dev *pdev);
int pci_pasid_features(struct pci_dev *pdev);
int pci_max_pasids(struct pci_dev *pdev);
@@ -45,6 +51,10 @@ static inline void pci_disable_pasid(struct pci_dev *pdev)
{
}
+static inline void pci_restore_pasid_state(struct pci_dev *pdev)
+{
+}
+
static inline int pci_pasid_features(struct pci_dev *pdev)
{
return -EINVAL;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 1ef093866581..4869e66dd659 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -360,6 +360,8 @@ struct pci_dev {
unsigned int msix_enabled:1;
unsigned int ari_enabled:1; /* ARI forwarding */
unsigned int ats_enabled:1; /* Address Translation Service */
+ unsigned int pasid_enabled:1; /* Process Address Space ID */
+ unsigned int pri_enabled:1; /* Page Request Interface */
unsigned int is_managed:1;
unsigned int needs_freset:1; /* Dev requires fundamental reset */
unsigned int state_saved:1;
@@ -370,7 +372,7 @@ struct pci_dev {
unsigned int is_thunderbolt:1; /* Thunderbolt controller */
unsigned int __aer_firmware_first_valid:1;
unsigned int __aer_firmware_first:1;
- unsigned int broken_intx_masking:1;
+ unsigned int broken_intx_masking:1; /* INTx masking can't be used */
unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
unsigned int irq_managed:1;
unsigned int has_secondary_link:1;
@@ -404,6 +406,12 @@ struct pci_dev {
u8 ats_stu; /* ATS Smallest Translation Unit */
atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */
#endif
+#ifdef CONFIG_PCI_PRI
+ u32 pri_reqs_alloc; /* Number of PRI requests allocated */
+#endif
+#ifdef CONFIG_PCI_PASID
+ u16 pasid_features;
+#endif
phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
size_t romlen; /* Length of ROM if it's not from the BAR */
char *driver_override; /* Driver name to force a match */
@@ -437,6 +445,8 @@ struct pci_host_bridge {
void *sysdata;
int busnr;
struct list_head windows; /* resource_entry */
+ u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* platform IRQ swizzler */
+ int (*map_irq)(const struct pci_dev *, u8, u8);
void (*release_fn)(struct pci_host_bridge *);
void *release_data;
struct msi_controller *msi;
@@ -463,7 +473,9 @@ static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
}
struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
-int pci_register_host_bridge(struct pci_host_bridge *bridge);
+struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
+ size_t priv);
+void pci_free_host_bridge(struct pci_host_bridge *bridge);
struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
@@ -695,7 +707,8 @@ struct pci_error_handlers {
pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
/* PCI function reset prepare or completed */
- void (*reset_notify)(struct pci_dev *dev, bool prepare);
+ void (*reset_prepare)(struct pci_dev *dev);
+ void (*reset_done)(struct pci_dev *dev);
/* Device driver may resume normal operations */
void (*resume)(struct pci_dev *dev);
@@ -852,13 +865,10 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
void pci_bus_release_busn_res(struct pci_bus *b);
-struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
- struct pci_ops *ops, void *sysdata,
- struct list_head *resources,
- struct msi_controller *msi);
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata,
struct list_head *resources);
+int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
int busnr);
void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
@@ -1008,6 +1018,15 @@ int __must_check pci_reenable_device(struct pci_dev *);
int __must_check pcim_enable_device(struct pci_dev *pdev);
void pcim_pin_device(struct pci_dev *pdev);
+static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
+{
+ /*
+ * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
+ * writable and no quirk has marked the feature broken.
+ */
+ return !pdev->broken_intx_masking;
+}
+
static inline int pci_is_enabled(struct pci_dev *pdev)
{
return (atomic_read(&pdev->enable_cnt) > 0);
@@ -1031,7 +1050,6 @@ int __must_check pci_set_mwi(struct pci_dev *dev);
int pci_try_set_mwi(struct pci_dev *dev);
void pci_clear_mwi(struct pci_dev *dev);
void pci_intx(struct pci_dev *dev, int enable);
-bool pci_intx_mask_supported(struct pci_dev *dev);
bool pci_check_and_mask_intx(struct pci_dev *dev);
bool pci_check_and_unmask_intx(struct pci_dev *dev);
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
@@ -1144,6 +1162,7 @@ void pdev_enable_device(struct pci_dev *);
int pci_enable_resources(struct pci_dev *, int mask);
void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
int (*)(const struct pci_dev *, u8, u8));
+void pci_assign_irq(struct pci_dev *dev);
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
#define HAVE_PCI_REQ_REGIONS 2
int __must_check pci_request_regions(struct pci_dev *, const char *);
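
Usage sketch (editor's illustration, not part of this patch): a host-controller probe built on the new devm_pci_alloc_host_bridge()/pci_scan_root_bus_bridge() pair, installing the per-bridge IRQ hooks that replace pci_fixup_irqs(). The of_irq_parse_and_map_pci()/pci_common_swizzle() helpers and the probe function are assumptions for illustration; a real driver would also populate bridge->windows and sysdata.

static int example_host_probe(struct platform_device *pdev)
{
	struct pci_host_bridge *bridge;

	/* Managed allocation: freed automatically on probe failure/removal */
	bridge = devm_pci_alloc_host_bridge(&pdev->dev, 0);
	if (!bridge)
		return -ENOMEM;

	/* Per-bridge IRQ mapping instead of the global pci_fixup_irqs() */
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	return pci_scan_root_bus_bridge(bridge);
}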
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 5f6b71d15393..c71e532da458 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1373,6 +1373,8 @@
#define PCI_DEVICE_ID_TTI_HPT374 0x0008
#define PCI_DEVICE_ID_TTI_HPT372N 0x0009 /* apparently a 372N variant? */
+#define PCI_VENDOR_ID_SIGMA 0x1105
+
#define PCI_VENDOR_ID_VIA 0x1106
#define PCI_DEVICE_ID_VIA_8763_0 0x0198
#define PCI_DEVICE_ID_VIA_8380_0 0x0204
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 84a109449610..ec065387f443 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -39,7 +39,8 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
-void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
+void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
+ s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
@@ -50,7 +51,7 @@ static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
- __percpu_counter_add(fbc, amount, percpu_counter_batch);
+ percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
@@ -136,7 +137,7 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount)
}
static inline void
-__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
+percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
percpu_counter_add(fbc, amount);
}
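The percpu_counter change is purely a rename: __percpu_counter_add() becomes percpu_counter_add_batch() with unchanged semantics, making the explicit batch size visible in the name. A small illustrative caller (the counter and batch value are made up):

#include <linux/gfp.h>
#include <linux/percpu_counter.h>

static struct percpu_counter nr_dirty;

static int dirty_counter_init(void)
{
        return percpu_counter_init(&nr_dirty, 0, GFP_KERNEL);
}

static void account_dirty(long nr)
{
        /* Fold per-cpu deltas into the global count in batches of 64. */
        percpu_counter_add_batch(&nr_dirty, nr, 64);
}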
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 7620eb127cff..231d3075815a 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -73,10 +73,16 @@
* operation, if several modes of operation are supported these can be
* passed in the argument on a custom form, else just use argument 1
* to indicate low power mode, argument 0 turns low power mode off.
- * @PIN_CONFIG_OUTPUT: this will configure the pin as an output. Use argument
- * 1 to indicate high level, argument 0 to indicate low level. (Please
- * see Documentation/pinctrl.txt, section "GPIO mode pitfalls" for a
- * discussion around this parameter.)
+ * @PIN_CONFIG_OUTPUT_ENABLE: this will enable the pin's output mode
+ * without driving a value there. For most platforms this reduces to
+ * enabling the output buffers and then letting the pin controller's current
+ * configuration (e.g. the currently selected mux function) drive values on
+ * the line. Use argument 1 to enable output mode, argument 0 to disable
+ * it.
+ * @PIN_CONFIG_OUTPUT: this will configure the pin as an output and drive a
+ * value on the line. Use argument 1 to indicate high level, argument 0 to
+ * indicate low level. (Please see Documentation/pinctrl.txt, section
+ * "GPIO mode pitfalls" for a discussion around this parameter.)
* @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
* supplies, the argument to this parameter (on a custom format) tells
* the driver which alternative power source to use.
@@ -105,6 +111,7 @@ enum pin_config_param {
PIN_CONFIG_INPUT_SCHMITT,
PIN_CONFIG_INPUT_SCHMITT_ENABLE,
PIN_CONFIG_LOW_POWER_MODE,
+ PIN_CONFIG_OUTPUT_ENABLE,
PIN_CONFIG_OUTPUT,
PIN_CONFIG_POWER_SOURCE,
PIN_CONFIG_SLEW_RATE,
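To illustrate the distinction documented above, a hedged sketch of how a pin controller's .pin_config_set() callback might handle the new PIN_CONFIG_OUTPUT_ENABLE next to PIN_CONFIG_OUTPUT; the my_set_* register helpers are stand-ins for real hardware access:

#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinctrl.h>

static void my_set_direction(unsigned int pin, bool output) { /* stub */ }
static void my_set_level(unsigned int pin, u32 level) { /* stub */ }

static int my_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                          unsigned long *configs, unsigned int num_configs)
{
        unsigned int i;

        for (i = 0; i < num_configs; i++) {
                enum pin_config_param param = pinconf_to_config_param(configs[i]);
                u32 arg = pinconf_to_config_argument(configs[i]);

                switch (param) {
                case PIN_CONFIG_OUTPUT_ENABLE:
                        /* Switch the buffer direction only; don't drive a level. */
                        my_set_direction(pin, arg);
                        break;
                case PIN_CONFIG_OUTPUT:
                        /* Enable the output and drive the requested level. */
                        my_set_direction(pin, true);
                        my_set_level(pin, arg);
                        break;
                default:
                        return -ENOTSUPP;
                }
        }
        return 0;
}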
diff --git a/include/linux/i2c/adp5588.h b/include/linux/platform_data/adp5588.h
index c2153049cfbd..c2153049cfbd 100644
--- a/include/linux/i2c/adp5588.h
+++ b/include/linux/platform_data/adp5588.h
diff --git a/include/linux/i2c/adp8860.h b/include/linux/platform_data/adp8860.h
index 0b4d39855c91..0b4d39855c91 100644
--- a/include/linux/i2c/adp8860.h
+++ b/include/linux/platform_data/adp8860.h
diff --git a/include/linux/i2c/adp8870.h b/include/linux/platform_data/adp8870.h
index 624dceccbd5b..624dceccbd5b 100644
--- a/include/linux/i2c/adp8870.h
+++ b/include/linux/platform_data/adp8870.h
diff --git a/include/linux/platform_data/clk-realview.h b/include/linux/platform_data/clk-realview.h
deleted file mode 100644
index 2e426a7dbc51..000000000000
--- a/include/linux/platform_data/clk-realview.h
+++ /dev/null
@@ -1 +0,0 @@
-void realview_clk_init(void __iomem *sysbase, bool is_pb1176);
diff --git a/include/linux/platform_data/leds-pca963x.h b/include/linux/platform_data/leds-pca963x.h
index e731f0036329..54e845ffb5ed 100644
--- a/include/linux/platform_data/leds-pca963x.h
+++ b/include/linux/platform_data/leds-pca963x.h
@@ -33,10 +33,16 @@ enum pca963x_blink_type {
PCA963X_HW_BLINK,
};
+enum pca963x_direction {
+ PCA963X_NORMAL,
+ PCA963X_INVERTED,
+};
+
struct pca963x_platform_data {
struct led_platform_data leds;
enum pca963x_outdrv outdrv;
enum pca963x_blink_type blink_type;
+ enum pca963x_direction dir;
};
#endif /* __LINUX_PCA963X_H*/
diff --git a/include/linux/i2c/lm8323.h b/include/linux/platform_data/lm8323.h
index 478d668bc590..478d668bc590 100644
--- a/include/linux/i2c/lm8323.h
+++ b/include/linux/platform_data/lm8323.h
diff --git a/include/linux/i2c/max732x.h b/include/linux/platform_data/max732x.h
index c04bac8bf2fe..c04bac8bf2fe 100644
--- a/include/linux/i2c/max732x.h
+++ b/include/linux/platform_data/max732x.h
diff --git a/include/linux/i2c/mcs.h b/include/linux/platform_data/mcs.h
index 61bb18a4fd3c..61bb18a4fd3c 100644
--- a/include/linux/i2c/mcs.h
+++ b/include/linux/platform_data/mcs.h
diff --git a/include/linux/i2c/mms114.h b/include/linux/platform_data/mms114.h
index 5722ebfb2738..5722ebfb2738 100644
--- a/include/linux/i2c/mms114.h
+++ b/include/linux/platform_data/mms114.h
diff --git a/include/linux/i2c/pcf857x.h b/include/linux/platform_data/pcf857x.h
index 0767a2a6b2f1..0767a2a6b2f1 100644
--- a/include/linux/i2c/pcf857x.h
+++ b/include/linux/platform_data/pcf857x.h
diff --git a/include/linux/i2c/tsc2007.h b/include/linux/platform_data/tsc2007.h
index 4f35b6ad3889..c2d3aa1dadd4 100644
--- a/include/linux/i2c/tsc2007.h
+++ b/include/linux/platform_data/tsc2007.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_I2C_TSC2007_H
#define __LINUX_I2C_TSC2007_H
-/* linux/i2c/tsc2007.h */
+/* linux/platform_data/tsc2007.h */
struct tsc2007_platform_data {
u16 model; /* 2007. */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 98c2a7c7108e..49f634d96118 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -172,7 +172,7 @@ extern int platform_device_add_resources(struct platform_device *pdev,
extern int platform_device_add_data(struct platform_device *pdev,
const void *data, size_t size);
extern int platform_device_add_properties(struct platform_device *pdev,
- struct property_entry *properties);
+ const struct property_entry *properties);
extern int platform_device_add(struct platform_device *pdev);
extern void platform_device_del(struct platform_device *pdev);
extern void platform_device_put(struct platform_device *pdev);
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
deleted file mode 100644
index 71ecf3d46aac..000000000000
--- a/include/linux/pmem.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-#ifndef __PMEM_H__
-#define __PMEM_H__
-
-#include <linux/io.h>
-#include <linux/uio.h>
-
-#ifdef CONFIG_ARCH_HAS_PMEM_API
-#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
-#include <asm/pmem.h>
-#else
-#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
-/*
- * These are simply here to enable compilation, all call sites gate
- * calling these symbols with arch_has_pmem_api() and redirect to the
- * implementation in asm/pmem.h.
- */
-static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
-{
- BUG();
-}
-
-static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
- struct iov_iter *i)
-{
- BUG();
- return 0;
-}
-
-static inline void arch_clear_pmem(void *addr, size_t size)
-{
- BUG();
-}
-
-static inline void arch_wb_cache_pmem(void *addr, size_t size)
-{
- BUG();
-}
-
-static inline void arch_invalidate_pmem(void *addr, size_t size)
-{
- BUG();
-}
-#endif
-
-static inline bool arch_has_pmem_api(void)
-{
- return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
-}
-
-/**
- * memcpy_to_pmem - copy data to persistent memory
- * @dst: destination buffer for the copy
- * @src: source buffer for the copy
- * @n: length of the copy in bytes
- *
- * Perform a memory copy that results in the destination of the copy
- * being effectively evicted from, or never written to, the processor
- * cache hierarchy after the copy completes. After memcpy_to_pmem()
- * data may still reside in cpu or platform buffers, so this operation
- * must be followed by a blkdev_issue_flush() on the pmem block device.
- */
-static inline void memcpy_to_pmem(void *dst, const void *src, size_t n)
-{
- if (arch_has_pmem_api())
- arch_memcpy_to_pmem(dst, src, n);
- else
- memcpy(dst, src, n);
-}
-
-/**
- * copy_from_iter_pmem - copy data from an iterator to PMEM
- * @addr: PMEM destination address
- * @bytes: number of bytes to copy
- * @i: iterator with source data
- *
- * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
- * See blkdev_issue_flush() note for memcpy_to_pmem().
- */
-static inline size_t copy_from_iter_pmem(void *addr, size_t bytes,
- struct iov_iter *i)
-{
- if (arch_has_pmem_api())
- return arch_copy_from_iter_pmem(addr, bytes, i);
- return copy_from_iter_nocache(addr, bytes, i);
-}
-
-/**
- * clear_pmem - zero a PMEM memory range
- * @addr: virtual start address
- * @size: number of bytes to zero
- *
- * Write zeros into the memory range starting at 'addr' for 'size' bytes.
- * See blkdev_issue_flush() note for memcpy_to_pmem().
- */
-static inline void clear_pmem(void *addr, size_t size)
-{
- if (arch_has_pmem_api())
- arch_clear_pmem(addr, size);
- else
- memset(addr, 0, size);
-}
-
-/**
- * invalidate_pmem - flush a pmem range from the cache hierarchy
- * @addr: virtual start address
- * @size: bytes to invalidate (internally aligned to cache line size)
- *
- * For platforms that support clearing poison this flushes any poisoned
- * ranges out of the cache
- */
-static inline void invalidate_pmem(void *addr, size_t size)
-{
- if (arch_has_pmem_api())
- arch_invalidate_pmem(addr, size);
-}
-
-/**
- * wb_cache_pmem - write back processor cache for PMEM memory range
- * @addr: virtual start address
- * @size: number of bytes to write back
- *
- * Write back the processor cache range starting at 'addr' for 'size' bytes.
- * See blkdev_issue_flush() note for memcpy_to_pmem().
- */
-static inline void wb_cache_pmem(void *addr, size_t size)
-{
- if (arch_has_pmem_api())
- arch_wb_cache_pmem(addr, size);
-}
-#endif /* __PMEM_H__ */
diff --git a/include/linux/processor.h b/include/linux/processor.h
new file mode 100644
index 000000000000..da0c5e56ca02
--- /dev/null
+++ b/include/linux/processor.h
@@ -0,0 +1,70 @@
+/* Misc low level processor primitives */
+#ifndef _LINUX_PROCESSOR_H
+#define _LINUX_PROCESSOR_H
+
+#include <asm/processor.h>
+
+/*
+ * spin_begin is used before beginning a busy-wait loop, and must be paired
+ * with spin_end when the loop is exited. spin_cpu_relax must be called
+ * within the loop.
+ *
+ * The loop body should be as small and fast as possible, on the order of
+ * tens of instructions/cycles as a guide. It should avoid calling
+ * cpu_relax, or any "spin" or sleep type of primitive including nested uses
+ * of these primitives. It should not lock or take any other resource.
+ * Violations of these guidelines will not cause a bug, but may cause
+ * suboptimal performance.
+ *
+ * These loops are optimized to be used where wait times are expected to be
+ * less than the cost of a context switch (and associated overhead).
+ *
+ * Detection of resource owner and decision to spin or sleep or guest-yield
+ * (e.g., spin lock holder vcpu preempted, or mutex owner not on CPU) can be
+ * tested within the loop body.
+ */
+#ifndef spin_begin
+#define spin_begin()
+#endif
+
+#ifndef spin_cpu_relax
+#define spin_cpu_relax() cpu_relax()
+#endif
+
+/*
+ * spin_cpu_yield may be called to yield (undirected) to the hypervisor if
+ * necessary. This should be used if the wait is expected to take longer
+ * than context switch overhead, but we can't sleep or do a directed yield.
+ */
+#ifndef spin_cpu_yield
+#define spin_cpu_yield() cpu_relax_yield()
+#endif
+
+#ifndef spin_end
+#define spin_end()
+#endif
+
+/*
+ * spin_until_cond can be used to wait for a condition to become true. It
+ * may be expected that the first iteration will be true in the common case
+ * (no spinning), so that callers should not require a first "likely" test
+ * for the uncontended case before using this primitive.
+ *
+ * Usage and implementation guidelines are the same as for the spin_begin
+ * primitives, above.
+ */
+#ifndef spin_until_cond
+#define spin_until_cond(cond) \
+do { \
+ if (unlikely(!(cond))) { \
+ spin_begin(); \
+ do { \
+ spin_cpu_relax(); \
+ } while (!(cond)); \
+ spin_end(); \
+ } \
+} while (0)
+
+#endif
+
+#endif /* _LINUX_PROCESSOR_H */
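A brief usage sketch of the new primitives: spinning on a flag set by another CPU, either through spin_until_cond() or its open-coded equivalent (the flag is illustrative):

#include <linux/compiler.h>
#include <linux/processor.h>

static int ready;

static void wait_for_ready(void)
{
        /* Convenience form; expands to the spin_begin/spin_cpu_relax loop. */
        spin_until_cond(READ_ONCE(ready));
}

static void wait_for_ready_open_coded(void)
{
        if (unlikely(!READ_ONCE(ready))) {
                spin_begin();
                do {
                        spin_cpu_relax();
                } while (!READ_ONCE(ready));
                spin_end();
        }
}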
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 4d57bbaaa1bf..30f945329818 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -142,6 +142,7 @@ int raid6_select_algo(void);
extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256)));
extern const u8 raid6_vgfmul[256][32] __attribute__((aligned(256)));
extern const u8 raid6_gfexp[256] __attribute__((aligned(256)));
+extern const u8 raid6_gflog[256] __attribute__((aligned(256)));
extern const u8 raid6_gfinv[256] __attribute__((aligned(256)));
extern const u8 raid6_gfexi[256] __attribute__((aligned(256)));
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9c4ca7433d9d..5e8759b1b428 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -904,7 +904,7 @@ struct task_struct {
#ifdef CONFIG_NUMA
/* Protected by alloc_lock: */
struct mempolicy *mempolicy;
- short il_next;
+ short il_prev;
short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
new file mode 100644
index 000000000000..e5140648f638
--- /dev/null
+++ b/include/linux/set_memory.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2017, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ */
+#ifndef _LINUX_SET_MEMORY_H_
+#define _LINUX_SET_MEMORY_H_
+
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+#include <asm/set_memory.h>
+#else
+static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
+#endif
+
+#endif /* _LINUX_SET_MEMORY_H_ */
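The new header lets generic code call the set_memory_*() helpers unconditionally; without CONFIG_ARCH_HAS_SET_MEMORY they degrade to no-ops returning 0. A hedged sketch of hardening one freshly built page of data (the table is illustrative):

#include <linux/gfp.h>
#include <linux/set_memory.h>

static unsigned long table;

static int build_and_protect_table(void)
{
        table = __get_free_page(GFP_KERNEL);
        if (!table)
                return -ENOMEM;

        /* ... populate the page ... */

        /* Read-only and non-executable where the architecture supports it. */
        set_memory_ro(table, 1);
        set_memory_nx(table, 1);
        return 0;
}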
diff --git a/include/linux/signal.h b/include/linux/signal.h
index a39feddd71ba..e2678b5dbb21 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -243,8 +243,6 @@ extern int do_send_sig_info(int sig, struct siginfo *info,
struct task_struct *p, bool group);
extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p);
extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *);
-extern int do_sigtimedwait(const sigset_t *, siginfo_t *,
- const struct timespec *);
extern int sigprocmask(int, sigset_t *, sigset_t *);
extern void set_current_blocked(sigset_t *);
extern void __set_current_blocked(const sigset_t *);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 93315d6b21a8..cc0faf3a90be 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -41,12 +41,31 @@ struct kmem_cache_cpu {
void **freelist; /* Pointer to next available object */
unsigned long tid; /* Globally unique transaction id */
struct page *page; /* The slab from which we are allocating */
+#ifdef CONFIG_SLUB_CPU_PARTIAL
struct page *partial; /* Partially allocated frozen slabs */
+#endif
#ifdef CONFIG_SLUB_STATS
unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+#define slub_percpu_partial(c) ((c)->partial)
+
+#define slub_set_percpu_partial(c, p) \
+({ \
+ slub_percpu_partial(c) = (p)->next; \
+})
+
+#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c))
+#else
+#define slub_percpu_partial(c) NULL
+
+#define slub_set_percpu_partial(c, p)
+
+#define slub_percpu_partial_read_once(c) NULL
+#endif // CONFIG_SLUB_CPU_PARTIAL
+
/*
* Word size structure that can be atomically updated or read and that
* contains both the order and the number of objects that a slab of the
@@ -67,7 +86,9 @@ struct kmem_cache {
int size; /* The size of an object including meta data */
int object_size; /* The size of an object without meta data */
int offset; /* Free pointer offset. */
+#ifdef CONFIG_SLUB_CPU_PARTIAL
int cpu_partial; /* Number of per cpu partial objects to keep around */
+#endif
struct kmem_cache_order_objects oo;
/* Allocation and freeing of slabs */
@@ -79,9 +100,9 @@ struct kmem_cache {
int inuse; /* Offset to metadata */
int align; /* Alignment */
int reserved; /* Reserved bytes at the end of slabs */
+ int red_left_pad; /* Left redzone padding size */
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
- int red_left_pad; /* Left redzone padding size */
#ifdef CONFIG_SYSFS
struct kobject kobj; /* For sysfs */
struct work_struct kobj_remove_work;
@@ -112,6 +133,17 @@ struct kmem_cache {
struct kmem_cache_node *node[MAX_NUMNODES];
};
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+#define slub_cpu_partial(s) ((s)->cpu_partial)
+#define slub_set_cpu_partial(s, n) \
+({ \
+ slub_cpu_partial(s) = (n); \
+})
+#else
+#define slub_cpu_partial(s) (0)
+#define slub_set_cpu_partial(s, n)
+#endif // CONFIG_SLUB_CPU_PARTIAL
+
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_release(struct kmem_cache *);
diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h
index aa07d7b32568..82d96a346e6f 100644
--- a/include/linux/spi/mcp23s08.h
+++ b/include/linux/spi/mcp23s08.h
@@ -1,11 +1,3 @@
-
-/* FIXME driver should be able to handle IRQs... */
-
-struct mcp23s08_chip_info {
- bool is_present; /* true if populated */
- unsigned pullups; /* BIT(x) means enable pullup x */
-};
-
struct mcp23s08_platform_data {
/* For mcp23s08, up to 4 slaves (numbered 0..3) can share one SPI
* chipselect, each providing 1 gpio_chip instance with 8 gpios.
@@ -13,31 +5,13 @@ struct mcp23s08_platform_data {
* chipselect, each providing 1 gpio_chip (port A + port B) with
* 16 gpios.
*/
- struct mcp23s08_chip_info chip[8];
+ u32 spi_present_mask;
- /* "base" is the number of the first GPIO. Dynamic assignment is
- * not currently supported, and even if there are gaps in chip
- * addressing the GPIO numbers are sequential .. so for example
- * if only slaves 0 and 3 are present, their GPIOs range from
- * base to base+15 (or base+31 for s17 variant).
+ /* "base" is the number of the first GPIO or -1 for dynamic
+ * assignment. If there are gaps in chip addressing the GPIO
+ * numbers are sequential .. so for example if only slaves 0
+ * and 3 are present, their GPIOs range from base to base+15
+ * (or base+31 for s17 variant).
*/
unsigned base;
- /* Marks the device as a interrupt controller.
- * NOTE: The interrupt functionality is only supported for i2c
- * versions of the chips. The spi chips can also do the interrupts,
- * but this is not supported by the linux driver yet.
- */
- bool irq_controller;
-
- /* Sets the mirror flag in the IOCON register. Devices
- * with two interrupt outputs (these are the devices ending with 17 and
- * those that have 16 IOs) have two IO banks: IO 0-7 form bank 1 and
- * IO 8-15 are bank 2. These chips have two different interrupt outputs:
- * One for bank 1 and another for bank 2. If irq-mirror is set, both
- * interrupts are generated regardless of the bank that an input change
- * occurred on. If it is not set, the interrupt are only generated for
- * the bank they belong to.
- * On devices with only one interrupt output this property is useless.
- */
- bool mirror;
};
diff --git a/include/linux/string.h b/include/linux/string.h
index 537918f8a98e..7439d83eaa33 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -122,6 +122,12 @@ static inline __must_check int memcpy_mcsafe(void *dst, const void *src,
return 0;
}
#endif
+#ifndef __HAVE_ARCH_MEMCPY_FLUSHCACHE
+static inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
+{
+ memcpy(dst, src, cnt);
+}
+#endif
void *memchr_inv(const void *s, int c, size_t n);
char *strreplace(char *s, char old, char new);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index ba5882419a7d..5ab1c98c7d27 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -353,7 +353,7 @@ extern struct address_space *swapper_spaces[];
>> SWAP_ADDRESS_SPACE_SHIFT])
extern unsigned long total_swapcache_pages(void);
extern void show_swap_cache_info(void);
-extern int add_to_swap(struct page *, struct list_head *list);
+extern int add_to_swap(struct page *page);
extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
extern void __delete_from_swap_cache(struct page *);
@@ -386,15 +386,15 @@ static inline long get_nr_swap_pages(void)
}
extern void si_swapinfo(struct sysinfo *);
-extern swp_entry_t get_swap_page(void);
+extern swp_entry_t get_swap_page(struct page *page);
+extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
-extern int get_swap_pages(int n, swp_entry_t swp_entries[]);
+extern int get_swap_pages(int n, bool cluster, swp_entry_t swp_entries[]);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
-extern void swapcache_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **);
@@ -453,7 +453,7 @@ static inline void swap_free(swp_entry_t swp)
{
}
-static inline void swapcache_free(swp_entry_t swp)
+static inline void put_swap_page(struct page *page, swp_entry_t swp)
{
}
@@ -473,7 +473,7 @@ static inline struct page *lookup_swap_cache(swp_entry_t swp)
return NULL;
}
-static inline int add_to_swap(struct page *page, struct list_head *list)
+static inline int add_to_swap(struct page *page)
{
return 0;
}
@@ -515,7 +515,7 @@ static inline int try_to_free_swap(struct page *page)
return 0;
}
-static inline swp_entry_t get_swap_page(void)
+static inline swp_entry_t get_swap_page(struct page *page)
{
swp_entry_t entry;
entry.val = 0;
@@ -548,7 +548,7 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
-extern void mem_cgroup_uncharge_swap(swp_entry_t entry);
+extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
extern bool mem_cgroup_swap_full(struct page *page);
#else
@@ -562,7 +562,8 @@ static inline int mem_cgroup_try_charge_swap(struct page *page,
return 0;
}
-static inline void mem_cgroup_uncharge_swap(swp_entry_t entry)
+static inline void mem_cgroup_uncharge_swap(swp_entry_t entry,
+ unsigned int nr_pages)
{
}
diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h
index 145306bdc92f..b2b8ec7bda3f 100644
--- a/include/linux/swap_cgroup.h
+++ b/include/linux/swap_cgroup.h
@@ -7,7 +7,8 @@
extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
unsigned short old, unsigned short new);
-extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
+extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
+ unsigned int nr_ents);
extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
extern int swap_cgroup_swapon(int type, unsigned long max_pages);
extern void swap_cgroup_swapoff(int type);
@@ -15,7 +16,8 @@ extern void swap_cgroup_swapoff(int type);
#else
static inline
-unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
+unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
+ unsigned int nr_ents)
{
return 0;
}
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 980c3c9b06f8..3cb15ea48aee 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -650,7 +650,7 @@ asmlinkage long sys_olduname(struct oldold_utsname __user *);
asmlinkage long sys_getrlimit(unsigned int resource,
struct rlimit __user *rlim);
-#if defined(COMPAT_RLIM_OLD_INFINITY) || !(defined(CONFIG_IA64))
+#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim);
#endif
asmlinkage long sys_setrlimit(unsigned int resource,
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index d7d3ea637dd0..250a27614328 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -113,6 +113,33 @@ static inline void check_object_size(const void *ptr, unsigned long n,
{ }
#endif /* CONFIG_HARDENED_USERCOPY */
+extern void __compiletime_error("copy source size is too small")
+__bad_copy_from(void);
+extern void __compiletime_error("copy destination size is too small")
+__bad_copy_to(void);
+
+static inline void copy_overflow(int size, unsigned long count)
+{
+ WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
+
+static __always_inline bool
+check_copy_size(const void *addr, size_t bytes, bool is_source)
+{
+ int sz = __compiletime_object_size(addr);
+ if (unlikely(sz >= 0 && sz < bytes)) {
+ if (!__builtin_constant_p(bytes))
+ copy_overflow(sz, bytes);
+ else if (is_source)
+ __bad_copy_from();
+ else
+ __bad_copy_to();
+ return false;
+ }
+ check_object_size(addr, bytes, is_source);
+ return true;
+}
+
#ifndef arch_setup_new_exec
static inline void arch_setup_new_exec(void) { }
#endif
diff --git a/include/linux/time.h b/include/linux/time.h
index f9858d7e6361..4abb32d4c6b8 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -180,9 +180,6 @@ extern int do_getitimer(int which, struct itimerval *value);
extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);
-struct tms;
-extern void do_sys_times(struct tms *);
-
/*
* Similar to the struct tm in userspace <time.h>, but it needs to be here so
* that the kernel source is self contained.
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index a556805eff8a..f73cedfa2e0b 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -151,7 +151,15 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
int type, unsigned long len,
unsigned long flags, int pc);
-void tracing_record_cmdline(struct task_struct *tsk);
+#define TRACE_RECORD_CMDLINE BIT(0)
+#define TRACE_RECORD_TGID BIT(1)
+
+void tracing_record_taskinfo(struct task_struct *task, int flags);
+void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
+ struct task_struct *next, int flags);
+
+void tracing_record_cmdline(struct task_struct *task);
+void tracing_record_tgid(struct task_struct *task);
int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
@@ -290,6 +298,7 @@ struct trace_subsystem_dir;
enum {
EVENT_FILE_FL_ENABLED_BIT,
EVENT_FILE_FL_RECORDED_CMD_BIT,
+ EVENT_FILE_FL_RECORDED_TGID_BIT,
EVENT_FILE_FL_FILTERED_BIT,
EVENT_FILE_FL_NO_SET_FILTER_BIT,
EVENT_FILE_FL_SOFT_MODE_BIT,
@@ -303,6 +312,7 @@ enum {
* Event file flags:
* ENABLED - The event is enabled
* RECORDED_CMD - The comms should be recorded at sched_switch
+ * RECORDED_TGID - The tgids should be recorded at sched_switch
* FILTERED - The event has a filter attached
* NO_SET_FILTER - Set when filter has error and is to be ignored
* SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED
@@ -315,6 +325,7 @@ enum {
enum {
EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
EVENT_FILE_FL_RECORDED_CMD = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
+ EVENT_FILE_FL_RECORDED_TGID = (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
EVENT_FILE_FL_FILTERED = (1 << EVENT_FILE_FL_FILTERED_BIT),
EVENT_FILE_FL_NO_SET_FILTER = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
EVENT_FILE_FL_SOFT_MODE = (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index cc48cb2ce209..a26ffbe09e71 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -25,10 +25,10 @@ struct module;
struct tracepoint;
struct notifier_block;
-struct trace_enum_map {
+struct trace_eval_map {
const char *system;
- const char *enum_string;
- unsigned long enum_value;
+ const char *eval_string;
+ unsigned long eval_value;
};
#define TRACEPOINT_DEFAULT_PRIO 10
@@ -88,6 +88,7 @@ extern void syscall_unregfunc(void);
#define PARAMS(args...) args
#define TRACE_DEFINE_ENUM(x)
+#define TRACE_DEFINE_SIZEOF(x)
#endif /* _LINUX_TRACEPOINT_H */
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 201418d5e15c..acdd6f915a8d 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -109,8 +109,11 @@ static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
- if (likely(access_ok(VERIFY_READ, from, n)))
+ might_fault();
+ if (likely(access_ok(VERIFY_READ, from, n))) {
+ kasan_check_write(to, n);
res = raw_copy_from_user(to, from, n);
+ }
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
@@ -124,8 +127,11 @@ _copy_from_user(void *, const void __user *, unsigned long);
static inline unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (access_ok(VERIFY_WRITE, to, n))
+ might_fault();
+ if (access_ok(VERIFY_WRITE, to, n)) {
+ kasan_check_read(from, n);
n = raw_copy_to_user(to, from, n);
+ }
return n;
}
#else
@@ -133,59 +139,23 @@ extern unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif
-extern void __compiletime_error("usercopy buffer size is too small")
-__bad_copy_user(void);
-
-static inline void copy_user_overflow(int size, unsigned long count)
-{
- WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
-}
-
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
- int sz = __compiletime_object_size(to);
-
- might_fault();
- kasan_check_write(to, n);
-
- if (likely(sz < 0 || sz >= n)) {
- check_object_size(to, n, false);
+ if (likely(check_copy_size(to, n, false)))
n = _copy_from_user(to, from, n);
- } else if (!__builtin_constant_p(n))
- copy_user_overflow(sz, n);
- else
- __bad_copy_user();
-
return n;
}
static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
- int sz = __compiletime_object_size(from);
-
- kasan_check_read(from, n);
- might_fault();
-
- if (likely(sz < 0 || sz >= n)) {
- check_object_size(from, n, true);
+ if (likely(check_copy_size(from, n, true)))
n = _copy_to_user(to, from, n);
- } else if (!__builtin_constant_p(n))
- copy_user_overflow(sz, n);
- else
- __bad_copy_user();
-
return n;
}
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
-__copy_in_user(void __user *to, const void *from, unsigned long n)
-{
- might_fault();
- return raw_copy_in_user(to, from, n);
-}
-static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
diff --git a/include/linux/uio.h b/include/linux/uio.h
index f2d36a3d3005..342d2dc225b9 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -10,6 +10,7 @@
#define __LINUX_UIO_H
#include <linux/kernel.h>
+#include <linux/thread_info.h>
#include <uapi/linux/uio.h>
struct page;
@@ -91,11 +92,79 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
-size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
-size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
-bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
-size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
-bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
+
+size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
+size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
+bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
+size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
+bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
+
+static __always_inline __must_check
+size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(!check_copy_size(addr, bytes, true)))
+ return bytes;
+ else
+ return _copy_to_iter(addr, bytes, i);
+}
+
+static __always_inline __must_check
+size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(!check_copy_size(addr, bytes, false)))
+ return bytes;
+ else
+ return _copy_from_iter(addr, bytes, i);
+}
+
+static __always_inline __must_check
+bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(!check_copy_size(addr, bytes, false)))
+ return false;
+ else
+ return _copy_from_iter_full(addr, bytes, i);
+}
+
+static __always_inline __must_check
+size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(!check_copy_size(addr, bytes, false)))
+ return bytes;
+ else
+ return _copy_from_iter_nocache(addr, bytes, i);
+}
+
+static __always_inline __must_check
+bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(!check_copy_size(addr, bytes, false)))
+ return false;
+ else
+ return _copy_from_iter_full_nocache(addr, bytes, i);
+}
+
+#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+/*
+ * Note: users like pmem that depend on the stricter semantics of
+ * copy_from_iter_flushcache() relative to copy_from_iter_nocache() must check for
+ * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
+ * destination is flushed from the cache on return.
+ */
+size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
+#else
+#define _copy_from_iter_flushcache _copy_from_iter_nocache
+#endif
+
+static __always_inline __must_check
+size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(!check_copy_size(addr, bytes, false)))
+ return bytes;
+ else
+ return _copy_from_iter_flushcache(addr, bytes, i);
+}
+
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
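Following the note in the header, a pmem-style consumer should only rely on the flushcache semantics when the architecture provides them; a hedged sketch of a driver-internal copy helper (the function name is made up and is not the real dax operation signature):

#include <linux/uio.h>

static size_t my_copy_from_iter(void *kaddr, size_t bytes, struct iov_iter *i)
{
        /*
         * With CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE the data is pushed past
         * the CPU cache on return; otherwise fall back to the nocache copy
         * and schedule an explicit write-back separately.
         */
        if (IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE))
                return copy_from_iter_flushcache(kaddr, bytes, i);

        return copy_from_iter_nocache(kaddr, bytes, i);
}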
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index be3ab2d13adf..37e8d31a4632 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -41,6 +41,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
PAGEOUTRUN, PGROTATED,
DROP_PAGECACHE, DROP_SLAB,
+ OOM_KILL,
#ifdef CONFIG_NUMA_BALANCING
NUMA_PTE_UPDATES,
NUMA_HUGE_PTE_UPDATES,
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 613771909b6e..b3d85f30d424 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -3,7 +3,6 @@
#include <linux/types.h>
#include <linux/percpu.h>
-#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
diff --git a/include/linux/wmi.h b/include/linux/wmi.h
new file mode 100644
index 000000000000..cd0d7734dc49
--- /dev/null
+++ b/include/linux/wmi.h
@@ -0,0 +1,59 @@
+/*
+ * wmi.h - ACPI WMI interface
+ *
+ * Copyright (c) 2015 Andrew Lutomirski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _LINUX_WMI_H
+#define _LINUX_WMI_H
+
+#include <linux/device.h>
+#include <linux/acpi.h>
+
+struct wmi_device {
+ struct device dev;
+
+ /* True for data blocks implementing the Set Control Method */
+ bool setable;
+};
+
+/* Caller must kfree the result. */
+extern union acpi_object *wmidev_block_query(struct wmi_device *wdev,
+ u8 instance);
+
+/* Gets another device on the same bus. Caller must put_device the result. */
+extern struct wmi_device *wmidev_get_other_guid(struct wmi_device *wdev,
+ const char *guid_string);
+
+struct wmi_device_id {
+ const char *guid_string;
+};
+
+struct wmi_driver {
+ struct device_driver driver;
+ const struct wmi_device_id *id_table;
+
+ int (*probe)(struct wmi_device *wdev);
+ int (*remove)(struct wmi_device *wdev);
+ void (*notify)(struct wmi_device *device, union acpi_object *data);
+};
+
+extern int __must_check __wmi_driver_register(struct wmi_driver *driver,
+ struct module *owner);
+extern void wmi_driver_unregister(struct wmi_driver *driver);
+#define wmi_driver_register(driver) __wmi_driver_register((driver), THIS_MODULE)
+
+#define module_wmi_driver(__wmi_driver) \
+ module_driver(__wmi_driver, wmi_driver_register, \
+ wmi_driver_unregister)
+
+#endif
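A minimal, hedged skeleton of a driver sitting on the new WMI bus; the GUID, names and data-block handling are placeholders:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wmi.h>

#define MY_WMI_GUID "00000000-0000-0000-0000-000000000000"      /* placeholder */

static int my_wmi_probe(struct wmi_device *wdev)
{
        union acpi_object *obj;

        obj = wmidev_block_query(wdev, 0);
        if (!obj)
                return -ENODEV;
        /* ... interpret the data block ... */
        kfree(obj);
        return 0;
}

static int my_wmi_remove(struct wmi_device *wdev)
{
        return 0;
}

static const struct wmi_device_id my_wmi_id_table[] = {
        { .guid_string = MY_WMI_GUID },
        { }
};

static struct wmi_driver my_wmi_driver = {
        .driver = {
                .name = "my-wmi",
        },
        .id_table = my_wmi_id_table,
        .probe = my_wmi_probe,
        .remove = my_wmi_remove,
};
module_wmi_driver(my_wmi_driver);

MODULE_LICENSE("GPL");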
diff --git a/include/media/cec.h b/include/media/cec.h
index 201f060978da..56643b27e4b8 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -164,6 +164,7 @@ struct cec_adapter {
u8 available_log_addrs;
u16 phys_addr;
+ bool needs_hpd;
bool is_configuring;
bool is_configured;
u32 monitor_all_cnt;
@@ -206,6 +207,8 @@ static inline bool cec_is_sink(const struct cec_adapter *adap)
#define cec_phys_addr_exp(pa) \
((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf
+struct edid;
+
#if IS_REACHABLE(CONFIG_CEC_CORE)
struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
void *priv, const char *name, u32 caps, u8 available_las);
@@ -217,12 +220,20 @@ int cec_s_log_addrs(struct cec_adapter *adap, struct cec_log_addrs *log_addrs,
bool block);
void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr,
bool block);
+void cec_s_phys_addr_from_edid(struct cec_adapter *adap,
+ const struct edid *edid);
int cec_transmit_msg(struct cec_adapter *adap, struct cec_msg *msg,
bool block);
/* Called by the adapter */
void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt,
u8 nack_cnt, u8 low_drive_cnt, u8 error_cnt);
+/*
+ * Simplified version of cec_transmit_done for hardware that doesn't retry
+ * failed transmits: there is always exactly one attempt, so the status
+ * alone is sufficient.
+ */
+void cec_transmit_attempt_done(struct cec_adapter *adap, u8 status);
void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg);
/**
@@ -326,6 +337,11 @@ static inline void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr,
{
}
+static inline void cec_s_phys_addr_from_edid(struct cec_adapter *adap,
+ const struct edid *edid)
+{
+}
+
static inline u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
unsigned int *offset)
{
@@ -351,4 +367,17 @@ static inline int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
#endif
+/**
+ * cec_phys_addr_invalidate() - set the physical address to INVALID
+ *
+ * @adap: the CEC adapter
+ *
+ * This is a simple helper function to invalidate the physical
+ * address.
+ */
+static inline void cec_phys_addr_invalidate(struct cec_adapter *adap)
+{
+ cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false);
+}
+
#endif /* _MEDIA_CEC_H */
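For the new cec_transmit_attempt_done() helper, a hedged interrupt-handler sketch for hardware that performs exactly one transmit attempt; the status decision would really come from a device register:

#include <linux/interrupt.h>
#include <media/cec.h>

static irqreturn_t my_cec_irq(int irq, void *priv)
{
        struct cec_adapter *adap = priv;
        bool tx_ok = true;      /* hypothetical: read from a status register */

        /* One attempt only, so the simplified helper is sufficient. */
        cec_transmit_attempt_done(adap, tx_ok ? CEC_TX_STATUS_OK
                                              : CEC_TX_STATUS_NACK);
        return IRQ_HANDLED;
}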
diff --git a/include/media/davinci/vpif_types.h b/include/media/davinci/vpif_types.h
index 385597da20dc..eae23e4e9b93 100644
--- a/include/media/davinci/vpif_types.h
+++ b/include/media/davinci/vpif_types.h
@@ -62,14 +62,14 @@ struct vpif_display_config {
struct vpif_input {
struct v4l2_input input;
- const char *subdev_name;
+ char *subdev_name;
u32 input_route;
u32 output_route;
};
struct vpif_capture_chan_config {
struct vpif_interface vpif_if;
- const struct vpif_input *inputs;
+ struct vpif_input *inputs;
int input_count;
};
@@ -81,7 +81,8 @@ struct vpif_capture_config {
int subdev_count;
int i2c_adapter_id;
const char *card_name;
- struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
- int *asd_sizes; /* 0-terminated array of asd group sizes */
+
+ struct v4l2_async_subdev *asd[VPIF_CAPTURE_MAX_CHANNELS];
+ int asd_sizes[VPIF_CAPTURE_MAX_CHANNELS];
};
#endif /* _VPIF_TYPES_H */
diff --git a/include/media/imx.h b/include/media/imx.h
new file mode 100644
index 000000000000..6e5f50d35f89
--- /dev/null
+++ b/include/media/imx.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2014-2017 Mentor Graphics Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version
+ */
+
+#ifndef __MEDIA_IMX_H__
+#define __MEDIA_IMX_H__
+
+#include <linux/imx-media.h>
+
+#endif
diff --git a/include/media/lirc_dev.h b/include/media/lirc_dev.h
index cec7d35602d1..86d15a9b6c01 100644
--- a/include/media/lirc_dev.h
+++ b/include/media/lirc_dev.h
@@ -12,8 +12,6 @@
#define MAX_IRCTL_DEVICES 8
#define BUFLEN 16
-#define mod(n, div) ((n) % (div))
-
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
@@ -90,11 +88,6 @@ static inline int lirc_buffer_empty(struct lirc_buffer *buf)
return !lirc_buffer_len(buf);
}
-static inline int lirc_buffer_available(struct lirc_buffer *buf)
-{
- return buf->size - (lirc_buffer_len(buf) / buf->chunk_size);
-}
-
static inline unsigned int lirc_buffer_read(struct lirc_buffer *buf,
unsigned char *dest)
{
@@ -133,12 +126,6 @@ static inline unsigned int lirc_buffer_write(struct lirc_buffer *buf,
* @buffer_size: Number of FIFO buffers with @chunk_size size. If zero,
* creates a buffer with BUFLEN size (16 bytes).
*
- * @sample_rate: if zero, the device will wait for an event with a new
- * code to be parsed. Otherwise, specifies the sample
- * rate for polling. Value should be between 0
- * and HZ. If equal to HZ, it would mean one polling per
- * second.
- *
* @features: lirc compatible hardware features, like LIRC_MODE_RAW,
* LIRC_CAN\_\*, as defined at include/media/lirc.h.
*
@@ -153,22 +140,10 @@ static inline unsigned int lirc_buffer_write(struct lirc_buffer *buf,
* @max_timeout: Maximum timeout for record. Valid only if
* LIRC_CAN_SET_REC_TIMEOUT is defined.
*
- * @add_to_buf: add_to_buf will be called after specified period of the
- * time or triggered by the external event, this behavior
- * depends on value of the sample_rate this function will
- * be called in user context. This routine should return
- * 0 if data was added to the buffer and -ENODATA if none
- * was available. This should add some number of bits
- * evenly divisible by code_length to the buffer.
- *
* @rbuf: if not NULL, it will be used as a read buffer, you will
* have to write to the buffer by other means, like irq's
* (see also lirc_serial.c).
*
- * @set_use_inc: set_use_inc will be called after device is opened
- *
- * @set_use_dec: set_use_dec will be called after device is closed
- *
* @rdev: Pointed to struct rc_dev associated with the LIRC
* device.
*
@@ -188,7 +163,6 @@ struct lirc_driver {
int minor;
__u32 code_length;
unsigned int buffer_size; /* in chunks holding one code each */
- int sample_rate;
__u32 features;
unsigned int chunk_size;
@@ -196,10 +170,7 @@ struct lirc_driver {
void *data;
int min_timeout;
int max_timeout;
- int (*add_to_buf)(void *data, struct lirc_buffer *buf);
struct lirc_buffer *rbuf;
- int (*set_use_inc)(void *data);
- void (*set_use_dec)(void *data);
struct rc_dev *rdev;
const struct file_operations *fops;
struct device *dev;
@@ -232,7 +203,4 @@ unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait);
long lirc_dev_fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
ssize_t lirc_dev_fop_read(struct file *file, char __user *buffer, size_t length,
loff_t *ppos);
-ssize_t lirc_dev_fop_write(struct file *file, const char __user *buffer,
- size_t length, loff_t *ppos);
-
#endif
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index c7c254c5bca1..754182d29668 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -21,6 +21,7 @@
#include <linux/bitmap.h>
#include <linux/bug.h>
+#include <linux/fwnode.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/media.h>
@@ -171,6 +172,9 @@ struct media_pad {
/**
* struct media_entity_operations - Media entity operations
+ * @get_fwnode_pad: Return the pad number based on a fwnode endpoint or
+ * a negative value on error. This operation can be used
+ * to map a fwnode to a media pad number. Optional.
* @link_setup: Notify the entity of link changes. The operation can
* return an error, in which case link setup will be
* cancelled. Optional.
@@ -184,6 +188,7 @@ struct media_pad {
* mutex held.
*/
struct media_entity_operations {
+ int (*get_fwnode_pad)(struct fwnode_endpoint *endpoint);
int (*link_setup)(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags);
@@ -816,6 +821,29 @@ struct media_pad *media_entity_remote_pad(struct media_pad *pad);
struct media_entity *media_entity_get(struct media_entity *entity);
/**
+ * media_entity_get_fwnode_pad - Get pad number from fwnode
+ *
+ * @entity: The entity
+ * @fwnode: Pointer to the fwnode_handle which should be used to find the pad
+ * @direction_flags: Expected direction of the pad, as defined in
+ * :ref:`include/uapi/linux/media.h <media_header>`
+ * (seek for ``MEDIA_PAD_FL_*``)
+ *
+ * This function can be used to resolve the media pad number from
+ * a fwnode. This is useful for devices which use more complex
+ * mappings of media pads.
+ *
+ * If the entity does not implement the get_fwnode_pad() operation
+ * then this function searches the entity for the first pad that
+ * matches the @direction_flags.
+ *
+ * Return: returns the pad number on success or a negative error code.
+ */
+int media_entity_get_fwnode_pad(struct media_entity *entity,
+ struct fwnode_handle *fwnode,
+ unsigned long direction_flags);
+
+/**
* media_graph_walk_init - Allocate resources used by graph walk.
*
* @graph: Media graph structure that will be used to walk the graph
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 73ddd721d7ba..78dea39a9b39 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -70,7 +70,6 @@ enum rc_filter_type {
/**
* struct rc_dev - represents a remote control device
* @dev: driver model's view of this device
- * @initialized: 1 if the device init has completed, 0 otherwise
* @managed_alloc: devm_rc_allocate_device was used to create rc_dev
* @sysfs_groups: sysfs attribute groups
* @input_name: name of the input child device
@@ -137,7 +136,6 @@ enum rc_filter_type {
*/
struct rc_dev {
struct device dev;
- atomic_t initialized;
bool managed_alloc;
const struct attribute_group *sysfs_groups[5];
const char *input_name;
diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h
index 8e2a236a4d03..c69d8c8a66d0 100644
--- a/include/media/v4l2-async.h
+++ b/include/media/v4l2-async.h
@@ -31,7 +31,7 @@ struct v4l2_async_notifier;
* v4l2_async_subdev.match ops
* @V4L2_ASYNC_MATCH_DEVNAME: Match will use the device name
* @V4L2_ASYNC_MATCH_I2C: Match will check for I2C adapter ID and address
- * @V4L2_ASYNC_MATCH_OF: Match will use OF node
+ * @V4L2_ASYNC_MATCH_FWNODE: Match will use firmware node
*
* This enum is used by the asynchronous sub-device logic to define the
* algorithm that will be used to match an asynchronous device.
@@ -40,7 +40,7 @@ enum v4l2_async_match_type {
V4L2_ASYNC_MATCH_CUSTOM,
V4L2_ASYNC_MATCH_DEVNAME,
V4L2_ASYNC_MATCH_I2C,
- V4L2_ASYNC_MATCH_OF,
+ V4L2_ASYNC_MATCH_FWNODE,
};
/**
@@ -55,8 +55,8 @@ struct v4l2_async_subdev {
enum v4l2_async_match_type match_type;
union {
struct {
- const struct device_node *node;
- } of;
+ struct fwnode_handle *fwnode;
+ } fwnode;
struct {
const char *name;
} device_name;
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index bee1404391dd..2d2aed56922f 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -458,6 +458,19 @@ static inline void v4l2_ctrl_unlock(struct v4l2_ctrl *ctrl)
}
/**
+ * __v4l2_ctrl_handler_setup() - Call the s_ctrl op for all controls belonging
+ * to the handler to initialize the hardware to the current control values. The
+ * caller is responsible for acquiring the control handler mutex on behalf of
+ * __v4l2_ctrl_handler_setup().
+ * @hdl: The control handler.
+ *
+ * Button controls are skipped, as are read-only controls.
+ *
+ * If @hdl == NULL, then this just returns 0.
+ */
+int __v4l2_ctrl_handler_setup(struct v4l2_ctrl_handler *hdl);
+
+/**
* v4l2_ctrl_handler_setup() - Call the s_ctrl op for all controls belonging
* to the handler to initialize the hardware to the current control values.
* @hdl: The control handler.
diff --git a/include/media/v4l2-flash-led-class.h b/include/media/v4l2-flash-led-class.h
index b0fe4d6f4a5f..f9dcd54c1745 100644
--- a/include/media/v4l2-flash-led-class.h
+++ b/include/media/v4l2-flash-led-class.h
@@ -108,7 +108,7 @@ static inline struct v4l2_flash *v4l2_ctrl_to_v4l2_flash(struct v4l2_ctrl *c)
/**
* v4l2_flash_init - initialize V4L2 flash led sub-device
* @dev: flash device, e.g. an I2C device
- * @of_node: of_node of the LED, may be NULL if the same as device's
+ * @fwn: fwnode_handle of the LED, may be NULL if the same as device's
* @fled_cdev: LED flash class device to wrap
* @iled_cdev: LED flash class device representing indicator LED associated
* with fled_cdev, may be NULL
@@ -122,7 +122,7 @@ static inline struct v4l2_flash *v4l2_ctrl_to_v4l2_flash(struct v4l2_ctrl *c)
* PTR_ERR() to obtain the numeric return value.
*/
struct v4l2_flash *v4l2_flash_init(
- struct device *dev, struct device_node *of_node,
+ struct device *dev, struct fwnode_handle *fwn,
struct led_classdev_flash *fled_cdev,
struct led_classdev_flash *iled_cdev,
const struct v4l2_flash_ops *ops,
@@ -138,7 +138,7 @@ void v4l2_flash_release(struct v4l2_flash *v4l2_flash);
#else
static inline struct v4l2_flash *v4l2_flash_init(
- struct device *dev, struct device_node *of_node,
+ struct device *dev, struct fwnode_handle *fwn,
struct led_classdev_flash *fled_cdev,
struct led_classdev_flash *iled_cdev,
const struct v4l2_flash_ops *ops,
diff --git a/include/media/v4l2-of.h b/include/media/v4l2-fwnode.h
index 4dc34b245d47..ecc1233a873e 100644
--- a/include/media/v4l2-of.h
+++ b/include/media/v4l2-fwnode.h
@@ -1,5 +1,8 @@
/*
- * V4L2 OF binding parsing library
+ * V4L2 fwnode binding parsing library
+ *
+ * Copyright (c) 2016 Intel Corporation.
+ * Author: Sakari Ailus <[email protected]>
*
* Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
* Author: Sylwester Nawrocki <[email protected]>
@@ -11,20 +14,20 @@
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*/
-#ifndef _V4L2_OF_H
-#define _V4L2_OF_H
+#ifndef _V4L2_FWNODE_H
+#define _V4L2_FWNODE_H
+#include <linux/errno.h>
+#include <linux/fwnode.h>
#include <linux/list.h>
#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/of_graph.h>
#include <media/v4l2-mediabus.h>
-struct device_node;
+struct fwnode_handle;
/**
- * struct v4l2_of_bus_mipi_csi2 - MIPI CSI-2 bus data structure
+ * struct v4l2_fwnode_bus_mipi_csi2 - MIPI CSI-2 bus data structure
* @flags: media bus (V4L2_MBUS_*) flags
* @data_lanes: an array of physical data lane indexes
* @clock_lane: physical lane index of the clock lane
@@ -32,7 +35,7 @@ struct device_node;
* @lane_polarities: polarity of the lanes. The order is the same of
* the physical lanes.
*/
-struct v4l2_of_bus_mipi_csi2 {
+struct v4l2_fwnode_bus_mipi_csi2 {
unsigned int flags;
unsigned char data_lanes[4];
unsigned char clock_lane;
@@ -41,88 +44,61 @@ struct v4l2_of_bus_mipi_csi2 {
};
/**
- * struct v4l2_of_bus_parallel - parallel data bus data structure
+ * struct v4l2_fwnode_bus_parallel - parallel data bus data structure
* @flags: media bus (V4L2_MBUS_*) flags
* @bus_width: bus width in bits
* @data_shift: data shift in bits
*/
-struct v4l2_of_bus_parallel {
+struct v4l2_fwnode_bus_parallel {
unsigned int flags;
unsigned char bus_width;
unsigned char data_shift;
};
/**
- * struct v4l2_of_endpoint - the endpoint data structure
- * @base: struct of_endpoint containing port, id, and local of_node
+ * struct v4l2_fwnode_endpoint - the endpoint data structure
+ * @base: fwnode endpoint of the v4l2_fwnode
* @bus_type: bus type
* @bus: bus configuration data structure
* @link_frequencies: array of supported link frequencies
* @nr_of_link_frequencies: number of elements in link_frequencies array
*/
-struct v4l2_of_endpoint {
- struct of_endpoint base;
- /* Fields below this line will be zeroed by v4l2_of_parse_endpoint() */
+struct v4l2_fwnode_endpoint {
+ struct fwnode_endpoint base;
+ /*
+ * Fields below this line will be zeroed by
+ * v4l2_fwnode_parse_endpoint()
+ */
enum v4l2_mbus_type bus_type;
union {
- struct v4l2_of_bus_parallel parallel;
- struct v4l2_of_bus_mipi_csi2 mipi_csi2;
+ struct v4l2_fwnode_bus_parallel parallel;
+ struct v4l2_fwnode_bus_mipi_csi2 mipi_csi2;
} bus;
u64 *link_frequencies;
unsigned int nr_of_link_frequencies;
};
/**
- * struct v4l2_of_link - a link between two endpoints
+ * struct v4l2_fwnode_link - a link between two endpoints
* @local_node: pointer to device_node of this endpoint
* @local_port: identifier of the port this endpoint belongs to
* @remote_node: pointer to device_node of the remote endpoint
* @remote_port: identifier of the port the remote endpoint belongs to
*/
-struct v4l2_of_link {
- struct device_node *local_node;
+struct v4l2_fwnode_link {
+ struct fwnode_handle *local_node;
unsigned int local_port;
- struct device_node *remote_node;
+ struct fwnode_handle *remote_node;
unsigned int remote_port;
};
-#ifdef CONFIG_OF
-int v4l2_of_parse_endpoint(const struct device_node *node,
- struct v4l2_of_endpoint *endpoint);
-struct v4l2_of_endpoint *v4l2_of_alloc_parse_endpoint(
- const struct device_node *node);
-void v4l2_of_free_endpoint(struct v4l2_of_endpoint *endpoint);
-int v4l2_of_parse_link(const struct device_node *node,
- struct v4l2_of_link *link);
-void v4l2_of_put_link(struct v4l2_of_link *link);
-#else /* CONFIG_OF */
-
-static inline int v4l2_of_parse_endpoint(const struct device_node *node,
- struct v4l2_of_endpoint *link)
-{
- return -ENOSYS;
-}
-
-static inline struct v4l2_of_endpoint *v4l2_of_alloc_parse_endpoint(
- const struct device_node *node)
-{
- return NULL;
-}
-
-static inline void v4l2_of_free_endpoint(struct v4l2_of_endpoint *endpoint)
-{
-}
-
-static inline int v4l2_of_parse_link(const struct device_node *node,
- struct v4l2_of_link *link)
-{
- return -ENOSYS;
-}
-
-static inline void v4l2_of_put_link(struct v4l2_of_link *link)
-{
-}
-
-#endif /* CONFIG_OF */
+int v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_endpoint *vep);
+struct v4l2_fwnode_endpoint *v4l2_fwnode_endpoint_alloc_parse(
+ struct fwnode_handle *fwnode);
+void v4l2_fwnode_endpoint_free(struct v4l2_fwnode_endpoint *vep);
+int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_link *link);
+void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link);
-#endif /* _V4L2_OF_H */
+#endif /* _V4L2_FWNODE_H */
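Drivers converting from the v4l2_of_* helpers keep the same structure but operate on fwnode handles; a hedged sketch of parsing a sensor's first endpoint (names are illustrative, error handling trimmed):

#include <linux/property.h>
#include <media/v4l2-fwnode.h>

static int parse_first_endpoint(struct device *dev)
{
        struct v4l2_fwnode_endpoint ep = { };
        struct fwnode_handle *endpoint;
        int ret;

        endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
        if (!endpoint)
                return -EINVAL;

        ret = v4l2_fwnode_endpoint_parse(endpoint, &ep);
        fwnode_handle_put(endpoint);
        if (ret)
                return ret;

        dev_info(dev, "endpoint with %u CSI-2 data lanes\n",
                 ep.bus.mipi_csi2.num_data_lanes);
        return 0;
}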
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 3ccd01bd245e..e157d5c9b224 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -437,6 +437,47 @@ static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
}
/**
+ * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready
+ * buffers
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @b: current buffer of type struct v4l2_m2m_buffer
+ */
+#define v4l2_m2m_for_each_dst_buf(m2m_ctx, b) \
+ list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list)
+
+/**
+ * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @b: current buffer of type struct v4l2_m2m_buffer
+ */
+#define v4l2_m2m_for_each_src_buf(m2m_ctx, b) \
+ list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list)
+
+/**
+ * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready
+ * buffers safely
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @b: current buffer of type struct v4l2_m2m_buffer
+ * @n: used as temporary storage
+ */
+#define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n) \
+ list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list)
+
+/**
+ * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready
+ * buffers safely
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @b: current buffer of type struct v4l2_m2m_buffer
+ * @n: used as temporary storage
+ */
+#define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n) \
+ list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list)
+
+/**
* v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
*
* @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
@@ -488,6 +529,57 @@ static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
}
+/**
+ * v4l2_m2m_buf_remove_by_buf() - remove a specific buffer from the list of
+ * ready buffers
+ *
+ * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
+ * @vbuf: the buffer to be removed
+ */
+void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
+ struct vb2_v4l2_buffer *vbuf);
+
+/**
+ * v4l2_m2m_src_buf_remove_by_buf() - remove a specific source buffer from the
+ * list of ready buffers
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @vbuf: the buffer to be removed
+ */
+static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf);
+}
+
+/**
+ * v4l2_m2m_dst_buf_remove_by_buf() - remove a specific destination buffer from
+ * the list of ready buffers
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @vbuf: the buffer to be removed
+ */
+static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf);
+}
+
+struct vb2_v4l2_buffer *
+v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx);
+
+static inline struct vb2_v4l2_buffer *
+v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
+{
+ return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx);
+}
+
+static inline struct vb2_v4l2_buffer *
+v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
+{
+ return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx);
+}
+
/* v4l2 ioctl helpers */
int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
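For illustration only (not part of this patch): a driver might combine the new iteration and removal helpers roughly as below. The function name and the sequence-number match are assumptions, and concurrent access to the ready queues is assumed to be serialized by the driver (e.g. under its job or device lock).

#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

/* Sketch: drop a specific ready source buffer, e.g. one that was flushed. */
static void example_drop_src_buf(struct v4l2_m2m_ctx *m2m_ctx, u32 sequence)
{
	struct v4l2_m2m_buffer *b, *n;

	v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n) {
		if (b->vb.sequence == sequence) {
			v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, &b->vb);
			break;
		}
	}
}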
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 0ab1c5df6fac..0f92ebd2d710 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -787,7 +787,8 @@ struct v4l2_subdev_platform_data {
* is attached.
* @devnode: subdev device node
* @dev: pointer to the physical device, if any
- * @of_node: The device_node of the subdev, usually the same as dev->of_node.
+ * @fwnode: The fwnode_handle of the subdev, usually the same as
+ * either dev->of_node->fwnode or dev->fwnode (whichever is non-NULL).
* @async_list: Links this subdev to a global subdev_list or @notifier->done
* list.
* @asd: Pointer to respective &struct v4l2_async_subdev.
@@ -818,15 +819,22 @@ struct v4l2_subdev {
void *host_priv;
struct video_device *devnode;
struct device *dev;
- struct device_node *of_node;
+ struct fwnode_handle *fwnode;
struct list_head async_list;
struct v4l2_async_subdev *asd;
struct v4l2_async_notifier *notifier;
struct v4l2_subdev_platform_data *pdata;
};
-#define media_entity_to_v4l2_subdev(ent) \
- container_of(ent, struct v4l2_subdev, entity)
+#define media_entity_to_v4l2_subdev(ent) \
+({ \
+ typeof(ent) __me_sd_ent = (ent); \
+ \
+ __me_sd_ent ? \
+ container_of(__me_sd_ent, struct v4l2_subdev, entity) : \
+ NULL; \
+})
+
#define vdev_to_v4l2_subdev(vdev) \
((struct v4l2_subdev *)video_get_drvdata(vdev))
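A side note on the macro change above (illustrative, not part of the patch): media_entity_to_v4l2_subdev() now evaluates its argument once and propagates NULL, so a caller no longer needs a separate NULL check before converting, for example:

#include <media/media-entity.h>
#include <media/v4l2-subdev.h>

/* Sketch: pad->entity may be NULL; the conversion now simply yields NULL.
 * The caller still has to know the entity really is embedded in a subdev. */
static struct v4l2_subdev *example_pad_to_subdev(struct media_pad *pad)
{
	return media_entity_to_v4l2_subdev(pad ? pad->entity : NULL);
}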
diff --git a/include/misc/cxllib.h b/include/misc/cxllib.h
new file mode 100644
index 000000000000..e5aa29f019a6
--- /dev/null
+++ b/include/misc/cxllib.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _MISC_CXLLIB_H
+#define _MISC_CXLLIB_H
+
+#include <linux/pci.h>
+#include <asm/reg.h>
+
+/*
+ * The cxl driver exports an in-kernel 'library' API which can be called by
+ * other drivers to help interact with an IBM XSL.
+ */
+
+/*
+ * Tells whether capi is supported on the PCIe slot where the
+ * device is seated
+ *
+ * Input:
+ * dev: device whose slot needs to be checked
+ * flags: 0 for the time being
+ */
+bool cxllib_slot_is_supported(struct pci_dev *dev, unsigned long flags);
+
+
+/*
+ * Returns the configuration parameters to be used by the XSL or device
+ *
+ * Input:
+ * dev: device, used to find PHB
+ * Output:
+ * struct cxllib_xsl_config:
+ * version
+ * capi BAR address, i.e. 0x2000000000000-0x2FFFFFFFFFFFF
+ * capi BAR size
+ * data send control (XSL_DSNCTL)
+ * dummy read address (XSL_DRA)
+ */
+#define CXL_XSL_CONFIG_VERSION1 1
+struct cxllib_xsl_config {
+ u32 version; /* format version for register encoding */
+ u32 log_bar_size;/* log size of the capi_window */
+ u64 bar_addr; /* address of the start of capi window */
+ u64 dsnctl; /* matches definition of XSL_DSNCTL */
+ u64 dra; /* real address that can be used for dummy read */
+};
+
+int cxllib_get_xsl_config(struct pci_dev *dev, struct cxllib_xsl_config *cfg);
+
+
+/*
+ * Activate capi for the pci host bridge associated with the device.
+ * Can be extended to deactivate once we know how to do it.
+ * Device must be ready to accept messages from the CAPP unit and
+ * respond accordingly (TLB invalidates, ...)
+ *
+ * PHB is switched to capi mode through calls to skiboot.
+ * CAPP snooping is activated
+ *
+ * Input:
+ * dev: device whose PHB should switch mode
+ * mode: mode to switch to i.e. CAPI or PCI
+ * flags: options related to the mode
+ */
+enum cxllib_mode {
+ CXL_MODE_CXL,
+ CXL_MODE_PCI,
+};
+
+#define CXL_MODE_NO_DMA 0
+#define CXL_MODE_DMA_TVT0 1
+#define CXL_MODE_DMA_TVT1 2
+
+int cxllib_switch_phb_mode(struct pci_dev *dev, enum cxllib_mode mode,
+ unsigned long flags);
+
+
+/*
+ * Set the device for capi DMA.
+ * Define its dma_ops and dma offset so that allocations will be using TVT#1
+ *
+ * Input:
+ * dev: device to set
+ * flags: options. CXL_MODE_DMA_TVT1 should be used
+ */
+int cxllib_set_device_dma(struct pci_dev *dev, unsigned long flags);
+
+
+/*
+ * Get the Process Element structure for the given thread
+ *
+ * Input:
+ * task: task_struct for the context of the translation
+ * translation_mode: whether addresses should be translated
+ * Output:
+ * attr: attributes to fill up the Process Element structure from CAIA
+ */
+struct cxllib_pe_attributes {
+ u64 sr;
+ u32 lpid;
+ u32 tid;
+ u32 pid;
+};
+#define CXL_TRANSLATED_MODE 0
+#define CXL_REAL_MODE 1
+
+int cxllib_get_PE_attributes(struct task_struct *task,
+ unsigned long translation_mode, struct cxllib_pe_attributes *attr);
+
+
+/*
+ * Handle memory fault.
+ * Fault in all the pages of the specified buffer for the permissions
+ * provided in 'flags'
+ *
+ * Shouldn't be called from interrupt context
+ *
+ * Input:
+ * mm: struct mm for the thread faulting the pages
+ * addr: base address of the buffer to page in
+ * size: size of the buffer to page in
+ * flags: permission requested (DSISR_ISSTORE...)
+ */
+int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags);
+
+
+#endif /* _MISC_CXLLIB_H */
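For illustration only (not part of the new header): a client driver might chain these calls roughly as follows. The flag choices, the register-programming placeholder and the error handling are assumptions, not requirements taken from the cxl documentation.

#include <linux/errno.h>
#include <linux/pci.h>
#include <misc/cxllib.h>

/* Sketch: check the slot, fetch the XSL configuration, then switch the PHB
 * to CAPI mode and route DMA through TVT#1. */
static int example_enable_capi(struct pci_dev *pdev)
{
	struct cxllib_xsl_config cfg;
	int rc;

	if (!cxllib_slot_is_supported(pdev, 0))
		return -ENODEV;

	rc = cxllib_get_xsl_config(pdev, &cfg);
	if (rc)
		return rc;

	/* Program cfg.dsnctl, cfg.dra and cfg.bar_addr into the device here. */

	rc = cxllib_switch_phb_mode(pdev, CXL_MODE_CXL, 0);
	if (rc)
		return rc;

	return cxllib_set_device_dma(pdev, CXL_MODE_DMA_TVT1);
}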
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 440c1e9d0623..6fdcd2427776 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -154,12 +154,12 @@ static inline int frag_mem_limit(struct netns_frags *nf)
static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
{
- __percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
+ percpu_counter_add_batch(&nf->mem, -i, frag_percpu_counter_batch);
}
static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
{
- __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
+ percpu_counter_add_batch(&nf->mem, i, frag_percpu_counter_batch);
}
static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
diff --git a/include/net/sock.h b/include/net/sock.h
index 48e4d5c38f85..8c85791fc196 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1697,6 +1697,7 @@ static inline void sock_orphan(struct sock *sk)
static inline void sock_graft(struct sock *sk, struct socket *parent)
{
+ WARN_ON(parent->sk);
write_lock_bh(&sk->sk_callback_lock);
sk->sk_wq = parent->wq;
parent->sk = sk;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 0e480a5630d4..356953d3dbd1 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1968,6 +1968,9 @@ struct rdma_netdev {
struct ib_device *hca;
u8 port_num;
+ /* cleanup function must be specified */
+ void (*free_rdma_netdev)(struct net_device *netdev);
+
/* control functions */
void (*set_id)(struct net_device *netdev, int id);
/* send packet */
@@ -2243,7 +2246,7 @@ struct ib_device {
struct ib_udata *udata);
int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
/**
- * rdma netdev operations
+ * rdma netdev operation
*
* Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
* doesn't support the specified rdma netdev type.
@@ -2255,7 +2258,6 @@ struct ib_device {
const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *));
- void (*free_rdma_netdev)(struct net_device *netdev);
struct module *owner;
struct device dev;
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index dd0f72c95abe..cfaeed256ab2 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -415,9 +415,9 @@ struct sas_ha_struct {
* their siblings when forming wide ports */
/* LLDD calls these to notify the class of an event. */
- void (*notify_ha_event)(struct sas_ha_struct *, enum ha_event);
- void (*notify_port_event)(struct asd_sas_phy *, enum port_event);
- void (*notify_phy_event)(struct asd_sas_phy *, enum phy_event);
+ int (*notify_ha_event)(struct sas_ha_struct *, enum ha_event);
+ int (*notify_port_event)(struct asd_sas_phy *, enum port_event);
+ int (*notify_phy_event)(struct asd_sas_phy *, enum phy_event);
void *lldd_ha; /* not touched by sas class code */
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index da9bf2bcdf1a..a1266d318c85 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -56,6 +56,7 @@ struct scsi_pointer {
/* for scmd->flags */
#define SCMD_TAGGED (1 << 0)
+#define SCMD_UNCHECKED_ISA_DMA (1 << 1)
struct scsi_cmnd {
struct scsi_request req;
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 05641aebd181..0979a5f3b69a 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -176,12 +176,12 @@ struct scsi_device {
unsigned no_read_disc_info:1; /* Avoid READ_DISC_INFO cmds */
unsigned no_read_capacity_16:1; /* Avoid READ_CAPACITY_16 cmds */
unsigned try_rc_10_first:1; /* Try READ_CAPACITY_10 first */
+ unsigned security_supported:1; /* Supports Security Protocols */
unsigned is_visible:1; /* is the device visible in sysfs */
unsigned wce_default_on:1; /* Cache is ON by default */
unsigned no_dif:1; /* T10 PI (DIF) should be disabled */
unsigned broken_fua:1; /* Don't set FUA bit */
unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */
- unsigned synchronous_alua:1; /* Synchronous ALUA commands */
atomic_t disk_events_disable_depth; /* disable depth for disk events */
@@ -207,6 +207,7 @@ struct scsi_device {
void *handler_data;
unsigned char access_state;
+ struct mutex state_mutex;
enum scsi_device_state sdev_state;
unsigned long sdev_data[0];
} __attribute__((aligned(sizeof(unsigned long))));
@@ -248,6 +249,7 @@ enum scsi_target_state {
STARGET_CREATED = 1,
STARGET_RUNNING,
STARGET_REMOVE,
+ STARGET_CREATED_REMOVE,
STARGET_DEL,
};
@@ -472,9 +474,9 @@ static inline int scsi_device_created(struct scsi_device *sdev)
sdev->sdev_state == SDEV_CREATED_BLOCK;
}
-int scsi_internal_device_block(struct scsi_device *sdev, bool wait);
-int scsi_internal_device_unblock(struct scsi_device *sdev,
- enum scsi_device_state new_state);
+int scsi_internal_device_block_nowait(struct scsi_device *sdev);
+int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
+ enum scsi_device_state new_state);
/* accessor functions for the SCSI parameters */
static inline int scsi_device_sync(struct scsi_device *sdev)
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 9f750cb63b03..9592570e092a 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -15,12 +15,7 @@
#define BLIST_ISROM 0x100 /* Treat as (removable) CD-ROM */
#define BLIST_LARGELUN 0x200 /* LUNs past 7 on a SCSI-2 device */
#define BLIST_INQUIRY_36 0x400 /* override additional length field */
-#define BLIST_INQUIRY_58 0x800 /* ... for broken inquiry responses */
#define BLIST_NOSTARTONADD 0x1000 /* do not do automatic start on add */
-#define BLIST_MS_SKIP_PAGE_08 0x2000 /* do not send ms page 0x08 */
-#define BLIST_MS_SKIP_PAGE_3F 0x4000 /* do not send ms page 0x3f */
-#define BLIST_USE_10_BYTE_MS 0x8000 /* use 10 byte ms before 6 byte ms */
-#define BLIST_MS_192_BYTES_FOR_3F 0x10000 /* 192 byte ms page 0x3f request */
#define BLIST_REPORTLUN2 0x20000 /* try REPORT_LUNS even for SCSI-2 devs
(if HBA supports more than 8 LUNs) */
#define BLIST_NOREPORTLUN 0x40000 /* don't try REPORT_LUNS scan (SCSI-3 devs) */
@@ -29,14 +24,10 @@
#define BLIST_SELECT_NO_ATN 0x200000 /* select without ATN */
#define BLIST_RETRY_HWERROR 0x400000 /* retry HARDWARE_ERROR */
#define BLIST_MAX_512 0x800000 /* maximum 512 sector cdb length */
-#define BLIST_ATTACH_PQ3 0x1000000 /* Scan: Attach to PQ3 devices */
#define BLIST_NO_DIF 0x2000000 /* Disable T10 PI (DIF) */
#define BLIST_SKIP_VPD_PAGES 0x4000000 /* Ignore SBC-3 VPD pages */
-#define BLIST_SCSI3LUN 0x8000000 /* Scan more than 256 LUNs
- for sequential scan */
#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */
-#define BLIST_SYNC_ALUA 0x80000000 /* Synchronous ALUA commands */
#endif
diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h
index ce78ec8e367d..8260700d662b 100644
--- a/include/scsi/scsi_proto.h
+++ b/include/scsi/scsi_proto.h
@@ -125,9 +125,6 @@
#define SAI_READ_CAPACITY_16 0x10
#define SAI_GET_LBA_STATUS 0x12
#define SAI_REPORT_REFERRALS 0x13
-/* values for VARIABLE_LENGTH_CMD service action codes
- * see spc4r17 Section D.3.5, table D.7 and D.8 */
-#define VLC_SA_RECEIVE_CREDENTIAL 0x1800
/* values for maintenance in */
#define MI_REPORT_IDENTIFYING_INFORMATION 0x05
#define MI_REPORT_TARGET_PGS 0x0a
@@ -162,6 +159,7 @@
#define VERIFY_32 0x0a
#define WRITE_32 0x0b
#define WRITE_SAME_32 0x0d
+#define ATA_32 0x1ff0
/* Values for T10/04-262r7 */
#define ATA_16 0x85 /* 16-byte pass-thru */
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 6e208bb32c78..e308cd59e556 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -658,10 +658,6 @@ struct fc_function_template {
int (*vport_disable)(struct fc_vport *, bool);
int (*vport_delete)(struct fc_vport *);
- /* target-mode drivers' functions */
- int (* tsk_mgmt_response)(struct Scsi_Host *, u64, u64, int);
- int (* it_nexus_response)(struct Scsi_Host *, u64, int);
-
/* bsg support */
int (*bsg_request)(struct bsg_job *);
int (*bsg_timeout)(struct bsg_job *);
diff --git a/include/sound/ak4113.h b/include/sound/ak4113.h
index 58c145620c3c..b2d09fd09559 100644
--- a/include/sound/ak4113.h
+++ b/include/sound/ak4113.h
@@ -281,6 +281,14 @@ typedef void (ak4113_write_t)(void *private_data, unsigned char addr,
unsigned char data);
typedef unsigned char (ak4113_read_t)(void *private_data, unsigned char addr);
+enum {
+ AK4113_PARITY_ERRORS,
+ AK4113_V_BIT_ERRORS,
+ AK4113_QCRC_ERRORS,
+ AK4113_CCRC_ERRORS,
+ AK4113_NUM_ERRORS
+};
+
struct ak4113 {
struct snd_card *card;
ak4113_write_t *write;
@@ -292,10 +300,7 @@ struct ak4113 {
unsigned char regmap[AK4113_WRITABLE_REGS];
struct snd_kcontrol *kctls[AK4113_CONTROLS];
struct snd_pcm_substream *substream;
- unsigned long parity_errors;
- unsigned long v_bit_errors;
- unsigned long qcrc_errors;
- unsigned long ccrc_errors;
+ unsigned long errors[AK4113_NUM_ERRORS];
unsigned char rcs0;
unsigned char rcs1;
unsigned char rcs2;
diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h
index b6feb7e225f2..39df064c82fc 100644
--- a/include/sound/ak4114.h
+++ b/include/sound/ak4114.h
@@ -163,6 +163,14 @@
typedef void (ak4114_write_t)(void *private_data, unsigned char addr, unsigned char data);
typedef unsigned char (ak4114_read_t)(void *private_data, unsigned char addr);
+enum {
+ AK4114_PARITY_ERRORS,
+ AK4114_V_BIT_ERRORS,
+ AK4114_QCRC_ERRORS,
+ AK4114_CCRC_ERRORS,
+ AK4114_NUM_ERRORS
+};
+
struct ak4114 {
struct snd_card *card;
ak4114_write_t * write;
@@ -176,10 +184,7 @@ struct ak4114 {
struct snd_kcontrol *kctls[AK4114_CONTROLS];
struct snd_pcm_substream *playback_substream;
struct snd_pcm_substream *capture_substream;
- unsigned long parity_errors;
- unsigned long v_bit_errors;
- unsigned long qcrc_errors;
- unsigned long ccrc_errors;
+ unsigned long errors[AK4114_NUM_ERRORS];
unsigned char rcs0;
unsigned char rcs1;
struct delayed_work work;
diff --git a/include/sound/ak4117.h b/include/sound/ak4117.h
index 1e8178171baf..5fab517cfe46 100644
--- a/include/sound/ak4117.h
+++ b/include/sound/ak4117.h
@@ -155,6 +155,14 @@
typedef void (ak4117_write_t)(void *private_data, unsigned char addr, unsigned char data);
typedef unsigned char (ak4117_read_t)(void *private_data, unsigned char addr);
+enum {
+ AK4117_PARITY_ERRORS,
+ AK4117_V_BIT_ERRORS,
+ AK4117_QCRC_ERRORS,
+ AK4117_CCRC_ERRORS,
+ AK4117_NUM_ERRORS
+};
+
struct ak4117 {
struct snd_card *card;
ak4117_write_t * write;
@@ -165,10 +173,7 @@ struct ak4117 {
unsigned char regmap[5];
struct snd_kcontrol *kctls[AK4117_CONTROLS];
struct snd_pcm_substream *substream;
- unsigned long parity_errors;
- unsigned long v_bit_errors;
- unsigned long qcrc_errors;
- unsigned long ccrc_errors;
+ unsigned long errors[AK4117_NUM_ERRORS];
unsigned char rcs0;
unsigned char rcs1;
unsigned char rcs2;
diff --git a/include/sound/core.h b/include/sound/core.h
index f7d8c10c4c45..55385588eefa 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -142,7 +142,7 @@ struct snd_card {
wait_queue_head_t power_sleep;
#endif
-#if defined(CONFIG_SND_MIXER_OSS) || defined(CONFIG_SND_MIXER_OSS_MODULE)
+#if IS_ENABLED(CONFIG_SND_MIXER_OSS)
struct snd_mixer_oss *mixer_oss;
int mixer_oss_change_count;
#endif
@@ -243,7 +243,7 @@ int copy_from_user_toio(volatile void __iomem *dst, const void __user *src, size
extern struct snd_card *snd_cards[SNDRV_CARDS];
int snd_card_locked(int card);
-#if defined(CONFIG_SND_MIXER_OSS) || defined(CONFIG_SND_MIXER_OSS_MODULE)
+#if IS_ENABLED(CONFIG_SND_MIXER_OSS)
#define SND_MIXER_OSS_NOTIFY_REGISTER 0
#define SND_MIXER_OSS_NOTIFY_DISCONNECT 1
#define SND_MIXER_OSS_NOTIFY_FREE 2
@@ -394,7 +394,7 @@ static inline void snd_printdd(const char *format, ...) {}
#define SNDRV_OSS_VERSION ((3<<16)|(8<<8)|(1<<4)|(0)) /* 3.8.1a */
/* for easier backward-porting */
-#if defined(CONFIG_GAMEPORT) || defined(CONFIG_GAMEPORT_MODULE)
+#if IS_ENABLED(CONFIG_GAMEPORT)
#define gameport_set_dev_parent(gp,xdev) ((gp)->dev.parent = (xdev))
#define gameport_set_port_data(gp,r) ((gp)->port_data = (r))
#define gameport_get_port_data(gp) (gp)->port_data
diff --git a/include/sound/cs35l35.h b/include/sound/cs35l35.h
index 29da899e17e4..d69cd7847afd 100644
--- a/include/sound/cs35l35.h
+++ b/include/sound/cs35l35.h
@@ -99,6 +99,8 @@ struct cs35l35_platform_data {
bool shared_bst;
/* Specifies this amp is using an external boost supply */
bool ext_bst;
+ /* Inductor Value */
+ int boost_ind;
/* ClassH Algorithm */
struct classh_cfg classh_algo;
/* Monitor Config */
diff --git a/include/sound/designware_i2s.h b/include/sound/designware_i2s.h
index 5681855396c4..830f5caa915c 100644
--- a/include/sound/designware_i2s.h
+++ b/include/sound/designware_i2s.h
@@ -47,6 +47,7 @@ struct i2s_platform_data {
#define DW_I2S_QUIRK_COMP_REG_OFFSET (1 << 0)
#define DW_I2S_QUIRK_COMP_PARAM1 (1 << 1)
+ #define DW_I2S_QUIRK_16BIT_IDX_OVERRIDE (1 << 2)
unsigned int quirks;
unsigned int i2s_reg_comp1;
unsigned int i2s_reg_comp2;
diff --git a/include/sound/emux_synth.h b/include/sound/emux_synth.h
index a0a40b74bf13..19a0cb561ffc 100644
--- a/include/sound/emux_synth.h
+++ b/include/sound/emux_synth.h
@@ -25,9 +25,7 @@
#include <sound/seq_device.h>
#include <sound/soundfont.h>
#include <sound/seq_midi_emul.h>
-#ifdef CONFIG_SND_SEQUENCER_OSS
#include <sound/seq_oss.h>
-#endif
#include <sound/emux_legacy.h>
#include <sound/seq_virmidi.h>
@@ -66,7 +64,7 @@ struct snd_emux_operators {
const void __user *data, long count);
void (*sysex)(struct snd_emux *emu, char *buf, int len, int parsed,
struct snd_midi_channel_set *chset);
-#ifdef CONFIG_SND_SEQUENCER_OSS
+#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
int (*oss_ioctl)(struct snd_emux *emu, int cmd, int p1, int p2);
#endif
};
@@ -129,7 +127,7 @@ struct snd_emux {
struct snd_info_entry *proc;
#endif
-#ifdef CONFIG_SND_SEQUENCER_OSS
+#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
struct snd_seq_device *oss_synth;
#endif
};
@@ -150,7 +148,7 @@ struct snd_emux_port {
#ifdef SNDRV_EMUX_USE_RAW_EFFECT
struct snd_emux_effect_table *effect;
#endif
-#ifdef CONFIG_SND_SEQUENCER_OSS
+#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
struct snd_seq_oss_arg *oss_arg;
#endif
};
diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h
index 915c4357945c..9483c55f871b 100644
--- a/include/sound/hdmi-codec.h
+++ b/include/sound/hdmi-codec.h
@@ -18,9 +18,11 @@
#ifndef __HDMI_CODEC_H__
#define __HDMI_CODEC_H__
+#include <linux/of_graph.h>
#include <linux/hdmi.h>
#include <drm/drm_edid.h>
#include <sound/asoundef.h>
+#include <sound/soc.h>
#include <uapi/sound/asound.h>
/*
@@ -87,6 +89,13 @@ struct hdmi_codec_ops {
*/
int (*get_eld)(struct device *dev, void *data,
uint8_t *buf, size_t len);
+
+ /*
+ * Getting DAI ID
+ * Optional
+ */
+ int (*get_dai_id)(struct snd_soc_component *component,
+ struct device_node *endpoint);
};
/* HDMI codec initialization data */
diff --git a/include/sound/mixer_oss.h b/include/sound/mixer_oss.h
index 13cb0b430a1b..930da10fb65b 100644
--- a/include/sound/mixer_oss.h
+++ b/include/sound/mixer_oss.h
@@ -22,7 +22,7 @@
*
*/
-#if defined(CONFIG_SND_MIXER_OSS) || defined(CONFIG_SND_MIXER_OSS_MODULE)
+#if IS_ENABLED(CONFIG_SND_MIXER_OSS)
#define SNDRV_OSS_MAX_MIXERS 32
diff --git a/include/sound/opl3.h b/include/sound/opl3.h
index 6ba670707831..a4a593590cff 100644
--- a/include/sound/opl3.h
+++ b/include/sound/opl3.h
@@ -55,10 +55,8 @@
#include <sound/hwdep.h>
#include <sound/timer.h>
#include <sound/seq_midi_emul.h>
-#ifdef CONFIG_SND_SEQUENCER_OSS
#include <sound/seq_oss.h>
#include <sound/seq_oss_legacy.h>
-#endif
#include <sound/seq_device.h>
#include <sound/asound_fm.h>
@@ -321,7 +319,7 @@ struct snd_opl3 {
unsigned char fm_mode; /* OPL mode, see SNDRV_DM_FM_MODE_XXX */
unsigned char rhythm; /* percussion mode flag */
unsigned char max_voices; /* max number of voices */
-#if defined(CONFIG_SND_SEQUENCER) || defined(CONFIG_SND_SEQUENCER_MODULE)
+#if IS_ENABLED(CONFIG_SND_SEQUENCER)
#define SNDRV_OPL3_MODE_SYNTH 0 /* OSS - voices allocated by application */
#define SNDRV_OPL3_MODE_SEQ 1 /* ALSA - driver handles voice allocation */
int synth_mode; /* synth mode */
@@ -330,7 +328,7 @@ struct snd_opl3 {
struct snd_seq_device *seq_dev; /* sequencer device */
struct snd_midi_channel_set * chset;
-#ifdef CONFIG_SND_SEQUENCER_OSS
+#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
struct snd_seq_device *oss_seq_dev; /* OSS sequencer device */
struct snd_midi_channel_set * oss_chset;
#endif
@@ -374,7 +372,7 @@ int snd_opl3_release(struct snd_hwdep * hw, struct file *file);
void snd_opl3_reset(struct snd_opl3 * opl3);
-#if defined(CONFIG_SND_SEQUENCER) || defined(CONFIG_SND_SEQUENCER_MODULE)
+#if IS_ENABLED(CONFIG_SND_SEQUENCER)
long snd_opl3_write(struct snd_hwdep *hw, const char __user *buf, long count,
loff_t *offset);
int snd_opl3_load_patch(struct snd_opl3 *opl3,
diff --git a/include/sound/pcm-indirect.h b/include/sound/pcm-indirect.h
index 1df7acaaa535..7ade285328cf 100644
--- a/include/sound/pcm-indirect.h
+++ b/include/sound/pcm-indirect.h
@@ -43,7 +43,7 @@ typedef void (*snd_pcm_indirect_copy_t)(struct snd_pcm_substream *substream,
/*
* helper function for playback ack callback
*/
-static inline void
+static inline int
snd_pcm_indirect_playback_transfer(struct snd_pcm_substream *substream,
struct snd_pcm_indirect *rec,
snd_pcm_indirect_copy_t copy)
@@ -56,6 +56,8 @@ snd_pcm_indirect_playback_transfer(struct snd_pcm_substream *substream,
if (diff) {
if (diff < -(snd_pcm_sframes_t) (runtime->boundary / 2))
diff += runtime->boundary;
+ if (diff < 0)
+ return -EINVAL;
rec->sw_ready += (int)frames_to_bytes(runtime, diff);
rec->appl_ptr = appl_ptr;
}
@@ -82,6 +84,7 @@ snd_pcm_indirect_playback_transfer(struct snd_pcm_substream *substream,
rec->hw_ready += bytes;
rec->sw_ready -= bytes;
}
+ return 0;
}
/*
@@ -109,7 +112,7 @@ snd_pcm_indirect_playback_pointer(struct snd_pcm_substream *substream,
/*
* helper function for capture ack callback
*/
-static inline void
+static inline int
snd_pcm_indirect_capture_transfer(struct snd_pcm_substream *substream,
struct snd_pcm_indirect *rec,
snd_pcm_indirect_copy_t copy)
@@ -121,6 +124,8 @@ snd_pcm_indirect_capture_transfer(struct snd_pcm_substream *substream,
if (diff) {
if (diff < -(snd_pcm_sframes_t) (runtime->boundary / 2))
diff += runtime->boundary;
+ if (diff < 0)
+ return -EINVAL;
rec->sw_ready -= frames_to_bytes(runtime, diff);
rec->appl_ptr = appl_ptr;
}
@@ -147,6 +152,7 @@ snd_pcm_indirect_capture_transfer(struct snd_pcm_substream *substream,
rec->hw_ready -= bytes;
rec->sw_ready += bytes;
}
+ return 0;
}
/*
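For illustration only (not from the patch): since the transfer helpers can now fail, a driver's .ack callback would typically just propagate their return value. The chip structure and copy routine below are assumptions.

#include <sound/pcm.h>
#include <sound/pcm-indirect.h>

struct example_chip {
	struct snd_pcm_indirect pcm_rec;
	/* ...hardware state... */
};

static void example_playback_copy(struct snd_pcm_substream *substream,
				  struct snd_pcm_indirect *rec, size_t bytes)
{
	/* push 'bytes' bytes from the intermediate buffer to the hardware */
}

/* Sketch: .ack callback propagating the helper's new error code. */
static int example_playback_ack(struct snd_pcm_substream *substream)
{
	struct example_chip *chip = snd_pcm_substream_chip(substream);

	return snd_pcm_indirect_playback_transfer(substream, &chip->pcm_rec,
						  example_playback_copy);
}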
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 361749e60799..24febf9e177c 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -34,7 +34,7 @@
#define snd_pcm_substream_chip(substream) ((substream)->private_data)
#define snd_pcm_chip(pcm) ((pcm)->private_data)
-#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
+#if IS_ENABLED(CONFIG_SND_PCM_OSS)
#include <sound/pcm_oss.h>
#endif
@@ -78,11 +78,13 @@ struct snd_pcm_ops {
struct timespec *system_ts, struct timespec *audio_ts,
struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
struct snd_pcm_audio_tstamp_report *audio_tstamp_report);
- int (*copy)(struct snd_pcm_substream *substream, int channel,
- snd_pcm_uframes_t pos,
- void __user *buf, snd_pcm_uframes_t count);
- int (*silence)(struct snd_pcm_substream *substream, int channel,
- snd_pcm_uframes_t pos, snd_pcm_uframes_t count);
+ int (*fill_silence)(struct snd_pcm_substream *substream, int channel,
+ unsigned long pos, unsigned long bytes);
+ int (*copy_user)(struct snd_pcm_substream *substream, int channel,
+ unsigned long pos, void __user *buf,
+ unsigned long bytes);
+ int (*copy_kernel)(struct snd_pcm_substream *substream, int channel,
+ unsigned long pos, void *buf, unsigned long bytes);
struct page *(*page)(struct snd_pcm_substream *substream,
unsigned long offset);
int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
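For illustration only (not from the patch): with the frame-based copy/silence callbacks replaced by byte-based ones, a driver's ops table now looks roughly like the sketch below; the callback bodies are placeholders and the other ops are elided.

#include <sound/pcm.h>

/* Sketch: byte-based replacements for the old copy/silence callbacks. */
static int example_copy_user(struct snd_pcm_substream *substream, int channel,
			     unsigned long pos, void __user *buf,
			     unsigned long bytes)
{
	/* copy 'bytes' bytes from 'buf' into the hardware buffer at 'pos' */
	return 0;
}

static int example_fill_silence(struct snd_pcm_substream *substream,
				int channel, unsigned long pos,
				unsigned long bytes)
{
	/* clear 'bytes' bytes of the hardware buffer starting at 'pos' */
	return 0;
}

static const struct snd_pcm_ops example_pcm_ops = {
	/* ...open/close/ioctl/hw_params/trigger/pointer as before... */
	.copy_user	= example_copy_user,
	.fill_silence	= example_fill_silence,
};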
@@ -100,9 +102,9 @@ struct snd_pcm_ops {
#endif
#define SNDRV_PCM_IOCTL1_RESET 0
-#define SNDRV_PCM_IOCTL1_INFO 1
+/* 1 is absent slot. */
#define SNDRV_PCM_IOCTL1_CHANNEL_INFO 2
-#define SNDRV_PCM_IOCTL1_GSTATE 3
+/* 3 is absent slot. */
#define SNDRV_PCM_IOCTL1_FIFO_SIZE 4
#define SNDRV_PCM_TRIGGER_STOP 0
@@ -216,6 +218,7 @@ struct snd_pcm_ops {
struct snd_pcm_file {
struct snd_pcm_substream *substream;
int no_compat_mmap;
+ unsigned int user_pversion; /* supported protocol version */
};
struct snd_pcm_hw_rule;
@@ -418,7 +421,7 @@ struct snd_pcm_runtime {
struct snd_pcm_audio_tstamp_report audio_tstamp_report;
struct timespec driver_tstamp;
-#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
+#if IS_ENABLED(CONFIG_SND_PCM_OSS)
/* -- OSS things -- */
struct snd_pcm_oss_runtime oss;
#endif
@@ -464,7 +467,7 @@ struct snd_pcm_substream {
unsigned int f_flags;
void (*pcm_release)(struct snd_pcm_substream *);
struct pid *pid;
-#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
+#if IS_ENABLED(CONFIG_SND_PCM_OSS)
/* -- OSS things -- */
struct snd_pcm_oss_substream oss;
#endif
@@ -494,7 +497,7 @@ struct snd_pcm_str {
unsigned int substream_count;
unsigned int substream_opened;
struct snd_pcm_substream *substream;
-#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
+#if IS_ENABLED(CONFIG_SND_PCM_OSS)
/* -- OSS things -- */
struct snd_pcm_oss_stream oss;
#endif
@@ -526,18 +529,11 @@ struct snd_pcm {
void (*private_free) (struct snd_pcm *pcm);
bool internal; /* pcm is for internal use only */
bool nonatomic; /* whole PCM operations are in non-atomic context */
-#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
+#if IS_ENABLED(CONFIG_SND_PCM_OSS)
struct snd_pcm_oss oss;
#endif
};
-struct snd_pcm_notify {
- int (*n_register) (struct snd_pcm * pcm);
- int (*n_disconnect) (struct snd_pcm * pcm);
- int (*n_unregister) (struct snd_pcm * pcm);
- struct list_head list;
-};
-
/*
* Registering
*/
@@ -552,7 +548,15 @@ int snd_pcm_new_internal(struct snd_card *card, const char *id, int device,
struct snd_pcm **rpcm);
int snd_pcm_new_stream(struct snd_pcm *pcm, int stream, int substream_count);
+#if IS_ENABLED(CONFIG_SND_PCM_OSS)
+struct snd_pcm_notify {
+ int (*n_register) (struct snd_pcm * pcm);
+ int (*n_disconnect) (struct snd_pcm * pcm);
+ int (*n_unregister) (struct snd_pcm * pcm);
+ struct list_head list;
+};
int snd_pcm_notify(struct snd_pcm_notify *notify, int nfree);
+#endif
/*
* Native I/O
@@ -968,12 +972,6 @@ static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p
}
int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v);
-void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c);
-void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c);
-void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
- unsigned int k, struct snd_interval *c);
-void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
- const struct snd_interval *b, struct snd_interval *c);
int snd_interval_list(struct snd_interval *i, unsigned int count,
const unsigned int *list, unsigned int mask);
int snd_interval_ranges(struct snd_interval *i, unsigned int count,
@@ -984,15 +982,9 @@ int snd_interval_ratnum(struct snd_interval *i,
void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params);
void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var);
-int snd_pcm_hw_params_choose(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params);
int snd_pcm_hw_refine(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params);
-int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream);
-int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream);
-
-int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
- u_int32_t mask);
int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
u_int64_t mask);
int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
@@ -1054,7 +1046,7 @@ int snd_pcm_format_unsigned(snd_pcm_format_t format);
int snd_pcm_format_linear(snd_pcm_format_t format);
int snd_pcm_format_little_endian(snd_pcm_format_t format);
int snd_pcm_format_big_endian(snd_pcm_format_t format);
-#if 0 /* just for DocBook */
+#if 0 /* just for kernel-doc */
/**
* snd_pcm_format_cpu_endian - Check the PCM format is CPU-endian
* @format: the format to check
@@ -1080,22 +1072,66 @@ void snd_pcm_set_ops(struct snd_pcm * pcm, int direction,
void snd_pcm_set_sync(struct snd_pcm_substream *substream);
int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg);
-int snd_pcm_update_state(struct snd_pcm_substream *substream,
- struct snd_pcm_runtime *runtime);
-int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream);
-void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr);
void snd_pcm_period_elapsed(struct snd_pcm_substream *substream);
-snd_pcm_sframes_t snd_pcm_lib_write(struct snd_pcm_substream *substream,
- const void __user *buf,
- snd_pcm_uframes_t frames);
-snd_pcm_sframes_t snd_pcm_lib_read(struct snd_pcm_substream *substream,
- void __user *buf, snd_pcm_uframes_t frames);
-snd_pcm_sframes_t snd_pcm_lib_writev(struct snd_pcm_substream *substream,
- void __user **bufs, snd_pcm_uframes_t frames);
-snd_pcm_sframes_t snd_pcm_lib_readv(struct snd_pcm_substream *substream,
- void __user **bufs, snd_pcm_uframes_t frames);
-
-extern const struct snd_pcm_hw_constraint_list snd_pcm_known_rates;
+snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
+ void *buf, bool interleaved,
+ snd_pcm_uframes_t frames, bool in_kernel);
+
+static inline snd_pcm_sframes_t
+snd_pcm_lib_write(struct snd_pcm_substream *substream,
+ const void __user *buf, snd_pcm_uframes_t frames)
+{
+ return __snd_pcm_lib_xfer(substream, (void *)buf, true, frames, false);
+}
+
+static inline snd_pcm_sframes_t
+snd_pcm_lib_read(struct snd_pcm_substream *substream,
+ void __user *buf, snd_pcm_uframes_t frames)
+{
+ return __snd_pcm_lib_xfer(substream, (void *)buf, true, frames, false);
+}
+
+static inline snd_pcm_sframes_t
+snd_pcm_lib_writev(struct snd_pcm_substream *substream,
+ void __user **bufs, snd_pcm_uframes_t frames)
+{
+ return __snd_pcm_lib_xfer(substream, (void *)bufs, false, frames, false);
+}
+
+static inline snd_pcm_sframes_t
+snd_pcm_lib_readv(struct snd_pcm_substream *substream,
+ void __user **bufs, snd_pcm_uframes_t frames)
+{
+ return __snd_pcm_lib_xfer(substream, (void *)bufs, false, frames, false);
+}
+
+static inline snd_pcm_sframes_t
+snd_pcm_kernel_write(struct snd_pcm_substream *substream,
+ const void *buf, snd_pcm_uframes_t frames)
+{
+ return __snd_pcm_lib_xfer(substream, (void *)buf, true, frames, true);
+}
+
+static inline snd_pcm_sframes_t
+snd_pcm_kernel_read(struct snd_pcm_substream *substream,
+ void *buf, snd_pcm_uframes_t frames)
+{
+ return __snd_pcm_lib_xfer(substream, buf, true, frames, true);
+}
+
+static inline snd_pcm_sframes_t
+snd_pcm_kernel_writev(struct snd_pcm_substream *substream,
+ void **bufs, snd_pcm_uframes_t frames)
+{
+ return __snd_pcm_lib_xfer(substream, bufs, false, frames, true);
+}
+
+static inline snd_pcm_sframes_t
+snd_pcm_kernel_readv(struct snd_pcm_substream *substream,
+ void **bufs, snd_pcm_uframes_t frames)
+{
+ return __snd_pcm_lib_xfer(substream, bufs, false, frames, true);
+}
int snd_pcm_limit_hw_rates(struct snd_pcm_runtime *runtime);
unsigned int snd_pcm_rate_to_rate_bit(unsigned int rate);
@@ -1130,20 +1166,6 @@ static inline void snd_pcm_set_runtime_buffer(struct snd_pcm_substream *substrea
}
}
-/*
- * Timer interface
- */
-
-#ifdef CONFIG_SND_PCM_TIMER
-void snd_pcm_timer_resolution_change(struct snd_pcm_substream *substream);
-void snd_pcm_timer_init(struct snd_pcm_substream *substream);
-void snd_pcm_timer_done(struct snd_pcm_substream *substream);
-#else
-static inline void
-snd_pcm_timer_resolution_change(struct snd_pcm_substream *substream) {}
-static inline void snd_pcm_timer_init(struct snd_pcm_substream *substream) {}
-static inline void snd_pcm_timer_done(struct snd_pcm_substream *substream) {}
-#endif
/**
* snd_pcm_gettime - Fill the timespec depending on the timestamp mode
* @runtime: PCM runtime instance
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index 492a3ca7f17b..6665cb29e1a2 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -30,7 +30,7 @@
#include <linux/workqueue.h>
#include <linux/device.h>
-#if defined(CONFIG_SND_SEQUENCER) || defined(CONFIG_SND_SEQUENCER_MODULE)
+#if IS_ENABLED(CONFIG_SND_SEQUENCER)
#include <sound/seq_device.h>
#endif
@@ -144,7 +144,7 @@ struct snd_rawmidi {
struct snd_info_entry *proc_entry;
-#if defined(CONFIG_SND_SEQUENCER) || defined(CONFIG_SND_SEQUENCER_MODULE)
+#if IS_ENABLED(CONFIG_SND_SEQUENCER)
struct snd_seq_device *seq_dev;
#endif
};
diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h
index a5cf6152e778..d0c33a9972b9 100644
--- a/include/sound/rt5645.h
+++ b/include/sound/rt5645.h
@@ -21,8 +21,10 @@ struct rt5645_platform_data {
/* 0 = IN2P; 1 = GPIO6; 2 = GPIO10; 3 = GPIO12 */
unsigned int jd_mode;
- /* Invert JD when jack insert */
- bool jd_invert;
+ /* Use level triggered irq */
+ bool level_trigger_irq;
+ /* Invert JD1_1 status polarity */
+ bool inv_jd1_1;
};
#endif
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index af58d2362975..42c6a6ac3ce6 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -22,6 +22,11 @@ struct asoc_simple_dai {
struct clk *clk;
};
+struct asoc_simple_card_data {
+ u32 convert_rate;
+ u32 convert_channels;
+};
+
int asoc_simple_card_parse_daifmt(struct device *dev,
struct device_node *node,
struct device_node *codec,
@@ -35,13 +40,18 @@ int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
char *prefix);
#define asoc_simple_card_parse_clk_cpu(dev, node, dai_link, simple_dai) \
- asoc_simple_card_parse_clk(dev, node, dai_link->cpu_of_node, simple_dai)
+ asoc_simple_card_parse_clk(dev, node, dai_link->cpu_of_node, simple_dai, \
+ dai_link->cpu_dai_name)
#define asoc_simple_card_parse_clk_codec(dev, node, dai_link, simple_dai) \
- asoc_simple_card_parse_clk(dev, node, dai_link->codec_of_node, simple_dai)
+ asoc_simple_card_parse_clk(dev, node, dai_link->codec_of_node, simple_dai,\
+ dai_link->codec_dai_name)
int asoc_simple_card_parse_clk(struct device *dev,
struct device_node *node,
struct device_node *dai_of_node,
- struct asoc_simple_dai *simple_dai);
+ struct asoc_simple_dai *simple_dai,
+ const char *name);
+int asoc_simple_card_clk_enable(struct asoc_simple_dai *dai);
+void asoc_simple_card_clk_disable(struct asoc_simple_dai *dai);
#define asoc_simple_card_parse_cpu(node, dai_link, \
list_name, cells_name, is_single_link) \
@@ -60,6 +70,22 @@ int asoc_simple_card_parse_dai(struct device_node *node,
const char *cells_name,
int *is_single_links);
+#define asoc_simple_card_parse_graph_cpu(ep, dai_link) \
+ asoc_simple_card_parse_graph_dai(ep, &dai_link->cpu_of_node, \
+ &dai_link->cpu_dai_name)
+#define asoc_simple_card_parse_graph_codec(ep, dai_link) \
+ asoc_simple_card_parse_graph_dai(ep, &dai_link->codec_of_node, \
+ &dai_link->codec_dai_name)
+int asoc_simple_card_parse_graph_dai(struct device_node *ep,
+ struct device_node **endpoint_np,
+ const char **dai_name);
+
+#define asoc_simple_card_of_parse_tdm(np, dai) \
+ snd_soc_of_parse_tdm_slot(np, &(dai)->tx_slot_mask, \
+ &(dai)->rx_slot_mask, \
+ &(dai)->slots, \
+ &(dai)->slot_width);
+
int asoc_simple_card_init_dai(struct snd_soc_dai *dai,
struct asoc_simple_dai *simple_dai);
@@ -69,4 +95,15 @@ void asoc_simple_card_canonicalize_cpu(struct snd_soc_dai_link *dai_link,
int asoc_simple_card_clean_reference(struct snd_soc_card *card);
+void asoc_simple_card_convert_fixup(struct asoc_simple_card_data *data,
+ struct snd_pcm_hw_params *params);
+void asoc_simple_card_parse_convert(struct device *dev, char *prefix,
+ struct asoc_simple_card_data *data);
+
+int asoc_simple_card_of_parse_routing(struct snd_soc_card *card,
+ char *prefix,
+ int optional);
+int asoc_simple_card_of_parse_widgets(struct snd_soc_card *card,
+ char *prefix);
+
#endif /* __SIMPLE_CARD_UTILS_H */
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index a466f4bdc835..344b96c206a3 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -510,6 +510,13 @@ enum snd_soc_dapm_type {
snd_soc_dapm_dai_out,
snd_soc_dapm_dai_link, /* link between two DAI structures */
snd_soc_dapm_kcontrol, /* Auto-disabled kcontrol */
+ snd_soc_dapm_buffer, /* DSP/CODEC internal buffer */
+ snd_soc_dapm_scheduler, /* DSP/CODEC internal scheduler */
+ snd_soc_dapm_effect, /* DSP/CODEC effect component */
+ snd_soc_dapm_src, /* DSP/CODEC SRC component */
+ snd_soc_dapm_asrc, /* DSP/CODEC ASRC component */
+ snd_soc_dapm_encoder, /* FW/SW audio encoder component */
+ snd_soc_dapm_decoder, /* FW/SW audio decoder component */
};
enum snd_soc_dapm_subclass {
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index f9cc7b9271ac..f552c3f56368 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -28,6 +28,8 @@ struct snd_soc_component;
struct snd_soc_tplg_pcm_fe;
struct snd_soc_dapm_context;
struct snd_soc_card;
+struct snd_kcontrol_new;
+struct snd_soc_dai_link;
/* objects can be loaded and unloaded in groups with identifying indexes */
#define SND_SOC_TPLG_INDEX_ALL 0 /* ID that matches all FW objects */
@@ -116,6 +118,9 @@ struct snd_soc_tplg_ops {
int (*widget_load)(struct snd_soc_component *,
struct snd_soc_dapm_widget *,
struct snd_soc_tplg_dapm_widget *);
+ int (*widget_ready)(struct snd_soc_component *,
+ struct snd_soc_dapm_widget *,
+ struct snd_soc_tplg_dapm_widget *);
int (*widget_unload)(struct snd_soc_component *,
struct snd_soc_dobj *);
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 5170fd81e1fd..9c94b97c17f8 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -803,6 +803,8 @@ struct snd_soc_component_driver {
int (*of_xlate_dai_name)(struct snd_soc_component *component,
struct of_phandle_args *args,
const char **dai_name);
+ int (*of_xlate_dai_id)(struct snd_soc_component *component,
+ struct device_node *endpoint);
void (*seq_notifier)(struct snd_soc_component *, enum snd_soc_dapm_type,
int subseq);
int (*stream_event)(struct snd_soc_component *, int event);
@@ -1676,6 +1678,7 @@ unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
const char *prefix,
struct device_node **bitclkmaster,
struct device_node **framemaster);
+int snd_soc_get_dai_id(struct device_node *ep);
int snd_soc_get_dai_name(struct of_phandle_args *args,
const char **dai_name);
int snd_soc_of_get_dai_name(struct device_node *of_node,
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index 42febb6bc1d5..ff91325b8123 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -10,6 +10,7 @@
#include <linux/memcontrol.h>
#include <linux/device.h>
#include <linux/kdev_t.h>
+#include <linux/errseq.h>
DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
@@ -52,6 +53,62 @@ DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache,
TP_ARGS(page)
);
+TRACE_EVENT(filemap_set_wb_err,
+ TP_PROTO(struct address_space *mapping, errseq_t eseq),
+
+ TP_ARGS(mapping, eseq),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(errseq_t, errseq)
+ ),
+
+ TP_fast_assign(
+ __entry->i_ino = mapping->host->i_ino;
+ __entry->errseq = eseq;
+ if (mapping->host->i_sb)
+ __entry->s_dev = mapping->host->i_sb->s_dev;
+ else
+ __entry->s_dev = mapping->host->i_rdev;
+ ),
+
+ TP_printk("dev=%d:%d ino=0x%lx errseq=0x%x",
+ MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
+ __entry->i_ino, __entry->errseq)
+);
+
+TRACE_EVENT(file_check_and_advance_wb_err,
+ TP_PROTO(struct file *file, errseq_t old),
+
+ TP_ARGS(file, old),
+
+ TP_STRUCT__entry(
+ __field(struct file *, file);
+ __field(unsigned long, i_ino)
+ __field(dev_t, s_dev)
+ __field(errseq_t, old)
+ __field(errseq_t, new)
+ ),
+
+ TP_fast_assign(
+ __entry->file = file;
+ __entry->i_ino = file->f_mapping->host->i_ino;
+ if (file->f_mapping->host->i_sb)
+ __entry->s_dev =
+ file->f_mapping->host->i_sb->s_dev;
+ else
+ __entry->s_dev =
+ file->f_mapping->host->i_rdev;
+ __entry->old = old;
+ __entry->new = file->f_wb_err;
+ ),
+
+ TP_printk("file=%p dev=%d:%d ino=0x%lx old=0x%x new=0x%x",
+ __entry->file, MAJOR(__entry->s_dev),
+ MINOR(__entry->s_dev), __entry->i_ino, __entry->old,
+ __entry->new)
+);
#endif /* _TRACE_FILEMAP_H */
/* This part must be outside protection */
diff --git a/include/trace/events/percpu.h b/include/trace/events/percpu.h
new file mode 100644
index 000000000000..ad34b1bae047
--- /dev/null
+++ b/include/trace/events/percpu.h
@@ -0,0 +1,125 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM percpu
+
+#if !defined(_TRACE_PERCPU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PERCPU_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(percpu_alloc_percpu,
+
+ TP_PROTO(bool reserved, bool is_atomic, size_t size,
+ size_t align, void *base_addr, int off, void __percpu *ptr),
+
+ TP_ARGS(reserved, is_atomic, size, align, base_addr, off, ptr),
+
+ TP_STRUCT__entry(
+ __field( bool, reserved )
+ __field( bool, is_atomic )
+ __field( size_t, size )
+ __field( size_t, align )
+ __field( void *, base_addr )
+ __field( int, off )
+ __field( void __percpu *, ptr )
+ ),
+
+ TP_fast_assign(
+ __entry->reserved = reserved;
+ __entry->is_atomic = is_atomic;
+ __entry->size = size;
+ __entry->align = align;
+ __entry->base_addr = base_addr;
+ __entry->off = off;
+ __entry->ptr = ptr;
+ ),
+
+ TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu base_addr=%p off=%d ptr=%p",
+ __entry->reserved, __entry->is_atomic,
+ __entry->size, __entry->align,
+ __entry->base_addr, __entry->off, __entry->ptr)
+);
+
+TRACE_EVENT(percpu_free_percpu,
+
+ TP_PROTO(void *base_addr, int off, void __percpu *ptr),
+
+ TP_ARGS(base_addr, off, ptr),
+
+ TP_STRUCT__entry(
+ __field( void *, base_addr )
+ __field( int, off )
+ __field( void __percpu *, ptr )
+ ),
+
+ TP_fast_assign(
+ __entry->base_addr = base_addr;
+ __entry->off = off;
+ __entry->ptr = ptr;
+ ),
+
+ TP_printk("base_addr=%p off=%d ptr=%p",
+ __entry->base_addr, __entry->off, __entry->ptr)
+);
+
+TRACE_EVENT(percpu_alloc_percpu_fail,
+
+ TP_PROTO(bool reserved, bool is_atomic, size_t size, size_t align),
+
+ TP_ARGS(reserved, is_atomic, size, align),
+
+ TP_STRUCT__entry(
+ __field( bool, reserved )
+ __field( bool, is_atomic )
+ __field( size_t, size )
+ __field( size_t, align )
+ ),
+
+ TP_fast_assign(
+ __entry->reserved = reserved;
+ __entry->is_atomic = is_atomic;
+ __entry->size = size;
+ __entry->align = align;
+ ),
+
+ TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu",
+ __entry->reserved, __entry->is_atomic,
+ __entry->size, __entry->align)
+);
+
+TRACE_EVENT(percpu_create_chunk,
+
+ TP_PROTO(void *base_addr),
+
+ TP_ARGS(base_addr),
+
+ TP_STRUCT__entry(
+ __field( void *, base_addr )
+ ),
+
+ TP_fast_assign(
+ __entry->base_addr = base_addr;
+ ),
+
+ TP_printk("base_addr=%p", __entry->base_addr)
+);
+
+TRACE_EVENT(percpu_destroy_chunk,
+
+ TP_PROTO(void *base_addr),
+
+ TP_ARGS(base_addr),
+
+ TP_STRUCT__entry(
+ __field( void *, base_addr )
+ ),
+
+ TP_fast_assign(
+ __entry->base_addr = base_addr;
+ ),
+
+ TP_printk("base_addr=%p", __entry->base_addr)
+);
+
+#endif /* _TRACE_PERCPU_H */
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index 31acce9019a6..b70a38b7fa84 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -30,6 +30,8 @@ DECLARE_EVENT_CLASS(xen_mc__batch,
DEFINE_XEN_MC_BATCH(xen_mc_batch);
DEFINE_XEN_MC_BATCH(xen_mc_issue);
+TRACE_DEFINE_SIZEOF(ulong);
+
TRACE_EVENT(xen_mc_entry,
TP_PROTO(struct multicall_entry *mc, unsigned nargs),
TP_ARGS(mc, nargs),
@@ -40,8 +42,8 @@ TRACE_EVENT(xen_mc_entry,
),
TP_fast_assign(__entry->op = mc->op;
__entry->nargs = nargs;
- memcpy(__entry->args, mc->args, sizeof(unsigned long) * nargs);
- memset(__entry->args + nargs, 0, sizeof(unsigned long) * (6 - nargs));
+ memcpy(__entry->args, mc->args, sizeof(ulong) * nargs);
+ memset(__entry->args + nargs, 0, sizeof(ulong) * (6 - nargs));
),
TP_printk("op %u%s args [%lx, %lx, %lx, %lx, %lx, %lx]",
__entry->op, xen_hypercall_name(__entry->op),
@@ -122,6 +124,7 @@ TRACE_EVENT(xen_mc_extend_args,
__entry->res == XEN_MC_XE_NO_SPACE ? "NO_SPACE" : "???")
);
+TRACE_DEFINE_SIZEOF(pteval_t);
/* mmu */
DECLARE_EVENT_CLASS(xen_mmu__set_pte,
TP_PROTO(pte_t *ptep, pte_t pteval),
@@ -199,6 +202,8 @@ TRACE_EVENT(xen_mmu_pte_clear,
__entry->mm, __entry->addr, __entry->ptep)
);
+TRACE_DEFINE_SIZEOF(pmdval_t);
+
TRACE_EVENT(xen_mmu_set_pmd,
TP_PROTO(pmd_t *pmdp, pmd_t pmdval),
TP_ARGS(pmdp, pmdval),
@@ -226,6 +231,8 @@ TRACE_EVENT(xen_mmu_pmd_clear,
#if CONFIG_PGTABLE_LEVELS >= 4
+TRACE_DEFINE_SIZEOF(pudval_t);
+
TRACE_EVENT(xen_mmu_set_pud,
TP_PROTO(pud_t *pudp, pud_t pudval),
TP_ARGS(pudp, pudval),
@@ -241,6 +248,8 @@ TRACE_EVENT(xen_mmu_set_pud,
(int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
);
+TRACE_DEFINE_SIZEOF(p4dval_t);
+
TRACE_EVENT(xen_mmu_set_p4d,
TP_PROTO(p4d_t *p4dp, p4d_t *user_p4dp, p4d_t p4dval),
TP_ARGS(p4dp, user_p4dp, p4dval),
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 00f643164ca2..3976fa1f6e42 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -35,15 +35,28 @@ TRACE_MAKE_SYSTEM_STR();
#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a) \
- static struct trace_enum_map __used __initdata \
+ static struct trace_eval_map __used __initdata \
__##TRACE_SYSTEM##_##a = \
{ \
.system = TRACE_SYSTEM_STRING, \
- .enum_string = #a, \
- .enum_value = a \
+ .eval_string = #a, \
+ .eval_value = a \
}; \
- static struct trace_enum_map __used \
- __attribute__((section("_ftrace_enum_map"))) \
+ static struct trace_eval_map __used \
+ __attribute__((section("_ftrace_eval_map"))) \
+ *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
+
+#undef TRACE_DEFINE_SIZEOF
+#define TRACE_DEFINE_SIZEOF(a) \
+ static struct trace_eval_map __used __initdata \
+ __##TRACE_SYSTEM##_##a = \
+ { \
+ .system = TRACE_SYSTEM_STRING, \
+ .eval_string = "sizeof(" #a ")", \
+ .eval_value = sizeof(a) \
+ }; \
+ static struct trace_eval_map __used \
+ __attribute__((section("_ftrace_eval_map"))) \
*TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a
/*
@@ -158,6 +171,9 @@ TRACE_MAKE_SYSTEM_STR();
#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a)
+#undef TRACE_DEFINE_SIZEOF
+#define TRACE_DEFINE_SIZEOF(a)
+
#undef __field
#define __field(type, item)
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index a0dfe27bc6c7..44579a24f95d 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -336,6 +336,8 @@ static inline int cec_is_unconfigured(__u16 log_addr_mask)
#define CEC_CAP_RC (1 << 4)
/* Hardware can monitor all messages, not just directed and broadcast. */
#define CEC_CAP_MONITOR_ALL (1 << 5)
+/* Hardware can use CEC only if the HDMI HPD pin is high. */
+#define CEC_CAP_NEEDS_HPD (1 << 6)
/**
* struct cec_caps - CEC capabilities structure.
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 2f6c77aebe1a..412c06a624c8 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -240,7 +240,8 @@ enum {
/* Added later */
DM_LIST_VERSIONS_CMD,
DM_TARGET_MSG_CMD,
- DM_DEV_SET_GEOMETRY_CMD
+ DM_DEV_SET_GEOMETRY_CMD,
+ DM_DEV_ARM_POLL_CMD,
};
#define DM_IOCTL 0xfd
@@ -255,6 +256,7 @@ enum {
#define DM_DEV_SUSPEND _IOWR(DM_IOCTL, DM_DEV_SUSPEND_CMD, struct dm_ioctl)
#define DM_DEV_STATUS _IOWR(DM_IOCTL, DM_DEV_STATUS_CMD, struct dm_ioctl)
#define DM_DEV_WAIT _IOWR(DM_IOCTL, DM_DEV_WAIT_CMD, struct dm_ioctl)
+#define DM_DEV_ARM_POLL _IOWR(DM_IOCTL, DM_DEV_ARM_POLL_CMD, struct dm_ioctl)
#define DM_TABLE_LOAD _IOWR(DM_IOCTL, DM_TABLE_LOAD_CMD, struct dm_ioctl)
#define DM_TABLE_CLEAR _IOWR(DM_IOCTL, DM_TABLE_CLEAR_CMD, struct dm_ioctl)
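For illustration only (not part of the patch): from user space the new DM_DEV_ARM_POLL ioctl is issued on the control node before poll()ing it for events. The path and the minimal header setup below follow the usual dm-ioctl conventions but are an assumption, not an excerpt from libdevmapper.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dm-ioctl.h>

/* Sketch: arm event polling on the device-mapper control node. */
static int example_arm_dm_poll(void)
{
	struct dm_ioctl dmi;
	int fd = open("/dev/mapper/control", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&dmi, 0, sizeof(dmi));
	dmi.version[0] = DM_VERSION_MAJOR;
	dmi.version[1] = DM_VERSION_MINOR;
	dmi.version[2] = DM_VERSION_PATCHLEVEL;
	dmi.data_size = sizeof(dmi);

	if (ioctl(fd, DM_DEV_ARM_POLL, &dmi) < 0) {
		close(fd);
		return -1;
	}

	/* The fd can now be poll()ed; POLLIN indicates device-mapper events. */
	return fd;
}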
diff --git a/include/uapi/linux/dvb/video.h b/include/uapi/linux/dvb/video.h
index 260f033a5b54..c83d40b8a8a4 100644
--- a/include/uapi/linux/dvb/video.h
+++ b/include/uapi/linux/dvb/video.h
@@ -134,7 +134,8 @@ struct video_event {
#define VIDEO_EVENT_FRAME_RATE_CHANGED 2
#define VIDEO_EVENT_DECODER_STOPPED 3
#define VIDEO_EVENT_VSYNC 4
- __kernel_time_t timestamp;
+ /* unused, make sure to use atomic time for y2038 if it ever gets used */
+ long timestamp;
union {
video_size_t size;
unsigned int frame_rate; /* in frames per 1000sec */
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index f5a8d96e1e09..179891074b3c 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -600,6 +600,7 @@
#define KEY_APPSELECT 0x244 /* AL Select Task/Application */
#define KEY_SCREENSAVER 0x245 /* AL Screen Saver */
#define KEY_VOICECOMMAND 0x246 /* Listening Voice Command */
+#define KEY_ASSISTANT 0x247 /* AL Context-aware desktop assistant */
#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 577429a95ad8..c0b6dfec5f87 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -155,6 +155,35 @@ struct kvm_s390_skeys {
__u32 reserved[9];
};
+#define KVM_S390_CMMA_PEEK (1 << 0)
+
+/**
+ * kvm_s390_cmma_log - Used for CMMA migration.
+ *
+ * Used both for input and output.
+ *
+ * @start_gfn: Guest page number to start from.
+ * @count: Size of the result buffer.
+ * @flags: Control operation mode via KVM_S390_CMMA_* flags
+ * @remaining: Used with KVM_S390_GET_CMMA_BITS. Indicates how many dirty
+ * pages are still remaining.
+ * @mask: Used with KVM_S390_SET_CMMA_BITS. Bitmap of bits to actually set
+ * in the PGSTE.
+ * @values: Pointer to the values buffer.
+ *
+ * Used in KVM_S390_{G,S}ET_CMMA_BITS ioctls.
+ */
+struct kvm_s390_cmma_log {
+ __u64 start_gfn;
+ __u32 count;
+ __u32 flags;
+ union {
+ __u64 remaining;
+ __u64 mask;
+ };
+ __u64 values;
+};
+
struct kvm_hyperv_exit {
#define KVM_EXIT_HYPERV_SYNIC 1
#define KVM_EXIT_HYPERV_HCALL 2
@@ -895,6 +924,9 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_SPAPR_TCE_VFIO 142
#define KVM_CAP_X86_GUEST_MWAIT 143
#define KVM_CAP_ARM_USER_IRQ 144
+#define KVM_CAP_S390_CMMA_MIGRATION 145
+#define KVM_CAP_PPC_FWNMI 146
+#define KVM_CAP_PPC_SMT_POSSIBLE 147
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1318,6 +1350,9 @@ struct kvm_s390_ucas_mapping {
#define KVM_S390_GET_IRQ_STATE _IOW(KVMIO, 0xb6, struct kvm_s390_irq_state)
/* Available with KVM_CAP_X86_SMM */
#define KVM_SMI _IO(KVMIO, 0xb7)
+/* Available with KVM_CAP_S390_CMMA_MIGRATION */
+#define KVM_S390_GET_CMMA_BITS _IOW(KVMIO, 0xb8, struct kvm_s390_cmma_log)
+#define KVM_S390_SET_CMMA_BITS _IOW(KVMIO, 0xb9, struct kvm_s390_cmma_log)
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
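For illustration only (not part of the patch): a user-space consumer of the CMMA migration interface calls the new ioctl on a VM file descriptor. Here vm_fd, the values buffer and the choice between normal and peek mode are assumptions.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: fetch up to 'count' CMMA values starting at guest frame 'gfn'. */
static long example_get_cmma(int vm_fd, uint64_t gfn, uint8_t *buf,
			     uint32_t count)
{
	struct kvm_s390_cmma_log log = {
		.start_gfn = gfn,
		.count = count,
		.flags = 0,	/* or KVM_S390_CMMA_PEEK */
		.values = (uint64_t)(uintptr_t)buf,
	};

	if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log) < 0)
		return -1;

	/* On success, log.count reflects how many values were stored in buf. */
	return log.count;
}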
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index a0908f1d2760..e439565df838 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -42,6 +42,7 @@
#define MSDOS_SUPER_MAGIC 0x4d44 /* MD */
#define NCP_SUPER_MAGIC 0x564c /* Guess, what 0x564c is :-) */
#define NFS_SUPER_MAGIC 0x6969
+#define OCFS2_SUPER_MAGIC 0x7461636f
#define OPENPROM_SUPER_MAGIC 0x9fa1
#define QNX4_SUPER_MAGIC 0x002f /* qnx4 fs detection */
#define QNX6_SUPER_MAGIC 0x68191122 /* qnx6 fs detection */
diff --git a/include/uapi/linux/max2175.h b/include/uapi/linux/max2175.h
new file mode 100644
index 000000000000..3ef5d264440f
--- /dev/null
+++ b/include/uapi/linux/max2175.h
@@ -0,0 +1,28 @@
+/*
+ * max2175.h
+ *
+ * Maxim Integrated MAX2175 RF to Bits tuner driver - user space header file.
+ *
+ * Copyright (C) 2016 Maxim Integrated Products
+ * Copyright (C) 2017 Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __UAPI_MAX2175_H_
+#define __UAPI_MAX2175_H_
+
+#include <linux/v4l2-controls.h>
+
+#define V4L2_CID_MAX2175_I2S_ENABLE (V4L2_CID_USER_MAX217X_BASE + 0x01)
+#define V4L2_CID_MAX2175_HSLS (V4L2_CID_USER_MAX217X_BASE + 0x02)
+#define V4L2_CID_MAX2175_RX_MODE (V4L2_CID_USER_MAX217X_BASE + 0x03)
+
+#endif /* __UAPI_MAX2175_H_ */
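The three IDs above are ordinary V4L2 user controls, so they can be driven with the regular control ioctls. A hedged sketch, assuming the tuner is exposed as /dev/v4l-subdev0 (the device path and value are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/max2175.h>

int main(void)
{
	struct v4l2_control ctrl = {
		.id = V4L2_CID_MAX2175_I2S_ENABLE,	/* from the header above */
		.value = 1,				/* enable I2S output */
	};
	int fd = open("/dev/v4l-subdev0", O_RDWR);	/* assumed device node */

	if (fd < 0 || ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0) {
		perror("max2175 I2S enable");
		return 1;
	}
	return 0;
}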
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index 4890787731b8..fac96c64fe51 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -105,6 +105,12 @@ struct media_device_info {
#define MEDIA_ENT_F_PROC_VIDEO_STATISTICS (MEDIA_ENT_F_BASE + 0x4006)
/*
+ * Switch and bridge entities
+ */
+#define MEDIA_ENT_F_VID_MUX (MEDIA_ENT_F_BASE + 0x5001)
+#define MEDIA_ENT_F_VID_IF_BRIDGE (MEDIA_ENT_F_BASE + 0x5002)
+
+/*
* Connectors
*/
/* It is a responsibility of the entity drivers to add connectors and links */
diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
index 9cd8b21dddbe..2a4d89508fec 100644
--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -24,13 +24,6 @@ enum {
MPOL_MAX, /* always last member of enum */
};
-enum mpol_rebind_step {
- MPOL_REBIND_ONCE, /* do rebind work at once(not by two step) */
- MPOL_REBIND_STEP1, /* first step(set all the newly nodes) */
- MPOL_REBIND_STEP2, /* second step(clean all the disallowed nodes)*/
- MPOL_REBIND_NSTEP,
-};
-
/* Flags for set_mempolicy */
#define MPOL_F_STATIC_NODES (1 << 15)
#define MPOL_F_RELATIVE_NODES (1 << 14)
@@ -65,7 +58,6 @@ enum mpol_rebind_step {
*/
#define MPOL_F_SHARED (1 << 0) /* identify shared policies */
#define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */
-#define MPOL_F_REBINDING (1 << 2) /* identify policies in rebinding */
#define MPOL_F_MOF (1 << 3) /* this policy wants migrate on fault */
#define MPOL_F_MORON (1 << 4) /* Migrate On protnone Reference On Node */
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 7ad3863cb88b..6d3c54264d8e 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -105,7 +105,8 @@ struct nd_cmd_ars_cap {
__u32 status;
__u32 max_ars_out;
__u32 clear_err_unit;
- __u32 reserved;
+ __u16 flags;
+ __u16 reserved;
} __packed;
struct nd_cmd_ars_start {
@@ -144,6 +145,43 @@ struct nd_cmd_clear_error {
__u64 cleared;
} __packed;
+struct nd_cmd_trans_spa {
+ __u64 spa;
+ __u32 status;
+ __u8 flags;
+ __u8 _reserved[3];
+ __u64 trans_length;
+ __u32 num_nvdimms;
+ struct nd_nvdimm_device {
+ __u32 nfit_device_handle;
+ __u32 _reserved;
+ __u64 dpa;
+ } __packed devices[0];
+
+} __packed;
+
+struct nd_cmd_ars_err_inj {
+ __u64 err_inj_spa_range_base;
+ __u64 err_inj_spa_range_length;
+ __u8 err_inj_options;
+ __u32 status;
+} __packed;
+
+struct nd_cmd_ars_err_inj_clr {
+ __u64 err_inj_clr_spa_range_base;
+ __u64 err_inj_clr_spa_range_length;
+ __u32 status;
+} __packed;
+
+struct nd_cmd_ars_err_inj_stat {
+ __u32 status;
+ __u32 inj_err_rec_count;
+ struct nd_error_stat_query_record {
+ __u64 err_inj_stat_spa_range_base;
+ __u64 err_inj_stat_spa_range_length;
+ } __packed record[0];
+} __packed;
+
enum {
ND_CMD_IMPLEMENTED = 0,
@@ -169,6 +207,7 @@ enum {
enum {
ND_ARS_VOLATILE = 1,
ND_ARS_PERSISTENT = 2,
+ ND_ARS_RETURN_PREV_DATA = 1 << 1,
ND_CONFIG_LOCKED = 1,
};
@@ -179,6 +218,7 @@ static inline const char *nvdimm_bus_cmd_name(unsigned cmd)
[ND_CMD_ARS_START] = "ars_start",
[ND_CMD_ARS_STATUS] = "ars_status",
[ND_CMD_CLEAR_ERROR] = "clear_error",
+ [ND_CMD_CALL] = "cmd_call",
};
if (cmd < ARRAY_SIZE(names) && names[cmd])
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index d56bb0051009..c22d3ebaca20 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -517,6 +517,7 @@
#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
+#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit 2 */
#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */
#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
#define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */
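With the new bit, the Supported Link Speeds vector covers 2.5, 5.0 and 8.0 GT/s. A small, hedged decode helper; lnkcap is assumed to be a Link Capabilities register value read elsewhere, and the helper name is illustrative:

/* Hedged sketch: map the LNKCAP SLS field to a speed string. */
static const char *pcie_link_cap_speed(u32 lnkcap)
{
	switch (lnkcap & PCI_EXP_LNKCAP_SLS) {
	case PCI_EXP_LNKCAP_SLS_8_0GB:
		return "8.0 GT/s";
	case PCI_EXP_LNKCAP_SLS_5_0GB:
		return "5.0 GT/s";
	case PCI_EXP_LNKCAP_SLS_2_5GB:
		return "2.5 GT/s";
	default:
		return "unknown";
	}
}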
diff --git a/include/uapi/linux/switchtec_ioctl.h b/include/uapi/linux/switchtec_ioctl.h
index 3e824e1a6495..5e392968bad2 100644
--- a/include/uapi/linux/switchtec_ioctl.h
+++ b/include/uapi/linux/switchtec_ioctl.h
@@ -39,6 +39,9 @@ struct switchtec_ioctl_flash_info {
__u32 padding;
};
+#define SWITCHTEC_IOCTL_PART_ACTIVE 1
+#define SWITCHTEC_IOCTL_PART_RUNNING 2
+
struct switchtec_ioctl_flash_part_info {
__u32 flash_partition;
__u32 address;
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 0d2e1e01fbd5..31bfc68f86d6 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -180,6 +180,15 @@ enum v4l2_colorfx {
* We reserve 16 controls for this driver. */
#define V4L2_CID_USER_TC358743_BASE (V4L2_CID_USER_BASE + 0x1080)
+/* The base for the max217x driver controls.
+ * We reserve 32 controls for this driver
+ */
+#define V4L2_CID_USER_MAX217X_BASE (V4L2_CID_USER_BASE + 0x1090)
+
+/* The base for the imx driver controls.
+ * We reserve 16 controls for this driver. */
+#define V4L2_CID_USER_IMX_BASE (V4L2_CID_USER_BASE + 0x1090)
+
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
@@ -893,7 +902,7 @@ enum v4l2_jpeg_chroma_subsampling {
#define V4L2_CID_PIXEL_RATE (V4L2_CID_IMAGE_PROC_CLASS_BASE + 2)
#define V4L2_CID_TEST_PATTERN (V4L2_CID_IMAGE_PROC_CLASS_BASE + 3)
#define V4L2_CID_DEINTERLACING_MODE (V4L2_CID_IMAGE_PROC_CLASS_BASE + 4)
-
+#define V4L2_CID_DIGITAL_GAIN (V4L2_CID_IMAGE_PROC_CLASS_BASE + 5)
/* DV-class control IDs defined by V4L2 */
#define V4L2_CID_DV_CLASS_BASE (V4L2_CTRL_CLASS_DV | 0x900)
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 2b8feb86d09e..45cf7359822c 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -669,6 +669,9 @@ struct v4l2_pix_format {
#define V4L2_SDR_FMT_CS8 v4l2_fourcc('C', 'S', '0', '8') /* complex s8 */
#define V4L2_SDR_FMT_CS14LE v4l2_fourcc('C', 'S', '1', '4') /* complex s14le */
#define V4L2_SDR_FMT_RU12LE v4l2_fourcc('R', 'U', '1', '2') /* real u12le */
+#define V4L2_SDR_FMT_PCU16BE v4l2_fourcc('P', 'C', '1', '6') /* planar complex u16be */
+#define V4L2_SDR_FMT_PCU18BE v4l2_fourcc('P', 'C', '1', '8') /* planar complex u18be */
+#define V4L2_SDR_FMT_PCU20BE v4l2_fourcc('P', 'C', '2', '0') /* planar complex u20be */
/* Touch formats - used for Touch devices */
#define V4L2_TCH_FMT_DELTA_TD16 v4l2_fourcc('T', 'D', '1', '6') /* 16-bit signed deltas */
diff --git a/include/uapi/scsi/cxlflash_ioctl.h b/include/uapi/scsi/cxlflash_ioctl.h
index e9fdc12ad984..48d107e75cf2 100644
--- a/include/uapi/scsi/cxlflash_ioctl.h
+++ b/include/uapi/scsi/cxlflash_ioctl.h
@@ -18,6 +18,11 @@
#include <linux/types.h>
/*
+ * Structure and definitions for all CXL Flash ioctls
+ */
+#define CXLFLASH_WWID_LEN 16
+
+/*
 * Structure and flag definitions for CXL Flash superpipe ioctls
*/
@@ -31,7 +36,7 @@ struct dk_cxlflash_hdr {
};
/*
- * Return flag definitions available to all ioctls
+ * Return flag definitions available to all superpipe ioctls
*
* Similar to the input flags, these are grown from the bottom-up with the
* intention that ioctl-specific return flag definitions would grow from the
@@ -151,7 +156,7 @@ struct dk_cxlflash_recover_afu {
__u64 reserved[8]; /* Reserved for future use */
};
-#define DK_CXLFLASH_MANAGE_LUN_WWID_LEN 16
+#define DK_CXLFLASH_MANAGE_LUN_WWID_LEN CXLFLASH_WWID_LEN
#define DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE 0x8000000000000000ULL
#define DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE 0x4000000000000000ULL
#define DK_CXLFLASH_MANAGE_LUN_ALL_PORTS_ACCESSIBLE 0x2000000000000000ULL
@@ -180,6 +185,10 @@ union cxlflash_ioctls {
#define CXL_MAGIC 0xCA
#define CXL_IOWR(_n, _s) _IOWR(CXL_MAGIC, _n, struct _s)
+/*
+ * CXL Flash superpipe ioctls start at base of the reserved CXL_MAGIC
+ * region (0x80) and grow upwards.
+ */
#define DK_CXLFLASH_ATTACH CXL_IOWR(0x80, dk_cxlflash_attach)
#define DK_CXLFLASH_USER_DIRECT CXL_IOWR(0x81, dk_cxlflash_udirect)
#define DK_CXLFLASH_RELEASE CXL_IOWR(0x82, dk_cxlflash_release)
@@ -191,4 +200,76 @@ union cxlflash_ioctls {
#define DK_CXLFLASH_VLUN_RESIZE CXL_IOWR(0x88, dk_cxlflash_resize)
#define DK_CXLFLASH_VLUN_CLONE CXL_IOWR(0x89, dk_cxlflash_clone)
+/*
+ * Structure and flag definitions for CXL Flash host ioctls
+ */
+
+#define HT_CXLFLASH_VERSION_0 0
+
+struct ht_cxlflash_hdr {
+ __u16 version; /* Version data */
+ __u16 subcmd; /* Sub-command */
+ __u16 rsvd[2]; /* Reserved for future use */
+ __u64 flags; /* Input flags */
+ __u64 return_flags; /* Returned flags */
+};
+
+/*
+ * Input flag definitions available to all host ioctls
+ *
+ * These are grown from the bottom-up with the intention that ioctl-specific
+ * input flag definitions would grow from the top-down, allowing the two sets
+ * to co-exist. While not required/enforced at this time, this provides future
+ * flexibility.
+ */
+#define HT_CXLFLASH_HOST_READ 0x0000000000000000ULL
+#define HT_CXLFLASH_HOST_WRITE 0x0000000000000001ULL
+
+#define HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN 0x0001
+#define HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN 0x0002
+#define HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT 0x0003
+
+struct ht_cxlflash_lun_provision {
+ struct ht_cxlflash_hdr hdr; /* Common fields */
+ __u16 port; /* Target port for provision request */
+ __u16 reserved16[3]; /* Reserved for future use */
+ __u64 size; /* Size of LUN (4K blocks) */
+ __u64 lun_id; /* SCSI LUN ID */
+ __u8 wwid[CXLFLASH_WWID_LEN];/* Page83 WWID, NAA-6 */
+ __u64 max_num_luns; /* Maximum number of LUNs provisioned */
+ __u64 cur_num_luns; /* Current number of LUNs provisioned */
+ __u64 max_cap_port; /* Total capacity for port (4K blocks) */
+ __u64 cur_cap_port; /* Current capacity for port (4K blocks) */
+ __u64 reserved[8]; /* Reserved for future use */
+};
+
+#define HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN 262144 /* 256K */
+#define HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN 12
+struct ht_cxlflash_afu_debug {
+ struct ht_cxlflash_hdr hdr; /* Common fields */
+ __u8 reserved8[4]; /* Reserved for future use */
+ __u8 afu_subcmd[HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN]; /* AFU subcommand,
+ * (pass through)
+ */
+ __u64 data_ea; /* Data buffer effective address */
+ __u32 data_len; /* Data buffer length */
+ __u32 reserved32; /* Reserved for future use */
+ __u64 reserved[8]; /* Reserved for future use */
+};
+
+union cxlflash_ht_ioctls {
+ struct ht_cxlflash_lun_provision lun_provision;
+ struct ht_cxlflash_afu_debug afu_debug;
+};
+
+#define MAX_HT_CXLFLASH_IOCTL_SZ (sizeof(union cxlflash_ht_ioctls))
+
+/*
+ * CXL Flash host ioctls start at the top of the reserved CXL_MAGIC
+ * region (0xBF) and grow downwards.
+ */
+#define HT_CXLFLASH_LUN_PROVISION CXL_IOWR(0xBF, ht_cxlflash_lun_provision)
+#define HT_CXLFLASH_AFU_DEBUG CXL_IOWR(0xBE, ht_cxlflash_afu_debug)
+
+
#endif /* ifndef _CXLFLASH_IOCTL_H */
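A hedged sketch of driving the new LUN provision host ioctl from userspace; the device node, port number, size and flag choice are assumptions, only the structure layout, subcommand and ioctl number come from the header above:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/cxlflash_ioctl.h>	/* assumed installed uapi path */

static int cxlflash_create_lun(const char *dev, unsigned long long blocks_4k)
{
	struct ht_cxlflash_lun_provision lp;
	int fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&lp, 0, sizeof(lp));
	lp.hdr.version = HT_CXLFLASH_VERSION_0;
	lp.hdr.subcmd = HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN;
	lp.hdr.flags = HT_CXLFLASH_HOST_WRITE;	/* provisioning modifies state (assumed) */
	lp.port = 0;				/* target FC port (assumed) */
	lp.size = blocks_4k;			/* LUN size in 4K blocks */

	if (ioctl(fd, HT_CXLFLASH_LUN_PROVISION, &lp) < 0) {
		perror("HT_CXLFLASH_LUN_PROVISION");
		close(fd);
		return -1;
	}
	printf("created LUN id 0x%llx\n", (unsigned long long)lp.lun_id);
	close(fd);
	return 0;
}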
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 6702533c8bd8..78014ec56357 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -73,7 +73,15 @@
#define SND_SOC_TPLG_DAPM_DAI_IN 13
#define SND_SOC_TPLG_DAPM_DAI_OUT 14
#define SND_SOC_TPLG_DAPM_DAI_LINK 15
-#define SND_SOC_TPLG_DAPM_LAST SND_SOC_TPLG_DAPM_DAI_LINK
+#define SND_SOC_TPLG_DAPM_BUFFER 16
+#define SND_SOC_TPLG_DAPM_SCHEDULER 17
+#define SND_SOC_TPLG_DAPM_EFFECT 18
+#define SND_SOC_TPLG_DAPM_SIGGEN 19
+#define SND_SOC_TPLG_DAPM_SRC 20
+#define SND_SOC_TPLG_DAPM_ASRC 21
+#define SND_SOC_TPLG_DAPM_ENCODER 22
+#define SND_SOC_TPLG_DAPM_DECODER 23
+#define SND_SOC_TPLG_DAPM_LAST SND_SOC_TPLG_DAPM_DECODER
/* Header magic number and string sizes */
#define SND_SOC_TPLG_MAGIC 0x41536F43 /* ASoC */
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index fd41697cb4d3..1949923a40bf 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -152,7 +152,7 @@ struct snd_hwdep_dsp_image {
* *
*****************************************************************************/
-#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 13)
+#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 14)
typedef unsigned long snd_pcm_uframes_t;
typedef signed long snd_pcm_sframes_t;
@@ -268,6 +268,7 @@ typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_INFO_MMAP_VALID 0x00000002 /* period data are valid during transfer */
#define SNDRV_PCM_INFO_DOUBLE 0x00000004 /* Double buffering needed for PCM start/stop */
#define SNDRV_PCM_INFO_BATCH 0x00000010 /* double buffering */
+#define SNDRV_PCM_INFO_SYNC_APPLPTR 0x00000020 /* need the explicit sync of appl_ptr update */
#define SNDRV_PCM_INFO_INTERLEAVED 0x00000100 /* channels are interleaved */
#define SNDRV_PCM_INFO_NONINTERLEAVED 0x00000200 /* channels are not interleaved */
#define SNDRV_PCM_INFO_COMPLEX 0x00000400 /* complex frame organization (mmap only) */
@@ -563,6 +564,7 @@ enum {
#define SNDRV_PCM_IOCTL_INFO _IOR('A', 0x01, struct snd_pcm_info)
#define SNDRV_PCM_IOCTL_TSTAMP _IOW('A', 0x02, int)
#define SNDRV_PCM_IOCTL_TTSTAMP _IOW('A', 0x03, int)
+#define SNDRV_PCM_IOCTL_USER_PVERSION _IOW('A', 0x04, int)
#define SNDRV_PCM_IOCTL_HW_REFINE _IOWR('A', 0x10, struct snd_pcm_hw_params)
#define SNDRV_PCM_IOCTL_HW_PARAMS _IOWR('A', 0x11, struct snd_pcm_hw_params)
#define SNDRV_PCM_IOCTL_HW_FREE _IO('A', 0x12)
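SNDRV_PCM_IOCTL_USER_PVERSION lets userspace declare which PCM protocol version it was built against, so the kernel can decide whether newer behaviour such as the explicit appl_ptr sync applies. A minimal sketch; pcm_fd is an assumed open PCM device fd and the helper name is illustrative:

#include <sys/ioctl.h>
#include <sound/asound.h>

/* Hedged sketch: tell the kernel which protocol this binary speaks. */
static int declare_pcm_protocol(int pcm_fd)
{
	int version = SNDRV_PCM_VERSION;	/* 2.0.14 with this header */

	return ioctl(pcm_fd, SNDRV_PCM_IOCTL_USER_PVERSION, &version);
}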
diff --git a/include/uapi/sound/snd_sst_tokens.h b/include/uapi/sound/snd_sst_tokens.h
index 93392bedcc58..dedb2056160d 100644
--- a/include/uapi/sound/snd_sst_tokens.h
+++ b/include/uapi/sound/snd_sst_tokens.h
@@ -161,6 +161,8 @@
*
* %SKL_TKL_U32_D0I3_CAPS: Specifies the D0i3 capability for module
*
+ * %SKL_TKN_U32_DMA_BUF_SIZE: DMA buffer size in milliseconds
+ *
 * module_id and loadable flags don't have tokens as these values will be
* read from the DSP FW manifest
*/
@@ -213,8 +215,10 @@ enum SKL_TKNS {
SKL_TKN_U32_LIB_COUNT,
SKL_TKN_STR_LIB_NAME,
SKL_TKN_U32_PMODE,
- SKL_TKL_U32_D0I3_CAPS,
- SKL_TKN_MAX = SKL_TKL_U32_D0I3_CAPS,
+ SKL_TKL_U32_D0I3_CAPS, /* Typo added at v4.10 */
+ SKL_TKN_U32_D0I3_CAPS = SKL_TKL_U32_D0I3_CAPS,
+ SKL_TKN_U32_DMA_BUF_SIZE,
+ SKL_TKN_MAX = SKL_TKN_U32_DMA_BUF_SIZE,
};
#endif
diff --git a/include/xen/arm/hypercall.h b/include/xen/arm/hypercall.h
index 73db4b2eeb89..b40485e54d80 100644
--- a/include/xen/arm/hypercall.h
+++ b/include/xen/arm/hypercall.h
@@ -39,6 +39,8 @@
#include <xen/interface/sched.h>
#include <xen/interface/platform.h>
+struct xen_dm_op_buf;
+
long privcmd_call(unsigned call, unsigned long a1,
unsigned long a2, unsigned long a3,
unsigned long a4, unsigned long a5);
@@ -53,7 +55,8 @@ int HYPERVISOR_physdev_op(int cmd, void *arg);
int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
int HYPERVISOR_tmem_op(void *arg);
int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
-int HYPERVISOR_dm_op(domid_t domid, unsigned int nr_bufs, void *bufs);
+int HYPERVISOR_dm_op(domid_t domid, unsigned int nr_bufs,
+ struct xen_dm_op_buf *bufs);
int HYPERVISOR_platform_op_raw(void *arg);
static inline int HYPERVISOR_platform_op(struct xen_platform_op *op)
{
diff --git a/include/xen/events.h b/include/xen/events.h
index 88da2abaf535..f442ca5fcd82 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -58,6 +58,7 @@ void evtchn_put(unsigned int evtchn);
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
void rebind_evtchn_irq(int evtchn, int irq);
+int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu);
static inline void notify_remote_via_evtchn(int port)
{
diff --git a/include/xen/interface/version.h b/include/xen/interface/version.h
index 7ff6498679a3..145f12f9ecec 100644
--- a/include/xen/interface/version.h
+++ b/include/xen/interface/version.h
@@ -63,4 +63,19 @@ struct xen_feature_info {
/* arg == xen_domain_handle_t. */
#define XENVER_guest_handle 8
+#define XENVER_commandline 9
+struct xen_commandline {
+ char buf[1024];
+};
+
+/*
+ * Return value is the number of bytes written, or XEN_Exx on error.
+ * Calling with empty parameter returns the size of build_id.
+ */
+#define XENVER_build_id 10
+struct xen_build_id {
+ uint32_t len; /* IN: size of buf[]. */
+ unsigned char buf[];
+};
+
#endif /* __XEN_PUBLIC_VERSION_H__ */
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index 1f6d78f044b6..ed2de363da33 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -1,69 +1,9 @@
#ifndef __LINUX_SWIOTLB_XEN_H
#define __LINUX_SWIOTLB_XEN_H
-#include <linux/dma-direction.h>
-#include <linux/scatterlist.h>
#include <linux/swiotlb.h>
extern int xen_swiotlb_init(int verbose, bool early);
+extern const struct dma_map_ops xen_swiotlb_dma_ops;
-extern void
-*xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- unsigned long attrs);
-
-extern void
-xen_swiotlb_free_coherent(struct device *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs);
-
-extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs);
-
-extern void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs);
-extern int
-xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs);
-
-extern void
-xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs);
-
-extern void
-xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir);
-
-extern void
-xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir);
-
-extern void
-xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir);
-
-extern void
-xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir);
-
-extern int
-xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
-
-extern int
-xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);
-
-extern int
-xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs);
-
-extern int
-xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size,
- unsigned long attrs);
#endif /* __LINUX_SWIOTLB_XEN_H */
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index c44a2ee8c8f8..218e6aae5433 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -15,6 +15,8 @@ static inline uint32_t xen_vcpu_nr(int cpu)
return per_cpu(xen_vcpu_id, cpu);
}
+#define XEN_VCPU_ID_INVALID U32_MAX
+
void xen_arch_pre_suspend(void);
void xen_arch_post_suspend(int suspend_cancelled);
diff --git a/init/Kconfig b/init/Kconfig
index ee0f03b69d11..8514b25db21c 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -859,11 +859,14 @@ config CGROUP_BPF
inet sockets.
config CGROUP_DEBUG
- bool "Example controller"
+ bool "Debug controller"
default n
+ depends on DEBUG_KERNEL
help
This option enables a simple controller that exports
- debugging information about the cgroups framework.
+ debugging information about the cgroups framework. This
+ controller is for control cgroup debugging only. Its
+ interfaces are not stable.
Say N.
@@ -1545,6 +1548,20 @@ config SLOB
endchoice
+config SLAB_MERGE_DEFAULT
+ bool "Allow slab caches to be merged"
+ default y
+ help
+ For reduced kernel memory fragmentation, slab caches can be
+ merged when they share the same size and other characteristics.
+ This carries a risk of kernel heap overflows being able to
+ overwrite objects from merged caches (and more easily control
+ cache layout), which makes such heap attacks easier to exploit
+ by attackers. By keeping caches unmerged, these kinds of exploits
+ can usually only damage objects in the same cache. To disable
+ merging at runtime, "slab_nomerge" can be passed on the kernel
+ command line.
+
config SLAB_FREELIST_RANDOM
default n
depends on SLAB || SLUB
diff --git a/ipc/Makefile b/ipc/Makefile
index 86c7300ecdf5..9c200e544434 100644
--- a/ipc/Makefile
+++ b/ipc/Makefile
@@ -5,8 +5,7 @@
obj-$(CONFIG_SYSVIPC_COMPAT) += compat.o
obj-$(CONFIG_SYSVIPC) += util.o msgutil.o msg.o sem.o shm.o syscall.o
obj-$(CONFIG_SYSVIPC_SYSCTL) += ipc_sysctl.o
-obj_mq-$(CONFIG_COMPAT) += compat_mq.o
-obj-$(CONFIG_POSIX_MQUEUE) += mqueue.o msgutil.o $(obj_mq-y)
+obj-$(CONFIG_POSIX_MQUEUE) += mqueue.o msgutil.o
obj-$(CONFIG_IPC_NS) += namespace.o
obj-$(CONFIG_POSIX_MQUEUE_SYSCTL) += mq_sysctl.o
diff --git a/ipc/compat_mq.c b/ipc/compat_mq.c
deleted file mode 100644
index ef6f91cc4490..000000000000
--- a/ipc/compat_mq.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * ipc/compat_mq.c
- * 32 bit emulation for POSIX message queue system calls
- *
- * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author: Arnd Bergmann <[email protected]>
- */
-
-#include <linux/compat.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/mqueue.h>
-#include <linux/syscalls.h>
-
-#include <linux/uaccess.h>
-
-struct compat_mq_attr {
- compat_long_t mq_flags; /* message queue flags */
- compat_long_t mq_maxmsg; /* maximum number of messages */
- compat_long_t mq_msgsize; /* maximum message size */
- compat_long_t mq_curmsgs; /* number of messages currently queued */
- compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
-};
-
-static inline int get_compat_mq_attr(struct mq_attr *attr,
- const struct compat_mq_attr __user *uattr)
-{
- if (!access_ok(VERIFY_READ, uattr, sizeof *uattr))
- return -EFAULT;
-
- return __get_user(attr->mq_flags, &uattr->mq_flags)
- | __get_user(attr->mq_maxmsg, &uattr->mq_maxmsg)
- | __get_user(attr->mq_msgsize, &uattr->mq_msgsize)
- | __get_user(attr->mq_curmsgs, &uattr->mq_curmsgs);
-}
-
-static inline int put_compat_mq_attr(const struct mq_attr *attr,
- struct compat_mq_attr __user *uattr)
-{
- if (clear_user(uattr, sizeof *uattr))
- return -EFAULT;
-
- return __put_user(attr->mq_flags, &uattr->mq_flags)
- | __put_user(attr->mq_maxmsg, &uattr->mq_maxmsg)
- | __put_user(attr->mq_msgsize, &uattr->mq_msgsize)
- | __put_user(attr->mq_curmsgs, &uattr->mq_curmsgs);
-}
-
-COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
- int, oflag, compat_mode_t, mode,
- struct compat_mq_attr __user *, u_attr)
-{
- void __user *p = NULL;
- if (u_attr && oflag & O_CREAT) {
- struct mq_attr attr;
-
- memset(&attr, 0, sizeof(attr));
-
- p = compat_alloc_user_space(sizeof(attr));
- if (get_compat_mq_attr(&attr, u_attr) ||
- copy_to_user(p, &attr, sizeof(attr)))
- return -EFAULT;
- }
- return sys_mq_open(u_name, oflag, mode, p);
-}
-
-COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes,
- const char __user *, u_msg_ptr,
- compat_size_t, msg_len, unsigned int, msg_prio,
- const struct compat_timespec __user *, u_abs_timeout)
-{
- struct timespec __user *u_ts;
-
- if (compat_convert_timespec(&u_ts, u_abs_timeout))
- return -EFAULT;
-
- return sys_mq_timedsend(mqdes, u_msg_ptr, msg_len,
- msg_prio, u_ts);
-}
-
-COMPAT_SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes,
- char __user *, u_msg_ptr,
- compat_size_t, msg_len, unsigned int __user *, u_msg_prio,
- const struct compat_timespec __user *, u_abs_timeout)
-{
- struct timespec __user *u_ts;
-
- if (compat_convert_timespec(&u_ts, u_abs_timeout))
- return -EFAULT;
-
- return sys_mq_timedreceive(mqdes, u_msg_ptr, msg_len,
- u_msg_prio, u_ts);
-}
-
-COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
- const struct compat_sigevent __user *, u_notification)
-{
- struct sigevent __user *p = NULL;
- if (u_notification) {
- struct sigevent n;
- p = compat_alloc_user_space(sizeof(*p));
- if (get_compat_sigevent(&n, u_notification))
- return -EFAULT;
- if (n.sigev_notify == SIGEV_THREAD)
- n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
- if (copy_to_user(p, &n, sizeof(*p)))
- return -EFAULT;
- }
- return sys_mq_notify(mqdes, p);
-}
-
-COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
- const struct compat_mq_attr __user *, u_mqstat,
- struct compat_mq_attr __user *, u_omqstat)
-{
- struct mq_attr mqstat;
- struct mq_attr __user *p = compat_alloc_user_space(2 * sizeof(*p));
- long ret;
-
- memset(&mqstat, 0, sizeof(mqstat));
-
- if (u_mqstat) {
- if (get_compat_mq_attr(&mqstat, u_mqstat) ||
- copy_to_user(p, &mqstat, sizeof(mqstat)))
- return -EFAULT;
- }
- ret = sys_mq_getsetattr(mqdes,
- u_mqstat ? p : NULL,
- u_omqstat ? p + 1 : NULL);
- if (ret)
- return ret;
- if (u_omqstat) {
- if (copy_from_user(&mqstat, p + 1, sizeof(mqstat)) ||
- put_compat_mq_attr(&mqstat, u_omqstat))
- return -EFAULT;
- }
- return 0;
-}
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index e8d41ff57241..c9ff943f19ab 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -668,14 +668,12 @@ static void __do_notify(struct mqueue_inode_info *info)
}
static int prepare_timeout(const struct timespec __user *u_abs_timeout,
- ktime_t *expires, struct timespec *ts)
+ struct timespec *ts)
{
if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
return -EFAULT;
if (!timespec_valid(ts))
return -EINVAL;
-
- *expires = timespec_to_ktime(*ts);
return 0;
}
@@ -770,23 +768,19 @@ static struct file *do_open(struct path *path, int oflag)
return dentry_open(path, oflag, current_cred());
}
-SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
- struct mq_attr __user *, u_attr)
+static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
+ struct mq_attr *attr)
{
struct path path;
struct file *filp;
struct filename *name;
- struct mq_attr attr;
int fd, error;
struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
struct vfsmount *mnt = ipc_ns->mq_mnt;
struct dentry *root = mnt->mnt_root;
int ro;
- if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
- return -EFAULT;
-
- audit_mq_open(oflag, mode, u_attr ? &attr : NULL);
+ audit_mq_open(oflag, mode, attr);
if (IS_ERR(name = getname(u_name)))
return PTR_ERR(name);
@@ -819,9 +813,8 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
goto out;
}
audit_inode_parent_hidden(name, root);
- filp = do_create(ipc_ns, d_inode(root),
- &path, oflag, mode,
- u_attr ? &attr : NULL);
+ filp = do_create(ipc_ns, d_inode(root), &path,
+ oflag, mode, attr);
}
} else {
if (d_really_is_negative(path.dentry)) {
@@ -851,6 +844,16 @@ out_putname:
return fd;
}
+SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
+ struct mq_attr __user *, u_attr)
+{
+ struct mq_attr attr;
+ if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
+ return -EFAULT;
+
+ return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
+}
+
SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
int err;
@@ -957,9 +960,9 @@ static inline void pipelined_receive(struct wake_q_head *wake_q,
sender->state = STATE_READY;
}
-SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
- size_t, msg_len, unsigned int, msg_prio,
- const struct timespec __user *, u_abs_timeout)
+static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
+ size_t msg_len, unsigned int msg_prio,
+ struct timespec *ts)
{
struct fd f;
struct inode *inode;
@@ -968,22 +971,19 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
struct msg_msg *msg_ptr;
struct mqueue_inode_info *info;
ktime_t expires, *timeout = NULL;
- struct timespec ts;
struct posix_msg_tree_node *new_leaf = NULL;
int ret = 0;
DEFINE_WAKE_Q(wake_q);
- if (u_abs_timeout) {
- int res = prepare_timeout(u_abs_timeout, &expires, &ts);
- if (res)
- return res;
- timeout = &expires;
- }
-
if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
return -EINVAL;
- audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL);
+ if (ts) {
+ expires = timespec_to_ktime(*ts);
+ timeout = &expires;
+ }
+
+ audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);
f = fdget(mqdes);
if (unlikely(!f.file)) {
@@ -1078,9 +1078,9 @@ out:
return ret;
}
-SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
- size_t, msg_len, unsigned int __user *, u_msg_prio,
- const struct timespec __user *, u_abs_timeout)
+static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
+ size_t msg_len, unsigned int __user *u_msg_prio,
+ struct timespec *ts)
{
ssize_t ret;
struct msg_msg *msg_ptr;
@@ -1089,17 +1089,14 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
struct mqueue_inode_info *info;
struct ext_wait_queue wait;
ktime_t expires, *timeout = NULL;
- struct timespec ts;
struct posix_msg_tree_node *new_leaf = NULL;
- if (u_abs_timeout) {
- int res = prepare_timeout(u_abs_timeout, &expires, &ts);
- if (res)
- return res;
+ if (ts) {
+ expires = timespec_to_ktime(*ts);
timeout = &expires;
}
- audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL);
+ audit_mq_sendrecv(mqdes, msg_len, 0, ts);
f = fdget(mqdes);
if (unlikely(!f.file)) {
@@ -1183,42 +1180,62 @@ out:
return ret;
}
+SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
+ size_t, msg_len, unsigned int, msg_prio,
+ const struct timespec __user *, u_abs_timeout)
+{
+ struct timespec ts, *p = NULL;
+ if (u_abs_timeout) {
+ int res = prepare_timeout(u_abs_timeout, &ts);
+ if (res)
+ return res;
+ p = &ts;
+ }
+ return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
+}
+
+SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
+ size_t, msg_len, unsigned int __user *, u_msg_prio,
+ const struct timespec __user *, u_abs_timeout)
+{
+ struct timespec ts, *p = NULL;
+ if (u_abs_timeout) {
+ int res = prepare_timeout(u_abs_timeout, &ts);
+ if (res)
+ return res;
+ p = &ts;
+ }
+ return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
+}
+
/*
 * Note: if the caller asks us to deregister (by passing a NULL pointer)
 * but is not currently the owner of the notification, the request is
 * silently discarded. This case is not explicitly defined by POSIX.
*/
-SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
- const struct sigevent __user *, u_notification)
+static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
int ret;
struct fd f;
struct sock *sock;
struct inode *inode;
- struct sigevent notification;
struct mqueue_inode_info *info;
struct sk_buff *nc;
- if (u_notification) {
- if (copy_from_user(&notification, u_notification,
- sizeof(struct sigevent)))
- return -EFAULT;
- }
-
- audit_mq_notify(mqdes, u_notification ? &notification : NULL);
+ audit_mq_notify(mqdes, notification);
nc = NULL;
sock = NULL;
- if (u_notification != NULL) {
- if (unlikely(notification.sigev_notify != SIGEV_NONE &&
- notification.sigev_notify != SIGEV_SIGNAL &&
- notification.sigev_notify != SIGEV_THREAD))
+ if (notification != NULL) {
+ if (unlikely(notification->sigev_notify != SIGEV_NONE &&
+ notification->sigev_notify != SIGEV_SIGNAL &&
+ notification->sigev_notify != SIGEV_THREAD))
return -EINVAL;
- if (notification.sigev_notify == SIGEV_SIGNAL &&
- !valid_signal(notification.sigev_signo)) {
+ if (notification->sigev_notify == SIGEV_SIGNAL &&
+ !valid_signal(notification->sigev_signo)) {
return -EINVAL;
}
- if (notification.sigev_notify == SIGEV_THREAD) {
+ if (notification->sigev_notify == SIGEV_THREAD) {
long timeo;
/* create the notify skb */
@@ -1228,7 +1245,7 @@ SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
goto out;
}
if (copy_from_user(nc->data,
- notification.sigev_value.sival_ptr,
+ notification->sigev_value.sival_ptr,
NOTIFY_COOKIE_LEN)) {
ret = -EFAULT;
goto out;
@@ -1238,7 +1255,7 @@ SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
skb_put(nc, NOTIFY_COOKIE_LEN);
/* and attach it to the socket */
retry:
- f = fdget(notification.sigev_signo);
+ f = fdget(notification->sigev_signo);
if (!f.file) {
ret = -EBADF;
goto out;
@@ -1278,7 +1295,7 @@ retry:
ret = 0;
spin_lock(&info->lock);
- if (u_notification == NULL) {
+ if (notification == NULL) {
if (info->notify_owner == task_tgid(current)) {
remove_notification(info);
inode->i_atime = inode->i_ctime = current_time(inode);
@@ -1286,7 +1303,7 @@ retry:
} else if (info->notify_owner != NULL) {
ret = -EBUSY;
} else {
- switch (notification.sigev_notify) {
+ switch (notification->sigev_notify) {
case SIGEV_NONE:
info->notify.sigev_notify = SIGEV_NONE;
break;
@@ -1298,8 +1315,8 @@ retry:
info->notify.sigev_notify = SIGEV_THREAD;
break;
case SIGEV_SIGNAL:
- info->notify.sigev_signo = notification.sigev_signo;
- info->notify.sigev_value = notification.sigev_value;
+ info->notify.sigev_signo = notification->sigev_signo;
+ info->notify.sigev_value = notification->sigev_value;
info->notify.sigev_notify = SIGEV_SIGNAL;
break;
}
@@ -1320,44 +1337,49 @@ out:
return ret;
}
-SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
- const struct mq_attr __user *, u_mqstat,
- struct mq_attr __user *, u_omqstat)
+SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
+ const struct sigevent __user *, u_notification)
+{
+ struct sigevent n, *p = NULL;
+ if (u_notification) {
+ if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
+ return -EFAULT;
+ p = &n;
+ }
+ return do_mq_notify(mqdes, p);
+}
+
+static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
- int ret;
- struct mq_attr mqstat, omqstat;
struct fd f;
struct inode *inode;
struct mqueue_inode_info *info;
- if (u_mqstat != NULL) {
- if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
- return -EFAULT;
- if (mqstat.mq_flags & (~O_NONBLOCK))
- return -EINVAL;
- }
+ if (new && (new->mq_flags & (~O_NONBLOCK)))
+ return -EINVAL;
f = fdget(mqdes);
- if (!f.file) {
- ret = -EBADF;
- goto out;
- }
+ if (!f.file)
+ return -EBADF;
- inode = file_inode(f.file);
if (unlikely(f.file->f_op != &mqueue_file_operations)) {
- ret = -EBADF;
- goto out_fput;
+ fdput(f);
+ return -EBADF;
}
+
+ inode = file_inode(f.file);
info = MQUEUE_I(inode);
spin_lock(&info->lock);
- omqstat = info->attr;
- omqstat.mq_flags = f.file->f_flags & O_NONBLOCK;
- if (u_mqstat) {
- audit_mq_getsetattr(mqdes, &mqstat);
+ if (old) {
+ *old = info->attr;
+ old->mq_flags = f.file->f_flags & O_NONBLOCK;
+ }
+ if (new) {
+ audit_mq_getsetattr(mqdes, new);
spin_lock(&f.file->f_lock);
- if (mqstat.mq_flags & O_NONBLOCK)
+ if (new->mq_flags & O_NONBLOCK)
f.file->f_flags |= O_NONBLOCK;
else
f.file->f_flags &= ~O_NONBLOCK;
@@ -1367,17 +1389,168 @@ SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
}
spin_unlock(&info->lock);
+ fdput(f);
+ return 0;
+}
- ret = 0;
- if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
- sizeof(struct mq_attr)))
- ret = -EFAULT;
+SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
+ const struct mq_attr __user *, u_mqstat,
+ struct mq_attr __user *, u_omqstat)
+{
+ int ret;
+ struct mq_attr mqstat, omqstat;
+ struct mq_attr *new = NULL, *old = NULL;
-out_fput:
- fdput(f);
-out:
- return ret;
+ if (u_mqstat) {
+ new = &mqstat;
+ if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
+ return -EFAULT;
+ }
+ if (u_omqstat)
+ old = &omqstat;
+
+ ret = do_mq_getsetattr(mqdes, new, old);
+ if (ret || !old)
+ return ret;
+
+ if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
+ return -EFAULT;
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT
+
+struct compat_mq_attr {
+ compat_long_t mq_flags; /* message queue flags */
+ compat_long_t mq_maxmsg; /* maximum number of messages */
+ compat_long_t mq_msgsize; /* maximum message size */
+ compat_long_t mq_curmsgs; /* number of messages currently queued */
+ compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
+};
+
+static inline int get_compat_mq_attr(struct mq_attr *attr,
+ const struct compat_mq_attr __user *uattr)
+{
+ struct compat_mq_attr v;
+
+ if (copy_from_user(&v, uattr, sizeof(*uattr)))
+ return -EFAULT;
+
+ memset(attr, 0, sizeof(*attr));
+ attr->mq_flags = v.mq_flags;
+ attr->mq_maxmsg = v.mq_maxmsg;
+ attr->mq_msgsize = v.mq_msgsize;
+ attr->mq_curmsgs = v.mq_curmsgs;
+ return 0;
+}
+
+static inline int put_compat_mq_attr(const struct mq_attr *attr,
+ struct compat_mq_attr __user *uattr)
+{
+ struct compat_mq_attr v;
+
+ memset(&v, 0, sizeof(v));
+ v.mq_flags = attr->mq_flags;
+ v.mq_maxmsg = attr->mq_maxmsg;
+ v.mq_msgsize = attr->mq_msgsize;
+ v.mq_curmsgs = attr->mq_curmsgs;
+ if (copy_to_user(uattr, &v, sizeof(*uattr)))
+ return -EFAULT;
+ return 0;
+}
+
+COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
+ int, oflag, compat_mode_t, mode,
+ struct compat_mq_attr __user *, u_attr)
+{
+ struct mq_attr attr, *p = NULL;
+ if (u_attr && oflag & O_CREAT) {
+ p = &attr;
+ if (get_compat_mq_attr(&attr, u_attr))
+ return -EFAULT;
+ }
+ return do_mq_open(u_name, oflag, mode, p);
+}
+
+static int compat_prepare_timeout(const struct compat_timespec __user *p,
+ struct timespec *ts)
+{
+ if (compat_get_timespec(ts, p))
+ return -EFAULT;
+ if (!timespec_valid(ts))
+ return -EINVAL;
+ return 0;
+}
+
+COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes,
+ const char __user *, u_msg_ptr,
+ compat_size_t, msg_len, unsigned int, msg_prio,
+ const struct compat_timespec __user *, u_abs_timeout)
+{
+ struct timespec ts, *p = NULL;
+ if (u_abs_timeout) {
+ int res = compat_prepare_timeout(u_abs_timeout, &ts);
+ if (res)
+ return res;
+ p = &ts;
+ }
+ return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
+}
+
+COMPAT_SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes,
+ char __user *, u_msg_ptr,
+ compat_size_t, msg_len, unsigned int __user *, u_msg_prio,
+ const struct compat_timespec __user *, u_abs_timeout)
+{
+ struct timespec ts, *p = NULL;
+ if (u_abs_timeout) {
+ int res = compat_prepare_timeout(u_abs_timeout, &ts);
+ if (res)
+ return res;
+ p = &ts;
+ }
+ return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
+}
+
+COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
+ const struct compat_sigevent __user *, u_notification)
+{
+ struct sigevent n, *p = NULL;
+ if (u_notification) {
+ if (get_compat_sigevent(&n, u_notification))
+ return -EFAULT;
+ if (n.sigev_notify == SIGEV_THREAD)
+ n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
+ p = &n;
+ }
+ return do_mq_notify(mqdes, p);
+}
+
+COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
+ const struct compat_mq_attr __user *, u_mqstat,
+ struct compat_mq_attr __user *, u_omqstat)
+{
+ int ret;
+ struct mq_attr mqstat, omqstat;
+ struct mq_attr *new = NULL, *old = NULL;
+
+ if (u_mqstat) {
+ new = &mqstat;
+ if (get_compat_mq_attr(new, u_mqstat))
+ return -EFAULT;
+ }
+ if (u_omqstat)
+ old = &omqstat;
+
+ ret = do_mq_getsetattr(mqdes, new, old);
+ if (ret || !old)
+ return ret;
+
+ if (put_compat_mq_attr(old, u_omqstat))
+ return -EFAULT;
+ return 0;
}
+#endif
static const struct inode_operations mqueue_dir_inode_operations = {
.lookup = simple_lookup,
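The refactoring above is purely internal: the native and compat entry points now funnel through common do_mq_*() helpers that take kernel-space mq_attr/timespec/sigevent arguments, so the user-visible POSIX API is unchanged. For reference, a minimal librt user of the timed-send path (queue name, attributes and timeout are illustrative; link with -lrt):

#include <fcntl.h>
#include <mqueue.h>
#include <string.h>
#include <time.h>

int send_with_deadline(void)
{
	struct mq_attr attr = { .mq_maxmsg = 8, .mq_msgsize = 64 };
	struct timespec deadline;
	const char *msg = "hello";
	mqd_t q = mq_open("/demo", O_CREAT | O_WRONLY, 0600, &attr);

	if (q == (mqd_t)-1)
		return -1;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5;		/* absolute timeout, five seconds out */

	return mq_timedsend(q, msg, strlen(msg), 0, &deadline);
}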
diff --git a/ipc/shm.c b/ipc/shm.c
index 34c4344e8d4b..f45c7959b264 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -452,7 +452,7 @@ static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
if (!sfd->file->f_op->fsync)
return -EINVAL;
- return call_fsync(sfd->file, start, end, datasync);
+ return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}
static long shm_fallocate(struct file *file, int mode, loff_t offset,
diff --git a/kernel/cgroup/Makefile b/kernel/cgroup/Makefile
index 387348a40c64..ce693ccb8c58 100644
--- a/kernel/cgroup/Makefile
+++ b/kernel/cgroup/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_CGROUP_FREEZER) += freezer.o
obj-$(CONFIG_CGROUP_PIDS) += pids.o
obj-$(CONFIG_CGROUP_RDMA) += rdma.o
obj-$(CONFIG_CPUSETS) += cpuset.o
+obj-$(CONFIG_CGROUP_DEBUG) += debug.o
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index 00f4d6bf048f..793565c05742 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -192,6 +192,8 @@ int cgroup_rmdir(struct kernfs_node *kn);
int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
struct kernfs_root *kf_root);
+int cgroup_task_count(const struct cgroup *cgrp);
+
/*
* namespace.c
*/
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 85d75152402d..7bf4b1533f34 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -334,19 +334,15 @@ static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
/**
* cgroup_task_count - count the number of tasks in a cgroup.
* @cgrp: the cgroup in question
- *
- * Return the number of tasks in the cgroup. The returned number can be
- * higher than the actual number of tasks due to css_set references from
- * namespace roots and temporary usages.
*/
-static int cgroup_task_count(const struct cgroup *cgrp)
+int cgroup_task_count(const struct cgroup *cgrp)
{
int count = 0;
struct cgrp_cset_link *link;
spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &cgrp->cset_links, cset_link)
- count += refcount_read(&link->cset->refcount);
+ count += link->cset->nr_tasks;
spin_unlock_irq(&css_set_lock);
return count;
}
@@ -1263,150 +1259,3 @@ static int __init cgroup_no_v1(char *str)
return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);
-
-
-#ifdef CONFIG_CGROUP_DEBUG
-static struct cgroup_subsys_state *
-debug_css_alloc(struct cgroup_subsys_state *parent_css)
-{
- struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
-
- if (!css)
- return ERR_PTR(-ENOMEM);
-
- return css;
-}
-
-static void debug_css_free(struct cgroup_subsys_state *css)
-{
- kfree(css);
-}
-
-static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
- struct cftype *cft)
-{
- return cgroup_task_count(css->cgroup);
-}
-
-static u64 current_css_set_read(struct cgroup_subsys_state *css,
- struct cftype *cft)
-{
- return (u64)(unsigned long)current->cgroups;
-}
-
-static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
- struct cftype *cft)
-{
- u64 count;
-
- rcu_read_lock();
- count = refcount_read(&task_css_set(current)->refcount);
- rcu_read_unlock();
- return count;
-}
-
-static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
-{
- struct cgrp_cset_link *link;
- struct css_set *cset;
- char *name_buf;
-
- name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
- if (!name_buf)
- return -ENOMEM;
-
- spin_lock_irq(&css_set_lock);
- rcu_read_lock();
- cset = rcu_dereference(current->cgroups);
- list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
- struct cgroup *c = link->cgrp;
-
- cgroup_name(c, name_buf, NAME_MAX + 1);
- seq_printf(seq, "Root %d group %s\n",
- c->root->hierarchy_id, name_buf);
- }
- rcu_read_unlock();
- spin_unlock_irq(&css_set_lock);
- kfree(name_buf);
- return 0;
-}
-
-#define MAX_TASKS_SHOWN_PER_CSS 25
-static int cgroup_css_links_read(struct seq_file *seq, void *v)
-{
- struct cgroup_subsys_state *css = seq_css(seq);
- struct cgrp_cset_link *link;
-
- spin_lock_irq(&css_set_lock);
- list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
- struct css_set *cset = link->cset;
- struct task_struct *task;
- int count = 0;
-
- seq_printf(seq, "css_set %pK\n", cset);
-
- list_for_each_entry(task, &cset->tasks, cg_list) {
- if (count++ > MAX_TASKS_SHOWN_PER_CSS)
- goto overflow;
- seq_printf(seq, " task %d\n", task_pid_vnr(task));
- }
-
- list_for_each_entry(task, &cset->mg_tasks, cg_list) {
- if (count++ > MAX_TASKS_SHOWN_PER_CSS)
- goto overflow;
- seq_printf(seq, " task %d\n", task_pid_vnr(task));
- }
- continue;
- overflow:
- seq_puts(seq, " ...\n");
- }
- spin_unlock_irq(&css_set_lock);
- return 0;
-}
-
-static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
-{
- return (!cgroup_is_populated(css->cgroup) &&
- !css_has_online_children(&css->cgroup->self));
-}
-
-static struct cftype debug_files[] = {
- {
- .name = "taskcount",
- .read_u64 = debug_taskcount_read,
- },
-
- {
- .name = "current_css_set",
- .read_u64 = current_css_set_read,
- },
-
- {
- .name = "current_css_set_refcount",
- .read_u64 = current_css_set_refcount_read,
- },
-
- {
- .name = "current_css_set_cg_links",
- .seq_show = current_css_set_cg_links_read,
- },
-
- {
- .name = "cgroup_css_links",
- .seq_show = cgroup_css_links_read,
- },
-
- {
- .name = "releasable",
- .read_u64 = releasable_read,
- },
-
- { } /* terminate */
-};
-
-struct cgroup_subsys debug_cgrp_subsys = {
- .css_alloc = debug_css_alloc,
- .css_free = debug_css_free,
- .legacy_cftypes = debug_files,
-};
-#endif /* CONFIG_CGROUP_DEBUG */
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 8d4e85eae42c..620794a20a33 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -573,6 +573,11 @@ static int css_set_count = 1; /* 1 for init_css_set */
/**
* css_set_populated - does a css_set contain any tasks?
* @cset: target css_set
+ *
+ * css_set_populated() should be the same as !!cset->nr_tasks at steady
+ * state. However, css_set_populated() can be called while a task is being
+ * added to or removed from the linked list before nr_tasks is
+ * properly updated. Hence, we can't just look at ->nr_tasks here.
*/
static bool css_set_populated(struct css_set *cset)
{
@@ -1542,10 +1547,56 @@ int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
return len;
}
+static int parse_cgroup_root_flags(char *data, unsigned int *root_flags)
+{
+ char *token;
+
+ *root_flags = 0;
+
+ if (!data)
+ return 0;
+
+ while ((token = strsep(&data, ",")) != NULL) {
+ if (!strcmp(token, "nsdelegate")) {
+ *root_flags |= CGRP_ROOT_NS_DELEGATE;
+ continue;
+ }
+
+ pr_err("cgroup2: unknown option \"%s\"\n", token);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void apply_cgroup_root_flags(unsigned int root_flags)
+{
+ if (current->nsproxy->cgroup_ns == &init_cgroup_ns) {
+ if (root_flags & CGRP_ROOT_NS_DELEGATE)
+ cgrp_dfl_root.flags |= CGRP_ROOT_NS_DELEGATE;
+ else
+ cgrp_dfl_root.flags &= ~CGRP_ROOT_NS_DELEGATE;
+ }
+}
+
+static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
+{
+ if (cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE)
+ seq_puts(seq, ",nsdelegate");
+ return 0;
+}
+
static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
- pr_err("remount is not allowed\n");
- return -EINVAL;
+ unsigned int root_flags;
+ int ret;
+
+ ret = parse_cgroup_root_flags(data, &root_flags);
+ if (ret)
+ return ret;
+
+ apply_cgroup_root_flags(root_flags);
+ return 0;
}
/*
@@ -1598,6 +1649,7 @@ static void cgroup_enable_task_cg_lists(void)
css_set_update_populated(cset, true);
list_add_tail(&p->cg_list, &cset->tasks);
get_css_set(cset);
+ cset->nr_tasks++;
}
spin_unlock(&p->sighand->siglock);
} while_each_thread(g, p);
@@ -1784,6 +1836,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
{
struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
struct dentry *dentry;
+ int ret;
get_cgroup_ns(ns);
@@ -1801,16 +1854,21 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
cgroup_enable_task_cg_lists();
if (fs_type == &cgroup2_fs_type) {
- if (data) {
- pr_err("cgroup2: unknown option \"%s\"\n", (char *)data);
+ unsigned int root_flags;
+
+ ret = parse_cgroup_root_flags(data, &root_flags);
+ if (ret) {
put_cgroup_ns(ns);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(ret);
}
+
cgrp_dfl_visible = true;
cgroup_get_live(&cgrp_dfl_root.cgrp);
dentry = cgroup_do_mount(&cgroup2_fs_type, flags, &cgrp_dfl_root,
CGROUP2_SUPER_MAGIC, ns);
+ if (!IS_ERR(dentry))
+ apply_cgroup_root_flags(root_flags);
} else {
dentry = cgroup1_mount(&cgroup_fs_type, flags, data,
CGROUP_SUPER_MAGIC, ns);
@@ -2064,8 +2122,10 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
struct css_set *to_cset = cset->mg_dst_cset;
get_css_set(to_cset);
+ to_cset->nr_tasks++;
css_set_move_task(task, from_cset, to_cset, true);
put_css_set_locked(from_cset);
+ from_cset->nr_tasks--;
}
}
spin_unlock_irq(&css_set_lock);
@@ -2355,27 +2415,14 @@ static int cgroup_procs_write_permission(struct task_struct *task,
struct cgroup *dst_cgrp,
struct kernfs_open_file *of)
{
- int ret = 0;
-
- if (cgroup_on_dfl(dst_cgrp)) {
- struct super_block *sb = of->file->f_path.dentry->d_sb;
- struct cgroup *cgrp;
- struct inode *inode;
-
- spin_lock_irq(&css_set_lock);
- cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
- spin_unlock_irq(&css_set_lock);
-
- while (!cgroup_is_descendant(dst_cgrp, cgrp))
- cgrp = cgroup_parent(cgrp);
+ struct super_block *sb = of->file->f_path.dentry->d_sb;
+ struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
+ struct cgroup *root_cgrp = ns->root_cset->dfl_cgrp;
+ struct cgroup *src_cgrp, *com_cgrp;
+ struct inode *inode;
+ int ret;
- ret = -ENOMEM;
- inode = kernfs_get_inode(sb, cgrp->procs_file.kn);
- if (inode) {
- ret = inode_permission(inode, MAY_WRITE);
- iput(inode);
- }
- } else {
+ if (!cgroup_on_dfl(dst_cgrp)) {
const struct cred *cred = current_cred();
const struct cred *tcred = get_task_cred(task);
@@ -2383,14 +2430,47 @@ static int cgroup_procs_write_permission(struct task_struct *task,
* even if we're attaching all tasks in the thread group,
* we only need to check permissions on one of them.
*/
- if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
- !uid_eq(cred->euid, tcred->uid) &&
- !uid_eq(cred->euid, tcred->suid))
+ if (uid_eq(cred->euid, GLOBAL_ROOT_UID) ||
+ uid_eq(cred->euid, tcred->uid) ||
+ uid_eq(cred->euid, tcred->suid))
+ ret = 0;
+ else
ret = -EACCES;
+
put_cred(tcred);
+ return ret;
}
- return ret;
+ /* find the source cgroup */
+ spin_lock_irq(&css_set_lock);
+ src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
+ spin_unlock_irq(&css_set_lock);
+
+ /* and the common ancestor */
+ com_cgrp = src_cgrp;
+ while (!cgroup_is_descendant(dst_cgrp, com_cgrp))
+ com_cgrp = cgroup_parent(com_cgrp);
+
+ /* %current should be authorized to migrate to the common ancestor */
+ inode = kernfs_get_inode(sb, com_cgrp->procs_file.kn);
+ if (!inode)
+ return -ENOMEM;
+
+ ret = inode_permission(inode, MAY_WRITE);
+ iput(inode);
+ if (ret)
+ return ret;
+
+ /*
+ * If namespaces are delegation boundaries, %current must be able
+ * to see both source and destination cgroups from its namespace.
+ */
+ if ((cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE) &&
+ (!cgroup_is_descendant(src_cgrp, root_cgrp) ||
+ !cgroup_is_descendant(dst_cgrp, root_cgrp)))
+ return -ENOENT;
+
+ return 0;
}
/*
@@ -2954,11 +3034,23 @@ static void cgroup_file_release(struct kernfs_open_file *of)
static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off)
{
+ struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
struct cgroup *cgrp = of->kn->parent->priv;
struct cftype *cft = of->kn->priv;
struct cgroup_subsys_state *css;
int ret;
+ /*
+ * If namespaces are delegation boundaries, disallow writes to
+ * files in a non-init namespace root from inside the namespace
+ * except for the files explicitly marked delegatable -
+ * cgroup.procs and cgroup.subtree_control.
+ */
+ if ((cgrp->root->flags & CGRP_ROOT_NS_DELEGATE) &&
+ !(cft->flags & CFTYPE_NS_DELEGATABLE) &&
+ ns != &init_cgroup_ns && ns->root_cset->dfl_cgrp == cgrp)
+ return -EPERM;
+
if (cft->write)
return cft->write(of, buf, nbytes, off);
@@ -3792,6 +3884,7 @@ static int cgroup_procs_show(struct seq_file *s, void *v)
static struct cftype cgroup_base_files[] = {
{
.name = "cgroup.procs",
+ .flags = CFTYPE_NS_DELEGATABLE,
.file_offset = offsetof(struct cgroup, procs_file),
.release = cgroup_procs_release,
.seq_start = cgroup_procs_start,
@@ -3805,6 +3898,7 @@ static struct cftype cgroup_base_files[] = {
},
{
.name = "cgroup.subtree_control",
+ .flags = CFTYPE_NS_DELEGATABLE,
.seq_show = cgroup_subtree_control_show,
.write = cgroup_subtree_control_write,
},
@@ -4393,6 +4487,7 @@ int cgroup_rmdir(struct kernfs_node *kn)
}
static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
+ .show_options = cgroup_show_options,
.remount_fs = cgroup_remount,
.mkdir = cgroup_mkdir,
.rmdir = cgroup_rmdir,
@@ -4789,6 +4884,7 @@ void cgroup_post_fork(struct task_struct *child)
cset = task_css_set(current);
if (list_empty(&child->cg_list)) {
get_css_set(cset);
+ cset->nr_tasks++;
css_set_move_task(child, NULL, cset, false);
}
spin_unlock_irq(&css_set_lock);
@@ -4838,6 +4934,7 @@ void cgroup_exit(struct task_struct *tsk)
if (!list_empty(&tsk->cg_list)) {
spin_lock_irq(&css_set_lock);
css_set_move_task(tsk, cset, NULL, false);
+ cset->nr_tasks--;
spin_unlock_irq(&css_set_lock);
} else {
get_css_set(cset);
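The nsdelegate behaviour added above is selected purely at mount time on the default hierarchy. A hedged sketch of enabling it by remounting cgroup2; the mount point is assumed to be /sys/fs/cgroup and the option string ends up in parse_cgroup_root_flags():

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Remount the cgroup2 hierarchy with namespace delegation enabled. */
	if (mount("none", "/sys/fs/cgroup", "cgroup2",
		  MS_REMOUNT, "nsdelegate")) {
		perror("remount cgroup2 with nsdelegate");
		return 1;
	}
	return 0;
}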
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index ae643412948a..ca8376e5008c 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1038,40 +1038,25 @@ static void cpuset_post_attach(void)
* @tsk: the task to change
* @newmems: new nodes that the task will be set
*
- * In order to avoid seeing no nodes if the old and new nodes are disjoint,
- * we structure updates as setting all new allowed nodes, then clearing newly
- * disallowed ones.
+ * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
+ * and rebind the task's mempolicy, if it has one. If the task is allocating in
+ * parallel, it might temporarily see an empty intersection, which results in
+ * a seqlock check and retry before OOM or allocation failure.
*/
static void cpuset_change_task_nodemask(struct task_struct *tsk,
nodemask_t *newmems)
{
- bool need_loop;
-
task_lock(tsk);
- /*
- * Determine if a loop is necessary if another thread is doing
- * read_mems_allowed_begin(). If at least one node remains unchanged and
- * tsk does not have a mempolicy, then an empty nodemask will not be
- * possible when mems_allowed is larger than a word.
- */
- need_loop = task_has_mempolicy(tsk) ||
- !nodes_intersects(*newmems, tsk->mems_allowed);
- if (need_loop) {
- local_irq_disable();
- write_seqcount_begin(&tsk->mems_allowed_seq);
- }
+ local_irq_disable();
+ write_seqcount_begin(&tsk->mems_allowed_seq);
nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
- mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
-
- mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
+ mpol_rebind_task(tsk, newmems);
tsk->mems_allowed = *newmems;
- if (need_loop) {
- write_seqcount_end(&tsk->mems_allowed_seq);
- local_irq_enable();
- }
+ write_seqcount_end(&tsk->mems_allowed_seq);
+ local_irq_enable();
task_unlock(tsk);
}
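The rewrite relies on readers of tsk->mems_allowed already using the mems_allowed_seq retry loop, so a transiently empty nodemask only causes another pass rather than a spurious failure. A hedged, kernel-side sketch of that established reader pattern; the helper name is illustrative, while read_mems_allowed_begin()/read_mems_allowed_retry() are the existing cpuset helpers:

#include <linux/cpuset.h>
#include <linux/nodemask.h>
#include <linux/sched.h>

/* Hedged sketch: take a consistent snapshot of the allowed nodes. */
static void snapshot_mems_allowed(nodemask_t *out)
{
	unsigned int cookie;

	do {
		cookie = read_mems_allowed_begin();
		*out = current->mems_allowed;
	} while (read_mems_allowed_retry(cookie));
}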
diff --git a/kernel/cgroup/debug.c b/kernel/cgroup/debug.c
new file mode 100644
index 000000000000..dac46af22782
--- /dev/null
+++ b/kernel/cgroup/debug.c
@@ -0,0 +1,357 @@
+/*
+ * Debug controller
+ *
+ * WARNING: This controller is for cgroup core debugging only.
+ * Its interfaces are unstable and subject to changes at any time.
+ */
+#include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include "cgroup-internal.h"
+
+static struct cgroup_subsys_state *
+debug_css_alloc(struct cgroup_subsys_state *parent_css)
+{
+ struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
+
+ if (!css)
+ return ERR_PTR(-ENOMEM);
+
+ return css;
+}
+
+static void debug_css_free(struct cgroup_subsys_state *css)
+{
+ kfree(css);
+}
+
+/*
+ * debug_taskcount_read - return the number of tasks in a cgroup.
+ * @cgrp: the cgroup in question
+ */
+static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
+{
+ return cgroup_task_count(css->cgroup);
+}
+
+static int current_css_set_read(struct seq_file *seq, void *v)
+{
+ struct kernfs_open_file *of = seq->private;
+ struct css_set *cset;
+ struct cgroup_subsys *ss;
+ struct cgroup_subsys_state *css;
+ int i, refcnt;
+
+ if (!cgroup_kn_lock_live(of->kn, false))
+ return -ENODEV;
+
+ spin_lock_irq(&css_set_lock);
+ rcu_read_lock();
+ cset = rcu_dereference(current->cgroups);
+ refcnt = refcount_read(&cset->refcount);
+ seq_printf(seq, "css_set %pK %d", cset, refcnt);
+ if (refcnt > cset->nr_tasks)
+ seq_printf(seq, " +%d", refcnt - cset->nr_tasks);
+ seq_puts(seq, "\n");
+
+ /*
+ * Print the css'es stored in the current css_set.
+ */
+ for_each_subsys(ss, i) {
+ css = cset->subsys[ss->id];
+ if (!css)
+ continue;
+ seq_printf(seq, "%2d: %-4s\t- %lx[%d]\n", ss->id, ss->name,
+ (unsigned long)css, css->id);
+ }
+ rcu_read_unlock();
+ spin_unlock_irq(&css_set_lock);
+ cgroup_kn_unlock(of->kn);
+ return 0;
+}
+
+static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
+{
+ u64 count;
+
+ rcu_read_lock();
+ count = refcount_read(&task_css_set(current)->refcount);
+ rcu_read_unlock();
+ return count;
+}
+
+static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
+{
+ struct cgrp_cset_link *link;
+ struct css_set *cset;
+ char *name_buf;
+
+ name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
+ if (!name_buf)
+ return -ENOMEM;
+
+ spin_lock_irq(&css_set_lock);
+ rcu_read_lock();
+ cset = rcu_dereference(current->cgroups);
+ list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
+ struct cgroup *c = link->cgrp;
+
+ cgroup_name(c, name_buf, NAME_MAX + 1);
+ seq_printf(seq, "Root %d group %s\n",
+ c->root->hierarchy_id, name_buf);
+ }
+ rcu_read_unlock();
+ spin_unlock_irq(&css_set_lock);
+ kfree(name_buf);
+ return 0;
+}
+
+#define MAX_TASKS_SHOWN_PER_CSS 25
+static int cgroup_css_links_read(struct seq_file *seq, void *v)
+{
+ struct cgroup_subsys_state *css = seq_css(seq);
+ struct cgrp_cset_link *link;
+ int dead_cnt = 0, extra_refs = 0;
+
+ spin_lock_irq(&css_set_lock);
+ list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
+ struct css_set *cset = link->cset;
+ struct task_struct *task;
+ int count = 0;
+ int refcnt = refcount_read(&cset->refcount);
+
+ seq_printf(seq, " %d", refcnt);
+ if (refcnt - cset->nr_tasks > 0) {
+ int extra = refcnt - cset->nr_tasks;
+
+ seq_printf(seq, " +%d", extra);
+ /*
+ * Take out the one additional reference in
+ * init_css_set.
+ */
+ if (cset == &init_css_set)
+ extra--;
+ extra_refs += extra;
+ }
+ seq_puts(seq, "\n");
+
+ list_for_each_entry(task, &cset->tasks, cg_list) {
+ if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
+ seq_printf(seq, " task %d\n",
+ task_pid_vnr(task));
+ }
+
+ list_for_each_entry(task, &cset->mg_tasks, cg_list) {
+ if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
+ seq_printf(seq, " task %d\n",
+ task_pid_vnr(task));
+ }
+ /* show # of overflowed tasks */
+ if (count > MAX_TASKS_SHOWN_PER_CSS)
+ seq_printf(seq, " ... (%d)\n",
+ count - MAX_TASKS_SHOWN_PER_CSS);
+
+ if (cset->dead) {
+ seq_puts(seq, " [dead]\n");
+ dead_cnt++;
+ }
+
+ WARN_ON(count != cset->nr_tasks);
+ }
+ spin_unlock_irq(&css_set_lock);
+
+ if (!dead_cnt && !extra_refs)
+ return 0;
+
+ seq_puts(seq, "\n");
+ if (extra_refs)
+ seq_printf(seq, "extra references = %d\n", extra_refs);
+ if (dead_cnt)
+ seq_printf(seq, "dead css_sets = %d\n", dead_cnt);
+
+ return 0;
+}
+
+static int cgroup_subsys_states_read(struct seq_file *seq, void *v)
+{
+ struct kernfs_open_file *of = seq->private;
+ struct cgroup *cgrp;
+ struct cgroup_subsys *ss;
+ struct cgroup_subsys_state *css;
+ char pbuf[16];
+ int i;
+
+ cgrp = cgroup_kn_lock_live(of->kn, false);
+ if (!cgrp)
+ return -ENODEV;
+
+ for_each_subsys(ss, i) {
+ css = rcu_dereference_check(cgrp->subsys[ss->id], true);
+ if (!css)
+ continue;
+
+ pbuf[0] = '\0';
+
+ /* Show the parent CSS if applicable */
+ if (css->parent)
+ snprintf(pbuf, sizeof(pbuf) - 1, " P=%d",
+ css->parent->id);
+ seq_printf(seq, "%2d: %-4s\t- %lx[%d] %d%s\n", ss->id, ss->name,
+ (unsigned long)css, css->id,
+ atomic_read(&css->online_cnt), pbuf);
+ }
+
+ cgroup_kn_unlock(of->kn);
+ return 0;
+}
+
+static void cgroup_masks_read_one(struct seq_file *seq, const char *name,
+ u16 mask)
+{
+ struct cgroup_subsys *ss;
+ int ssid;
+ bool first = true;
+
+ seq_printf(seq, "%-17s: ", name);
+ for_each_subsys(ss, ssid) {
+ if (!(mask & (1 << ssid)))
+ continue;
+ if (!first)
+ seq_puts(seq, ", ");
+ seq_puts(seq, ss->name);
+ first = false;
+ }
+ seq_putc(seq, '\n');
+}
+
+static int cgroup_masks_read(struct seq_file *seq, void *v)
+{
+ struct kernfs_open_file *of = seq->private;
+ struct cgroup *cgrp;
+
+ cgrp = cgroup_kn_lock_live(of->kn, false);
+ if (!cgrp)
+ return -ENODEV;
+
+ cgroup_masks_read_one(seq, "subtree_control", cgrp->subtree_control);
+ cgroup_masks_read_one(seq, "subtree_ss_mask", cgrp->subtree_ss_mask);
+
+ cgroup_kn_unlock(of->kn);
+ return 0;
+}
+
+static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+ return (!cgroup_is_populated(css->cgroup) &&
+ !css_has_online_children(&css->cgroup->self));
+}
+
+static struct cftype debug_legacy_files[] = {
+ {
+ .name = "taskcount",
+ .read_u64 = debug_taskcount_read,
+ },
+
+ {
+ .name = "current_css_set",
+ .seq_show = current_css_set_read,
+ .flags = CFTYPE_ONLY_ON_ROOT,
+ },
+
+ {
+ .name = "current_css_set_refcount",
+ .read_u64 = current_css_set_refcount_read,
+ .flags = CFTYPE_ONLY_ON_ROOT,
+ },
+
+ {
+ .name = "current_css_set_cg_links",
+ .seq_show = current_css_set_cg_links_read,
+ .flags = CFTYPE_ONLY_ON_ROOT,
+ },
+
+ {
+ .name = "cgroup_css_links",
+ .seq_show = cgroup_css_links_read,
+ },
+
+ {
+ .name = "cgroup_subsys_states",
+ .seq_show = cgroup_subsys_states_read,
+ },
+
+ {
+ .name = "cgroup_masks",
+ .seq_show = cgroup_masks_read,
+ },
+
+ {
+ .name = "releasable",
+ .read_u64 = releasable_read,
+ },
+
+ { } /* terminate */
+};
+
+static struct cftype debug_files[] = {
+ {
+ .name = "taskcount",
+ .read_u64 = debug_taskcount_read,
+ },
+
+ {
+ .name = "current_css_set",
+ .seq_show = current_css_set_read,
+ .flags = CFTYPE_ONLY_ON_ROOT,
+ },
+
+ {
+ .name = "current_css_set_refcount",
+ .read_u64 = current_css_set_refcount_read,
+ .flags = CFTYPE_ONLY_ON_ROOT,
+ },
+
+ {
+ .name = "current_css_set_cg_links",
+ .seq_show = current_css_set_cg_links_read,
+ .flags = CFTYPE_ONLY_ON_ROOT,
+ },
+
+ {
+ .name = "css_links",
+ .seq_show = cgroup_css_links_read,
+ },
+
+ {
+ .name = "csses",
+ .seq_show = cgroup_subsys_states_read,
+ },
+
+ {
+ .name = "masks",
+ .seq_show = cgroup_masks_read,
+ },
+
+ { } /* terminate */
+};
+
+struct cgroup_subsys debug_cgrp_subsys = {
+ .css_alloc = debug_css_alloc,
+ .css_free = debug_css_free,
+ .legacy_cftypes = debug_legacy_files,
+};
+
+/*
+ * On v2, debug is an implicit controller enabled by "cgroup_debug" boot
+ * parameter.
+ */
+static int __init enable_cgroup_debug(char *str)
+{
+ debug_cgrp_subsys.dfl_cftypes = debug_files;
+ debug_cgrp_subsys.implicit_on_dfl = true;
+ return 1;
+}
+__setup("cgroup_debug", enable_cgroup_debug);
diff --git a/kernel/compat.c b/kernel/compat.c
index 0621c8e1ab72..6f0a0e723a06 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -247,53 +247,6 @@ int put_compat_itimerval(struct compat_itimerval __user *o, const struct itimerv
return copy_to_user(o, &v32, sizeof(struct compat_itimerval)) ? -EFAULT : 0;
}
-static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
-{
- return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
-}
-
-COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
-{
- if (tbuf) {
- struct tms tms;
- struct compat_tms tmp;
-
- do_sys_times(&tms);
- /* Convert our struct tms to the compat version. */
- tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
- tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
- tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
- tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
- if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
- return -EFAULT;
- }
- force_successful_syscall_return();
- return compat_jiffies_to_clock_t(jiffies);
-}
-
-#ifdef __ARCH_WANT_SYS_SIGPENDING
-
-/*
- * Assumption: old_sigset_t and compat_old_sigset_t are both
- * types that can be passed to put_user()/get_user().
- */
-
-COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
-{
- old_sigset_t s;
- long ret;
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
- ret = sys_sigpending((old_sigset_t __user *) &s);
- set_fs(old_fs);
- if (ret == 0)
- ret = put_user(s, set);
- return ret;
-}
-
-#endif
-
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/*
@@ -348,94 +301,29 @@ COMPAT_SYSCALL_DEFINE3(sigprocmask, int, how,
#endif
-COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
- struct compat_rlimit __user *, rlim)
-{
- struct rlimit r;
-
- if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
- __get_user(r.rlim_cur, &rlim->rlim_cur) ||
- __get_user(r.rlim_max, &rlim->rlim_max))
- return -EFAULT;
-
- if (r.rlim_cur == COMPAT_RLIM_INFINITY)
- r.rlim_cur = RLIM_INFINITY;
- if (r.rlim_max == COMPAT_RLIM_INFINITY)
- r.rlim_max = RLIM_INFINITY;
- return do_prlimit(current, resource, &r, NULL);
-}
-
-#ifdef COMPAT_RLIM_OLD_INFINITY
-
-COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
- struct compat_rlimit __user *, rlim)
-{
- struct rlimit r;
- int ret;
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
- ret = sys_old_getrlimit(resource, (struct rlimit __user *)&r);
- set_fs(old_fs);
-
- if (!ret) {
- if (r.rlim_cur > COMPAT_RLIM_OLD_INFINITY)
- r.rlim_cur = COMPAT_RLIM_INFINITY;
- if (r.rlim_max > COMPAT_RLIM_OLD_INFINITY)
- r.rlim_max = COMPAT_RLIM_INFINITY;
-
- if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
- __put_user(r.rlim_cur, &rlim->rlim_cur) ||
- __put_user(r.rlim_max, &rlim->rlim_max))
- return -EFAULT;
- }
- return ret;
-}
-
-#endif
-
-COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
- struct compat_rlimit __user *, rlim)
-{
- struct rlimit r;
- int ret;
-
- ret = do_prlimit(current, resource, NULL, &r);
- if (!ret) {
- if (r.rlim_cur > COMPAT_RLIM_INFINITY)
- r.rlim_cur = COMPAT_RLIM_INFINITY;
- if (r.rlim_max > COMPAT_RLIM_INFINITY)
- r.rlim_max = COMPAT_RLIM_INFINITY;
-
- if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
- __put_user(r.rlim_cur, &rlim->rlim_cur) ||
- __put_user(r.rlim_max, &rlim->rlim_max))
- return -EFAULT;
- }
- return ret;
-}
-
int put_compat_rusage(const struct rusage *r, struct compat_rusage __user *ru)
{
- if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)) ||
- __put_user(r->ru_utime.tv_sec, &ru->ru_utime.tv_sec) ||
- __put_user(r->ru_utime.tv_usec, &ru->ru_utime.tv_usec) ||
- __put_user(r->ru_stime.tv_sec, &ru->ru_stime.tv_sec) ||
- __put_user(r->ru_stime.tv_usec, &ru->ru_stime.tv_usec) ||
- __put_user(r->ru_maxrss, &ru->ru_maxrss) ||
- __put_user(r->ru_ixrss, &ru->ru_ixrss) ||
- __put_user(r->ru_idrss, &ru->ru_idrss) ||
- __put_user(r->ru_isrss, &ru->ru_isrss) ||
- __put_user(r->ru_minflt, &ru->ru_minflt) ||
- __put_user(r->ru_majflt, &ru->ru_majflt) ||
- __put_user(r->ru_nswap, &ru->ru_nswap) ||
- __put_user(r->ru_inblock, &ru->ru_inblock) ||
- __put_user(r->ru_oublock, &ru->ru_oublock) ||
- __put_user(r->ru_msgsnd, &ru->ru_msgsnd) ||
- __put_user(r->ru_msgrcv, &ru->ru_msgrcv) ||
- __put_user(r->ru_nsignals, &ru->ru_nsignals) ||
- __put_user(r->ru_nvcsw, &ru->ru_nvcsw) ||
- __put_user(r->ru_nivcsw, &ru->ru_nivcsw))
+ struct compat_rusage r32;
+ memset(&r32, 0, sizeof(r32));
+ r32.ru_utime.tv_sec = r->ru_utime.tv_sec;
+ r32.ru_utime.tv_usec = r->ru_utime.tv_usec;
+ r32.ru_stime.tv_sec = r->ru_stime.tv_sec;
+ r32.ru_stime.tv_usec = r->ru_stime.tv_usec;
+ r32.ru_maxrss = r->ru_maxrss;
+ r32.ru_ixrss = r->ru_ixrss;
+ r32.ru_idrss = r->ru_idrss;
+ r32.ru_isrss = r->ru_isrss;
+ r32.ru_minflt = r->ru_minflt;
+ r32.ru_majflt = r->ru_majflt;
+ r32.ru_nswap = r->ru_nswap;
+ r32.ru_inblock = r->ru_inblock;
+ r32.ru_oublock = r->ru_oublock;
+ r32.ru_msgsnd = r->ru_msgsnd;
+ r32.ru_msgrcv = r->ru_msgrcv;
+ r32.ru_nsignals = r->ru_nsignals;
+ r32.ru_nvcsw = r->ru_nvcsw;
+ r32.ru_nivcsw = r->ru_nivcsw;
+ if (copy_to_user(ru, &r32, sizeof(r32)))
return -EFAULT;
return 0;
}
@@ -565,84 +453,59 @@ int get_compat_sigevent(struct sigevent *event,
long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
unsigned long bitmap_size)
{
- int i, j;
- unsigned long m;
- compat_ulong_t um;
unsigned long nr_compat_longs;
/* align bitmap up to nearest compat_long_t boundary */
bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);
+ nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);
if (!access_ok(VERIFY_READ, umask, bitmap_size / 8))
return -EFAULT;
- nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);
-
- for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
- m = 0;
-
- for (j = 0; j < sizeof(m)/sizeof(um); j++) {
- /*
- * We dont want to read past the end of the userspace
- * bitmap. We must however ensure the end of the
- * kernel bitmap is zeroed.
- */
- if (nr_compat_longs) {
- nr_compat_longs--;
- if (__get_user(um, umask))
- return -EFAULT;
- } else {
- um = 0;
- }
-
- umask++;
- m |= (long)um << (j * BITS_PER_COMPAT_LONG);
- }
- *mask++ = m;
+ user_access_begin();
+ while (nr_compat_longs > 1) {
+ compat_ulong_t l1, l2;
+ unsafe_get_user(l1, umask++, Efault);
+ unsafe_get_user(l2, umask++, Efault);
+ *mask++ = ((unsigned long)l2 << BITS_PER_COMPAT_LONG) | l1;
+ nr_compat_longs -= 2;
}
-
+ if (nr_compat_longs)
+ unsafe_get_user(*mask, umask++, Efault);
+ user_access_end();
return 0;
+
+Efault:
+ user_access_end();
+ return -EFAULT;
}
long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
unsigned long bitmap_size)
{
- int i, j;
- unsigned long m;
- compat_ulong_t um;
unsigned long nr_compat_longs;
/* align bitmap up to nearest compat_long_t boundary */
bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);
+ nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);
if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8))
return -EFAULT;
- nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);
-
- for (i = 0; i < BITS_TO_LONGS(bitmap_size); i++) {
- m = *mask++;
-
- for (j = 0; j < sizeof(m)/sizeof(um); j++) {
- um = m;
-
- /*
- * We dont want to write past the end of the userspace
- * bitmap.
- */
- if (nr_compat_longs) {
- nr_compat_longs--;
- if (__put_user(um, umask))
- return -EFAULT;
- }
-
- umask++;
- m >>= 4*sizeof(um);
- m >>= 4*sizeof(um);
- }
+ user_access_begin();
+ while (nr_compat_longs > 1) {
+ unsigned long m = *mask++;
+ unsafe_put_user((compat_ulong_t)m, umask++, Efault);
+ unsafe_put_user(m >> BITS_PER_COMPAT_LONG, umask++, Efault);
+ nr_compat_longs -= 2;
}
-
+ if (nr_compat_longs)
+ unsafe_put_user((compat_ulong_t)*mask, umask++, Efault);
+ user_access_end();
return 0;
+Efault:
+ user_access_end();
+ return -EFAULT;
}
void
@@ -668,38 +531,6 @@ sigset_to_compat(compat_sigset_t *compat, const sigset_t *set)
}
}
-COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
- struct compat_siginfo __user *, uinfo,
- struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
-{
- compat_sigset_t s32;
- sigset_t s;
- struct timespec t;
- siginfo_t info;
- long ret;
-
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
-
- if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t)))
- return -EFAULT;
- sigset_from_compat(&s, &s32);
-
- if (uts) {
- if (compat_get_timespec(&t, uts))
- return -EFAULT;
- }
-
- ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
-
- if (ret > 0 && uinfo) {
- if (copy_siginfo_to_user32(uinfo, &info))
- ret = -EFAULT;
- }
-
- return ret;
-}
-
#ifdef CONFIG_NUMA
COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
compat_uptr_t __user *, pages32,
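
The rewritten compat_get_bitmap()/compat_put_bitmap() above pack two 32-bit compat words into each 64-bit native word, low word first, instead of the old nested byte-shifting loops. A small userspace sketch of just that packing/unpacking arithmetic (fixed-width types used for clarity; not kernel code):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_COMPAT_LONG 32

static uint64_t pack_compat_pair(uint32_t l1, uint32_t l2)
{
	/* mirrors: *mask++ = ((unsigned long)l2 << BITS_PER_COMPAT_LONG) | l1 */
	return ((uint64_t)l2 << BITS_PER_COMPAT_LONG) | l1;
}

static void unpack_native_word(uint64_t m, uint32_t *l1, uint32_t *l2)
{
	/* mirrors the put side: low word first, then m >> BITS_PER_COMPAT_LONG */
	*l1 = (uint32_t)m;
	*l2 = (uint32_t)(m >> BITS_PER_COMPAT_LONG);
}

int main(void)
{
	uint32_t lo, hi;
	uint64_t m = pack_compat_pair(0xdeadbeef, 0x00c0ffee);

	unpack_native_word(m, &lo, &hi);
	printf("%#llx -> %#x %#x\n", (unsigned long long)m,
	       (unsigned)lo, (unsigned)hi);
	return 0;
}
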
diff --git a/kernel/exit.c b/kernel/exit.c
index b0cc86a2d00b..2bbc23273e2f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -51,7 +51,6 @@
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
-#include <linux/userfaultfd_k.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
diff --git a/kernel/extable.c b/kernel/extable.c
index 0fbdd8582f08..223df4a328a4 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -69,7 +69,7 @@ static inline int init_kernel_text(unsigned long addr)
return 0;
}
-int core_kernel_text(unsigned long addr)
+int notrace core_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext &&
addr < (unsigned long)_etext)
diff --git a/kernel/fork.c b/kernel/fork.c
index e53770d2bf95..aa01b810c0bd 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -326,8 +326,8 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
}
/* All stack pages belong to the same memcg. */
- memcg_kmem_update_page_stat(vm->pages[0], MEMCG_KERNEL_STACK_KB,
- account * (THREAD_SIZE / 1024));
+ mod_memcg_page_state(vm->pages[0], MEMCG_KERNEL_STACK_KB,
+ account * (THREAD_SIZE / 1024));
} else {
/*
* All stack pages are in the same zone and belong to the
@@ -338,8 +338,8 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
THREAD_SIZE / 1024 * account);
- memcg_kmem_update_page_stat(first_page, MEMCG_KERNEL_STACK_KB,
- account * (THREAD_SIZE / 1024));
+ mod_memcg_page_state(first_page, MEMCG_KERNEL_STACK_KB,
+ account * (THREAD_SIZE / 1024));
}
}
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index d2747f9c5707..d69bd77252a7 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -110,6 +110,13 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
struct cpumask *masks;
cpumask_var_t nmsk, *node_to_present_cpumask;
+ /*
+ * If there aren't any vectors left after applying the pre/post
+ * vectors, don't bother with assigning affinity.
+ */
+ if (!affv)
+ return NULL;
+
if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
return NULL;
@@ -192,15 +199,19 @@ out:
/**
* irq_calc_affinity_vectors - Calculate the optimal number of vectors
+ * @minvec: The minimum number of vectors available
* @maxvec: The maximum number of vectors available
* @affd: Description of the affinity requirements
*/
-int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
+int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
{
int resv = affd->pre_vectors + affd->post_vectors;
int vecs = maxvec - resv;
int ret;
+ if (resv > minvec)
+ return 0;
+
get_online_cpus();
ret = min_t(int, cpumask_weight(cpu_present_mask), vecs) + resv;
put_online_cpus();
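
The accounting in irq_calc_affinity_vectors() is easy to restate in isolation: the pre/post vectors are reserved and excluded from spreading, the remainder is capped at one vector per present CPU, and the new check returns 0 when the reservation alone exceeds the caller's minimum. A hedged, self-contained sketch (present_cpus stands in for cpumask_weight(cpu_present_mask), and struct affinity_desc is a stand-in for the fields of struct irq_affinity used here):

#include <stdio.h>

struct affinity_desc { int pre_vectors; int post_vectors; };

static int calc_affinity_vectors(int minvec, int maxvec,
				 const struct affinity_desc *affd,
				 int present_cpus)
{
	int resv = affd->pre_vectors + affd->post_vectors;
	int vecs = maxvec - resv;

	/* new early exit: the reserved vectors alone exceed the minimum */
	if (resv > minvec)
		return 0;

	/* spread over at most one vector per present CPU, plus the reserved ones */
	return (present_cpus < vecs ? present_cpus : vecs) + resv;
}

int main(void)
{
	struct affinity_desc affd = { .pre_vectors = 1, .post_vectors = 1 };

	printf("%d\n", calc_affinity_vectors(2, 16, &affd, 8)); /* -> 10 */
	printf("%d\n", calc_affinity_vectors(1, 16, &affd, 8)); /* -> 0 */
	return 0;
}
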
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index cc3ed0ccdfa2..2655f26ec882 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -20,6 +20,7 @@
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
+#include <linux/spinlock.h>
#include <asm/qrwlock.h>
/*
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index e6b2f7ad3e51..4ccfcaae5b89 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -193,7 +193,8 @@ void __init __pv_init_lock_hash(void)
*/
pv_lock_hash = alloc_large_system_hash("PV qspinlock",
sizeof(struct pv_hash_entry),
- pv_hash_size, 0, HASH_EARLY,
+ pv_hash_size, 0,
+ HASH_EARLY | HASH_ZERO,
&pv_lock_hash_bits, NULL,
pv_hash_size, pv_hash_size);
}
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 23a6483c3666..124bed776532 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -358,7 +358,11 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
goto err_pfn_remap;
mem_hotplug_begin();
- error = arch_add_memory(nid, align_start, align_size, true);
+ error = arch_add_memory(nid, align_start, align_size, false);
+ if (!error)
+ move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
+ align_start >> PAGE_SHIFT,
+ align_size >> PAGE_SHIFT);
mem_hotplug_done();
if (error)
goto err_add_memory;
diff --git a/kernel/module.c b/kernel/module.c
index d7eb41d772c4..b3dbdde82e80 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -49,9 +49,7 @@
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
-#ifdef CONFIG_STRICT_MODULE_RWX
-#include <asm/set_memory.h>
-#endif
+#include <linux/set_memory.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
@@ -3074,9 +3072,9 @@ static int find_module_sections(struct module *mod, struct load_info *info)
mod->trace_events = section_objs(info, "_ftrace_events",
sizeof(*mod->trace_events),
&mod->num_trace_events);
- mod->trace_enums = section_objs(info, "_ftrace_enum_map",
- sizeof(*mod->trace_enums),
- &mod->num_trace_enums);
+ mod->trace_evals = section_objs(info, "_ftrace_eval_map",
+ sizeof(*mod->trace_evals),
+ &mod->num_trace_evals);
#endif
#ifdef CONFIG_TRACING
mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
diff --git a/kernel/pid.c b/kernel/pid.c
index fd1cde1e4576..731c4e528f4e 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -575,16 +575,13 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
*/
void __init pidhash_init(void)
{
- unsigned int i, pidhash_size;
+ unsigned int pidhash_size;
pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
- HASH_EARLY | HASH_SMALL,
+ HASH_EARLY | HASH_SMALL | HASH_ZERO,
&pidhash_shift, NULL,
0, 4096);
pidhash_size = 1U << pidhash_shift;
-
- for (i = 0; i < pidhash_size; i++)
- INIT_HLIST_HEAD(&pid_hash[i]);
}
void __init pidmap_init(void)
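
The dropped INIT_HLIST_HEAD() loop works because of the new HASH_ZERO flag: an empty hlist head is just a NULL 'first' pointer, so a zero-filled table is already fully initialized. A tiny userspace demonstration (the struct layouts mirror include/linux/types.h; zero bytes reading back as NULL pointers holds on every platform Linux supports):

#include <assert.h>
#include <string.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

int main(void)
{
	struct hlist_head table[16];

	memset(table, 0, sizeof(table));        /* what HASH_ZERO does for us */
	for (unsigned i = 0; i < 16; i++)
		assert(table[i].first == NULL); /* same state INIT_HLIST_HEAD() gives */
	return 0;
}
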
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index b7708e319941..222317721c5a 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -30,15 +30,13 @@
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
+#include <linux/set_memory.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-#include <asm/set_memory.h>
-#endif
#include "power.h"
diff --git a/kernel/signal.c b/kernel/signal.c
index 35a570f71f07..48a59eefd8ad 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2776,7 +2776,7 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
* @info: if non-null, the signal's siginfo is returned here
* @ts: upper bound on process time suspension
*/
-int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
+static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
const struct timespec *ts)
{
ktime_t *to = NULL, timeout = KTIME_MAX;
@@ -2865,6 +2865,40 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
return ret;
}
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
+ struct compat_siginfo __user *, uinfo,
+ struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
+{
+ compat_sigset_t s32;
+ sigset_t s;
+ struct timespec t;
+ siginfo_t info;
+ long ret;
+
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t)))
+ return -EFAULT;
+ sigset_from_compat(&s, &s32);
+
+ if (uts) {
+ if (compat_get_timespec(&t, uts))
+ return -EFAULT;
+ }
+
+ ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
+
+ if (ret > 0 && uinfo) {
+ if (copy_siginfo_to_user32(uinfo, &info))
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+#endif
+
/**
* sys_kill - send a signal to a process
* @pid: the PID of the process
@@ -3121,78 +3155,68 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
}
static int
-do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
+do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp)
{
- stack_t oss;
- int error;
+ struct task_struct *t = current;
- oss.ss_sp = (void __user *) current->sas_ss_sp;
- oss.ss_size = current->sas_ss_size;
- oss.ss_flags = sas_ss_flags(sp) |
- (current->sas_ss_flags & SS_FLAG_BITS);
+ if (oss) {
+ memset(oss, 0, sizeof(stack_t));
+ oss->ss_sp = (void __user *) t->sas_ss_sp;
+ oss->ss_size = t->sas_ss_size;
+ oss->ss_flags = sas_ss_flags(sp) |
+ (current->sas_ss_flags & SS_FLAG_BITS);
+ }
- if (uss) {
- void __user *ss_sp;
- size_t ss_size;
- unsigned ss_flags;
+ if (ss) {
+ void __user *ss_sp = ss->ss_sp;
+ size_t ss_size = ss->ss_size;
+ unsigned ss_flags = ss->ss_flags;
int ss_mode;
- error = -EFAULT;
- if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
- goto out;
- error = __get_user(ss_sp, &uss->ss_sp) |
- __get_user(ss_flags, &uss->ss_flags) |
- __get_user(ss_size, &uss->ss_size);
- if (error)
- goto out;
-
- error = -EPERM;
- if (on_sig_stack(sp))
- goto out;
+ if (unlikely(on_sig_stack(sp)))
+ return -EPERM;
ss_mode = ss_flags & ~SS_FLAG_BITS;
- error = -EINVAL;
- if (ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
- ss_mode != 0)
- goto out;
+ if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
+ ss_mode != 0))
+ return -EINVAL;
if (ss_mode == SS_DISABLE) {
ss_size = 0;
ss_sp = NULL;
} else {
- error = -ENOMEM;
- if (ss_size < MINSIGSTKSZ)
- goto out;
+ if (unlikely(ss_size < MINSIGSTKSZ))
+ return -ENOMEM;
}
- current->sas_ss_sp = (unsigned long) ss_sp;
- current->sas_ss_size = ss_size;
- current->sas_ss_flags = ss_flags;
+ t->sas_ss_sp = (unsigned long) ss_sp;
+ t->sas_ss_size = ss_size;
+ t->sas_ss_flags = ss_flags;
}
-
- error = 0;
- if (uoss) {
- error = -EFAULT;
- if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
- goto out;
- error = __put_user(oss.ss_sp, &uoss->ss_sp) |
- __put_user(oss.ss_size, &uoss->ss_size) |
- __put_user(oss.ss_flags, &uoss->ss_flags);
- }
-
-out:
- return error;
+ return 0;
}
+
SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
{
- return do_sigaltstack(uss, uoss, current_user_stack_pointer());
+ stack_t new, old;
+ int err;
+ if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
+ return -EFAULT;
+ err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
+ current_user_stack_pointer());
+ if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
+ err = -EFAULT;
+ return err;
}
int restore_altstack(const stack_t __user *uss)
{
- int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
+ stack_t new;
+ if (copy_from_user(&new, uss, sizeof(stack_t)))
+ return -EFAULT;
+ (void)do_sigaltstack(&new, NULL, current_user_stack_pointer());
/* squash all but EFAULT for now */
- return err == -EFAULT ? err : 0;
+ return 0;
}
int __save_altstack(stack_t __user *uss, unsigned long sp)
@@ -3215,29 +3239,24 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
{
stack_t uss, uoss;
int ret;
- mm_segment_t seg;
if (uss_ptr) {
compat_stack_t uss32;
-
- memset(&uss, 0, sizeof(stack_t));
if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
return -EFAULT;
uss.ss_sp = compat_ptr(uss32.ss_sp);
uss.ss_flags = uss32.ss_flags;
uss.ss_size = uss32.ss_size;
}
- seg = get_fs();
- set_fs(KERNEL_DS);
- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
- (stack_t __force __user *) &uoss,
+ ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
compat_user_stack_pointer());
- set_fs(seg);
if (ret >= 0 && uoss_ptr) {
- if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
- __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
- __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
- __put_user(uoss.ss_size, &uoss_ptr->ss_size))
+ compat_stack_t old;
+ memset(&old, 0, sizeof(old));
+ old.ss_sp = ptr_to_compat(uoss.ss_sp);
+ old.ss_flags = uoss.ss_flags;
+ old.ss_size = uoss.ss_size;
+ if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
ret = -EFAULT;
}
return ret;
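
The reworked do_sigaltstack() now operates on kernel copies of stack_t, with the syscall wrappers doing the copy_from_user()/copy_to_user(), but the user-visible contract is unchanged. For reference, a minimal userspace example of installing an alternate signal stack, showing the fields the code above validates (SS_DISABLE versus a size of at least MINSIGSTKSZ); plain POSIX, nothing beyond the standard headers assumed:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	stack_t ss = { 0 }, old;

	ss.ss_sp = malloc(SIGSTKSZ);
	if (!ss.ss_sp)
		return 1;
	ss.ss_size = SIGSTKSZ;   /* must be >= MINSIGSTKSZ or the kernel returns -ENOMEM */
	ss.ss_flags = 0;         /* SS_DISABLE would make the kernel ignore sp/size */

	if (sigaltstack(&ss, &old) == -1) {
		perror("sigaltstack");
		return 1;
	}
	printf("previous ss_flags: %d\n", old.ss_flags);
	return 0;
}
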
@@ -3277,6 +3296,18 @@ SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
+{
+ sigset_t set;
+ int err = do_sigpending(&set, sizeof(old_sigset_t));
+ if (err == 0)
+ if (copy_to_user(set32, &set, sizeof(old_sigset_t)))
+ err = -EFAULT;
+ return err;
+}
+#endif
+
#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
diff --git a/kernel/sys.c b/kernel/sys.c
index dab1a0658a92..47d901586b4e 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -886,7 +886,7 @@ SYSCALL_DEFINE0(getegid)
return from_kgid_munged(current_user_ns(), current_egid());
}
-void do_sys_times(struct tms *tms)
+static void do_sys_times(struct tms *tms)
{
u64 tgutime, tgstime, cutime, cstime;
@@ -912,6 +912,32 @@ SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
+#ifdef CONFIG_COMPAT
+static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
+{
+ return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
+}
+
+COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
+{
+ if (tbuf) {
+ struct tms tms;
+ struct compat_tms tmp;
+
+ do_sys_times(&tms);
+ /* Convert our struct tms to the compat version. */
+ tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
+ tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
+ tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
+ tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
+ if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
+ return -EFAULT;
+ }
+ force_successful_syscall_return();
+ return compat_jiffies_to_clock_t(jiffies);
+}
+#endif
+
/*
* This needs some heavy checking ...
* I just haven't the stomach for it. I also don't fully
@@ -1306,6 +1332,54 @@ SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
return ret;
}
+#ifdef CONFIG_COMPAT
+
+COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
+ struct compat_rlimit __user *, rlim)
+{
+ struct rlimit r;
+ struct compat_rlimit r32;
+
+ if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
+ return -EFAULT;
+
+ if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
+ r.rlim_cur = RLIM_INFINITY;
+ else
+ r.rlim_cur = r32.rlim_cur;
+ if (r32.rlim_max == COMPAT_RLIM_INFINITY)
+ r.rlim_max = RLIM_INFINITY;
+ else
+ r.rlim_max = r32.rlim_max;
+ return do_prlimit(current, resource, &r, NULL);
+}
+
+COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
+ struct compat_rlimit __user *, rlim)
+{
+ struct rlimit r;
+ int ret;
+
+ ret = do_prlimit(current, resource, NULL, &r);
+ if (!ret) {
+ struct rlimit r32;
+ if (r.rlim_cur > COMPAT_RLIM_INFINITY)
+ r32.rlim_cur = COMPAT_RLIM_INFINITY;
+ else
+ r32.rlim_cur = r.rlim_cur;
+ if (r.rlim_max > COMPAT_RLIM_INFINITY)
+ r32.rlim_max = COMPAT_RLIM_INFINITY;
+ else
+ r32.rlim_max = r.rlim_max;
+
+ if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
+ return -EFAULT;
+ }
+ return ret;
+}
+
+#endif
+
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
/*
@@ -1328,6 +1402,30 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
+ struct compat_rlimit __user *, rlim)
+{
+ struct rlimit r;
+
+ if (resource >= RLIM_NLIMITS)
+ return -EINVAL;
+
+ task_lock(current->group_leader);
+ r = current->signal->rlim[resource];
+ task_unlock(current->group_leader);
+ if (r.rlim_cur > 0x7FFFFFFF)
+ r.rlim_cur = 0x7FFFFFFF;
+ if (r.rlim_max > 0x7FFFFFFF)
+ r.rlim_max = 0x7FFFFFFF;
+
+ if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
+ put_user(r.rlim_max, &rlim->rlim_max))
+ return -EFAULT;
+ return 0;
+}
+#endif
+
#endif
static inline bool rlim64_is_infinity(__u64 rlim64)
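
The new compat getrlimit paths above reduce to a clamping conversion: any 64-bit limit that does not fit in the 32-bit representation becomes the compat "infinite" sentinel, and the legacy old_getrlimit variant clamps to 0x7fffffff instead. A hedged sketch of that conversion; the COMPAT_RLIM_INFINITY value used here (0xffffffff) is an assumption for illustration, since the real constant is architecture-defined:

#include <stdint.h>
#include <stdio.h>

#define RLIM_INFINITY_64      (~0ULL)
#define COMPAT_RLIM_INFINITY  0xffffffffULL  /* assumed value; arch-defined in the kernel */
#define OLD_RLIM_MAX          0x7fffffffULL  /* clamp used by compat old_getrlimit above */

static uint32_t rlim_to_compat(uint64_t v)
{
	/* anything that does not fit (including infinity) becomes the
	 * 32-bit "infinite" sentinel, mirroring the compat getrlimit path */
	return v > COMPAT_RLIM_INFINITY ? (uint32_t)COMPAT_RLIM_INFINITY : (uint32_t)v;
}

static uint32_t rlim_to_old_compat(uint64_t v)
{
	/* the legacy interface clamps to the largest positive signed value */
	return v > OLD_RLIM_MAX ? (uint32_t)OLD_RLIM_MAX : (uint32_t)v;
}

int main(void)
{
	printf("%#x %#x\n", (unsigned)rlim_to_compat(RLIM_INFINITY_64),
	       (unsigned)rlim_to_old_compat(RLIM_INFINITY_64));
	return 0;
}
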
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 7e06f04e98fe..434c840e2d82 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -667,30 +667,30 @@ config RING_BUFFER_STARTUP_TEST
If unsure, say N
-config TRACE_ENUM_MAP_FILE
- bool "Show enum mappings for trace events"
+config TRACE_EVAL_MAP_FILE
+ bool "Show eval mappings for trace events"
depends on TRACING
help
- The "print fmt" of the trace events will show the enum names instead
- of their values. This can cause problems for user space tools that
- use this string to parse the raw data as user space does not know
+ The "print fmt" of the trace events will show the enum/sizeof names
+ instead of their values. This can cause problems for user space tools
+ that use this string to parse the raw data as user space does not know
how to convert the string to its value.
To fix this, there's a special macro in the kernel that can be used
- to convert the enum into its value. If this macro is used, then the
- print fmt strings will have the enums converted to their values.
+ to convert an enum/sizeof into its value. If this macro is used, then
+ the print fmt strings will be converted to their values.
If something does not get converted properly, this option can be
- used to show what enums the kernel tried to convert.
+ used to show what enums/sizeof the kernel tried to convert.
- This option is for debugging the enum conversions. A file is created
- in the tracing directory called "enum_map" that will show the enum
+ This option is for debugging the conversions. A file is created
+ in the tracing directory called "eval_map" that will show the
names matched with their values and what trace event system they
belong to.
Normally, the mapping of the strings to values will be freed after
boot up or module load. With this option, they will not be freed, as
- they are needed for the "enum_map" file. Enabling this option will
+ they are needed for the "eval_map" file. Enabling this option will
increase the memory footprint of the running kernel.
If unsure, say N
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b308be30dfb9..2953d558bbee 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1293,6 +1293,28 @@ static void ftrace_hash_clear(struct ftrace_hash *hash)
FTRACE_WARN_ON(hash->count);
}
+static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
+{
+ list_del(&ftrace_mod->list);
+ kfree(ftrace_mod->module);
+ kfree(ftrace_mod->func);
+ kfree(ftrace_mod);
+}
+
+static void clear_ftrace_mod_list(struct list_head *head)
+{
+ struct ftrace_mod_load *p, *n;
+
+ /* stack tracer isn't supported yet */
+ if (!head)
+ return;
+
+ mutex_lock(&ftrace_lock);
+ list_for_each_entry_safe(p, n, head, list)
+ free_ftrace_mod(p);
+ mutex_unlock(&ftrace_lock);
+}
+
static void free_ftrace_hash(struct ftrace_hash *hash)
{
if (!hash || hash == EMPTY_HASH)
@@ -1346,6 +1368,35 @@ static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
return hash;
}
+
+static int ftrace_add_mod(struct trace_array *tr,
+ const char *func, const char *module,
+ int enable)
+{
+ struct ftrace_mod_load *ftrace_mod;
+ struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
+
+ ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
+ if (!ftrace_mod)
+ return -ENOMEM;
+
+ ftrace_mod->func = kstrdup(func, GFP_KERNEL);
+ ftrace_mod->module = kstrdup(module, GFP_KERNEL);
+ ftrace_mod->enable = enable;
+
+ if (!ftrace_mod->func || !ftrace_mod->module)
+ goto out_free;
+
+ list_add(&ftrace_mod->list, mod_head);
+
+ return 0;
+
+ out_free:
+ free_ftrace_mod(ftrace_mod);
+
+ return -ENOMEM;
+}
+
static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
@@ -1359,6 +1410,9 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
if (!new_hash)
return NULL;
+ if (hash)
+ new_hash->flags = hash->flags;
+
/* Empty hash? */
if (ftrace_hash_empty(hash))
return new_hash;
@@ -1403,7 +1457,7 @@ __ftrace_hash_move(struct ftrace_hash *src)
/*
* If the new source is empty, just return the empty_hash.
*/
- if (!src->count)
+ if (ftrace_hash_empty(src))
return EMPTY_HASH;
/*
@@ -1420,6 +1474,8 @@ __ftrace_hash_move(struct ftrace_hash *src)
if (!new_hash)
return NULL;
+ new_hash->flags = src->flags;
+
size = 1 << src->size_bits;
for (i = 0; i < size; i++) {
hhd = &src->buckets[i];
@@ -1650,7 +1706,7 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
struct dyn_ftrace *rec;
bool update = false;
int count = 0;
- int all = 0;
+ int all = false;
/* Only update if the ops has been registered */
if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
@@ -1671,7 +1727,7 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
hash = ops->func_hash->filter_hash;
other_hash = ops->func_hash->notrace_hash;
if (ftrace_hash_empty(hash))
- all = 1;
+ all = true;
} else {
inc = !inc;
hash = ops->func_hash->notrace_hash;
@@ -3061,6 +3117,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
struct ftrace_iterator {
loff_t pos;
loff_t func_pos;
+ loff_t mod_pos;
struct ftrace_page *pg;
struct dyn_ftrace *func;
struct ftrace_func_probe *probe;
@@ -3068,6 +3125,8 @@ struct ftrace_iterator {
struct trace_parser parser;
struct ftrace_hash *hash;
struct ftrace_ops *ops;
+ struct trace_array *tr;
+ struct list_head *mod_list;
int pidx;
int idx;
unsigned flags;
@@ -3152,13 +3211,13 @@ static void *t_probe_start(struct seq_file *m, loff_t *pos)
if (!(iter->flags & FTRACE_ITER_DO_PROBES))
return NULL;
- if (iter->func_pos > *pos)
+ if (iter->mod_pos > *pos)
return NULL;
iter->probe = NULL;
iter->probe_entry = NULL;
iter->pidx = 0;
- for (l = 0; l <= (*pos - iter->func_pos); ) {
+ for (l = 0; l <= (*pos - iter->mod_pos); ) {
p = t_probe_next(m, &l);
if (!p)
break;
@@ -3197,6 +3256,82 @@ t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
}
static void *
+t_mod_next(struct seq_file *m, loff_t *pos)
+{
+ struct ftrace_iterator *iter = m->private;
+ struct trace_array *tr = iter->tr;
+
+ (*pos)++;
+ iter->pos = *pos;
+
+ iter->mod_list = iter->mod_list->next;
+
+ if (iter->mod_list == &tr->mod_trace ||
+ iter->mod_list == &tr->mod_notrace) {
+ iter->flags &= ~FTRACE_ITER_MOD;
+ return NULL;
+ }
+
+ iter->mod_pos = *pos;
+
+ return iter;
+}
+
+static void *t_mod_start(struct seq_file *m, loff_t *pos)
+{
+ struct ftrace_iterator *iter = m->private;
+ void *p = NULL;
+ loff_t l;
+
+ if (iter->func_pos > *pos)
+ return NULL;
+
+ iter->mod_pos = iter->func_pos;
+
+ /* cached module filters are only available if tr is set */
+ if (!iter->tr)
+ return NULL;
+
+ for (l = 0; l <= (*pos - iter->func_pos); ) {
+ p = t_mod_next(m, &l);
+ if (!p)
+ break;
+ }
+ if (!p) {
+ iter->flags &= ~FTRACE_ITER_MOD;
+ return t_probe_start(m, pos);
+ }
+
+ /* Only set this if we have an item */
+ iter->flags |= FTRACE_ITER_MOD;
+
+ return iter;
+}
+
+static int
+t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
+{
+ struct ftrace_mod_load *ftrace_mod;
+ struct trace_array *tr = iter->tr;
+
+ if (WARN_ON_ONCE(!iter->mod_list) ||
+ iter->mod_list == &tr->mod_trace ||
+ iter->mod_list == &tr->mod_notrace)
+ return -EIO;
+
+ ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
+
+ if (ftrace_mod->func)
+ seq_printf(m, "%s", ftrace_mod->func);
+ else
+ seq_putc(m, '*');
+
+ seq_printf(m, ":mod:%s\n", ftrace_mod->module);
+
+ return 0;
+}
+
+static void *
t_func_next(struct seq_file *m, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
@@ -3237,7 +3372,7 @@ static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
- loff_t l = *pos; /* t_hash_start() must use original pos */
+ loff_t l = *pos; /* t_probe_start() must use original pos */
void *ret;
if (unlikely(ftrace_disabled))
@@ -3246,16 +3381,19 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
if (iter->flags & FTRACE_ITER_PROBE)
return t_probe_next(m, pos);
+ if (iter->flags & FTRACE_ITER_MOD)
+ return t_mod_next(m, pos);
+
if (iter->flags & FTRACE_ITER_PRINTALL) {
/* next must increment pos, and t_probe_start does not */
(*pos)++;
- return t_probe_start(m, &l);
+ return t_mod_start(m, &l);
}
ret = t_func_next(m, pos);
if (!ret)
- return t_probe_start(m, &l);
+ return t_mod_start(m, &l);
return ret;
}
@@ -3264,7 +3402,7 @@ static void reset_iter_read(struct ftrace_iterator *iter)
{
iter->pos = 0;
iter->func_pos = 0;
- iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE);
+ iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
}
static void *t_start(struct seq_file *m, loff_t *pos)
@@ -3293,15 +3431,15 @@ static void *t_start(struct seq_file *m, loff_t *pos)
ftrace_hash_empty(iter->hash)) {
iter->func_pos = 1; /* Account for the message */
if (*pos > 0)
- return t_probe_start(m, pos);
+ return t_mod_start(m, pos);
iter->flags |= FTRACE_ITER_PRINTALL;
/* reset in case of seek/pread */
iter->flags &= ~FTRACE_ITER_PROBE;
return iter;
}
- if (iter->flags & FTRACE_ITER_PROBE)
- return t_probe_start(m, pos);
+ if (iter->flags & FTRACE_ITER_MOD)
+ return t_mod_start(m, pos);
/*
* Unfortunately, we need to restart at ftrace_pages_start
@@ -3317,7 +3455,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
}
if (!p)
- return t_probe_start(m, pos);
+ return t_mod_start(m, pos);
return iter;
}
@@ -3351,6 +3489,9 @@ static int t_show(struct seq_file *m, void *v)
if (iter->flags & FTRACE_ITER_PROBE)
return t_probe_show(m, iter);
+ if (iter->flags & FTRACE_ITER_MOD)
+ return t_mod_show(m, iter);
+
if (iter->flags & FTRACE_ITER_PRINTALL) {
if (iter->flags & FTRACE_ITER_NOTRACE)
seq_puts(m, "#### no functions disabled ####\n");
@@ -3457,6 +3598,8 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
{
struct ftrace_iterator *iter;
struct ftrace_hash *hash;
+ struct list_head *mod_head;
+ struct trace_array *tr = ops->private;
int ret = 0;
ftrace_ops_init(ops);
@@ -3475,21 +3618,29 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
iter->ops = ops;
iter->flags = flag;
+ iter->tr = tr;
mutex_lock(&ops->func_hash->regex_lock);
- if (flag & FTRACE_ITER_NOTRACE)
+ if (flag & FTRACE_ITER_NOTRACE) {
hash = ops->func_hash->notrace_hash;
- else
+ mod_head = tr ? &tr->mod_notrace : NULL;
+ } else {
hash = ops->func_hash->filter_hash;
+ mod_head = tr ? &tr->mod_trace : NULL;
+ }
+
+ iter->mod_list = mod_head;
if (file->f_mode & FMODE_WRITE) {
const int size_bits = FTRACE_HASH_DEFAULT_BITS;
- if (file->f_flags & O_TRUNC)
+ if (file->f_flags & O_TRUNC) {
iter->hash = alloc_ftrace_hash(size_bits);
- else
+ clear_ftrace_mod_list(mod_head);
+ } else {
iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
+ }
if (!iter->hash) {
trace_parser_put(&iter->parser);
@@ -3761,6 +3912,163 @@ static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
return ret;
}
+static bool module_exists(const char *module)
+{
+ /* All modules have the symbol __this_module */
+ const char this_mod[] = "__this_module";
+ const int modname_size = MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 1;
+ char modname[modname_size + 1];
+ unsigned long val;
+ int n;
+
+ n = snprintf(modname, modname_size + 1, "%s:%s", module, this_mod);
+
+ if (n > modname_size)
+ return false;
+
+ val = module_kallsyms_lookup_name(modname);
+ return val != 0;
+}
+
+static int cache_mod(struct trace_array *tr,
+ const char *func, char *module, int enable)
+{
+ struct ftrace_mod_load *ftrace_mod, *n;
+ struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
+ int ret;
+
+ mutex_lock(&ftrace_lock);
+
+ /* We do not cache inverse filters */
+ if (func[0] == '!') {
+ func++;
+ ret = -EINVAL;
+
+ /* Look to remove this hash */
+ list_for_each_entry_safe(ftrace_mod, n, head, list) {
+ if (strcmp(ftrace_mod->module, module) != 0)
+ continue;
+
+ /* no func matches all */
+ if (!func || strcmp(func, "*") == 0 ||
+ (ftrace_mod->func &&
+ strcmp(ftrace_mod->func, func) == 0)) {
+ ret = 0;
+ free_ftrace_mod(ftrace_mod);
+ continue;
+ }
+ }
+ goto out;
+ }
+
+ ret = -EINVAL;
+ /* We only care about modules that have not been loaded yet */
+ if (module_exists(module))
+ goto out;
+
+ /* Save this string off, and execute it when the module is loaded */
+ ret = ftrace_add_mod(tr, func, module, enable);
+ out:
+ mutex_unlock(&ftrace_lock);
+
+ return ret;
+}
+
+static int
+ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
+ int reset, int enable);
+
+static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
+ char *mod, bool enable)
+{
+ struct ftrace_mod_load *ftrace_mod, *n;
+ struct ftrace_hash **orig_hash, *new_hash;
+ LIST_HEAD(process_mods);
+ char *func;
+ int ret;
+
+ mutex_lock(&ops->func_hash->regex_lock);
+
+ if (enable)
+ orig_hash = &ops->func_hash->filter_hash;
+ else
+ orig_hash = &ops->func_hash->notrace_hash;
+
+ new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
+ *orig_hash);
+ if (!new_hash)
+ goto out; /* warn? */
+
+ mutex_lock(&ftrace_lock);
+
+ list_for_each_entry_safe(ftrace_mod, n, head, list) {
+
+ if (strcmp(ftrace_mod->module, mod) != 0)
+ continue;
+
+ if (ftrace_mod->func)
+ func = kstrdup(ftrace_mod->func, GFP_KERNEL);
+ else
+ func = kstrdup("*", GFP_KERNEL);
+
+ if (!func) /* warn? */
+ continue;
+
+ list_del(&ftrace_mod->list);
+ list_add(&ftrace_mod->list, &process_mods);
+
+ /* Use the newly allocated func, as it may be "*" */
+ kfree(ftrace_mod->func);
+ ftrace_mod->func = func;
+ }
+
+ mutex_unlock(&ftrace_lock);
+
+ list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
+
+ func = ftrace_mod->func;
+
+ /* Grabs ftrace_lock, which is why we have this extra step */
+ match_records(new_hash, func, strlen(func), mod);
+ free_ftrace_mod(ftrace_mod);
+ }
+
+ if (enable && list_empty(head))
+ new_hash->flags &= ~FTRACE_HASH_FL_MOD;
+
+ mutex_lock(&ftrace_lock);
+
+ ret = ftrace_hash_move_and_update_ops(ops, orig_hash,
+ new_hash, enable);
+ mutex_unlock(&ftrace_lock);
+
+ out:
+ mutex_unlock(&ops->func_hash->regex_lock);
+
+ free_ftrace_hash(new_hash);
+}
+
+static void process_cached_mods(const char *mod_name)
+{
+ struct trace_array *tr;
+ char *mod;
+
+ mod = kstrdup(mod_name, GFP_KERNEL);
+ if (!mod)
+ return;
+
+ mutex_lock(&trace_types_lock);
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ if (!list_empty(&tr->mod_trace))
+ process_mod_list(&tr->mod_trace, tr->ops, mod, true);
+ if (!list_empty(&tr->mod_notrace))
+ process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
+ }
+ mutex_unlock(&trace_types_lock);
+
+ kfree(mod);
+}
+
/*
* We register the module command as a template to show others how
* to register the a command as well.
@@ -3768,10 +4076,16 @@ static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
static int
ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
- char *func, char *cmd, char *module, int enable)
+ char *func_orig, char *cmd, char *module, int enable)
{
+ char *func;
int ret;
+ /* match_records() modifies func, and we need the original */
+ func = kstrdup(func_orig, GFP_KERNEL);
+ if (!func)
+ return -ENOMEM;
+
/*
* cmd == 'mod' because we only registered this func
* for the 'mod' ftrace_func_command.
@@ -3780,8 +4094,10 @@ ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
* parameter.
*/
ret = match_records(hash, func, strlen(func), module);
+ kfree(func);
+
if (!ret)
- return -EINVAL;
+ return cache_mod(tr, func_orig, module, enable);
if (ret < 0)
return ret;
return 0;
@@ -4725,9 +5041,11 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
if (file->f_mode & FMODE_WRITE) {
filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
- if (filter_hash)
+ if (filter_hash) {
orig_hash = &iter->ops->func_hash->filter_hash;
- else
+ if (iter->tr && !list_empty(&iter->tr->mod_trace))
+ iter->hash->flags |= FTRACE_HASH_FL_MOD;
+ } else
orig_hash = &iter->ops->func_hash->notrace_hash;
mutex_lock(&ftrace_lock);
@@ -5385,6 +5703,7 @@ void ftrace_release_mod(struct module *mod)
if (pg == ftrace_pages)
ftrace_pages = next_to_ftrace_page(last_pg);
+ ftrace_update_tot_cnt -= pg->index;
*last_pg = pg->next;
order = get_count_order(pg->size / ENTRIES_PER_PAGE);
free_pages((unsigned long)pg->records, order);
@@ -5463,6 +5782,8 @@ void ftrace_module_enable(struct module *mod)
out_unlock:
mutex_unlock(&ftrace_lock);
+
+ process_cached_mods(mod->name);
}
void ftrace_module_init(struct module *mod)
@@ -5501,6 +5822,7 @@ void __init ftrace_free_init_mem(void)
if (!rec)
continue;
pg->index--;
+ ftrace_update_tot_cnt--;
if (!pg->index) {
*last_pg = pg->next;
order = get_count_order(pg->size / ENTRIES_PER_PAGE);
@@ -5567,6 +5889,8 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
void ftrace_init_trace_array(struct trace_array *tr)
{
INIT_LIST_HEAD(&tr->func_probes);
+ INIT_LIST_HEAD(&tr->mod_trace);
+ INIT_LIST_HEAD(&tr->mod_notrace);
}
#else
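
The cache_mod()/process_cached_mods() machinery added above lets a filter such as "func:mod:driver" be written before the module exists: the string is parked on tr->mod_trace/mod_notrace and replayed from ftrace_module_enable() when the module loads. A hedged, userspace-style sketch of that park-then-replay idea with a toy linked list; all names here are invented for the example and this is not the kernel implementation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pending_filter {
	char *func;
	char *module;
	struct pending_filter *next;
};

static struct pending_filter *pending;

/* remember a filter for a module that is not loaded yet */
static int cache_filter(const char *func, const char *module)
{
	struct pending_filter *p = calloc(1, sizeof(*p));

	if (!p)
		return -1;
	p->func = strdup(func);
	p->module = strdup(module);
	p->next = pending;
	pending = p;
	return 0;
}

/* replay (and drop) every filter cached for the module that just loaded */
static void module_loaded(const char *module)
{
	struct pending_filter **pp = &pending;

	while (*pp) {
		struct pending_filter *p = *pp;

		if (strcmp(p->module, module) == 0) {
			printf("enable tracing of %s in %s\n", p->func, p->module);
			*pp = p->next;
			free(p->func);
			free(p->module);
			free(p);
		} else {
			pp = &p->next;
		}
	}
}

int main(void)
{
	cache_filter("my_irq_handler", "mydriver");  /* before the module exists */
	module_loaded("mydriver");                   /* filter applied at load time */
	return 0;
}
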
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 091e801145c9..948ec32e0c27 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -87,7 +87,7 @@ dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
* tracing is active, only save the comm when a trace event
* occurred.
*/
-static DEFINE_PER_CPU(bool, trace_cmdline_save);
+static DEFINE_PER_CPU(bool, trace_taskinfo_save);
/*
* Kill all tracing for good (never come back).
@@ -120,41 +120,41 @@ enum ftrace_dump_mode ftrace_dump_on_oops;
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
-#ifdef CONFIG_TRACE_ENUM_MAP_FILE
-/* Map of enums to their values, for "enum_map" file */
-struct trace_enum_map_head {
+#ifdef CONFIG_TRACE_EVAL_MAP_FILE
+/* Map of enums to their values, for "eval_map" file */
+struct trace_eval_map_head {
struct module *mod;
unsigned long length;
};
-union trace_enum_map_item;
+union trace_eval_map_item;
-struct trace_enum_map_tail {
+struct trace_eval_map_tail {
/*
* "end" is first and points to NULL as it must be different
- * than "mod" or "enum_string"
+ * than "mod" or "eval_string"
*/
- union trace_enum_map_item *next;
+ union trace_eval_map_item *next;
const char *end; /* points to NULL */
};
-static DEFINE_MUTEX(trace_enum_mutex);
+static DEFINE_MUTEX(trace_eval_mutex);
/*
- * The trace_enum_maps are saved in an array with two extra elements,
+ * The trace_eval_maps are saved in an array with two extra elements,
* one at the beginning, and one at the end. The beginning item contains
* the count of the saved maps (head.length), and the module they
* belong to if not built in (head.mod). The ending item contains a
- * pointer to the next array of saved enum_map items.
+ * pointer to the next array of saved eval_map items.
*/
-union trace_enum_map_item {
- struct trace_enum_map map;
- struct trace_enum_map_head head;
- struct trace_enum_map_tail tail;
+union trace_eval_map_item {
+ struct trace_eval_map map;
+ struct trace_eval_map_head head;
+ struct trace_eval_map_tail tail;
};
-static union trace_enum_map_item *trace_enum_maps;
-#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
+static union trace_eval_map_item *trace_eval_maps;
+#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
static int tracing_set_tracer(struct trace_array *tr, const char *buf);
@@ -790,7 +790,7 @@ EXPORT_SYMBOL_GPL(tracing_on);
static __always_inline void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
- __this_cpu_write(trace_cmdline_save, true);
+ __this_cpu_write(trace_taskinfo_save, true);
/* If this is the temp buffer, we need to commit fully */
if (this_cpu_read(trace_buffered_event) == event) {
@@ -1141,9 +1141,9 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
/*
* TRACE_FLAGS is defined as a tuple matching bit masks with strings.
- * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
+ * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
* matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
- * of strings in the order that the enums were defined.
+ * of strings in the order that the evals (enum) were defined.
*/
#undef C
#define C(a, b) b
@@ -1709,6 +1709,8 @@ void tracing_reset_all_online_cpus(void)
}
}
+static int *tgid_map;
+
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
@@ -1722,7 +1724,7 @@ struct saved_cmdlines_buffer {
static struct saved_cmdlines_buffer *savedcmd;
/* temporary disable recording */
-static atomic_t trace_record_cmdline_disabled __read_mostly;
+static atomic_t trace_record_taskinfo_disabled __read_mostly;
static inline char *get_saved_cmdlines(int idx)
{
@@ -1910,8 +1912,6 @@ static void tracing_stop_tr(struct trace_array *tr)
raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
-void trace_stop_cmdline_recording(void);
-
static int trace_save_cmdline(struct task_struct *tsk)
{
unsigned pid, idx;
@@ -1992,16 +1992,87 @@ void trace_find_cmdline(int pid, char comm[])
preempt_enable();
}
-void tracing_record_cmdline(struct task_struct *tsk)
+int trace_find_tgid(int pid)
+{
+ if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
+ return 0;
+
+ return tgid_map[pid];
+}
+
+static int trace_save_tgid(struct task_struct *tsk)
+{
+ if (unlikely(!tgid_map || !tsk->pid || tsk->pid > PID_MAX_DEFAULT))
+ return 0;
+
+ tgid_map[tsk->pid] = tsk->tgid;
+ return 1;
+}
+
+static bool tracing_record_taskinfo_skip(int flags)
+{
+ if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
+ return true;
+ if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
+ return true;
+ if (!__this_cpu_read(trace_taskinfo_save))
+ return true;
+ return false;
+}
+
+/**
+ * tracing_record_taskinfo - record the task info of a task
+ *
+ * @task - task to record
+ * @flags - TRACE_RECORD_CMDLINE for recording comm
+ * - TRACE_RECORD_TGID for recording tgid
+ */
+void tracing_record_taskinfo(struct task_struct *task, int flags)
+{
+ if (tracing_record_taskinfo_skip(flags))
+ return;
+ if ((flags & TRACE_RECORD_CMDLINE) && !trace_save_cmdline(task))
+ return;
+ if ((flags & TRACE_RECORD_TGID) && !trace_save_tgid(task))
+ return;
+
+ __this_cpu_write(trace_taskinfo_save, false);
+}
+
+/**
+ * tracing_record_taskinfo_sched_switch - record task info for sched_switch
+ *
+ * @prev - previous task during sched_switch
+ * @next - next task during sched_switch
+ * @flags - TRACE_RECORD_CMDLINE for recording comm
+ * TRACE_RECORD_TGID for recording tgid
+ */
+void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
+ struct task_struct *next, int flags)
{
- if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
+ if (tracing_record_taskinfo_skip(flags))
+ return;
+
+ if ((flags & TRACE_RECORD_CMDLINE) &&
+ (!trace_save_cmdline(prev) || !trace_save_cmdline(next)))
return;
- if (!__this_cpu_read(trace_cmdline_save))
+ if ((flags & TRACE_RECORD_TGID) &&
+ (!trace_save_tgid(prev) || !trace_save_tgid(next)))
return;
- if (trace_save_cmdline(tsk))
- __this_cpu_write(trace_cmdline_save, false);
+ __this_cpu_write(trace_taskinfo_save, false);
+}
+
+/* Helpers to record a specific task information */
+void tracing_record_cmdline(struct task_struct *task)
+{
+ tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
+}
+
+void tracing_record_tgid(struct task_struct *task)
+{
+ tracing_record_taskinfo(task, TRACE_RECORD_TGID);
}
/*
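
trace_save_tgid()/trace_find_tgid() above store the thread-group id in a flat array indexed by pid and sized PID_MAX_DEFAULT + 1, allocated lazily when the record-tgid option is switched on. A small userspace mirror of that lookup table (PID_MAX_DEFAULT taken as the kernel's default of 32768):

#include <stdio.h>

#define PID_MAX_DEFAULT 32768  /* same default ceiling the kernel uses */

static int tgid_map[PID_MAX_DEFAULT + 1];  /* index: pid, value: tgid (0 = unknown) */

static int save_tgid(int pid, int tgid)
{
	if (pid <= 0 || pid > PID_MAX_DEFAULT)
		return 0;
	tgid_map[pid] = tgid;
	return 1;
}

static int find_tgid(int pid)
{
	if (pid <= 0 || pid > PID_MAX_DEFAULT)
		return 0;              /* unknown pid -> 0, as in the code above */
	return tgid_map[pid];
}

int main(void)
{
	save_tgid(1234, 1200);
	printf("tgid of 1234: %d\n", find_tgid(1234));
	return 0;
}
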
@@ -3146,7 +3217,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
#endif
if (!iter->snapshot)
- atomic_inc(&trace_record_cmdline_disabled);
+ atomic_inc(&trace_record_taskinfo_disabled);
if (*pos != iter->pos) {
iter->ent = NULL;
@@ -3191,7 +3262,7 @@ static void s_stop(struct seq_file *m, void *p)
#endif
if (!iter->snapshot)
- atomic_dec(&trace_record_cmdline_disabled);
+ atomic_dec(&trace_record_taskinfo_disabled);
trace_access_unlock(iter->cpu_file);
trace_event_read_unlock();
@@ -3248,23 +3319,29 @@ static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
seq_puts(m, "#\n");
}
-static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
+static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
+ unsigned int flags)
{
+ bool tgid = flags & TRACE_ITER_RECORD_TGID;
+
print_event_info(buf, m);
- seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
- "# | | | | |\n");
+
+ seq_printf(m, "# TASK-PID CPU# %s TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
+ seq_printf(m, "# | | | %s | |\n", tgid ? " | " : "");
}
-static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
+static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
+ unsigned int flags)
{
- print_event_info(buf, m);
- seq_puts(m, "# _-----=> irqs-off\n"
- "# / _----=> need-resched\n"
- "# | / _---=> hardirq/softirq\n"
- "# || / _--=> preempt-depth\n"
- "# ||| / delay\n"
- "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
- "# | | | |||| | |\n");
+ bool tgid = flags & TRACE_ITER_RECORD_TGID;
+
+ seq_printf(m, "# %s _-----=> irqs-off\n", tgid ? " " : "");
+ seq_printf(m, "# %s / _----=> need-resched\n", tgid ? " " : "");
+ seq_printf(m, "# %s| / _---=> hardirq/softirq\n", tgid ? " " : "");
+ seq_printf(m, "# %s|| / _--=> preempt-depth\n", tgid ? " " : "");
+ seq_printf(m, "# %s||| / delay\n", tgid ? " " : "");
+ seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
+ seq_printf(m, "# | | | %s|||| | |\n", tgid ? " | " : "");
}
void
@@ -3580,9 +3657,11 @@ void trace_default_header(struct seq_file *m)
} else {
if (!(trace_flags & TRACE_ITER_VERBOSE)) {
if (trace_flags & TRACE_ITER_IRQ_INFO)
- print_func_help_header_irq(iter->trace_buffer, m);
+ print_func_help_header_irq(iter->trace_buffer,
+ m, trace_flags);
else
- print_func_help_header(iter->trace_buffer, m);
+ print_func_help_header(iter->trace_buffer, m,
+ trace_flags);
}
}
}
@@ -4238,6 +4317,18 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
if (mask == TRACE_ITER_RECORD_CMD)
trace_event_enable_cmd_record(enabled);
+ if (mask == TRACE_ITER_RECORD_TGID) {
+ if (!tgid_map)
+ tgid_map = kzalloc((PID_MAX_DEFAULT + 1) * sizeof(*tgid_map),
+ GFP_KERNEL);
+ if (!tgid_map) {
+ tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
+ return -ENOMEM;
+ }
+
+ trace_event_enable_tgid_record(enabled);
+ }
+
if (mask == TRACE_ITER_EVENT_FORK)
trace_event_follow_fork(tr, enabled);
@@ -4473,7 +4564,8 @@ static const char readme_msg[] =
#endif
#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
"\t accepts: event-definitions (one definition per line)\n"
- "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
+ "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
+ "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
"\t -:[<group>/]<event>\n"
#ifdef CONFIG_KPROBE_EVENTS
"\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
@@ -4746,11 +4838,11 @@ static const struct file_operations tracing_saved_cmdlines_size_fops = {
.write = tracing_saved_cmdlines_size_write,
};
-#ifdef CONFIG_TRACE_ENUM_MAP_FILE
-static union trace_enum_map_item *
-update_enum_map(union trace_enum_map_item *ptr)
+#ifdef CONFIG_TRACE_EVAL_MAP_FILE
+static union trace_eval_map_item *
+update_eval_map(union trace_eval_map_item *ptr)
{
- if (!ptr->map.enum_string) {
+ if (!ptr->map.eval_string) {
if (ptr->tail.next) {
ptr = ptr->tail.next;
/* Set ptr to the next real item (skip head) */
@@ -4761,15 +4853,15 @@ update_enum_map(union trace_enum_map_item *ptr)
return ptr;
}
-static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
+static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
{
- union trace_enum_map_item *ptr = v;
+ union trace_eval_map_item *ptr = v;
/*
* Paranoid! If ptr points to end, we don't want to increment past it.
* This really should never happen.
*/
- ptr = update_enum_map(ptr);
+ ptr = update_eval_map(ptr);
if (WARN_ON_ONCE(!ptr))
return NULL;
@@ -4777,104 +4869,104 @@ static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
(*pos)++;
- ptr = update_enum_map(ptr);
+ ptr = update_eval_map(ptr);
return ptr;
}
-static void *enum_map_start(struct seq_file *m, loff_t *pos)
+static void *eval_map_start(struct seq_file *m, loff_t *pos)
{
- union trace_enum_map_item *v;
+ union trace_eval_map_item *v;
loff_t l = 0;
- mutex_lock(&trace_enum_mutex);
+ mutex_lock(&trace_eval_mutex);
- v = trace_enum_maps;
+ v = trace_eval_maps;
if (v)
v++;
while (v && l < *pos) {
- v = enum_map_next(m, v, &l);
+ v = eval_map_next(m, v, &l);
}
return v;
}
-static void enum_map_stop(struct seq_file *m, void *v)
+static void eval_map_stop(struct seq_file *m, void *v)
{
- mutex_unlock(&trace_enum_mutex);
+ mutex_unlock(&trace_eval_mutex);
}
-static int enum_map_show(struct seq_file *m, void *v)
+static int eval_map_show(struct seq_file *m, void *v)
{
- union trace_enum_map_item *ptr = v;
+ union trace_eval_map_item *ptr = v;
seq_printf(m, "%s %ld (%s)\n",
- ptr->map.enum_string, ptr->map.enum_value,
+ ptr->map.eval_string, ptr->map.eval_value,
ptr->map.system);
return 0;
}
-static const struct seq_operations tracing_enum_map_seq_ops = {
- .start = enum_map_start,
- .next = enum_map_next,
- .stop = enum_map_stop,
- .show = enum_map_show,
+static const struct seq_operations tracing_eval_map_seq_ops = {
+ .start = eval_map_start,
+ .next = eval_map_next,
+ .stop = eval_map_stop,
+ .show = eval_map_show,
};
-static int tracing_enum_map_open(struct inode *inode, struct file *filp)
+static int tracing_eval_map_open(struct inode *inode, struct file *filp)
{
if (tracing_disabled)
return -ENODEV;
- return seq_open(filp, &tracing_enum_map_seq_ops);
+ return seq_open(filp, &tracing_eval_map_seq_ops);
}
-static const struct file_operations tracing_enum_map_fops = {
- .open = tracing_enum_map_open,
+static const struct file_operations tracing_eval_map_fops = {
+ .open = tracing_eval_map_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
-static inline union trace_enum_map_item *
-trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
+static inline union trace_eval_map_item *
+trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
{
/* Return tail of array given the head */
return ptr + ptr->head.length + 1;
}
static void
-trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
+trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
int len)
{
- struct trace_enum_map **stop;
- struct trace_enum_map **map;
- union trace_enum_map_item *map_array;
- union trace_enum_map_item *ptr;
+ struct trace_eval_map **stop;
+ struct trace_eval_map **map;
+ union trace_eval_map_item *map_array;
+ union trace_eval_map_item *ptr;
stop = start + len;
/*
- * The trace_enum_maps contains the map plus a head and tail item,
+ * The trace_eval_maps contains the map plus a head and tail item,
* where the head holds the module and length of array, and the
* tail holds a pointer to the next list.
*/
map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
if (!map_array) {
- pr_warn("Unable to allocate trace enum mapping\n");
+ pr_warn("Unable to allocate trace eval mapping\n");
return;
}
- mutex_lock(&trace_enum_mutex);
+ mutex_lock(&trace_eval_mutex);
- if (!trace_enum_maps)
- trace_enum_maps = map_array;
+ if (!trace_eval_maps)
+ trace_eval_maps = map_array;
else {
- ptr = trace_enum_maps;
+ ptr = trace_eval_maps;
for (;;) {
- ptr = trace_enum_jmp_to_tail(ptr);
+ ptr = trace_eval_jmp_to_tail(ptr);
if (!ptr->tail.next)
break;
ptr = ptr->tail.next;
@@ -4892,34 +4984,34 @@ trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
}
memset(map_array, 0, sizeof(*map_array));
- mutex_unlock(&trace_enum_mutex);
+ mutex_unlock(&trace_eval_mutex);
}
-static void trace_create_enum_file(struct dentry *d_tracer)
+static void trace_create_eval_file(struct dentry *d_tracer)
{
- trace_create_file("enum_map", 0444, d_tracer,
- NULL, &tracing_enum_map_fops);
+ trace_create_file("eval_map", 0444, d_tracer,
+ NULL, &tracing_eval_map_fops);
}
-#else /* CONFIG_TRACE_ENUM_MAP_FILE */
-static inline void trace_create_enum_file(struct dentry *d_tracer) { }
-static inline void trace_insert_enum_map_file(struct module *mod,
- struct trace_enum_map **start, int len) { }
-#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
+#else /* CONFIG_TRACE_EVAL_MAP_FILE */
+static inline void trace_create_eval_file(struct dentry *d_tracer) { }
+static inline void trace_insert_eval_map_file(struct module *mod,
+ struct trace_eval_map **start, int len) { }
+#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
-static void trace_insert_enum_map(struct module *mod,
- struct trace_enum_map **start, int len)
+static void trace_insert_eval_map(struct module *mod,
+ struct trace_eval_map **start, int len)
{
- struct trace_enum_map **map;
+ struct trace_eval_map **map;
if (len <= 0)
return;
map = start;
- trace_event_enum_update(map, len);
+ trace_event_eval_update(map, len);
- trace_insert_enum_map_file(mod, start, len);
+ trace_insert_eval_map_file(mod, start, len);
}
static ssize_t
@@ -6739,33 +6831,18 @@ static const struct file_operations tracing_stats_fops = {
#ifdef CONFIG_DYNAMIC_FTRACE
-int __weak ftrace_arch_read_dyn_info(char *buf, int size)
-{
- return 0;
-}
-
static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- static char ftrace_dyn_info_buffer[1024];
- static DEFINE_MUTEX(dyn_info_mutex);
unsigned long *p = filp->private_data;
- char *buf = ftrace_dyn_info_buffer;
- int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
+ char buf[64]; /* Not too big for a shallow stack */
int r;
- mutex_lock(&dyn_info_mutex);
- r = sprintf(buf, "%ld ", *p);
-
- r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
+ r = scnprintf(buf, 63, "%ld", *p);
buf[r++] = '\n';
- r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-
- mutex_unlock(&dyn_info_mutex);
-
- return r;
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static const struct file_operations tracing_dyn_info_fops = {
@@ -7737,21 +7814,21 @@ struct dentry *tracing_init_dentry(void)
return NULL;
}
-extern struct trace_enum_map *__start_ftrace_enum_maps[];
-extern struct trace_enum_map *__stop_ftrace_enum_maps[];
+extern struct trace_eval_map *__start_ftrace_eval_maps[];
+extern struct trace_eval_map *__stop_ftrace_eval_maps[];
-static void __init trace_enum_init(void)
+static void __init trace_eval_init(void)
{
int len;
- len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
- trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
+ len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
+ trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}
#ifdef CONFIG_MODULES
-static void trace_module_add_enums(struct module *mod)
+static void trace_module_add_evals(struct module *mod)
{
- if (!mod->num_trace_enums)
+ if (!mod->num_trace_evals)
return;
/*
@@ -7761,40 +7838,40 @@ static void trace_module_add_enums(struct module *mod)
if (trace_module_has_bad_taint(mod))
return;
- trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
+ trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}
-#ifdef CONFIG_TRACE_ENUM_MAP_FILE
-static void trace_module_remove_enums(struct module *mod)
+#ifdef CONFIG_TRACE_EVAL_MAP_FILE
+static void trace_module_remove_evals(struct module *mod)
{
- union trace_enum_map_item *map;
- union trace_enum_map_item **last = &trace_enum_maps;
+ union trace_eval_map_item *map;
+ union trace_eval_map_item **last = &trace_eval_maps;
- if (!mod->num_trace_enums)
+ if (!mod->num_trace_evals)
return;
- mutex_lock(&trace_enum_mutex);
+ mutex_lock(&trace_eval_mutex);
- map = trace_enum_maps;
+ map = trace_eval_maps;
while (map) {
if (map->head.mod == mod)
break;
- map = trace_enum_jmp_to_tail(map);
+ map = trace_eval_jmp_to_tail(map);
last = &map->tail.next;
map = map->tail.next;
}
if (!map)
goto out;
- *last = trace_enum_jmp_to_tail(map)->tail.next;
+ *last = trace_eval_jmp_to_tail(map)->tail.next;
kfree(map);
out:
- mutex_unlock(&trace_enum_mutex);
+ mutex_unlock(&trace_eval_mutex);
}
#else
-static inline void trace_module_remove_enums(struct module *mod) { }
-#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
+static inline void trace_module_remove_evals(struct module *mod) { }
+#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
static int trace_module_notify(struct notifier_block *self,
unsigned long val, void *data)
@@ -7803,10 +7880,10 @@ static int trace_module_notify(struct notifier_block *self,
switch (val) {
case MODULE_STATE_COMING:
- trace_module_add_enums(mod);
+ trace_module_add_evals(mod);
break;
case MODULE_STATE_GOING:
- trace_module_remove_enums(mod);
+ trace_module_remove_evals(mod);
break;
}
@@ -7844,9 +7921,9 @@ static __init int tracer_init_tracefs(void)
trace_create_file("saved_cmdlines_size", 0644, d_tracer,
NULL, &tracing_saved_cmdlines_size_fops);
- trace_enum_init();
+ trace_eval_init();
- trace_create_enum_file(d_tracer);
+ trace_create_eval_file(d_tracer);
#ifdef CONFIG_MODULES
register_module_notifier(&trace_module_nb);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 39fd77330aab..6ade1c55cc3a 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -263,7 +263,10 @@ struct trace_array {
struct ftrace_ops *ops;
struct trace_pid_list __rcu *function_pids;
#ifdef CONFIG_DYNAMIC_FTRACE
+ /* All of these are protected by the ftrace_lock */
struct list_head func_probes;
+ struct list_head mod_trace;
+ struct list_head mod_notrace;
#endif
/* function tracing enabled */
int function_enabled;
@@ -637,6 +640,9 @@ void set_graph_array(struct trace_array *tr);
void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
+void tracing_start_tgid_record(void);
+void tracing_stop_tgid_record(void);
+
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);
@@ -697,6 +703,7 @@ static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
extern u64 ftrace_now(int cpu);
extern void trace_find_cmdline(int pid, char comm[]);
+extern int trace_find_tgid(int pid);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -761,10 +768,24 @@ enum print_line_t print_trace_line(struct trace_iterator *iter);
extern char trace_find_mark(unsigned long long duration);
+struct ftrace_hash;
+
+struct ftrace_mod_load {
+ struct list_head list;
+ char *func;
+ char *module;
+ int enable;
+};
+
+enum {
+ FTRACE_HASH_FL_MOD = (1 << 0),
+};
+
struct ftrace_hash {
unsigned long size_bits;
struct hlist_head *buckets;
unsigned long count;
+ unsigned long flags;
struct rcu_head rcu;
};
@@ -773,7 +794,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash)
{
- return !hash || !hash->count;
+ return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD));
}
/* Standard output formatting function used for function return traces */
@@ -1107,6 +1128,7 @@ extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \
C(LATENCY_FMT, "latency-format"), \
C(RECORD_CMD, "record-cmd"), \
+ C(RECORD_TGID, "record-tgid"), \
C(OVERWRITE, "overwrite"), \
C(STOP_ON_FREE, "disable_on_free"), \
C(IRQ_INFO, "irq-info"), \
@@ -1423,6 +1445,8 @@ struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name);
extern void trace_event_enable_cmd_record(bool enable);
+extern void trace_event_enable_tgid_record(bool enable);
+
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);
@@ -1773,10 +1797,10 @@ static inline const char *get_syscall_name(int syscall)
#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
-void trace_event_enum_update(struct trace_enum_map **map, int len);
+void trace_event_eval_update(struct trace_eval_map **map, int len);
#else
static inline void __init trace_event_init(void) { }
-static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
+static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
#endif
extern struct trace_iterator *tracepoint_print_iter;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e7973e10398c..36132f9280e6 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -343,6 +343,28 @@ void trace_event_enable_cmd_record(bool enable)
mutex_unlock(&event_mutex);
}
+void trace_event_enable_tgid_record(bool enable)
+{
+ struct trace_event_file *file;
+ struct trace_array *tr;
+
+ mutex_lock(&event_mutex);
+ do_for_each_event_file(tr, file) {
+ if (!(file->flags & EVENT_FILE_FL_ENABLED))
+ continue;
+
+ if (enable) {
+ tracing_start_tgid_record();
+ set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
+ } else {
+ tracing_stop_tgid_record();
+ clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT,
+ &file->flags);
+ }
+ } while_for_each_event_file();
+ mutex_unlock(&event_mutex);
+}
+
static int __ftrace_event_enable_disable(struct trace_event_file *file,
int enable, int soft_disable)
{
@@ -381,6 +403,12 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
tracing_stop_cmdline_record();
clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
}
+
+ if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
+ tracing_stop_tgid_record();
+ clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
+ }
+
call->class->reg(call, TRACE_REG_UNREGISTER, file);
}
/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
@@ -407,18 +435,30 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
}
if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
+ bool cmd = false, tgid = false;
/* Keep the event disabled, when going to SOFT_MODE. */
if (soft_disable)
set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
+ cmd = true;
tracing_start_cmdline_record();
set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
}
+
+ if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
+ tgid = true;
+ tracing_start_tgid_record();
+ set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
+ }
+
ret = call->class->reg(call, TRACE_REG_REGISTER, file);
if (ret) {
- tracing_stop_cmdline_record();
+ if (cmd)
+ tracing_stop_cmdline_record();
+ if (tgid)
+ tracing_stop_tgid_record();
pr_info("event trace: Could not enable event "
"%s\n", trace_event_name(call));
break;
@@ -2067,18 +2107,18 @@ __register_event(struct trace_event_call *call, struct module *mod)
return 0;
}
-static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
+static char *eval_replace(char *ptr, struct trace_eval_map *map, int len)
{
int rlen;
int elen;
- /* Find the length of the enum value as a string */
- elen = snprintf(ptr, 0, "%ld", map->enum_value);
+ /* Find the length of the eval value as a string */
+ elen = snprintf(ptr, 0, "%ld", map->eval_value);
/* Make sure there's enough room to replace the string with the value */
if (len < elen)
return NULL;
- snprintf(ptr, elen + 1, "%ld", map->enum_value);
+ snprintf(ptr, elen + 1, "%ld", map->eval_value);
/* Get the rest of the string of ptr */
rlen = strlen(ptr + len);
@@ -2090,11 +2130,11 @@ static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
}
static void update_event_printk(struct trace_event_call *call,
- struct trace_enum_map *map)
+ struct trace_eval_map *map)
{
char *ptr;
int quote = 0;
- int len = strlen(map->enum_string);
+ int len = strlen(map->eval_string);
for (ptr = call->print_fmt; *ptr; ptr++) {
if (*ptr == '\\') {
@@ -2125,16 +2165,16 @@ static void update_event_printk(struct trace_event_call *call,
continue;
}
if (isalpha(*ptr) || *ptr == '_') {
- if (strncmp(map->enum_string, ptr, len) == 0 &&
+ if (strncmp(map->eval_string, ptr, len) == 0 &&
!isalnum(ptr[len]) && ptr[len] != '_') {
- ptr = enum_replace(ptr, map, len);
- /* Hmm, enum string smaller than value */
+ ptr = eval_replace(ptr, map, len);
+ /* enum/sizeof string smaller than value */
if (WARN_ON_ONCE(!ptr))
return;
/*
- * No need to decrement here, as enum_replace()
+ * No need to decrement here, as eval_replace()
* returns the pointer to the character past
- * the enum, and two enums can not be placed
+ * the eval, and two evals can not be placed
* back to back without something in between.
* We can skip that something in between.
*/
@@ -2165,7 +2205,7 @@ static void update_event_printk(struct trace_event_call *call,
}
}
-void trace_event_enum_update(struct trace_enum_map **map, int len)
+void trace_event_eval_update(struct trace_eval_map **map, int len)
{
struct trace_event_call *call, *p;
const char *last_system = NULL;
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 08f9bab8089e..bac629af2285 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -340,31 +340,41 @@ static inline const char *kretprobed(const char *name)
static void
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
-#ifdef CONFIG_KALLSYMS
char str[KSYM_SYMBOL_LEN];
+#ifdef CONFIG_KALLSYMS
const char *name;
kallsyms_lookup(address, NULL, NULL, NULL, str);
name = kretprobed(str);
- trace_seq_printf(s, fmt, name);
+ if (name && strlen(name)) {
+ trace_seq_printf(s, fmt, name);
+ return;
+ }
#endif
+ snprintf(str, KSYM_SYMBOL_LEN, "0x%08lx", address);
+ trace_seq_printf(s, fmt, str);
}
static void
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
unsigned long address)
{
-#ifdef CONFIG_KALLSYMS
char str[KSYM_SYMBOL_LEN];
+#ifdef CONFIG_KALLSYMS
const char *name;
sprint_symbol(str, address);
name = kretprobed(str);
- trace_seq_printf(s, fmt, name);
+ if (name && strlen(name)) {
+ trace_seq_printf(s, fmt, name);
+ return;
+ }
#endif
+ snprintf(str, KSYM_SYMBOL_LEN, "0x%08lx", address);
+ trace_seq_printf(s, fmt, str);
}
#ifndef CONFIG_64BIT
@@ -587,6 +597,15 @@ int trace_print_context(struct trace_iterator *iter)
trace_seq_printf(s, "%16s-%-5d [%03d] ",
comm, entry->pid, iter->cpu);
+ if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
+ unsigned int tgid = trace_find_tgid(entry->pid);
+
+ if (!tgid)
+ trace_seq_printf(s, "(-----) ");
+ else
+ trace_seq_printf(s, "(%5d) ", tgid);
+ }
+
if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
trace_print_lat_fmt(s, entry);
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 4c896a0101bd..b341c02730be 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -12,27 +12,38 @@
#include "trace.h"
-static int sched_ref;
+#define RECORD_CMDLINE 1
+#define RECORD_TGID 2
+
+static int sched_cmdline_ref;
+static int sched_tgid_ref;
static DEFINE_MUTEX(sched_register_mutex);
static void
probe_sched_switch(void *ignore, bool preempt,
struct task_struct *prev, struct task_struct *next)
{
- if (unlikely(!sched_ref))
- return;
+ int flags;
+
+ flags = (RECORD_TGID * !!sched_tgid_ref) +
+ (RECORD_CMDLINE * !!sched_cmdline_ref);
- tracing_record_cmdline(prev);
- tracing_record_cmdline(next);
+ if (!flags)
+ return;
+ tracing_record_taskinfo_sched_switch(prev, next, flags);
}
static void
probe_sched_wakeup(void *ignore, struct task_struct *wakee)
{
- if (unlikely(!sched_ref))
- return;
+ int flags;
+
+ flags = (RECORD_TGID * !!sched_tgid_ref) +
+ (RECORD_CMDLINE * !!sched_cmdline_ref);
- tracing_record_cmdline(current);
+ if (!flags)
+ return;
+ tracing_record_taskinfo(current, flags);
}
static int tracing_sched_register(void)
@@ -75,28 +86,61 @@ static void tracing_sched_unregister(void)
unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
}
-static void tracing_start_sched_switch(void)
+static void tracing_start_sched_switch(int ops)
{
+ bool sched_register = (!sched_cmdline_ref && !sched_tgid_ref);
mutex_lock(&sched_register_mutex);
- if (!(sched_ref++))
+
+ switch (ops) {
+ case RECORD_CMDLINE:
+ sched_cmdline_ref++;
+ break;
+
+ case RECORD_TGID:
+ sched_tgid_ref++;
+ break;
+ }
+
+ if (sched_register && (sched_cmdline_ref || sched_tgid_ref))
tracing_sched_register();
mutex_unlock(&sched_register_mutex);
}
-static void tracing_stop_sched_switch(void)
+static void tracing_stop_sched_switch(int ops)
{
mutex_lock(&sched_register_mutex);
- if (!(--sched_ref))
+
+ switch (ops) {
+ case RECORD_CMDLINE:
+ sched_cmdline_ref--;
+ break;
+
+ case RECORD_TGID:
+ sched_tgid_ref--;
+ break;
+ }
+
+ if (!sched_cmdline_ref && !sched_tgid_ref)
tracing_sched_unregister();
mutex_unlock(&sched_register_mutex);
}
void tracing_start_cmdline_record(void)
{
- tracing_start_sched_switch();
+ tracing_start_sched_switch(RECORD_CMDLINE);
}
void tracing_stop_cmdline_record(void)
{
- tracing_stop_sched_switch();
+ tracing_stop_sched_switch(RECORD_CMDLINE);
+}
+
+void tracing_start_tgid_record(void)
+{
+ tracing_start_sched_switch(RECORD_TGID);
+}
+
+void tracing_stop_tgid_record(void)
+{
+ tracing_stop_sched_switch(RECORD_TGID);
}
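The hunks in trace.c, trace.h, trace_events.c and trace_sched_switch.c above add a small tgid-recording facility. As a reader aid, here is a minimal sketch of how an in-kernel consumer might drive it, mirroring what the trace_print_context() hunk above does; the my_tracer_* names are illustrative assumptions, not part of this patch:

/* Sketch only: a hypothetical in-kernel consumer of the tgid-recording API
 * added above. Assumes it lives under kernel/trace/ so "trace.h" is in scope. */
#include <linux/trace_seq.h>
#include "trace.h"

static void my_tracer_init(void)
{
	/* start caching pid -> tgid mappings at sched_switch/wakeup time */
	tracing_start_tgid_record();
}

static void my_tracer_print_task(struct trace_seq *s, int pid)
{
	int tgid = trace_find_tgid(pid);	/* 0 when no mapping was recorded */

	if (tgid)
		trace_seq_printf(s, "(%5d) ", tgid);
	else
		trace_seq_printf(s, "(-----) ");
}

static void my_tracer_exit(void)
{
	/* drop the reference; the sched probes go away with the last user */
	tracing_stop_tgid_record();
}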
diff --git a/lib/Kconfig b/lib/Kconfig
index d2fd2623721e..6762529ad9e4 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -556,6 +556,9 @@ config ARCH_HAS_SG_CHAIN
config ARCH_HAS_PMEM_API
bool
+config ARCH_HAS_UACCESS_FLUSHCACHE
+ bool
+
config ARCH_HAS_MMIO_FLUSH
bool
diff --git a/lib/Makefile b/lib/Makefile
index 7fb6ab799b8e..5a008329324e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -38,7 +38,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
- once.o refcount.o usercopy.o
+ once.o refcount.o usercopy.o errseq.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += hexdump.o
diff --git a/lib/dma-noop.c b/lib/dma-noop.c
index de26c8b68f34..acc4190e2731 100644
--- a/lib/dma-noop.c
+++ b/lib/dma-noop.c
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
+#include <linux/pfn.h>
static void *dma_noop_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
@@ -16,7 +17,8 @@ static void *dma_noop_alloc(struct device *dev, size_t size,
ret = (void *)__get_free_pages(gfp, get_order(size));
if (ret)
- *dma_handle = virt_to_phys(ret);
+ *dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset);
+
return ret;
}
@@ -32,7 +34,7 @@ static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- return page_to_phys(page) + offset;
+ return page_to_phys(page) + offset - PFN_PHYS(dev->dma_pfn_offset);
}
static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
@@ -43,34 +45,23 @@ static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nent
struct scatterlist *sg;
for_each_sg(sgl, sg, nents, i) {
+ dma_addr_t offset = PFN_PHYS(dev->dma_pfn_offset);
void *va;
BUG_ON(!sg_page(sg));
va = sg_virt(sg);
- sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va);
+ sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va) - offset;
sg_dma_len(sg) = sg->length;
}
return nents;
}
-static int dma_noop_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-static int dma_noop_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
const struct dma_map_ops dma_noop_ops = {
.alloc = dma_noop_alloc,
.free = dma_noop_free,
.map_page = dma_noop_map_page,
.map_sg = dma_noop_map_sg,
- .mapping_error = dma_noop_mapping_error,
- .dma_supported = dma_noop_supported,
};
EXPORT_SYMBOL(dma_noop_ops);
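The dma-noop changes above subtract PFN_PHYS(dev->dma_pfn_offset) so the returned handle is a DMA/bus address rather than the raw physical address; dma_pfn_offset is a page-frame offset, hence the PFN_PHYS() conversion. A small worked example with assumed values, not taken from any real platform:

/*
 * Assume PAGE_SHIFT = 12 and dev->dma_pfn_offset = 0x80000 pages.
 * PFN_PHYS(0x80000)        = 0x80000 << 12            = 0x80000000
 * buffer physical address  = 0xc0000000
 * reported DMA address     = 0xc0000000 - 0x80000000  = 0x40000000
 */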
diff --git a/lib/dma-virt.c b/lib/dma-virt.c
index dcd4df1f7174..5c4f11329721 100644
--- a/lib/dma-virt.c
+++ b/lib/dma-virt.c
@@ -51,22 +51,10 @@ static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
return nents;
}
-static int dma_virt_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return false;
-}
-
-static int dma_virt_supported(struct device *dev, u64 mask)
-{
- return true;
-}
-
const struct dma_map_ops dma_virt_ops = {
.alloc = dma_virt_alloc,
.free = dma_virt_free,
.map_page = dma_virt_map_page,
.map_sg = dma_virt_map_sg,
- .mapping_error = dma_virt_mapping_error,
- .dma_supported = dma_virt_supported,
};
EXPORT_SYMBOL(dma_virt_ops);
diff --git a/lib/errseq.c b/lib/errseq.c
new file mode 100644
index 000000000000..841fa24e6e00
--- /dev/null
+++ b/lib/errseq.c
@@ -0,0 +1,208 @@
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/atomic.h>
+#include <linux/errseq.h>
+
+/*
+ * An errseq_t is a way of recording errors in one place, and allowing any
+ * number of "subscribers" to tell whether it has changed since a previous
+ * point where it was sampled.
+ *
+ * It's implemented as an unsigned 32-bit value. The low order bits are
+ * designated to hold an error code (between 0 and -MAX_ERRNO). The upper bits
+ * are used as a counter. This is done with atomics instead of locking so that
+ * these functions can be called from any context.
+ *
+ * The general idea is for consumers to sample an errseq_t value. That value
+ * can later be used to tell whether any new errors have occurred since that
+ * sampling was done.
+ *
+ * Note that there is a risk of collisions if new errors are being recorded
+ * frequently, since we have so few bits to use as a counter.
+ *
+ * To mitigate this, one bit is used as a flag to tell whether the value has
+ * been sampled since a new value was recorded. That allows us to avoid bumping
+ * the counter if no one has sampled it since the last time an error was
+ * recorded.
+ *
+ * A new errseq_t should always be zeroed out. An errseq_t value of all zeroes
+ * is the special (but common) case where there has never been an error. An all
+ * zero value thus serves as the "epoch" if one wishes to know whether there
+ * has ever been an error set since it was first initialized.
+ */
+
+/* The low bits are designated for error code (max of MAX_ERRNO) */
+#define ERRSEQ_SHIFT ilog2(MAX_ERRNO + 1)
+
+/* This bit is used as a flag to indicate whether the value has been seen */
+#define ERRSEQ_SEEN (1 << ERRSEQ_SHIFT)
+
+/* The lowest bit of the counter */
+#define ERRSEQ_CTR_INC (1 << (ERRSEQ_SHIFT + 1))
+
+/**
+ * __errseq_set - set an errseq_t for later reporting
+ * @eseq: errseq_t field that should be set
+ * @err: error to set
+ *
+ * This function sets the error in *eseq, and increments the sequence counter
+ * if the last sequence was sampled at some point in the past.
+ *
+ * Any error set will always overwrite an existing error.
+ *
+ * Most callers will want to use the errseq_set inline wrapper to efficiently
+ * handle the common case where err is 0.
+ *
+ * We do return an errseq_t here, primarily for debugging purposes. The return
+ * value should not be used as a previously sampled value in later calls as it
+ * will not have the SEEN flag set.
+ */
+errseq_t __errseq_set(errseq_t *eseq, int err)
+{
+ errseq_t cur, old;
+
+ /* MAX_ERRNO must be able to serve as a mask */
+ BUILD_BUG_ON_NOT_POWER_OF_2(MAX_ERRNO + 1);
+
+ /*
+ * Ensure the error code actually fits where we want it to go. If it
+ * doesn't then just throw a warning and don't record anything. We
+ * also don't accept zero here as that would effectively clear a
+ * previous error.
+ */
+ old = READ_ONCE(*eseq);
+
+ if (WARN(unlikely(err == 0 || (unsigned int)-err > MAX_ERRNO),
+ "err = %d\n", err))
+ return old;
+
+ for (;;) {
+ errseq_t new;
+
+ /* Clear out error bits and set new error */
+ new = (old & ~(MAX_ERRNO|ERRSEQ_SEEN)) | -err;
+
+ /* Only increment if someone has looked at it */
+ if (old & ERRSEQ_SEEN)
+ new += ERRSEQ_CTR_INC;
+
+ /* If there would be no change, then call it done */
+ if (new == old) {
+ cur = new;
+ break;
+ }
+
+ /* Try to swap the new value into place */
+ cur = cmpxchg(eseq, old, new);
+
+ /*
+ * Call it success if we did the swap or someone else beat us
+ * to it for the same value.
+ */
+ if (likely(cur == old || cur == new))
+ break;
+
+ /* Raced with an update, try again */
+ old = cur;
+ }
+ return cur;
+}
+EXPORT_SYMBOL(__errseq_set);
+
+/**
+ * errseq_sample - grab current errseq_t value
+ * @eseq: pointer to errseq_t to be sampled
+ *
+ * This function allows callers to sample an errseq_t value, marking it as
+ * "seen" if required.
+ */
+errseq_t errseq_sample(errseq_t *eseq)
+{
+ errseq_t old = READ_ONCE(*eseq);
+ errseq_t new = old;
+
+ /*
+ * For the common case of no errors ever having been set, we can skip
+ * marking the SEEN bit. Once an error has been set, the value will
+ * never go back to zero.
+ */
+ if (old != 0) {
+ new |= ERRSEQ_SEEN;
+ if (old != new)
+ cmpxchg(eseq, old, new);
+ }
+ return new;
+}
+EXPORT_SYMBOL(errseq_sample);
+
+/**
+ * errseq_check - has an error occurred since a particular sample point?
+ * @eseq: pointer to errseq_t value to be checked
+ * @since: previously-sampled errseq_t from which to check
+ *
+ * Grab the value that eseq points to, and see if it has changed "since"
+ * the given value was sampled. The "since" value is not advanced, so there
+ * is no need to mark the value as seen.
+ *
+ * Returns the latest error set in the errseq_t or 0 if it hasn't changed.
+ */
+int errseq_check(errseq_t *eseq, errseq_t since)
+{
+ errseq_t cur = READ_ONCE(*eseq);
+
+ if (likely(cur == since))
+ return 0;
+ return -(cur & MAX_ERRNO);
+}
+EXPORT_SYMBOL(errseq_check);
+
+/**
+ * errseq_check_and_advance - check an errseq_t and advance to current value
+ * @eseq: pointer to value being checked and reported
+ * @since: pointer to previously-sampled errseq_t to check against and advance
+ *
+ * Grab the eseq value, and see whether it matches the value that "since"
+ * points to. If it does, then just return 0.
+ *
+ * If it doesn't, then the value has changed. Set the "seen" flag, and try to
+ * swap it into place as the new eseq value. Then, set that value as the new
+ * "since" value, and return whatever the error portion is set to.
+ *
+ * Note that no locking is provided here for concurrent updates to the "since"
+ * value. The caller must provide that if necessary. Because of this, callers
+ * may want to do a lockless errseq_check before taking the lock and calling
+ * this.
+ */
+int errseq_check_and_advance(errseq_t *eseq, errseq_t *since)
+{
+ int err = 0;
+ errseq_t old, new;
+
+ /*
+ * Most callers will want to use the inline wrapper to check this,
+ * so that the common case of no error is handled without needing
+ * to take the lock that protects the "since" value.
+ */
+ old = READ_ONCE(*eseq);
+ if (old != *since) {
+ /*
+ * Set the flag and try to swap it into place if it has
+ * changed.
+ *
+ * We don't care about the outcome of the swap here. If the
+ * swap doesn't occur, then it has either been updated by a
+ * writer who is altering the value in some way (updating
+ * counter or resetting the error), or another reader who is
+ * just setting the "seen" flag. Either outcome is OK, and we
+ * can advance "since" and return an error based on what we
+ * have.
+ */
+ new = old | ERRSEQ_SEEN;
+ if (new != old)
+ cmpxchg(eseq, old, new);
+ *since = new;
+ err = -(new & MAX_ERRNO);
+ }
+ return err;
+}
+EXPORT_SYMBOL(errseq_check_and_advance);
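The comment block at the top of errseq.c describes the mechanism; as a reader aid, here is a minimal sketch of the subscriber pattern it enables, the same pattern the mm/filemap.c hunks below apply to mapping->wb_err for fsync. The wb_err, writer_saw_error and my_* names are illustrative only; only symbols defined in this file are used:

#include <linux/errseq.h>

static errseq_t wb_err;			/* shared error record, zero-initialized */

/* Producer side: record a writeback error. */
static void writer_saw_error(int err)
{
	if (err)			/* the errseq_set() wrapper does this check inline */
		__errseq_set(&wb_err, err);	/* e.g. -EIO */
}

/* Consumer side: remember the state at open time, report changes at fsync time. */
static errseq_t my_open_sample(void)
{
	return errseq_sample(&wb_err);	/* marks the value as "seen" if non-zero */
}

static int my_fsync_check(errseq_t since)
{
	return errseq_check(&wb_err, since);	/* 0, or the most recent error */
}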
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index a71cf1bdd4c9..2cc1f94e03a1 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -207,7 +207,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
if (val < (nr_cpu_ids * PROP_BATCH))
val = percpu_counter_sum(&pl->events);
- __percpu_counter_add(&pl->events,
+ percpu_counter_add_batch(&pl->events,
-val + (val >> (period-pl->period)), PROP_BATCH);
} else
percpu_counter_set(&pl->events, 0);
@@ -219,7 +219,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
{
fprop_reflect_period_percpu(p, pl);
- __percpu_counter_add(&pl->events, 1, PROP_BATCH);
+ percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
percpu_counter_add(&p->events, 1);
}
@@ -267,6 +267,6 @@ void __fprop_inc_percpu_max(struct fprop_global *p,
return;
} else
fprop_reflect_period_percpu(p, pl);
- __percpu_counter_add(&pl->events, 1, PROP_BATCH);
+ percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
percpu_counter_add(&p->events, 1);
}
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f835964c9485..52c8dd6d8e82 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -130,6 +130,24 @@
} \
}
+static int copyout(void __user *to, const void *from, size_t n)
+{
+ if (access_ok(VERIFY_WRITE, to, n)) {
+ kasan_check_read(from, n);
+ n = raw_copy_to_user(to, from, n);
+ }
+ return n;
+}
+
+static int copyin(void *to, const void __user *from, size_t n)
+{
+ if (access_ok(VERIFY_READ, from, n)) {
+ kasan_check_write(to, n);
+ n = raw_copy_from_user(to, from, n);
+ }
+ return n;
+}
+
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
@@ -144,6 +162,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
if (unlikely(!bytes))
return 0;
+ might_fault();
wanted = bytes;
iov = i->iov;
skip = i->iov_offset;
@@ -155,7 +174,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
from = kaddr + offset;
/* first chunk, usually the only one */
- left = __copy_to_user_inatomic(buf, from, copy);
+ left = copyout(buf, from, copy);
copy -= left;
skip += copy;
from += copy;
@@ -165,7 +184,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
- left = __copy_to_user_inatomic(buf, from, copy);
+ left = copyout(buf, from, copy);
copy -= left;
skip = copy;
from += copy;
@@ -184,7 +203,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
kaddr = kmap(page);
from = kaddr + offset;
- left = __copy_to_user(buf, from, copy);
+ left = copyout(buf, from, copy);
copy -= left;
skip += copy;
from += copy;
@@ -193,7 +212,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
- left = __copy_to_user(buf, from, copy);
+ left = copyout(buf, from, copy);
copy -= left;
skip = copy;
from += copy;
@@ -227,6 +246,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
if (unlikely(!bytes))
return 0;
+ might_fault();
wanted = bytes;
iov = i->iov;
skip = i->iov_offset;
@@ -238,7 +258,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
to = kaddr + offset;
/* first chunk, usually the only one */
- left = __copy_from_user_inatomic(to, buf, copy);
+ left = copyin(to, buf, copy);
copy -= left;
skip += copy;
to += copy;
@@ -248,7 +268,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
- left = __copy_from_user_inatomic(to, buf, copy);
+ left = copyin(to, buf, copy);
copy -= left;
skip = copy;
to += copy;
@@ -267,7 +287,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
kaddr = kmap(page);
to = kaddr + offset;
- left = __copy_from_user(to, buf, copy);
+ left = copyin(to, buf, copy);
copy -= left;
skip += copy;
to += copy;
@@ -276,7 +296,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
iov++;
buf = iov->iov_base;
copy = min(bytes, iov->iov_len);
- left = __copy_from_user(to, buf, copy);
+ left = copyin(to, buf, copy);
copy -= left;
skip = copy;
to += copy;
@@ -535,14 +555,15 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
return bytes;
}
-size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
const char *from = addr;
if (unlikely(i->type & ITER_PIPE))
return copy_pipe_to_iter(addr, bytes, i);
+ if (iter_is_iovec(i))
+ might_fault();
iterate_and_advance(i, bytes, v,
- __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
- v.iov_len),
+ copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
memcpy_to_page(v.bv_page, v.bv_offset,
(from += v.bv_len) - v.bv_len, v.bv_len),
memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
@@ -550,18 +571,19 @@ size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
return bytes;
}
-EXPORT_SYMBOL(copy_to_iter);
+EXPORT_SYMBOL(_copy_to_iter);
-size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return 0;
}
+ if (iter_is_iovec(i))
+ might_fault();
iterate_and_advance(i, bytes, v,
- __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
- v.iov_len),
+ copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
@@ -569,9 +591,9 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
return bytes;
}
-EXPORT_SYMBOL(copy_from_iter);
+EXPORT_SYMBOL(_copy_from_iter);
-bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
@@ -581,8 +603,10 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
if (unlikely(i->count < bytes))
return false;
+ if (iter_is_iovec(i))
+ might_fault();
iterate_all_kinds(i, bytes, v, ({
- if (__copy_from_user((to += v.iov_len) - v.iov_len,
+ if (copyin((to += v.iov_len) - v.iov_len,
v.iov_base, v.iov_len))
return false;
0;}),
@@ -594,9 +618,9 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
iov_iter_advance(i, bytes);
return true;
}
-EXPORT_SYMBOL(copy_from_iter_full);
+EXPORT_SYMBOL(_copy_from_iter_full);
-size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
@@ -613,9 +637,31 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
return bytes;
}
-EXPORT_SYMBOL(copy_from_iter_nocache);
+EXPORT_SYMBOL(_copy_from_iter_nocache);
-bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
+#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
+{
+ char *to = addr;
+ if (unlikely(i->type & ITER_PIPE)) {
+ WARN_ON(1);
+ return 0;
+ }
+ iterate_and_advance(i, bytes, v,
+ __copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
+ v.iov_base, v.iov_len),
+ memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
+ v.bv_offset, v.bv_len),
+ memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
+ v.iov_len)
+ )
+
+ return bytes;
+}
+EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
+#endif
+
+bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
if (unlikely(i->type & ITER_PIPE)) {
@@ -637,11 +683,22 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
iov_iter_advance(i, bytes);
return true;
}
-EXPORT_SYMBOL(copy_from_iter_full_nocache);
+EXPORT_SYMBOL(_copy_from_iter_full_nocache);
+
+static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
+{
+ size_t v = n + offset;
+ if (likely(n <= v && v <= (PAGE_SIZE << compound_order(page))))
+ return true;
+ WARN_ON(1);
+ return false;
+}
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
+ if (unlikely(!page_copy_sane(page, offset, bytes)))
+ return 0;
if (i->type & (ITER_BVEC|ITER_KVEC)) {
void *kaddr = kmap_atomic(page);
size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
@@ -657,13 +714,15 @@ EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
+ if (unlikely(!page_copy_sane(page, offset, bytes)))
+ return 0;
if (unlikely(i->type & ITER_PIPE)) {
WARN_ON(1);
return 0;
}
if (i->type & (ITER_BVEC|ITER_KVEC)) {
void *kaddr = kmap_atomic(page);
- size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
+ size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
kunmap_atomic(kaddr);
return wanted;
} else
@@ -700,7 +759,7 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
if (unlikely(i->type & ITER_PIPE))
return pipe_zero(bytes, i);
iterate_and_advance(i, bytes, v,
- __clear_user(v.iov_base, v.iov_len),
+ clear_user(v.iov_base, v.iov_len),
memzero_page(v.bv_page, v.bv_offset, v.bv_len),
memset(v.iov_base, 0, v.iov_len)
)
@@ -713,14 +772,17 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes)
{
char *kaddr = kmap_atomic(page), *p = kaddr + offset;
+ if (unlikely(!page_copy_sane(page, offset, bytes))) {
+ kunmap_atomic(kaddr);
+ return 0;
+ }
if (unlikely(i->type & ITER_PIPE)) {
kunmap_atomic(kaddr);
WARN_ON(1);
return 0;
}
iterate_all_kinds(i, bytes, v,
- __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
- v.iov_base, v.iov_len),
+ copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
v.bv_offset, v.bv_len),
memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 9c21000df0b5..8ee7e5ec21be 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -72,7 +72,7 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
}
EXPORT_SYMBOL(percpu_counter_set);
-void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
+void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
s64 count;
@@ -89,7 +89,7 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
}
preempt_enable();
}
-EXPORT_SYMBOL(__percpu_counter_add);
+EXPORT_SYMBOL(percpu_counter_add_batch);
/*
* Add up all the per-cpu counts, return the result. This is a more accurate
diff --git a/lib/raid6/mktables.c b/lib/raid6/mktables.c
index 39787db588b0..e824d088f72c 100644
--- a/lib/raid6/mktables.c
+++ b/lib/raid6/mktables.c
@@ -125,6 +125,26 @@ int main(int argc, char *argv[])
printf("EXPORT_SYMBOL(raid6_gfexp);\n");
printf("#endif\n");
+ /* Compute log-of-2 table */
+ printf("\nconst u8 __attribute__((aligned(256)))\n"
+ "raid6_gflog[256] =\n" "{\n");
+ for (i = 0; i < 256; i += 8) {
+ printf("\t");
+ for (j = 0; j < 8; j++) {
+ v = 255;
+ for (k = 0; k < 256; k++)
+ if (exptbl[k] == (i + j)) {
+ v = k;
+ break;
+ }
+ printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
+ }
+ }
+ printf("};\n");
+ printf("#ifdef __KERNEL__\n");
+ printf("EXPORT_SYMBOL(raid6_gflog);\n");
+ printf("#endif\n");
+
/* Compute inverse table x^-1 == x^254 */
printf("\nconst u8 __attribute__((aligned(256)))\n"
"raid6_gfinv[256] =\n" "{\n");
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 8e105ed4df12..a5f567747ced 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -121,37 +121,3 @@ long strnlen_user(const char __user *str, long count)
return 0;
}
EXPORT_SYMBOL(strnlen_user);
-
-/**
- * strlen_user: - Get the size of a user string INCLUDING final NUL.
- * @str: The string to measure.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-long strlen_user(const char __user *str)
-{
- unsigned long max_addr, src_addr;
-
- max_addr = user_addr_max();
- src_addr = (unsigned long)str;
- if (likely(src_addr < max_addr)) {
- unsigned long max = max_addr - src_addr;
- long retval;
-
- user_access_begin();
- retval = do_strnlen_user(str, ~0ul, max);
- user_access_end();
- return retval;
- }
- return 0;
-}
-EXPORT_SYMBOL(strlen_user);
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 1b6010a3beb8..f5d9f08ee032 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -6,8 +6,11 @@
unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
{
unsigned long res = n;
- if (likely(access_ok(VERIFY_READ, from, n)))
+ might_fault();
+ if (likely(access_ok(VERIFY_READ, from, n))) {
+ kasan_check_write(to, n);
res = raw_copy_from_user(to, from, n);
+ }
if (unlikely(res))
memset(to + (n - res), 0, res);
return res;
@@ -18,8 +21,11 @@ EXPORT_SYMBOL(_copy_from_user);
#ifndef INLINE_COPY_TO_USER
unsigned long _copy_to_user(void *to, const void __user *from, unsigned long n)
{
- if (likely(access_ok(VERIFY_WRITE, to, n)))
+ might_fault();
+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
+ kasan_check_read(from, n);
n = raw_copy_to_user(to, from, n);
+ }
return n;
}
EXPORT_SYMBOL(_copy_to_user);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 9f37d6208e99..86c3385b9eb3 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -31,6 +31,7 @@
#include <linux/dcache.h>
#include <linux/cred.h>
#include <linux/uuid.h>
+#include <linux/of.h>
#include <net/addrconf.h>
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
@@ -1470,6 +1471,126 @@ char *flags_string(char *buf, char *end, void *flags_ptr, const char *fmt)
return format_flags(buf, end, flags, names);
}
+static const char *device_node_name_for_depth(const struct device_node *np, int depth)
+{
+ for ( ; np && depth; depth--)
+ np = np->parent;
+
+ return kbasename(np->full_name);
+}
+
+static noinline_for_stack
+char *device_node_gen_full_name(const struct device_node *np, char *buf, char *end)
+{
+ int depth;
+ const struct device_node *parent = np->parent;
+ static const struct printf_spec strspec = {
+ .field_width = -1,
+ .precision = -1,
+ };
+
+ /* special case for root node */
+ if (!parent)
+ return string(buf, end, "/", strspec);
+
+ for (depth = 0; parent->parent; depth++)
+ parent = parent->parent;
+
+ for ( ; depth >= 0; depth--) {
+ buf = string(buf, end, "/", strspec);
+ buf = string(buf, end, device_node_name_for_depth(np, depth),
+ strspec);
+ }
+ return buf;
+}
+
+static noinline_for_stack
+char *device_node_string(char *buf, char *end, struct device_node *dn,
+ struct printf_spec spec, const char *fmt)
+{
+ char tbuf[sizeof("xxxx") + 1];
+ const char *p;
+ int ret;
+ char *buf_start = buf;
+ struct property *prop;
+ bool has_mult, pass;
+ static const struct printf_spec num_spec = {
+ .flags = SMALL,
+ .field_width = -1,
+ .precision = -1,
+ .base = 10,
+ };
+
+ struct printf_spec str_spec = spec;
+ str_spec.field_width = -1;
+
+ if (!IS_ENABLED(CONFIG_OF))
+ return string(buf, end, "(!OF)", spec);
+
+ if ((unsigned long)dn < PAGE_SIZE)
+ return string(buf, end, "(null)", spec);
+
+ /* simple case without any further format specifiers */
+ fmt++;
+ if (fmt[0] == '\0' || strcspn(fmt,"fnpPFcC") > 0)
+ fmt = "f";
+
+ for (pass = false; strspn(fmt,"fnpPFcC"); fmt++, pass = true) {
+ if (pass) {
+ if (buf < end)
+ *buf = ':';
+ buf++;
+ }
+
+ switch (*fmt) {
+ case 'f': /* full_name */
+ buf = device_node_gen_full_name(dn, buf, end);
+ break;
+ case 'n': /* name */
+ buf = string(buf, end, dn->name, str_spec);
+ break;
+ case 'p': /* phandle */
+ buf = number(buf, end, (unsigned int)dn->phandle, num_spec);
+ break;
+ case 'P': /* path-spec */
+ p = kbasename(of_node_full_name(dn));
+ if (!p[1])
+ p = "/";
+ buf = string(buf, end, p, str_spec);
+ break;
+ case 'F': /* flags */
+ tbuf[0] = of_node_check_flag(dn, OF_DYNAMIC) ? 'D' : '-';
+ tbuf[1] = of_node_check_flag(dn, OF_DETACHED) ? 'd' : '-';
+ tbuf[2] = of_node_check_flag(dn, OF_POPULATED) ? 'P' : '-';
+ tbuf[3] = of_node_check_flag(dn, OF_POPULATED_BUS) ? 'B' : '-';
+ tbuf[4] = 0;
+ buf = string(buf, end, tbuf, str_spec);
+ break;
+ case 'c': /* major compatible string */
+ ret = of_property_read_string(dn, "compatible", &p);
+ if (!ret)
+ buf = string(buf, end, p, str_spec);
+ break;
+ case 'C': /* full compatible string */
+ has_mult = false;
+ of_property_for_each_string(dn, "compatible", prop, p) {
+ if (has_mult)
+ buf = string(buf, end, ",", str_spec);
+ buf = string(buf, end, "\"", str_spec);
+ buf = string(buf, end, p, str_spec);
+ buf = string(buf, end, "\"", str_spec);
+
+ has_mult = true;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return widen_string(buf, buf - buf_start, end, spec);
+}
+
int kptr_restrict __read_mostly;
/*
@@ -1566,6 +1687,16 @@ int kptr_restrict __read_mostly;
* p page flags (see struct page) given as pointer to unsigned long
* g gfp flags (GFP_* and __GFP_*) given as pointer to gfp_t
* v vma flags (VM_*) given as pointer to unsigned long
+ * - 'O' For a kobject based struct. Must be one of the following:
+ * - 'OF[fnpPcCF]' For a device tree object
+ * Without any optional arguments prints the full_name
+ * f device node full_name
+ * n device node name
+ * p device node phandle
+ * P device node path spec (name + @unit)
+ * F device node flags
+ * c major compatible string
+ * C full compatible string
*
* ** Please update also Documentation/printk-formats.txt when making changes **
*
@@ -1721,6 +1852,11 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
case 'G':
return flags_string(buf, end, ptr, fmt);
+ case 'O':
+ switch (fmt[1]) {
+ case 'F':
+ return device_node_string(buf, end, ptr, spec, fmt + 1);
+ }
}
spec.flags |= SMALL;
if (spec.field_width == -1) {
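The format table added above documents the new 'OF' pointer extension; a short usage sketch follows. report_node() and the example node paths in the comments are illustrative assumptions, not from this patch:

#include <linux/of.h>
#include <linux/printk.h>

/* Print several views of one device tree node with the new %pOF specifiers. */
static void report_node(struct device_node *np)
{
	pr_info("full name:  %pOF\n", np);	/* default 'f', e.g. /soc/i2c@40012000 */
	pr_info("name:       %pOFn\n", np);	/* node name only, e.g. i2c */
	pr_info("path spec:  %pOFP\n", np);	/* name + @unit, e.g. i2c@40012000 */
	pr_info("compatible: %pOFc\n", np);	/* first "compatible" string */
	pr_info("name+flags: %pOFnF\n", np);	/* multiple specifiers, ':'-separated */
}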
diff --git a/mm/Kconfig b/mm/Kconfig
index 398b46064544..46ef77d5c332 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -149,32 +149,6 @@ config NO_BOOTMEM
config MEMORY_ISOLATION
bool
-config MOVABLE_NODE
- bool "Enable to assign a node which has only movable memory"
- depends on HAVE_MEMBLOCK
- depends on NO_BOOTMEM
- depends on X86_64 || OF_EARLY_FLATTREE || MEMORY_HOTPLUG
- depends on NUMA
- default n
- help
- Allow a node to have only movable memory. Pages used by the kernel,
- such as direct mapping pages cannot be migrated. So the corresponding
- memory device cannot be hotplugged. This option allows the following
- two things:
- - When the system is booting, node full of hotpluggable memory can
- be arranged to have only movable memory so that the whole node can
- be hot-removed. (need movable_node boot option specified).
- - After the system is up, the option allows users to online all the
- memory of a node as movable memory so that the whole node can be
- hot-removed.
-
- Users who don't use the memory hotplug feature are fine with this
- option on since they don't specify movable_node boot option or they
- don't online memory as movable.
-
- Say Y here if you want to hotplug a whole node.
- Say N here if you want kernel to use memory on all nodes evenly.
-
#
# Only be set on architectures that have completely implemented memory hotplug
# feature. If you are not sure, don't touch it.
@@ -446,6 +420,18 @@ choice
benefit.
endchoice
+config ARCH_WANTS_THP_SWAP
+ def_bool n
+
+config THP_SWAP
+ def_bool y
+ depends on TRANSPARENT_HUGEPAGE && ARCH_WANTS_THP_SWAP
+ help
+ Swap transparent huge pages in one piece, without splitting.
+ XXX: For now this only does clustered swap space allocation.
+
+ For selection by architectures with reasonable THP sizes.
+
config TRANSPARENT_HUGE_PAGECACHE
def_bool y
depends on TRANSPARENT_HUGEPAGE
@@ -683,12 +669,16 @@ config IDLE_PAGE_TRACKING
See Documentation/vm/idle_page_tracking.txt for more details.
+# arch_add_memory() comprehends device memory
+config ARCH_HAS_ZONE_DEVICE
+ bool
+
config ZONE_DEVICE
bool "Device memory (pmem, etc...) hotplug support"
depends on MEMORY_HOTPLUG
depends on MEMORY_HOTREMOVE
depends on SPARSEMEM_VMEMMAP
- depends on X86_64 #arch_add_memory() comprehends device memory
+ depends on ARCH_HAS_ZONE_DEVICE
help
Device memory hotplug support allows for establishing pmem,
@@ -706,3 +696,11 @@ config ARCH_USES_HIGH_VMA_FLAGS
bool
config ARCH_HAS_PKEYS
bool
+
+config PERCPU_STATS
+ bool "Collect percpu memory statistics"
+ default n
+ help
+ This feature collects and exposes statistics via debugfs. The
+ information includes global and per chunk statistics, which can
+ be used to help understand percpu memory usage.
diff --git a/mm/Makefile b/mm/Makefile
index 026f6a828a50..411bd24d4a7c 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -103,3 +103,4 @@ obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
+obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o
diff --git a/mm/compaction.c b/mm/compaction.c
index 613c59e928cb..fb548e4c7bd4 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -236,10 +236,9 @@ static void __reset_isolation_suitable(struct zone *zone)
cond_resched();
- if (!pfn_valid(pfn))
+ page = pfn_to_online_page(pfn);
+ if (!page)
continue;
-
- page = pfn_to_page(pfn);
if (zone != page_zone(page))
continue;
diff --git a/mm/filemap.c b/mm/filemap.c
index aea58e983a73..3247b4208034 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -309,6 +309,16 @@ int filemap_check_errors(struct address_space *mapping)
}
EXPORT_SYMBOL(filemap_check_errors);
+static int filemap_check_and_keep_errors(struct address_space *mapping)
+{
+ /* Check for outstanding write errors */
+ if (test_bit(AS_EIO, &mapping->flags))
+ return -EIO;
+ if (test_bit(AS_ENOSPC, &mapping->flags))
+ return -ENOSPC;
+ return 0;
+}
+
/**
* __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
* @mapping: address space structure to write
@@ -408,17 +418,16 @@ bool filemap_range_has_page(struct address_space *mapping,
}
EXPORT_SYMBOL(filemap_range_has_page);
-static int __filemap_fdatawait_range(struct address_space *mapping,
+static void __filemap_fdatawait_range(struct address_space *mapping,
loff_t start_byte, loff_t end_byte)
{
pgoff_t index = start_byte >> PAGE_SHIFT;
pgoff_t end = end_byte >> PAGE_SHIFT;
struct pagevec pvec;
int nr_pages;
- int ret = 0;
if (end_byte < start_byte)
- goto out;
+ return;
pagevec_init(&pvec, 0);
while ((index <= end) &&
@@ -435,14 +444,11 @@ static int __filemap_fdatawait_range(struct address_space *mapping,
continue;
wait_on_page_writeback(page);
- if (TestClearPageError(page))
- ret = -EIO;
+ ClearPageError(page);
}
pagevec_release(&pvec);
cond_resched();
}
-out:
- return ret;
}
/**
@@ -462,14 +468,8 @@ out:
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
loff_t end_byte)
{
- int ret, ret2;
-
- ret = __filemap_fdatawait_range(mapping, start_byte, end_byte);
- ret2 = filemap_check_errors(mapping);
- if (!ret)
- ret = ret2;
-
- return ret;
+ __filemap_fdatawait_range(mapping, start_byte, end_byte);
+ return filemap_check_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range);
@@ -485,15 +485,17 @@ EXPORT_SYMBOL(filemap_fdatawait_range);
* call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
* fsfreeze(8)
*/
-void filemap_fdatawait_keep_errors(struct address_space *mapping)
+int filemap_fdatawait_keep_errors(struct address_space *mapping)
{
loff_t i_size = i_size_read(mapping->host);
if (i_size == 0)
- return;
+ return 0;
__filemap_fdatawait_range(mapping, 0, i_size - 1);
+ return filemap_check_and_keep_errors(mapping);
}
+EXPORT_SYMBOL(filemap_fdatawait_keep_errors);
/**
* filemap_fdatawait - wait for all under-writeback pages to complete
@@ -535,6 +537,9 @@ int filemap_write_and_wait(struct address_space *mapping)
int err2 = filemap_fdatawait(mapping);
if (!err)
err = err2;
+ } else {
+ /* Clear any previously stored errors */
+ filemap_check_errors(mapping);
}
} else {
err = filemap_check_errors(mapping);
@@ -569,6 +574,9 @@ int filemap_write_and_wait_range(struct address_space *mapping,
lstart, lend);
if (!err)
err = err2;
+ } else {
+ /* Clear any previously stored errors */
+ filemap_check_errors(mapping);
}
} else {
err = filemap_check_errors(mapping);
@@ -577,6 +585,90 @@ int filemap_write_and_wait_range(struct address_space *mapping,
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
+void __filemap_set_wb_err(struct address_space *mapping, int err)
+{
+ errseq_t eseq = __errseq_set(&mapping->wb_err, err);
+
+ trace_filemap_set_wb_err(mapping, eseq);
+}
+EXPORT_SYMBOL(__filemap_set_wb_err);
+
+/**
+ * file_check_and_advance_wb_err - report any previously recorded wb error
+ * and advance wb_err to the current one
+ * @file: struct file on which the error is being reported
+ *
+ * When userland calls fsync (or something like nfsd does the equivalent), we
+ * want to report any writeback errors that occurred since the last fsync (or
+ * since the file was opened if there haven't been any).
+ *
+ * Grab the wb_err from the mapping. If it matches what we have in the file,
+ * then just quickly return 0. The file is all caught up.
+ *
+ * If it doesn't match, then take the mapping value, set the "seen" flag in
+ * it and try to swap it into place. If it works, or another task beat us
+ * to it with the new value, then update the f_wb_err and return the error
+ * portion. The error at this point must be reported via proper channels
+ * (a la fsync, or the NFS COMMIT operation, etc.).
+ *
+ * While we handle mapping->wb_err with atomic operations, the f_wb_err
+ * value is protected by the f_lock since we must ensure that it reflects
+ * the latest value swapped in for this file descriptor.
+ */
+int file_check_and_advance_wb_err(struct file *file)
+{
+ int err = 0;
+ errseq_t old = READ_ONCE(file->f_wb_err);
+ struct address_space *mapping = file->f_mapping;
+
+ /* Locklessly handle the common case where nothing has changed */
+ if (errseq_check(&mapping->wb_err, old)) {
+ /* Something changed, must use slow path */
+ spin_lock(&file->f_lock);
+ old = file->f_wb_err;
+ err = errseq_check_and_advance(&mapping->wb_err,
+ &file->f_wb_err);
+ trace_file_check_and_advance_wb_err(file, old);
+ spin_unlock(&file->f_lock);
+ }
+ return err;
+}
+EXPORT_SYMBOL(file_check_and_advance_wb_err);
+
+/**
+ * file_write_and_wait_range - write out & wait on a file range
+ * @file: file pointing to address_space with pages
+ * @lstart: offset in bytes where the range starts
+ * @lend: offset in bytes where the range ends (inclusive)
+ *
+ * Write out and wait upon file offsets lstart->lend, inclusive.
+ *
+ * Note that @lend is inclusive (describes the last byte to be written) so
+ * that this function can be used to write to the very end-of-file (end = -1).
+ *
+ * After writing out and waiting on the data, we check and advance the
+ * f_wb_err cursor to the latest value, and return any errors detected there.
+ */
+int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
+{
+ int err = 0, err2;
+ struct address_space *mapping = file->f_mapping;
+
+ if ((!dax_mapping(mapping) && mapping->nrpages) ||
+ (dax_mapping(mapping) && mapping->nrexceptional)) {
+ err = __filemap_fdatawrite_range(mapping, lstart, lend,
+ WB_SYNC_ALL);
+ /* See comment of filemap_write_and_wait() */
+ if (err != -EIO)
+ __filemap_fdatawait_range(mapping, lstart, lend);
+ }
+ err2 = file_check_and_advance_wb_err(file);
+ if (!err)
+ err = err2;
+ return err;
+}
+EXPORT_SYMBOL(file_write_and_wait_range);
+
/**
* replace_page_cache_page - replace a pagecache page with a new one
* @old: page to be replaced
@@ -2265,7 +2357,7 @@ int filemap_fault(struct vm_fault *vmf)
/* No page in the page cache at all */
do_sync_mmap_readahead(vmf->vma, ra, file, offset);
count_vm_event(PGMAJFAULT);
- mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
+ count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
ret = VM_FAULT_MAJOR;
retry_find:
page = find_get_page(mapping, offset);
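
To show how the new errseq_t-based interfaces above are meant to be consumed, here is a minimal, hypothetical ->fsync() sketch (the function name and the metadata step are illustrative, not part of this patch): file_write_and_wait_range() writes back and waits on the data, then advances the per-open-file f_wb_err cursor so each opener observes a given writeback error exactly once.

/* Hypothetical ->fsync() sketch using the file-based error reporting. */
static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct inode *inode = file_inode(file);
	int err;

	/* write back the range, then report/advance this file's wb_err */
	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	inode_lock(inode);
	/* ... flush metadata or the journal here (possibly skipped for
	 * datasync when only timestamps changed) ... */
	inode_unlock(inode);
	return 0;
}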
diff --git a/mm/gup.c b/mm/gup.c
index 3ab78dc3db7d..23f01c40c88f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -208,72 +208,28 @@ no_page:
return no_page_table(vma, flags);
}
-/**
- * follow_page_mask - look up a page descriptor from a user-virtual address
- * @vma: vm_area_struct mapping @address
- * @address: virtual address to look up
- * @flags: flags modifying lookup behaviour
- * @page_mask: on output, *page_mask is set according to the size of the page
- *
- * @flags can have FOLL_ flags set, defined in <linux/mm.h>
- *
- * Returns the mapped (struct page *), %NULL if no mapping exists, or
- * an error pointer if there is a mapping to something not represented
- * by a page descriptor (see also vm_normal_page()).
- */
-struct page *follow_page_mask(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags,
- unsigned int *page_mask)
+static struct page *follow_pmd_mask(struct vm_area_struct *vma,
+ unsigned long address, pud_t *pudp,
+ unsigned int flags, unsigned int *page_mask)
{
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
pmd_t *pmd;
spinlock_t *ptl;
struct page *page;
struct mm_struct *mm = vma->vm_mm;
- *page_mask = 0;
-
- page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
- if (!IS_ERR(page)) {
- BUG_ON(flags & FOLL_GET);
- return page;
- }
-
- pgd = pgd_offset(mm, address);
- if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
- return no_page_table(vma, flags);
- p4d = p4d_offset(pgd, address);
- if (p4d_none(*p4d))
- return no_page_table(vma, flags);
- BUILD_BUG_ON(p4d_huge(*p4d));
- if (unlikely(p4d_bad(*p4d)))
- return no_page_table(vma, flags);
- pud = pud_offset(p4d, address);
- if (pud_none(*pud))
+ pmd = pmd_offset(pudp, address);
+ if (pmd_none(*pmd))
return no_page_table(vma, flags);
- if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
- page = follow_huge_pud(mm, address, pud, flags);
+ if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
+ page = follow_huge_pmd(mm, address, pmd, flags);
if (page)
return page;
return no_page_table(vma, flags);
}
- if (pud_devmap(*pud)) {
- ptl = pud_lock(mm, pud);
- page = follow_devmap_pud(vma, address, pud, flags);
- spin_unlock(ptl);
- if (page)
- return page;
- }
- if (unlikely(pud_bad(*pud)))
- return no_page_table(vma, flags);
-
- pmd = pmd_offset(pud, address);
- if (pmd_none(*pmd))
- return no_page_table(vma, flags);
- if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
- page = follow_huge_pmd(mm, address, pmd, flags);
+ if (is_hugepd(__hugepd(pmd_val(*pmd)))) {
+ page = follow_huge_pd(vma, address,
+ __hugepd(pmd_val(*pmd)), flags,
+ PMD_SHIFT);
if (page)
return page;
return no_page_table(vma, flags);
@@ -319,13 +275,131 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
return ret ? ERR_PTR(ret) :
follow_page_pte(vma, address, pmd, flags);
}
-
page = follow_trans_huge_pmd(vma, address, pmd, flags);
spin_unlock(ptl);
*page_mask = HPAGE_PMD_NR - 1;
return page;
}
+
+static struct page *follow_pud_mask(struct vm_area_struct *vma,
+ unsigned long address, p4d_t *p4dp,
+ unsigned int flags, unsigned int *page_mask)
+{
+ pud_t *pud;
+ spinlock_t *ptl;
+ struct page *page;
+ struct mm_struct *mm = vma->vm_mm;
+
+ pud = pud_offset(p4dp, address);
+ if (pud_none(*pud))
+ return no_page_table(vma, flags);
+ if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
+ page = follow_huge_pud(mm, address, pud, flags);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }
+ if (is_hugepd(__hugepd(pud_val(*pud)))) {
+ page = follow_huge_pd(vma, address,
+ __hugepd(pud_val(*pud)), flags,
+ PUD_SHIFT);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }
+ if (pud_devmap(*pud)) {
+ ptl = pud_lock(mm, pud);
+ page = follow_devmap_pud(vma, address, pud, flags);
+ spin_unlock(ptl);
+ if (page)
+ return page;
+ }
+ if (unlikely(pud_bad(*pud)))
+ return no_page_table(vma, flags);
+
+ return follow_pmd_mask(vma, address, pud, flags, page_mask);
+}
+
+
+static struct page *follow_p4d_mask(struct vm_area_struct *vma,
+ unsigned long address, pgd_t *pgdp,
+ unsigned int flags, unsigned int *page_mask)
+{
+ p4d_t *p4d;
+ struct page *page;
+
+ p4d = p4d_offset(pgdp, address);
+ if (p4d_none(*p4d))
+ return no_page_table(vma, flags);
+ BUILD_BUG_ON(p4d_huge(*p4d));
+ if (unlikely(p4d_bad(*p4d)))
+ return no_page_table(vma, flags);
+
+ if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
+ page = follow_huge_pd(vma, address,
+ __hugepd(p4d_val(*p4d)), flags,
+ P4D_SHIFT);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }
+ return follow_pud_mask(vma, address, p4d, flags, page_mask);
+}
+
+/**
+ * follow_page_mask - look up a page descriptor from a user-virtual address
+ * @vma: vm_area_struct mapping @address
+ * @address: virtual address to look up
+ * @flags: flags modifying lookup behaviour
+ * @page_mask: on output, *page_mask is set according to the size of the page
+ *
+ * @flags can have FOLL_ flags set, defined in <linux/mm.h>
+ *
+ * Returns the mapped (struct page *), %NULL if no mapping exists, or
+ * an error pointer if there is a mapping to something not represented
+ * by a page descriptor (see also vm_normal_page()).
+ */
+struct page *follow_page_mask(struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags,
+ unsigned int *page_mask)
+{
+ pgd_t *pgd;
+ struct page *page;
+ struct mm_struct *mm = vma->vm_mm;
+
+ *page_mask = 0;
+
+ /* make this handle hugepd */
+ page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
+ if (!IS_ERR(page)) {
+ BUG_ON(flags & FOLL_GET);
+ return page;
+ }
+
+ pgd = pgd_offset(mm, address);
+
+ if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+ return no_page_table(vma, flags);
+
+ if (pgd_huge(*pgd)) {
+ page = follow_huge_pgd(mm, address, pgd, flags);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }
+ if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
+ page = follow_huge_pd(vma, address,
+ __hugepd(pgd_val(*pgd)), flags,
+ PGDIR_SHIFT);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }
+
+ return follow_p4d_mask(vma, address, pgd, flags, page_mask);
+}
+
static int get_gate_page(struct mm_struct *mm, unsigned long address,
unsigned int gup_flags, struct vm_area_struct **vma,
struct page **page)
@@ -1349,16 +1423,15 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
return __gup_device_huge_pmd(orig, addr, end, pages, nr);
refs = 0;
- head = pmd_page(orig);
- page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+ page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
do {
- VM_BUG_ON_PAGE(compound_head(page) != head, page);
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
+ head = compound_head(pmd_page(orig));
if (!page_cache_add_speculative(head, refs)) {
*nr -= refs;
return 0;
@@ -1388,16 +1461,15 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
return __gup_device_huge_pud(orig, addr, end, pages, nr);
refs = 0;
- head = pud_page(orig);
- page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+ page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
do {
- VM_BUG_ON_PAGE(compound_head(page) != head, page);
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
+ head = compound_head(pud_page(orig));
if (!page_cache_add_speculative(head, refs)) {
*nr -= refs;
return 0;
@@ -1426,16 +1498,15 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
BUILD_BUG_ON(pgd_devmap(orig));
refs = 0;
- head = pgd_page(orig);
- page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
+ page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
do {
- VM_BUG_ON_PAGE(compound_head(page) != head, page);
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
+ head = compound_head(pgd_page(orig));
if (!page_cache_add_speculative(head, refs)) {
*nr -= refs;
return 0;
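
The gup.c refactoring above splits follow_page_mask() into per-level helpers (p4d/pud/pmd) and adds hugepd handling at every level; the external contract described in the kernel-doc is unchanged. A hedged caller sketch follows (hypothetical helper, mmap_sem assumed held by the caller):

/* Hypothetical lookup of the page backing a user address. */
static struct page *lookup_user_page(struct vm_area_struct *vma,
				     unsigned long addr)
{
	unsigned int page_mask;
	struct page *page;

	/* caller holds mmap_sem; FOLL_GET takes a reference we must drop */
	page = follow_page_mask(vma, addr, FOLL_GET, &page_mask);
	if (IS_ERR_OR_NULL(page))
		return NULL;
	/*
	 * page_mask describes the mapping size: 0 for base pages,
	 * HPAGE_PMD_NR - 1 for PMD-sized huge pages, and so on.
	 * The caller must put_page() the result when done.
	 */
	return page;
}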
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 88c6167f194d..86975dec0ba1 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1575,8 +1575,8 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
get_page(page);
spin_unlock(ptl);
split_huge_page(page);
- put_page(page);
unlock_page(page);
+ put_page(page);
goto out_unlocked;
}
@@ -2203,7 +2203,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
* atomic_set() here would be safe on all archs (and not only on x86),
* it's safer to use atomic_inc()/atomic_add().
*/
- if (PageAnon(head)) {
+ if (PageAnon(head) && !PageSwapCache(head)) {
page_ref_inc(page_tail);
} else {
/* Additional pin to radix tree */
@@ -2214,6 +2214,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
page_tail->flags |= (head->flags &
((1L << PG_referenced) |
(1L << PG_swapbacked) |
+ (1L << PG_swapcache) |
(1L << PG_mlocked) |
(1L << PG_uptodate) |
(1L << PG_active) |
@@ -2276,7 +2277,11 @@ static void __split_huge_page(struct page *page, struct list_head *list,
ClearPageCompound(head);
/* See comment in __split_huge_page_tail() */
if (PageAnon(head)) {
- page_ref_inc(head);
+ /* Additional pin to radix tree of swap cache */
+ if (PageSwapCache(head))
+ page_ref_add(head, 2);
+ else
+ page_ref_inc(head);
} else {
/* Additional pin to radix tree */
page_ref_add(head, 2);
@@ -2385,6 +2390,21 @@ int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
return ret;
}
+/* Racy check whether the huge page can be split */
+bool can_split_huge_page(struct page *page, int *pextra_pins)
+{
+ int extra_pins;
+
+ /* Additional pins from radix tree */
+ if (PageAnon(page))
+ extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0;
+ else
+ extra_pins = HPAGE_PMD_NR;
+ if (pextra_pins)
+ *pextra_pins = extra_pins;
+ return total_mapcount(page) == page_count(page) - extra_pins - 1;
+}
+
/*
* This function splits huge page into normal pages. @page can point to any
* subpage of huge page to split. Split doesn't change the position of @page.
@@ -2432,7 +2452,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
ret = -EBUSY;
goto out;
}
- extra_pins = 0;
mapping = NULL;
anon_vma_lock_write(anon_vma);
} else {
@@ -2444,8 +2463,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
goto out;
}
- /* Addidional pins from radix tree */
- extra_pins = HPAGE_PMD_NR;
anon_vma = NULL;
i_mmap_lock_read(mapping);
}
@@ -2454,7 +2471,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
* Racy check if we can split the page, before freeze_page() will
* split PMDs
*/
- if (total_mapcount(head) != page_count(head) - extra_pins - 1) {
+ if (!can_split_huge_page(head, &extra_pins)) {
ret = -EBUSY;
goto out_unlock;
}
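
can_split_huge_page() exposes the racy pin-count check so callers can cheaply skip pages that cannot be split anyway; split_huge_page_to_list() still repeats the check under the proper locks. A hypothetical caller sketch (assumes the prototype is visible to the caller, who already holds a reference and the page lock):

/* Hypothetical helper: only attempt a split that has a chance to work. */
static bool try_split_thp(struct page *page)
{
	struct page *head = compound_head(page);

	if (!PageTransHuge(head))
		return true;
	/* racy check; re-done under lock inside split_huge_page_to_list() */
	if (!can_split_huge_page(head, NULL))
		return false;
	return split_huge_page(page) == 0;
}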
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3eedb187e549..1a88006ec634 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -867,7 +867,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
h->free_huge_pages_node[nid]++;
}
-static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
+static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
struct page *page;
@@ -887,6 +887,22 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
return page;
}
+static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
+{
+ struct page *page;
+ int node;
+
+ if (nid != NUMA_NO_NODE)
+ return dequeue_huge_page_node_exact(h, nid);
+
+ for_each_online_node(node) {
+ page = dequeue_huge_page_node_exact(h, node);
+ if (page)
+ return page;
+ }
+ return NULL;
+}
+
/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
@@ -904,6 +920,8 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
struct page *page = NULL;
struct mempolicy *mpol;
nodemask_t *nodemask;
+ gfp_t gfp_mask;
+ int nid;
struct zonelist *zonelist;
struct zone *zone;
struct zoneref *z;
@@ -924,12 +942,13 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
retry_cpuset:
cpuset_mems_cookie = read_mems_allowed_begin();
- zonelist = huge_zonelist(vma, address,
- htlb_alloc_mask(h), &mpol, &nodemask);
+ gfp_mask = htlb_alloc_mask(h);
+ nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
+ zonelist = node_zonelist(nid, gfp_mask);
for_each_zone_zonelist_nodemask(zone, z, zonelist,
MAX_NR_ZONES - 1, nodemask) {
- if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
+ if (cpuset_zone_allowed(zone, gfp_mask)) {
page = dequeue_huge_page_node(h, zone_to_nid(zone));
if (page) {
if (avoid_reserve)
@@ -1024,9 +1043,7 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
((node = hstate_next_node_to_free(hs, mask)) || 1); \
nr_nodes--)
-#if defined(CONFIG_ARCH_HAS_GIGANTIC_PAGE) && \
- ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || \
- defined(CONFIG_CMA))
+#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_page(struct page *page,
unsigned int order)
{
@@ -1158,8 +1175,7 @@ static int alloc_fresh_gigantic_page(struct hstate *h,
return 0;
}
-static inline bool gigantic_page_supported(void) { return true; }
-#else
+#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static inline bool gigantic_page_supported(void) { return false; }
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
@@ -1545,13 +1561,13 @@ static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
do {
struct page *page;
struct mempolicy *mpol;
- struct zonelist *zl;
+ int nid;
nodemask_t *nodemask;
cpuset_mems_cookie = read_mems_allowed_begin();
- zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
+ nid = huge_node(vma, addr, gfp, &mpol, &nodemask);
mpol_cond_put(mpol);
- page = __alloc_pages_nodemask(gfp, order, zl, nodemask);
+ page = __alloc_pages_nodemask(gfp, order, nid, nodemask);
if (page)
return page;
} while (read_mems_allowed_retry(cpuset_mems_cookie));
@@ -3185,17 +3201,17 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
update_mmu_cache(vma, address, ptep);
}
-static int is_hugetlb_entry_migration(pte_t pte)
+bool is_hugetlb_entry_migration(pte_t pte)
{
swp_entry_t swp;
if (huge_pte_none(pte) || pte_present(pte))
- return 0;
+ return false;
swp = pte_to_swp_entry(pte);
if (non_swap_entry(swp) && is_migration_entry(swp))
- return 1;
+ return true;
else
- return 0;
+ return false;
}
static int is_hugetlb_entry_hwpoisoned(pte_t pte)
@@ -3233,7 +3249,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
spinlock_t *src_ptl, *dst_ptl;
- src_pte = huge_pte_offset(src, addr);
+ src_pte = huge_pte_offset(src, addr, sz);
if (!src_pte)
continue;
dst_pte = huge_pte_alloc(dst, addr, sz);
@@ -3263,9 +3279,10 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
*/
make_migration_entry_read(&swp_entry);
entry = swp_entry_to_pte(swp_entry);
- set_huge_pte_at(src, addr, src_pte, entry);
+ set_huge_swap_pte_at(src, addr, src_pte,
+ entry, sz);
}
- set_huge_pte_at(dst, addr, dst_pte, entry);
+ set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
} else {
if (cow) {
huge_ptep_set_wrprotect(src, addr, src_pte);
@@ -3317,7 +3334,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
address = start;
for (; address < end; address += sz) {
- ptep = huge_pte_offset(mm, address);
+ ptep = huge_pte_offset(mm, address, sz);
if (!ptep)
continue;
@@ -3338,7 +3355,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
* unmapped and its refcount is dropped, so just clear pte here.
*/
if (unlikely(!pte_present(pte))) {
- huge_pte_clear(mm, address, ptep);
+ huge_pte_clear(mm, address, ptep, sz);
spin_unlock(ptl);
continue;
}
@@ -3535,7 +3552,8 @@ retry_avoidcopy:
unmap_ref_private(mm, vma, old_page, address);
BUG_ON(huge_pte_none(pte));
spin_lock(ptl);
- ptep = huge_pte_offset(mm, address & huge_page_mask(h));
+ ptep = huge_pte_offset(mm, address & huge_page_mask(h),
+ huge_page_size(h));
if (likely(ptep &&
pte_same(huge_ptep_get(ptep), pte)))
goto retry_avoidcopy;
@@ -3574,7 +3592,8 @@ retry_avoidcopy:
* before the page tables are altered
*/
spin_lock(ptl);
- ptep = huge_pte_offset(mm, address & huge_page_mask(h));
+ ptep = huge_pte_offset(mm, address & huge_page_mask(h),
+ huge_page_size(h));
if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
ClearPagePrivate(new_page);
@@ -3861,7 +3880,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
address &= huge_page_mask(h);
- ptep = huge_pte_offset(mm, address);
+ ptep = huge_pte_offset(mm, address, huge_page_size(h));
if (ptep) {
entry = huge_ptep_get(ptep);
if (unlikely(is_hugetlb_entry_migration(entry))) {
@@ -4118,7 +4137,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
*
* Note that page table lock is not held when pte is null.
*/
- pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
+ pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
+ huge_page_size(h));
if (pte)
ptl = huge_pte_lock(h, mm, pte);
absent = !pte || huge_pte_none(huge_ptep_get(pte));
@@ -4257,7 +4277,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
i_mmap_lock_write(vma->vm_file->f_mapping);
for (; address < end; address += huge_page_size(h)) {
spinlock_t *ptl;
- ptep = huge_pte_offset(mm, address);
+ ptep = huge_pte_offset(mm, address, huge_page_size(h));
if (!ptep)
continue;
ptl = huge_pte_lock(h, mm, ptep);
@@ -4279,7 +4299,8 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
make_migration_entry_read(&entry);
newpte = swp_entry_to_pte(entry);
- set_huge_pte_at(mm, address, ptep, newpte);
+ set_huge_swap_pte_at(mm, address, ptep,
+ newpte, huge_page_size(h));
pages++;
}
spin_unlock(ptl);
@@ -4521,7 +4542,8 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
saddr = page_table_shareable(svma, vma, addr, idx);
if (saddr) {
- spte = huge_pte_offset(svma->vm_mm, saddr);
+ spte = huge_pte_offset(svma->vm_mm, saddr,
+ vma_mmu_pagesize(svma));
if (spte) {
get_page(virt_to_page(spte));
break;
@@ -4617,7 +4639,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
return pte;
}
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
p4d_t *p4d;
@@ -4653,6 +4676,14 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address,
}
struct page * __weak
+follow_huge_pd(struct vm_area_struct *vma,
+ unsigned long address, hugepd_t hpd, int flags, int pdshift)
+{
+ WARN(1, "hugepd follow called with no support for hugepage directory format\n");
+ return NULL;
+}
+
+struct page * __weak
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int flags)
{
@@ -4699,6 +4730,15 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}
+struct page * __weak
+follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
+{
+ if (flags & FOLL_GET)
+ return NULL;
+
+ return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
+}
+
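
Every huge_pte_offset() caller above now passes the huge page size, so architectures with several hugepage directory formats can walk to the correct page-table level. A minimal sketch of the updated calling convention (hypothetical wrapper, for illustration only):

/* Hypothetical wrapper showing the new huge_pte_offset() signature. */
static pte_t *find_huge_pte(struct vm_area_struct *vma, unsigned long addr)
{
	struct hstate *h = hstate_vma(vma);

	return huge_pte_offset(vma->vm_mm, addr & huge_page_mask(h),
			       huge_page_size(h));
}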
#ifdef CONFIG_MEMORY_FAILURE
/*
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 20036d4f9f13..7780cd83a495 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -150,7 +150,7 @@ struct kmemleak_scan_area {
*/
struct kmemleak_object {
spinlock_t lock;
- unsigned long flags; /* object status flags */
+ unsigned int flags; /* object status flags */
struct list_head object_list;
struct list_head gray_list;
struct rb_node rb_node;
@@ -159,6 +159,8 @@ struct kmemleak_object {
atomic_t use_count;
unsigned long pointer;
size_t size;
+ /* pass surplus references to this pointer */
+ unsigned long excess_ref;
/* minimum number of pointers found before it is considered a leak */
int min_count;
/* the total number of pointers found pointing to this object */
@@ -253,7 +255,8 @@ enum {
KMEMLEAK_NOT_LEAK,
KMEMLEAK_IGNORE,
KMEMLEAK_SCAN_AREA,
- KMEMLEAK_NO_SCAN
+ KMEMLEAK_NO_SCAN,
+ KMEMLEAK_SET_EXCESS_REF
};
/*
@@ -262,9 +265,12 @@ enum {
*/
struct early_log {
int op_type; /* kmemleak operation type */
- const void *ptr; /* allocated/freed memory block */
- size_t size; /* memory block size */
int min_count; /* minimum reference count */
+ const void *ptr; /* allocated/freed memory block */
+ union {
+ size_t size; /* memory block size */
+ unsigned long excess_ref; /* surplus reference passing */
+ };
unsigned long trace[MAX_TRACE]; /* stack trace */
unsigned int trace_len; /* stack trace length */
};
@@ -393,7 +399,7 @@ static void dump_object_info(struct kmemleak_object *object)
object->comm, object->pid, object->jiffies);
pr_notice(" min_count = %d\n", object->min_count);
pr_notice(" count = %d\n", object->count);
- pr_notice(" flags = 0x%lx\n", object->flags);
+ pr_notice(" flags = 0x%x\n", object->flags);
pr_notice(" checksum = %u\n", object->checksum);
pr_notice(" backtrace:\n");
print_stack_trace(&trace, 4);
@@ -562,6 +568,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
object->flags = OBJECT_ALLOCATED;
object->pointer = ptr;
object->size = size;
+ object->excess_ref = 0;
object->min_count = min_count;
object->count = 0; /* white color initially */
object->jiffies = jiffies;
@@ -795,6 +802,30 @@ out:
}
/*
+ * Any surplus references (object already gray) to 'ptr' are passed to
+ * 'excess_ref'. This is used in the vmalloc() case where a pointer to
+ * vm_struct may be used as an alternative reference to the vmalloc'ed object
+ * (see free_thread_stack()).
+ */
+static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
+{
+ unsigned long flags;
+ struct kmemleak_object *object;
+
+ object = find_and_get_object(ptr, 0);
+ if (!object) {
+ kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
+ ptr);
+ return;
+ }
+
+ spin_lock_irqsave(&object->lock, flags);
+ object->excess_ref = excess_ref;
+ spin_unlock_irqrestore(&object->lock, flags);
+ put_object(object);
+}
+
+/*
* Set the OBJECT_NO_SCAN flag for the object corresponding to the given
* pointer. Such object will not be scanned by kmemleak but references to it
* are searched.
@@ -908,7 +939,7 @@ static void early_alloc_percpu(struct early_log *log)
* @gfp: kmalloc() flags used for kmemleak internal memory allocations
*
* This function is called from the kernel allocators when a new object
- * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
+ * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
*/
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
gfp_t gfp)
@@ -952,6 +983,36 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
/**
+ * kmemleak_vmalloc - register a newly vmalloc'ed object
+ * @area: pointer to vm_struct
+ * @size: size of the object
+ * @gfp: __vmalloc() flags used for kmemleak internal memory allocations
+ *
+ * This function is called from the vmalloc() kernel allocator when a new
+ * object (memory block) is allocated.
+ */
+void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
+{
+ pr_debug("%s(0x%p, %zu)\n", __func__, area, size);
+
+ /*
+ * A min_count = 2 is needed because vm_struct contains a reference to
+ * the virtual address of the vmalloc'ed block.
+ */
+ if (kmemleak_enabled) {
+ create_object((unsigned long)area->addr, size, 2, gfp);
+ object_set_excess_ref((unsigned long)area,
+ (unsigned long)area->addr);
+ } else if (kmemleak_early_log) {
+ log_early(KMEMLEAK_ALLOC, area->addr, size, 2);
+ /* reusing early_log.size for storing area->addr */
+ log_early(KMEMLEAK_SET_EXCESS_REF,
+ area, (unsigned long)area->addr, 0);
+ }
+}
+EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
+
+/**
* kmemleak_free - unregister a previously registered object
* @ptr: pointer to beginning of the object
*
@@ -1188,6 +1249,30 @@ static bool update_checksum(struct kmemleak_object *object)
}
/*
+ * Update an object's references. object->lock must be held by the caller.
+ */
+static void update_refs(struct kmemleak_object *object)
+{
+ if (!color_white(object)) {
+ /* non-orphan, ignored or new */
+ return;
+ }
+
+ /*
+ * Increase the object's reference count (number of pointers to the
+ * memory block). If this count reaches the required minimum, the
+ * object's color will become gray and it will be added to the
+ * gray_list.
+ */
+ object->count++;
+ if (color_gray(object)) {
+ /* put_object() called when removing from gray_list */
+ WARN_ON(!get_object(object));
+ list_add_tail(&object->gray_list, &gray_list);
+ }
+}
+
+/*
* Memory scanning is a long process and it needs to be interruptible. This
* function checks whether such interrupt condition occurred.
*/
@@ -1224,6 +1309,7 @@ static void scan_block(void *_start, void *_end,
for (ptr = start; ptr < end; ptr++) {
struct kmemleak_object *object;
unsigned long pointer;
+ unsigned long excess_ref;
if (scan_should_stop())
break;
@@ -1259,25 +1345,27 @@ static void scan_block(void *_start, void *_end,
* enclosed by scan_mutex.
*/
spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
- if (!color_white(object)) {
- /* non-orphan, ignored or new */
- spin_unlock(&object->lock);
- continue;
- }
-
- /*
- * Increase the object's reference count (number of pointers
- * to the memory block). If this count reaches the required
- * minimum, the object's color will become gray and it will be
- * added to the gray_list.
- */
- object->count++;
+ /* only pass surplus references (object already gray) */
if (color_gray(object)) {
- /* put_object() called when removing from gray_list */
- WARN_ON(!get_object(object));
- list_add_tail(&object->gray_list, &gray_list);
+ excess_ref = object->excess_ref;
+ /* no need for update_refs() if object already gray */
+ } else {
+ excess_ref = 0;
+ update_refs(object);
}
spin_unlock(&object->lock);
+
+ if (excess_ref) {
+ object = lookup_object(excess_ref, 0);
+ if (!object)
+ continue;
+ if (object == scanned)
+ /* circular reference, ignore */
+ continue;
+ spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
+ update_refs(object);
+ spin_unlock(&object->lock);
+ }
}
read_unlock_irqrestore(&kmemleak_lock, flags);
}
@@ -1980,6 +2068,10 @@ void __init kmemleak_init(void)
case KMEMLEAK_NO_SCAN:
kmemleak_no_scan(log->ptr);
break;
+ case KMEMLEAK_SET_EXCESS_REF:
+ object_set_excess_ref((unsigned long)log->ptr,
+ log->excess_ref);
+ break;
default:
kmemleak_warn("Unknown early log operation: %d\n",
log->op_type);
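
kmemleak_vmalloc() above registers the block with min_count == 2 and then marks the vm_struct pointer as an acceptable surplus reference, so a user that only keeps the vm_struct (as free_thread_stack() does) is not reported as a leak. A hedged sketch of the allocator side (hypothetical function, heavily simplified):

/* Hypothetical, simplified vmalloc-style allocation with registration. */
static void *example_vmalloc(unsigned long size, gfp_t gfp_mask)
{
	struct vm_struct *area;

	area = get_vm_area(size, VM_ALLOC);
	if (!area)
		return NULL;
	/* ... allocate and map pages into area->addr ... */

	/* register area->addr and pass the vm_struct as an excess ref */
	kmemleak_vmalloc(area, size, gfp_mask);
	return area->addr;
}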
diff --git a/mm/ksm.c b/mm/ksm.c
index 216184af0e19..4dc92f138786 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -128,9 +128,12 @@ struct ksm_scan {
* struct stable_node - node of the stable rbtree
* @node: rb node of this ksm page in the stable tree
* @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
+ * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
* @list: linked into migrate_nodes, pending placement in the proper node tree
* @hlist: hlist head of rmap_items using this ksm page
* @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
+ * @chain_prune_time: time of the last full garbage collection
+ * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
* @nid: NUMA node id of stable tree in which linked (may not match kpfn)
*/
struct stable_node {
@@ -138,11 +141,24 @@ struct stable_node {
struct rb_node node; /* when node of stable tree */
struct { /* when listed for migration */
struct list_head *head;
- struct list_head list;
+ struct {
+ struct hlist_node hlist_dup;
+ struct list_head list;
+ };
};
};
struct hlist_head hlist;
- unsigned long kpfn;
+ union {
+ unsigned long kpfn;
+ unsigned long chain_prune_time;
+ };
+ /*
+ * STABLE_NODE_CHAIN can be any negative number in the
+ * rmap_hlist_len negative range, but preferably not -1, so that
+ * underflows can be detected reliably.
+ */
+#define STABLE_NODE_CHAIN -1024
+ int rmap_hlist_len;
#ifdef CONFIG_NUMA
int nid;
#endif
@@ -192,6 +208,7 @@ static struct rb_root *root_unstable_tree = one_unstable_tree;
/* Recently migrated nodes of stable tree, pending proper placement */
static LIST_HEAD(migrate_nodes);
+#define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)
#define MM_SLOTS_HASH_BITS 10
static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
@@ -219,6 +236,18 @@ static unsigned long ksm_pages_unshared;
/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;
+/* The number of stable_node chains */
+static unsigned long ksm_stable_node_chains;
+
+/* The number of stable_node dups linked to the stable_node chains */
+static unsigned long ksm_stable_node_dups;
+
+/* Delay in pruning stale stable_node_dups in the stable_node_chains */
+static int ksm_stable_node_chains_prune_millisecs = 2000;
+
+/* Maximum number of page slots sharing a stable node */
+static int ksm_max_page_sharing = 256;
+
/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;
@@ -287,6 +316,45 @@ static void __init ksm_slab_free(void)
mm_slot_cache = NULL;
}
+static __always_inline bool is_stable_node_chain(struct stable_node *chain)
+{
+ return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
+}
+
+static __always_inline bool is_stable_node_dup(struct stable_node *dup)
+{
+ return dup->head == STABLE_NODE_DUP_HEAD;
+}
+
+static inline void stable_node_chain_add_dup(struct stable_node *dup,
+ struct stable_node *chain)
+{
+ VM_BUG_ON(is_stable_node_dup(dup));
+ dup->head = STABLE_NODE_DUP_HEAD;
+ VM_BUG_ON(!is_stable_node_chain(chain));
+ hlist_add_head(&dup->hlist_dup, &chain->hlist);
+ ksm_stable_node_dups++;
+}
+
+static inline void __stable_node_dup_del(struct stable_node *dup)
+{
+ VM_BUG_ON(!is_stable_node_dup(dup));
+ hlist_del(&dup->hlist_dup);
+ ksm_stable_node_dups--;
+}
+
+static inline void stable_node_dup_del(struct stable_node *dup)
+{
+ VM_BUG_ON(is_stable_node_chain(dup));
+ if (is_stable_node_dup(dup))
+ __stable_node_dup_del(dup);
+ else
+ rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
+#ifdef CONFIG_DEBUG_VM
+ dup->head = NULL;
+#endif
+}
+
static inline struct rmap_item *alloc_rmap_item(void)
{
struct rmap_item *rmap_item;
@@ -317,6 +385,8 @@ static inline struct stable_node *alloc_stable_node(void)
static inline void free_stable_node(struct stable_node *stable_node)
{
+ VM_BUG_ON(stable_node->rmap_hlist_len &&
+ !is_stable_node_chain(stable_node));
kmem_cache_free(stable_node_cache, stable_node);
}
@@ -498,25 +568,82 @@ static inline int get_kpfn_nid(unsigned long kpfn)
return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}
+static struct stable_node *alloc_stable_node_chain(struct stable_node *dup,
+ struct rb_root *root)
+{
+ struct stable_node *chain = alloc_stable_node();
+ VM_BUG_ON(is_stable_node_chain(dup));
+ if (likely(chain)) {
+ INIT_HLIST_HEAD(&chain->hlist);
+ chain->chain_prune_time = jiffies;
+ chain->rmap_hlist_len = STABLE_NODE_CHAIN;
+#if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
+ chain->nid = -1; /* debug */
+#endif
+ ksm_stable_node_chains++;
+
+ /*
+ * Put the stable node chain in the first dimension of
+ * the stable tree and at the same time remove the old
+ * stable node.
+ */
+ rb_replace_node(&dup->node, &chain->node, root);
+
+ /*
+ * Move the old stable node to the second dimension
+ * queued in the hlist_dup. The invariant is that all
+ * dup stable_nodes in the chain->hlist point to pages
+ * that are wrprotected and have the exact same
+ * content.
+ */
+ stable_node_chain_add_dup(dup, chain);
+ }
+ return chain;
+}
+
+static inline void free_stable_node_chain(struct stable_node *chain,
+ struct rb_root *root)
+{
+ rb_erase(&chain->node, root);
+ free_stable_node(chain);
+ ksm_stable_node_chains--;
+}
+
static void remove_node_from_stable_tree(struct stable_node *stable_node)
{
struct rmap_item *rmap_item;
+ /* check it's not STABLE_NODE_CHAIN or negative */
+ BUG_ON(stable_node->rmap_hlist_len < 0);
+
hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
if (rmap_item->hlist.next)
ksm_pages_sharing--;
else
ksm_pages_shared--;
+ VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
+ stable_node->rmap_hlist_len--;
put_anon_vma(rmap_item->anon_vma);
rmap_item->address &= PAGE_MASK;
cond_resched();
}
+ /*
+ * We need the second aligned pointer of the migrate_nodes
+ * list_head to stay clear of the rb_parent_color union
+ * (aligned and different from any node) and also different
+ * from &migrate_nodes. This will verify that future list.h changes
+ * don't break STABLE_NODE_DUP_HEAD.
+ */
+#if GCC_VERSION >= 40903 /* only recent gcc can handle it */
+ BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
+ BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
+#endif
+
if (stable_node->head == &migrate_nodes)
list_del(&stable_node->list);
else
- rb_erase(&stable_node->node,
- root_stable_tree + NUMA(stable_node->nid));
+ stable_node_dup_del(stable_node);
free_stable_node(stable_node);
}
@@ -635,6 +762,8 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
ksm_pages_sharing--;
else
ksm_pages_shared--;
+ VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
+ stable_node->rmap_hlist_len--;
put_anon_vma(rmap_item->anon_vma);
rmap_item->address &= PAGE_MASK;
@@ -743,6 +872,31 @@ static int remove_stable_node(struct stable_node *stable_node)
return err;
}
+static int remove_stable_node_chain(struct stable_node *stable_node,
+ struct rb_root *root)
+{
+ struct stable_node *dup;
+ struct hlist_node *hlist_safe;
+
+ if (!is_stable_node_chain(stable_node)) {
+ VM_BUG_ON(is_stable_node_dup(stable_node));
+ if (remove_stable_node(stable_node))
+ return true;
+ else
+ return false;
+ }
+
+ hlist_for_each_entry_safe(dup, hlist_safe,
+ &stable_node->hlist, hlist_dup) {
+ VM_BUG_ON(!is_stable_node_dup(dup));
+ if (remove_stable_node(dup))
+ return true;
+ }
+ BUG_ON(!hlist_empty(&stable_node->hlist));
+ free_stable_node_chain(stable_node, root);
+ return false;
+}
+
static int remove_all_stable_nodes(void)
{
struct stable_node *stable_node, *next;
@@ -753,7 +907,8 @@ static int remove_all_stable_nodes(void)
while (root_stable_tree[nid].rb_node) {
stable_node = rb_entry(root_stable_tree[nid].rb_node,
struct stable_node, node);
- if (remove_stable_node(stable_node)) {
+ if (remove_stable_node_chain(stable_node,
+ root_stable_tree + nid)) {
err = -EBUSY;
break; /* proceed to next nid */
}
@@ -1138,6 +1293,214 @@ static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
return err ? NULL : page;
}
+static __always_inline
+bool __is_page_sharing_candidate(struct stable_node *stable_node, int offset)
+{
+ VM_BUG_ON(stable_node->rmap_hlist_len < 0);
+ /*
+ * Check that at least one mapping still exists, otherwise
+ * there's no much point to merge and share with this
+ * stable_node, as the underlying tree_page of the other
+ * sharer is going to be freed soon.
+ */
+ return stable_node->rmap_hlist_len &&
+ stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
+}
+
+static __always_inline
+bool is_page_sharing_candidate(struct stable_node *stable_node)
+{
+ return __is_page_sharing_candidate(stable_node, 0);
+}
+
+struct page *stable_node_dup(struct stable_node **_stable_node_dup,
+ struct stable_node **_stable_node,
+ struct rb_root *root,
+ bool prune_stale_stable_nodes)
+{
+ struct stable_node *dup, *found = NULL, *stable_node = *_stable_node;
+ struct hlist_node *hlist_safe;
+ struct page *_tree_page, *tree_page = NULL;
+ int nr = 0;
+ int found_rmap_hlist_len;
+
+ if (!prune_stale_stable_nodes ||
+ time_before(jiffies, stable_node->chain_prune_time +
+ msecs_to_jiffies(
+ ksm_stable_node_chains_prune_millisecs)))
+ prune_stale_stable_nodes = false;
+ else
+ stable_node->chain_prune_time = jiffies;
+
+ hlist_for_each_entry_safe(dup, hlist_safe,
+ &stable_node->hlist, hlist_dup) {
+ cond_resched();
+ /*
+ * We must walk all stable_node_dup to prune the stale
+ * stable nodes during lookup.
+ *
+ * get_ksm_page can drop the nodes from the
+ * stable_node->hlist if they point to freed pages
+ * (that's why we do a _safe walk). The "dup"
+ * stable_node parameter itself will be freed from
+ * under us if it returns NULL.
+ */
+ _tree_page = get_ksm_page(dup, false);
+ if (!_tree_page)
+ continue;
+ nr += 1;
+ if (is_page_sharing_candidate(dup)) {
+ if (!found ||
+ dup->rmap_hlist_len > found_rmap_hlist_len) {
+ if (found)
+ put_page(tree_page);
+ found = dup;
+ found_rmap_hlist_len = found->rmap_hlist_len;
+ tree_page = _tree_page;
+
+ /* skip put_page for found dup */
+ if (!prune_stale_stable_nodes)
+ break;
+ continue;
+ }
+ }
+ put_page(_tree_page);
+ }
+
+ if (found) {
+ /*
+ * nr is counting all dups in the chain only if
+ * prune_stale_stable_nodes is true, otherwise we may
+ * break the loop at nr == 1 even if there are
+ * multiple entries.
+ */
+ if (prune_stale_stable_nodes && nr == 1) {
+ /*
+ * If there were more than one entry we would
+ * corrupt memory, so better to BUG_ON. In KSM
+ * context, with no lock held, it's not even
+ * fatal.
+ */
+ BUG_ON(stable_node->hlist.first->next);
+
+ /*
+ * There's just one entry and it is below the
+ * deduplication limit so drop the chain.
+ */
+ rb_replace_node(&stable_node->node, &found->node,
+ root);
+ free_stable_node(stable_node);
+ ksm_stable_node_chains--;
+ ksm_stable_node_dups--;
+ /*
+ * NOTE: the caller depends on the stable_node
+ * to be equal to stable_node_dup if the chain
+ * was collapsed.
+ */
+ *_stable_node = found;
+ /*
+ * Just for robustness, as stable_node is
+ * otherwise left as a stable pointer, the
+ * compiler shall optimize it away at build
+ * time.
+ */
+ stable_node = NULL;
+ } else if (stable_node->hlist.first != &found->hlist_dup &&
+ __is_page_sharing_candidate(found, 1)) {
+ /*
+ * If the found stable_node dup can accept one
+ * more future merge (in addition to the one
+ * that is underway) and is not at the head of
+ * the chain, put it there so next search will
+ * be quicker in the !prune_stale_stable_nodes
+ * case.
+ *
+ * NOTE: it would be inaccurate to use nr > 1
+ * instead of checking the hlist.first pointer
+ * directly, because in the
+ * prune_stale_stable_nodes case "nr" isn't
+ * the position of the found dup in the chain,
+ * but the total number of dups in the chain.
+ */
+ hlist_del(&found->hlist_dup);
+ hlist_add_head(&found->hlist_dup,
+ &stable_node->hlist);
+ }
+ }
+
+ *_stable_node_dup = found;
+ return tree_page;
+}
+
+static struct stable_node *stable_node_dup_any(struct stable_node *stable_node,
+ struct rb_root *root)
+{
+ if (!is_stable_node_chain(stable_node))
+ return stable_node;
+ if (hlist_empty(&stable_node->hlist)) {
+ free_stable_node_chain(stable_node, root);
+ return NULL;
+ }
+ return hlist_entry(stable_node->hlist.first,
+ typeof(*stable_node), hlist_dup);
+}
+
+/*
+ * Like for get_ksm_page, this function can free the *_stable_node and
+ * *_stable_node_dup if the returned tree_page is NULL.
+ *
+ * It can also free and overwrite *_stable_node with the found
+ * stable_node_dup if the chain is collapsed (in which case
+ * *_stable_node will be equal to *_stable_node_dup like if the chain
+ * never existed). It's up to the caller to verify tree_page is not
+ * NULL before dereferencing *_stable_node or *_stable_node_dup.
+ *
+ * *_stable_node_dup is really a second output parameter of this
+ * function and will be overwritten in all cases, the caller doesn't
+ * need to initialize it.
+ */
+static struct page *__stable_node_chain(struct stable_node **_stable_node_dup,
+ struct stable_node **_stable_node,
+ struct rb_root *root,
+ bool prune_stale_stable_nodes)
+{
+ struct stable_node *stable_node = *_stable_node;
+ if (!is_stable_node_chain(stable_node)) {
+ if (is_page_sharing_candidate(stable_node)) {
+ *_stable_node_dup = stable_node;
+ return get_ksm_page(stable_node, false);
+ }
+ /*
+ * _stable_node_dup set to NULL means the stable_node
+ * reached the ksm_max_page_sharing limit.
+ */
+ *_stable_node_dup = NULL;
+ return NULL;
+ }
+ return stable_node_dup(_stable_node_dup, _stable_node, root,
+ prune_stale_stable_nodes);
+}
+
+static __always_inline struct page *chain_prune(struct stable_node **s_n_d,
+ struct stable_node **s_n,
+ struct rb_root *root)
+{
+ return __stable_node_chain(s_n_d, s_n, root, true);
+}
+
+static __always_inline struct page *chain(struct stable_node **s_n_d,
+ struct stable_node *s_n,
+ struct rb_root *root)
+{
+ struct stable_node *old_stable_node = s_n;
+ struct page *tree_page;
+
+ tree_page = __stable_node_chain(s_n_d, &s_n, root, false);
+ /* not pruning dups so s_n cannot have changed */
+ VM_BUG_ON(s_n != old_stable_node);
+ return tree_page;
+}
+
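
Following the rules in the comment above __stable_node_chain(), a consumer must treat a NULL tree_page as "the nodes may already be gone (or all dups full)" and only dereference the output pointers afterwards. A minimal, hypothetical consumer sketch:

/* Hypothetical consumer of chain_prune(), per the rules documented above. */
static struct page *lookup_dup_page(struct stable_node **stable_node,
				    struct stable_node **dup,
				    struct rb_root *root)
{
	struct page *tree_page;

	tree_page = chain_prune(dup, stable_node, root);
	if (!tree_page)
		return NULL;	/* nodes may have been freed, or all dups full */
	/* safe: tree_page holds a reference to (*dup)'s KSM page */
	return tree_page;
}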
/*
* stable_tree_search - search for page inside the stable tree
*
@@ -1153,7 +1516,7 @@ static struct page *stable_tree_search(struct page *page)
struct rb_root *root;
struct rb_node **new;
struct rb_node *parent;
- struct stable_node *stable_node;
+ struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
struct stable_node *page_node;
page_node = page_stable_node(page);
@@ -1175,7 +1538,44 @@ again:
cond_resched();
stable_node = rb_entry(*new, struct stable_node, node);
- tree_page = get_ksm_page(stable_node, false);
+ stable_node_any = NULL;
+ tree_page = chain_prune(&stable_node_dup, &stable_node, root);
+ /*
+ * NOTE: stable_node may have been freed by
+ * chain_prune() if the returned stable_node_dup is
+ * not NULL. stable_node_dup may have been inserted in
+ * the rbtree instead as a regular stable_node (in
+ * order to collapse the stable_node chain if a single
+ * stable_node dup was found in it). In such case the
+ * stable_node is overwritten by the callee to point
+ * to the stable_node_dup that was collapsed in the
+ * stable rbtree and stable_node will be equal to
+ * stable_node_dup like if the chain never existed.
+ */
+ if (!stable_node_dup) {
+ /*
+ * Either all stable_node dups were full in
+ * this stable_node chain, or this chain was
+ * empty and should be rb_erased.
+ */
+ stable_node_any = stable_node_dup_any(stable_node,
+ root);
+ if (!stable_node_any) {
+ /* rb_erase just run */
+ goto again;
+ }
+ /*
+ * Take the page of any of the stable_node dups in
+ * this stable_node chain to let the tree walk
+ * continue. All KSM pages belonging to the
+ * stable_node dups in a stable_node chain
+ * have the same content and they're
+ * wrprotected at all times. Any will work
+ * fine to continue the walk.
+ */
+ tree_page = get_ksm_page(stable_node_any, false);
+ }
+ VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
if (!tree_page) {
/*
* If we walked over a stale stable_node,
@@ -1198,6 +1598,34 @@ again:
else if (ret > 0)
new = &parent->rb_right;
else {
+ if (page_node) {
+ VM_BUG_ON(page_node->head != &migrate_nodes);
+ /*
+ * Test if the migrated page should be merged
+ * into a stable node dup. If the mapcount is
+ * 1 we can migrate it with another KSM page
+ * without adding it to the chain.
+ */
+ if (page_mapcount(page) > 1)
+ goto chain_append;
+ }
+
+ if (!stable_node_dup) {
+ /*
+ * If the stable_node is a chain and
+ * we got a payload match in memcmp
+ * but we cannot merge the scanned
+ * page in any of the existing
+ * stable_node dups because they're
+ * all full, we need to wait for the
+ * scanned page to find itself a match
+ * in the unstable tree to create a
+ * brand new KSM page to add later to
+ * the dups of this stable_node.
+ */
+ return NULL;
+ }
+
/*
* Lock and unlock the stable_node's page (which
* might already have been migrated) so that page
@@ -1205,23 +1633,21 @@ again:
* It would be more elegant to return stable_node
* than kpage, but that involves more changes.
*/
- tree_page = get_ksm_page(stable_node, true);
- if (tree_page) {
- unlock_page(tree_page);
- if (get_kpfn_nid(stable_node->kpfn) !=
- NUMA(stable_node->nid)) {
- put_page(tree_page);
- goto replace;
- }
- return tree_page;
- }
- /*
- * There is now a place for page_node, but the tree may
- * have been rebalanced, so re-evaluate parent and new.
- */
- if (page_node)
+ tree_page = get_ksm_page(stable_node_dup, true);
+ if (unlikely(!tree_page))
+ /*
+ * The tree may have been rebalanced,
+ * so re-evaluate parent and new.
+ */
goto again;
- return NULL;
+ unlock_page(tree_page);
+
+ if (get_kpfn_nid(stable_node_dup->kpfn) !=
+ NUMA(stable_node_dup->nid)) {
+ put_page(tree_page);
+ goto replace;
+ }
+ return tree_page;
}
}
@@ -1232,22 +1658,95 @@ again:
DO_NUMA(page_node->nid = nid);
rb_link_node(&page_node->node, parent, new);
rb_insert_color(&page_node->node, root);
- get_page(page);
- return page;
+out:
+ if (is_page_sharing_candidate(page_node)) {
+ get_page(page);
+ return page;
+ } else
+ return NULL;
replace:
- if (page_node) {
- list_del(&page_node->list);
- DO_NUMA(page_node->nid = nid);
- rb_replace_node(&stable_node->node, &page_node->node, root);
- get_page(page);
+ /*
+ * If stable_node was a chain and chain_prune collapsed it,
+ * stable_node has been updated to be the new regular
+ * stable_node. A collapse of the chain is indistinguishable
+ * from the case where there was no chain in the stable
+ * rbtree. Otherwise stable_node is the chain and
+ * stable_node_dup is the dup to replace.
+ */
+ if (stable_node_dup == stable_node) {
+ VM_BUG_ON(is_stable_node_chain(stable_node_dup));
+ VM_BUG_ON(is_stable_node_dup(stable_node_dup));
+ /* there is no chain */
+ if (page_node) {
+ VM_BUG_ON(page_node->head != &migrate_nodes);
+ list_del(&page_node->list);
+ DO_NUMA(page_node->nid = nid);
+ rb_replace_node(&stable_node_dup->node,
+ &page_node->node,
+ root);
+ if (is_page_sharing_candidate(page_node))
+ get_page(page);
+ else
+ page = NULL;
+ } else {
+ rb_erase(&stable_node_dup->node, root);
+ page = NULL;
+ }
} else {
- rb_erase(&stable_node->node, root);
- page = NULL;
+ VM_BUG_ON(!is_stable_node_chain(stable_node));
+ __stable_node_dup_del(stable_node_dup);
+ if (page_node) {
+ VM_BUG_ON(page_node->head != &migrate_nodes);
+ list_del(&page_node->list);
+ DO_NUMA(page_node->nid = nid);
+ stable_node_chain_add_dup(page_node, stable_node);
+ if (is_page_sharing_candidate(page_node))
+ get_page(page);
+ else
+ page = NULL;
+ } else {
+ page = NULL;
+ }
}
- stable_node->head = &migrate_nodes;
- list_add(&stable_node->list, stable_node->head);
+ stable_node_dup->head = &migrate_nodes;
+ list_add(&stable_node_dup->list, stable_node_dup->head);
return page;
+
+chain_append:
+ /* stable_node_dup could be null if it reached the limit */
+ if (!stable_node_dup)
+ stable_node_dup = stable_node_any;
+ /*
+ * If stable_node was a chain and chain_prune collapsed it,
+ * stable_node has been updated to be the new regular
+ * stable_node. A collapse of the chain is indistinguishable
+ * from the case where there was no chain in the stable
+ * rbtree. Otherwise stable_node is the chain and
+ * stable_node_dup is the dup to replace.
+ */
+ if (stable_node_dup == stable_node) {
+ VM_BUG_ON(is_stable_node_chain(stable_node_dup));
+ VM_BUG_ON(is_stable_node_dup(stable_node_dup));
+ /* chain is missing so create it */
+ stable_node = alloc_stable_node_chain(stable_node_dup,
+ root);
+ if (!stable_node)
+ return NULL;
+ }
+ /*
+ * Add this stable_node dup that was
+ * migrated to the stable_node chain
+ * of the current nid for this page
+ * content.
+ */
+ VM_BUG_ON(!is_stable_node_chain(stable_node));
+ VM_BUG_ON(!is_stable_node_dup(stable_node_dup));
+ VM_BUG_ON(page_node->head != &migrate_nodes);
+ list_del(&page_node->list);
+ DO_NUMA(page_node->nid = nid);
+ stable_node_chain_add_dup(page_node, stable_node);
+ goto out;
}
/*
@@ -1264,7 +1763,8 @@ static struct stable_node *stable_tree_insert(struct page *kpage)
struct rb_root *root;
struct rb_node **new;
struct rb_node *parent;
- struct stable_node *stable_node;
+ struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
+ bool need_chain = false;
kpfn = page_to_pfn(kpage);
nid = get_kpfn_nid(kpfn);
@@ -1279,7 +1779,32 @@ again:
cond_resched();
stable_node = rb_entry(*new, struct stable_node, node);
- tree_page = get_ksm_page(stable_node, false);
+ stable_node_any = NULL;
+ tree_page = chain(&stable_node_dup, stable_node, root);
+ if (!stable_node_dup) {
+ /*
+ * Either all stable_node dups were full in
+ * this stable_node chain, or this chain was
+ * empty and should be rb_erased.
+ */
+ stable_node_any = stable_node_dup_any(stable_node,
+ root);
+ if (!stable_node_any) {
+ /* rb_erase just run */
+ goto again;
+ }
+ /*
+ * Take the page of any of the stable_node dups in
+ * this stable_node chain to let the tree walk
+ * continue. All KSM pages belonging to the
+ * stable_node dups in a stable_node chain
+ * have the same content and they're
+ * wrprotected at all times. Any will work
+ * fine to continue the walk.
+ */
+ tree_page = get_ksm_page(stable_node_any, false);
+ }
+ VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
if (!tree_page) {
/*
* If we walked over a stale stable_node,
@@ -1302,27 +1827,37 @@ again:
else if (ret > 0)
new = &parent->rb_right;
else {
- /*
- * It is not a bug that stable_tree_search() didn't
- * find this node: because at that time our page was
- * not yet write-protected, so may have changed since.
- */
- return NULL;
+ need_chain = true;
+ break;
}
}
- stable_node = alloc_stable_node();
- if (!stable_node)
+ stable_node_dup = alloc_stable_node();
+ if (!stable_node_dup)
return NULL;
- INIT_HLIST_HEAD(&stable_node->hlist);
- stable_node->kpfn = kpfn;
- set_page_stable_node(kpage, stable_node);
- DO_NUMA(stable_node->nid = nid);
- rb_link_node(&stable_node->node, parent, new);
- rb_insert_color(&stable_node->node, root);
+ INIT_HLIST_HEAD(&stable_node_dup->hlist);
+ stable_node_dup->kpfn = kpfn;
+ set_page_stable_node(kpage, stable_node_dup);
+ stable_node_dup->rmap_hlist_len = 0;
+ DO_NUMA(stable_node_dup->nid = nid);
+ if (!need_chain) {
+ rb_link_node(&stable_node_dup->node, parent, new);
+ rb_insert_color(&stable_node_dup->node, root);
+ } else {
+ if (!is_stable_node_chain(stable_node)) {
+ struct stable_node *orig = stable_node;
+ /* chain is missing so create it */
+ stable_node = alloc_stable_node_chain(orig, root);
+ if (!stable_node) {
+ free_stable_node(stable_node_dup);
+ return NULL;
+ }
+ }
+ stable_node_chain_add_dup(stable_node_dup, stable_node);
+ }
- return stable_node;
+ return stable_node_dup;
}
/*
@@ -1412,8 +1947,27 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
* the same ksm page.
*/
static void stable_tree_append(struct rmap_item *rmap_item,
- struct stable_node *stable_node)
+ struct stable_node *stable_node,
+ bool max_page_sharing_bypass)
{
+ /*
+ * rmap won't find this mapping if we don't insert the
+ * rmap_item in the right stable_node
+ * duplicate. page_migration could break later if rmap breaks,
+ * so we might as well crash here. We really need to check for
+ * rmap_hlist_len == STABLE_NODE_CHAIN, but we might as well check
+ * for other negative values: an underflow detected here for the
+ * first time (and not when decreasing rmap_hlist_len) would be a
+ * sign of memory corruption in the stable_node.
+ */
+ BUG_ON(stable_node->rmap_hlist_len < 0);
+
+ stable_node->rmap_hlist_len++;
+ if (!max_page_sharing_bypass)
+ /* possibly non-fatal but unexpected overflow, only warn */
+ WARN_ON_ONCE(stable_node->rmap_hlist_len >
+ ksm_max_page_sharing);
+
rmap_item->head = stable_node;
rmap_item->address |= STABLE_FLAG;
hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
@@ -1441,19 +1995,26 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
struct page *kpage;
unsigned int checksum;
int err;
+ bool max_page_sharing_bypass = false;
stable_node = page_stable_node(page);
if (stable_node) {
if (stable_node->head != &migrate_nodes &&
- get_kpfn_nid(stable_node->kpfn) != NUMA(stable_node->nid)) {
- rb_erase(&stable_node->node,
- root_stable_tree + NUMA(stable_node->nid));
+ get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
+ NUMA(stable_node->nid)) {
+ stable_node_dup_del(stable_node);
stable_node->head = &migrate_nodes;
list_add(&stable_node->list, stable_node->head);
}
if (stable_node->head != &migrate_nodes &&
rmap_item->head == stable_node)
return;
+ /*
+ * If it's a KSM fork, allow it to go over the sharing limit
+ * without warnings.
+ */
+ if (!is_page_sharing_candidate(stable_node))
+ max_page_sharing_bypass = true;
}
/* We first start with searching the page inside the stable tree */
@@ -1473,7 +2034,8 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
* add its rmap_item to the stable tree.
*/
lock_page(kpage);
- stable_tree_append(rmap_item, page_stable_node(kpage));
+ stable_tree_append(rmap_item, page_stable_node(kpage),
+ max_page_sharing_bypass);
unlock_page(kpage);
}
put_page(kpage);
@@ -1523,8 +2085,10 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
lock_page(kpage);
stable_node = stable_tree_insert(kpage);
if (stable_node) {
- stable_tree_append(tree_rmap_item, stable_node);
- stable_tree_append(rmap_item, stable_node);
+ stable_tree_append(tree_rmap_item, stable_node,
+ false);
+ stable_tree_append(rmap_item, stable_node,
+ false);
}
unlock_page(kpage);
@@ -2028,6 +2592,48 @@ static void wait_while_offlining(void)
}
}
+static bool stable_node_dup_remove_range(struct stable_node *stable_node,
+ unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ if (stable_node->kpfn >= start_pfn &&
+ stable_node->kpfn < end_pfn) {
+ /*
+ * Don't get_ksm_page, page has already gone:
+ * which is why we keep kpfn instead of page*
+ */
+ remove_node_from_stable_tree(stable_node);
+ return true;
+ }
+ return false;
+}
+
+static bool stable_node_chain_remove_range(struct stable_node *stable_node,
+ unsigned long start_pfn,
+ unsigned long end_pfn,
+ struct rb_root *root)
+{
+ struct stable_node *dup;
+ struct hlist_node *hlist_safe;
+
+ if (!is_stable_node_chain(stable_node)) {
+ VM_BUG_ON(is_stable_node_dup(stable_node));
+ return stable_node_dup_remove_range(stable_node, start_pfn,
+ end_pfn);
+ }
+
+ hlist_for_each_entry_safe(dup, hlist_safe,
+ &stable_node->hlist, hlist_dup) {
+ VM_BUG_ON(!is_stable_node_dup(dup));
+ stable_node_dup_remove_range(dup, start_pfn, end_pfn);
+ }
+ if (hlist_empty(&stable_node->hlist)) {
+ free_stable_node_chain(stable_node, root);
+ return true; /* notify caller that tree was rebalanced */
+ } else
+ return false;
+}
+
static void ksm_check_stable_tree(unsigned long start_pfn,
unsigned long end_pfn)
{
@@ -2039,15 +2645,12 @@ static void ksm_check_stable_tree(unsigned long start_pfn,
node = rb_first(root_stable_tree + nid);
while (node) {
stable_node = rb_entry(node, struct stable_node, node);
- if (stable_node->kpfn >= start_pfn &&
- stable_node->kpfn < end_pfn) {
- /*
- * Don't get_ksm_page, page has already gone:
- * which is why we keep kpfn instead of page*
- */
- remove_node_from_stable_tree(stable_node);
+ if (stable_node_chain_remove_range(stable_node,
+ start_pfn, end_pfn,
+ root_stable_tree +
+ nid))
node = rb_first(root_stable_tree + nid);
- } else
+ else
node = rb_next(node);
cond_resched();
}
@@ -2293,6 +2896,47 @@ static ssize_t use_zero_pages_store(struct kobject *kobj,
}
KSM_ATTR(use_zero_pages);
+static ssize_t max_page_sharing_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", ksm_max_page_sharing);
+}
+
+static ssize_t max_page_sharing_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ int knob;
+
+ err = kstrtoint(buf, 10, &knob);
+ if (err)
+ return err;
+ /*
+ * When a KSM page is created it is shared by 2 mappings. This
+ * being a signed comparison, it implicitly verifies it's not
+ * negative.
+ */
+ if (knob < 2)
+ return -EINVAL;
+
+ if (READ_ONCE(ksm_max_page_sharing) == knob)
+ return count;
+
+ mutex_lock(&ksm_thread_mutex);
+ wait_while_offlining();
+ if (ksm_max_page_sharing != knob) {
+ if (ksm_pages_shared || remove_all_stable_nodes())
+ err = -EBUSY;
+ else
+ ksm_max_page_sharing = knob;
+ }
+ mutex_unlock(&ksm_thread_mutex);
+
+ return err ? err : count;
+}
+KSM_ATTR(max_page_sharing);
+
static ssize_t pages_shared_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -2331,6 +2975,46 @@ static ssize_t pages_volatile_show(struct kobject *kobj,
}
KSM_ATTR_RO(pages_volatile);
+static ssize_t stable_node_dups_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", ksm_stable_node_dups);
+}
+KSM_ATTR_RO(stable_node_dups);
+
+static ssize_t stable_node_chains_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", ksm_stable_node_chains);
+}
+KSM_ATTR_RO(stable_node_chains);
+
+static ssize_t
+stable_node_chains_prune_millisecs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%u\n", ksm_stable_node_chains_prune_millisecs);
+}
+
+static ssize_t
+stable_node_chains_prune_millisecs_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long msecs;
+ int err;
+
+ err = kstrtoul(buf, 10, &msecs);
+ if (err || msecs > UINT_MAX)
+ return -EINVAL;
+
+ ksm_stable_node_chains_prune_millisecs = msecs;
+
+ return count;
+}
+KSM_ATTR(stable_node_chains_prune_millisecs);
+
static ssize_t full_scans_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -2350,6 +3034,10 @@ static struct attribute *ksm_attrs[] = {
#ifdef CONFIG_NUMA
&merge_across_nodes_attr.attr,
#endif
+ &max_page_sharing_attr.attr,
+ &stable_node_chains_attr.attr,
+ &stable_node_dups_attr.attr,
+ &stable_node_chains_prune_millisecs_attr.attr,
&use_zero_pages_attr.attr,
NULL,
};
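For reference, the attributes registered above are published under the usual KSM sysfs directory (normally /sys/kernel/mm/ksm/). A minimal userspace sketch, not part of this patch and with only illustrative error handling, that reads the new limit and the chain/dup counters:

/* Sketch only: read the new KSM knobs; assumes the standard sysfs mount. */
#include <stdio.h>

static long read_ksm_knob(const char *name)
{
	char path[128];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	printf("max_page_sharing:   %ld\n", read_ksm_knob("max_page_sharing"));
	printf("stable_node_chains: %ld\n", read_ksm_knob("stable_node_chains"));
	printf("stable_node_dups:   %ld\n", read_ksm_knob("stable_node_dups"));
	return 0;
}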
diff --git a/mm/memblock.c b/mm/memblock.c
index 7b8a5db76a2f..2cb25fe4452c 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -54,9 +54,6 @@ struct memblock memblock __initdata_memblock = {
};
int memblock_debug __initdata_memblock;
-#ifdef CONFIG_MOVABLE_NODE
-bool movable_node_enabled __initdata_memblock = false;
-#endif
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d75b38b66ef6..425aa0caa712 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2376,10 +2376,9 @@ void mem_cgroup_split_huge_fixup(struct page *head)
#ifdef CONFIG_MEMCG_SWAP
static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
- bool charge)
+ int nr_entries)
{
- int val = (charge) ? 1 : -1;
- this_cpu_add(memcg->stat->count[MEMCG_SWAP], val);
+ this_cpu_add(memcg->stat->count[MEMCG_SWAP], nr_entries);
}
/**
@@ -2405,8 +2404,8 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
new_id = mem_cgroup_id(to);
if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
- mem_cgroup_swap_statistics(from, false);
- mem_cgroup_swap_statistics(to, true);
+ mem_cgroup_swap_statistics(from, -1);
+ mem_cgroup_swap_statistics(to, 1);
return 0;
}
return -EINVAL;
@@ -3574,6 +3573,7 @@ static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
+ seq_printf(sf, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL));
return 0;
}
@@ -4122,6 +4122,12 @@ static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
if (!pn)
return 1;
+ pn->lruvec_stat = alloc_percpu(struct lruvec_stat);
+ if (!pn->lruvec_stat) {
+ kfree(pn);
+ return 1;
+ }
+
lruvec_init(&pn->lruvec);
pn->usage_in_excess = 0;
pn->on_tree = false;
@@ -4133,7 +4139,10 @@ static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
{
- kfree(memcg->nodeinfo[node]);
+ struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
+
+ free_percpu(pn->lruvec_stat);
+ kfree(pn);
}
static void __mem_cgroup_free(struct mem_cgroup *memcg)
@@ -5165,6 +5174,7 @@ static int memory_events_show(struct seq_file *m, void *v)
seq_printf(m, "high %lu\n", memcg_sum_events(memcg, MEMCG_HIGH));
seq_printf(m, "max %lu\n", memcg_sum_events(memcg, MEMCG_MAX));
seq_printf(m, "oom %lu\n", memcg_sum_events(memcg, MEMCG_OOM));
+ seq_printf(m, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL));
return 0;
}
@@ -5197,8 +5207,8 @@ static int memory_stat_show(struct seq_file *m, void *v)
seq_printf(m, "kernel_stack %llu\n",
(u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
seq_printf(m, "slab %llu\n",
- (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
- stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
+ (u64)(stat[NR_SLAB_RECLAIMABLE] +
+ stat[NR_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
seq_printf(m, "sock %llu\n",
(u64)stat[MEMCG_SOCK] * PAGE_SIZE);
@@ -5222,15 +5232,25 @@ static int memory_stat_show(struct seq_file *m, void *v)
}
seq_printf(m, "slab_reclaimable %llu\n",
- (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
+ (u64)stat[NR_SLAB_RECLAIMABLE] * PAGE_SIZE);
seq_printf(m, "slab_unreclaimable %llu\n",
- (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
+ (u64)stat[NR_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
/* Accumulated memory events */
seq_printf(m, "pgfault %lu\n", events[PGFAULT]);
seq_printf(m, "pgmajfault %lu\n", events[PGMAJFAULT]);
+ seq_printf(m, "pgrefill %lu\n", events[PGREFILL]);
+ seq_printf(m, "pgscan %lu\n", events[PGSCAN_KSWAPD] +
+ events[PGSCAN_DIRECT]);
+ seq_printf(m, "pgsteal %lu\n", events[PGSTEAL_KSWAPD] +
+ events[PGSTEAL_DIRECT]);
+ seq_printf(m, "pgactivate %lu\n", events[PGACTIVATE]);
+ seq_printf(m, "pgdeactivate %lu\n", events[PGDEACTIVATE]);
+ seq_printf(m, "pglazyfree %lu\n", events[PGLAZYFREE]);
+ seq_printf(m, "pglazyfreed %lu\n", events[PGLAZYFREED]);
+
seq_printf(m, "workingset_refault %lu\n",
stat[WORKINGSET_REFAULT]);
seq_printf(m, "workingset_activate %lu\n",
@@ -5445,7 +5465,7 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
* let's not wait for it. The page already received a
* memory+swap charge, drop the swap entry duplicate.
*/
- mem_cgroup_uncharge_swap(entry);
+ mem_cgroup_uncharge_swap(entry, nr_pages);
}
}
@@ -5873,9 +5893,9 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
* ancestor for the swap instead and transfer the memory+swap charge.
*/
swap_memcg = mem_cgroup_id_get_online(memcg);
- oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg));
+ oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), 1);
VM_BUG_ON_PAGE(oldid, page);
- mem_cgroup_swap_statistics(swap_memcg, true);
+ mem_cgroup_swap_statistics(swap_memcg, 1);
page->mem_cgroup = NULL;
@@ -5902,19 +5922,20 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
css_put(&memcg->css);
}
-/*
- * mem_cgroup_try_charge_swap - try charging a swap entry
+/**
+ * mem_cgroup_try_charge_swap - try charging swap space for a page
* @page: page being added to swap
* @entry: swap entry to charge
*
- * Try to charge @entry to the memcg that @page belongs to.
+ * Try to charge @page's memcg for the swap space at @entry.
*
* Returns 0 on success, -ENOMEM on failure.
*/
int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
- struct mem_cgroup *memcg;
+ unsigned int nr_pages = hpage_nr_pages(page);
struct page_counter *counter;
+ struct mem_cgroup *memcg;
unsigned short oldid;
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
@@ -5929,25 +5950,27 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
memcg = mem_cgroup_id_get_online(memcg);
if (!mem_cgroup_is_root(memcg) &&
- !page_counter_try_charge(&memcg->swap, 1, &counter)) {
+ !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
mem_cgroup_id_put(memcg);
return -ENOMEM;
}
- oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
+ /* Get references for the tail pages, too */
+ if (nr_pages > 1)
+ mem_cgroup_id_get_many(memcg, nr_pages - 1);
+ oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
VM_BUG_ON_PAGE(oldid, page);
- mem_cgroup_swap_statistics(memcg, true);
+ mem_cgroup_swap_statistics(memcg, nr_pages);
return 0;
}
/**
- * mem_cgroup_uncharge_swap - uncharge a swap entry
+ * mem_cgroup_uncharge_swap - uncharge swap space
* @entry: swap entry to uncharge
- *
- * Drop the swap charge associated with @entry.
+ * @nr_pages: the amount of swap space to uncharge
*/
-void mem_cgroup_uncharge_swap(swp_entry_t entry)
+void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
struct mem_cgroup *memcg;
unsigned short id;
@@ -5955,18 +5978,18 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry)
if (!do_swap_account)
return;
- id = swap_cgroup_record(entry, 0);
+ id = swap_cgroup_record(entry, 0, nr_pages);
rcu_read_lock();
memcg = mem_cgroup_from_id(id);
if (memcg) {
if (!mem_cgroup_is_root(memcg)) {
if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
- page_counter_uncharge(&memcg->swap, 1);
+ page_counter_uncharge(&memcg->swap, nr_pages);
else
- page_counter_uncharge(&memcg->memsw, 1);
+ page_counter_uncharge(&memcg->memsw, nr_pages);
}
- mem_cgroup_swap_statistics(memcg, false);
- mem_cgroup_id_put(memcg);
+ mem_cgroup_swap_statistics(memcg, -nr_pages);
+ mem_cgroup_id_put_many(memcg, nr_pages);
}
rcu_read_unlock();
}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index ecc183fd94f3..dbe3e50c9aa5 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -684,7 +684,7 @@ static int me_pagecache_dirty(struct page *p, unsigned long pfn)
* the first EIO, but we're not worse than other parts
* of the kernel.
*/
- mapping_set_error(mapping, EIO);
+ mapping_set_error(mapping, -EIO);
}
return me_pagecache_clean(p, pfn);
@@ -1492,11 +1492,16 @@ EXPORT_SYMBOL(unpoison_memory);
static struct page *new_page(struct page *p, unsigned long private, int **x)
{
int nid = page_to_nid(p);
- if (PageHuge(p))
- return alloc_huge_page_node(page_hstate(compound_head(p)),
- nid);
- else
+ if (PageHuge(p)) {
+ struct hstate *hstate = page_hstate(compound_head(p));
+
+ if (hstate_is_gigantic(hstate))
+ return alloc_huge_page_node(hstate, NUMA_NO_NODE);
+
+ return alloc_huge_page_node(hstate, nid);
+ } else {
return __alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
+ }
}
/*
diff --git a/mm/memory.c b/mm/memory.c
index bb11c474857e..e31dd97e6114 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2719,7 +2719,7 @@ int do_swap_page(struct vm_fault *vmf)
/* Had to read the page from swap area: Major fault */
ret = VM_FAULT_MAJOR;
count_vm_event(PGMAJFAULT);
- mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+ count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
} else if (PageHWPoison(page)) {
/*
* hwpoisoned dirty swapcache pages are kept for killing
@@ -3837,7 +3837,7 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
__set_current_state(TASK_RUNNING);
count_vm_event(PGFAULT);
- mem_cgroup_count_vm_event(vma->vm_mm, PGFAULT);
+ count_memcg_event_mm(vma->vm_mm, PGFAULT);
/* do counter updates before entering really critical section. */
check_sync_rss_stat(current);
@@ -4014,8 +4014,6 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
goto out;
ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
- if (!ptep)
- goto out;
if (!pte_present(*ptep))
goto unlock;
*ptepp = ptep;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b63d7d1239df..f79aac7a12b5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -79,6 +79,8 @@ static struct {
#define memhp_lock_acquire() lock_map_acquire(&mem_hotplug.dep_map)
#define memhp_lock_release() lock_map_release(&mem_hotplug.dep_map)
+bool movable_node_enabled = false;
+
#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
bool memhp_auto_online;
#else
@@ -300,229 +302,38 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
-static void __meminit grow_zone_span(struct zone *zone, unsigned long start_pfn,
- unsigned long end_pfn)
-{
- unsigned long old_zone_end_pfn;
-
- zone_span_writelock(zone);
-
- old_zone_end_pfn = zone_end_pfn(zone);
- if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
- zone->zone_start_pfn = start_pfn;
-
- zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
- zone->zone_start_pfn;
-
- zone_span_writeunlock(zone);
-}
-
-static void resize_zone(struct zone *zone, unsigned long start_pfn,
- unsigned long end_pfn)
-{
- zone_span_writelock(zone);
-
- if (end_pfn - start_pfn) {
- zone->zone_start_pfn = start_pfn;
- zone->spanned_pages = end_pfn - start_pfn;
- } else {
- /*
- * make it consist as free_area_init_core(),
- * if spanned_pages = 0, then keep start_pfn = 0
- */
- zone->zone_start_pfn = 0;
- zone->spanned_pages = 0;
- }
-
- zone_span_writeunlock(zone);
-}
-
-static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
- unsigned long end_pfn)
-{
- enum zone_type zid = zone_idx(zone);
- int nid = zone->zone_pgdat->node_id;
- unsigned long pfn;
-
- for (pfn = start_pfn; pfn < end_pfn; pfn++)
- set_page_links(pfn_to_page(pfn), zid, nid, pfn);
-}
-
-/* Can fail with -ENOMEM from allocating a wait table with vmalloc() or
- * alloc_bootmem_node_nopanic()/memblock_virt_alloc_node_nopanic() */
-static int __ref ensure_zone_is_initialized(struct zone *zone,
- unsigned long start_pfn, unsigned long num_pages)
-{
- if (!zone_is_initialized(zone))
- return init_currently_empty_zone(zone, start_pfn, num_pages);
-
- return 0;
-}
-
-static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
- unsigned long start_pfn, unsigned long end_pfn)
+static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
+ bool want_memblock)
{
int ret;
- unsigned long flags;
- unsigned long z1_start_pfn;
-
- ret = ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn);
- if (ret)
- return ret;
-
- pgdat_resize_lock(z1->zone_pgdat, &flags);
-
- /* can't move pfns which are higher than @z2 */
- if (end_pfn > zone_end_pfn(z2))
- goto out_fail;
- /* the move out part must be at the left most of @z2 */
- if (start_pfn > z2->zone_start_pfn)
- goto out_fail;
- /* must included/overlap */
- if (end_pfn <= z2->zone_start_pfn)
- goto out_fail;
-
- /* use start_pfn for z1's start_pfn if z1 is empty */
- if (!zone_is_empty(z1))
- z1_start_pfn = z1->zone_start_pfn;
- else
- z1_start_pfn = start_pfn;
-
- resize_zone(z1, z1_start_pfn, end_pfn);
- resize_zone(z2, end_pfn, zone_end_pfn(z2));
-
- pgdat_resize_unlock(z1->zone_pgdat, &flags);
-
- fix_zone_id(z1, start_pfn, end_pfn);
-
- return 0;
-out_fail:
- pgdat_resize_unlock(z1->zone_pgdat, &flags);
- return -1;
-}
-
-static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
- unsigned long start_pfn, unsigned long end_pfn)
-{
- int ret;
- unsigned long flags;
- unsigned long z2_end_pfn;
-
- ret = ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn);
- if (ret)
- return ret;
-
- pgdat_resize_lock(z1->zone_pgdat, &flags);
-
- /* can't move pfns which are lower than @z1 */
- if (z1->zone_start_pfn > start_pfn)
- goto out_fail;
- /* the move out part mast at the right most of @z1 */
- if (zone_end_pfn(z1) > end_pfn)
- goto out_fail;
- /* must included/overlap */
- if (start_pfn >= zone_end_pfn(z1))
- goto out_fail;
-
- /* use end_pfn for z2's end_pfn if z2 is empty */
- if (!zone_is_empty(z2))
- z2_end_pfn = zone_end_pfn(z2);
- else
- z2_end_pfn = end_pfn;
-
- resize_zone(z1, z1->zone_start_pfn, start_pfn);
- resize_zone(z2, start_pfn, z2_end_pfn);
-
- pgdat_resize_unlock(z1->zone_pgdat, &flags);
-
- fix_zone_id(z2, start_pfn, end_pfn);
-
- return 0;
-out_fail:
- pgdat_resize_unlock(z1->zone_pgdat, &flags);
- return -1;
-}
-
-static struct zone * __meminit move_pfn_range(int zone_shift,
- unsigned long start_pfn, unsigned long end_pfn)
-{
- struct zone *zone = page_zone(pfn_to_page(start_pfn));
- int ret = 0;
-
- if (zone_shift < 0)
- ret = move_pfn_range_left(zone + zone_shift, zone,
- start_pfn, end_pfn);
- else if (zone_shift)
- ret = move_pfn_range_right(zone, zone + zone_shift,
- start_pfn, end_pfn);
-
- if (ret)
- return NULL;
-
- return zone + zone_shift;
-}
-
-static void __meminit grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
- unsigned long end_pfn)
-{
- unsigned long old_pgdat_end_pfn = pgdat_end_pfn(pgdat);
-
- if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
- pgdat->node_start_pfn = start_pfn;
-
- pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
- pgdat->node_start_pfn;
-}
+ int i;
-static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
-{
- struct pglist_data *pgdat = zone->zone_pgdat;
- int nr_pages = PAGES_PER_SECTION;
- int nid = pgdat->node_id;
- int zone_type;
- unsigned long flags, pfn;
- int ret;
+ if (pfn_valid(phys_start_pfn))
+ return -EEXIST;
- zone_type = zone - pgdat->node_zones;
- ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);
- if (ret)
+ ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn);
+ if (ret < 0)
return ret;
- pgdat_resize_lock(zone->zone_pgdat, &flags);
- grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
- grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
- phys_start_pfn + nr_pages);
- pgdat_resize_unlock(zone->zone_pgdat, &flags);
- memmap_init_zone(nr_pages, nid, zone_type,
- phys_start_pfn, MEMMAP_HOTPLUG);
-
- /* online_page_range is called later and expects pages reserved */
- for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
+ /*
+ * Make all the pages reserved so that nobody will stumble over half
+ * initialized state.
+ * FIXME: We also have to associate it with a node because pfn_to_node
+ * relies on the page having the proper node.
+ */
+ for (i = 0; i < PAGES_PER_SECTION; i++) {
+ unsigned long pfn = phys_start_pfn + i;
+ struct page *page;
if (!pfn_valid(pfn))
continue;
- SetPageReserved(pfn_to_page(pfn));
+ page = pfn_to_page(pfn);
+ set_page_node(page, nid);
+ SetPageReserved(page);
}
- return 0;
-}
-
-static int __meminit __add_section(int nid, struct zone *zone,
- unsigned long phys_start_pfn)
-{
- int ret;
-
- if (pfn_valid(phys_start_pfn))
- return -EEXIST;
-
- ret = sparse_add_one_section(zone, phys_start_pfn);
-
- if (ret < 0)
- return ret;
-
- ret = __add_zone(zone, phys_start_pfn);
- if (ret < 0)
- return ret;
+ if (!want_memblock)
+ return 0;
return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}
@@ -533,16 +344,14 @@ static int __meminit __add_section(int nid, struct zone *zone,
* call this function after deciding the zone to which to
* add the new pages.
*/
-int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
- unsigned long nr_pages)
+int __ref __add_pages(int nid, unsigned long phys_start_pfn,
+ unsigned long nr_pages, bool want_memblock)
{
unsigned long i;
int err = 0;
int start_sec, end_sec;
struct vmem_altmap *altmap;
- clear_zone_contiguous(zone);
-
/* during initialize mem_map, align hot-added range to section */
start_sec = pfn_to_section_nr(phys_start_pfn);
end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
@@ -562,7 +371,7 @@ int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
}
for (i = start_sec; i <= end_sec; i++) {
- err = __add_section(nid, zone, section_nr_to_pfn(i));
+ err = __add_section(nid, section_nr_to_pfn(i), want_memblock);
/*
* EEXIST is finally dealt with by ioresource collision
@@ -575,7 +384,6 @@ int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
}
vmemmap_populate_print_last();
out:
- set_zone_contiguous(zone);
return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
@@ -939,33 +747,20 @@ static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
unsigned long i;
unsigned long onlined_pages = *(unsigned long *)arg;
struct page *page;
+
if (PageReserved(pfn_to_page(start_pfn)))
for (i = 0; i < nr_pages; i++) {
page = pfn_to_page(start_pfn + i);
(*online_page_callback)(page);
onlined_pages++;
}
+
+ online_mem_sections(start_pfn, start_pfn + nr_pages);
+
*(unsigned long *)arg = onlined_pages;
return 0;
}
-#ifdef CONFIG_MOVABLE_NODE
-/*
- * When CONFIG_MOVABLE_NODE, we permit onlining of a node which doesn't have
- * normal memory.
- */
-static bool can_online_high_movable(struct zone *zone)
-{
- return true;
-}
-#else /* CONFIG_MOVABLE_NODE */
-/* ensure every online node has NORMAL memory */
-static bool can_online_high_movable(struct zone *zone)
-{
- return node_state(zone_to_nid(zone), N_NORMAL_MEMORY);
-}
-#endif /* CONFIG_MOVABLE_NODE */
-
/* check which state of node_states will be changed when online memory */
static void node_states_check_changes_online(unsigned long nr_pages,
struct zone *zone, struct memory_notify *arg)
@@ -1040,39 +835,131 @@ static void node_states_set_node(int node, struct memory_notify *arg)
node_set_state(node, N_MEMORY);
}
-bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
- enum zone_type target, int *zone_shift)
+bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages, int online_type)
{
- struct zone *zone = page_zone(pfn_to_page(pfn));
- enum zone_type idx = zone_idx(zone);
- int i;
+ struct pglist_data *pgdat = NODE_DATA(nid);
+ struct zone *movable_zone = &pgdat->node_zones[ZONE_MOVABLE];
+ struct zone *default_zone = default_zone_for_pfn(nid, pfn, nr_pages);
- *zone_shift = 0;
+ /*
+ * TODO there shouldn't be any inherent reason to have ZONE_NORMAL
+ * physically before ZONE_MOVABLE. All we need is that they do not
+ * overlap. Historically we didn't allow ZONE_NORMAL after ZONE_MOVABLE,
+ * though, so let's stick with it for simplicity for now.
+ * TODO make sure we do not overlap with ZONE_DEVICE
+ */
+ if (online_type == MMOP_ONLINE_KERNEL) {
+ if (zone_is_empty(movable_zone))
+ return true;
+ return movable_zone->zone_start_pfn >= pfn + nr_pages;
+ } else if (online_type == MMOP_ONLINE_MOVABLE) {
+ return zone_end_pfn(default_zone) <= pfn;
+ }
- if (idx < target) {
- /* pages must be at end of current zone */
- if (pfn + nr_pages != zone_end_pfn(zone))
- return false;
+ /* MMOP_ONLINE_KEEP will always succeed and inherits the current zone */
+ return online_type == MMOP_ONLINE_KEEP;
+}
+
+static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
+ unsigned long nr_pages)
+{
+ unsigned long old_end_pfn = zone_end_pfn(zone);
+
+ if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
+ zone->zone_start_pfn = start_pfn;
+
+ zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
+}
+
+static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
+ unsigned long nr_pages)
+{
+ unsigned long old_end_pfn = pgdat_end_pfn(pgdat);
+
+ if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
+ pgdat->node_start_pfn = start_pfn;
+
+ pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
+}
+
+void __ref move_pfn_range_to_zone(struct zone *zone,
+ unsigned long start_pfn, unsigned long nr_pages)
+{
+ struct pglist_data *pgdat = zone->zone_pgdat;
+ int nid = pgdat->node_id;
+ unsigned long flags;
- /* no zones in use between current zone and target */
- for (i = idx + 1; i < target; i++)
- if (zone_is_initialized(zone - idx + i))
- return false;
+ if (zone_is_empty(zone))
+ init_currently_empty_zone(zone, start_pfn, nr_pages);
+
+ clear_zone_contiguous(zone);
+
+ /* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */
+ pgdat_resize_lock(pgdat, &flags);
+ zone_span_writelock(zone);
+ resize_zone_range(zone, start_pfn, nr_pages);
+ zone_span_writeunlock(zone);
+ resize_pgdat_range(pgdat, start_pfn, nr_pages);
+ pgdat_resize_unlock(pgdat, &flags);
+
+ /*
+ * TODO now we have a visible range of pages which are not associated
+ * with their zone properly. Not nice but set_pfnblock_flags_mask
+ * expects the zone to span the pfn range. All the pages in the range
+ * are reserved, so nobody should be touching them and we should be safe.
+ */
+ memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, MEMMAP_HOTPLUG);
+
+ set_zone_contiguous(zone);
+}
+
+/*
+ * Returns a default kernel memory zone for the given pfn range.
+ * If no kernel zone covers this pfn range it will automatically go
+ * to the ZONE_NORMAL.
+ */
+struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
+ unsigned long nr_pages)
+{
+ struct pglist_data *pgdat = NODE_DATA(nid);
+ int zid;
+
+ for (zid = 0; zid <= ZONE_NORMAL; zid++) {
+ struct zone *zone = &pgdat->node_zones[zid];
+
+ if (zone_intersects(zone, start_pfn, nr_pages))
+ return zone;
}
- if (target < idx) {
- /* pages must be at beginning of current zone */
- if (pfn != zone->zone_start_pfn)
- return false;
+ return &pgdat->node_zones[ZONE_NORMAL];
+}
- /* no zones in use between current zone and target */
- for (i = target + 1; i < idx; i++)
- if (zone_is_initialized(zone - idx + i))
- return false;
+/*
+ * Associates the given pfn range with the given node and the zone appropriate
+ * for the given online type.
+ */
+static struct zone * __meminit move_pfn_range(int online_type, int nid,
+ unsigned long start_pfn, unsigned long nr_pages)
+{
+ struct pglist_data *pgdat = NODE_DATA(nid);
+ struct zone *zone = default_zone_for_pfn(nid, start_pfn, nr_pages);
+
+ if (online_type == MMOP_ONLINE_KEEP) {
+ struct zone *movable_zone = &pgdat->node_zones[ZONE_MOVABLE];
+ /*
+ * MMOP_ONLINE_KEEP defaults to MMOP_ONLINE_KERNEL but uses the
+ * movable zone if that is not possible (e.g. we are within
+ * or past the existing movable zone)
+ */
+ if (!allow_online_pfn_range(nid, start_pfn, nr_pages,
+ MMOP_ONLINE_KERNEL))
+ zone = movable_zone;
+ } else if (online_type == MMOP_ONLINE_MOVABLE) {
+ zone = &pgdat->node_zones[ZONE_MOVABLE];
}
- *zone_shift = target - idx;
- return true;
+ move_pfn_range_to_zone(zone, start_pfn, nr_pages);
+ return zone;
}
/* Must be protected by mem_hotplug_begin() */
@@ -1085,38 +972,18 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
int nid;
int ret;
struct memory_notify arg;
- int zone_shift = 0;
- /*
- * This doesn't need a lock to do pfn_to_page().
- * The section can't be removed here because of the
- * memory_block->state_mutex.
- */
- zone = page_zone(pfn_to_page(pfn));
-
- if ((zone_idx(zone) > ZONE_NORMAL ||
- online_type == MMOP_ONLINE_MOVABLE) &&
- !can_online_high_movable(zone))
+ nid = pfn_to_nid(pfn);
+ if (!allow_online_pfn_range(nid, pfn, nr_pages, online_type))
return -EINVAL;
- if (online_type == MMOP_ONLINE_KERNEL) {
- if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
- return -EINVAL;
- } else if (online_type == MMOP_ONLINE_MOVABLE) {
- if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
- return -EINVAL;
- }
-
- zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
- if (!zone)
- return -EINVAL;
+ /* associate pfn range with the zone */
+ zone = move_pfn_range(online_type, nid, pfn, nr_pages);
arg.start_pfn = pfn;
arg.nr_pages = nr_pages;
node_states_check_changes_online(nr_pages, zone, &arg);
- nid = zone_to_nid(zone);
-
ret = memory_notify(MEM_GOING_ONLINE, &arg);
ret = notifier_to_errno(ret);
if (ret)
@@ -1311,39 +1178,6 @@ static int check_hotplug_memory_range(u64 start, u64 size)
return 0;
}
-/*
- * If movable zone has already been setup, newly added memory should be check.
- * If its address is higher than movable zone, it should be added as movable.
- * Without this check, movable zone may overlap with other zone.
- */
-static int should_add_memory_movable(int nid, u64 start, u64 size)
-{
- unsigned long start_pfn = start >> PAGE_SHIFT;
- pg_data_t *pgdat = NODE_DATA(nid);
- struct zone *movable_zone = pgdat->node_zones + ZONE_MOVABLE;
-
- if (zone_is_empty(movable_zone))
- return 0;
-
- if (movable_zone->zone_start_pfn <= start_pfn)
- return 1;
-
- return 0;
-}
-
-int zone_for_memory(int nid, u64 start, u64 size, int zone_default,
- bool for_device)
-{
-#ifdef CONFIG_ZONE_DEVICE
- if (for_device)
- return ZONE_DEVICE;
-#endif
- if (should_add_memory_movable(nid, start, size))
- return ZONE_MOVABLE;
-
- return zone_default;
-}
-
static int online_memory_block(struct memory_block *mem, void *arg)
{
return device_online(&mem->dev);
@@ -1389,7 +1223,7 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online)
}
/* call arch's memory hotadd */
- ret = arch_add_memory(nid, start, size, false);
+ ret = arch_add_memory(nid, start, size, true);
if (ret < 0)
goto error;
@@ -1398,7 +1232,22 @@ int __ref add_memory_resource(int nid, struct resource *res, bool online)
node_set_online(nid);
if (new_node) {
- ret = register_one_node(nid);
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+
+ ret = __register_one_node(nid);
+ if (ret)
+ goto register_fail;
+
+ /*
+ * link memory sections under this node. This is already
+ * done when creating the memory section in register_new_memory
+ * but that depends on the node being registered, so offline
+ * nodes have to go through register_node.
+ * TODO clean up this mess.
+ */
+ ret = link_mem_sections(nid, start_pfn, nr_pages);
+register_fail:
/*
* If sysfs file of new node can't create, cpu on the node
* can't be hot-added. There is no rollback way now.
@@ -1592,11 +1441,9 @@ static struct page *new_node_page(struct page *page, unsigned long private,
gfp_mask |= __GFP_HIGHMEM;
if (!nodes_empty(nmask))
- new_page = __alloc_pages_nodemask(gfp_mask, 0,
- node_zonelist(nid, gfp_mask), &nmask);
+ new_page = __alloc_pages_nodemask(gfp_mask, 0, nid, &nmask);
if (!new_page)
- new_page = __alloc_pages(gfp_mask, 0,
- node_zonelist(nid, gfp_mask));
+ new_page = __alloc_pages(gfp_mask, 0, nid);
return new_page;
}
@@ -1725,47 +1572,12 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
return offlined;
}
-#ifdef CONFIG_MOVABLE_NODE
-/*
- * When CONFIG_MOVABLE_NODE, we permit offlining of a node which doesn't have
- * normal memory.
- */
-static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
-{
- return true;
-}
-#else /* CONFIG_MOVABLE_NODE */
-/* ensure the node has NORMAL memory if it is still online */
-static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
-{
- struct pglist_data *pgdat = zone->zone_pgdat;
- unsigned long present_pages = 0;
- enum zone_type zt;
-
- for (zt = 0; zt <= ZONE_NORMAL; zt++)
- present_pages += pgdat->node_zones[zt].present_pages;
-
- if (present_pages > nr_pages)
- return true;
-
- present_pages = 0;
- for (; zt <= ZONE_MOVABLE; zt++)
- present_pages += pgdat->node_zones[zt].present_pages;
-
- /*
- * we can't offline the last normal memory until all
- * higher memory is offlined.
- */
- return present_pages == 0;
-}
-#endif /* CONFIG_MOVABLE_NODE */
-
static int __init cmdline_parse_movable_node(char *p)
{
-#ifdef CONFIG_MOVABLE_NODE
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
movable_node_enabled = true;
#else
- pr_warn("movable_node option not supported\n");
+ pr_warn("movable_node parameter depends on CONFIG_HAVE_MEMBLOCK_NODE_MAP to work properly\n");
#endif
return 0;
}
@@ -1887,9 +1699,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
node = zone_to_nid(zone);
nr_pages = end_pfn - start_pfn;
- if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages))
- return -EINVAL;
-
/* set above range as isolated */
ret = start_isolate_page_range(start_pfn, end_pfn,
MIGRATE_MOVABLE, true);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 37d0b334bfe9..7d8e56214ac0 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -146,22 +146,7 @@ struct mempolicy *get_task_policy(struct task_struct *p)
static const struct mempolicy_operations {
int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
- /*
- * If read-side task has no lock to protect task->mempolicy, write-side
- * task will rebind the task->mempolicy by two step. The first step is
- * setting all the newly nodes, and the second step is cleaning all the
- * disallowed nodes. In this way, we can avoid finding no node to alloc
- * page.
- * If we have a lock to protect task->mempolicy in read-side, we do
- * rebind directly.
- *
- * step:
- * MPOL_REBIND_ONCE - do rebind work at once
- * MPOL_REBIND_STEP1 - set all the newly nodes
- * MPOL_REBIND_STEP2 - clean all the disallowed nodes
- */
- void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
- enum mpol_rebind_step step);
+ void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];
static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
@@ -304,19 +289,11 @@ void __mpol_put(struct mempolicy *p)
kmem_cache_free(policy_cache, p);
}
-static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
- enum mpol_rebind_step step)
+static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}
-/*
- * step:
- * MPOL_REBIND_ONCE - do rebind work at once
- * MPOL_REBIND_STEP1 - set all the newly nodes
- * MPOL_REBIND_STEP2 - clean all the disallowed nodes
- */
-static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
- enum mpol_rebind_step step)
+static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
{
nodemask_t tmp;
@@ -325,41 +302,19 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
else if (pol->flags & MPOL_F_RELATIVE_NODES)
mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
else {
- /*
- * if step == 1, we use ->w.cpuset_mems_allowed to cache the
- * result
- */
- if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
- nodes_remap(tmp, pol->v.nodes,
- pol->w.cpuset_mems_allowed, *nodes);
- pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
- } else if (step == MPOL_REBIND_STEP2) {
- tmp = pol->w.cpuset_mems_allowed;
- pol->w.cpuset_mems_allowed = *nodes;
- } else
- BUG();
+ nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
+ *nodes);
+ pol->w.cpuset_mems_allowed = tmp;
}
if (nodes_empty(tmp))
tmp = *nodes;
- if (step == MPOL_REBIND_STEP1)
- nodes_or(pol->v.nodes, pol->v.nodes, tmp);
- else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
- pol->v.nodes = tmp;
- else
- BUG();
-
- if (!node_isset(current->il_next, tmp)) {
- current->il_next = next_node_in(current->il_next, tmp);
- if (current->il_next >= MAX_NUMNODES)
- current->il_next = numa_node_id();
- }
+ pol->v.nodes = tmp;
}
static void mpol_rebind_preferred(struct mempolicy *pol,
- const nodemask_t *nodes,
- enum mpol_rebind_step step)
+ const nodemask_t *nodes)
{
nodemask_t tmp;
@@ -385,42 +340,19 @@ static void mpol_rebind_preferred(struct mempolicy *pol,
/*
* mpol_rebind_policy - Migrate a policy to a different set of nodes
*
- * If read-side task has no lock to protect task->mempolicy, write-side
- * task will rebind the task->mempolicy by two step. The first step is
- * setting all the newly nodes, and the second step is cleaning all the
- * disallowed nodes. In this way, we can avoid finding no node to alloc
- * page.
- * If we have a lock to protect task->mempolicy in read-side, we do
- * rebind directly.
- *
- * step:
- * MPOL_REBIND_ONCE - do rebind work at once
- * MPOL_REBIND_STEP1 - set all the newly nodes
- * MPOL_REBIND_STEP2 - clean all the disallowed nodes
+ * Per-vma policies are protected by mmap_sem. Allocations using per-task
+ * policies are protected by task->mems_allowed_seq to prevent a premature
+ * OOM/allocation failure due to parallel nodemask modification.
*/
-static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
- enum mpol_rebind_step step)
+static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
if (!pol)
return;
- if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
+ if (!mpol_store_user_nodemask(pol) &&
nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
return;
- if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
- return;
-
- if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
- BUG();
-
- if (step == MPOL_REBIND_STEP1)
- pol->flags |= MPOL_F_REBINDING;
- else if (step == MPOL_REBIND_STEP2)
- pol->flags &= ~MPOL_F_REBINDING;
- else if (step >= MPOL_REBIND_NSTEP)
- BUG();
-
- mpol_ops[pol->mode].rebind(pol, newmask, step);
+ mpol_ops[pol->mode].rebind(pol, newmask);
}
/*
@@ -430,10 +362,9 @@ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
* Called with task's alloc_lock held.
*/
-void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
- enum mpol_rebind_step step)
+void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
- mpol_rebind_policy(tsk->mempolicy, new, step);
+ mpol_rebind_policy(tsk->mempolicy, new);
}
/*
@@ -448,7 +379,7 @@ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
down_write(&mm->mmap_sem);
for (vma = mm->mmap; vma; vma = vma->vm_next)
- mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
+ mpol_rebind_policy(vma->vm_policy, new);
up_write(&mm->mmap_sem);
}
@@ -812,9 +743,8 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
}
old = current->mempolicy;
current->mempolicy = new;
- if (new && new->mode == MPOL_INTERLEAVE &&
- nodes_weight(new->v.nodes))
- current->il_next = first_node(new->v.nodes);
+ if (new && new->mode == MPOL_INTERLEAVE)
+ current->il_prev = MAX_NUMNODES-1;
task_unlock(current);
mpol_put(old);
ret = 0;
@@ -916,7 +846,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
*policy = err;
} else if (pol == current->mempolicy &&
pol->mode == MPOL_INTERLEAVE) {
- *policy = current->il_next;
+ *policy = next_node_in(current->il_prev, pol->v.nodes);
} else {
err = -EINVAL;
goto out;
@@ -1676,9 +1606,9 @@ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
return NULL;
}
-/* Return a zonelist indicated by gfp for node representing a mempolicy */
-static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
- int nd)
+/* Return the node id preferred by the given mempolicy, or the given id */
+static int policy_node(gfp_t gfp, struct mempolicy *policy,
+ int nd)
{
if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
nd = policy->v.preferred_node;
@@ -1691,20 +1621,19 @@ static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
}
- return node_zonelist(nd, gfp);
+ return nd;
}
/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
- unsigned nid, next;
+ unsigned next;
struct task_struct *me = current;
- nid = me->il_next;
- next = next_node_in(nid, policy->v.nodes);
+ next = next_node_in(me->il_prev, policy->v.nodes);
if (next < MAX_NUMNODES)
- me->il_next = next;
- return nid;
+ me->il_prev = next;
+ return next;
}
/*
@@ -1799,38 +1728,37 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
#ifdef CONFIG_HUGETLBFS
/*
- * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
+ * huge_node(@vma, @addr, @gfp_flags, @mpol)
* @vma: virtual memory area whose policy is sought
* @addr: address in @vma for shared policy lookup and interleave policy
* @gfp_flags: for requested zone
* @mpol: pointer to mempolicy pointer for reference counted mempolicy
* @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
*
- * Returns a zonelist suitable for a huge page allocation and a pointer
+ * Returns a nid suitable for a huge page allocation and a pointer
* to the struct mempolicy for conditional unref after allocation.
* If the effective policy is 'BIND', returns a pointer to the mempolicy's
* @nodemask for filtering the zonelist.
*
* Must be protected by read_mems_allowed_begin()
*/
-struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
- gfp_t gfp_flags, struct mempolicy **mpol,
- nodemask_t **nodemask)
+int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
+ struct mempolicy **mpol, nodemask_t **nodemask)
{
- struct zonelist *zl;
+ int nid;
*mpol = get_vma_policy(vma, addr);
*nodemask = NULL; /* assume !MPOL_BIND */
if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
- zl = node_zonelist(interleave_nid(*mpol, vma, addr,
- huge_page_shift(hstate_vma(vma))), gfp_flags);
+ nid = interleave_nid(*mpol, vma, addr,
+ huge_page_shift(hstate_vma(vma)));
} else {
- zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
+ nid = policy_node(gfp_flags, *mpol, numa_node_id());
if ((*mpol)->mode == MPOL_BIND)
*nodemask = &(*mpol)->v.nodes;
}
- return zl;
+ return nid;
}
/*
@@ -1932,12 +1860,10 @@ out:
static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
unsigned nid)
{
- struct zonelist *zl;
struct page *page;
- zl = node_zonelist(nid, gfp);
- page = __alloc_pages(gfp, order, zl);
- if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
+ page = __alloc_pages(gfp, order, nid);
+ if (page && page_to_nid(page) == nid)
inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
return page;
}
@@ -1971,13 +1897,10 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
{
struct mempolicy *pol;
struct page *page;
- unsigned int cpuset_mems_cookie;
- struct zonelist *zl;
+ int preferred_nid;
nodemask_t *nmask;
-retry_cpuset:
pol = get_vma_policy(vma, addr);
- cpuset_mems_cookie = read_mems_allowed_begin();
if (pol->mode == MPOL_INTERLEAVE) {
unsigned nid;
@@ -2015,12 +1938,10 @@ retry_cpuset:
}
nmask = policy_nodemask(gfp, pol);
- zl = policy_zonelist(gfp, pol, node);
- page = __alloc_pages_nodemask(gfp, order, zl, nmask);
+ preferred_nid = policy_node(gfp, pol, node);
+ page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
mpol_cond_put(pol);
out:
- if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
- goto retry_cpuset;
return page;
}
@@ -2038,23 +1959,15 @@ out:
* Allocate a page from the kernel page pool. When not in
* interrupt context, apply the current process NUMA policy.
* Returns NULL when no page can be allocated.
- *
- * Don't call cpuset_update_task_memory_state() unless
- * 1) it's ok to take cpuset_sem (can WAIT), and
- * 2) allocating for current task (not interrupt).
*/
struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
struct mempolicy *pol = &default_policy;
struct page *page;
- unsigned int cpuset_mems_cookie;
if (!in_interrupt() && !(gfp & __GFP_THISNODE))
pol = get_task_policy(current);
-retry_cpuset:
- cpuset_mems_cookie = read_mems_allowed_begin();
-
/*
* No reference counting needed for current->mempolicy
* nor system default_policy
@@ -2063,12 +1976,9 @@ retry_cpuset:
page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
else
page = __alloc_pages_nodemask(gfp, order,
- policy_zonelist(gfp, pol, numa_node_id()),
+ policy_node(gfp, pol, numa_node_id()),
policy_nodemask(gfp, pol));
- if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
- goto retry_cpuset;
-
return page;
}
EXPORT_SYMBOL(alloc_pages_current);
@@ -2112,10 +2022,7 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
if (current_cpuset_is_being_rebound()) {
nodemask_t mems = cpuset_mems_allowed(current);
- if (new->flags & MPOL_F_REBINDING)
- mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
- else
- mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
+ mpol_rebind_policy(new, &mems);
}
atomic_set(&new->refcnt, 1);
return new;
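The il_prev bookkeeping above is only observable through the existing mempolicy syscalls. A small illustrative program, not part of this series (it assumes nodes 0 and 1 exist and uses the libnuma syscall wrappers, so link with -lnuma), sets MPOL_INTERLEAVE and then asks for the next interleave node via MPOL_F_NODE, which is the path adjusted in do_get_mempolicy() above:

/* Illustrative only: exercise MPOL_INTERLEAVE / MPOL_F_NODE from userspace. */
#include <numaif.h>
#include <stdio.h>

int main(void)
{
	/* Interleave over nodes 0 and 1; assumes both nodes exist. */
	unsigned long nodemask = (1UL << 0) | (1UL << 1);
	int next_node;

	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, 8 * sizeof(nodemask))) {
		perror("set_mempolicy");
		return 1;
	}

	/*
	 * With MPOL_F_NODE and an interleave policy in effect,
	 * get_mempolicy() reports the node the next interleaved
	 * allocation will use.
	 */
	if (get_mempolicy(&next_node, NULL, 0, NULL, MPOL_F_NODE)) {
		perror("get_mempolicy");
		return 1;
	}

	printf("next interleave node: %d\n", next_node);
	return 0;
}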
diff --git a/mm/migrate.c b/mm/migrate.c
index 89a0a1707f4c..051cc1555d36 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -227,25 +227,26 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
if (is_write_migration_entry(entry))
pte = maybe_mkwrite(pte, vma);
+ flush_dcache_page(new);
#ifdef CONFIG_HUGETLB_PAGE
if (PageHuge(new)) {
pte = pte_mkhuge(pte);
pte = arch_make_huge_pte(pte, vma, new, 0);
- }
-#endif
- flush_dcache_page(new);
- set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
-
- if (PageHuge(new)) {
+ set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
if (PageAnon(new))
hugepage_add_anon_rmap(new, vma, pvmw.address);
else
page_dup_rmap(new, true);
- } else if (PageAnon(new))
- page_add_anon_rmap(new, vma, pvmw.address, false);
- else
- page_add_file_rmap(new, false);
+ } else
+#endif
+ {
+ set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
+ if (PageAnon(new))
+ page_add_anon_rmap(new, vma, pvmw.address, false);
+ else
+ page_add_file_rmap(new, false);
+ }
if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
mlock_vma_page(new);
diff --git a/mm/mmap.c b/mm/mmap.c
index a5e3dcd75e79..7f8cfe9d9b4d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -94,7 +94,7 @@ static void unmap_region(struct mm_struct *mm,
* w: (no) no
* x: (yes) yes
*/
-pgprot_t protection_map[16] = {
+pgprot_t protection_map[16] __ro_after_init = {
__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
@@ -3186,8 +3186,12 @@ static int special_mapping_mremap(struct vm_area_struct *new_vma)
{
struct vm_special_mapping *sm = new_vma->vm_private_data;
+ if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
+ return -EFAULT;
+
if (sm->mremap)
return sm->mremap(sm, new_vma);
+
return 0;
}
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 8edd0d576254..1a8c9ca83e48 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -58,8 +58,6 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
* reading.
*/
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
- if (!pte)
- return 0;
/* Get target node for single threaded private VMAs */
if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 487dad610731..36454d0f96ee 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -118,7 +118,7 @@ static unsigned long __init __free_memory_core(phys_addr_t start,
unsigned long end_pfn = min_t(unsigned long,
PFN_DOWN(end), max_low_pfn);
- if (start_pfn > end_pfn)
+ if (start_pfn >= end_pfn)
return 0;
__free_pages_memory(start_pfn, end_pfn);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 04c9143a8625..0e2c925e7826 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -876,6 +876,11 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
/* Get a reference to safely compare mm after task_unlock(victim) */
mm = victim->mm;
mmgrab(mm);
+
+ /* Raise event before sending signal: task reaper must see this */
+ count_vm_event(OOM_KILL);
+ count_memcg_event_mm(mm, OOM_KILL);
+
/*
* We should send SIGKILL before setting TIF_MEMDIE in order to prevent
* the OOM victim from depleting the memory reserves from the user
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 143c1c25d680..0b60cc7ddac2 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2366,15 +2366,15 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
}
/**
- * write_one_page - write out a single page and optionally wait on I/O
+ * write_one_page - write out a single page and wait on I/O
* @page: the page to write
- * @wait: if true, wait on writeout
*
* The page must be locked by the caller and will be unlocked upon return.
*
- * write_one_page() returns a negative error code if I/O failed.
+ * Note that the mapping's AS_EIO/AS_ENOSPC flags will be cleared when this
+ * function returns.
*/
-int write_one_page(struct page *page, int wait)
+int write_one_page(struct page *page)
{
struct address_space *mapping = page->mapping;
int ret = 0;
@@ -2385,21 +2385,20 @@ int write_one_page(struct page *page, int wait)
BUG_ON(!PageLocked(page));
- if (wait)
- wait_on_page_writeback(page);
+ wait_on_page_writeback(page);
if (clear_page_dirty_for_io(page)) {
get_page(page);
ret = mapping->a_ops->writepage(page, &wbc);
- if (ret == 0 && wait) {
+ if (ret == 0)
wait_on_page_writeback(page);
- if (PageError(page))
- ret = -EIO;
- }
put_page(page);
} else {
unlock_page(page);
}
+
+ if (!ret)
+ ret = filemap_check_errors(mapping);
return ret;
}
EXPORT_SYMBOL(write_one_page);
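With the wait argument gone, write_one_page() always waits for writeback and also reports any error latched on the mapping. A hypothetical caller conversion, shown only as a sketch (the function name is made up; real in-tree callers are converted elsewhere in this series):

#include <linux/mm.h>

/* Hypothetical caller, illustrating the new calling convention only. */
static int flush_locked_page(struct page *page)
{
	/*
	 * Before this change: err = write_one_page(page, 1);
	 * The call now always waits on writeback and folds in any
	 * AS_EIO/AS_ENOSPC error recorded on the mapping.
	 */
	return write_one_page(page);
}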
@@ -2433,8 +2432,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
inode_attach_wb(inode, page);
wb = inode_to_wb(inode);
- inc_memcg_page_state(page, NR_FILE_DIRTY);
- __inc_node_page_state(page, NR_FILE_DIRTY);
+ __inc_lruvec_page_state(page, NR_FILE_DIRTY);
__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
__inc_node_page_state(page, NR_DIRTIED);
__inc_wb_stat(wb, WB_RECLAIMABLE);
@@ -2455,8 +2453,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
struct bdi_writeback *wb)
{
if (mapping_cap_account_dirty(mapping)) {
- dec_memcg_page_state(page, NR_FILE_DIRTY);
- dec_node_page_state(page, NR_FILE_DIRTY);
+ dec_lruvec_page_state(page, NR_FILE_DIRTY);
dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
dec_wb_stat(wb, WB_RECLAIMABLE);
task_io_account_cancelled_write(PAGE_SIZE);
@@ -2712,8 +2709,7 @@ int clear_page_dirty_for_io(struct page *page)
*/
wb = unlocked_inode_to_wb_begin(inode, &locked);
if (TestClearPageDirty(page)) {
- dec_memcg_page_state(page, NR_FILE_DIRTY);
- dec_node_page_state(page, NR_FILE_DIRTY);
+ dec_lruvec_page_state(page, NR_FILE_DIRTY);
dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
dec_wb_stat(wb, WB_RECLAIMABLE);
ret = 1;
@@ -2759,8 +2755,7 @@ int test_clear_page_writeback(struct page *page)
ret = TestClearPageWriteback(page);
}
if (ret) {
- dec_memcg_page_state(page, NR_WRITEBACK);
- dec_node_page_state(page, NR_WRITEBACK);
+ dec_lruvec_page_state(page, NR_WRITEBACK);
dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
inc_node_page_state(page, NR_WRITTEN);
}
@@ -2814,8 +2809,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
ret = TestSetPageWriteback(page);
}
if (!ret) {
- inc_memcg_page_state(page, NR_WRITEBACK);
- inc_node_page_state(page, NR_WRITEBACK);
+ inc_lruvec_page_state(page, NR_WRITEBACK);
inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
}
unlock_page_memcg(page);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2302f250d6b1..bd65b60939b6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -113,9 +113,7 @@ nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
#ifdef CONFIG_HIGHMEM
[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
-#ifdef CONFIG_MOVABLE_NODE
[N_MEMORY] = { { [0] = 1UL } },
-#endif
[N_CPU] = { { [0] = 1UL } },
#endif /* NUMA */
};
@@ -511,7 +509,7 @@ static int page_is_consistent(struct zone *zone, struct page *page)
/*
* Temporary debugging check for pages not lying within a given zone.
*/
-static int bad_range(struct zone *zone, struct page *page)
+static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
if (page_outside_zone_boundaries(zone, page))
return 1;
@@ -521,7 +519,7 @@ static int bad_range(struct zone *zone, struct page *page)
return 0;
}
#else
-static inline int bad_range(struct zone *zone, struct page *page)
+static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
return 0;
}
@@ -1297,8 +1295,9 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
#endif
#ifdef CONFIG_NODES_SPAN_OTHER_NODES
-static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
- struct mminit_pfnnid_cache *state)
+static inline bool __meminit __maybe_unused
+meminit_pfn_in_nid(unsigned long pfn, int node,
+ struct mminit_pfnnid_cache *state)
{
int nid;
@@ -1320,8 +1319,9 @@ static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
return true;
}
-static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
- struct mminit_pfnnid_cache *state)
+static inline bool __meminit __maybe_unused
+meminit_pfn_in_nid(unsigned long pfn, int node,
+ struct mminit_pfnnid_cache *state)
{
return true;
}
@@ -1365,7 +1365,9 @@ struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
return NULL;
- start_page = pfn_to_page(start_pfn);
+ start_page = pfn_to_online_page(start_pfn);
+ if (!start_page)
+ return NULL;
if (page_zone(start_page) != zone)
return NULL;
@@ -3673,6 +3675,39 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
return false;
}
+static inline bool
+check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
+{
+ /*
+ * It's possible that cpuset's mems_allowed and the nodemask from
+ * mempolicy don't intersect. This should normally be dealt with by
+ * policy_nodemask(), but it's possible to race with a cpuset update in
+ * such a way that the check therein was true, and then it became false
+ * before we got our cpuset_mems_cookie here.
+ * This assumes that for all allocations, ac->nodemask can come only
+ * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
+ * when it does not intersect with the cpuset restrictions) or the
+ * caller can deal with a violated nodemask.
+ */
+ if (cpusets_enabled() && ac->nodemask &&
+ !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
+ ac->nodemask = NULL;
+ return true;
+ }
+
+ /*
+ * When updating a task's mems_allowed or mempolicy nodemask, it is
+ * possible to race with parallel threads in such a way that our
+ * allocation can fail while the mask is being updated. If we are about
+ * to fail, check if the cpuset changed during allocation and if so,
+ * retry.
+ */
+ if (read_mems_allowed_retry(cpuset_mems_cookie))
+ return true;
+
+ return false;
+}
+
static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
struct alloc_context *ac)
@@ -3868,11 +3903,9 @@ retry:
&compaction_retries))
goto retry;
- /*
- * It's possible we raced with cpuset update so the OOM would be
- * premature (see below the nopage: label for full explanation).
- */
- if (read_mems_allowed_retry(cpuset_mems_cookie))
+
+ /* Deal with possible cpuset update races before we start OOM killing */
+ if (check_retry_cpuset(cpuset_mems_cookie, ac))
goto retry_cpuset;
/* Reclaim has failed us, start killing things */
@@ -3893,14 +3926,8 @@ retry:
}
nopage:
- /*
- * When updating a task's mems_allowed or mempolicy nodemask, it is
- * possible to race with parallel threads in such a way that our
- * allocation can fail while the mask is being updated. If we are about
- * to fail, check if the cpuset changed during allocation and if so,
- * retry.
- */
- if (read_mems_allowed_retry(cpuset_mems_cookie))
+ /* Deal with possible cpuset update races before we fail */
+ if (check_retry_cpuset(cpuset_mems_cookie, ac))
goto retry_cpuset;
/*
@@ -3951,12 +3978,12 @@ got_pg:
}
static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
- struct zonelist *zonelist, nodemask_t *nodemask,
+ int preferred_nid, nodemask_t *nodemask,
struct alloc_context *ac, gfp_t *alloc_mask,
unsigned int *alloc_flags)
{
ac->high_zoneidx = gfp_zone(gfp_mask);
- ac->zonelist = zonelist;
+ ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
ac->nodemask = nodemask;
ac->migratetype = gfpflags_to_migratetype(gfp_mask);
@@ -4001,8 +4028,8 @@ static inline void finalise_ac(gfp_t gfp_mask,
* This is the 'heart' of the zoned buddy allocator.
*/
struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
- struct zonelist *zonelist, nodemask_t *nodemask)
+__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
+ nodemask_t *nodemask)
{
struct page *page;
unsigned int alloc_flags = ALLOC_WMARK_LOW;
@@ -4010,7 +4037,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
struct alloc_context ac = { };
gfp_mask &= gfp_allowed_mask;
- if (!prepare_alloc_pages(gfp_mask, order, zonelist, nodemask, &ac, &alloc_mask, &alloc_flags))
+ if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
return NULL;
finalise_ac(gfp_mask, order, &ac);
@@ -4614,8 +4641,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
" present:%lukB"
" managed:%lukB"
" mlocked:%lukB"
- " slab_reclaimable:%lukB"
- " slab_unreclaimable:%lukB"
" kernel_stack:%lukB"
" pagetables:%lukB"
" bounce:%lukB"
@@ -4637,8 +4662,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
K(zone->present_pages),
K(zone->managed_pages),
K(zone_page_state(zone, NR_MLOCK)),
- K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
- K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
zone_page_state(zone, NR_KERNEL_STACK_KB),
K(zone_page_state(zone, NR_PAGETABLE)),
K(zone_page_state(zone, NR_BOUNCE)),
@@ -5124,6 +5147,7 @@ static void build_zonelists(pg_data_t *pgdat)
*/
static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
+static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
static void setup_zone_pageset(struct zone *zone);
/*
@@ -5528,7 +5552,7 @@ static __meminit void zone_pcp_init(struct zone *zone)
zone_batchsize(zone));
}
-int __meminit init_currently_empty_zone(struct zone *zone,
+void __meminit init_currently_empty_zone(struct zone *zone,
unsigned long zone_start_pfn,
unsigned long size)
{
@@ -5546,8 +5570,6 @@ int __meminit init_currently_empty_zone(struct zone *zone,
zone_init_free_lists(zone);
zone->initialized = 1;
-
- return 0;
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
@@ -6005,7 +6027,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
{
enum zone_type j;
int nid = pgdat->node_id;
- int ret;
pgdat_resize_init(pgdat);
#ifdef CONFIG_NUMA_BALANCING
@@ -6027,6 +6048,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
spin_lock_init(&pgdat->lru_lock);
lruvec_init(node_lruvec(pgdat));
+ pgdat->per_cpu_nodestats = &boot_nodestats;
+
for (j = 0; j < MAX_NR_ZONES; j++) {
struct zone *zone = pgdat->node_zones + j;
unsigned long size, realsize, freesize, memmap_pages;
@@ -6087,8 +6110,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
set_pageblock_order();
setup_usemap(pgdat, zone, zone_start_pfn, size);
- ret = init_currently_empty_zone(zone, zone_start_pfn, size);
- BUG_ON(ret);
+ init_currently_empty_zone(zone, zone_start_pfn, size);
memmap_init(size, nid, j, zone_start_pfn);
}
}
@@ -7182,6 +7204,21 @@ static unsigned long __init arch_reserved_kernel_pages(void)
#endif
/*
+ * Adaptive scale is meant to reduce sizes of hash tables on large memory
+ * machines. As memory size is increased the scale is also increased but at
+ * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory
+ * quadruples the scale is increased by one, which means the size of the hash
+ * table only doubles, instead of quadrupling as well.
+ * Because 32-bit systems cannot have large physical memory, where this scaling
+ * makes sense, it is disabled on such platforms.
+ */
+#if __BITS_PER_LONG > 32
+#define ADAPT_SCALE_BASE (64ul << 30)
+#define ADAPT_SCALE_SHIFT 2
+#define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT)
+#endif
+
+/*
* allocate a large system hash table from bootmem
* - it is assumed that the hash table must contain an exact power-of-2
* quantity of entries
@@ -7200,6 +7237,7 @@ void *__init alloc_large_system_hash(const char *tablename,
unsigned long long max = high_limit;
unsigned long log2qty, size;
void *table = NULL;
+ gfp_t gfp_flags;
/* allow the kernel cmdline to have a say */
if (!numentries) {
@@ -7211,6 +7249,16 @@ void *__init alloc_large_system_hash(const char *tablename,
if (PAGE_SHIFT < 20)
numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
+#if __BITS_PER_LONG > 32
+ if (!high_limit) {
+ unsigned long adapt;
+
+ for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
+ adapt <<= ADAPT_SCALE_SHIFT)
+ scale++;
+ }
+#endif
+
/* limit to 1 bucket per 2^scale bytes of low memory */
if (scale > PAGE_SHIFT)
numentries >>= (scale - PAGE_SHIFT);
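
As a concrete check of the adaptive scaling above (a standalone sketch with assumed constants, not kernel code): with 4 KiB pages, ADAPT_SCALE_NPAGES is 16M pages (64 GiB), and each factor-of-4 growth beyond that adds one to scale, so the bucket count only doubles per memory quadrupling.

#include <stdio.h>

#define PAGE_SHIFT         12                    /* assumed 4 KiB base pages */
#define ADAPT_SCALE_BASE   (64ul << 30)
#define ADAPT_SCALE_SHIFT  2
#define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT)

int main(void)
{
        unsigned long numentries = (256ul << 30) >> PAGE_SHIFT; /* 256 GiB worth of pages */
        unsigned long adapt, scale = 17;                        /* example caller-provided scale */

        for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries; adapt <<= ADAPT_SCALE_SHIFT)
                scale++;

        printf("scale = %lu\n", scale);          /* 18: one extra step at 4x the 64 GiB base */
        return 0;
}
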
@@ -7244,12 +7292,17 @@ void *__init alloc_large_system_hash(const char *tablename,
log2qty = ilog2(numentries);
+ /*
+ * memblock allocator returns zeroed memory already, so HASH_ZERO is
+ * currently not used when HASH_EARLY is specified.
+ */
+ gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
do {
size = bucketsize << log2qty;
if (flags & HASH_EARLY)
table = memblock_virt_alloc_nopanic(size, 0);
else if (hashdist)
- table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
+ table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
else {
/*
* If bucketsize is not a power-of-two, we may free
@@ -7257,8 +7310,8 @@ void *__init alloc_large_system_hash(const char *tablename,
* alloc_pages_exact() automatically does
*/
if (get_order(size) < MAX_ORDER) {
- table = alloc_pages_exact(size, GFP_ATOMIC);
- kmemleak_alloc(table, size, 1, GFP_ATOMIC);
+ table = alloc_pages_exact(size, gfp_flags);
+ kmemleak_alloc(table, size, 1, gfp_flags);
}
}
} while (!table && size > PAGE_SIZE && --log2qty);
@@ -7660,6 +7713,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
break;
if (pfn == end_pfn)
return;
+ offline_mem_sections(pfn, end_pfn);
zone = page_zone(pfn_to_page(pfn));
spin_lock_irqsave(&zone->lock, flags);
pfn = start_pfn;
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 5092e4ef00c8..3606104893e0 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -138,12 +138,18 @@ static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
int i;
- for (i = 0; i < nr_pages; i++)
- if (pfn_valid_within(pfn + i))
- break;
- if (unlikely(i == nr_pages))
- return NULL;
- return pfn_to_page(pfn + i);
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page;
+
+ if (!pfn_valid_within(pfn + i))
+ continue;
+ page = pfn_to_online_page(pfn + i);
+ if (!page)
+ continue;
+ return page;
+ }
+ return NULL;
}
/*
@@ -184,8 +190,12 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
undo:
for (pfn = start_pfn;
pfn < undo_pfn;
- pfn += pageblock_nr_pages)
- unset_migratetype_isolate(pfn_to_page(pfn), migratetype);
+ pfn += pageblock_nr_pages) {
+ struct page *page = pfn_to_online_page(pfn);
+ if (!page)
+ continue;
+ unset_migratetype_isolate(page, migratetype);
+ }
return -EBUSY;
}
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index de9c40d7304a..8ec6ba230bb9 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -116,7 +116,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
if (unlikely(PageHuge(pvmw->page))) {
/* when pud is not present, pte will be NULL */
- pvmw->pte = huge_pte_offset(mm, pvmw->address);
+ pvmw->pte = huge_pte_offset(mm, pvmw->address,
+ PAGE_SIZE << compound_order(page));
if (!pvmw->pte)
return false;
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 60f7856e508f..1a4197965415 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -180,12 +180,13 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
struct hstate *h = hstate_vma(vma);
unsigned long next;
unsigned long hmask = huge_page_mask(h);
+ unsigned long sz = huge_page_size(h);
pte_t *pte;
int err = 0;
do {
next = hugetlb_entry_end(h, addr, end);
- pte = huge_pte_offset(walk->mm, addr & hmask);
+ pte = huge_pte_offset(walk->mm, addr & hmask, sz);
if (pte && walk->hugetlb_entry)
err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
if (err)
diff --git a/mm/percpu-internal.h b/mm/percpu-internal.h
new file mode 100644
index 000000000000..cd2442e13d8f
--- /dev/null
+++ b/mm/percpu-internal.h
@@ -0,0 +1,166 @@
+#ifndef _MM_PERCPU_INTERNAL_H
+#define _MM_PERCPU_INTERNAL_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+
+struct pcpu_chunk {
+#ifdef CONFIG_PERCPU_STATS
+ int nr_alloc; /* # of allocations */
+ size_t max_alloc_size; /* largest allocation size */
+#endif
+
+ struct list_head list; /* linked to pcpu_slot lists */
+ int free_size; /* free bytes in the chunk */
+ int contig_hint; /* max contiguous size hint */
+ void *base_addr; /* base address of this chunk */
+
+ int map_used; /* # of map entries used before the sentry */
+ int map_alloc; /* # of map entries allocated */
+ int *map; /* allocation map */
+ struct list_head map_extend_list;/* on pcpu_map_extend_chunks */
+
+ void *data; /* chunk data */
+ int first_free; /* no free below this */
+ bool immutable; /* no [de]population allowed */
+ bool has_reserved; /* Indicates if chunk has reserved space
+ at the beginning. Reserved chunk will
+ contain reservation for static chunk.
+ Dynamic chunk will contain reservation
+ for static and reserved chunks. */
+ int nr_populated; /* # of populated pages */
+ unsigned long populated[]; /* populated bitmap */
+};
+
+extern spinlock_t pcpu_lock;
+
+extern struct list_head *pcpu_slot;
+extern int pcpu_nr_slots;
+
+extern struct pcpu_chunk *pcpu_first_chunk;
+extern struct pcpu_chunk *pcpu_reserved_chunk;
+
+#ifdef CONFIG_PERCPU_STATS
+
+#include <linux/spinlock.h>
+
+struct percpu_stats {
+ u64 nr_alloc; /* lifetime # of allocations */
+ u64 nr_dealloc; /* lifetime # of deallocations */
+ u64 nr_cur_alloc; /* current # of allocations */
+ u64 nr_max_alloc; /* max # of live allocations */
+ u32 nr_chunks; /* current # of live chunks */
+ u32 nr_max_chunks; /* max # of live chunks */
+ size_t min_alloc_size; /* min allocation size */
+ size_t max_alloc_size; /* max allocation size */
+};
+
+extern struct percpu_stats pcpu_stats;
+extern struct pcpu_alloc_info pcpu_stats_ai;
+
+/*
+ * For debug purposes. We don't care about the flexible array.
+ */
+static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
+{
+ memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info));
+
+ /* initialize min_alloc_size to unit_size */
+ pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size;
+}
+
+/*
+ * pcpu_stats_area_alloc - increment area allocation stats
+ * @chunk: the location of the area being allocated
+ * @size: size of area to allocate in bytes
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ */
+static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
+{
+ lockdep_assert_held(&pcpu_lock);
+
+ pcpu_stats.nr_alloc++;
+ pcpu_stats.nr_cur_alloc++;
+ pcpu_stats.nr_max_alloc =
+ max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);
+ pcpu_stats.min_alloc_size =
+ min(pcpu_stats.min_alloc_size, size);
+ pcpu_stats.max_alloc_size =
+ max(pcpu_stats.max_alloc_size, size);
+
+ chunk->nr_alloc++;
+ chunk->max_alloc_size = max(chunk->max_alloc_size, size);
+}
+
+/*
+ * pcpu_stats_area_dealloc - decrement allocation stats
+ * @chunk: the location of the area being deallocated
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ */
+static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
+{
+ lockdep_assert_held(&pcpu_lock);
+
+ pcpu_stats.nr_dealloc++;
+ pcpu_stats.nr_cur_alloc--;
+
+ chunk->nr_alloc--;
+}
+
+/*
+ * pcpu_stats_chunk_alloc - increment chunk stats
+ */
+static inline void pcpu_stats_chunk_alloc(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&pcpu_lock, flags);
+
+ pcpu_stats.nr_chunks++;
+ pcpu_stats.nr_max_chunks =
+ max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);
+
+ spin_unlock_irqrestore(&pcpu_lock, flags);
+}
+
+/*
+ * pcpu_stats_chunk_dealloc - decrement chunk stats
+ */
+static inline void pcpu_stats_chunk_dealloc(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&pcpu_lock, flags);
+
+ pcpu_stats.nr_chunks--;
+
+ spin_unlock_irqrestore(&pcpu_lock, flags);
+}
+
+#else
+
+static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
+{
+}
+
+static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
+{
+}
+
+static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
+{
+}
+
+static inline void pcpu_stats_chunk_alloc(void)
+{
+}
+
+static inline void pcpu_stats_chunk_dealloc(void)
+{
+}
+
+#endif /* !CONFIG_PERCPU_STATS */
+
+#endif
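
The stats hooks above are meant to be called in pairs under pcpu_lock (area alloc/dealloc, chunk alloc/dealloc), and pcpu_stats_save_ai() seeds min_alloc_size with the unit size so the first min() comparison is meaningful; with CONFIG_PERCPU_STATS disabled they all collapse to empty inlines. A small userspace sketch of the same watermark bookkeeping (illustrative names only, not kernel API):

#include <stddef.h>
#include <string.h>

struct alloc_stats {
        unsigned long nr_alloc, nr_dealloc, nr_cur_alloc, nr_max_alloc;
        size_t min_alloc_size, max_alloc_size;
};

static void stats_init(struct alloc_stats *s, size_t unit_size)
{
        memset(s, 0, sizeof(*s));
        s->min_alloc_size = unit_size;           /* seed so the first min() works */
}

static void stats_area_alloc(struct alloc_stats *s, size_t size)
{
        s->nr_alloc++;
        s->nr_cur_alloc++;
        if (s->nr_cur_alloc > s->nr_max_alloc)
                s->nr_max_alloc = s->nr_cur_alloc;  /* lifetime high-water mark */
        if (size < s->min_alloc_size)
                s->min_alloc_size = size;
        if (size > s->max_alloc_size)
                s->max_alloc_size = size;
}

static void stats_area_dealloc(struct alloc_stats *s)
{
        s->nr_dealloc++;
        s->nr_cur_alloc--;                       /* maxima and minima are never rewound */
}
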
diff --git a/mm/percpu-km.c b/mm/percpu-km.c
index d66911ff42d9..eb58aa4c0997 100644
--- a/mm/percpu-km.c
+++ b/mm/percpu-km.c
@@ -72,6 +72,9 @@ static struct pcpu_chunk *pcpu_create_chunk(void)
pcpu_chunk_populated(chunk, 0, nr_pages);
spin_unlock_irq(&pcpu_lock);
+ pcpu_stats_chunk_alloc();
+ trace_percpu_create_chunk(chunk->base_addr);
+
return chunk;
}
@@ -79,7 +82,13 @@ static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
- if (chunk && chunk->data)
+ if (!chunk)
+ return;
+
+ pcpu_stats_chunk_dealloc();
+ trace_percpu_destroy_chunk(chunk->base_addr);
+
+ if (chunk->data)
__free_pages(chunk->data, order_base_2(nr_pages));
pcpu_free_chunk(chunk);
}
diff --git a/mm/percpu-stats.c b/mm/percpu-stats.c
new file mode 100644
index 000000000000..03524a56eeff
--- /dev/null
+++ b/mm/percpu-stats.c
@@ -0,0 +1,222 @@
+/*
+ * mm/percpu-stats.c
+ *
+ * Copyright (C) 2017 Facebook Inc.
+ * Copyright (C) 2017 Dennis Zhou <[email protected]>
+ *
+ * This file is released under the GPLv2.
+ *
+ * Prints statistics about the percpu allocator and backing chunks.
+ */
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/percpu.h>
+#include <linux/seq_file.h>
+#include <linux/sort.h>
+#include <linux/vmalloc.h>
+
+#include "percpu-internal.h"
+
+#define P(X, Y) \
+ seq_printf(m, " %-24s: %8lld\n", X, (long long int)Y)
+
+struct percpu_stats pcpu_stats;
+struct pcpu_alloc_info pcpu_stats_ai;
+
+static int cmpint(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+/*
+ * Iterates over all chunks to find the max # of map entries used.
+ */
+static int find_max_map_used(void)
+{
+ struct pcpu_chunk *chunk;
+ int slot, max_map_used;
+
+ max_map_used = 0;
+ for (slot = 0; slot < pcpu_nr_slots; slot++)
+ list_for_each_entry(chunk, &pcpu_slot[slot], list)
+ max_map_used = max(max_map_used, chunk->map_used);
+
+ return max_map_used;
+}
+
+/*
+ * Prints out chunk state. Fragmentation is considered between
+ * the beginning of the chunk to the last allocation.
+ */
+static void chunk_map_stats(struct seq_file *m, struct pcpu_chunk *chunk,
+ void *buffer)
+{
+ int i, s_index, last_alloc, alloc_sign, as_len;
+ int *alloc_sizes, *p;
+ /* statistics */
+ int sum_frag = 0, max_frag = 0;
+ int cur_min_alloc = 0, cur_med_alloc = 0, cur_max_alloc = 0;
+
+ alloc_sizes = buffer;
+ s_index = chunk->has_reserved ? 1 : 0;
+
+ /* find last allocation */
+ last_alloc = -1;
+ for (i = chunk->map_used - 1; i >= s_index; i--) {
+ if (chunk->map[i] & 1) {
+ last_alloc = i;
+ break;
+ }
+ }
+
+ /* if the chunk is not empty - ignoring reserve */
+ if (last_alloc >= s_index) {
+ as_len = last_alloc + 1 - s_index;
+
+ /*
+ * Iterate through chunk map computing size info.
+ * The first bit is overloaded to be a used flag.
+ * negative = free space, positive = allocated
+ */
+ for (i = 0, p = chunk->map + s_index; i < as_len; i++, p++) {
+ alloc_sign = (*p & 1) ? 1 : -1;
+ alloc_sizes[i] = alloc_sign *
+ ((p[1] & ~1) - (p[0] & ~1));
+ }
+
+ sort(alloc_sizes, as_len, sizeof(chunk->map[0]), cmpint, NULL);
+
+ /* Iterate through the unallocated fragments. */
+ for (i = 0, p = alloc_sizes; *p < 0 && i < as_len; i++, p++) {
+ sum_frag -= *p;
+ max_frag = max(max_frag, -1 * (*p));
+ }
+
+ cur_min_alloc = alloc_sizes[i];
+ cur_med_alloc = alloc_sizes[(i + as_len - 1) / 2];
+ cur_max_alloc = alloc_sizes[as_len - 1];
+ }
+
+ P("nr_alloc", chunk->nr_alloc);
+ P("max_alloc_size", chunk->max_alloc_size);
+ P("free_size", chunk->free_size);
+ P("contig_hint", chunk->contig_hint);
+ P("sum_frag", sum_frag);
+ P("max_frag", max_frag);
+ P("cur_min_alloc", cur_min_alloc);
+ P("cur_med_alloc", cur_med_alloc);
+ P("cur_max_alloc", cur_max_alloc);
+ seq_putc(m, '\n');
+}
+
+static int percpu_stats_show(struct seq_file *m, void *v)
+{
+ struct pcpu_chunk *chunk;
+ int slot, max_map_used;
+ void *buffer;
+
+alloc_buffer:
+ spin_lock_irq(&pcpu_lock);
+ max_map_used = find_max_map_used();
+ spin_unlock_irq(&pcpu_lock);
+
+ buffer = vmalloc(max_map_used * sizeof(pcpu_first_chunk->map[0]));
+ if (!buffer)
+ return -ENOMEM;
+
+ spin_lock_irq(&pcpu_lock);
+
+ /* if the buffer allocated earlier is too small */
+ if (max_map_used < find_max_map_used()) {
+ spin_unlock_irq(&pcpu_lock);
+ vfree(buffer);
+ goto alloc_buffer;
+ }
+
+#define PL(X) \
+ seq_printf(m, " %-24s: %8lld\n", #X, (long long int)pcpu_stats_ai.X)
+
+ seq_printf(m,
+ "Percpu Memory Statistics\n"
+ "Allocation Info:\n"
+ "----------------------------------------\n");
+ PL(unit_size);
+ PL(static_size);
+ PL(reserved_size);
+ PL(dyn_size);
+ PL(atom_size);
+ PL(alloc_size);
+ seq_putc(m, '\n');
+
+#undef PL
+
+#define PU(X) \
+ seq_printf(m, " %-18s: %14llu\n", #X, (unsigned long long)pcpu_stats.X)
+
+ seq_printf(m,
+ "Global Stats:\n"
+ "----------------------------------------\n");
+ PU(nr_alloc);
+ PU(nr_dealloc);
+ PU(nr_cur_alloc);
+ PU(nr_max_alloc);
+ PU(nr_chunks);
+ PU(nr_max_chunks);
+ PU(min_alloc_size);
+ PU(max_alloc_size);
+ seq_putc(m, '\n');
+
+#undef PU
+
+ seq_printf(m,
+ "Per Chunk Stats:\n"
+ "----------------------------------------\n");
+
+ if (pcpu_reserved_chunk) {
+ seq_puts(m, "Chunk: <- Reserved Chunk\n");
+ chunk_map_stats(m, pcpu_reserved_chunk, buffer);
+ }
+
+ for (slot = 0; slot < pcpu_nr_slots; slot++) {
+ list_for_each_entry(chunk, &pcpu_slot[slot], list) {
+ if (chunk == pcpu_first_chunk) {
+ seq_puts(m, "Chunk: <- First Chunk\n");
+ chunk_map_stats(m, chunk, buffer);
+
+ } else {
+ seq_puts(m, "Chunk:\n");
+ chunk_map_stats(m, chunk, buffer);
+ }
+
+ }
+ }
+
+ spin_unlock_irq(&pcpu_lock);
+
+ vfree(buffer);
+
+ return 0;
+}
+
+static int percpu_stats_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, percpu_stats_show, NULL);
+}
+
+static const struct file_operations percpu_stats_fops = {
+ .open = percpu_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init init_percpu_stats_debugfs(void)
+{
+ debugfs_create_file("percpu_stats", 0444, NULL, NULL,
+ &percpu_stats_fops);
+
+ return 0;
+}
+
+late_initcall(init_percpu_stats_debugfs);
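
percpu_stats_show() above sizes its scratch buffer from find_max_map_used() sampled under pcpu_lock, drops the lock to vmalloc, then re-takes the lock and re-checks the maximum, retrying if the chunks grew in the window. A hedged userspace sketch of that allocate-then-revalidate pattern (a pthread mutex stands in for pcpu_lock and malloc for vmalloc; every name here is illustrative):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static size_t current_max(void)                  /* stand-in for find_max_map_used() */
{
        return 128;
}

/* Returns a buffer at least as large as current_max(), with the lock held
 * so the caller can consume a consistent snapshot, then unlock and free. */
static void *alloc_snapshot_buffer(size_t *lenp)
{
        size_t len;
        void *buf;

again:
        pthread_mutex_lock(&lock);
        len = current_max();
        pthread_mutex_unlock(&lock);

        buf = malloc(len);
        if (!buf)
                return NULL;

        pthread_mutex_lock(&lock);
        if (current_max() > len) {               /* grew while unlocked: start over */
                pthread_mutex_unlock(&lock);
                free(buf);
                goto again;
        }
        *lenp = len;
        return buf;                              /* lock intentionally still held */
}
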
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 9ac639499bd1..15dab691ea70 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -343,12 +343,22 @@ static struct pcpu_chunk *pcpu_create_chunk(void)
chunk->data = vms;
chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
+
+ pcpu_stats_chunk_alloc();
+ trace_percpu_create_chunk(chunk->base_addr);
+
return chunk;
}
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
- if (chunk && chunk->data)
+ if (!chunk)
+ return;
+
+ pcpu_stats_chunk_dealloc();
+ trace_percpu_destroy_chunk(chunk->base_addr);
+
+ if (chunk->data)
pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
pcpu_free_chunk(chunk);
}
diff --git a/mm/percpu.c b/mm/percpu.c
index e0aa8ae7bde7..bd4130a69bbc 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -76,6 +76,11 @@
#include <asm/tlbflush.h>
#include <asm/io.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/percpu.h>
+
+#include "percpu-internal.h"
+
#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
#define PCPU_ATOMIC_MAP_MARGIN_LOW 32
@@ -103,53 +108,35 @@
#define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr)
#endif /* CONFIG_SMP */
-struct pcpu_chunk {
- struct list_head list; /* linked to pcpu_slot lists */
- int free_size; /* free bytes in the chunk */
- int contig_hint; /* max contiguous size hint */
- void *base_addr; /* base address of this chunk */
-
- int map_used; /* # of map entries used before the sentry */
- int map_alloc; /* # of map entries allocated */
- int *map; /* allocation map */
- struct list_head map_extend_list;/* on pcpu_map_extend_chunks */
-
- void *data; /* chunk data */
- int first_free; /* no free below this */
- bool immutable; /* no [de]population allowed */
- int nr_populated; /* # of populated pages */
- unsigned long populated[]; /* populated bitmap */
-};
-
-static int pcpu_unit_pages __read_mostly;
-static int pcpu_unit_size __read_mostly;
-static int pcpu_nr_units __read_mostly;
-static int pcpu_atom_size __read_mostly;
-static int pcpu_nr_slots __read_mostly;
-static size_t pcpu_chunk_struct_size __read_mostly;
+static int pcpu_unit_pages __ro_after_init;
+static int pcpu_unit_size __ro_after_init;
+static int pcpu_nr_units __ro_after_init;
+static int pcpu_atom_size __ro_after_init;
+int pcpu_nr_slots __ro_after_init;
+static size_t pcpu_chunk_struct_size __ro_after_init;
/* cpus with the lowest and highest unit addresses */
-static unsigned int pcpu_low_unit_cpu __read_mostly;
-static unsigned int pcpu_high_unit_cpu __read_mostly;
+static unsigned int pcpu_low_unit_cpu __ro_after_init;
+static unsigned int pcpu_high_unit_cpu __ro_after_init;
/* the address of the first chunk which starts with the kernel static area */
-void *pcpu_base_addr __read_mostly;
+void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);
-static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
-const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */
+static const int *pcpu_unit_map __ro_after_init; /* cpu -> unit */
+const unsigned long *pcpu_unit_offsets __ro_after_init; /* cpu -> unit offset */
/* group information, used for vm allocation */
-static int pcpu_nr_groups __read_mostly;
-static const unsigned long *pcpu_group_offsets __read_mostly;
-static const size_t *pcpu_group_sizes __read_mostly;
+static int pcpu_nr_groups __ro_after_init;
+static const unsigned long *pcpu_group_offsets __ro_after_init;
+static const size_t *pcpu_group_sizes __ro_after_init;
/*
* The first chunk which always exists. Note that unlike other
* chunks, this one can be allocated and mapped in several different
* ways and thus often doesn't live in the vmalloc area.
*/
-static struct pcpu_chunk *pcpu_first_chunk;
+struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
/*
* Optional reserved chunk. This chunk reserves part of the first
@@ -158,13 +145,13 @@ static struct pcpu_chunk *pcpu_first_chunk;
* area doesn't exist, the following variables contain NULL and 0
* respectively.
*/
-static struct pcpu_chunk *pcpu_reserved_chunk;
-static int pcpu_reserved_chunk_limit;
+struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
+static int pcpu_reserved_chunk_limit __ro_after_init;
-static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
+DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
-static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
+struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */
/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);
@@ -672,6 +659,9 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme,
int to_free = 0;
int *p;
+ lockdep_assert_held(&pcpu_lock);
+ pcpu_stats_area_dealloc(chunk);
+
freeme |= 1; /* we are searching for <given offset, in use> pair */
i = 0;
@@ -735,6 +725,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
chunk->map[0] = 0;
chunk->map[1] = pcpu_unit_size | 1;
chunk->map_used = 1;
+ chunk->has_reserved = false;
INIT_LIST_HEAD(&chunk->list);
INIT_LIST_HEAD(&chunk->map_extend_list);
@@ -965,8 +956,10 @@ restart:
* tasks to create chunks simultaneously. Serialize and create iff
* there's still no empty chunk after grabbing the mutex.
*/
- if (is_atomic)
+ if (is_atomic) {
+ err = "atomic alloc failed, no space left";
goto fail;
+ }
if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
chunk = pcpu_create_chunk();
@@ -984,6 +977,7 @@ restart:
goto restart;
area_found:
+ pcpu_stats_area_alloc(chunk, size);
spin_unlock_irqrestore(&pcpu_lock, flags);
/* populate if not all pages are already there */
@@ -1026,11 +1020,17 @@ area_found:
ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
kmemleak_alloc_percpu(ptr, size, gfp);
+
+ trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
+ chunk->base_addr, off, ptr);
+
return ptr;
fail_unlock:
spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
+ trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
+
if (!is_atomic && warn_limit) {
pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
size, align, is_atomic, err);
@@ -1280,6 +1280,8 @@ void free_percpu(void __percpu *ptr)
}
}
+ trace_percpu_free_percpu(chunk->base_addr, off, ptr);
+
spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);
@@ -1656,6 +1658,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
+ pcpu_stats_save_ai(ai);
+
/*
* Allocate chunk slots. The additional last slot is for
* empty chunks.
@@ -1699,6 +1703,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
if (schunk->free_size)
schunk->map[++schunk->map_used] = ai->static_size + schunk->free_size;
schunk->map[schunk->map_used] |= 1;
+ schunk->has_reserved = true;
/* init dynamic chunk if necessary */
if (dyn_size) {
@@ -1717,6 +1722,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
dchunk->map[1] = pcpu_reserved_chunk_limit;
dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
dchunk->map_used = 2;
+ dchunk->has_reserved = true;
}
/* link the first chunk in */
@@ -1725,6 +1731,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
pcpu_count_occupied_pages(pcpu_first_chunk, 1);
pcpu_chunk_relocate(pcpu_first_chunk, -1);
+ pcpu_stats_chunk_alloc();
+ trace_percpu_create_chunk(base_addr);
+
/* we're done */
pcpu_base_addr = base_addr;
return 0;
diff --git a/mm/rmap.c b/mm/rmap.c
index 130c238fe384..ced14f1af6dc 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1145,8 +1145,7 @@ void page_add_file_rmap(struct page *page, bool compound)
if (!atomic_inc_and_test(&page->_mapcount))
goto out;
}
- __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
- mod_memcg_page_state(page, NR_FILE_MAPPED, nr);
+ __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
out:
unlock_page_memcg(page);
}
@@ -1181,12 +1180,11 @@ static void page_remove_file_rmap(struct page *page, bool compound)
}
/*
- * We use the irq-unsafe __{inc|mod}_zone_page_state because
+ * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
* these counters are not modified in interrupt context, and
* pte lock(a spinlock) is held, which implies preemption disabled.
*/
- __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
- mod_memcg_page_state(page, NR_FILE_MAPPED, -nr);
+ __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);
if (unlikely(PageMlocked(page)))
clear_page_mlock(page);
@@ -1367,15 +1365,18 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
update_hiwater_rss(mm);
if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
+ pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
if (PageHuge(page)) {
int nr = 1 << compound_order(page);
hugetlb_count_sub(nr, mm);
+ set_huge_swap_pte_at(mm, address,
+ pvmw.pte, pteval,
+ vma_mmu_pagesize(vma));
} else {
dec_mm_counter(mm, mm_counter(page));
+ set_pte_at(mm, address, pvmw.pte, pteval);
}
- pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
- set_pte_at(mm, address, pvmw.pte, pteval);
} else if (pte_unused(pteval)) {
/*
* The guest indicated that the page content is of no
diff --git a/mm/shmem.c b/mm/shmem.c
index 9100c4952698..9418f5a9bc46 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1291,7 +1291,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
SetPageUptodate(page);
}
- swap = get_swap_page();
+ swap = get_swap_page(page);
if (!swap.val)
goto redirty;
@@ -1327,7 +1327,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
mutex_unlock(&shmem_swaplist_mutex);
free_swap:
- swapcache_free(swap);
+ put_swap_page(page, swap);
redirty:
set_page_dirty(page);
if (wbc->for_reclaim)
@@ -1646,8 +1646,7 @@ repeat:
if (fault_type) {
*fault_type |= VM_FAULT_MAJOR;
count_vm_event(PGMAJFAULT);
- mem_cgroup_count_vm_event(charge_mm,
- PGMAJFAULT);
+ count_memcg_event_mm(charge_mm, PGMAJFAULT);
}
/* Here we actually start the io */
page = shmem_swapin(swap, gfp, info, index);
diff --git a/mm/slab.c b/mm/slab.c
index 2a31ee3c5814..04dec48c3ed7 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1425,11 +1425,9 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
nr_pages = (1 << cachep->gfporder);
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
- add_zone_page_state(page_zone(page),
- NR_SLAB_RECLAIMABLE, nr_pages);
+ mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages);
else
- add_zone_page_state(page_zone(page),
- NR_SLAB_UNRECLAIMABLE, nr_pages);
+ mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages);
__SetPageSlab(page);
/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
@@ -1459,11 +1457,9 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
kmemcheck_free_shadow(page, order);
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
- sub_zone_page_state(page_zone(page),
- NR_SLAB_RECLAIMABLE, nr_freed);
+ mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
else
- sub_zone_page_state(page_zone(page),
- NR_SLAB_UNRECLAIMABLE, nr_freed);
+ mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed);
BUG_ON(!PageSlab(page));
__ClearPageSlabPfmemalloc(page);
@@ -2040,17 +2036,13 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
* unaligned accesses for some archs when redzoning is used, and makes
* sure any on-slab bufctl's are also correctly aligned.
*/
- if (size & (BYTES_PER_WORD - 1)) {
- size += (BYTES_PER_WORD - 1);
- size &= ~(BYTES_PER_WORD - 1);
- }
+ size = ALIGN(size, BYTES_PER_WORD);
if (flags & SLAB_RED_ZONE) {
ralign = REDZONE_ALIGN;
/* If redzoning, ensure that the second redzone is suitably
* aligned, by adjusting the object size accordingly. */
- size += REDZONE_ALIGN - 1;
- size &= ~(REDZONE_ALIGN - 1);
+ size = ALIGN(size, REDZONE_ALIGN);
}
/* 3) caller mandated alignment */
diff --git a/mm/slab.h b/mm/slab.h
index 9cfcf099709c..6885e1192ec5 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -274,22 +274,11 @@ static __always_inline int memcg_charge_slab(struct page *page,
gfp_t gfp, int order,
struct kmem_cache *s)
{
- int ret;
-
if (!memcg_kmem_enabled())
return 0;
if (is_root_cache(s))
return 0;
-
- ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
- if (ret)
- return ret;
-
- memcg_kmem_update_page_stat(page,
- (s->flags & SLAB_RECLAIM_ACCOUNT) ?
- MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
- 1 << order);
- return 0;
+ return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
}
static __always_inline void memcg_uncharge_slab(struct page *page, int order,
@@ -297,11 +286,6 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
{
if (!memcg_kmem_enabled())
return;
-
- memcg_kmem_update_page_stat(page,
- (s->flags & SLAB_RECLAIM_ACCOUNT) ?
- MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
- -(1 << order));
memcg_kmem_uncharge(page, order);
}
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 01a0fe2eb332..904a83be82de 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -47,13 +47,12 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
/*
* Merge control. If this is set then no merging of slab caches will occur.
- * (Could be removed. This was introduced to pacify the merge skeptics.)
*/
-static int slab_nomerge;
+static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
static int __init setup_slab_nomerge(char *str)
{
- slab_nomerge = 1;
+ slab_nomerge = true;
return 1;
}
diff --git a/mm/slub.c b/mm/slub.c
index 8addc535bcdc..1d3f9835f4ea 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1615,7 +1615,7 @@ out:
if (!page)
return NULL;
- mod_zone_page_state(page_zone(page),
+ mod_lruvec_page_state(page,
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1 << oo_order(oo));
@@ -1655,7 +1655,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
kmemcheck_free_shadow(page, compound_order(page));
- mod_zone_page_state(page_zone(page),
+ mod_lruvec_page_state(page,
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-pages);
@@ -1829,7 +1829,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
stat(s, CPU_PARTIAL_NODE);
}
if (!kmem_cache_has_cpu_partial(s)
- || available > s->cpu_partial / 2)
+ || available > slub_cpu_partial(s) / 2)
break;
}
@@ -1993,7 +1993,7 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
* Remove the cpu slab
*/
static void deactivate_slab(struct kmem_cache *s, struct page *page,
- void *freelist)
+ void *freelist, struct kmem_cache_cpu *c)
{
enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -2132,6 +2132,9 @@ redo:
discard_slab(s, page);
stat(s, FREE_SLAB);
}
+
+ c->page = NULL;
+ c->freelist = NULL;
}
/*
@@ -2266,11 +2269,9 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
stat(s, CPUSLAB_FLUSH);
- deactivate_slab(s, c->page, c->freelist);
+ deactivate_slab(s, c->page, c->freelist, c);
c->tid = next_tid(c->tid);
- c->page = NULL;
- c->freelist = NULL;
}
/*
@@ -2302,7 +2303,7 @@ static bool has_cpu_slab(int cpu, void *info)
struct kmem_cache *s = info;
struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
- return c->page || c->partial;
+ return c->page || slub_percpu_partial(c);
}
static void flush_all(struct kmem_cache *s)
@@ -2521,9 +2522,7 @@ redo:
if (unlikely(!node_match(page, searchnode))) {
stat(s, ALLOC_NODE_MISMATCH);
- deactivate_slab(s, page, c->freelist);
- c->page = NULL;
- c->freelist = NULL;
+ deactivate_slab(s, page, c->freelist, c);
goto new_slab;
}
}
@@ -2534,9 +2533,7 @@ redo:
* information when the page leaves the per-cpu allocator
*/
if (unlikely(!pfmemalloc_match(page, gfpflags))) {
- deactivate_slab(s, page, c->freelist);
- c->page = NULL;
- c->freelist = NULL;
+ deactivate_slab(s, page, c->freelist, c);
goto new_slab;
}
@@ -2568,11 +2565,10 @@ load_freelist:
new_slab:
- if (c->partial) {
- page = c->page = c->partial;
- c->partial = page->next;
+ if (slub_percpu_partial(c)) {
+ page = c->page = slub_percpu_partial(c);
+ slub_set_percpu_partial(c, page);
stat(s, CPU_PARTIAL_ALLOC);
- c->freelist = NULL;
goto redo;
}
@@ -2592,9 +2588,7 @@ new_slab:
!alloc_debug_processing(s, page, freelist, addr))
goto new_slab; /* Slab failed checks. Next slab needed */
- deactivate_slab(s, page, get_freepointer(s, freelist));
- c->page = NULL;
- c->freelist = NULL;
+ deactivate_slab(s, page, get_freepointer(s, freelist), c);
return freelist;
}
@@ -3410,6 +3404,39 @@ static void set_min_partial(struct kmem_cache *s, unsigned long min)
s->min_partial = min;
}
+static void set_cpu_partial(struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+ /*
+ * cpu_partial determined the maximum number of objects kept in the
+ * per cpu partial lists of a processor.
+ *
+ * Per cpu partial lists mainly contain slabs that just have one
+ * object freed. If they are used for allocation then they can be
+ * filled up again with minimal effort. The slab will never hit the
+ * per node partial lists and therefore no locking will be required.
+ *
+ * This setting also determines
+ *
+ * A) The number of objects from per cpu partial slabs dumped to the
+ * per node list when we reach the limit.
+ * B) The number of objects in cpu partial slabs to extract from the
+ * per node list when we run out of per cpu objects. We only fetch
+ * 50% to keep some capacity around for frees.
+ */
+ if (!kmem_cache_has_cpu_partial(s))
+ s->cpu_partial = 0;
+ else if (s->size >= PAGE_SIZE)
+ s->cpu_partial = 2;
+ else if (s->size >= 1024)
+ s->cpu_partial = 6;
+ else if (s->size >= 256)
+ s->cpu_partial = 13;
+ else
+ s->cpu_partial = 30;
+#endif
+}
+
/*
* calculate_sizes() determines the order and the distribution of data within
* a slab object.
@@ -3568,33 +3595,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
*/
set_min_partial(s, ilog2(s->size) / 2);
- /*
- * cpu_partial determined the maximum number of objects kept in the
- * per cpu partial lists of a processor.
- *
- * Per cpu partial lists mainly contain slabs that just have one
- * object freed. If they are used for allocation then they can be
- * filled up again with minimal effort. The slab will never hit the
- * per node partial lists and therefore no locking will be required.
- *
- * This setting also determines
- *
- * A) The number of objects from per cpu partial slabs dumped to the
- * per node list when we reach the limit.
- * B) The number of objects in cpu partial slabs to extract from the
- * per node list when we run out of per cpu objects. We only fetch
- * 50% to keep some capacity around for frees.
- */
- if (!kmem_cache_has_cpu_partial(s))
- s->cpu_partial = 0;
- else if (s->size >= PAGE_SIZE)
- s->cpu_partial = 2;
- else if (s->size >= 1024)
- s->cpu_partial = 6;
- else if (s->size >= 256)
- s->cpu_partial = 13;
- else
- s->cpu_partial = 30;
+ set_cpu_partial(s);
#ifdef CONFIG_NUMA
s->remote_node_defrag_ratio = 1000;
@@ -3981,7 +3982,7 @@ void __kmemcg_cache_deactivate(struct kmem_cache *s)
* Disable empty slabs caching. Used to avoid pinning offline
* memory cgroups by kmem pages that can be freed.
*/
- s->cpu_partial = 0;
+ slub_set_cpu_partial(s, 0);
s->min_partial = 0;
/*
@@ -4760,7 +4761,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
total += x;
nodes[node] += x;
- page = READ_ONCE(c->partial);
+ page = slub_percpu_partial_read_once(c);
if (page) {
node = page_to_nid(page);
if (flags & SO_TOTAL)
@@ -4921,7 +4922,7 @@ SLAB_ATTR(min_partial);
static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%u\n", s->cpu_partial);
+ return sprintf(buf, "%u\n", slub_cpu_partial(s));
}
static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
@@ -4936,7 +4937,7 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
if (objects && !kmem_cache_has_cpu_partial(s))
return -EINVAL;
- s->cpu_partial = objects;
+ slub_set_cpu_partial(s, objects);
flush_all(s);
return length;
}
@@ -4988,7 +4989,9 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
int len;
for_each_online_cpu(cpu) {
- struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
+ struct page *page;
+
+ page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
if (page) {
pages += page->pages;
@@ -5000,7 +5003,9 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
#ifdef CONFIG_SMP
for_each_online_cpu(cpu) {
- struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial;
+ struct page *page;
+
+ page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
if (page && len < PAGE_SIZE - 20)
len += sprintf(buf + len, " C%d=%d(%d)", cpu,
diff --git a/mm/sparse.c b/mm/sparse.c
index 6903c8fc3085..7b4be3fd5cac 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -168,6 +168,44 @@ void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
}
}
+/*
+ * There are a number of times that we loop over NR_MEM_SECTIONS,
+ * looking for section_present() on each. But, when we have very
+ * large physical address spaces, NR_MEM_SECTIONS can also be
+ * very large which makes the loops quite long.
+ *
+ * Keeping track of this gives us an easy way to break out of
+ * those loops early.
+ */
+int __highest_present_section_nr;
+static void section_mark_present(struct mem_section *ms)
+{
+ int section_nr = __section_nr(ms);
+
+ if (section_nr > __highest_present_section_nr)
+ __highest_present_section_nr = section_nr;
+
+ ms->section_mem_map |= SECTION_MARKED_PRESENT;
+}
+
+static inline int next_present_section_nr(int section_nr)
+{
+ do {
+ section_nr++;
+ if (present_section_nr(section_nr))
+ return section_nr;
+ } while ((section_nr < NR_MEM_SECTIONS) &&
+ (section_nr <= __highest_present_section_nr));
+
+ return -1;
+}
+#define for_each_present_section_nr(start, section_nr) \
+ for (section_nr = next_present_section_nr(start-1); \
+ ((section_nr >= 0) && \
+ (section_nr < NR_MEM_SECTIONS) && \
+ (section_nr <= __highest_present_section_nr)); \
+ section_nr = next_present_section_nr(section_nr))
+
/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
@@ -183,9 +221,11 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
set_section_nid(section, nid);
ms = __nr_to_section(section);
- if (!ms->section_mem_map)
+ if (!ms->section_mem_map) {
ms->section_mem_map = sparse_encode_early_nid(nid) |
- SECTION_MARKED_PRESENT;
+ SECTION_IS_ONLINE;
+ section_mark_present(ms);
+ }
}
}
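
The iterator added above avoids walking all NR_MEM_SECTIONS by remembering the highest section number ever marked present and stopping there. A minimal userspace sketch of the same early-exit idea over a plain bitmap (illustrative only, not the kernel's mem_section machinery):

#include <stdbool.h>

#define NR_SECTIONS (1 << 16)

static bool present[NR_SECTIONS];
static int highest_present = -1;

static void mark_present(int nr)
{
        present[nr] = true;
        if (nr > highest_present)
                highest_present = nr;            /* remember the upper bound */
}

static int next_present(int nr)
{
        do {
                nr++;
                if (nr < NR_SECTIONS && present[nr])
                        return nr;
        } while (nr <= highest_present);         /* stop well before NR_SECTIONS */
        return -1;
}

#define for_each_present(nr) \
        for ((nr) = next_present(-1); (nr) >= 0; (nr) = next_present(nr))

int main(void)
{
        int nr, hits = 0;

        mark_present(3);
        mark_present(40000);
        for_each_present(nr)
                hits++;
        return hits == 2 ? 0 : 1;
}
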
@@ -476,23 +516,19 @@ static void __init alloc_usemap_and_memmap(void (*alloc_func)
int nodeid_begin = 0;
unsigned long pnum_begin = 0;
- for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+ for_each_present_section_nr(0, pnum) {
struct mem_section *ms;
- if (!present_section_nr(pnum))
- continue;
ms = __nr_to_section(pnum);
nodeid_begin = sparse_early_nid(ms);
pnum_begin = pnum;
break;
}
map_count = 1;
- for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
+ for_each_present_section_nr(pnum_begin + 1, pnum) {
struct mem_section *ms;
int nodeid;
- if (!present_section_nr(pnum))
- continue;
ms = __nr_to_section(pnum);
nodeid = sparse_early_nid(ms);
if (nodeid == nodeid_begin) {
@@ -561,10 +597,7 @@ void __init sparse_init(void)
(void *)map_map);
#endif
- for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
- if (!present_section_nr(pnum))
- continue;
-
+ for_each_present_section_nr(0, pnum) {
usemap = usemap_map[pnum];
if (!usemap)
continue;
@@ -590,6 +623,48 @@ void __init sparse_init(void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
+
+/* Mark all memory sections within the pfn range as online */
+void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
+{
+ unsigned long pfn;
+
+ for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ unsigned long section_nr = pfn_to_section_nr(pfn);
+ struct mem_section *ms;
+
+ /* onlining code should never touch invalid ranges */
+ if (WARN_ON(!valid_section_nr(section_nr)))
+ continue;
+
+ ms = __nr_to_section(section_nr);
+ ms->section_mem_map |= SECTION_IS_ONLINE;
+ }
+}
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+/* Mark all memory sections within the pfn range as offline */
+void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
+{
+ unsigned long pfn;
+
+ for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ unsigned long section_nr = pfn_to_section_nr(pfn);
+ struct mem_section *ms;
+
+ /*
+ * TODO this needs some double checking. Offlining code makes
+ * sure to check pfn_valid but those checks might be just bogus
+ */
+ if (WARN_ON(!valid_section_nr(section_nr)))
+ continue;
+
+ ms = __nr_to_section(section_nr);
+ ms->section_mem_map &= ~SECTION_IS_ONLINE;
+ }
+}
+#endif
+
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
@@ -686,10 +761,9 @@ static void free_map_bootmem(struct page *memmap)
* set. If this is <=0, then that means that the passed-in
* map was not consumed and must be freed.
*/
-int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)
+int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn)
{
unsigned long section_nr = pfn_to_section_nr(start_pfn);
- struct pglist_data *pgdat = zone->zone_pgdat;
struct mem_section *ms;
struct page *memmap;
unsigned long *usemap;
@@ -722,7 +796,7 @@ int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)
memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);
- ms->section_mem_map |= SECTION_MARKED_PRESENT;
+ section_mark_present(ms);
ret = sparse_init_one_section(ms, section_nr, memmap, usemap);
diff --git a/mm/swap.c b/mm/swap.c
index 98d08b4579fa..4f44dbd7f780 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -591,6 +591,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
+ count_memcg_page_event(page, PGLAZYFREE);
update_page_reclaim_stat(lruvec, 1, 0);
}
}
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index 3405b4ee1757..fcd2740f4ed7 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -61,21 +61,27 @@ not_enough_page:
return -ENOMEM;
}
+static struct swap_cgroup *__lookup_swap_cgroup(struct swap_cgroup_ctrl *ctrl,
+ pgoff_t offset)
+{
+ struct page *mappage;
+ struct swap_cgroup *sc;
+
+ mappage = ctrl->map[offset / SC_PER_PAGE];
+ sc = page_address(mappage);
+ return sc + offset % SC_PER_PAGE;
+}
+
static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
struct swap_cgroup_ctrl **ctrlp)
{
pgoff_t offset = swp_offset(ent);
struct swap_cgroup_ctrl *ctrl;
- struct page *mappage;
- struct swap_cgroup *sc;
ctrl = &swap_cgroup_ctrl[swp_type(ent)];
if (ctrlp)
*ctrlp = ctrl;
-
- mappage = ctrl->map[offset / SC_PER_PAGE];
- sc = page_address(mappage);
- return sc + offset % SC_PER_PAGE;
+ return __lookup_swap_cgroup(ctrl, offset);
}
/**
@@ -108,25 +114,39 @@ unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
}
/**
- * swap_cgroup_record - record mem_cgroup for this swp_entry.
- * @ent: swap entry to be recorded into
+ * swap_cgroup_record - record mem_cgroup for a set of swap entries
+ * @ent: the first swap entry to be recorded into
* @id: mem_cgroup to be recorded
+ * @nr_ents: number of swap entries to be recorded
*
* Returns old value at success, 0 at failure.
* (Of course, old value can be 0.)
*/
-unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
+unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
+ unsigned int nr_ents)
{
struct swap_cgroup_ctrl *ctrl;
struct swap_cgroup *sc;
unsigned short old;
unsigned long flags;
+ pgoff_t offset = swp_offset(ent);
+ pgoff_t end = offset + nr_ents;
sc = lookup_swap_cgroup(ent, &ctrl);
spin_lock_irqsave(&ctrl->lock, flags);
old = sc->id;
- sc->id = id;
+ for (;;) {
+ VM_BUG_ON(sc->id != old);
+ sc->id = id;
+ offset++;
+ if (offset == end)
+ break;
+ if (offset % SC_PER_PAGE)
+ sc++;
+ else
+ sc = __lookup_swap_cgroup(ctrl, offset);
+ }
spin_unlock_irqrestore(&ctrl->lock, flags);
return old;
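
swap_cgroup_record() above now writes one id across nr_ents consecutive entries, advancing the swap_cgroup pointer within the current backing page and only doing a fresh lookup when the offset wraps onto the next page. A standalone sketch of that boundary walk, with plain arrays standing in for the per-type map pages (all names illustrative, and SC_PER_PAGE shrunk so the wrap is visible):

#include <stdio.h>

#define SC_PER_PAGE 4

static unsigned short pages[3][SC_PER_PAGE];     /* three backing "pages" */

static unsigned short *lookup(unsigned long offset)
{
        return &pages[offset / SC_PER_PAGE][offset % SC_PER_PAGE];
}

static void record_range(unsigned long offset, unsigned short id, unsigned int nr)
{
        unsigned long end = offset + nr;
        unsigned short *sc = lookup(offset);

        for (;;) {
                *sc = id;
                offset++;
                if (offset == end)
                        break;
                if (offset % SC_PER_PAGE)
                        sc++;                    /* still inside the same page */
                else
                        sc = lookup(offset);     /* crossed into the next page */
        }
}

int main(void)
{
        record_range(2, 7, 8);                   /* spans all three pages */
        printf("%u %u\n", pages[0][2], pages[2][1]);    /* both print 7 */
        return 0;
}
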
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 58f6c78f1dad..90c1032a8ac3 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -263,7 +263,8 @@ static int refill_swap_slots_cache(struct swap_slots_cache *cache)
cache->cur = 0;
if (swap_slot_cache_active)
- cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, cache->slots);
+ cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, false,
+ cache->slots);
return cache->nr;
}
@@ -301,11 +302,19 @@ direct_free:
return 0;
}
-swp_entry_t get_swap_page(void)
+swp_entry_t get_swap_page(struct page *page)
{
swp_entry_t entry, *pentry;
struct swap_slots_cache *cache;
+ entry.val = 0;
+
+ if (PageTransHuge(page)) {
+ if (IS_ENABLED(CONFIG_THP_SWAP))
+ get_swap_pages(1, true, &entry);
+ return entry;
+ }
+
/*
* Preemption is allowed here, because we may sleep
* in refill_swap_slots_cache(). But it is safe, because
@@ -317,7 +326,6 @@ swp_entry_t get_swap_page(void)
*/
cache = raw_cpu_ptr(&swp_slots);
- entry.val = 0;
if (check_cache_active()) {
mutex_lock(&cache->alloc_lock);
if (cache->slots) {
@@ -337,7 +345,7 @@ repeat:
return entry;
}
- get_swap_pages(1, &entry);
+ get_swap_pages(1, false, &entry);
return entry;
}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 539b8885e3d1..9c71b6b2562f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -19,6 +19,7 @@
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
+#include <linux/huge_mm.h>
#include <asm/pgtable.h>
@@ -38,6 +39,7 @@ struct address_space *swapper_spaces[MAX_SWAPFILES];
static unsigned int nr_swapper_spaces[MAX_SWAPFILES];
#define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0)
+#define ADD_CACHE_INFO(x, nr) do { swap_cache_info.x += (nr); } while (0)
static struct {
unsigned long add_total;
@@ -90,39 +92,46 @@ void show_swap_cache_info(void)
*/
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
- int error;
+ int error, i, nr = hpage_nr_pages(page);
struct address_space *address_space;
+ pgoff_t idx = swp_offset(entry);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageSwapCache(page), page);
VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
- get_page(page);
+ page_ref_add(page, nr);
SetPageSwapCache(page);
- set_page_private(page, entry.val);
address_space = swap_address_space(entry);
spin_lock_irq(&address_space->tree_lock);
- error = radix_tree_insert(&address_space->page_tree,
- swp_offset(entry), page);
- if (likely(!error)) {
- address_space->nrpages++;
- __inc_node_page_state(page, NR_FILE_PAGES);
- INC_CACHE_INFO(add_total);
+ for (i = 0; i < nr; i++) {
+ set_page_private(page + i, entry.val + i);
+ error = radix_tree_insert(&address_space->page_tree,
+ idx + i, page + i);
+ if (unlikely(error))
+ break;
}
- spin_unlock_irq(&address_space->tree_lock);
-
- if (unlikely(error)) {
+ if (likely(!error)) {
+ address_space->nrpages += nr;
+ __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
+ ADD_CACHE_INFO(add_total, nr);
+ } else {
/*
* Only the context which have set SWAP_HAS_CACHE flag
* would call add_to_swap_cache().
* So add_to_swap_cache() doesn't returns -EEXIST.
*/
VM_BUG_ON(error == -EEXIST);
- set_page_private(page, 0UL);
+ set_page_private(page + i, 0UL);
+ while (i--) {
+ radix_tree_delete(&address_space->page_tree, idx + i);
+ set_page_private(page + i, 0UL);
+ }
ClearPageSwapCache(page);
- put_page(page);
+ page_ref_sub(page, nr);
}
+ spin_unlock_irq(&address_space->tree_lock);
return error;
}
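
The __add_to_swap_cache() hunk above inserts all hpage_nr_pages(page) subpages into the radix tree under a single tree_lock hold and, if an insertion fails part-way, deletes exactly the entries that already went in before giving back the extra references. A generic userspace sketch of that insert-all-or-unwind shape (a flat array stands in for the radix tree; nothing here is kernel API):

#include <errno.h>
#include <stddef.h>

#define SLOTS 64

static void *table[SLOTS];

static int table_insert(size_t idx, void *item)
{
        if (idx >= SLOTS || table[idx])
                return -EEXIST;
        table[idx] = item;
        return 0;
}

static int insert_range(size_t idx, void **items, size_t nr)
{
        size_t i;
        int err = 0;

        for (i = 0; i < nr; i++) {
                err = table_insert(idx + i, items[i]);
                if (err)
                        break;
        }
        if (err) {
                while (i--)                      /* unwind only what went in */
                        table[idx + i] = NULL;
        }
        return err;
}
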
@@ -132,7 +141,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
int error;
- error = radix_tree_maybe_preload(gfp_mask);
+ error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
if (!error) {
error = __add_to_swap_cache(page, entry);
radix_tree_preload_end();
@@ -146,8 +155,10 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
*/
void __delete_from_swap_cache(struct page *page)
{
- swp_entry_t entry;
struct address_space *address_space;
+ int i, nr = hpage_nr_pages(page);
+ swp_entry_t entry;
+ pgoff_t idx;
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageSwapCache(page), page);
@@ -155,12 +166,15 @@ void __delete_from_swap_cache(struct page *page)
entry.val = page_private(page);
address_space = swap_address_space(entry);
- radix_tree_delete(&address_space->page_tree, swp_offset(entry));
- set_page_private(page, 0);
+ idx = swp_offset(entry);
+ for (i = 0; i < nr; i++) {
+ radix_tree_delete(&address_space->page_tree, idx + i);
+ set_page_private(page + i, 0);
+ }
ClearPageSwapCache(page);
- address_space->nrpages--;
- __dec_node_page_state(page, NR_FILE_PAGES);
- INC_CACHE_INFO(del_total);
+ address_space->nrpages -= nr;
+ __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
+ ADD_CACHE_INFO(del_total, nr);
}
/**
@@ -170,7 +184,7 @@ void __delete_from_swap_cache(struct page *page)
* Allocate swap space for the page and add the page to the
* swap cache. Caller needs to hold the page lock.
*/
-int add_to_swap(struct page *page, struct list_head *list)
+int add_to_swap(struct page *page)
{
swp_entry_t entry;
int err;
@@ -178,20 +192,12 @@ int add_to_swap(struct page *page, struct list_head *list)
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageUptodate(page), page);
- entry = get_swap_page();
+ entry = get_swap_page(page);
if (!entry.val)
return 0;
- if (mem_cgroup_try_charge_swap(page, entry)) {
- swapcache_free(entry);
- return 0;
- }
-
- if (unlikely(PageTransHuge(page)))
- if (unlikely(split_huge_page_to_list(page, list))) {
- swapcache_free(entry);
- return 0;
- }
+ if (mem_cgroup_try_charge_swap(page, entry))
+ goto fail;
/*
* Radix-tree node allocations from PF_MEMALLOC contexts could
@@ -206,17 +212,19 @@ int add_to_swap(struct page *page, struct list_head *list)
*/
err = add_to_swap_cache(page, entry,
__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
-
- if (!err) {
- return 1;
- } else { /* -ENOMEM radix-tree allocation failure */
+ /* -ENOMEM radix-tree allocation failure */
+ if (err)
/*
* add_to_swap_cache() doesn't return -EEXIST, so we can safely
* clear SWAP_HAS_CACHE flag.
*/
- swapcache_free(entry);
- return 0;
- }
+ goto fail;
+
+ return 1;
+
+fail:
+ put_swap_page(page, entry);
+ return 0;
}
/*
@@ -237,8 +245,8 @@ void delete_from_swap_cache(struct page *page)
__delete_from_swap_cache(page);
spin_unlock_irq(&address_space->tree_lock);
- swapcache_free(entry);
- put_page(page);
+ put_swap_page(page, entry);
+ page_ref_sub(page, hpage_nr_pages(page));
}
/*
@@ -295,7 +303,7 @@ struct page * lookup_swap_cache(swp_entry_t entry)
page = find_get_page(swap_address_space(entry), swp_offset(entry));
- if (page) {
+ if (page && likely(!PageTransCompound(page))) {
INC_CACHE_INFO(find_success);
if (TestClearPageReadahead(page))
atomic_inc(&swapin_readahead_hits);
@@ -389,7 +397,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* add_to_swap_cache() doesn't return -EEXIST, so we can safely
* clear SWAP_HAS_CACHE flag.
*/
- swapcache_free(entry);
+ put_swap_page(new_page, entry);
} while (err != -ENOMEM);
if (new_page)
@@ -506,7 +514,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
gfp_mask, vma, addr);
if (!page)
continue;
- if (offset != entry_offset)
+ if (offset != entry_offset && likely(!PageTransCompound(page)))
SetPageReadahead(page);
put_page(page);
}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 4f6cba1b6632..811d90e1c929 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -37,6 +37,7 @@
#include <linux/swapfile.h>
#include <linux/export.h>
#include <linux/swap_slots.h>
+#include <linux/sort.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
@@ -199,7 +200,11 @@ static void discard_swap_cluster(struct swap_info_struct *si,
}
}
+#ifdef CONFIG_THP_SWAP
+#define SWAPFILE_CLUSTER HPAGE_PMD_NR
+#else
#define SWAPFILE_CLUSTER 256
+#endif
#define LATENCY_LIMIT 256
static inline void cluster_set_flag(struct swap_cluster_info *info,
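
With CONFIG_THP_SWAP the cluster above is sized to HPAGE_PMD_NR entries so one PMD-mapped huge page maps onto exactly one swap cluster; the non-THP default of 256 entries is unchanged. A back-of-envelope check under assumed (not derived) x86-64 constants:

#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096;          /* assumed base page size */
        unsigned long pmd_size  = 2ul << 20;     /* assumed PMD huge page size */

        printf("HPAGE_PMD_NR = %lu\n", pmd_size / page_size);   /* 512 entries per cluster */
        return 0;
}
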
@@ -374,6 +379,14 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
schedule_work(&si->discard_work);
}
+static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
+{
+ struct swap_cluster_info *ci = si->cluster_info;
+
+ cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE);
+ cluster_list_add_tail(&si->free_clusters, ci, idx);
+}
+
/*
* Doing discard actually. After a cluster discard is finished, the cluster
* will be added to free cluster list. caller should hold si->lock.
@@ -394,10 +407,7 @@ static void swap_do_scheduled_discard(struct swap_info_struct *si)
spin_lock(&si->lock);
ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
- cluster_set_flag(ci, CLUSTER_FLAG_FREE);
- unlock_cluster(ci);
- cluster_list_add_tail(&si->free_clusters, info, idx);
- ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
+ __free_cluster(si, idx);
memset(si->swap_map + idx * SWAPFILE_CLUSTER,
0, SWAPFILE_CLUSTER);
unlock_cluster(ci);
@@ -415,6 +425,34 @@ static void swap_discard_work(struct work_struct *work)
spin_unlock(&si->lock);
}
+static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
+{
+ struct swap_cluster_info *ci = si->cluster_info;
+
+ VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
+ cluster_list_del_first(&si->free_clusters, ci);
+ cluster_set_count_flag(ci + idx, 0, 0);
+}
+
+static void free_cluster(struct swap_info_struct *si, unsigned long idx)
+{
+ struct swap_cluster_info *ci = si->cluster_info + idx;
+
+ VM_BUG_ON(cluster_count(ci) != 0);
+ /*
+ * If the swap is discardable, prepare to discard the cluster
+ * instead of freeing it immediately. The cluster will be freed
+ * after discard.
+ */
+ if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
+ (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
+ swap_cluster_schedule_discard(si, idx);
+ return;
+ }
+
+ __free_cluster(si, idx);
+}
+
/*
* The cluster corresponding to page_nr will be used. The cluster will be
* removed from free cluster list and its usage counter will be increased.
@@ -426,11 +464,8 @@ static void inc_cluster_info_page(struct swap_info_struct *p,
if (!cluster_info)
return;
- if (cluster_is_free(&cluster_info[idx])) {
- VM_BUG_ON(cluster_list_first(&p->free_clusters) != idx);
- cluster_list_del_first(&p->free_clusters, cluster_info);
- cluster_set_count_flag(&cluster_info[idx], 0, 0);
- }
+ if (cluster_is_free(&cluster_info[idx]))
+ alloc_cluster(p, idx);
VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
cluster_set_count(&cluster_info[idx],
@@ -454,21 +489,8 @@ static void dec_cluster_info_page(struct swap_info_struct *p,
cluster_set_count(&cluster_info[idx],
cluster_count(&cluster_info[idx]) - 1);
- if (cluster_count(&cluster_info[idx]) == 0) {
- /*
- * If the swap is discardable, prepare discard the cluster
- * instead of free it immediately. The cluster will be freed
- * after discard.
- */
- if ((p->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
- (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
- swap_cluster_schedule_discard(p, idx);
- return;
- }
-
- cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
- cluster_list_add_tail(&p->free_clusters, cluster_info, idx);
- }
+ if (cluster_count(&cluster_info[idx]) == 0)
+ free_cluster(p, idx);
}
/*
@@ -558,6 +580,60 @@ new_cluster:
return found_free;
}
+static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
+ unsigned int nr_entries)
+{
+ unsigned int end = offset + nr_entries - 1;
+
+ if (offset == si->lowest_bit)
+ si->lowest_bit += nr_entries;
+ if (end == si->highest_bit)
+ si->highest_bit -= nr_entries;
+ si->inuse_pages += nr_entries;
+ if (si->inuse_pages == si->pages) {
+ si->lowest_bit = si->max;
+ si->highest_bit = 0;
+ spin_lock(&swap_avail_lock);
+ plist_del(&si->avail_list, &swap_avail_head);
+ spin_unlock(&swap_avail_lock);
+ }
+}
+
+static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
+ unsigned int nr_entries)
+{
+ unsigned long end = offset + nr_entries - 1;
+ void (*swap_slot_free_notify)(struct block_device *, unsigned long);
+
+ if (offset < si->lowest_bit)
+ si->lowest_bit = offset;
+ if (end > si->highest_bit) {
+ bool was_full = !si->highest_bit;
+
+ si->highest_bit = end;
+ if (was_full && (si->flags & SWP_WRITEOK)) {
+ spin_lock(&swap_avail_lock);
+ WARN_ON(!plist_node_empty(&si->avail_list));
+ if (plist_node_empty(&si->avail_list))
+ plist_add(&si->avail_list, &swap_avail_head);
+ spin_unlock(&swap_avail_lock);
+ }
+ }
+ atomic_long_add(nr_entries, &nr_swap_pages);
+ si->inuse_pages -= nr_entries;
+ if (si->flags & SWP_BLKDEV)
+ swap_slot_free_notify =
+ si->bdev->bd_disk->fops->swap_slot_free_notify;
+ else
+ swap_slot_free_notify = NULL;
+ while (offset <= end) {
+ frontswap_invalidate_page(si->type, offset);
+ if (swap_slot_free_notify)
+ swap_slot_free_notify(si->bdev, offset);
+ offset++;
+ }
+}
+
static int scan_swap_map_slots(struct swap_info_struct *si,
unsigned char usage, int nr,
swp_entry_t slots[])
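swap_range_alloc()/swap_range_free() above factor the lowest_bit/highest_bit/inuse_pages bookkeeping out of the single-slot paths so a whole cluster can be accounted in one call. Below is a reduced userspace model of just that bookkeeping; the struct is a mock-up of the relevant swap_info_struct fields, and device-list handling, locking, frontswap and the free-notify callback are deliberately left out.

#include <stdio.h>

struct swap_range {			/* mock of the relevant swap_info_struct fields */
	unsigned long lowest_bit;
	unsigned long highest_bit;
	unsigned long inuse_pages;
	unsigned long pages;
};

static void range_alloc(struct swap_range *si, unsigned long offset,
			unsigned int nr_entries)
{
	unsigned long end = offset + nr_entries - 1;

	if (offset == si->lowest_bit)
		si->lowest_bit += nr_entries;
	if (end == si->highest_bit)
		si->highest_bit -= nr_entries;
	si->inuse_pages += nr_entries;
}

static void range_free(struct swap_range *si, unsigned long offset,
		       unsigned int nr_entries)
{
	unsigned long end = offset + nr_entries - 1;

	if (offset < si->lowest_bit)
		si->lowest_bit = offset;
	if (end > si->highest_bit)
		si->highest_bit = end;
	si->inuse_pages -= nr_entries;
}

int main(void)
{
	struct swap_range si = { .lowest_bit = 1, .highest_bit = 1023,
				 .inuse_pages = 0, .pages = 1023 };

	range_alloc(&si, 1, 512);	/* e.g. one THP-sized cluster */
	printf("after alloc: lowest=%lu highest=%lu inuse=%lu\n",
	       si.lowest_bit, si.highest_bit, si.inuse_pages);
	range_free(&si, 1, 512);
	printf("after free:  lowest=%lu highest=%lu inuse=%lu\n",
	       si.lowest_bit, si.highest_bit, si.inuse_pages);
	return 0;
}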
@@ -676,18 +752,7 @@ checks:
inc_cluster_info_page(si, si->cluster_info, offset);
unlock_cluster(ci);
- if (offset == si->lowest_bit)
- si->lowest_bit++;
- if (offset == si->highest_bit)
- si->highest_bit--;
- si->inuse_pages++;
- if (si->inuse_pages == si->pages) {
- si->lowest_bit = si->max;
- si->highest_bit = 0;
- spin_lock(&swap_avail_lock);
- plist_del(&si->avail_list, &swap_avail_head);
- spin_unlock(&swap_avail_lock);
- }
+ swap_range_alloc(si, offset, 1);
si->cluster_next = offset + 1;
slots[n_ret++] = swp_entry(si->type, offset);
@@ -766,6 +831,52 @@ no_page:
return n_ret;
}
+#ifdef CONFIG_THP_SWAP
+static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
+{
+ unsigned long idx;
+ struct swap_cluster_info *ci;
+ unsigned long offset, i;
+ unsigned char *map;
+
+ if (cluster_list_empty(&si->free_clusters))
+ return 0;
+
+ idx = cluster_list_first(&si->free_clusters);
+ offset = idx * SWAPFILE_CLUSTER;
+ ci = lock_cluster(si, offset);
+ alloc_cluster(si, idx);
+ cluster_set_count_flag(ci, SWAPFILE_CLUSTER, 0);
+
+ map = si->swap_map + offset;
+ for (i = 0; i < SWAPFILE_CLUSTER; i++)
+ map[i] = SWAP_HAS_CACHE;
+ unlock_cluster(ci);
+ swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
+ *slot = swp_entry(si->type, offset);
+
+ return 1;
+}
+
+static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
+{
+ unsigned long offset = idx * SWAPFILE_CLUSTER;
+ struct swap_cluster_info *ci;
+
+ ci = lock_cluster(si, offset);
+ cluster_set_count_flag(ci, 0, 0);
+ free_cluster(si, idx);
+ unlock_cluster(ci);
+ swap_range_free(si, offset, SWAPFILE_CLUSTER);
+}
+#else
+static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
+{
+ VM_WARN_ON_ONCE(1);
+ return 0;
+}
+#endif /* CONFIG_THP_SWAP */
+
static unsigned long scan_swap_map(struct swap_info_struct *si,
unsigned char usage)
{
@@ -781,13 +892,17 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
}
-int get_swap_pages(int n_goal, swp_entry_t swp_entries[])
+int get_swap_pages(int n_goal, bool cluster, swp_entry_t swp_entries[])
{
+ unsigned long nr_pages = cluster ? SWAPFILE_CLUSTER : 1;
struct swap_info_struct *si, *next;
long avail_pgs;
int n_ret = 0;
- avail_pgs = atomic_long_read(&nr_swap_pages);
+ /* Only single cluster request supported */
+ WARN_ON_ONCE(n_goal > 1 && cluster);
+
+ avail_pgs = atomic_long_read(&nr_swap_pages) / nr_pages;
if (avail_pgs <= 0)
goto noswap;
@@ -797,7 +912,7 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[])
if (n_goal > avail_pgs)
n_goal = avail_pgs;
- atomic_long_sub(n_goal, &nr_swap_pages);
+ atomic_long_sub(n_goal * nr_pages, &nr_swap_pages);
spin_lock(&swap_avail_lock);
@@ -823,10 +938,13 @@ start_over:
spin_unlock(&si->lock);
goto nextsi;
}
- n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
- n_goal, swp_entries);
+ if (cluster)
+ n_ret = swap_alloc_cluster(si, swp_entries);
+ else
+ n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
+ n_goal, swp_entries);
spin_unlock(&si->lock);
- if (n_ret)
+ if (n_ret || cluster)
goto check_out;
pr_debug("scan_swap_map of si %d failed to find offset\n",
si->type);
@@ -852,7 +970,8 @@ nextsi:
check_out:
if (n_ret < n_goal)
- atomic_long_add((long) (n_goal-n_ret), &nr_swap_pages);
+ atomic_long_add((long)(n_goal - n_ret) * nr_pages,
+ &nr_swap_pages);
noswap:
return n_ret;
}
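With the new cluster argument, available space is counted in cluster-sized units and the nr_swap_pages accounting scales by nr_pages. The wrapper that decides when to pass cluster=true is not part of the hunks shown here; the sketch below is only a hypothetical userspace model of the accounting, with the swp_entries[] output array omitted and get_swap_pages() stubbed out.

#include <stdbool.h>
#include <stdio.h>

#define SWAPFILE_CLUSTER 512L			/* illustrative HPAGE_PMD_NR */

static long nr_swap_pages = 4096;		/* mock global pool of free slots */

/* Stub mirroring the accounting of the patched function: one cluster
 * request consumes SWAPFILE_CLUSTER slots, a non-cluster request one slot
 * per goal. Returns how many allocations succeeded. */
static int get_swap_pages(int n_goal, bool cluster)
{
	long nr_pages = cluster ? SWAPFILE_CLUSTER : 1;
	long avail = nr_swap_pages / nr_pages;

	if (avail <= 0)
		return 0;
	if (n_goal > avail)
		n_goal = avail;
	nr_swap_pages -= (long)n_goal * nr_pages;
	return n_goal;
}

int main(void)
{
	int got;

	/* A hypothetical THP caller asks for exactly one cluster... */
	got = get_swap_pages(1, true);
	printf("THP request:  got %d, pool now %ld\n", got, nr_swap_pages);

	/* ...while base-page callers keep requesting single slots. */
	got = get_swap_pages(64, false);
	printf("base request: got %d, pool now %ld\n", got, nr_swap_pages);
	return 0;
}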
@@ -1008,32 +1127,8 @@ static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
dec_cluster_info_page(p, p->cluster_info, offset);
unlock_cluster(ci);
- mem_cgroup_uncharge_swap(entry);
- if (offset < p->lowest_bit)
- p->lowest_bit = offset;
- if (offset > p->highest_bit) {
- bool was_full = !p->highest_bit;
-
- p->highest_bit = offset;
- if (was_full && (p->flags & SWP_WRITEOK)) {
- spin_lock(&swap_avail_lock);
- WARN_ON(!plist_node_empty(&p->avail_list));
- if (plist_node_empty(&p->avail_list))
- plist_add(&p->avail_list,
- &swap_avail_head);
- spin_unlock(&swap_avail_lock);
- }
- }
- atomic_long_inc(&nr_swap_pages);
- p->inuse_pages--;
- frontswap_invalidate_page(p->type, offset);
- if (p->flags & SWP_BLKDEV) {
- struct gendisk *disk = p->bdev->bd_disk;
-
- if (disk->fops->swap_slot_free_notify)
- disk->fops->swap_slot_free_notify(p->bdev,
- offset);
- }
+ mem_cgroup_uncharge_swap(entry, 1);
+ swap_range_free(p, offset, 1);
}
/*
@@ -1054,7 +1149,7 @@ void swap_free(swp_entry_t entry)
/*
* Called after dropping swapcache to decrease refcnt to swap entries.
*/
-void swapcache_free(swp_entry_t entry)
+static void swapcache_free(swp_entry_t entry)
{
struct swap_info_struct *p;
@@ -1065,6 +1160,52 @@ void swapcache_free(swp_entry_t entry)
}
}
+#ifdef CONFIG_THP_SWAP
+static void swapcache_free_cluster(swp_entry_t entry)
+{
+ unsigned long offset = swp_offset(entry);
+ unsigned long idx = offset / SWAPFILE_CLUSTER;
+ struct swap_cluster_info *ci;
+ struct swap_info_struct *si;
+ unsigned char *map;
+ unsigned int i;
+
+ si = swap_info_get(entry);
+ if (!si)
+ return;
+
+ ci = lock_cluster(si, offset);
+ map = si->swap_map + offset;
+ for (i = 0; i < SWAPFILE_CLUSTER; i++) {
+ VM_BUG_ON(map[i] != SWAP_HAS_CACHE);
+ map[i] = 0;
+ }
+ unlock_cluster(ci);
+ mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
+ swap_free_cluster(si, idx);
+ spin_unlock(&si->lock);
+}
+#else
+static inline void swapcache_free_cluster(swp_entry_t entry)
+{
+}
+#endif /* CONFIG_THP_SWAP */
+
+void put_swap_page(struct page *page, swp_entry_t entry)
+{
+ if (!PageTransHuge(page))
+ swapcache_free(entry);
+ else
+ swapcache_free_cluster(entry);
+}
+
+static int swp_entry_cmp(const void *ent1, const void *ent2)
+{
+ const swp_entry_t *e1 = ent1, *e2 = ent2;
+
+ return (int)swp_type(*e1) - (int)swp_type(*e2);
+}
+
void swapcache_free_entries(swp_entry_t *entries, int n)
{
struct swap_info_struct *p, *prev;
@@ -1075,6 +1216,14 @@ void swapcache_free_entries(swp_entry_t *entries, int n)
prev = NULL;
p = NULL;
+
+ /*
+ * Sort swap entries by swap device, so each lock is only taken once.
+ * nr_swapfiles isn't absolutely correct, but the overhead of sort() is
+ * so low that it isn't necessary to optimize further.
+ */
+ if (nr_swapfiles > 1)
+ sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL);
for (i = 0; i < n; ++i) {
p = swap_info_get_cont(entries[i], prev);
if (p)
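swapcache_free_entries() now sorts the batch by swap device so that, per the comment above, each device's lock is taken at most once per batch. The same idea in plain userspace C, with qsort() standing in for the kernel's sort() and a small int device id standing in for swp_type():

#include <stdio.h>
#include <stdlib.h>

struct entry {
	int type;		/* which swap device the entry belongs to */
	unsigned long offset;
};

static int entry_cmp(const void *a, const void *b)
{
	const struct entry *e1 = a, *e2 = b;

	return e1->type - e2->type;
}

int main(void)
{
	struct entry batch[] = {
		{ 1, 10 }, { 0, 7 }, { 1, 11 }, { 0, 3 }, { 2, 5 },
	};
	size_t i, n = sizeof(batch) / sizeof(batch[0]);
	int prev = -1;

	qsort(batch, n, sizeof(batch[0]), entry_cmp);

	/* After sorting, entries of the same device are adjacent, so a
	 * device's lock would be taken only once per batch. */
	for (i = 0; i < n; i++) {
		if (batch[i].type != prev)
			printf("-- switch to device %d\n", batch[i].type);
		printf("free entry (%d, %lu)\n", batch[i].type, batch[i].offset);
		prev = batch[i].type;
	}
	return 0;
}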
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ecc97f74ab18..6211a807cb31 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1770,12 +1770,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
*/
clear_vm_uninitialized_flag(area);
- /*
- * A ref_count = 2 is needed because vm_struct allocated in
- * __get_vm_area_node() contains a reference to the virtual address of
- * the vmalloc'ed block.
- */
- kmemleak_alloc(addr, real_size, 2, gfp_mask);
+ kmemleak_vmalloc(area, size, gfp_mask);
return addr;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c3c1c6ac62da..9e95fafc026b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -708,7 +708,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
mem_cgroup_swapout(page, swap);
__delete_from_swap_cache(page);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
- swapcache_free(swap);
+ put_swap_page(page, swap);
} else {
void (*freepage)(struct page *);
void *shadow = NULL;
@@ -1125,8 +1125,36 @@ static unsigned long shrink_page_list(struct list_head *page_list,
!PageSwapCache(page)) {
if (!(sc->gfp_mask & __GFP_IO))
goto keep_locked;
- if (!add_to_swap(page, page_list))
+ if (PageTransHuge(page)) {
+ /* cannot split THP, skip it */
+ if (!can_split_huge_page(page, NULL))
+ goto activate_locked;
+ /*
+ * Split pages without a PMD map right
+ * away. Chances are some or all of the
+ * tail pages can be freed without IO.
+ */
+ if (!compound_mapcount(page) &&
+ split_huge_page_to_list(page, page_list))
+ goto activate_locked;
+ }
+ if (!add_to_swap(page)) {
+ if (!PageTransHuge(page))
+ goto activate_locked;
+ /* Split THP and swap individual base pages */
+ if (split_huge_page_to_list(page, page_list))
+ goto activate_locked;
+ if (!add_to_swap(page))
+ goto activate_locked;
+ }
+
+ /* XXX: We don't support THP writes */
+ if (PageTransHuge(page) &&
+ split_huge_page_to_list(page, page_list)) {
+ delete_from_swap_cache(page);
goto activate_locked;
+ }
+
may_enter_fs = 1;
/* Adding to swap updated mapping */
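The block above decides, for an anonymous THP headed to swap, whether to keep it whole, split it up front, or fall back to splitting after a failed cluster allocation. The compact userspace model below mirrors only that decision order; every input is a mock flag standing in for a kernel predicate, not a kernel call.

#include <stdbool.h>
#include <stdio.h>

/*
 * Mock flags and the kernel predicates they stand in for:
 *   thp              - PageTransHuge(page)
 *   can_split        - can_split_huge_page()
 *   pmd_mapped       - compound_mapcount(page) != 0
 *   cluster_alloc_ok - add_to_swap() succeeding for the whole THP
 *   slot_alloc_ok    - add_to_swap() succeeding for a base page
 *   split_ok         - split_huge_page_to_list() succeeding
 */
static const char *swap_out_plan(bool thp, bool can_split, bool pmd_mapped,
				 bool cluster_alloc_ok, bool slot_alloc_ok,
				 bool split_ok)
{
	if (thp) {
		if (!can_split)
			return "activate: THP cannot be split";
		if (!pmd_mapped) {
			/* Split right away; tail pages may be freeable
			 * without any I/O. */
			if (!split_ok)
				return "activate: early split failed";
			thp = false;
		}
	}

	if (thp ? !cluster_alloc_ok : !slot_alloc_ok) {
		if (!thp)
			return "activate: no swap slot";
		/* Cluster allocation failed: split and swap base pages. */
		if (!split_ok || !slot_alloc_ok)
			return "activate: fallback split/alloc failed";
		return "pageout base pages (fallback split)";
	}

	/* XXX in the patch: THP writeback is unsupported, so a THP that did
	 * get a cluster is still split (and on failure dropped from the
	 * swap cache) before being written out. */
	if (thp && !split_ok)
		return "activate: split before writeback failed";
	return thp ? "pageout base pages (cluster allocated, then split)"
		   : "pageout base pages";
}

int main(void)
{
	printf("%s\n", swap_out_plan(true, true, true, true, true, true));
	printf("%s\n", swap_out_plan(true, true, true, false, true, true));
	printf("%s\n", swap_out_plan(true, false, true, false, false, false));
	printf("%s\n", swap_out_plan(false, true, false, false, true, true));
	return 0;
}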
@@ -1266,6 +1294,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
}
count_vm_event(PGLAZYFREED);
+ count_memcg_page_event(page, PGLAZYFREED);
} else if (!mapping || !__remove_mapping(mapping, page, true))
goto keep_locked;
/*
@@ -1295,6 +1324,7 @@ activate_locked:
if (!PageMlocked(page)) {
SetPageActive(page);
pgactivate++;
+ count_memcg_page_event(page, PGACTIVATE);
}
keep_locked:
unlock_page(page);
@@ -1734,11 +1764,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
reclaim_stat->recent_scanned[file] += nr_taken;
- if (global_reclaim(sc)) {
- if (current_is_kswapd())
+ if (current_is_kswapd()) {
+ if (global_reclaim(sc))
__count_vm_events(PGSCAN_KSWAPD, nr_scanned);
- else
+ count_memcg_events(lruvec_memcg(lruvec), PGSCAN_KSWAPD,
+ nr_scanned);
+ } else {
+ if (global_reclaim(sc))
__count_vm_events(PGSCAN_DIRECT, nr_scanned);
+ count_memcg_events(lruvec_memcg(lruvec), PGSCAN_DIRECT,
+ nr_scanned);
}
spin_unlock_irq(&pgdat->lru_lock);
@@ -1750,11 +1785,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
spin_lock_irq(&pgdat->lru_lock);
- if (global_reclaim(sc)) {
- if (current_is_kswapd())
+ if (current_is_kswapd()) {
+ if (global_reclaim(sc))
__count_vm_events(PGSTEAL_KSWAPD, nr_reclaimed);
- else
+ count_memcg_events(lruvec_memcg(lruvec), PGSTEAL_KSWAPD,
+ nr_reclaimed);
+ } else {
+ if (global_reclaim(sc))
__count_vm_events(PGSTEAL_DIRECT, nr_reclaimed);
+ count_memcg_events(lruvec_memcg(lruvec), PGSTEAL_DIRECT,
+ nr_reclaimed);
}
putback_inactive_pages(lruvec, &page_list);
@@ -1899,8 +1939,11 @@ static unsigned move_active_pages_to_lru(struct lruvec *lruvec,
}
}
- if (!is_active_lru(lru))
+ if (!is_active_lru(lru)) {
__count_vm_events(PGDEACTIVATE, nr_moved);
+ count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
+ nr_moved);
+ }
return nr_moved;
}
@@ -1938,6 +1981,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
reclaim_stat->recent_scanned[file] += nr_taken;
__count_vm_events(PGREFILL, nr_scanned);
+ count_memcg_events(lruvec_memcg(lruvec), PGREFILL, nr_scanned);
spin_unlock_irq(&pgdat->lru_lock);
@@ -2967,7 +3011,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
unsigned long nr_reclaimed;
struct scan_control sc = {
.nr_to_reclaim = SWAP_CLUSTER_MAX,
- .gfp_mask = (gfp_mask = current_gfp_context(gfp_mask)),
+ .gfp_mask = current_gfp_context(gfp_mask),
.reclaim_idx = gfp_zone(gfp_mask),
.order = order,
.nodemask = nodemask,
@@ -2982,12 +3026,12 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
* 1 is returned so that the page allocator does not OOM kill at this
* point.
*/
- if (throttle_direct_reclaim(gfp_mask, zonelist, nodemask))
+ if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
return 1;
trace_mm_vmscan_direct_reclaim_begin(order,
sc.may_writepage,
- gfp_mask,
+ sc.gfp_mask,
sc.reclaim_idx);
nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
@@ -3774,17 +3818,16 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
const unsigned long nr_pages = 1 << order;
struct task_struct *p = current;
struct reclaim_state reclaim_state;
- int classzone_idx = gfp_zone(gfp_mask);
unsigned int noreclaim_flag;
struct scan_control sc = {
.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
- .gfp_mask = (gfp_mask = current_gfp_context(gfp_mask)),
+ .gfp_mask = current_gfp_context(gfp_mask),
.order = order,
.priority = NODE_RECLAIM_PRIORITY,
.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
.may_swap = 1,
- .reclaim_idx = classzone_idx,
+ .reclaim_idx = gfp_zone(gfp_mask),
};
cond_resched();
@@ -3795,7 +3838,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
*/
noreclaim_flag = memalloc_noreclaim_save();
p->flags |= PF_SWAPWRITE;
- lockdep_set_current_reclaim_state(gfp_mask);
+ lockdep_set_current_reclaim_state(sc.gfp_mask);
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
@@ -3831,7 +3874,7 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
* unmapped file backed pages.
*/
if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
- sum_zone_node_page_state(pgdat->node_id, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
+ node_page_state(pgdat, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
return NODE_RECLAIM_FULL;
/*
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 76f73670200a..744ceaeb42a0 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -928,8 +928,6 @@ const char * const vmstat_text[] = {
"nr_zone_unevictable",
"nr_zone_write_pending",
"nr_mlock",
- "nr_slab_reclaimable",
- "nr_slab_unreclaimable",
"nr_page_table_pages",
"nr_kernel_stack",
"nr_bounce",
@@ -952,6 +950,8 @@ const char * const vmstat_text[] = {
"nr_inactive_file",
"nr_active_file",
"nr_unevictable",
+ "nr_slab_reclaimable",
+ "nr_slab_unreclaimable",
"nr_isolated_anon",
"nr_isolated_file",
"workingset_refault",
@@ -1018,6 +1018,7 @@ const char * const vmstat_text[] = {
"drop_pagecache",
"drop_slab",
+ "oom_kill",
#ifdef CONFIG_NUMA_BALANCING
"numa_pte_updates",
@@ -1223,11 +1224,10 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m,
for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
struct page *page;
- if (!pfn_valid(pfn))
+ page = pfn_to_online_page(pfn);
+ if (!page)
continue;
- page = pfn_to_page(pfn);
-
/* Watch for unexpected holes punched in the memmap */
if (!memmap_valid_within(pfn, page, zone))
continue;
@@ -1322,7 +1322,7 @@ static int fragmentation_open(struct inode *inode, struct file *file)
return seq_open(file, &fragmentation_op);
}
-static const struct file_operations fragmentation_file_operations = {
+static const struct file_operations buddyinfo_file_operations = {
.open = fragmentation_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -1341,7 +1341,7 @@ static int pagetypeinfo_open(struct inode *inode, struct file *file)
return seq_open(file, &pagetypeinfo_op);
}
-static const struct file_operations pagetypeinfo_file_ops = {
+static const struct file_operations pagetypeinfo_file_operations = {
.open = pagetypeinfo_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -1463,7 +1463,7 @@ static int zoneinfo_open(struct inode *inode, struct file *file)
return seq_open(file, &zoneinfo_op);
}
-static const struct file_operations proc_zoneinfo_file_operations = {
+static const struct file_operations zoneinfo_file_operations = {
.open = zoneinfo_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -1552,7 +1552,7 @@ static int vmstat_open(struct inode *inode, struct file *file)
return seq_open(file, &vmstat_op);
}
-static const struct file_operations proc_vmstat_file_operations = {
+static const struct file_operations vmstat_file_operations = {
.open = vmstat_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -1785,10 +1785,10 @@ void __init init_mm_internals(void)
start_shepherd_timer();
#endif
#ifdef CONFIG_PROC_FS
- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
+ proc_create("buddyinfo", 0444, NULL, &buddyinfo_file_operations);
+ proc_create("pagetypeinfo", 0444, NULL, &pagetypeinfo_file_operations);
+ proc_create("vmstat", 0444, NULL, &vmstat_file_operations);
+ proc_create("zoneinfo", 0444, NULL, &zoneinfo_file_operations);
#endif
}
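The proc_create() calls now pass the octal mode directly; 0444 is the read-only-for-everyone permission that S_IRUGO expands to, which a quick userspace check confirms.

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* S_IRUGO in the kernel is S_IRUSR | S_IRGRP | S_IROTH. */
	unsigned int mode = S_IRUSR | S_IRGRP | S_IROTH;

	printf("S_IRUSR|S_IRGRP|S_IROTH = %#o\n", mode);	/* prints 0444 */
	return 0;
}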
diff --git a/mm/workingset.c b/mm/workingset.c
index b8c9ab678479..7119cd745ace 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -288,12 +288,10 @@ bool workingset_refault(void *shadow)
*/
refault_distance = (refault - eviction) & EVICTION_MASK;
- inc_node_state(pgdat, WORKINGSET_REFAULT);
- inc_memcg_state(memcg, WORKINGSET_REFAULT);
+ inc_lruvec_state(lruvec, WORKINGSET_REFAULT);
if (refault_distance <= active_file) {
- inc_node_state(pgdat, WORKINGSET_ACTIVATE);
- inc_memcg_state(memcg, WORKINGSET_ACTIVATE);
+ inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);
rcu_read_unlock();
return true;
}
@@ -474,8 +472,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
}
if (WARN_ON_ONCE(node->exceptional))
goto out_invalid;
- inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
- inc_memcg_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
+ inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
__radix_tree_delete_node(&mapping->page_tree, node,
workingset_update_node, mapping);
diff --git a/mm/zswap.c b/mm/zswap.c
index eedc27894b10..d39581a076c3 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -371,10 +371,9 @@ static int zswap_dstmem_prepare(unsigned int cpu)
u8 *dst;
dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
- if (!dst) {
- pr_err("can't allocate compressor buffer\n");
+ if (!dst)
return -ENOMEM;
- }
+
per_cpu(zswap_dstmem, cpu) = dst;
return 0;
}
@@ -515,10 +514,8 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
}
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
- if (!pool) {
- pr_err("pool alloc failed\n");
+ if (!pool)
return NULL;
- }
/* unique name for each pool specifically required by zsmalloc */
snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
@@ -1158,7 +1155,7 @@ static void zswap_frontswap_init(unsigned type)
{
struct zswap_tree *tree;
- tree = kzalloc(sizeof(struct zswap_tree), GFP_KERNEL);
+ tree = kzalloc(sizeof(*tree), GFP_KERNEL);
if (!tree) {
pr_err("alloc failed, zswap disabled for swap type %d\n", type);
return;
diff --git a/net/bridge/netfilter/ebt_nflog.c b/net/bridge/netfilter/ebt_nflog.c
index c1dc48686200..da1c2fdc08c8 100644
--- a/net/bridge/netfilter/ebt_nflog.c
+++ b/net/bridge/netfilter/ebt_nflog.c
@@ -30,6 +30,7 @@ ebt_nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
li.u.ulog.copy_len = info->len;
li.u.ulog.group = info->group;
li.u.ulog.qthreshold = info->threshold;
+ li.u.ulog.flags = 0;
nf_log_packet(net, PF_BRIDGE, xt_hooknum(par), skb, xt_in(par),
xt_out(par), &li, "%s", info->prefix);
diff --git a/net/core/dev.c b/net/core/dev.c
index 7098fba52be1..02440518dd69 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6765,7 +6765,7 @@ int dev_change_flags(struct net_device *dev, unsigned int flags)
}
EXPORT_SYMBOL(dev_change_flags);
-static int __dev_set_mtu(struct net_device *dev, int new_mtu)
+int __dev_set_mtu(struct net_device *dev, int new_mtu)
{
const struct net_device_ops *ops = dev->netdev_ops;
@@ -6775,6 +6775,7 @@ static int __dev_set_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
return 0;
}
+EXPORT_SYMBOL(__dev_set_mtu);
/**
* dev_set_mtu - Change maximum transfer unit
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index bdcfb2d04cd2..ea4f481839dd 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -2076,6 +2076,7 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
{
struct net *net = sock_net(in_skb->sk);
u32 portid = NETLINK_CB(in_skb).portid;
+ u32 in_label = LABEL_NOT_SPECIFIED;
struct nlattr *tb[RTA_MAX + 1];
u32 labels[MAX_NEW_LABELS];
struct mpls_shim_hdr *hdr;
@@ -2086,9 +2087,8 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
struct nlmsghdr *nlh;
struct sk_buff *skb;
struct mpls_nh *nh;
- int err = -EINVAL;
- u32 in_label;
u8 n_labels;
+ int err;
err = nlmsg_parse(in_nlh, sizeof(*rtm), tb, RTA_MAX,
rtm_mpls_policy, extack);
@@ -2101,11 +2101,15 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
u8 label_count;
if (nla_get_labels(tb[RTA_DST], 1, &label_count,
- &in_label, extack))
+ &in_label, extack)) {
+ err = -EINVAL;
goto errout;
+ }
- if (in_label < MPLS_LABEL_FIRST_UNRESERVED)
+ if (!mpls_label_ok(net, in_label, extack)) {
+ err = -EINVAL;
goto errout;
+ }
}
rt = mpls_route_input_rcu(net, in_label);
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index b553fdd68816..4707d997558a 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -872,6 +872,11 @@ static int dccp_init_net(struct net *net, u_int16_t proto)
return dccp_kmemdup_sysctl_table(net, pn, dn);
}
+static struct nf_proto_net *dccp_get_net_proto(struct net *net)
+{
+ return &net->ct.nf_ct_proto.dccp.pn;
+}
+
struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 __read_mostly = {
.l3proto = AF_INET,
.l4proto = IPPROTO_DCCP,
@@ -904,6 +909,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 __read_mostly = {
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
.init_net = dccp_init_net,
+ .get_net_proto = dccp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_dccp4);
@@ -939,5 +945,6 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 __read_mostly = {
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
.init_net = dccp_init_net,
+ .get_net_proto = dccp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_dccp6);
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 31c6c8ee9d5d..6eef29d2eec4 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -783,6 +783,11 @@ static int sctp_init_net(struct net *net, u_int16_t proto)
return sctp_kmemdup_sysctl_table(pn, sn);
}
+static struct nf_proto_net *sctp_get_net_proto(struct net *net)
+{
+ return &net->ct.nf_ct_proto.sctp.pn;
+}
+
struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
.l3proto = PF_INET,
.l4proto = IPPROTO_SCTP,
@@ -816,6 +821,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
.init_net = sctp_init_net,
+ .get_net_proto = sctp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_sctp4);
@@ -852,5 +858,6 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif
.init_net = sctp_init_net,
+ .get_net_proto = sctp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_sctp6);
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index c6dc8caaf5ca..c061d6eb465d 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -136,7 +136,7 @@ int rds_tcp_accept_one(struct socket *sock)
if (!sock) /* module unload or netns delete in progress */
return -ENETUNREACH;
- ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
+ ret = sock_create_lite(sock->sk->sk_family,
sock->sk->sk_type, sock->sk->sk_protocol,
&new_sock);
if (ret)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 45ba3d0872cc..8ce85420ecb0 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -291,8 +291,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
[NL80211_ATTR_PID] = { .type = NLA_U32 },
[NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
- [NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
- .len = WLAN_PMKID_LEN },
+ [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN },
[NL80211_ATTR_DURATION] = { .type = NLA_U32 },
[NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
[NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
@@ -348,6 +347,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
[NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
[NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
+ [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 },
[NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 },
[NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
[NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
@@ -520,7 +520,7 @@ nl80211_bss_select_policy[NL80211_BSS_SELECT_ATTR_MAX + 1] = {
static const struct nla_policy
nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = {
[NL80211_NAN_FUNC_TYPE] = { .type = NLA_U8 },
- [NL80211_NAN_FUNC_SERVICE_ID] = { .type = NLA_BINARY,
+ [NL80211_NAN_FUNC_SERVICE_ID] = {
.len = NL80211_NAN_FUNC_SERVICE_ID_LEN },
[NL80211_NAN_FUNC_PUBLISH_TYPE] = { .type = NLA_U8 },
[NL80211_NAN_FUNC_PUBLISH_BCAST] = { .type = NLA_FLAG },
@@ -6469,6 +6469,10 @@ static int validate_scan_freqs(struct nlattr *freqs)
struct nlattr *attr1, *attr2;
int n_channels = 0, tmp1, tmp2;
+ nla_for_each_nested(attr1, freqs, tmp1)
+ if (nla_len(attr1) != sizeof(u32))
+ return 0;
+
nla_for_each_nested(attr1, freqs, tmp1) {
n_channels++;
/*
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 61f87a99bf0a..dd8e2dde0b34 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -108,6 +108,11 @@ as-option = $(call try-run,\
as-instr = $(call try-run,\
printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
+# __cc-option
+# Usage: MY_CFLAGS += $(call __cc-option,$(CC),$(MY_CFLAGS),-march=winchip-c6,-march=i586)
+__cc-option = $(call try-run,\
+ $(1) -Werror $(2) $(3) -c -x c /dev/null -o "$$TMP",$(3),$(4))
+
# Do not attempt to build with gcc plugins during cc-option tests.
# (And this uses delayed resolution so the flags will be up to date.)
CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
@@ -115,19 +120,19 @@ CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
# cc-option
# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
-cc-option = $(call try-run,\
- $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+cc-option = $(call __cc-option, $(CC),\
+ $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS),$(1),$(2))
+
+# hostcc-option
+# Usage: cflags-y += $(call hostcc-option,-march=winchip-c6,-march=i586)
+hostcc-option = $(call __cc-option, $(HOSTCC),\
+ $(HOSTCFLAGS) $(HOST_EXTRACFLAGS),$(1),$(2))
# cc-option-yn
# Usage: flag := $(call cc-option-yn,-march=winchip-c6)
cc-option-yn = $(call try-run,\
$(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
-# cc-option-align
-# Prefix align with either -falign or -malign
-cc-option-align = $(subst -functions=0,,\
- $(call cc-option,-falign-functions=0,-malign-functions=0))
-
# cc-disable-warning
# Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
cc-disable-warning = $(call try-run,\
diff --git a/scripts/Makefile.asm-generic b/scripts/Makefile.asm-generic
index e4d017d53819..95f7d8090152 100644
--- a/scripts/Makefile.asm-generic
+++ b/scripts/Makefile.asm-generic
@@ -15,7 +15,7 @@ _dummy := $(shell [ -d $(obj) ] || mkdir -p $(obj))
# Stale wrappers when the corresponding files are removed from generic-y
# need removing.
-generated-y := $(generic-y) $(genhdr-y) $(generated-y)
+generated-y := $(generic-y) $(generated-y)
all-files := $(patsubst %, $(obj)/%, $(generated-y))
old-headers := $(wildcard $(obj)/*.h)
unwanted := $(filter-out $(all-files),$(old-headers))
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 733e044fff8b..4a9a2cec0a1b 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -437,8 +437,8 @@ $(sort $(subdir-obj-y)): $(subdir-ym) ;
ifdef builtin-target
ifdef CONFIG_THIN_ARCHIVES
- cmd_make_builtin = rm -f $@; $(AR) rcST$(KBUILD_ARFLAGS)
- cmd_make_empty_builtin = rm -f $@; $(AR) rcST$(KBUILD_ARFLAGS)
+ cmd_make_builtin = rm -f $@; $(AR) rcSTP$(KBUILD_ARFLAGS)
+ cmd_make_empty_builtin = rm -f $@; $(AR) rcSTP$(KBUILD_ARFLAGS)
quiet_cmd_link_o_target = AR $@
else
cmd_make_builtin = $(LD) $(ld_flags) -r -o
@@ -478,7 +478,7 @@ ifdef lib-target
quiet_cmd_link_l_target = AR $@
ifdef CONFIG_THIN_ARCHIVES
- cmd_link_l_target = rm -f $@; $(AR) rcsT$(KBUILD_ARFLAGS) $@ $(lib-y)
+ cmd_link_l_target = rm -f $@; $(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(lib-y)
else
cmd_link_l_target = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@ $(lib-y)
endif
@@ -531,7 +531,7 @@ cmd_link_multi-link = $(LD) $(ld_flags) -r -o $@ $(link_multi_deps) $(cmd_secana
ifdef CONFIG_THIN_ARCHIVES
quiet_cmd_link_multi-y = AR $@
- cmd_link_multi-y = rm -f $@; $(AR) rcST$(KBUILD_ARFLAGS) $@ $(link_multi_deps)
+ cmd_link_multi-y = rm -f $@; $(AR) rcSTP$(KBUILD_ARFLAGS) $@ $(link_multi_deps)
else
quiet_cmd_link_multi-y = LD $@
cmd_link_multi-y = $(cmd_link_multi-link)
diff --git a/scripts/Makefile.host b/scripts/Makefile.host
index 45b5b1aaedbd..9cfd5c84d76f 100644
--- a/scripts/Makefile.host
+++ b/scripts/Makefile.host
@@ -20,12 +20,6 @@
# Will compile qconf as a C++ program, and menu as a C program.
# They are linked as C++ code to the executable qconf
-# hostcc-option
-# Usage: cflags-y += $(call hostcc-option,-march=winchip-c6,-march=i586)
-
-hostcc-option = $(call try-run,\
- $(HOSTCC) $(HOSTCFLAGS) $(HOST_EXTRACFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
-
__hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
host-cshlib := $(sort $(hostlibs-y) $(hostlibs-m))
host-cxxshlib := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
diff --git a/scripts/bootgraph.pl b/scripts/bootgraph.pl
index 9ca667bcaee9..594c55541b16 100755
--- a/scripts/bootgraph.pl
+++ b/scripts/bootgraph.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# Copyright 2008, Intel Corporation
#
diff --git a/scripts/checkincludes.pl b/scripts/checkincludes.pl
index 381c018a4612..ce9edefd6e0b 100755
--- a/scripts/checkincludes.pl
+++ b/scripts/checkincludes.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
#
# checkincludes: find/remove files included more than once
#
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index c7e4d73fe1ce..3a225d078e75 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl -w
+#!/usr/bin/env perl
# (c) 2001, Dave Jones. (the file handling bit)
# (c) 2005, Joel Schopp <[email protected]> (the ugly bit)
# (c) 2007,2008, Andy Whitcroft <[email protected]> (new conditions, test suite)
@@ -6,6 +6,7 @@
# Licensed under the terms of the GNU GPL License version 2
use strict;
+use warnings;
use POSIX;
use File::Basename;
use Cwd 'abs_path';
@@ -5692,7 +5693,7 @@ sub process {
for (my $count = $linenr; $count <= $lc; $count++) {
my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0));
$fmt =~ s/%%//g;
- if ($fmt =~ /(\%[\*\d\.]*p(?![\WFfSsBKRraEhMmIiUDdgVCbGN]).)/) {
+ if ($fmt =~ /(\%[\*\d\.]*p(?![\WFfSsBKRraEhMmIiUDdgVCbGNO]).)/) {
$bad_extension = $1;
last;
}
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index 9d37aa4faf5c..7f4c41717e26 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# Check the stack usage of functions
#
diff --git a/scripts/checksyscalls.sh b/scripts/checksyscalls.sh
index 116b7735ee9f..5a387a264201 100755
--- a/scripts/checksyscalls.sh
+++ b/scripts/checksyscalls.sh
@@ -202,15 +202,12 @@ EOF
}
syscall_list() {
- grep '^[0-9]' "$1" | sort -n | (
+ grep '^[0-9]' "$1" | sort -n |
while read nr abi name entry ; do
- cat <<EOF
-#if !defined(__NR_${name}) && !defined(__IGNORE_${name})
-#warning syscall ${name} not implemented
-#endif
-EOF
+ echo "#if !defined(__NR_${name}) && !defined(__IGNORE_${name})"
+ echo "#warning syscall ${name} not implemented"
+ echo "#endif"
done
- )
}
(ignore_list && syscall_list $(dirname $0)/../arch/x86/entry/syscalls/syscall_32.tbl) | \
diff --git a/scripts/checkversion.pl b/scripts/checkversion.pl
index 5e490a8ceca5..8b4f205234b5 100755
--- a/scripts/checkversion.pl
+++ b/scripts/checkversion.pl
@@ -1,4 +1,4 @@
-#! /usr/bin/perl
+#! /usr/bin/env perl
#
# checkversion find uses of LINUX_VERSION_CODE or KERNEL_VERSION
# without including <linux/version.h>, or cases of
diff --git a/scripts/cleanfile b/scripts/cleanfile
index cefd29e52298..72e3755327ae 100755
--- a/scripts/cleanfile
+++ b/scripts/cleanfile
@@ -1,9 +1,10 @@
-#!/usr/bin/perl -w
+#!/usr/bin/env perl
#
# Clean a text file -- or directory of text files -- of stealth whitespace.
# WARNING: this can be a highly destructive operation. Use with caution.
#
+use warnings;
use bytes;
use File::Basename;
diff --git a/scripts/cleanpatch b/scripts/cleanpatch
index 9680d03ad2b8..3e5a2303dc0e 100755
--- a/scripts/cleanpatch
+++ b/scripts/cleanpatch
@@ -1,9 +1,10 @@
-#!/usr/bin/perl -w
+#!/usr/bin/env perl
#
# Clean a patch file -- or directory of patch files -- of stealth whitespace.
# WARNING: this can be a highly destructive operation. Use with caution.
#
+use warnings;
use bytes;
use File::Basename;
diff --git a/scripts/dtc/dt_to_config b/scripts/dtc/dt_to_config
index 9a248b505c58..5dfd1bff351f 100755
--- a/scripts/dtc/dt_to_config
+++ b/scripts/dtc/dt_to_config
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# Copyright 2016 by Frank Rowand
# Copyright 2016 by Gaurav Minocha
diff --git a/scripts/dtc/dtx_diff b/scripts/dtc/dtx_diff
index ec47f95991a3..fb86f3899e16 100755
--- a/scripts/dtc/dtx_diff
+++ b/scripts/dtc/dtx_diff
@@ -338,7 +338,7 @@ DTC="${DTC} ${dtc_flags} -O dts -qq -f ${dtc_sort} -o -"
if (( ${cmd_diff} )) ; then
- diff ${diff_flags} \
+ diff ${diff_flags} --label "${dtx_file_1}" --label "${dtx_file_2}" \
<(compile_to_dts "${dtx_file_1}") \
<(compile_to_dts "${dtx_file_2}")
diff --git a/scripts/export_report.pl b/scripts/export_report.pl
index 8f79b701de87..68ff426b347c 100755
--- a/scripts/export_report.pl
+++ b/scripts/export_report.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl -w
+#!/usr/bin/env perl
#
# (C) Copyright IBM Corporation 2006.
# Released under GPL v2.
@@ -7,6 +7,7 @@
# Usage: export_report.pl -k Module.symvers [-o report_file ] -f *.mod.c
#
+use warnings;
use Getopt::Std;
use strict;
diff --git a/scripts/extract-module-sig.pl b/scripts/extract-module-sig.pl
index faac6f2e377f..0f161ea41261 100755
--- a/scripts/extract-module-sig.pl
+++ b/scripts/extract-module-sig.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl -w
+#!/usr/bin/env perl
#
# extract-mod-sig <part> <module-file>
#
@@ -12,6 +12,7 @@
# -k: Just the key ID
# -s: Just the crypto signature or PKCS#7 message
#
+use warnings;
use strict;
die "Format: $0 -[0adnks] module-file >out\n"
diff --git a/scripts/extract-sys-certs.pl b/scripts/extract-sys-certs.pl
index 8227ca10a494..2aa873b944e0 100755
--- a/scripts/extract-sys-certs.pl
+++ b/scripts/extract-sys-certs.pl
@@ -1,5 +1,6 @@
-#!/usr/bin/perl -w
+#!/usr/bin/env perl
#
+use warnings;
use strict;
use Math::BigInt;
use Fcntl "SEEK_SET";
diff --git a/scripts/extract_xc3028.pl b/scripts/extract_xc3028.pl
index 47877deae6d7..61d9b256c658 100755
--- a/scripts/extract_xc3028.pl
+++ b/scripts/extract_xc3028.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# Copyright (c) Mauro Carvalho Chehab <[email protected]>
# Released under GPLv2
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index 0055b07b03b6..86a3c0e5cfbc 100755
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -174,7 +174,7 @@ dir_filelist() {
${dep_list}header "$1"
srcdir=$(echo "$1" | sed -e 's://*:/:g')
- dirlist=$(find "${srcdir}" -printf "%p %m %U %G\n")
+ dirlist=$(find "${srcdir}" -printf "%p %m %U %G\n" | sort)
# If $dirlist is only one line, then the directory is empty
if [ "$(echo "${dirlist}" | wc -l)" -gt 1 ]; then
@@ -271,10 +271,12 @@ while [ $# -gt 0 ]; do
case "$arg" in
"-u") # map $1 to uid=0 (root)
root_uid="$1"
+ [ "$root_uid" = "-1" ] && root_uid=$(id -u || echo 0)
shift
;;
"-g") # map $1 to gid=0 (root)
root_gid="$1"
+ [ "$root_gid" = "-1" ] && root_gid=$(id -g || echo 0)
shift
;;
"-d") # display default initramfs list
diff --git a/scripts/get_dvb_firmware b/scripts/get_dvb_firmware
index 1a0a04125f71..f3f230225aba 100755
--- a/scripts/get_dvb_firmware
+++ b/scripts/get_dvb_firmware
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# DVB firmware extractor
#
# (c) 2004 Andrew de Quincey
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 633f2dd3de27..3bd5f4f30235 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl -w
+#!/usr/bin/env perl
# (c) 2007, Joe Perches <[email protected]>
# created from checkpatch.pl
#
@@ -10,6 +10,7 @@
#
# Licensed under the terms of the GNU GPL License version 2
+use warnings;
use strict;
my $P = $0;
diff --git a/scripts/headerdep.pl b/scripts/headerdep.pl
index 8dd019bc5a73..86ebb9ee7570 100755
--- a/scripts/headerdep.pl
+++ b/scripts/headerdep.pl
@@ -1,4 +1,4 @@
-#! /usr/bin/perl
+#! /usr/bin/env perl
#
# Detect cycles in the header file dependency graph
# Vegard Nossum <[email protected]>
diff --git a/scripts/headers_check.pl b/scripts/headers_check.pl
index 8b2da054cdc3..3091e4ee6ee1 100755
--- a/scripts/headers_check.pl
+++ b/scripts/headers_check.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl -w
+#!/usr/bin/env perl
#
# headers_check.pl execute a number of trivial consistency checks
#
@@ -18,6 +18,7 @@
#
# 3) Check for leaked CONFIG_ symbols
+use warnings;
use strict;
use File::Basename;
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index b8c7b29affc5..a2e83ab17de3 100755
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl -w
+#!/usr/bin/env perl
#
# Copyright 2005-2009 - Steven Rostedt
# Licensed under the terms of the GNU GPL License version 2
@@ -42,6 +42,7 @@
# mv config_strip .config
# make oldconfig
#
+use warnings;
use strict;
use Getopt::Long;
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index c1ffd31ff423..6e36b7889001 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -1,5 +1,6 @@
-#!/usr/bin/perl -w
+#!/usr/bin/env perl
+use warnings;
use strict;
## Copyright (c) 1998 Michael Zucchi, All Rights Reserved ##
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index c80291319cb2..e7b7eee31538 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -3,9 +3,12 @@
# link vmlinux
#
# vmlinux is linked from the objects selected by $(KBUILD_VMLINUX_INIT) and
-# $(KBUILD_VMLINUX_MAIN). Most are built-in.o files from top-level directories
-# in the kernel tree, others are specified in arch/$(ARCH)/Makefile.
-# Ordering when linking is important, and $(KBUILD_VMLINUX_INIT) must be first.
+# $(KBUILD_VMLINUX_MAIN) and $(KBUILD_VMLINUX_LIBS). Most are built-in.o files
+# from top-level directories in the kernel tree, others are specified in
+# arch/$(ARCH)/Makefile. Ordering when linking is important, and
+# $(KBUILD_VMLINUX_INIT) must be first. $(KBUILD_VMLINUX_LIBS) are archives
+# which are linked conditionally (not within --whole-archive), and do not
+# require symbol indexes added.
#
# vmlinux
# ^
@@ -16,6 +19,9 @@
# +--< $(KBUILD_VMLINUX_MAIN)
# | +--< drivers/built-in.o mm/built-in.o + more
# |
+# +--< $(KBUILD_VMLINUX_LIBS)
+# | +--< lib/lib.a + more
+# |
# +-< ${kallsymso} (see description in KALLSYMS section)
#
# vmlinux version (uname -v) cannot be updated during normal
@@ -37,9 +43,10 @@ info()
fi
}
-# Thin archive build here makes a final archive with
-# symbol table and indexes from vmlinux objects, which can be
-# used as input to linker.
+# Thin archive build here makes a final archive with symbol table and indexes
+# from vmlinux objects INIT and MAIN, which can be used as input to linker.
+# KBUILD_VMLINUX_LIBS archives should already have symbol table and indexes
+# added.
#
# Traditional incremental style of link does not require this step
#
@@ -50,7 +57,7 @@ archive_builtin()
if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
info AR built-in.o
rm -f built-in.o;
- ${AR} rcsT${KBUILD_ARFLAGS} built-in.o \
+ ${AR} rcsTP${KBUILD_ARFLAGS} built-in.o \
${KBUILD_VMLINUX_INIT} \
${KBUILD_VMLINUX_MAIN}
fi
@@ -63,11 +70,17 @@ modpost_link()
local objects
if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
- objects="--whole-archive built-in.o"
+ objects="--whole-archive \
+ built-in.o \
+ --no-whole-archive \
+ --start-group \
+ ${KBUILD_VMLINUX_LIBS} \
+ --end-group"
else
objects="${KBUILD_VMLINUX_INIT} \
--start-group \
${KBUILD_VMLINUX_MAIN} \
+ ${KBUILD_VMLINUX_LIBS} \
--end-group"
fi
${LD} ${LDFLAGS} -r -o ${1} ${objects}
@@ -83,11 +96,18 @@ vmlinux_link()
if [ "${SRCARCH}" != "um" ]; then
if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
- objects="--whole-archive built-in.o ${1}"
+ objects="--whole-archive \
+ built-in.o \
+ --no-whole-archive \
+ --start-group \
+ ${KBUILD_VMLINUX_LIBS} \
+ --end-group \
+ ${1}"
else
objects="${KBUILD_VMLINUX_INIT} \
--start-group \
${KBUILD_VMLINUX_MAIN} \
+ ${KBUILD_VMLINUX_LIBS} \
--end-group \
${1}"
fi
@@ -96,11 +116,18 @@ vmlinux_link()
-T ${lds} ${objects}
else
if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
- objects="-Wl,--whole-archive built-in.o ${1}"
+ objects="-Wl,--whole-archive \
+ built-in.o \
+ -Wl,--no-whole-archive \
+ -Wl,--start-group \
+ ${KBUILD_VMLINUX_LIBS} \
+ -Wl,--end-group \
+ ${1}"
else
objects="${KBUILD_VMLINUX_INIT} \
-Wl,--start-group \
${KBUILD_VMLINUX_MAIN} \
+ ${KBUILD_VMLINUX_LIBS} \
-Wl,--end-group \
${1}"
fi
diff --git a/scripts/markup_oops.pl b/scripts/markup_oops.pl
index c21d16328d3f..70dcfb6b3de1 100755
--- a/scripts/markup_oops.pl
+++ b/scripts/markup_oops.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
use File::Basename;
use Math::BigInt;
diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index 6fdc97ef6023..fd8fdb91581d 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -76,7 +76,7 @@ UTS_TRUNCATE="cut -b -$UTS_LEN"
echo \#define LINUX_COMPILE_BY \"`echo $LINUX_COMPILE_BY | $UTS_TRUNCATE`\"
echo \#define LINUX_COMPILE_HOST \"`echo $LINUX_COMPILE_HOST | $UTS_TRUNCATE`\"
- echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | grep ' version '`\"
+ echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | grep ' version ' | sed 's/[[:space:]]*$//'`\"
) > .tmpcompile
# Only replace the real compile.h if the new one is different,
diff --git a/scripts/namespace.pl b/scripts/namespace.pl
index 9f3c9d47a4a5..729c547fc9e1 100755
--- a/scripts/namespace.pl
+++ b/scripts/namespace.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl -w
+#!/usr/bin/env perl
#
# namespace.pl. Mon Aug 30 2004
#
@@ -62,7 +62,7 @@
# result.
#
-require 5; # at least perl 5
+use warnings;
use strict;
use File::Find;
diff --git a/scripts/profile2linkerlist.pl b/scripts/profile2linkerlist.pl
index 6943fa7cc95b..f23d7be94394 100755
--- a/scripts/profile2linkerlist.pl
+++ b/scripts/profile2linkerlist.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
#
# Takes a (sorted) output of readprofile and turns it into a list suitable for
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 1633c3e6c0b9..2033af758173 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl -w
+#!/usr/bin/env perl
# (c) 2008, Steven Rostedt <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
@@ -106,6 +106,7 @@
# 9) Move the result back to the original object.
#
+use warnings;
use strict;
my $P = $0;
diff --git a/scripts/spelling.txt b/scripts/spelling.txt
index eb38f49d4b75..400ef35169c5 100644
--- a/scripts/spelling.txt
+++ b/scripts/spelling.txt
@@ -54,6 +54,7 @@ adapater||adapter
addional||additional
additionaly||additionally
addres||address
+adddress||address
addreses||addresses
addresss||address
aditional||additional
@@ -130,6 +131,7 @@ arraival||arrival
artifical||artificial
artillary||artillery
asign||assign
+asser||assert
assertation||assertion
assiged||assigned
assigment||assignment
@@ -149,6 +151,7 @@ atempt||attempt
attachement||attachment
attched||attached
attemps||attempts
+attemping||attempting
attruibutes||attributes
authentification||authentication
automaticaly||automatically
@@ -205,9 +208,11 @@ callibration||calibration
calucate||calculate
calulate||calculate
cancelation||cancellation
+cancle||cancel
capabilites||capabilities
capabitilies||capabilities
capatibilities||capabilities
+capapbilities||capabilities
carefuly||carefully
cariage||carriage
catagory||category
@@ -216,6 +221,7 @@ challange||challenge
challanges||challenges
chanell||channel
changable||changeable
+chanined||chained
channle||channel
channnel||channel
charachter||character
@@ -241,6 +247,7 @@ claread||cleared
clared||cleared
closeing||closing
clustred||clustered
+coexistance||coexistence
collapsable||collapsible
colorfull||colorful
comand||command
@@ -269,6 +276,7 @@ completition||completion
completly||completely
complient||compliant
componnents||components
+compoment||component
compres||compress
compresion||compression
comression||compression
@@ -315,6 +323,7 @@ correponding||corresponding
correponds||corresponds
correspoding||corresponding
cotrol||control
+cound||could
couter||counter
coutner||counter
cryptocraphic||cryptographic
@@ -326,6 +335,7 @@ deafult||default
deamon||daemon
decompres||decompress
decription||description
+dectected||detected
defailt||default
defferred||deferred
definate||definite
@@ -343,6 +353,8 @@ delare||declare
delares||declares
delaring||declaring
delemiter||delimiter
+demodualtor||demodulator
+demension||dimension
dependancies||dependencies
dependancy||dependency
dependant||dependent
@@ -357,11 +369,13 @@ descritptor||descriptor
desctiptor||descriptor
desriptor||descriptor
desriptors||descriptors
+destionation||destination
destory||destroy
destoryed||destroyed
destorys||destroys
destroied||destroyed
detabase||database
+deteced||detected
develope||develop
developement||development
developped||developed
@@ -419,9 +433,11 @@ enchanced||enhanced
encorporating||incorporating
encrupted||encrypted
encrypiton||encryption
+encryptio||encryption
endianess||endianness
enhaced||enhanced
enlightnment||enlightenment
+entrys||entries
enocded||encoded
enterily||entirely
enviroiment||environment
@@ -439,6 +455,7 @@ etsbalishment||establishment
excecutable||executable
exceded||exceeded
excellant||excellent
+exeed||exceed
existance||existence
existant||existent
exixt||exist
@@ -467,6 +484,7 @@ failuer||failure
faireness||fairness
falied||failed
faliure||failure
+fallbck||fallback
familar||familiar
fatser||faster
feauture||feature
@@ -564,6 +582,7 @@ independant||independent
independantly||independently
independed||independent
indiate||indicate
+indicat||indicate
inexpect||inexpected
infomation||information
informatiom||information
@@ -682,6 +701,7 @@ messags||messages
messgaes||messages
messsage||message
messsages||messages
+micropone||microphone
microprocesspr||microprocessor
milliseonds||milliseconds
minium||minimum
@@ -693,11 +713,14 @@ misformed||malformed
mispelled||misspelled
mispelt||misspelt
mising||missing
+missmanaged||mismanaged
+missmatch||mismatch
miximum||maximum
mmnemonic||mnemonic
mnay||many
modulues||modules
momery||memory
+memomry||memory
monochorome||monochrome
monochromo||monochrome
monocrome||monochrome
@@ -798,6 +821,7 @@ permissons||permissions
peroid||period
persistance||persistence
persistant||persistent
+plalform||platform
platfrom||platform
plattform||platform
pleaes||please
@@ -810,6 +834,7 @@ posible||possible
positon||position
possibilites||possibilities
powerfull||powerful
+preapre||prepare
preceeded||preceded
preceeding||preceding
preceed||precede
@@ -868,6 +893,7 @@ psuedo||pseudo
psychadelic||psychedelic
pwoer||power
quering||querying
+randomally||randomly
raoming||roaming
reasearcher||researcher
reasearchers||researchers
@@ -895,6 +921,7 @@ refernnce||reference
refrence||reference
registerd||registered
registeresd||registered
+registerred||registered
registes||registers
registraration||registration
regsiter||register
@@ -923,6 +950,7 @@ requried||required
requst||request
reseting||resetting
resizeable||resizable
+resouce||resource
resouces||resources
resoures||resources
responce||response
@@ -938,6 +966,7 @@ reudce||reduce
reuest||request
reuqest||request
reutnred||returned
+revsion||revision
rmeoved||removed
rmeove||remove
rmeoves||removes
@@ -1099,6 +1128,7 @@ transfering||transferring
transision||transition
transmittd||transmitted
transormed||transformed
+trasfer||transfer
trasmission||transmission
treshold||threshold
trigerring||triggering
@@ -1167,6 +1197,7 @@ virtaul||virtual
virtiual||virtual
visiters||visitors
vitual||virtual
+wakeus||wakeups
wating||waiting
wether||whether
whataver||whatever
diff --git a/scripts/stackdelta b/scripts/stackdelta
index 48eabf2f48f8..20a79f19a111 100755
--- a/scripts/stackdelta
+++ b/scripts/stackdelta
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
# Read two files produced by the stackusage script, and show the
# delta between them.
diff --git a/sound/Kconfig b/sound/Kconfig
index 6a215a8c0490..d7d2aac9542e 100644
--- a/sound/Kconfig
+++ b/sound/Kconfig
@@ -56,7 +56,7 @@ config SOUND_OSS_CORE_PRECLAIM
source "sound/oss/dmasound/Kconfig"
-if !M68K && !UML
+if !UML
menuconfig SND
tristate "Advanced Linux Sound Architecture"
@@ -110,6 +110,8 @@ source "sound/soc/Kconfig"
source "sound/x86/Kconfig"
+source "sound/synth/Kconfig"
+
endif # SND
menuconfig SOUND_PRIME
@@ -125,7 +127,7 @@ source "sound/oss/Kconfig"
endif # SOUND_PRIME
-endif # !M68K
+endif # !UML
endif # SOUND
diff --git a/sound/aoa/codecs/tas.c b/sound/aoa/codecs/tas.c
index 78ed1ffbf786..733b6365dad6 100644
--- a/sound/aoa/codecs/tas.c
+++ b/sound/aoa/codecs/tas.c
@@ -271,7 +271,7 @@ static int tas_snd_vol_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static struct snd_kcontrol_new volume_control = {
+static const struct snd_kcontrol_new volume_control = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Volume",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -314,7 +314,7 @@ static int tas_snd_mute_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static struct snd_kcontrol_new mute_control = {
+static const struct snd_kcontrol_new mute_control = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -426,7 +426,7 @@ static int tas_snd_drc_range_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static struct snd_kcontrol_new drc_range_control = {
+static const struct snd_kcontrol_new drc_range_control = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "DRC Range",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -466,7 +466,7 @@ static int tas_snd_drc_switch_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static struct snd_kcontrol_new drc_switch_control = {
+static const struct snd_kcontrol_new drc_switch_control = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "DRC Range Switch",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -524,7 +524,7 @@ static int tas_snd_capture_source_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static struct snd_kcontrol_new capture_source_control = {
+static const struct snd_kcontrol_new capture_source_control = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
/* If we name this 'Input Source', it properly shows up in
* alsamixer as a selection, * but it's shown under the
@@ -586,7 +586,7 @@ static int tas_snd_treble_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static struct snd_kcontrol_new treble_control = {
+static const struct snd_kcontrol_new treble_control = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Treble",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -637,7 +637,7 @@ static int tas_snd_bass_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static struct snd_kcontrol_new bass_control = {
+static const struct snd_kcontrol_new bass_control = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Bass",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
diff --git a/sound/aoa/fabrics/layout.c b/sound/aoa/fabrics/layout.c
index a0c4a5de809c..1eddf8fa188f 100644
--- a/sound/aoa/fabrics/layout.c
+++ b/sound/aoa/fabrics/layout.c
@@ -707,7 +707,7 @@ static int detect_choice_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static struct snd_kcontrol_new headphone_detect_choice = {
+static const struct snd_kcontrol_new headphone_detect_choice = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Headphone Detect Autoswitch",
.info = control_info,
@@ -717,7 +717,7 @@ static struct snd_kcontrol_new headphone_detect_choice = {
.private_value = 0,
};
-static struct snd_kcontrol_new lineout_detect_choice = {
+static const struct snd_kcontrol_new lineout_detect_choice = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Line-Out Detect Autoswitch",
.info = control_info,
@@ -749,7 +749,7 @@ static int detected_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new headphone_detected = {
+static const struct snd_kcontrol_new headphone_detected = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Headphone Detected",
.info = control_info,
@@ -758,7 +758,7 @@ static struct snd_kcontrol_new headphone_detected = {
.private_value = 0,
};
-static struct snd_kcontrol_new lineout_detected = {
+static const struct snd_kcontrol_new lineout_detected = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Line-Out Detected",
.info = control_info,
diff --git a/sound/atmel/Kconfig b/sound/atmel/Kconfig
index 94de43a096f1..d789cbcb9106 100644
--- a/sound/atmel/Kconfig
+++ b/sound/atmel/Kconfig
@@ -1,18 +1,11 @@
-menu "Atmel devices (AVR32 and AT91)"
- depends on AVR32 || ARCH_AT91
-
-config SND_ATMEL_ABDAC
- tristate "Atmel Audio Bitstream DAC (ABDAC) driver"
- select SND_PCM
- depends on DW_DMAC && AVR32
- help
- ALSA sound driver for the Atmel Audio Bitstream DAC (ABDAC).
+menu "Atmel devices (AT91)"
+ depends on ARCH_AT91
config SND_ATMEL_AC97C
tristate "Atmel AC97 Controller (AC97C) driver"
select SND_PCM
select SND_AC97_CODEC
- depends on (DW_DMAC && AVR32) || ARCH_AT91
+ depends on ARCH_AT91
help
ALSA sound driver for the Atmel AC97 controller.
diff --git a/sound/atmel/Makefile b/sound/atmel/Makefile
index 219dcfac6086..d4009d1430ed 100644
--- a/sound/atmel/Makefile
+++ b/sound/atmel/Makefile
@@ -1,5 +1,3 @@
-snd-atmel-abdac-objs := abdac.o
snd-atmel-ac97c-objs := ac97c.o
-obj-$(CONFIG_SND_ATMEL_ABDAC) += snd-atmel-abdac.o
obj-$(CONFIG_SND_ATMEL_AC97C) += snd-atmel-ac97c.o
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c
deleted file mode 100644
index 558618802000..000000000000
--- a/sound/atmel/abdac.c
+++ /dev/null
@@ -1,610 +0,0 @@
-/*
- * Driver for the Atmel on-chip Audio Bitstream DAC (ABDAC)
- *
- * Copyright (C) 2006-2009 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-#include <linux/clk.h>
-#include <linux/bitmap.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/io.h>
-
-#include <sound/core.h>
-#include <sound/initval.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/atmel-abdac.h>
-
-#include <linux/platform_data/dma-dw.h>
-#include <linux/dma/dw.h>
-
-/* DAC register offsets */
-#define DAC_DATA 0x0000
-#define DAC_CTRL 0x0008
-#define DAC_INT_MASK 0x000c
-#define DAC_INT_EN 0x0010
-#define DAC_INT_DIS 0x0014
-#define DAC_INT_CLR 0x0018
-#define DAC_INT_STATUS 0x001c
-
-/* Bitfields in CTRL */
-#define DAC_SWAP_OFFSET 30
-#define DAC_SWAP_SIZE 1
-#define DAC_EN_OFFSET 31
-#define DAC_EN_SIZE 1
-
-/* Bitfields in INT_MASK/INT_EN/INT_DIS/INT_STATUS/INT_CLR */
-#define DAC_UNDERRUN_OFFSET 28
-#define DAC_UNDERRUN_SIZE 1
-#define DAC_TX_READY_OFFSET 29
-#define DAC_TX_READY_SIZE 1
-
-/* Bit manipulation macros */
-#define DAC_BIT(name) \
- (1 << DAC_##name##_OFFSET)
-#define DAC_BF(name, value) \
- (((value) & ((1 << DAC_##name##_SIZE) - 1)) \
- << DAC_##name##_OFFSET)
-#define DAC_BFEXT(name, value) \
- (((value) >> DAC_##name##_OFFSET) \
- & ((1 << DAC_##name##_SIZE) - 1))
-#define DAC_BFINS(name, value, old) \
- (((old) & ~(((1 << DAC_##name##_SIZE) - 1) \
- << DAC_##name##_OFFSET)) \
- | DAC_BF(name, value))
-
-/* Register access macros */
-#define dac_readl(port, reg) \
- __raw_readl((port)->regs + DAC_##reg)
-#define dac_writel(port, reg, value) \
- __raw_writel((value), (port)->regs + DAC_##reg)
-
-/*
- * ABDAC supports a maximum of 6 different rates from a generic clock. The
- * generic clock has a power of two divider, which gives 6 steps from 192 kHz
- * to 5112 Hz.
- */
-#define MAX_NUM_RATES 6
-/* ALSA seems to use rates between 192000 Hz and 5112 Hz. */
-#define RATE_MAX 192000
-#define RATE_MIN 5112
-
-enum {
- DMA_READY = 0,
-};
-
-struct atmel_abdac_dma {
- struct dma_chan *chan;
- struct dw_cyclic_desc *cdesc;
-};
-
-struct atmel_abdac {
- struct clk *pclk;
- struct clk *sample_clk;
- struct platform_device *pdev;
- struct atmel_abdac_dma dma;
-
- struct snd_pcm_hw_constraint_list constraints_rates;
- struct snd_pcm_substream *substream;
- struct snd_card *card;
- struct snd_pcm *pcm;
-
- void __iomem *regs;
- unsigned long flags;
- unsigned int rates[MAX_NUM_RATES];
- unsigned int rates_num;
- int irq;
-};
-
-#define get_dac(card) ((struct atmel_abdac *)(card)->private_data)
-
-/* This function is called by the DMA driver. */
-static void atmel_abdac_dma_period_done(void *arg)
-{
- struct atmel_abdac *dac = arg;
- snd_pcm_period_elapsed(dac->substream);
-}
-
-static int atmel_abdac_prepare_dma(struct atmel_abdac *dac,
- struct snd_pcm_substream *substream,
- enum dma_data_direction direction)
-{
- struct dma_chan *chan = dac->dma.chan;
- struct dw_cyclic_desc *cdesc;
- struct snd_pcm_runtime *runtime = substream->runtime;
- unsigned long buffer_len, period_len;
-
- /*
- * We don't do DMA on "complex" transfers, i.e. with
- * non-halfword-aligned buffers or lengths.
- */
- if (runtime->dma_addr & 1 || runtime->buffer_size & 1) {
- dev_dbg(&dac->pdev->dev, "too complex transfer\n");
- return -EINVAL;
- }
-
- buffer_len = frames_to_bytes(runtime, runtime->buffer_size);
- period_len = frames_to_bytes(runtime, runtime->period_size);
-
- cdesc = dw_dma_cyclic_prep(chan, runtime->dma_addr, buffer_len,
- period_len, DMA_MEM_TO_DEV);
- if (IS_ERR(cdesc)) {
- dev_dbg(&dac->pdev->dev, "could not prepare cyclic DMA\n");
- return PTR_ERR(cdesc);
- }
-
- cdesc->period_callback = atmel_abdac_dma_period_done;
- cdesc->period_callback_param = dac;
-
- dac->dma.cdesc = cdesc;
-
- set_bit(DMA_READY, &dac->flags);
-
- return 0;
-}
-
-static struct snd_pcm_hardware atmel_abdac_hw = {
- .info = (SNDRV_PCM_INFO_MMAP
- | SNDRV_PCM_INFO_MMAP_VALID
- | SNDRV_PCM_INFO_INTERLEAVED
- | SNDRV_PCM_INFO_BLOCK_TRANSFER
- | SNDRV_PCM_INFO_RESUME
- | SNDRV_PCM_INFO_PAUSE),
- .formats = (SNDRV_PCM_FMTBIT_S16_BE),
- .rates = (SNDRV_PCM_RATE_KNOT),
- .rate_min = RATE_MIN,
- .rate_max = RATE_MAX,
- .channels_min = 2,
- .channels_max = 2,
- .buffer_bytes_max = 64 * 4096,
- .period_bytes_min = 4096,
- .period_bytes_max = 4096,
- .periods_min = 6,
- .periods_max = 64,
-};
-
-static int atmel_abdac_open(struct snd_pcm_substream *substream)
-{
- struct atmel_abdac *dac = snd_pcm_substream_chip(substream);
-
- dac->substream = substream;
- atmel_abdac_hw.rate_max = dac->rates[dac->rates_num - 1];
- atmel_abdac_hw.rate_min = dac->rates[0];
- substream->runtime->hw = atmel_abdac_hw;
-
- return snd_pcm_hw_constraint_list(substream->runtime, 0,
- SNDRV_PCM_HW_PARAM_RATE, &dac->constraints_rates);
-}
-
-static int atmel_abdac_close(struct snd_pcm_substream *substream)
-{
- struct atmel_abdac *dac = snd_pcm_substream_chip(substream);
- dac->substream = NULL;
- return 0;
-}
-
-static int atmel_abdac_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- struct atmel_abdac *dac = snd_pcm_substream_chip(substream);
- int retval;
-
- retval = snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
- if (retval < 0)
- return retval;
- /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
- if (retval == 1)
- if (test_and_clear_bit(DMA_READY, &dac->flags))
- dw_dma_cyclic_free(dac->dma.chan);
-
- return retval;
-}
-
-static int atmel_abdac_hw_free(struct snd_pcm_substream *substream)
-{
- struct atmel_abdac *dac = snd_pcm_substream_chip(substream);
- if (test_and_clear_bit(DMA_READY, &dac->flags))
- dw_dma_cyclic_free(dac->dma.chan);
- return snd_pcm_lib_free_pages(substream);
-}
-
-static int atmel_abdac_prepare(struct snd_pcm_substream *substream)
-{
- struct atmel_abdac *dac = snd_pcm_substream_chip(substream);
- int retval;
-
- retval = clk_set_rate(dac->sample_clk, 256 * substream->runtime->rate);
- if (retval)
- return retval;
-
- if (!test_bit(DMA_READY, &dac->flags))
- retval = atmel_abdac_prepare_dma(dac, substream, DMA_TO_DEVICE);
-
- return retval;
-}
-
-static int atmel_abdac_trigger(struct snd_pcm_substream *substream, int cmd)
-{
- struct atmel_abdac *dac = snd_pcm_substream_chip(substream);
- int retval = 0;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
- case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
- case SNDRV_PCM_TRIGGER_START:
- clk_prepare_enable(dac->sample_clk);
- retval = dw_dma_cyclic_start(dac->dma.chan);
- if (retval)
- goto out;
- dac_writel(dac, CTRL, DAC_BIT(EN));
- break;
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */
- case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */
- case SNDRV_PCM_TRIGGER_STOP:
- dw_dma_cyclic_stop(dac->dma.chan);
- dac_writel(dac, DATA, 0);
- dac_writel(dac, CTRL, 0);
- clk_disable_unprepare(dac->sample_clk);
- break;
- default:
- retval = -EINVAL;
- break;
- }
-out:
- return retval;
-}
-
-static snd_pcm_uframes_t
-atmel_abdac_pointer(struct snd_pcm_substream *substream)
-{
- struct atmel_abdac *dac = snd_pcm_substream_chip(substream);
- struct snd_pcm_runtime *runtime = substream->runtime;
- snd_pcm_uframes_t frames;
- unsigned long bytes;
-
- bytes = dw_dma_get_src_addr(dac->dma.chan);
- bytes -= runtime->dma_addr;
-
- frames = bytes_to_frames(runtime, bytes);
- if (frames >= runtime->buffer_size)
- frames -= runtime->buffer_size;
-
- return frames;
-}
-
-static irqreturn_t abdac_interrupt(int irq, void *dev_id)
-{
- struct atmel_abdac *dac = dev_id;
- u32 status;
-
- status = dac_readl(dac, INT_STATUS);
- if (status & DAC_BIT(UNDERRUN)) {
- dev_err(&dac->pdev->dev, "underrun detected\n");
- dac_writel(dac, INT_CLR, DAC_BIT(UNDERRUN));
- } else {
- dev_err(&dac->pdev->dev, "spurious interrupt (status=0x%x)\n",
- status);
- dac_writel(dac, INT_CLR, status);
- }
-
- return IRQ_HANDLED;
-}
-
-static struct snd_pcm_ops atmel_abdac_ops = {
- .open = atmel_abdac_open,
- .close = atmel_abdac_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = atmel_abdac_hw_params,
- .hw_free = atmel_abdac_hw_free,
- .prepare = atmel_abdac_prepare,
- .trigger = atmel_abdac_trigger,
- .pointer = atmel_abdac_pointer,
-};
-
-static int atmel_abdac_pcm_new(struct atmel_abdac *dac)
-{
- struct snd_pcm_hardware hw = atmel_abdac_hw;
- struct snd_pcm *pcm;
- int retval;
-
- retval = snd_pcm_new(dac->card, dac->card->shortname,
- dac->pdev->id, 1, 0, &pcm);
- if (retval)
- return retval;
-
- strcpy(pcm->name, dac->card->shortname);
- pcm->private_data = dac;
- pcm->info_flags = 0;
- dac->pcm = pcm;
-
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &atmel_abdac_ops);
-
- retval = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
- &dac->pdev->dev, hw.periods_min * hw.period_bytes_min,
- hw.buffer_bytes_max);
-
- return retval;
-}
-
-static bool filter(struct dma_chan *chan, void *slave)
-{
- struct dw_dma_slave *dws = slave;
-
- if (dws->dma_dev == chan->device->dev) {
- chan->private = dws;
- return true;
- } else
- return false;
-}
-
-static int set_sample_rates(struct atmel_abdac *dac)
-{
- long new_rate = RATE_MAX;
- int retval = -EINVAL;
- int index = 0;
-
- /* we start at 192 kHz and work our way down to 5112 Hz */
- while (new_rate >= RATE_MIN && index < (MAX_NUM_RATES + 1)) {
- new_rate = clk_round_rate(dac->sample_clk, 256 * new_rate);
- if (new_rate <= 0)
- break;
- /* make sure we are below the ABDAC clock */
- if (index < MAX_NUM_RATES &&
- new_rate <= clk_get_rate(dac->pclk)) {
- dac->rates[index] = new_rate / 256;
- index++;
- }
- /* divide by 256 and then by two to get next rate */
- new_rate /= 256 * 2;
- }
-
- if (index) {
- int i;
-
- /* reverse array, smallest go first */
- for (i = 0; i < (index / 2); i++) {
- unsigned int tmp = dac->rates[index - 1 - i];
- dac->rates[index - 1 - i] = dac->rates[i];
- dac->rates[i] = tmp;
- }
-
- dac->constraints_rates.count = index;
- dac->constraints_rates.list = dac->rates;
- dac->constraints_rates.mask = 0;
- dac->rates_num = index;
-
- retval = 0;
- }
-
- return retval;
-}
-
-static int atmel_abdac_probe(struct platform_device *pdev)
-{
- struct snd_card *card;
- struct atmel_abdac *dac;
- struct resource *regs;
- struct atmel_abdac_pdata *pdata;
- struct clk *pclk;
- struct clk *sample_clk;
- int retval;
- int irq;
-
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!regs) {
- dev_dbg(&pdev->dev, "no memory resource\n");
- return -ENXIO;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_dbg(&pdev->dev, "could not get IRQ number\n");
- return irq;
- }
-
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_dbg(&pdev->dev, "no platform data\n");
- return -ENXIO;
- }
-
- pclk = clk_get(&pdev->dev, "pclk");
- if (IS_ERR(pclk)) {
- dev_dbg(&pdev->dev, "no peripheral clock\n");
- return PTR_ERR(pclk);
- }
- sample_clk = clk_get(&pdev->dev, "sample_clk");
- if (IS_ERR(sample_clk)) {
- dev_dbg(&pdev->dev, "no sample clock\n");
- retval = PTR_ERR(sample_clk);
- goto out_put_pclk;
- }
- clk_prepare_enable(pclk);
-
- retval = snd_card_new(&pdev->dev, SNDRV_DEFAULT_IDX1,
- SNDRV_DEFAULT_STR1, THIS_MODULE,
- sizeof(struct atmel_abdac), &card);
- if (retval) {
- dev_dbg(&pdev->dev, "could not create sound card device\n");
- goto out_put_sample_clk;
- }
-
- dac = get_dac(card);
-
- dac->irq = irq;
- dac->card = card;
- dac->pclk = pclk;
- dac->sample_clk = sample_clk;
- dac->pdev = pdev;
-
- retval = set_sample_rates(dac);
- if (retval < 0) {
- dev_dbg(&pdev->dev, "could not set supported rates\n");
- goto out_free_card;
- }
-
- dac->regs = ioremap(regs->start, resource_size(regs));
- if (!dac->regs) {
- dev_dbg(&pdev->dev, "could not remap register memory\n");
- retval = -ENOMEM;
- goto out_free_card;
- }
-
- /* make sure the DAC is silent and disabled */
- dac_writel(dac, DATA, 0);
- dac_writel(dac, CTRL, 0);
-
- retval = request_irq(irq, abdac_interrupt, 0, "abdac", dac);
- if (retval) {
- dev_dbg(&pdev->dev, "could not request irq\n");
- goto out_unmap_regs;
- }
-
- if (pdata->dws.dma_dev) {
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- dac->dma.chan = dma_request_channel(mask, filter, &pdata->dws);
- if (dac->dma.chan) {
- struct dma_slave_config dma_conf = {
- .dst_addr = regs->start + DAC_DATA,
- .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
- .src_maxburst = 1,
- .dst_maxburst = 1,
- .direction = DMA_MEM_TO_DEV,
- .device_fc = false,
- };
-
- dmaengine_slave_config(dac->dma.chan, &dma_conf);
- }
- }
- if (!pdata->dws.dma_dev || !dac->dma.chan) {
- dev_dbg(&pdev->dev, "DMA not available\n");
- retval = -ENODEV;
- goto out_unmap_regs;
- }
-
- strcpy(card->driver, "Atmel ABDAC");
- strcpy(card->shortname, "Atmel ABDAC");
- sprintf(card->longname, "Atmel Audio Bitstream DAC");
-
- retval = atmel_abdac_pcm_new(dac);
- if (retval) {
- dev_dbg(&pdev->dev, "could not register ABDAC pcm device\n");
- goto out_release_dma;
- }
-
- retval = snd_card_register(card);
- if (retval) {
- dev_dbg(&pdev->dev, "could not register sound card\n");
- goto out_release_dma;
- }
-
- platform_set_drvdata(pdev, card);
-
- dev_info(&pdev->dev, "Atmel ABDAC at 0x%p using %s\n",
- dac->regs, dev_name(&dac->dma.chan->dev->device));
-
- return retval;
-
-out_release_dma:
- dma_release_channel(dac->dma.chan);
- dac->dma.chan = NULL;
-out_unmap_regs:
- iounmap(dac->regs);
-out_free_card:
- snd_card_free(card);
-out_put_sample_clk:
- clk_put(sample_clk);
- clk_disable_unprepare(pclk);
-out_put_pclk:
- clk_put(pclk);
- return retval;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int atmel_abdac_suspend(struct device *pdev)
-{
- struct snd_card *card = dev_get_drvdata(pdev);
- struct atmel_abdac *dac = card->private_data;
-
- dw_dma_cyclic_stop(dac->dma.chan);
- clk_disable_unprepare(dac->sample_clk);
- clk_disable_unprepare(dac->pclk);
-
- return 0;
-}
-
-static int atmel_abdac_resume(struct device *pdev)
-{
- struct snd_card *card = dev_get_drvdata(pdev);
- struct atmel_abdac *dac = card->private_data;
-
- clk_prepare_enable(dac->pclk);
- clk_prepare_enable(dac->sample_clk);
- if (test_bit(DMA_READY, &dac->flags))
- dw_dma_cyclic_start(dac->dma.chan);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(atmel_abdac_pm, atmel_abdac_suspend, atmel_abdac_resume);
-#define ATMEL_ABDAC_PM_OPS &atmel_abdac_pm
-#else
-#define ATMEL_ABDAC_PM_OPS NULL
-#endif
-
-static int atmel_abdac_remove(struct platform_device *pdev)
-{
- struct snd_card *card = platform_get_drvdata(pdev);
- struct atmel_abdac *dac = get_dac(card);
-
- clk_put(dac->sample_clk);
- clk_disable_unprepare(dac->pclk);
- clk_put(dac->pclk);
-
- dma_release_channel(dac->dma.chan);
- dac->dma.chan = NULL;
- iounmap(dac->regs);
- free_irq(dac->irq, dac);
- snd_card_free(card);
-
- return 0;
-}
-
-static struct platform_driver atmel_abdac_driver = {
- .remove = atmel_abdac_remove,
- .driver = {
- .name = "atmel_abdac",
- .pm = ATMEL_ABDAC_PM_OPS,
- },
-};
-
-static int __init atmel_abdac_init(void)
-{
- return platform_driver_probe(&atmel_abdac_driver,
- atmel_abdac_probe);
-}
-module_init(atmel_abdac_init);
-
-static void __exit atmel_abdac_exit(void)
-{
- platform_driver_unregister(&atmel_abdac_driver);
-}
-module_exit(atmel_abdac_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Driver for Atmel Audio Bitstream DAC (ABDAC)");
-MODULE_AUTHOR("Hans-Christian Egtvedt <[email protected]>");
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index 6dad042630d8..9d2c9d9af688 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -11,8 +11,6 @@
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/device.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
#include <linux/atmel_pdc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -34,36 +32,14 @@
#include <sound/atmel-ac97c.h>
#include <sound/memalloc.h>
-#include <linux/platform_data/dma-dw.h>
-#include <linux/dma/dw.h>
-
-#ifdef CONFIG_AVR32
-#include <mach/cpu.h>
-#else
-#define cpu_is_at32ap7000() 0
-#endif
-
#include "ac97c.h"
-enum {
- DMA_TX_READY = 0,
- DMA_RX_READY,
- DMA_TX_CHAN_PRESENT,
- DMA_RX_CHAN_PRESENT,
-};
-
/* Serialize access to opened variable */
static DEFINE_MUTEX(opened_mutex);
-struct atmel_ac97c_dma {
- struct dma_chan *rx_chan;
- struct dma_chan *tx_chan;
-};
-
struct atmel_ac97c {
struct clk *pclk;
struct platform_device *pdev;
- struct atmel_ac97c_dma dma;
struct snd_pcm_substream *playback_substream;
struct snd_pcm_substream *capture_substream;
@@ -74,7 +50,6 @@ struct atmel_ac97c {
u64 cur_format;
unsigned int cur_rate;
- unsigned long flags;
int playback_period, capture_period;
/* Serialize access to opened variable */
spinlock_t lock;
@@ -91,65 +66,6 @@ struct atmel_ac97c {
#define ac97c_readl(chip, reg) \
__raw_readl((chip)->regs + AC97C_##reg)
-/* This function is called by the DMA driver. */
-static void atmel_ac97c_dma_playback_period_done(void *arg)
-{
- struct atmel_ac97c *chip = arg;
- snd_pcm_period_elapsed(chip->playback_substream);
-}
-
-static void atmel_ac97c_dma_capture_period_done(void *arg)
-{
- struct atmel_ac97c *chip = arg;
- snd_pcm_period_elapsed(chip->capture_substream);
-}
-
-static int atmel_ac97c_prepare_dma(struct atmel_ac97c *chip,
- struct snd_pcm_substream *substream,
- enum dma_transfer_direction direction)
-{
- struct dma_chan *chan;
- struct dw_cyclic_desc *cdesc;
- struct snd_pcm_runtime *runtime = substream->runtime;
- unsigned long buffer_len, period_len;
-
- /*
- * We don't do DMA on "complex" transfers, i.e. with
- * non-halfword-aligned buffers or lengths.
- */
- if (runtime->dma_addr & 1 || runtime->buffer_size & 1) {
- dev_dbg(&chip->pdev->dev, "too complex transfer\n");
- return -EINVAL;
- }
-
- if (direction == DMA_MEM_TO_DEV)
- chan = chip->dma.tx_chan;
- else
- chan = chip->dma.rx_chan;
-
- buffer_len = frames_to_bytes(runtime, runtime->buffer_size);
- period_len = frames_to_bytes(runtime, runtime->period_size);
-
- cdesc = dw_dma_cyclic_prep(chan, runtime->dma_addr, buffer_len,
- period_len, direction);
- if (IS_ERR(cdesc)) {
- dev_dbg(&chip->pdev->dev, "could not prepare cyclic DMA\n");
- return PTR_ERR(cdesc);
- }
-
- if (direction == DMA_MEM_TO_DEV) {
- cdesc->period_callback = atmel_ac97c_dma_playback_period_done;
- set_bit(DMA_TX_READY, &chip->flags);
- } else {
- cdesc->period_callback = atmel_ac97c_dma_capture_period_done;
- set_bit(DMA_RX_READY, &chip->flags);
- }
-
- cdesc->period_callback_param = chip;
-
- return 0;
-}
-
static struct snd_pcm_hardware atmel_ac97c_hw = {
.info = (SNDRV_PCM_INFO_MMAP
| SNDRV_PCM_INFO_MMAP_VALID
@@ -254,13 +170,7 @@ static int atmel_ac97c_playback_hw_params(struct snd_pcm_substream *substream,
params_buffer_bytes(hw_params));
if (retval < 0)
return retval;
- /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
- if (cpu_is_at32ap7000()) {
- /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
- if (retval == 1)
- if (test_and_clear_bit(DMA_TX_READY, &chip->flags))
- dw_dma_cyclic_free(chip->dma.tx_chan);
- }
+
/* Set restrictions to params. */
mutex_lock(&opened_mutex);
chip->cur_rate = params_rate(hw_params);
@@ -280,10 +190,6 @@ static int atmel_ac97c_capture_hw_params(struct snd_pcm_substream *substream,
params_buffer_bytes(hw_params));
if (retval < 0)
return retval;
- /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
- if (cpu_is_at32ap7000() && retval == 1)
- if (test_and_clear_bit(DMA_RX_READY, &chip->flags))
- dw_dma_cyclic_free(chip->dma.rx_chan);
/* Set restrictions to params. */
mutex_lock(&opened_mutex);
@@ -294,26 +200,6 @@ static int atmel_ac97c_capture_hw_params(struct snd_pcm_substream *substream,
return retval;
}
-static int atmel_ac97c_playback_hw_free(struct snd_pcm_substream *substream)
-{
- struct atmel_ac97c *chip = snd_pcm_substream_chip(substream);
- if (cpu_is_at32ap7000()) {
- if (test_and_clear_bit(DMA_TX_READY, &chip->flags))
- dw_dma_cyclic_free(chip->dma.tx_chan);
- }
- return snd_pcm_lib_free_pages(substream);
-}
-
-static int atmel_ac97c_capture_hw_free(struct snd_pcm_substream *substream)
-{
- struct atmel_ac97c *chip = snd_pcm_substream_chip(substream);
- if (cpu_is_at32ap7000()) {
- if (test_and_clear_bit(DMA_RX_READY, &chip->flags))
- dw_dma_cyclic_free(chip->dma.rx_chan);
- }
- return snd_pcm_lib_free_pages(substream);
-}
-
static int atmel_ac97c_playback_prepare(struct snd_pcm_substream *substream)
{
struct atmel_ac97c *chip = snd_pcm_substream_chip(substream);
@@ -349,8 +235,6 @@ static int atmel_ac97c_playback_prepare(struct snd_pcm_substream *substream)
switch (runtime->format) {
case SNDRV_PCM_FORMAT_S16_LE:
- if (cpu_is_at32ap7000())
- word |= AC97C_CMR_CEM_LITTLE;
break;
case SNDRV_PCM_FORMAT_S16_BE: /* fall through */
word &= ~(AC97C_CMR_CEM_LITTLE);
@@ -389,18 +273,11 @@ static int atmel_ac97c_playback_prepare(struct snd_pcm_substream *substream)
dev_dbg(&chip->pdev->dev, "could not set rate %d Hz\n",
runtime->rate);
- if (cpu_is_at32ap7000()) {
- if (!test_bit(DMA_TX_READY, &chip->flags))
- retval = atmel_ac97c_prepare_dma(chip, substream,
- DMA_MEM_TO_DEV);
- } else {
- /* Initialize and start the PDC */
- writel(runtime->dma_addr, chip->regs + ATMEL_PDC_TPR);
- writel(block_size / 2, chip->regs + ATMEL_PDC_TCR);
- writel(runtime->dma_addr + block_size,
- chip->regs + ATMEL_PDC_TNPR);
- writel(block_size / 2, chip->regs + ATMEL_PDC_TNCR);
- }
+ /* Initialize and start the PDC */
+ writel(runtime->dma_addr, chip->regs + ATMEL_PDC_TPR);
+ writel(block_size / 2, chip->regs + ATMEL_PDC_TCR);
+ writel(runtime->dma_addr + block_size, chip->regs + ATMEL_PDC_TNPR);
+ writel(block_size / 2, chip->regs + ATMEL_PDC_TNCR);
return retval;
}
@@ -440,8 +317,6 @@ static int atmel_ac97c_capture_prepare(struct snd_pcm_substream *substream)
switch (runtime->format) {
case SNDRV_PCM_FORMAT_S16_LE:
- if (cpu_is_at32ap7000())
- word |= AC97C_CMR_CEM_LITTLE;
break;
case SNDRV_PCM_FORMAT_S16_BE: /* fall through */
word &= ~(AC97C_CMR_CEM_LITTLE);
@@ -480,18 +355,11 @@ static int atmel_ac97c_capture_prepare(struct snd_pcm_substream *substream)
dev_dbg(&chip->pdev->dev, "could not set rate %d Hz\n",
runtime->rate);
- if (cpu_is_at32ap7000()) {
- if (!test_bit(DMA_RX_READY, &chip->flags))
- retval = atmel_ac97c_prepare_dma(chip, substream,
- DMA_DEV_TO_MEM);
- } else {
- /* Initialize and start the PDC */
- writel(runtime->dma_addr, chip->regs + ATMEL_PDC_RPR);
- writel(block_size / 2, chip->regs + ATMEL_PDC_RCR);
- writel(runtime->dma_addr + block_size,
- chip->regs + ATMEL_PDC_RNPR);
- writel(block_size / 2, chip->regs + ATMEL_PDC_RNCR);
- }
+ /* Initialize and start the PDC */
+ writel(runtime->dma_addr, chip->regs + ATMEL_PDC_RPR);
+ writel(block_size / 2, chip->regs + ATMEL_PDC_RCR);
+ writel(runtime->dma_addr + block_size, chip->regs + ATMEL_PDC_RNPR);
+ writel(block_size / 2, chip->regs + ATMEL_PDC_RNCR);
return retval;
}
@@ -501,7 +369,6 @@ atmel_ac97c_playback_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct atmel_ac97c *chip = snd_pcm_substream_chip(substream);
unsigned long camr, ptcr = 0;
- int retval = 0;
camr = ac97c_readl(chip, CAMR);
@@ -509,35 +376,23 @@ atmel_ac97c_playback_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
case SNDRV_PCM_TRIGGER_START:
- if (cpu_is_at32ap7000()) {
- retval = dw_dma_cyclic_start(chip->dma.tx_chan);
- if (retval)
- goto out;
- } else {
- ptcr = ATMEL_PDC_TXTEN;
- }
+ ptcr = ATMEL_PDC_TXTEN;
camr |= AC97C_CMR_CENA | AC97C_CSR_ENDTX;
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */
case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */
case SNDRV_PCM_TRIGGER_STOP:
- if (cpu_is_at32ap7000())
- dw_dma_cyclic_stop(chip->dma.tx_chan);
- else
- ptcr |= ATMEL_PDC_TXTDIS;
+ ptcr |= ATMEL_PDC_TXTDIS;
if (chip->opened <= 1)
camr &= ~AC97C_CMR_CENA;
break;
default:
- retval = -EINVAL;
- goto out;
+ return -EINVAL;
}
ac97c_writel(chip, CAMR, camr);
- if (!cpu_is_at32ap7000())
- writel(ptcr, chip->regs + ATMEL_PDC_PTCR);
-out:
- return retval;
+ writel(ptcr, chip->regs + ATMEL_PDC_PTCR);
+ return 0;
}
static int
@@ -545,7 +400,6 @@ atmel_ac97c_capture_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct atmel_ac97c *chip = snd_pcm_substream_chip(substream);
unsigned long camr, ptcr = 0;
- int retval = 0;
camr = ac97c_readl(chip, CAMR);
ptcr = readl(chip->regs + ATMEL_PDC_PTSR);
@@ -554,35 +408,23 @@ atmel_ac97c_capture_trigger(struct snd_pcm_substream *substream, int cmd)
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
case SNDRV_PCM_TRIGGER_START:
- if (cpu_is_at32ap7000()) {
- retval = dw_dma_cyclic_start(chip->dma.rx_chan);
- if (retval)
- goto out;
- } else {
- ptcr = ATMEL_PDC_RXTEN;
- }
+ ptcr = ATMEL_PDC_RXTEN;
camr |= AC97C_CMR_CENA | AC97C_CSR_ENDRX;
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */
case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */
case SNDRV_PCM_TRIGGER_STOP:
- if (cpu_is_at32ap7000())
- dw_dma_cyclic_stop(chip->dma.rx_chan);
- else
- ptcr |= (ATMEL_PDC_RXTDIS);
+ ptcr |= ATMEL_PDC_RXTDIS;
if (chip->opened <= 1)
camr &= ~AC97C_CMR_CENA;
break;
default:
- retval = -EINVAL;
- break;
+ return -EINVAL;
}
ac97c_writel(chip, CAMR, camr);
- if (!cpu_is_at32ap7000())
- writel(ptcr, chip->regs + ATMEL_PDC_PTCR);
-out:
- return retval;
+ writel(ptcr, chip->regs + ATMEL_PDC_PTCR);
+ return 0;
}
static snd_pcm_uframes_t
@@ -593,10 +435,7 @@ atmel_ac97c_playback_pointer(struct snd_pcm_substream *substream)
snd_pcm_uframes_t frames;
unsigned long bytes;
- if (cpu_is_at32ap7000())
- bytes = dw_dma_get_src_addr(chip->dma.tx_chan);
- else
- bytes = readl(chip->regs + ATMEL_PDC_TPR);
+ bytes = readl(chip->regs + ATMEL_PDC_TPR);
bytes -= runtime->dma_addr;
frames = bytes_to_frames(runtime, bytes);
@@ -613,10 +452,7 @@ atmel_ac97c_capture_pointer(struct snd_pcm_substream *substream)
snd_pcm_uframes_t frames;
unsigned long bytes;
- if (cpu_is_at32ap7000())
- bytes = dw_dma_get_dst_addr(chip->dma.rx_chan);
- else
- bytes = readl(chip->regs + ATMEL_PDC_RPR);
+ bytes = readl(chip->regs + ATMEL_PDC_RPR);
bytes -= runtime->dma_addr;
frames = bytes_to_frames(runtime, bytes);
@@ -630,7 +466,7 @@ static struct snd_pcm_ops atmel_ac97_playback_ops = {
.close = atmel_ac97c_playback_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = atmel_ac97c_playback_hw_params,
- .hw_free = atmel_ac97c_playback_hw_free,
+ .hw_free = snd_pcm_lib_free_pages,
.prepare = atmel_ac97c_playback_prepare,
.trigger = atmel_ac97c_playback_trigger,
.pointer = atmel_ac97c_playback_pointer,
@@ -641,7 +477,7 @@ static struct snd_pcm_ops atmel_ac97_capture_ops = {
.close = atmel_ac97c_capture_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = atmel_ac97c_capture_hw_params,
- .hw_free = atmel_ac97c_capture_hw_free,
+ .hw_free = snd_pcm_lib_free_pages,
.prepare = atmel_ac97c_capture_prepare,
.trigger = atmel_ac97c_capture_trigger,
.pointer = atmel_ac97c_capture_pointer,
@@ -666,49 +502,40 @@ static irqreturn_t atmel_ac97c_interrupt(int irq, void *dev)
casr & AC97C_CSR_TXEMPTY ? " TXEMPTY" : "",
casr & AC97C_CSR_TXRDY ? " TXRDY" : "",
!casr ? " NONE" : "");
- if (!cpu_is_at32ap7000()) {
- if ((casr & camr) & AC97C_CSR_ENDTX) {
- runtime = chip->playback_substream->runtime;
- block_size = frames_to_bytes(runtime,
- runtime->period_size);
- chip->playback_period++;
-
- if (chip->playback_period == runtime->periods)
- chip->playback_period = 0;
- next_period = chip->playback_period + 1;
- if (next_period == runtime->periods)
- next_period = 0;
-
- offset = block_size * next_period;
-
- writel(runtime->dma_addr + offset,
- chip->regs + ATMEL_PDC_TNPR);
- writel(block_size / 2,
- chip->regs + ATMEL_PDC_TNCR);
-
- snd_pcm_period_elapsed(
- chip->playback_substream);
- }
- if ((casr & camr) & AC97C_CSR_ENDRX) {
- runtime = chip->capture_substream->runtime;
- block_size = frames_to_bytes(runtime,
- runtime->period_size);
- chip->capture_period++;
-
- if (chip->capture_period == runtime->periods)
- chip->capture_period = 0;
- next_period = chip->capture_period + 1;
- if (next_period == runtime->periods)
- next_period = 0;
-
- offset = block_size * next_period;
-
- writel(runtime->dma_addr + offset,
- chip->regs + ATMEL_PDC_RNPR);
- writel(block_size / 2,
- chip->regs + ATMEL_PDC_RNCR);
- snd_pcm_period_elapsed(chip->capture_substream);
- }
+ if ((casr & camr) & AC97C_CSR_ENDTX) {
+ runtime = chip->playback_substream->runtime;
+ block_size = frames_to_bytes(runtime, runtime->period_size);
+ chip->playback_period++;
+
+ if (chip->playback_period == runtime->periods)
+ chip->playback_period = 0;
+ next_period = chip->playback_period + 1;
+ if (next_period == runtime->periods)
+ next_period = 0;
+
+ offset = block_size * next_period;
+
+ writel(runtime->dma_addr + offset, chip->regs + ATMEL_PDC_TNPR);
+ writel(block_size / 2, chip->regs + ATMEL_PDC_TNCR);
+
+ snd_pcm_period_elapsed(chip->playback_substream);
+ }
+ if ((casr & camr) & AC97C_CSR_ENDRX) {
+ runtime = chip->capture_substream->runtime;
+ block_size = frames_to_bytes(runtime, runtime->period_size);
+ chip->capture_period++;
+
+ if (chip->capture_period == runtime->periods)
+ chip->capture_period = 0;
+ next_period = chip->capture_period + 1;
+ if (next_period == runtime->periods)
+ next_period = 0;
+
+ offset = block_size * next_period;
+
+ writel(runtime->dma_addr + offset, chip->regs + ATMEL_PDC_RNPR);
+ writel(block_size / 2, chip->regs + ATMEL_PDC_RNCR);
+ snd_pcm_period_elapsed(chip->capture_substream);
}
retval = IRQ_HANDLED;
}
@@ -763,29 +590,20 @@ static int atmel_ac97c_pcm_new(struct atmel_ac97c *chip)
{
struct snd_pcm *pcm;
struct snd_pcm_hardware hw = atmel_ac97c_hw;
- int capture, playback, retval, err;
+ int retval;
- capture = test_bit(DMA_RX_CHAN_PRESENT, &chip->flags);
- playback = test_bit(DMA_TX_CHAN_PRESENT, &chip->flags);
+ retval = snd_ac97_pcm_assign(chip->ac97_bus,
+ ARRAY_SIZE(at91_ac97_pcm_defs),
+ at91_ac97_pcm_defs);
+ if (retval)
+ return retval;
- if (!cpu_is_at32ap7000()) {
- err = snd_ac97_pcm_assign(chip->ac97_bus,
- ARRAY_SIZE(at91_ac97_pcm_defs),
- at91_ac97_pcm_defs);
- if (err)
- return err;
- }
- retval = snd_pcm_new(chip->card, chip->card->shortname,
- 0, playback, capture, &pcm);
+ retval = snd_pcm_new(chip->card, chip->card->shortname, 0, 1, 1, &pcm);
if (retval)
return retval;
- if (capture)
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
- &atmel_ac97_capture_ops);
- if (playback)
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
- &atmel_ac97_playback_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &atmel_ac97_capture_ops);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &atmel_ac97_playback_ops);
retval = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
&chip->pdev->dev, hw.periods_min * hw.period_bytes_min,
@@ -875,17 +693,6 @@ timed_out:
return 0xffff;
}
-static bool filter(struct dma_chan *chan, void *slave)
-{
- struct dw_dma_slave *dws = slave;
-
- if (dws->dma_dev == chan->device->dev) {
- chan->private = dws;
- return true;
- } else
- return false;
-}
-
static void atmel_ac97c_reset(struct atmel_ac97c *chip)
{
ac97c_writel(chip, MR, 0);
@@ -967,16 +774,11 @@ static int atmel_ac97c_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_dbg(&pdev->dev, "could not get irq\n");
- return -ENXIO;
- }
-
- if (cpu_is_at32ap7000()) {
- pclk = clk_get(&pdev->dev, "pclk");
- } else {
- pclk = clk_get(&pdev->dev, "ac97_clk");
+ dev_dbg(&pdev->dev, "could not get irq: %d\n", irq);
+ return irq;
}
+ pclk = clk_get(&pdev->dev, "ac97_clk");
if (IS_ERR(pclk)) {
dev_dbg(&pdev->dev, "no peripheral clock\n");
return PTR_ERR(pclk);
@@ -1047,88 +849,16 @@ static int atmel_ac97c_probe(struct platform_device *pdev)
goto err_ac97_bus;
}
- if (cpu_is_at32ap7000()) {
- if (pdata->rx_dws.dma_dev) {
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- chip->dma.rx_chan = dma_request_channel(mask, filter,
- &pdata->rx_dws);
- if (chip->dma.rx_chan) {
- struct dma_slave_config dma_conf = {
- .src_addr = regs->start + AC97C_CARHR +
- 2,
- .src_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES,
- .src_maxburst = 1,
- .dst_maxburst = 1,
- .direction = DMA_DEV_TO_MEM,
- .device_fc = false,
- };
-
- dmaengine_slave_config(chip->dma.rx_chan,
- &dma_conf);
- }
-
- dev_info(&chip->pdev->dev, "using %s for DMA RX\n",
- dev_name(&chip->dma.rx_chan->dev->device));
- set_bit(DMA_RX_CHAN_PRESENT, &chip->flags);
- }
-
- if (pdata->tx_dws.dma_dev) {
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- chip->dma.tx_chan = dma_request_channel(mask, filter,
- &pdata->tx_dws);
- if (chip->dma.tx_chan) {
- struct dma_slave_config dma_conf = {
- .dst_addr = regs->start + AC97C_CATHR +
- 2,
- .dst_addr_width =
- DMA_SLAVE_BUSWIDTH_2_BYTES,
- .src_maxburst = 1,
- .dst_maxburst = 1,
- .direction = DMA_MEM_TO_DEV,
- .device_fc = false,
- };
-
- dmaengine_slave_config(chip->dma.tx_chan,
- &dma_conf);
- }
-
- dev_info(&chip->pdev->dev, "using %s for DMA TX\n",
- dev_name(&chip->dma.tx_chan->dev->device));
- set_bit(DMA_TX_CHAN_PRESENT, &chip->flags);
- }
-
- if (!test_bit(DMA_RX_CHAN_PRESENT, &chip->flags) &&
- !test_bit(DMA_TX_CHAN_PRESENT, &chip->flags)) {
- dev_dbg(&pdev->dev, "DMA not available\n");
- retval = -ENODEV;
- goto err_dma;
- }
- } else {
- /* Just pretend that we have DMA channel(for at91 i is actually
- * the PDC) */
- set_bit(DMA_RX_CHAN_PRESENT, &chip->flags);
- set_bit(DMA_TX_CHAN_PRESENT, &chip->flags);
- }
-
retval = atmel_ac97c_pcm_new(chip);
if (retval) {
dev_dbg(&pdev->dev, "could not register ac97 pcm device\n");
- goto err_dma;
+ goto err_ac97_bus;
}
retval = snd_card_register(card);
if (retval) {
dev_dbg(&pdev->dev, "could not register sound card\n");
- goto err_dma;
+ goto err_ac97_bus;
}
platform_set_drvdata(pdev, card);
@@ -1138,17 +868,6 @@ static int atmel_ac97c_probe(struct platform_device *pdev)
return 0;
-err_dma:
- if (cpu_is_at32ap7000()) {
- if (test_bit(DMA_RX_CHAN_PRESENT, &chip->flags))
- dma_release_channel(chip->dma.rx_chan);
- if (test_bit(DMA_TX_CHAN_PRESENT, &chip->flags))
- dma_release_channel(chip->dma.tx_chan);
- clear_bit(DMA_RX_CHAN_PRESENT, &chip->flags);
- clear_bit(DMA_TX_CHAN_PRESENT, &chip->flags);
- chip->dma.rx_chan = NULL;
- chip->dma.tx_chan = NULL;
- }
err_ac97_bus:
if (gpio_is_valid(chip->reset_pin))
gpio_free(chip->reset_pin);
@@ -1170,14 +889,7 @@ static int atmel_ac97c_suspend(struct device *pdev)
struct snd_card *card = dev_get_drvdata(pdev);
struct atmel_ac97c *chip = card->private_data;
- if (cpu_is_at32ap7000()) {
- if (test_bit(DMA_RX_READY, &chip->flags))
- dw_dma_cyclic_stop(chip->dma.rx_chan);
- if (test_bit(DMA_TX_READY, &chip->flags))
- dw_dma_cyclic_stop(chip->dma.tx_chan);
- }
clk_disable_unprepare(chip->pclk);
-
return 0;
}
@@ -1187,12 +899,6 @@ static int atmel_ac97c_resume(struct device *pdev)
struct atmel_ac97c *chip = card->private_data;
clk_prepare_enable(chip->pclk);
- if (cpu_is_at32ap7000()) {
- if (test_bit(DMA_RX_READY, &chip->flags))
- dw_dma_cyclic_start(chip->dma.rx_chan);
- if (test_bit(DMA_TX_READY, &chip->flags))
- dw_dma_cyclic_start(chip->dma.tx_chan);
- }
return 0;
}
@@ -1219,17 +925,6 @@ static int atmel_ac97c_remove(struct platform_device *pdev)
iounmap(chip->regs);
free_irq(chip->irq, chip);
- if (cpu_is_at32ap7000()) {
- if (test_bit(DMA_RX_CHAN_PRESENT, &chip->flags))
- dma_release_channel(chip->dma.rx_chan);
- if (test_bit(DMA_TX_CHAN_PRESENT, &chip->flags))
- dma_release_channel(chip->dma.tx_chan);
- clear_bit(DMA_RX_CHAN_PRESENT, &chip->flags);
- clear_bit(DMA_TX_CHAN_PRESENT, &chip->flags);
- chip->dma.rx_chan = NULL;
- chip->dma.tx_chan = NULL;
- }
-
snd_card_free(card);
return 0;
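With the dw_dmac cyclic path removed, ac97c.c is left with only the AT91 PDC double-buffer engine seen in the prepare and interrupt hunks above: each ENDTX/ENDRX reloads the "next" pointer/counter registers with the following period. A minimal sketch of the playback-side reload, reusing the ATMEL_PDC_* offsets from <linux/atmel_pdc.h> (helper name hypothetical; the counter counts 16-bit transfers here, hence the block_size / 2 writes above):

#include <linux/atmel_pdc.h>
#include <linux/io.h>
#include <linux/types.h>

static void example_pdc_queue_tx(void __iomem *regs, dma_addr_t dma_addr,
				 size_t period_bytes,
				 unsigned int next_period)
{
	writel(dma_addr + next_period * period_bytes,
	       regs + ATMEL_PDC_TNPR);
	writel(period_bytes / 2, regs + ATMEL_PDC_TNCR);
}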
diff --git a/sound/core/Kconfig b/sound/core/Kconfig
index 9749f9e8b45c..6e937a8146a1 100644
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -18,8 +18,12 @@ config SND_DMAENGINE_PCM
config SND_HWDEP
tristate
+config SND_SEQ_DEVICE
+ tristate
+
config SND_RAWMIDI
tristate
+ select SND_SEQ_DEVICE if SND_SEQUENCER != n
config SND_COMPRESS_OFFLOAD
tristate
@@ -33,38 +37,15 @@ config SND_JACK_INPUT_DEV
depends on SND_JACK
default y if INPUT=y || INPUT=SND
-config SND_SEQUENCER
- tristate "Sequencer support"
- select SND_TIMER
- help
- Say Y or M to enable MIDI sequencer and router support. This
- feature allows routing and enqueueing of MIDI events. Events
- can be processed at a given time.
-
- Many programs require this feature, so you should enable it
- unless you know what you're doing.
-
-config SND_SEQ_DUMMY
- tristate "Sequencer dummy client"
- depends on SND_SEQUENCER
- help
- Say Y here to enable the dummy sequencer client. This client
- is a simple MIDI-through client: all normal input events are
- redirected to the output port immediately.
-
- You don't need this unless you want to connect many MIDI
- devices or applications together.
-
- To compile this driver as a module, choose M here: the module
- will be called snd-seq-dummy.
-
config SND_OSSEMUL
+ bool "Enable OSS Emulation"
select SOUND_OSS_CORE
- bool
+ help
+ This option enables the build of the OSS emulation layer.
config SND_MIXER_OSS
tristate "OSS Mixer API"
- select SND_OSSEMUL
+ depends on SND_OSSEMUL
help
To enable OSS mixer API emulation (/dev/mixer*), say Y here
and read <file:Documentation/sound/alsa/OSS-Emulation.txt>.
@@ -76,7 +57,7 @@ config SND_MIXER_OSS
config SND_PCM_OSS
tristate "OSS PCM (digital audio) API"
- select SND_OSSEMUL
+ depends on SND_OSSEMUL
select SND_PCM
help
To enable OSS digital audio (PCM) emulation (/dev/dsp*), say Y
@@ -107,20 +88,6 @@ config SND_PCM_TIMER
For some embedded devices, we may disable it to reduce memory
footprint, about 20KB on x86_64 platform.
-config SND_SEQUENCER_OSS
- bool "OSS Sequencer API"
- depends on SND_SEQUENCER
- select SND_OSSEMUL
- help
- Say Y here to enable OSS sequencer emulation (both
- /dev/sequencer and /dev/music interfaces).
-
- Many programs still use the OSS API, so say Y.
-
- If you choose M in "Sequencer support" (SND_SEQUENCER),
- this will be compiled as a module. The module will be called
- snd-seq-oss.
-
config SND_HRTIMER
tristate "HR-timer backend support"
depends on HIGH_RES_TIMERS
@@ -133,14 +100,6 @@ config SND_HRTIMER
To compile this driver as a module, choose M here: the module
will be called snd-hrtimer.
-config SND_SEQ_HRTIMER_DEFAULT
- bool "Use HR-timer as default sequencer timer"
- depends on SND_HRTIMER && SND_SEQUENCER
- default y
- help
- Say Y here to use the HR-timer backend as the default sequencer
- timer.
-
config SND_DYNAMIC_MINORS
bool "Dynamic device file minor numbers"
help
diff --git a/sound/core/Makefile b/sound/core/Makefile
index e85d9dd12c2d..e2066e2ef9f8 100644
--- a/sound/core/Makefile
+++ b/sound/core/Makefile
@@ -22,6 +22,7 @@ snd-pcm-$(CONFIG_SND_PCM_IEC958) += pcm_iec958.o
# for trace-points
CFLAGS_pcm_lib.o := -I$(src)
+CFLAGS_pcm_native.o := -I$(src)
snd-pcm-dmaengine-objs := pcm_dmaengine.o
@@ -30,6 +31,7 @@ snd-timer-objs := timer.o
snd-hrtimer-objs := hrtimer.o
snd-rtctimer-objs := rtctimer.o
snd-hwdep-objs := hwdep.o
+snd-seq-device-objs := seq_device.o
snd-compress-objs := compress_offload.o
@@ -39,6 +41,7 @@ obj-$(CONFIG_SND_TIMER) += snd-timer.o
obj-$(CONFIG_SND_HRTIMER) += snd-hrtimer.o
obj-$(CONFIG_SND_PCM) += snd-pcm.o
obj-$(CONFIG_SND_DMAENGINE_PCM) += snd-pcm-dmaengine.o
+obj-$(CONFIG_SND_SEQ_DEVICE) += snd-seq-device.o
obj-$(CONFIG_SND_RAWMIDI) += snd-rawmidi.o
obj-$(CONFIG_SND_OSSEMUL) += oss/
diff --git a/sound/core/control.c b/sound/core/control.c
index 6362da17ac3f..3c6be1452e35 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -747,65 +747,45 @@ static int snd_ctl_card_info(struct snd_card *card, struct snd_ctl_file * ctl,
static int snd_ctl_elem_list(struct snd_card *card,
struct snd_ctl_elem_list __user *_list)
{
- struct list_head *plist;
struct snd_ctl_elem_list list;
struct snd_kcontrol *kctl;
- struct snd_ctl_elem_id *dst, *id;
+ struct snd_ctl_elem_id id;
unsigned int offset, space, jidx;
+ int err = 0;
if (copy_from_user(&list, _list, sizeof(list)))
return -EFAULT;
offset = list.offset;
space = list.space;
- /* try limit maximum space */
- if (space > 16384)
- return -ENOMEM;
+
+ down_read(&card->controls_rwsem);
+ list.count = card->controls_count;
+ list.used = 0;
if (space > 0) {
- /* allocate temporary buffer for atomic operation */
- dst = vmalloc(space * sizeof(struct snd_ctl_elem_id));
- if (dst == NULL)
- return -ENOMEM;
- down_read(&card->controls_rwsem);
- list.count = card->controls_count;
- plist = card->controls.next;
- while (plist != &card->controls) {
- if (offset == 0)
- break;
- kctl = snd_kcontrol(plist);
- if (offset < kctl->count)
- break;
- offset -= kctl->count;
- plist = plist->next;
- }
- list.used = 0;
- id = dst;
- while (space > 0 && plist != &card->controls) {
- kctl = snd_kcontrol(plist);
- for (jidx = offset; space > 0 && jidx < kctl->count; jidx++) {
- snd_ctl_build_ioff(id, kctl, jidx);
- id++;
- space--;
+ list_for_each_entry(kctl, &card->controls, list) {
+ if (offset >= kctl->count) {
+ offset -= kctl->count;
+ continue;
+ }
+ for (jidx = offset; jidx < kctl->count; jidx++) {
+ snd_ctl_build_ioff(&id, kctl, jidx);
+ if (copy_to_user(list.pids + list.used, &id,
+ sizeof(id))) {
+ err = -EFAULT;
+ goto out;
+ }
list.used++;
+ if (!--space)
+ goto out;
}
- plist = plist->next;
offset = 0;
}
- up_read(&card->controls_rwsem);
- if (list.used > 0 &&
- copy_to_user(list.pids, dst,
- list.used * sizeof(struct snd_ctl_elem_id))) {
- vfree(dst);
- return -EFAULT;
- }
- vfree(dst);
- } else {
- down_read(&card->controls_rwsem);
- list.count = card->controls_count;
- up_read(&card->controls_rwsem);
}
- if (copy_to_user(_list, &list, sizeof(list)))
- return -EFAULT;
- return 0;
+ out:
+ up_read(&card->controls_rwsem);
+ if (!err && copy_to_user(_list, &list, sizeof(list)))
+ err = -EFAULT;
+ return err;
}
static bool validate_element_member_dimension(struct snd_ctl_elem_info *info)
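The rewritten snd_ctl_elem_list() above still serves the SNDRV_CTL_IOCTL_ELEM_LIST paging contract: user space sets offset and space, the kernel fills count and used and copies at most space ids straight into list.pids. A minimal user-space sketch of that paging loop (assumes installed kernel UAPI headers and a control node at /dev/snd/controlC0):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/asound.h>

int main(void)
{
	struct snd_ctl_elem_list list;
	struct snd_ctl_elem_id ids[16];
	int fd = open("/dev/snd/controlC0", O_RDONLY);

	if (fd < 0)
		return 1;
	memset(&list, 0, sizeof(list));
	do {
		list.space = 16;		/* capacity of ids[] */
		list.pids = ids;
		if (ioctl(fd, SNDRV_CTL_IOCTL_ELEM_LIST, &list) < 0)
			break;
		for (unsigned int i = 0; i < list.used; i++)
			printf("numid %u: %s\n", ids[i].numid,
			       (const char *)ids[i].name);
		list.offset += list.used;	/* next page */
	} while (list.used && list.offset < list.count);
	close(fd);
	return 0;
}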
diff --git a/sound/core/ctljack.c b/sound/core/ctljack.c
index 84a3cd683068..0249d5e6ac23 100644
--- a/sound/core/ctljack.c
+++ b/sound/core/ctljack.c
@@ -23,7 +23,7 @@ static int jack_detect_kctl_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new jack_detect_kctl = {
+static const struct snd_kcontrol_new jack_detect_kctl = {
/* name is filled later */
.iface = SNDRV_CTL_ELEM_IFACE_CARD,
.access = SNDRV_CTL_ELEM_ACCESS_READ,
diff --git a/sound/core/info.c b/sound/core/info.c
index 8ab72e0f5932..bcf6a48cc70d 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -344,12 +344,12 @@ static ssize_t snd_info_text_entry_write(struct file *file,
}
}
if (next > buf->len) {
- char *nbuf = krealloc(buf->buffer, PAGE_ALIGN(next),
- GFP_KERNEL | __GFP_ZERO);
+ char *nbuf = kvzalloc(PAGE_ALIGN(next), GFP_KERNEL);
if (!nbuf) {
err = -ENOMEM;
goto error;
}
+ kvfree(buf->buffer);
buf->buffer = nbuf;
buf->len = PAGE_ALIGN(next);
}
@@ -427,7 +427,7 @@ static int snd_info_text_entry_release(struct inode *inode, struct file *file)
single_release(inode, file);
kfree(data->rbuffer);
if (data->wbuffer) {
- kfree(data->wbuffer->buffer);
+ kvfree(data->wbuffer->buffer);
kfree(data->wbuffer);
}
@@ -652,7 +652,6 @@ int snd_info_get_line(struct snd_info_buffer *buffer, char *line, int len)
*line = '\0';
return 0;
}
-
EXPORT_SYMBOL(snd_info_get_line);
/**
@@ -690,7 +689,6 @@ const char *snd_info_get_str(char *dest, const char *src, int len)
src++;
return src;
}
-
EXPORT_SYMBOL(snd_info_get_str);
/*
@@ -748,7 +746,6 @@ struct snd_info_entry *snd_info_create_module_entry(struct module * module,
entry->module = module;
return entry;
}
-
EXPORT_SYMBOL(snd_info_create_module_entry);
/**
@@ -772,7 +769,6 @@ struct snd_info_entry *snd_info_create_card_entry(struct snd_card *card,
}
return entry;
}
-
EXPORT_SYMBOL(snd_info_create_card_entry);
static void snd_info_disconnect(struct snd_info_entry *entry)
@@ -815,7 +811,6 @@ void snd_info_free_entry(struct snd_info_entry * entry)
entry->private_free(entry);
kfree(entry);
}
-
EXPORT_SYMBOL(snd_info_free_entry);
/**
@@ -858,7 +853,6 @@ int snd_info_register(struct snd_info_entry * entry)
mutex_unlock(&info_mutex);
return 0;
}
-
EXPORT_SYMBOL(snd_info_register);
/*
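The write path above now allocates its text buffer with kvzalloc(), which falls back to vmalloc when the request is too large for a contiguous kmalloc; a buffer obtained that way must be released with kvfree(), which is why both the resize and the release hunks change together. A minimal sketch of that pairing (helper name hypothetical):

#include <linux/errno.h>
#include <linux/mm.h>	/* kvzalloc(), kvfree() */

static int example_replace_buffer(void **bufp, size_t new_size)
{
	void *nbuf = kvzalloc(new_size, GFP_KERNEL);

	if (!nbuf)
		return -ENOMEM;	/* caller keeps the old buffer */
	kvfree(*bufp);
	*bufp = nbuf;
	return 0;
}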
diff --git a/sound/core/info_oss.c b/sound/core/info_oss.c
index 1478c8dfd473..f479374b6bd8 100644
--- a/sound/core/info_oss.c
+++ b/sound/core/info_oss.c
@@ -61,7 +61,6 @@ int snd_oss_info_register(int dev, int num, char *string)
mutex_unlock(&strings);
return 0;
}
-
EXPORT_SYMBOL(snd_oss_info_register);
static int snd_sndstat_show_strings(struct snd_info_buffer *buf, char *id, int dev)
diff --git a/sound/core/init.c b/sound/core/init.c
index d61d2b3cd521..b4365bcf28a7 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -452,7 +452,6 @@ int snd_card_disconnect(struct snd_card *card)
#endif
return 0;
}
-
EXPORT_SYMBOL(snd_card_disconnect);
static int snd_card_do_free(struct snd_card *card)
@@ -718,7 +717,7 @@ int snd_card_add_dev_attr(struct snd_card *card,
dev_err(card->dev, "Too many groups assigned\n");
return -ENOSPC;
-};
+}
EXPORT_SYMBOL_GPL(snd_card_add_dev_attr);
/**
@@ -775,7 +774,6 @@ int snd_card_register(struct snd_card *card)
#endif
return 0;
}
-
EXPORT_SYMBOL(snd_card_register);
#ifdef CONFIG_SND_PROC_FS
@@ -895,7 +893,6 @@ int snd_component_add(struct snd_card *card, const char *component)
strcat(card->components, component);
return 0;
}
-
EXPORT_SYMBOL(snd_component_add);
/**
@@ -930,7 +927,6 @@ int snd_card_file_add(struct snd_card *card, struct file *file)
spin_unlock(&card->files_lock);
return 0;
}
-
EXPORT_SYMBOL(snd_card_file_add);
/**
@@ -972,7 +968,6 @@ int snd_card_file_remove(struct snd_card *card, struct file *file)
put_device(&card->card_dev);
return 0;
}
-
EXPORT_SYMBOL(snd_card_file_remove);
#ifdef CONFIG_PM
@@ -1012,6 +1007,5 @@ int snd_power_wait(struct snd_card *card, unsigned int power_state)
remove_wait_queue(&card->power_sleep, &wait);
return result;
}
-
EXPORT_SYMBOL(snd_power_wait);
#endif /* CONFIG_PM */
diff --git a/sound/core/isadma.c b/sound/core/isadma.c
index 31e8544d7f2d..7a8515abb5f9 100644
--- a/sound/core/isadma.c
+++ b/sound/core/isadma.c
@@ -55,7 +55,6 @@ void snd_dma_program(unsigned long dma,
enable_dma(dma);
release_dma_lock(flags);
}
-
EXPORT_SYMBOL(snd_dma_program);
/**
@@ -73,7 +72,6 @@ void snd_dma_disable(unsigned long dma)
disable_dma(dma);
release_dma_lock(flags);
}
-
EXPORT_SYMBOL(snd_dma_disable);
/**
@@ -113,5 +111,4 @@ unsigned int snd_dma_pointer(unsigned long dma, unsigned int size)
else
return size - result;
}
-
EXPORT_SYMBOL(snd_dma_pointer);
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index f05cb6a8cbe0..7f89d3c79a4b 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -54,6 +54,7 @@ void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
pg = get_order(size);
return (void *) __get_free_pages(gfp_flags, pg);
}
+EXPORT_SYMBOL(snd_malloc_pages);
/**
* snd_free_pages - release the pages
@@ -71,6 +72,7 @@ void snd_free_pages(void *ptr, size_t size)
pg = get_order(size);
free_pages((unsigned long) ptr, pg);
}
+EXPORT_SYMBOL(snd_free_pages);
/*
*
@@ -217,6 +219,7 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
dmab->bytes = size;
return 0;
}
+EXPORT_SYMBOL(snd_dma_alloc_pages);
/**
* snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
@@ -254,6 +257,7 @@ int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
return -ENOMEM;
return 0;
}
+EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
/**
@@ -287,13 +291,4 @@ void snd_dma_free_pages(struct snd_dma_buffer *dmab)
pr_err("snd-malloc: invalid device type %d\n", dmab->dev.type);
}
}
-
-/*
- * exports
- */
-EXPORT_SYMBOL(snd_dma_alloc_pages);
-EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
EXPORT_SYMBOL(snd_dma_free_pages);
-
-EXPORT_SYMBOL(snd_malloc_pages);
-EXPORT_SYMBOL(snd_free_pages);
diff --git a/sound/core/memory.c b/sound/core/memory.c
index 4cd664efad77..19c9ea90d9bf 100644
--- a/sound/core/memory.c
+++ b/sound/core/memory.c
@@ -55,7 +55,6 @@ int copy_to_user_fromio(void __user *dst, const volatile void __iomem *src, size
return 0;
#endif
}
-
EXPORT_SYMBOL(copy_to_user_fromio);
/**
@@ -88,5 +87,4 @@ int copy_from_user_toio(volatile void __iomem *dst, const void __user *src, size
return 0;
#endif
}
-
EXPORT_SYMBOL(copy_from_user_toio);
diff --git a/sound/core/misc.c b/sound/core/misc.c
index 21b228046e88..0f818d593c9e 100644
--- a/sound/core/misc.c
+++ b/sound/core/misc.c
@@ -48,7 +48,6 @@ void release_and_free_resource(struct resource *res)
kfree(res);
}
}
-
EXPORT_SYMBOL(release_and_free_resource);
#ifdef CONFIG_SND_VERBOSE_PRINTK
diff --git a/sound/core/oss/io.c b/sound/core/oss/io.c
index 6faa1d719206..d870b2d93135 100644
--- a/sound/core/oss/io.c
+++ b/sound/core/oss/io.c
@@ -26,9 +26,9 @@
#include "pcm_plugin.h"
#define pcm_write(plug,buf,count) snd_pcm_oss_write3(plug,buf,count,1)
-#define pcm_writev(plug,vec,count) snd_pcm_oss_writev3(plug,vec,count,1)
+#define pcm_writev(plug,vec,count) snd_pcm_oss_writev3(plug,vec,count)
#define pcm_read(plug,buf,count) snd_pcm_oss_read3(plug,buf,count,1)
-#define pcm_readv(plug,vec,count) snd_pcm_oss_readv3(plug,vec,count,1)
+#define pcm_readv(plug,vec,count) snd_pcm_oss_readv3(plug,vec,count)
/*
* Basic io plugin
diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c
index 2ff9c12d664a..379bf486ccc7 100644
--- a/sound/core/oss/mixer_oss.c
+++ b/sound/core/oss/mixer_oss.c
@@ -395,6 +395,7 @@ int snd_mixer_oss_ioctl_card(struct snd_card *card, unsigned int cmd, unsigned l
fmixer.mixer = card->mixer_oss;
return snd_mixer_oss_ioctl1(&fmixer, cmd, arg);
}
+EXPORT_SYMBOL(snd_mixer_oss_ioctl_card);
#ifdef CONFIG_COMPAT
/* all compatible */
@@ -1425,5 +1426,3 @@ static void __exit alsa_mixer_oss_exit(void)
module_init(alsa_mixer_oss_init)
module_exit(alsa_mixer_oss_exit)
-
-EXPORT_SYMBOL(snd_mixer_oss_ioctl_card);
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index cd8b7bef8d06..e49f448ee04f 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -67,18 +67,6 @@ static int snd_pcm_oss_get_rate(struct snd_pcm_oss_file *pcm_oss_file);
static int snd_pcm_oss_get_channels(struct snd_pcm_oss_file *pcm_oss_file);
static int snd_pcm_oss_get_format(struct snd_pcm_oss_file *pcm_oss_file);
-static inline mm_segment_t snd_enter_user(void)
-{
- mm_segment_t fs = get_fs();
- set_fs(get_ds());
- return fs;
-}
-
-static inline void snd_leave_user(mm_segment_t fs)
-{
- set_fs(fs);
-}
-
/*
* helper functions to process hw_params
*/
@@ -799,7 +787,7 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
static int choose_rate(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, unsigned int best_rate)
{
- struct snd_interval *it;
+ const struct snd_interval *it;
struct snd_pcm_hw_params *save;
unsigned int rate, prev;
@@ -807,7 +795,7 @@ static int choose_rate(struct snd_pcm_substream *substream,
if (save == NULL)
return -ENOMEM;
*save = *params;
- it = hw_param_interval(save, SNDRV_PCM_HW_PARAM_RATE);
+ it = hw_param_interval_c(save, SNDRV_PCM_HW_PARAM_RATE);
/* try multiples of the best rate */
rate = best_rate;
@@ -848,7 +836,7 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
int direct;
snd_pcm_format_t format, sformat;
int n;
- struct snd_mask sformat_mask;
+ const struct snd_mask *sformat_mask;
struct snd_mask mask;
if (trylock) {
@@ -891,18 +879,18 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
format = snd_pcm_oss_format_from(runtime->oss.format);
- sformat_mask = *hw_param_mask(sparams, SNDRV_PCM_HW_PARAM_FORMAT);
+ sformat_mask = hw_param_mask_c(sparams, SNDRV_PCM_HW_PARAM_FORMAT);
if (direct)
sformat = format;
else
- sformat = snd_pcm_plug_slave_format(format, &sformat_mask);
+ sformat = snd_pcm_plug_slave_format(format, sformat_mask);
if ((__force int)sformat < 0 ||
- !snd_mask_test(&sformat_mask, (__force int)sformat)) {
+ !snd_mask_test(sformat_mask, (__force int)sformat)) {
for (sformat = (__force snd_pcm_format_t)0;
(__force int)sformat <= (__force int)SNDRV_PCM_FORMAT_LAST;
sformat = (__force snd_pcm_format_t)((__force int)sformat + 1)) {
- if (snd_mask_test(&sformat_mask, (__force int)sformat) &&
+ if (snd_mask_test(sformat_mask, (__force int)sformat) &&
snd_pcm_oss_format_to(sformat) >= 0)
break;
}
@@ -1191,14 +1179,8 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
if (ret < 0)
break;
}
- if (in_kernel) {
- mm_segment_t fs;
- fs = snd_enter_user();
- ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
- snd_leave_user(fs);
- } else {
- ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
- }
+ ret = __snd_pcm_lib_xfer(substream, (void *)ptr, true,
+ frames, in_kernel);
if (ret != -EPIPE && ret != -ESTRPIPE)
break;
/* test, if we can't store new data, because the stream */
@@ -1234,14 +1216,8 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
ret = snd_pcm_oss_capture_position_fixup(substream, &delay);
if (ret < 0)
break;
- if (in_kernel) {
- mm_segment_t fs;
- fs = snd_enter_user();
- ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
- snd_leave_user(fs);
- } else {
- ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
- }
+ ret = __snd_pcm_lib_xfer(substream, (void *)ptr, true,
+ frames, in_kernel);
if (ret == -EPIPE) {
if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
ret = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
@@ -1256,7 +1232,8 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
return ret;
}
-snd_pcm_sframes_t snd_pcm_oss_writev3(struct snd_pcm_substream *substream, void **bufs, snd_pcm_uframes_t frames, int in_kernel)
+#ifdef CONFIG_SND_PCM_OSS_PLUGINS
+snd_pcm_sframes_t snd_pcm_oss_writev3(struct snd_pcm_substream *substream, void **bufs, snd_pcm_uframes_t frames)
{
struct snd_pcm_runtime *runtime = substream->runtime;
int ret;
@@ -1273,14 +1250,7 @@ snd_pcm_sframes_t snd_pcm_oss_writev3(struct snd_pcm_substream *substream, void
if (ret < 0)
break;
}
- if (in_kernel) {
- mm_segment_t fs;
- fs = snd_enter_user();
- ret = snd_pcm_lib_writev(substream, (void __user **)bufs, frames);
- snd_leave_user(fs);
- } else {
- ret = snd_pcm_lib_writev(substream, (void __user **)bufs, frames);
- }
+ ret = snd_pcm_kernel_writev(substream, bufs, frames);
if (ret != -EPIPE && ret != -ESTRPIPE)
break;
@@ -1292,7 +1262,7 @@ snd_pcm_sframes_t snd_pcm_oss_writev3(struct snd_pcm_substream *substream, void
return ret;
}
-snd_pcm_sframes_t snd_pcm_oss_readv3(struct snd_pcm_substream *substream, void **bufs, snd_pcm_uframes_t frames, int in_kernel)
+snd_pcm_sframes_t snd_pcm_oss_readv3(struct snd_pcm_substream *substream, void **bufs, snd_pcm_uframes_t frames)
{
struct snd_pcm_runtime *runtime = substream->runtime;
int ret;
@@ -1313,19 +1283,13 @@ snd_pcm_sframes_t snd_pcm_oss_readv3(struct snd_pcm_substream *substream, void *
if (ret < 0)
break;
}
- if (in_kernel) {
- mm_segment_t fs;
- fs = snd_enter_user();
- ret = snd_pcm_lib_readv(substream, (void __user **)bufs, frames);
- snd_leave_user(fs);
- } else {
- ret = snd_pcm_lib_readv(substream, (void __user **)bufs, frames);
- }
+ ret = snd_pcm_kernel_readv(substream, bufs, frames);
if (ret != -EPIPE && ret != -ESTRPIPE)
break;
}
return ret;
}
+#endif /* CONFIG_SND_PCM_OSS_PLUGINS */
static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const char *buf, size_t bytes, int in_kernel)
{
@@ -1650,27 +1614,10 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
size = runtime->control->appl_ptr % runtime->period_size;
if (size > 0) {
size = runtime->period_size - size;
- if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED) {
- size = (runtime->frame_bits * size) / 8;
- while (size > 0) {
- mm_segment_t fs;
- size_t size1 = size < runtime->oss.period_bytes ? size : runtime->oss.period_bytes;
- size -= size1;
- size1 *= 8;
- size1 /= runtime->sample_bits;
- snd_pcm_format_set_silence(runtime->format,
- runtime->oss.buffer,
- size1);
- size1 /= runtime->channels; /* frames */
- fs = snd_enter_user();
- snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
- snd_leave_user(fs);
- }
- } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
- void __user *buffers[runtime->channels];
- memset(buffers, 0, runtime->channels * sizeof(void *));
- snd_pcm_lib_writev(substream, buffers, size);
- }
+ if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED)
+ snd_pcm_lib_write(substream, NULL, size);
+ else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
+ snd_pcm_lib_writev(substream, NULL, size);
}
mutex_unlock(&runtime->oss.params_lock);
/*
@@ -1780,7 +1727,7 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
int direct;
struct snd_pcm_hw_params *params;
unsigned int formats = 0;
- struct snd_mask format_mask;
+ const struct snd_mask *format_mask;
int fmt;
if ((err = snd_pcm_oss_get_active_substream(pcm_oss_file, &substream)) < 0)
@@ -1802,12 +1749,12 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
return -ENOMEM;
_snd_pcm_hw_params_any(params);
err = snd_pcm_hw_refine(substream, params);
- format_mask = *hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ format_mask = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
kfree(params);
if (err < 0)
return err;
for (fmt = 0; fmt < 32; ++fmt) {
- if (snd_mask_test(&format_mask, fmt)) {
+ if (snd_mask_test(format_mask, fmt)) {
int f = snd_pcm_oss_format_to(fmt);
if (f >= 0)
formats |= f;
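
A minimal sketch of the calling convention the reworked snd_pcm_oss_sync() hunk above relies on: a NULL buffer pointer asks the write path to fill silence instead of copying data, so no temporary buffer or set_fs() dance is needed. The helper name is hypothetical and not part of the patch; the real code additionally holds the OSS params lock.

static snd_pcm_sframes_t flush_partial_period(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t size;

	/* frames already queued in the current period */
	size = runtime->control->appl_ptr % runtime->period_size;
	if (!size)
		return 0;
	/* pad the rest of the period: NULL buffer means "fill silence" */
	size = runtime->period_size - size;
	if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED)
		return snd_pcm_lib_write(substream, NULL, size);
	return snd_pcm_lib_writev(substream, NULL, size);
}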
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index 727ac44d39f4..cadc93792868 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -266,7 +266,8 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc
return frames;
}
-static int snd_pcm_plug_formats(struct snd_mask *mask, snd_pcm_format_t format)
+static int snd_pcm_plug_formats(const struct snd_mask *mask,
+ snd_pcm_format_t format)
{
struct snd_mask formats = *mask;
u64 linfmts = (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S8 |
@@ -309,7 +310,7 @@ static snd_pcm_format_t preferred_formats[] = {
};
snd_pcm_format_t snd_pcm_plug_slave_format(snd_pcm_format_t format,
- struct snd_mask *format_mask)
+ const struct snd_mask *format_mask)
{
int i;
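
Because snd_pcm_plug_slave_format() now takes a const mask, a caller can hand it the mask still sitting inside the refined hw_params instead of copying it out first, as the pcm_oss.c hunk earlier does. A short sketch of that pattern, assuming the accessors used in this patch; the wrapper itself is illustrative:

static snd_pcm_format_t sketch_pick_slave_format(snd_pcm_format_t client_format,
						 struct snd_pcm_hw_params *sparams)
{
	/* read-only view of the refined format mask, no local copy */
	const struct snd_mask *sformat_mask =
		hw_param_mask_c(sparams, SNDRV_PCM_HW_PARAM_FORMAT);

	return snd_pcm_plug_slave_format(client_format, sformat_mask);
}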
diff --git a/sound/core/oss/pcm_plugin.h b/sound/core/oss/pcm_plugin.h
index a5035c2369a6..c9cd29d86efd 100644
--- a/sound/core/oss/pcm_plugin.h
+++ b/sound/core/oss/pcm_plugin.h
@@ -126,7 +126,7 @@ int snd_pcm_plug_format_plugins(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *slave_params);
snd_pcm_format_t snd_pcm_plug_slave_format(snd_pcm_format_t format,
- struct snd_mask *format_mask);
+ const struct snd_mask *format_mask);
int snd_pcm_plugin_append(struct snd_pcm_plugin *plugin);
@@ -162,17 +162,15 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream,
snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream,
char *ptr, snd_pcm_uframes_t size, int in_kernel);
snd_pcm_sframes_t snd_pcm_oss_writev3(struct snd_pcm_substream *substream,
- void **bufs, snd_pcm_uframes_t frames,
- int in_kernel);
+ void **bufs, snd_pcm_uframes_t frames);
snd_pcm_sframes_t snd_pcm_oss_readv3(struct snd_pcm_substream *substream,
- void **bufs, snd_pcm_uframes_t frames,
- int in_kernel);
+ void **bufs, snd_pcm_uframes_t frames);
#else
static inline snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t drv_size) { return drv_size; }
static inline snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t clt_size) { return clt_size; }
-static inline int snd_pcm_plug_slave_format(int format, struct snd_mask *format_mask) { return format; }
+static inline int snd_pcm_plug_slave_format(int format, const struct snd_mask *format_mask) { return format; }
#endif
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 8e980aa678d0..89c7485519cb 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -31,13 +31,17 @@
#include <sound/control.h>
#include <sound/info.h>
+#include "pcm_local.h"
+
MODULE_AUTHOR("Jaroslav Kysela <[email protected]>, Abramo Bagnara <[email protected]>");
MODULE_DESCRIPTION("Midlevel PCM code for ALSA.");
MODULE_LICENSE("GPL");
static LIST_HEAD(snd_pcm_devices);
-static LIST_HEAD(snd_pcm_notify_list);
static DEFINE_MUTEX(register_mutex);
+#if IS_ENABLED(CONFIG_SND_PCM_OSS)
+static LIST_HEAD(snd_pcm_notify_list);
+#endif
static int snd_pcm_free(struct snd_pcm *pcm);
static int snd_pcm_dev_free(struct snd_device *device);
@@ -884,16 +888,23 @@ static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
put_device(&pstr->dev);
}
+#if IS_ENABLED(CONFIG_SND_PCM_OSS)
+#define pcm_call_notify(pcm, call) \
+ do { \
+ struct snd_pcm_notify *_notify; \
+ list_for_each_entry(_notify, &snd_pcm_notify_list, list) \
+ _notify->call(pcm); \
+ } while (0)
+#else
+#define pcm_call_notify(pcm, call) do {} while (0)
+#endif
+
static int snd_pcm_free(struct snd_pcm *pcm)
{
- struct snd_pcm_notify *notify;
-
if (!pcm)
return 0;
- if (!pcm->internal) {
- list_for_each_entry(notify, &snd_pcm_notify_list, list)
- notify->n_unregister(pcm);
- }
+ if (!pcm->internal)
+ pcm_call_notify(pcm, n_unregister);
if (pcm->private_free)
pcm->private_free(pcm);
snd_pcm_lib_preallocate_free_for_all(pcm);
@@ -1056,7 +1067,7 @@ static struct attribute *pcm_dev_attrs[] = {
NULL
};
-static struct attribute_group pcm_dev_attr_group = {
+static const struct attribute_group pcm_dev_attr_group = {
.attrs = pcm_dev_attrs,
};
@@ -1069,7 +1080,6 @@ static int snd_pcm_dev_register(struct snd_device *device)
{
int cidx, err;
struct snd_pcm_substream *substream;
- struct snd_pcm_notify *notify;
struct snd_pcm *pcm;
if (snd_BUG_ON(!device || !device->device_data))
@@ -1107,8 +1117,7 @@ static int snd_pcm_dev_register(struct snd_device *device)
snd_pcm_timer_init(substream);
}
- list_for_each_entry(notify, &snd_pcm_notify_list, list)
- notify->n_register(pcm);
+ pcm_call_notify(pcm, n_register);
unlock:
mutex_unlock(&register_mutex);
@@ -1118,7 +1127,6 @@ static int snd_pcm_dev_register(struct snd_device *device)
static int snd_pcm_dev_disconnect(struct snd_device *device)
{
struct snd_pcm *pcm = device->device_data;
- struct snd_pcm_notify *notify;
struct snd_pcm_substream *substream;
int cidx;
@@ -1138,8 +1146,7 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
}
}
if (!pcm->internal) {
- list_for_each_entry(notify, &snd_pcm_notify_list, list)
- notify->n_disconnect(pcm);
+ pcm_call_notify(pcm, n_disconnect);
}
for (cidx = 0; cidx < 2; cidx++) {
if (!pcm->internal)
@@ -1151,6 +1158,7 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
return 0;
}
+#if IS_ENABLED(CONFIG_SND_PCM_OSS)
/**
* snd_pcm_notify - Add/remove the notify list
* @notify: PCM notify list
@@ -1183,6 +1191,7 @@ int snd_pcm_notify(struct snd_pcm_notify *notify, int nfree)
return 0;
}
EXPORT_SYMBOL(snd_pcm_notify);
+#endif /* CONFIG_SND_PCM_OSS */
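
With the guard above, snd_pcm_notify() only exists when the OSS emulation that consumes it is built. A sketch of how a client of this interface would register its hooks, assuming the snd_pcm_notify fields used earlier in this file; the callback bodies are hypothetical:

static void sketch_on_register(struct snd_pcm *pcm)
{
	/* e.g. create an OSS-style device node for the new PCM */
}

static void sketch_on_disconnect(struct snd_pcm *pcm)
{
}

static void sketch_on_unregister(struct snd_pcm *pcm)
{
	/* tear down whatever sketch_on_register() created */
}

static struct snd_pcm_notify sketch_notify = {
	.n_register   = sketch_on_register,
	.n_disconnect = sketch_on_disconnect,
	.n_unregister = sketch_on_unregister,
};

/* per its "Add/remove the notify list" kerneldoc, snd_pcm_notify() is
 * then called with &sketch_notify to add the hooks, or with a nonzero
 * nfree argument to remove them again
 */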
#ifdef CONFIG_SND_PROC_FS
/*
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 1f64ab0c2a95..10f537f4d735 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -27,17 +27,13 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
s32 __user *src)
{
snd_pcm_sframes_t delay;
- mm_segment_t fs;
- int err;
- fs = snd_enter_user();
- err = snd_pcm_delay(substream, &delay);
- snd_leave_user(fs);
- if (err < 0)
- return err;
+ delay = snd_pcm_delay(substream);
+ if (delay < 0)
+ return delay;
if (put_user(delay, src))
return -EFAULT;
- return err;
+ return 0;
}
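
The compat handler above now expects snd_pcm_delay() to return either the delay in frames or a negative errno directly, rather than filling a pointer under set_fs(). A hedged sketch of the same convention on the native side; the handler name and exact shape are assumptions, not shown in this hunk:

static int sketch_delay_ioctl(struct snd_pcm_substream *substream,
			      snd_pcm_sframes_t __user *res)
{
	snd_pcm_sframes_t delay = snd_pcm_delay(substream);

	if (delay < 0)		/* negative values are errors */
		return delay;
	return put_user(delay, res) ? -EFAULT : 0;
}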
static int snd_pcm_ioctl_rewind_compat(struct snd_pcm_substream *substream,
@@ -680,6 +676,7 @@ static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned l
case SNDRV_PCM_IOCTL_INFO:
case SNDRV_PCM_IOCTL_TSTAMP:
case SNDRV_PCM_IOCTL_TTSTAMP:
+ case SNDRV_PCM_IOCTL_USER_PVERSION:
case SNDRV_PCM_IOCTL_HWSYNC:
case SNDRV_PCM_IOCTL_PREPARE:
case SNDRV_PCM_IOCTL_RESET:
diff --git a/sound/core/pcm_drm_eld.c b/sound/core/pcm_drm_eld.c
index e70379fb63d0..9881d087756f 100644
--- a/sound/core/pcm_drm_eld.c
+++ b/sound/core/pcm_drm_eld.c
@@ -29,13 +29,13 @@ static int eld_limit_rates(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct snd_interval *r = hw_param_interval(params, rule->var);
- struct snd_interval *c;
+ const struct snd_interval *c;
unsigned int rate_mask = 7, i;
const u8 *sad, *eld = rule->private;
sad = drm_eld_sad(eld);
if (sad) {
- c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ c = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
for (i = drm_eld_sad_count(eld); i > 0; i--, sad += 3) {
unsigned max_channels = sad_max_channels(sad);
@@ -57,7 +57,7 @@ static int eld_limit_channels(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct snd_interval *c = hw_param_interval(params, rule->var);
- struct snd_interval *r;
+ const struct snd_interval *r;
struct snd_interval t = { .min = 1, .max = 2, .integer = 1, };
unsigned int i;
const u8 *sad, *eld = rule->private;
@@ -67,7 +67,7 @@ static int eld_limit_channels(struct snd_pcm_hw_params *params,
unsigned int rate_mask = 0;
/* Convert the rate interval to a mask */
- r = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ r = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
for (i = 0; i < ARRAY_SIZE(eld_rates); i++)
if (r->min <= eld_rates[i] && r->max >= eld_rates[i])
rate_mask |= BIT(i);
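
Both ELD rules above follow the same shape: the rule's own variable is fetched writable with hw_param_interval(), every dependency is fetched through the const accessor, and the result is narrowed with snd_interval_refine(). A minimal sketch of that pattern with a made-up policy (at most 2 channels above 48 kHz); the function and the policy are illustrative, not from the patch:

static int sketch_limit_channels_by_rate(struct snd_pcm_hw_params *params,
					 struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *c = hw_param_interval(params, rule->var);
	const struct snd_interval *r =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval t = { .min = 1, .max = 8, .integer = 1 };

	/* made-up constraint: above 48 kHz allow at most 2 channels */
	if (r->min > 48000)
		t.max = 2;
	return snd_interval_refine(c, &t);
}

Such a rule would be wired in with snd_pcm_hw_rule_add(); a registration sketch appears further below next to the rule-evaluation loop.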
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 877176067072..a93a4235a332 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -33,6 +33,8 @@
#include <sound/pcm_params.h>
#include <sound/timer.h>
+#include "pcm_local.h"
+
#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define CREATE_TRACE_POINTS
#include "pcm_trace.h"
@@ -40,8 +42,12 @@
#define trace_hwptr(substream, pos, in_interrupt)
#define trace_xrun(substream)
#define trace_hw_ptr_error(substream, reason)
+#define trace_applptr(substream, prev, curr)
#endif
+static int fill_silence_frames(struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t off, snd_pcm_uframes_t frames);
+
/*
* fill ring buffer with silence
* runtime->silence_start: starting pointer to silence area
@@ -55,18 +61,20 @@ void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_ufram
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_uframes_t frames, ofs, transfer;
+ int err;
if (runtime->silence_size < runtime->boundary) {
snd_pcm_sframes_t noise_dist, n;
- if (runtime->silence_start != runtime->control->appl_ptr) {
- n = runtime->control->appl_ptr - runtime->silence_start;
+ snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
+ if (runtime->silence_start != appl_ptr) {
+ n = appl_ptr - runtime->silence_start;
if (n < 0)
n += runtime->boundary;
if ((snd_pcm_uframes_t)n < runtime->silence_filled)
runtime->silence_filled -= n;
else
runtime->silence_filled = 0;
- runtime->silence_start = runtime->control->appl_ptr;
+ runtime->silence_start = appl_ptr;
}
if (runtime->silence_filled >= runtime->buffer_size)
return;
@@ -107,33 +115,8 @@ void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_ufram
ofs = runtime->silence_start % runtime->buffer_size;
while (frames > 0) {
transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
- if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
- runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED) {
- if (substream->ops->silence) {
- int err;
- err = substream->ops->silence(substream, -1, ofs, transfer);
- snd_BUG_ON(err < 0);
- } else {
- char *hwbuf = runtime->dma_area + frames_to_bytes(runtime, ofs);
- snd_pcm_format_set_silence(runtime->format, hwbuf, transfer * runtime->channels);
- }
- } else {
- unsigned int c;
- unsigned int channels = runtime->channels;
- if (substream->ops->silence) {
- for (c = 0; c < channels; ++c) {
- int err;
- err = substream->ops->silence(substream, c, ofs, transfer);
- snd_BUG_ON(err < 0);
- }
- } else {
- size_t dma_csize = runtime->dma_bytes / channels;
- for (c = 0; c < channels; ++c) {
- char *hwbuf = runtime->dma_area + (c * dma_csize) + samples_to_bytes(runtime, ofs);
- snd_pcm_format_set_silence(runtime->format, hwbuf, transfer);
- }
- }
- }
+ err = fill_silence_frames(substream, ofs, transfer);
+ snd_BUG_ON(err < 0);
runtime->silence_filled += transfer;
frames -= transfer;
ofs = 0;
@@ -508,7 +491,6 @@ void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
for (substream = stream->substream; substream != NULL; substream = substream->next)
substream->ops = ops;
}
-
EXPORT_SYMBOL(snd_pcm_set_ops);
/**
@@ -526,7 +508,6 @@ void snd_pcm_set_sync(struct snd_pcm_substream *substream)
runtime->sync.id32[2] = -1;
runtime->sync.id32[3] = -1;
}
-
EXPORT_SYMBOL(snd_pcm_set_sync);
/*
@@ -643,7 +624,6 @@ int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
}
return changed;
}
-
EXPORT_SYMBOL(snd_interval_refine);
static int snd_interval_refine_first(struct snd_interval *i)
@@ -906,7 +886,6 @@ int snd_interval_ratnum(struct snd_interval *i,
}
return err;
}
-
EXPORT_SYMBOL(snd_interval_ratnum);
/**
@@ -1044,7 +1023,6 @@ int snd_interval_list(struct snd_interval *i, unsigned int count,
}
return snd_interval_refine(i, &list_range);
}
-
EXPORT_SYMBOL(snd_interval_list);
/**
@@ -1183,7 +1161,6 @@ int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
va_end(args);
return 0;
}
-
EXPORT_SYMBOL(snd_pcm_hw_rule_add);
/**
@@ -1247,7 +1224,6 @@ int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_pa
struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
return snd_interval_setinteger(constrs_interval(constrs, var));
}
-
EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
/**
@@ -1273,7 +1249,6 @@ int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_par
t.integer = 0;
return snd_interval_refine(constrs_interval(constrs, var), &t);
}
-
EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
@@ -1304,7 +1279,6 @@ int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
snd_pcm_hw_rule_list, (void *)l,
var, -1);
}
-
EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
@@ -1371,7 +1345,6 @@ int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
snd_pcm_hw_rule_ratnums, (void *)r,
var, -1);
}
-
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
@@ -1406,7 +1379,6 @@ int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
snd_pcm_hw_rule_ratdens, (void *)r,
var, -1);
}
-
EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
@@ -1415,7 +1387,8 @@ static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
unsigned int l = (unsigned long) rule->private;
int width = l & 0xffff;
unsigned int msbits = l >> 16;
- struct snd_interval *i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
+ const struct snd_interval *i =
+ hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
if (!snd_interval_single(i))
return 0;
@@ -1452,7 +1425,6 @@ int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
(void*) l,
SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
}
-
EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
@@ -1480,7 +1452,6 @@ int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
snd_pcm_hw_rule_step, (void *) step,
var, -1);
}
-
EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
@@ -1511,7 +1482,6 @@ int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
snd_pcm_hw_rule_pow2, NULL,
var, -1);
}
-
EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
@@ -1570,7 +1540,6 @@ void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
_snd_pcm_hw_param_any(params, k);
params->info = ~0U;
}
-
EXPORT_SYMBOL(_snd_pcm_hw_params_any);
/**
@@ -1603,7 +1572,6 @@ int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
}
return -EINVAL;
}
-
EXPORT_SYMBOL(snd_pcm_hw_param_value);
void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
@@ -1621,7 +1589,6 @@ void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
snd_BUG();
}
}
-
EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
@@ -1668,7 +1635,6 @@ int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
}
return snd_pcm_hw_param_value(params, var, dir);
}
-
EXPORT_SYMBOL(snd_pcm_hw_param_first);
static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
@@ -1715,48 +1681,8 @@ int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
}
return snd_pcm_hw_param_value(params, var, dir);
}
-
EXPORT_SYMBOL(snd_pcm_hw_param_last);
-/**
- * snd_pcm_hw_param_choose - choose a configuration defined by @params
- * @pcm: PCM instance
- * @params: the hw_params instance
- *
- * Choose one configuration from configuration space defined by @params.
- * The configuration chosen is that obtained fixing in this order:
- * first access, first format, first subformat, min channels,
- * min rate, min period time, max buffer size, min tick time
- *
- * Return: Zero if successful, or a negative error code on failure.
- */
-int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
- struct snd_pcm_hw_params *params)
-{
- static int vars[] = {
- SNDRV_PCM_HW_PARAM_ACCESS,
- SNDRV_PCM_HW_PARAM_FORMAT,
- SNDRV_PCM_HW_PARAM_SUBFORMAT,
- SNDRV_PCM_HW_PARAM_CHANNELS,
- SNDRV_PCM_HW_PARAM_RATE,
- SNDRV_PCM_HW_PARAM_PERIOD_TIME,
- SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
- SNDRV_PCM_HW_PARAM_TICK_TIME,
- -1
- };
- int err, *v;
-
- for (v = vars; *v != -1; v++) {
- if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
- err = snd_pcm_hw_param_first(pcm, params, *v, NULL);
- else
- err = snd_pcm_hw_param_last(pcm, params, *v, NULL);
- if (snd_BUG_ON(err < 0))
- return err;
- }
- return 0;
-}
-
static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
void *arg)
{
@@ -1843,8 +1769,6 @@ int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg)
{
switch (cmd) {
- case SNDRV_PCM_IOCTL1_INFO:
- return 0;
case SNDRV_PCM_IOCTL1_RESET:
return snd_pcm_lib_ioctl_reset(substream, arg);
case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
@@ -1854,7 +1778,6 @@ int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
}
return -ENXIO;
}
-
EXPORT_SYMBOL(snd_pcm_lib_ioctl);
/**
@@ -1890,7 +1813,6 @@ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
snd_pcm_stream_unlock_irqrestore(substream, flags);
}
-
EXPORT_SYMBOL(snd_pcm_period_elapsed);
/*
@@ -1985,129 +1907,147 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
return err;
}
-static int snd_pcm_lib_write_transfer(struct snd_pcm_substream *substream,
- unsigned int hwoff,
- unsigned long data, unsigned int off,
- snd_pcm_uframes_t frames)
+typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
+ int channel, unsigned long hwoff,
+ void *buf, unsigned long bytes);
+
+typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
+ snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f);
+
+/* calculate the target DMA-buffer position to be written/read */
+static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
+ int channel, unsigned long hwoff)
{
- struct snd_pcm_runtime *runtime = substream->runtime;
- int err;
- char __user *buf = (char __user *) data + frames_to_bytes(runtime, off);
- if (substream->ops->copy) {
- if ((err = substream->ops->copy(substream, -1, hwoff, buf, frames)) < 0)
- return err;
- } else {
- char *hwbuf = runtime->dma_area + frames_to_bytes(runtime, hwoff);
- if (copy_from_user(hwbuf, buf, frames_to_bytes(runtime, frames)))
- return -EFAULT;
- }
+ return runtime->dma_area + hwoff +
+ channel * (runtime->dma_bytes / runtime->channels);
+}
+
+/* default copy_user ops for write; used for both interleaved and non-interleaved modes */

+static int default_write_copy(struct snd_pcm_substream *substream,
+ int channel, unsigned long hwoff,
+ void *buf, unsigned long bytes)
+{
+ if (copy_from_user(get_dma_ptr(substream->runtime, channel, hwoff),
+ (void __user *)buf, bytes))
+ return -EFAULT;
return 0;
}
-
-typedef int (*transfer_f)(struct snd_pcm_substream *substream, unsigned int hwoff,
- unsigned long data, unsigned int off,
- snd_pcm_uframes_t size);
-static snd_pcm_sframes_t snd_pcm_lib_write1(struct snd_pcm_substream *substream,
- unsigned long data,
- snd_pcm_uframes_t size,
- int nonblock,
- transfer_f transfer)
+/* default copy_kernel ops for write */
+static int default_write_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long hwoff,
+ void *buf, unsigned long bytes)
+{
+ memcpy(get_dma_ptr(substream->runtime, channel, hwoff), buf, bytes);
+ return 0;
+}
+
+/* fill silence instead of copying data; called as a transfer helper
+ * from __snd_pcm_lib_xfer() or directly from noninterleaved_copy() when
+ * a NULL buffer is passed
+ */
+static int fill_silence(struct snd_pcm_substream *substream, int channel,
+ unsigned long hwoff, void *buf, unsigned long bytes)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- snd_pcm_uframes_t xfer = 0;
- snd_pcm_uframes_t offset = 0;
- snd_pcm_uframes_t avail;
- int err = 0;
- if (size == 0)
+ if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
return 0;
+ if (substream->ops->fill_silence)
+ return substream->ops->fill_silence(substream, channel,
+ hwoff, bytes);
- snd_pcm_stream_lock_irq(substream);
- switch (runtime->status->state) {
- case SNDRV_PCM_STATE_PREPARED:
- case SNDRV_PCM_STATE_RUNNING:
- case SNDRV_PCM_STATE_PAUSED:
- break;
- case SNDRV_PCM_STATE_XRUN:
- err = -EPIPE;
- goto _end_unlock;
- case SNDRV_PCM_STATE_SUSPENDED:
- err = -ESTRPIPE;
- goto _end_unlock;
- default:
- err = -EBADFD;
- goto _end_unlock;
- }
+ snd_pcm_format_set_silence(runtime->format,
+ get_dma_ptr(runtime, channel, hwoff),
+ bytes_to_samples(runtime, bytes));
+ return 0;
+}
- runtime->twake = runtime->control->avail_min ? : 1;
- if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
- snd_pcm_update_hw_ptr(substream);
- avail = snd_pcm_playback_avail(runtime);
- while (size > 0) {
- snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
- snd_pcm_uframes_t cont;
- if (!avail) {
- if (nonblock) {
- err = -EAGAIN;
- goto _end_unlock;
- }
- runtime->twake = min_t(snd_pcm_uframes_t, size,
- runtime->control->avail_min ? : 1);
- err = wait_for_avail(substream, &avail);
- if (err < 0)
- goto _end_unlock;
- }
- frames = size > avail ? avail : size;
- cont = runtime->buffer_size - runtime->control->appl_ptr % runtime->buffer_size;
- if (frames > cont)
- frames = cont;
- if (snd_BUG_ON(!frames)) {
- runtime->twake = 0;
- snd_pcm_stream_unlock_irq(substream);
- return -EINVAL;
- }
- appl_ptr = runtime->control->appl_ptr;
- appl_ofs = appl_ptr % runtime->buffer_size;
- snd_pcm_stream_unlock_irq(substream);
- err = transfer(substream, appl_ofs, data, offset, frames);
- snd_pcm_stream_lock_irq(substream);
- if (err < 0)
- goto _end_unlock;
- switch (runtime->status->state) {
- case SNDRV_PCM_STATE_XRUN:
- err = -EPIPE;
- goto _end_unlock;
- case SNDRV_PCM_STATE_SUSPENDED:
- err = -ESTRPIPE;
- goto _end_unlock;
- default:
- break;
- }
- appl_ptr += frames;
- if (appl_ptr >= runtime->boundary)
- appl_ptr -= runtime->boundary;
- runtime->control->appl_ptr = appl_ptr;
- if (substream->ops->ack)
- substream->ops->ack(substream);
+/* default copy_user ops for read; used for both interleaved and non-interleaved modes */
+static int default_read_copy(struct snd_pcm_substream *substream,
+ int channel, unsigned long hwoff,
+ void *buf, unsigned long bytes)
+{
+ if (copy_to_user((void __user *)buf,
+ get_dma_ptr(substream->runtime, channel, hwoff),
+ bytes))
+ return -EFAULT;
+ return 0;
+}
- offset += frames;
- size -= frames;
- xfer += frames;
- avail -= frames;
- if (runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
- snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
- err = snd_pcm_start(substream);
- if (err < 0)
- goto _end_unlock;
- }
+/* default copy_kernel ops for read */
+static int default_read_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long hwoff,
+ void *buf, unsigned long bytes)
+{
+ memcpy(buf, get_dma_ptr(substream->runtime, channel, hwoff), bytes);
+ return 0;
+}
+
+/* call the transfer function with the converted pointers and sizes;
+ * for interleaved mode, it's one shot for all samples
+ */
+static int interleaved_copy(struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t hwoff, void *data,
+ snd_pcm_uframes_t off,
+ snd_pcm_uframes_t frames,
+ pcm_transfer_f transfer)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ /* convert to bytes */
+ hwoff = frames_to_bytes(runtime, hwoff);
+ off = frames_to_bytes(runtime, off);
+ frames = frames_to_bytes(runtime, frames);
+ return transfer(substream, 0, hwoff, data + off, frames);
+}
+
+/* call the transfer function with the converted pointers and sizes for each
+ * non-interleaved channel; when the buffer is NULL, fill silence instead of copying
+ */
+static int noninterleaved_copy(struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t hwoff, void *data,
+ snd_pcm_uframes_t off,
+ snd_pcm_uframes_t frames,
+ pcm_transfer_f transfer)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int channels = runtime->channels;
+ void **bufs = data;
+ int c, err;
+
+ /* convert to bytes; note that it's not frames_to_bytes() here:
+ * in non-interleaved mode we copy per channel, so each per-channel
+ * copy is n_samples bytes, and channels such copies make up whole frames.
+ */
+ off = samples_to_bytes(runtime, off);
+ frames = samples_to_bytes(runtime, frames);
+ hwoff = samples_to_bytes(runtime, hwoff);
+ for (c = 0; c < channels; ++c, ++bufs) {
+ if (!data || !*bufs)
+ err = fill_silence(substream, c, hwoff, NULL, frames);
+ else
+ err = transfer(substream, c, hwoff, *bufs + off,
+ frames);
+ if (err < 0)
+ return err;
}
- _end_unlock:
- runtime->twake = 0;
- if (xfer > 0 && err >= 0)
- snd_pcm_update_state(substream, runtime);
- snd_pcm_stream_unlock_irq(substream);
- return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
+ return 0;
+}
+
+/* fill silence at the given buffer position;
+ * called from snd_pcm_playback_silence()
+ */
+static int fill_silence_frames(struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
+{
+ if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
+ substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
+ return interleaved_copy(substream, off, NULL, 0, frames,
+ fill_silence);
+ else
+ return noninterleaved_copy(substream, off, NULL, 0, frames,
+ fill_silence);
}
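
The split above is the crux of the two copy paths: interleaved transfers are sized in whole frames, non-interleaved ones in per-channel samples with a fixed per-channel stride into the DMA buffer. A small sketch of just those two conversions, assuming the runtime fields used by get_dma_ptr(); the helper is illustrative and the numeric example assumes S16_LE with 2 channels:

static void sketch_copy_geometry(struct snd_pcm_runtime *rt,
				 snd_pcm_uframes_t frames, int channel)
{
	/* interleaved: one contiguous block of whole frames
	 * (S16_LE, 2ch: frames_to_bytes(rt, 1) == 4)
	 */
	ssize_t ilv_bytes = frames_to_bytes(rt, frames);

	/* non-interleaved: one block of samples per channel
	 * (S16_LE: samples_to_bytes(rt, 1) == 2), each channel starting
	 * at its own slice of the DMA buffer
	 */
	ssize_t chan_bytes = samples_to_bytes(rt, frames);
	char *chan_base = rt->dma_area +
			  channel * (rt->dma_bytes / rt->channels);

	(void)ilv_bytes;
	(void)chan_bytes;
	(void)chan_base;
}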
/* sanity-check for read/write methods */
@@ -2117,164 +2057,137 @@ static int pcm_sanity_check(struct snd_pcm_substream *substream)
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
runtime = substream->runtime;
- if (snd_BUG_ON(!substream->ops->copy && !runtime->dma_area))
+ if (snd_BUG_ON(!substream->ops->copy_user && !runtime->dma_area))
return -EINVAL;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
return -EBADFD;
return 0;
}
-snd_pcm_sframes_t snd_pcm_lib_write(struct snd_pcm_substream *substream, const void __user *buf, snd_pcm_uframes_t size)
+static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
{
- struct snd_pcm_runtime *runtime;
- int nonblock;
- int err;
-
- err = pcm_sanity_check(substream);
- if (err < 0)
- return err;
- runtime = substream->runtime;
- nonblock = !!(substream->f_flags & O_NONBLOCK);
-
- if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
- runtime->channels > 1)
- return -EINVAL;
- return snd_pcm_lib_write1(substream, (unsigned long)buf, size, nonblock,
- snd_pcm_lib_write_transfer);
+ switch (runtime->status->state) {
+ case SNDRV_PCM_STATE_PREPARED:
+ case SNDRV_PCM_STATE_RUNNING:
+ case SNDRV_PCM_STATE_PAUSED:
+ return 0;
+ case SNDRV_PCM_STATE_XRUN:
+ return -EPIPE;
+ case SNDRV_PCM_STATE_SUSPENDED:
+ return -ESTRPIPE;
+ default:
+ return -EBADFD;
+ }
}
-EXPORT_SYMBOL(snd_pcm_lib_write);
-
-static int snd_pcm_lib_writev_transfer(struct snd_pcm_substream *substream,
- unsigned int hwoff,
- unsigned long data, unsigned int off,
- snd_pcm_uframes_t frames)
+/* update to the given appl_ptr and call ack callback if needed;
+ * when an error is returned, roll appl_ptr back to the original value
+ */
+int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t appl_ptr)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- int err;
- void __user **bufs = (void __user **)data;
- int channels = runtime->channels;
- int c;
- if (substream->ops->copy) {
- if (snd_BUG_ON(!substream->ops->silence))
- return -EINVAL;
- for (c = 0; c < channels; ++c, ++bufs) {
- if (*bufs == NULL) {
- if ((err = substream->ops->silence(substream, c, hwoff, frames)) < 0)
- return err;
- } else {
- char __user *buf = *bufs + samples_to_bytes(runtime, off);
- if ((err = substream->ops->copy(substream, c, hwoff, buf, frames)) < 0)
- return err;
- }
- }
- } else {
- /* default transfer behaviour */
- size_t dma_csize = runtime->dma_bytes / channels;
- for (c = 0; c < channels; ++c, ++bufs) {
- char *hwbuf = runtime->dma_area + (c * dma_csize) + samples_to_bytes(runtime, hwoff);
- if (*bufs == NULL) {
- snd_pcm_format_set_silence(runtime->format, hwbuf, frames);
- } else {
- char __user *buf = *bufs + samples_to_bytes(runtime, off);
- if (copy_from_user(hwbuf, buf, samples_to_bytes(runtime, frames)))
- return -EFAULT;
- }
+ snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
+ int ret;
+
+ if (old_appl_ptr == appl_ptr)
+ return 0;
+
+ runtime->control->appl_ptr = appl_ptr;
+ if (substream->ops->ack) {
+ ret = substream->ops->ack(substream);
+ if (ret < 0) {
+ runtime->control->appl_ptr = old_appl_ptr;
+ return ret;
}
}
+
+ trace_applptr(substream, old_appl_ptr, appl_ptr);
+
return 0;
}
-
-snd_pcm_sframes_t snd_pcm_lib_writev(struct snd_pcm_substream *substream,
- void __user **bufs,
- snd_pcm_uframes_t frames)
+
+/* the common loop for read/write data */
+snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
+ void *data, bool interleaved,
+ snd_pcm_uframes_t size, bool in_kernel)
{
- struct snd_pcm_runtime *runtime;
- int nonblock;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ snd_pcm_uframes_t xfer = 0;
+ snd_pcm_uframes_t offset = 0;
+ snd_pcm_uframes_t avail;
+ pcm_copy_f writer;
+ pcm_transfer_f transfer;
+ bool nonblock;
+ bool is_playback;
int err;
err = pcm_sanity_check(substream);
if (err < 0)
return err;
- runtime = substream->runtime;
- nonblock = !!(substream->f_flags & O_NONBLOCK);
-
- if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
- return -EINVAL;
- return snd_pcm_lib_write1(substream, (unsigned long)bufs, frames,
- nonblock, snd_pcm_lib_writev_transfer);
-}
-
-EXPORT_SYMBOL(snd_pcm_lib_writev);
-static int snd_pcm_lib_read_transfer(struct snd_pcm_substream *substream,
- unsigned int hwoff,
- unsigned long data, unsigned int off,
- snd_pcm_uframes_t frames)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- int err;
- char __user *buf = (char __user *) data + frames_to_bytes(runtime, off);
- if (substream->ops->copy) {
- if ((err = substream->ops->copy(substream, -1, hwoff, buf, frames)) < 0)
- return err;
+ is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ if (interleaved) {
+ if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
+ runtime->channels > 1)
+ return -EINVAL;
+ writer = interleaved_copy;
} else {
- char *hwbuf = runtime->dma_area + frames_to_bytes(runtime, hwoff);
- if (copy_to_user(buf, hwbuf, frames_to_bytes(runtime, frames)))
- return -EFAULT;
+ if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
+ return -EINVAL;
+ writer = noninterleaved_copy;
}
- return 0;
-}
-static snd_pcm_sframes_t snd_pcm_lib_read1(struct snd_pcm_substream *substream,
- unsigned long data,
- snd_pcm_uframes_t size,
- int nonblock,
- transfer_f transfer)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- snd_pcm_uframes_t xfer = 0;
- snd_pcm_uframes_t offset = 0;
- snd_pcm_uframes_t avail;
- int err = 0;
+ if (!data) {
+ if (is_playback)
+ transfer = fill_silence;
+ else
+ return -EINVAL;
+ } else if (in_kernel) {
+ if (substream->ops->copy_kernel)
+ transfer = substream->ops->copy_kernel;
+ else
+ transfer = is_playback ?
+ default_write_copy_kernel : default_read_copy_kernel;
+ } else {
+ if (substream->ops->copy_user)
+ transfer = (pcm_transfer_f)substream->ops->copy_user;
+ else
+ transfer = is_playback ?
+ default_write_copy : default_read_copy;
+ }
if (size == 0)
return 0;
+ nonblock = !!(substream->f_flags & O_NONBLOCK);
+
snd_pcm_stream_lock_irq(substream);
- switch (runtime->status->state) {
- case SNDRV_PCM_STATE_PREPARED:
- if (size >= runtime->start_threshold) {
- err = snd_pcm_start(substream);
- if (err < 0)
- goto _end_unlock;
- }
- break;
- case SNDRV_PCM_STATE_DRAINING:
- case SNDRV_PCM_STATE_RUNNING:
- case SNDRV_PCM_STATE_PAUSED:
- break;
- case SNDRV_PCM_STATE_XRUN:
- err = -EPIPE;
- goto _end_unlock;
- case SNDRV_PCM_STATE_SUSPENDED:
- err = -ESTRPIPE;
- goto _end_unlock;
- default:
- err = -EBADFD;
+ err = pcm_accessible_state(runtime);
+ if (err < 0)
goto _end_unlock;
+
+ if (!is_playback &&
+ runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
+ size >= runtime->start_threshold) {
+ err = snd_pcm_start(substream);
+ if (err < 0)
+ goto _end_unlock;
}
runtime->twake = runtime->control->avail_min ? : 1;
if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
snd_pcm_update_hw_ptr(substream);
- avail = snd_pcm_capture_avail(runtime);
+ if (is_playback)
+ avail = snd_pcm_playback_avail(runtime);
+ else
+ avail = snd_pcm_capture_avail(runtime);
while (size > 0) {
snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
snd_pcm_uframes_t cont;
if (!avail) {
- if (runtime->status->state ==
- SNDRV_PCM_STATE_DRAINING) {
+ if (!is_playback &&
+ runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
goto _end_unlock;
}
@@ -2291,7 +2204,9 @@ static snd_pcm_sframes_t snd_pcm_lib_read1(struct snd_pcm_substream *substream,
continue; /* draining */
}
frames = size > avail ? avail : size;
- cont = runtime->buffer_size - runtime->control->appl_ptr % runtime->buffer_size;
+ appl_ptr = READ_ONCE(runtime->control->appl_ptr);
+ appl_ofs = appl_ptr % runtime->buffer_size;
+ cont = runtime->buffer_size - appl_ofs;
if (frames > cont)
frames = cont;
if (snd_BUG_ON(!frames)) {
@@ -2299,34 +2214,33 @@ static snd_pcm_sframes_t snd_pcm_lib_read1(struct snd_pcm_substream *substream,
snd_pcm_stream_unlock_irq(substream);
return -EINVAL;
}
- appl_ptr = runtime->control->appl_ptr;
- appl_ofs = appl_ptr % runtime->buffer_size;
snd_pcm_stream_unlock_irq(substream);
- err = transfer(substream, appl_ofs, data, offset, frames);
+ err = writer(substream, appl_ofs, data, offset, frames,
+ transfer);
snd_pcm_stream_lock_irq(substream);
if (err < 0)
goto _end_unlock;
- switch (runtime->status->state) {
- case SNDRV_PCM_STATE_XRUN:
- err = -EPIPE;
- goto _end_unlock;
- case SNDRV_PCM_STATE_SUSPENDED:
- err = -ESTRPIPE;
+ err = pcm_accessible_state(runtime);
+ if (err < 0)
goto _end_unlock;
- default:
- break;
- }
appl_ptr += frames;
if (appl_ptr >= runtime->boundary)
appl_ptr -= runtime->boundary;
- runtime->control->appl_ptr = appl_ptr;
- if (substream->ops->ack)
- substream->ops->ack(substream);
+ err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
+ if (err < 0)
+ goto _end_unlock;
offset += frames;
size -= frames;
xfer += frames;
avail -= frames;
+ if (is_playback &&
+ runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
+ snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
+ err = snd_pcm_start(substream);
+ if (err < 0)
+ goto _end_unlock;
+ }
}
_end_unlock:
runtime->twake = 0;
@@ -2335,83 +2249,7 @@ static snd_pcm_sframes_t snd_pcm_lib_read1(struct snd_pcm_substream *substream,
snd_pcm_stream_unlock_irq(substream);
return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
}
-
-snd_pcm_sframes_t snd_pcm_lib_read(struct snd_pcm_substream *substream, void __user *buf, snd_pcm_uframes_t size)
-{
- struct snd_pcm_runtime *runtime;
- int nonblock;
- int err;
-
- err = pcm_sanity_check(substream);
- if (err < 0)
- return err;
- runtime = substream->runtime;
- nonblock = !!(substream->f_flags & O_NONBLOCK);
- if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED)
- return -EINVAL;
- return snd_pcm_lib_read1(substream, (unsigned long)buf, size, nonblock, snd_pcm_lib_read_transfer);
-}
-
-EXPORT_SYMBOL(snd_pcm_lib_read);
-
-static int snd_pcm_lib_readv_transfer(struct snd_pcm_substream *substream,
- unsigned int hwoff,
- unsigned long data, unsigned int off,
- snd_pcm_uframes_t frames)
-{
- struct snd_pcm_runtime *runtime = substream->runtime;
- int err;
- void __user **bufs = (void __user **)data;
- int channels = runtime->channels;
- int c;
- if (substream->ops->copy) {
- for (c = 0; c < channels; ++c, ++bufs) {
- char __user *buf;
- if (*bufs == NULL)
- continue;
- buf = *bufs + samples_to_bytes(runtime, off);
- if ((err = substream->ops->copy(substream, c, hwoff, buf, frames)) < 0)
- return err;
- }
- } else {
- snd_pcm_uframes_t dma_csize = runtime->dma_bytes / channels;
- for (c = 0; c < channels; ++c, ++bufs) {
- char *hwbuf;
- char __user *buf;
- if (*bufs == NULL)
- continue;
-
- hwbuf = runtime->dma_area + (c * dma_csize) + samples_to_bytes(runtime, hwoff);
- buf = *bufs + samples_to_bytes(runtime, off);
- if (copy_to_user(buf, hwbuf, samples_to_bytes(runtime, frames)))
- return -EFAULT;
- }
- }
- return 0;
-}
-
-snd_pcm_sframes_t snd_pcm_lib_readv(struct snd_pcm_substream *substream,
- void __user **bufs,
- snd_pcm_uframes_t frames)
-{
- struct snd_pcm_runtime *runtime;
- int nonblock;
- int err;
-
- err = pcm_sanity_check(substream);
- if (err < 0)
- return err;
- runtime = substream->runtime;
- if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
- return -EBADFD;
-
- nonblock = !!(substream->f_flags & O_NONBLOCK);
- if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
- return -EINVAL;
- return snd_pcm_lib_read1(substream, (unsigned long)bufs, frames, nonblock, snd_pcm_lib_readv_transfer);
-}
-
-EXPORT_SYMBOL(snd_pcm_lib_readv);
+EXPORT_SYMBOL(__snd_pcm_lib_xfer);
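
With __snd_pcm_lib_xfer() exported, the four former entry points (write/read, interleaved or vectored) collapse into thin wrappers that merely pick the interleaved and in_kernel flags. A hedged reconstruction of two such wrappers, assuming the signature exported here; the actual wrappers live outside this hunk:

static inline snd_pcm_sframes_t
sketch_pcm_lib_write(struct snd_pcm_substream *substream,
		     const void __user *buf, snd_pcm_uframes_t frames)
{
	/* interleaved write from user space */
	return __snd_pcm_lib_xfer(substream, (void __force *)buf,
				  true, frames, false);
}

static inline snd_pcm_sframes_t
sketch_pcm_kernel_readv(struct snd_pcm_substream *substream,
			void **bufs, snd_pcm_uframes_t frames)
{
	/* non-interleaved read into kernel-space buffers */
	return __snd_pcm_lib_xfer(substream, bufs, false, frames, true);
}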
/*
* standard channel mapping helpers
diff --git a/sound/core/pcm_local.h b/sound/core/pcm_local.h
new file mode 100644
index 000000000000..16f254732b2a
--- /dev/null
+++ b/sound/core/pcm_local.h
@@ -0,0 +1,50 @@
+/*
+ * pcm_local.h - a local header file for snd-pcm module.
+ *
+ * Copyright (c) Takashi Sakamoto <[email protected]>
+ *
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#ifndef __SOUND_CORE_PCM_LOCAL_H
+#define __SOUND_CORE_PCM_LOCAL_H
+
+extern const struct snd_pcm_hw_constraint_list snd_pcm_known_rates;
+
+void snd_interval_mul(const struct snd_interval *a,
+ const struct snd_interval *b, struct snd_interval *c);
+void snd_interval_div(const struct snd_interval *a,
+ const struct snd_interval *b, struct snd_interval *c);
+void snd_interval_muldivk(const struct snd_interval *a,
+ const struct snd_interval *b,
+ unsigned int k, struct snd_interval *c);
+void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
+ const struct snd_interval *b, struct snd_interval *c);
+
+int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream);
+int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream);
+
+int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime,
+ snd_pcm_hw_param_t var, u_int32_t mask);
+
+int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t appl_ptr);
+int snd_pcm_update_state(struct snd_pcm_substream *substream,
+ struct snd_pcm_runtime *runtime);
+int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream);
+
+void snd_pcm_playback_silence(struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t new_hw_ptr);
+
+#ifdef CONFIG_SND_PCM_TIMER
+void snd_pcm_timer_resolution_change(struct snd_pcm_substream *substream);
+void snd_pcm_timer_init(struct snd_pcm_substream *substream);
+void snd_pcm_timer_done(struct snd_pcm_substream *substream);
+#else
+static inline void
+snd_pcm_timer_resolution_change(struct snd_pcm_substream *substream) {}
+static inline void snd_pcm_timer_init(struct snd_pcm_substream *substream) {}
+static inline void snd_pcm_timer_done(struct snd_pcm_substream *substream) {}
+#endif
+
+#endif /* __SOUND_CORE_PCM_LOCAL_H */
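
pcm_lib_apply_appl_ptr(), declared above and defined in pcm_lib.c earlier in this diff, lets a driver's .ack callback veto an appl_ptr advance: on a negative return the core restores the previous pointer. A sketch of a driver-side callback using that contract; the hardware-queue helpers are hypothetical:

/* hypothetical driver helpers, assumed to exist for this sketch */
static bool sketch_hw_queue_full(struct snd_pcm_substream *substream);
static void sketch_hw_kick(struct snd_pcm_substream *substream,
			   snd_pcm_uframes_t appl_ptr);

static int sketch_pcm_ack(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (sketch_hw_queue_full(substream))
		return -EAGAIN;	/* core rolls appl_ptr back */

	sketch_hw_kick(substream, runtime->control->appl_ptr);
	return 0;
}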
diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
index b45f6aa32264..ae33e456708c 100644
--- a/sound/core/pcm_memory.c
+++ b/sound/core/pcm_memory.c
@@ -120,7 +120,6 @@ int snd_pcm_lib_preallocate_free_for_all(struct snd_pcm *pcm)
snd_pcm_lib_preallocate_free(substream);
return 0;
}
-
EXPORT_SYMBOL(snd_pcm_lib_preallocate_free_for_all);
#ifdef CONFIG_SND_VERBOSE_PROCFS
@@ -263,7 +262,6 @@ int snd_pcm_lib_preallocate_pages(struct snd_pcm_substream *substream,
substream->dma_buffer.dev.dev = data;
return snd_pcm_lib_preallocate_pages1(substream, size, max);
}
-
EXPORT_SYMBOL(snd_pcm_lib_preallocate_pages);
/**
@@ -292,7 +290,6 @@ int snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm,
return err;
return 0;
}
-
EXPORT_SYMBOL(snd_pcm_lib_preallocate_pages_for_all);
#ifdef CONFIG_SND_DMA_SGBUF
@@ -314,7 +311,6 @@ struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream, unsigne
return NULL;
return sgbuf->page_table[idx];
}
-
EXPORT_SYMBOL(snd_pcm_sgbuf_ops_page);
#endif /* CONFIG_SND_DMA_SGBUF */
@@ -370,7 +366,6 @@ int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size)
runtime->dma_bytes = size;
return 1; /* area was changed */
}
-
EXPORT_SYMBOL(snd_pcm_lib_malloc_pages);
/**
@@ -398,7 +393,6 @@ int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream)
snd_pcm_set_runtime_buffer(substream, NULL);
return 0;
}
-
EXPORT_SYMBOL(snd_pcm_lib_free_pages);
int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream,
diff --git a/sound/core/pcm_misc.c b/sound/core/pcm_misc.c
index 53dc37357bca..9be81025372f 100644
--- a/sound/core/pcm_misc.c
+++ b/sound/core/pcm_misc.c
@@ -23,6 +23,9 @@
#include <linux/export.h>
#include <sound/core.h>
#include <sound/pcm.h>
+
+#include "pcm_local.h"
+
#define SND_PCM_FORMAT_UNKNOWN (-1)
/* NOTE: "signed" prefix must be given below since the default char is
@@ -245,7 +248,6 @@ int snd_pcm_format_signed(snd_pcm_format_t format)
return -EINVAL;
return val;
}
-
EXPORT_SYMBOL(snd_pcm_format_signed);
/**
@@ -264,7 +266,6 @@ int snd_pcm_format_unsigned(snd_pcm_format_t format)
return val;
return !val;
}
-
EXPORT_SYMBOL(snd_pcm_format_unsigned);
/**
@@ -277,7 +278,6 @@ int snd_pcm_format_linear(snd_pcm_format_t format)
{
return snd_pcm_format_signed(format) >= 0;
}
-
EXPORT_SYMBOL(snd_pcm_format_linear);
/**
@@ -296,7 +296,6 @@ int snd_pcm_format_little_endian(snd_pcm_format_t format)
return -EINVAL;
return val;
}
-
EXPORT_SYMBOL(snd_pcm_format_little_endian);
/**
@@ -315,7 +314,6 @@ int snd_pcm_format_big_endian(snd_pcm_format_t format)
return val;
return !val;
}
-
EXPORT_SYMBOL(snd_pcm_format_big_endian);
/**
@@ -334,7 +332,6 @@ int snd_pcm_format_width(snd_pcm_format_t format)
return -EINVAL;
return val;
}
-
EXPORT_SYMBOL(snd_pcm_format_width);
/**
@@ -353,7 +350,6 @@ int snd_pcm_format_physical_width(snd_pcm_format_t format)
return -EINVAL;
return val;
}
-
EXPORT_SYMBOL(snd_pcm_format_physical_width);
/**
@@ -371,7 +367,6 @@ ssize_t snd_pcm_format_size(snd_pcm_format_t format, size_t samples)
return -EINVAL;
return samples * phys_width / 8;
}
-
EXPORT_SYMBOL(snd_pcm_format_size);
/**
@@ -388,7 +383,6 @@ const unsigned char *snd_pcm_format_silence_64(snd_pcm_format_t format)
return NULL;
return pcm_formats[(INT)format].silence;
}
-
EXPORT_SYMBOL(snd_pcm_format_silence_64);
/**
@@ -459,7 +453,6 @@ int snd_pcm_format_set_silence(snd_pcm_format_t format, void *data, unsigned int
#endif
return 0;
}
-
EXPORT_SYMBOL(snd_pcm_format_set_silence);
/**
@@ -488,7 +481,6 @@ int snd_pcm_limit_hw_rates(struct snd_pcm_runtime *runtime)
}
return 0;
}
-
EXPORT_SYMBOL(snd_pcm_limit_hw_rates);
/**
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index faa2e2be6f2e..b3d5bed75029 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -37,6 +37,18 @@
#include <sound/minors.h>
#include <linux/uio.h>
+#include "pcm_local.h"
+
+#ifdef CONFIG_SND_DEBUG
+#define CREATE_TRACE_POINTS
+#include "pcm_param_trace.h"
+#else
+#define trace_hw_mask_param_enabled() 0
+#define trace_hw_interval_param_enabled() 0
+#define trace_hw_mask_param(substream, type, index, prev, curr)
+#define trace_hw_interval_param(substream, type, index, prev, curr)
+#endif
+
/*
* Compatibility
*/
@@ -181,20 +193,6 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
-static inline mm_segment_t snd_enter_user(void)
-{
- mm_segment_t fs = get_fs();
- set_fs(get_ds());
- return fs;
-}
-
-static inline void snd_leave_user(mm_segment_t fs)
-{
- set_fs(fs);
-}
-
-
-
int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
{
struct snd_pcm_runtime *runtime;
@@ -214,11 +212,7 @@ int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
strlcpy(info->subname, substream->name, sizeof(info->subname));
runtime = substream->runtime;
- /* AB: FIXME!!! This is definitely nonsense */
- if (runtime) {
- info->sync = runtime->sync;
- substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_INFO, info);
- }
+
return 0;
}
@@ -255,205 +249,268 @@ static bool hw_support_mmap(struct snd_pcm_substream *substream)
return true;
}
-#undef RULES_DEBUG
-
-#ifdef RULES_DEBUG
-#define HW_PARAM(v) [SNDRV_PCM_HW_PARAM_##v] = #v
-static const char * const snd_pcm_hw_param_names[] = {
- HW_PARAM(ACCESS),
- HW_PARAM(FORMAT),
- HW_PARAM(SUBFORMAT),
- HW_PARAM(SAMPLE_BITS),
- HW_PARAM(FRAME_BITS),
- HW_PARAM(CHANNELS),
- HW_PARAM(RATE),
- HW_PARAM(PERIOD_TIME),
- HW_PARAM(PERIOD_SIZE),
- HW_PARAM(PERIOD_BYTES),
- HW_PARAM(PERIODS),
- HW_PARAM(BUFFER_TIME),
- HW_PARAM(BUFFER_SIZE),
- HW_PARAM(BUFFER_BYTES),
- HW_PARAM(TICK_TIME),
-};
-#endif
-
-int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
+static int constrain_mask_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
{
+ struct snd_pcm_hw_constraints *constrs =
+ &substream->runtime->hw_constraints;
+ struct snd_mask *m;
unsigned int k;
- struct snd_pcm_hardware *hw;
- struct snd_interval *i = NULL;
- struct snd_mask *m = NULL;
- struct snd_pcm_hw_constraints *constrs = &substream->runtime->hw_constraints;
- unsigned int rstamps[constrs->rules_num];
- unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
- unsigned int stamp = 2;
- int changed, again;
-
- params->info = 0;
- params->fifo_size = 0;
- if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
- params->msbits = 0;
- if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_RATE)) {
- params->rate_num = 0;
- params->rate_den = 0;
- }
+ struct snd_mask old_mask;
+ int changed;
for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
m = hw_param_mask(params, k);
if (snd_mask_empty(m))
return -EINVAL;
+
+ /* The caller did not request a change of this parameter. */
if (!(params->rmask & (1 << k)))
continue;
-#ifdef RULES_DEBUG
- pr_debug("%s = ", snd_pcm_hw_param_names[k]);
- pr_cont("%04x%04x%04x%04x -> ", m->bits[3], m->bits[2], m->bits[1], m->bits[0]);
-#endif
+
+ if (trace_hw_mask_param_enabled())
+ old_mask = *m;
+
changed = snd_mask_refine(m, constrs_mask(constrs, k));
-#ifdef RULES_DEBUG
- pr_cont("%04x%04x%04x%04x\n", m->bits[3], m->bits[2], m->bits[1], m->bits[0]);
-#endif
- if (changed)
- params->cmask |= 1 << k;
if (changed < 0)
return changed;
+ if (changed == 0)
+ continue;
+
+ /* Set corresponding flag so that the caller gets it. */
+ trace_hw_mask_param(substream, k, 0, &old_mask, m);
+ params->cmask |= 1 << k;
}
+ return 0;
+}
+
+static int constrain_interval_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_pcm_hw_constraints *constrs =
+ &substream->runtime->hw_constraints;
+ struct snd_interval *i;
+ unsigned int k;
+ struct snd_interval old_interval;
+ int changed;
+
for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
i = hw_param_interval(params, k);
if (snd_interval_empty(i))
return -EINVAL;
+
+ /* The caller did not request a change of this parameter. */
if (!(params->rmask & (1 << k)))
continue;
-#ifdef RULES_DEBUG
- pr_debug("%s = ", snd_pcm_hw_param_names[k]);
- if (i->empty)
- pr_cont("empty");
- else
- pr_cont("%c%u %u%c",
- i->openmin ? '(' : '[', i->min,
- i->max, i->openmax ? ')' : ']');
- pr_cont(" -> ");
-#endif
+
+ if (trace_hw_interval_param_enabled())
+ old_interval = *i;
+
changed = snd_interval_refine(i, constrs_interval(constrs, k));
-#ifdef RULES_DEBUG
- if (i->empty)
- pr_cont("empty\n");
- else
- pr_cont("%c%u %u%c\n",
- i->openmin ? '(' : '[', i->min,
- i->max, i->openmax ? ')' : ']');
-#endif
- if (changed)
- params->cmask |= 1 << k;
if (changed < 0)
return changed;
+ if (changed == 0)
+ continue;
+
+ /* Set corresponding flag so that the caller gets it. */
+ trace_hw_interval_param(substream, k, 0, &old_interval, i);
+ params->cmask |= 1 << k;
}
+ return 0;
+}
+
+static int constrain_params_by_rules(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_pcm_hw_constraints *constrs =
+ &substream->runtime->hw_constraints;
+ unsigned int k;
+ unsigned int rstamps[constrs->rules_num];
+ unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
+ unsigned int stamp;
+ struct snd_pcm_hw_rule *r;
+ unsigned int d;
+ struct snd_mask old_mask;
+ struct snd_interval old_interval;
+ bool again;
+ int changed;
+
+ /*
+ * Each application of a rule has its own sequence number.
+ *
+ * Each member of the 'rstamps' array holds the sequence number of
+ * the most recent application of the corresponding rule.
+ */
for (k = 0; k < constrs->rules_num; k++)
rstamps[k] = 0;
- for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
+
+ /*
+ * Each member of the 'vstamps' array holds the sequence number of
+ * the most recent rule application in which the corresponding
+ * parameter was changed.
+ *
+ * Initially, the elements for parameters requested by the caller
+ * are set to 1. For unrequested parameters the elements stay 0,
+ * so those parameters are never changed.
+ */
+ for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
vstamps[k] = (params->rmask & (1 << k)) ? 1 : 0;
- do {
- again = 0;
- for (k = 0; k < constrs->rules_num; k++) {
- struct snd_pcm_hw_rule *r = &constrs->rules[k];
- unsigned int d;
- int doit = 0;
- if (r->cond && !(r->cond & params->flags))
- continue;
- for (d = 0; r->deps[d] >= 0; d++) {
- if (vstamps[r->deps[d]] > rstamps[k]) {
- doit = 1;
- break;
- }
- }
- if (!doit)
- continue;
-#ifdef RULES_DEBUG
- pr_debug("Rule %d [%p]: ", k, r->func);
- if (r->var >= 0) {
- pr_cont("%s = ", snd_pcm_hw_param_names[r->var]);
- if (hw_is_mask(r->var)) {
- m = hw_param_mask(params, r->var);
- pr_cont("%x", *m->bits);
- } else {
- i = hw_param_interval(params, r->var);
- if (i->empty)
- pr_cont("empty");
- else
- pr_cont("%c%u %u%c",
- i->openmin ? '(' : '[', i->min,
- i->max, i->openmax ? ')' : ']');
- }
- }
-#endif
- changed = r->func(params, r);
-#ifdef RULES_DEBUG
- if (r->var >= 0) {
- pr_cont(" -> ");
- if (hw_is_mask(r->var))
- pr_cont("%x", *m->bits);
- else {
- if (i->empty)
- pr_cont("empty");
- else
- pr_cont("%c%u %u%c",
- i->openmin ? '(' : '[', i->min,
- i->max, i->openmax ? ')' : ']');
- }
+
+ /* Due to the above design, actual sequence numbers start at 2. */
+ stamp = 2;
+retry:
+ /* Apply all rules in order. */
+ again = false;
+ for (k = 0; k < constrs->rules_num; k++) {
+ r = &constrs->rules[k];
+
+ /*
+ * Check the condition bits of this rule. When the rule has
+ * condition bits, it is skipped unless the caller's flags
+ * include them. SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP is an
+ * example of such a condition bit.
+ */
+ if (r->cond && !(r->cond & params->flags))
+ continue;
+
+ /*
+ * The 'deps' array holds at most three SNDRV_PCM_HW_PARAM_XXX
+ * dependencies for this rule; the entry following the last
+ * dependency is a sentinel with a negative value.
+ *
+ * The rule is applied in this pass only when one of its
+ * dependencies was changed by a more recent rule application
+ * than the rule's own last run.
+ */
+ for (d = 0; r->deps[d] >= 0; d++) {
+ if (vstamps[r->deps[d]] > rstamps[k])
+ break;
+ }
+ if (r->deps[d] < 0)
+ continue;
+
+ if (trace_hw_mask_param_enabled()) {
+ if (hw_is_mask(r->var))
+ old_mask = *hw_param_mask(params, r->var);
+ }
+ if (trace_hw_interval_param_enabled()) {
+ if (hw_is_interval(r->var))
+ old_interval = *hw_param_interval(params, r->var);
+ }
+
+ changed = r->func(params, r);
+ if (changed < 0)
+ return changed;
+
+ /*
+ * When the parameter has changed, report it to the caller via
+ * the corresponding cmask bit, then prepare for the next
+ * iteration.
+ */
+ if (changed && r->var >= 0) {
+ if (hw_is_mask(r->var)) {
+ trace_hw_mask_param(substream, r->var,
+ k + 1, &old_mask,
+ hw_param_mask(params, r->var));
}
- pr_cont("\n");
-#endif
- rstamps[k] = stamp;
- if (changed && r->var >= 0) {
- params->cmask |= (1 << r->var);
- vstamps[r->var] = stamp;
- again = 1;
+ if (hw_is_interval(r->var)) {
+ trace_hw_interval_param(substream, r->var,
+ k + 1, &old_interval,
+ hw_param_interval(params, r->var));
}
- if (changed < 0)
- return changed;
- stamp++;
+
+ params->cmask |= (1 << r->var);
+ vstamps[r->var] = stamp;
+ again = true;
}
- } while (again);
+
+ rstamps[k] = stamp++;
+ }
+
+ /* Iterate to evaluate all rules till no parameters are changed. */
+ if (again)
+ goto retry;
+
+ return 0;
+}
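
The deps[] walk above consumes lists that drivers build with snd_pcm_hw_rule_add(), whose variadic dependency list is terminated by -1, the sentinel this loop stops on. A minimal registration sketch; the rule callback name is a placeholder and only its prototype is declared here:

static int sketch_rate_limits_channels(struct snd_pcm_hw_params *params,
				       struct snd_pcm_hw_rule *rule);

static int sketch_register_rule(struct snd_pcm_runtime *runtime)
{
	/* CHANNELS depends on RATE; -1 terminates the deps list */
	return snd_pcm_hw_rule_add(runtime, 0,
				   SNDRV_PCM_HW_PARAM_CHANNELS,
				   sketch_rate_limits_channels, NULL,
				   SNDRV_PCM_HW_PARAM_RATE, -1);
}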
+
+static int fixup_unreferenced_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ const struct snd_interval *i;
+ const struct snd_mask *m;
+ int err;
+
if (!params->msbits) {
- i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
+ i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
if (snd_interval_single(i))
params->msbits = snd_interval_value(i);
}
if (!params->rate_den) {
- i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
if (snd_interval_single(i)) {
params->rate_num = snd_interval_value(i);
params->rate_den = 1;
}
}
- hw = &substream->runtime->hw;
+ if (!params->fifo_size) {
+ m = hw_param_mask_c(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ i = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ if (snd_mask_single(m) && snd_interval_single(i)) {
+ err = substream->ops->ioctl(substream,
+ SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
+ if (err < 0)
+ return err;
+ }
+ }
+
if (!params->info) {
- params->info = hw->info & ~(SNDRV_PCM_INFO_FIFO_IN_FRAMES |
- SNDRV_PCM_INFO_DRAIN_TRIGGER);
+ params->info = substream->runtime->hw.info;
+ params->info &= ~(SNDRV_PCM_INFO_FIFO_IN_FRAMES |
+ SNDRV_PCM_INFO_DRAIN_TRIGGER);
if (!hw_support_mmap(substream))
params->info &= ~(SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID);
}
- if (!params->fifo_size) {
- m = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
- i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
- if (snd_mask_min(m) == snd_mask_max(m) &&
- snd_interval_min(i) == snd_interval_max(i)) {
- changed = substream->ops->ioctl(substream,
- SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
- if (changed < 0)
- return changed;
- }
+
+ return 0;
+}
+
+int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ int err;
+
+ params->info = 0;
+ params->fifo_size = 0;
+ if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
+ params->msbits = 0;
+ if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_RATE)) {
+ params->rate_num = 0;
+ params->rate_den = 0;
}
+
+ err = constrain_mask_params(substream, params);
+ if (err < 0)
+ return err;
+
+ err = constrain_interval_params(substream, params);
+ if (err < 0)
+ return err;
+
+ err = constrain_params_by_rules(substream, params);
+ if (err < 0)
+ return err;
+
params->rmask = 0;
+
return 0;
}
-
EXPORT_SYMBOL(snd_pcm_hw_refine);
static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
@@ -467,11 +524,16 @@ static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
return PTR_ERR(params);
err = snd_pcm_hw_refine(substream, params);
- if (copy_to_user(_params, params, sizeof(*params))) {
- if (!err)
- err = -EFAULT;
- }
+ if (err < 0)
+ goto end;
+
+ err = fixup_unreferenced_params(substream, params);
+ if (err < 0)
+ goto end;
+ if (copy_to_user(_params, params, sizeof(*params)))
+ err = -EFAULT;
+end:
kfree(params);
return err;
}
@@ -509,6 +571,70 @@ static inline void snd_pcm_timer_notify(struct snd_pcm_substream *substream,
#endif
}
+/**
+ * snd_pcm_hw_params_choose - choose a configuration defined by @params
+ * @pcm: PCM instance
+ * @params: the hw_params instance
+ *
+ * Choose one configuration from the configuration space defined by @params.
+ * The configuration is chosen by fixing parameters in this order:
+ * first access, first format, first subformat, min channels,
+ * min rate, min period time, max buffer size, min tick time.
+ *
+ * Return: Zero if successful, or a negative error code on failure.
+ */
+static int snd_pcm_hw_params_choose(struct snd_pcm_substream *pcm,
+ struct snd_pcm_hw_params *params)
+{
+ static const int vars[] = {
+ SNDRV_PCM_HW_PARAM_ACCESS,
+ SNDRV_PCM_HW_PARAM_FORMAT,
+ SNDRV_PCM_HW_PARAM_SUBFORMAT,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ SNDRV_PCM_HW_PARAM_RATE,
+ SNDRV_PCM_HW_PARAM_PERIOD_TIME,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ SNDRV_PCM_HW_PARAM_TICK_TIME,
+ -1
+ };
+ const int *v;
+ struct snd_mask old_mask;
+ struct snd_interval old_interval;
+ int changed;
+
+ for (v = vars; *v != -1; v++) {
+ /* Keep old parameter to trace. */
+ if (trace_hw_mask_param_enabled()) {
+ if (hw_is_mask(*v))
+ old_mask = *hw_param_mask(params, *v);
+ }
+ if (trace_hw_interval_param_enabled()) {
+ if (hw_is_interval(*v))
+ old_interval = *hw_param_interval(params, *v);
+ }
+ if (*v != SNDRV_PCM_HW_PARAM_BUFFER_SIZE)
+ changed = snd_pcm_hw_param_first(pcm, params, *v, NULL);
+ else
+ changed = snd_pcm_hw_param_last(pcm, params, *v, NULL);
+ if (snd_BUG_ON(changed < 0))
+ return changed;
+ if (changed == 0)
+ continue;
+
+ /* Trace the changed parameter. */
+ if (hw_is_mask(*v)) {
+ trace_hw_mask_param(pcm, *v, 0, &old_mask,
+ hw_param_mask(params, *v));
+ }
+ if (hw_is_interval(*v)) {
+ trace_hw_interval_param(pcm, *v, 0, &old_interval,
+ hw_param_interval(params, *v));
+ }
+ }
+
+ return 0;
+}
+
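From user space, the refine/choose/fixup sequence above is usually driven through alsa-lib rather than raw ioctls. A small sketch under that assumption (device name, format and rate are arbitrary, error handling is abbreviated): it narrows the configuration space, lets the kernel pick one configuration, and then reads back fields that fixup_unreferenced_params() fills in, such as the significant bits and the exact rate fraction.

    /* build with: cc example.c -lasound */
    #include <stdio.h>
    #include <alsa/asoundlib.h>

    int main(void)
    {
            snd_pcm_t *pcm;
            snd_pcm_hw_params_t *hw;
            unsigned int rate = 44100, num, den;
            int dir = 0;

            if (snd_pcm_open(&pcm, "default", SND_PCM_STREAM_PLAYBACK, 0) < 0)
                    return 1;
            snd_pcm_hw_params_malloc(&hw);

            /* HW_REFINE: reduce the full configuration space of the device. */
            snd_pcm_hw_params_any(pcm, hw);
            snd_pcm_hw_params_set_access(pcm, hw, SND_PCM_ACCESS_RW_INTERLEAVED);
            snd_pcm_hw_params_set_format(pcm, hw, SND_PCM_FORMAT_S16_LE);
            snd_pcm_hw_params_set_channels(pcm, hw, 2);
            snd_pcm_hw_params_set_rate_near(pcm, hw, &rate, &dir);

            /* HW_PARAMS: the kernel chooses one configuration from what is left. */
            if (snd_pcm_hw_params(pcm, hw) < 0)
                    return 1;

            printf("significant bits: %d\n", snd_pcm_hw_params_get_sbits(hw));
            if (snd_pcm_hw_params_get_rate_numden(hw, &num, &den) == 0)
                    printf("exact rate: %u/%u\n", num, den);

            snd_pcm_hw_params_free(hw);
            snd_pcm_close(pcm);
            return 0;
    }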
static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
@@ -546,6 +672,10 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
if (err < 0)
goto _error;
+ err = fixup_unreferenced_params(substream, params);
+ if (err < 0)
+ goto _error;
+
if (substream->ops->hw_params != NULL) {
err = substream->ops->hw_params(substream, params);
if (err < 0)
@@ -621,11 +751,12 @@ static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
return PTR_ERR(params);
err = snd_pcm_hw_params(substream, params);
- if (copy_to_user(_params, params, sizeof(*params))) {
- if (!err)
- err = -EFAULT;
- }
+ if (err < 0)
+ goto end;
+ if (copy_to_user(_params, params, sizeof(*params)))
+ err = -EFAULT;
+end:
kfree(params);
return err;
}
@@ -1081,6 +1212,7 @@ static const struct action_ops snd_pcm_action_start = {
* @substream: the PCM substream instance
*
* Return: Zero if successful, or a negative error code.
+ * The stream lock must be acquired before calling this function.
*/
int snd_pcm_start(struct snd_pcm_substream *substream)
{
@@ -1088,6 +1220,13 @@ int snd_pcm_start(struct snd_pcm_substream *substream)
SNDRV_PCM_STATE_RUNNING);
}
+/* take the stream lock and start the streams */
+static int snd_pcm_start_lock_irq(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream,
+ SNDRV_PCM_STATE_RUNNING);
+}
+
/*
* stop callbacks
*/
@@ -1139,7 +1278,6 @@ int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
{
return snd_pcm_action(&snd_pcm_action_stop, substream, state);
}
-
EXPORT_SYMBOL(snd_pcm_stop);
/**
@@ -1314,7 +1452,6 @@ int snd_pcm_suspend(struct snd_pcm_substream *substream)
snd_pcm_stream_unlock_irqrestore(substream, flags);
return err;
}
-
EXPORT_SYMBOL(snd_pcm_suspend);
/**
@@ -1346,7 +1483,6 @@ int snd_pcm_suspend_all(struct snd_pcm *pcm)
}
return 0;
}
-
EXPORT_SYMBOL(snd_pcm_suspend_all);
/* resume */
@@ -1397,14 +1533,7 @@ static const struct action_ops snd_pcm_action_resume = {
static int snd_pcm_resume(struct snd_pcm_substream *substream)
{
- struct snd_card *card = substream->pcm->card;
- int res;
-
- snd_power_lock(card);
- if ((res = snd_power_wait(card, SNDRV_CTL_POWER_D0)) >= 0)
- res = snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0);
- snd_power_unlock(card);
- return res;
+ return snd_pcm_action_lock_irq(&snd_pcm_action_resume, substream, 0);
}
#else
@@ -1423,17 +1552,9 @@ static int snd_pcm_resume(struct snd_pcm_substream *substream)
*/
static int snd_pcm_xrun(struct snd_pcm_substream *substream)
{
- struct snd_card *card = substream->pcm->card;
struct snd_pcm_runtime *runtime = substream->runtime;
int result;
- snd_power_lock(card);
- if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
- result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
- if (result < 0)
- goto _unlock;
- }
-
snd_pcm_stream_lock_irq(substream);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_XRUN:
@@ -1446,8 +1567,6 @@ static int snd_pcm_xrun(struct snd_pcm_substream *substream)
result = -EBADFD;
}
snd_pcm_stream_unlock_irq(substream);
- _unlock:
- snd_power_unlock(card);
return result;
}
@@ -1551,8 +1670,6 @@ static const struct action_ops snd_pcm_action_prepare = {
static int snd_pcm_prepare(struct snd_pcm_substream *substream,
struct file *file)
{
- int res;
- struct snd_card *card = substream->pcm->card;
int f_flags;
if (file)
@@ -1560,12 +1677,19 @@ static int snd_pcm_prepare(struct snd_pcm_substream *substream,
else
f_flags = substream->f_flags;
- snd_power_lock(card);
- if ((res = snd_power_wait(card, SNDRV_CTL_POWER_D0)) >= 0)
- res = snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
- substream, f_flags);
- snd_power_unlock(card);
- return res;
+ snd_pcm_stream_lock_irq(substream);
+ switch (substream->runtime->status->state) {
+ case SNDRV_PCM_STATE_PAUSED:
+ snd_pcm_pause(substream, 0);
+ /* fallthru */
+ case SNDRV_PCM_STATE_SUSPENDED:
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
+ break;
+ }
+ snd_pcm_stream_unlock_irq(substream);
+
+ return snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
+ substream, f_flags);
}
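Since snd_pcm_prepare() now unwinds a PAUSED or SUSPENDED stream by itself, the usual user-space recovery logic keeps working unchanged. A minimal sketch of that pattern, assuming an alsa-lib handle `pcm` and leaving the resume retry loop unbounded for brevity:

    #include <errno.h>
    #include <poll.h>
    #include <alsa/asoundlib.h>

    /* Recover a stream after an xrun (-EPIPE) or a system suspend (-ESTRPIPE). */
    static int recover_stream(snd_pcm_t *pcm, int err)
    {
            if (err == -EPIPE)                      /* underrun/overrun */
                    return snd_pcm_prepare(pcm);
            if (err == -ESTRPIPE) {                 /* stream was suspended */
                    while ((err = snd_pcm_resume(pcm)) == -EAGAIN)
                            poll(NULL, 0, 100);     /* wait for the hardware */
                    if (err < 0)                    /* resume not supported */
                            return snd_pcm_prepare(pcm);
                    return 0;
            }
            return err;
    }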
/*
@@ -1662,15 +1786,6 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
return -EBADFD;
- snd_power_lock(card);
- if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
- result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
- if (result < 0) {
- snd_power_unlock(card);
- return result;
- }
- }
-
if (file) {
if (file->f_flags & O_NONBLOCK)
nonblock = 1;
@@ -1753,7 +1868,6 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
unlock:
snd_pcm_stream_unlock_irq(substream);
up_read(&snd_pcm_link_rwsem);
- snd_power_unlock(card);
return result;
}
@@ -1773,8 +1887,7 @@ static int snd_pcm_drop(struct snd_pcm_substream *substream)
runtime = substream->runtime;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
- runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED ||
- runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
+ runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
return -EBADFD;
snd_pcm_stream_lock_irq(substream);
@@ -1940,7 +2053,8 @@ static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
unsigned int k;
- struct snd_interval *i = hw_param_interval(params, rule->deps[0]);
+ const struct snd_interval *i =
+ hw_param_interval_c(params, rule->deps[0]);
struct snd_mask m;
struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
snd_mask_any(&m);
@@ -1986,8 +2100,10 @@ static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
#error "Change this table"
#endif
-static unsigned int rates[] = { 5512, 8000, 11025, 16000, 22050, 32000, 44100,
- 48000, 64000, 88200, 96000, 176400, 192000 };
+static const unsigned int rates[] = {
+ 5512, 8000, 11025, 16000, 22050, 32000, 44100,
+ 48000, 64000, 88200, 96000, 176400, 192000
+};
const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
.count = ARRAY_SIZE(rates),
@@ -2250,7 +2366,6 @@ void snd_pcm_release_substream(struct snd_pcm_substream *substream)
}
snd_pcm_detach_substream(substream);
}
-
EXPORT_SYMBOL(snd_pcm_release_substream);
int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
@@ -2292,7 +2407,6 @@ int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
snd_pcm_release_substream(substream);
return err;
}
-
EXPORT_SYMBOL(snd_pcm_open_substream);
static int snd_pcm_open_file(struct file *file,
@@ -2428,50 +2542,84 @@ static int snd_pcm_release(struct inode *inode, struct file *file)
return 0;
}
-static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream,
- snd_pcm_uframes_t frames)
+/* Check and update the PCM state; return 0 or a negative error.
+ * Call this with the PCM stream lock held.
+ */
+static int do_pcm_hwsync(struct snd_pcm_substream *substream)
{
- struct snd_pcm_runtime *runtime = substream->runtime;
- snd_pcm_sframes_t appl_ptr;
- snd_pcm_sframes_t ret;
- snd_pcm_sframes_t hw_avail;
-
- if (frames == 0)
- return 0;
-
- snd_pcm_stream_lock_irq(substream);
- switch (runtime->status->state) {
- case SNDRV_PCM_STATE_PREPARED:
- break;
+ switch (substream->runtime->status->state) {
case SNDRV_PCM_STATE_DRAINING:
- case SNDRV_PCM_STATE_RUNNING:
- if (snd_pcm_update_hw_ptr(substream) >= 0)
- break;
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ return -EBADFD;
/* Fall through */
- case SNDRV_PCM_STATE_XRUN:
- ret = -EPIPE;
- goto __end;
+ case SNDRV_PCM_STATE_RUNNING:
+ return snd_pcm_update_hw_ptr(substream);
+ case SNDRV_PCM_STATE_PREPARED:
+ case SNDRV_PCM_STATE_PAUSED:
+ return 0;
case SNDRV_PCM_STATE_SUSPENDED:
- ret = -ESTRPIPE;
- goto __end;
+ return -ESTRPIPE;
+ case SNDRV_PCM_STATE_XRUN:
+ return -EPIPE;
default:
- ret = -EBADFD;
- goto __end;
+ return -EBADFD;
}
+}
- hw_avail = snd_pcm_playback_hw_avail(runtime);
- if (hw_avail <= 0) {
- ret = 0;
- goto __end;
- }
- if (frames > (snd_pcm_uframes_t)hw_avail)
- frames = hw_avail;
+/* increase the appl_ptr; return the number of processed frames or a negative error */
+static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t frames,
+ snd_pcm_sframes_t avail)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ snd_pcm_sframes_t appl_ptr;
+ int ret;
+
+ if (avail <= 0)
+ return 0;
+ if (frames > (snd_pcm_uframes_t)avail)
+ frames = avail;
+ appl_ptr = runtime->control->appl_ptr + frames;
+ if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
+ appl_ptr -= runtime->boundary;
+ ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
+ return ret < 0 ? ret : frames;
+}
+
+/* decrease the appl_ptr; return the number of processed frames or a negative error */
+static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t frames,
+ snd_pcm_sframes_t avail)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ snd_pcm_sframes_t appl_ptr;
+ int ret;
+
+ if (avail <= 0)
+ return 0;
+ if (frames > (snd_pcm_uframes_t)avail)
+ frames = avail;
appl_ptr = runtime->control->appl_ptr - frames;
if (appl_ptr < 0)
appl_ptr += runtime->boundary;
- runtime->control->appl_ptr = appl_ptr;
- ret = frames;
- __end:
+ ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
+ return ret < 0 ? ret : frames;
+}
+
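Both helpers fold the new application pointer back into the range [0, boundary). A tiny standalone illustration of that wrap-around arithmetic (the boundary and the frame counts are made-up values; the real helpers additionally report the move via pcm_lib_apply_appl_ptr()):

    #include <stdio.h>

    typedef long sframes_t;
    typedef unsigned long uframes_t;

    static sframes_t forward_ptr(sframes_t appl_ptr, uframes_t frames,
                                 uframes_t boundary)
    {
            appl_ptr += frames;
            if (appl_ptr >= (sframes_t)boundary)
                    appl_ptr -= boundary;
            return appl_ptr;
    }

    static sframes_t rewind_ptr(sframes_t appl_ptr, uframes_t frames,
                                uframes_t boundary)
    {
            appl_ptr -= frames;
            if (appl_ptr < 0)
                    appl_ptr += boundary;
            return appl_ptr;
    }

    int main(void)
    {
            uframes_t boundary = 0x40000000;        /* made-up boundary */

            /* 0x3ffffff0 + 0x20 wraps to 0x10; 0x10 - 0x20 wraps to 0x3ffffff0. */
            printf("%#lx\n", (unsigned long)forward_ptr(0x3ffffff0, 0x20, boundary));
            printf("%#lx\n", (unsigned long)rewind_ptr(0x10, 0x20, boundary));
            return 0;
    }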
+static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t frames)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ snd_pcm_sframes_t ret;
+
+ if (frames == 0)
+ return 0;
+
+ snd_pcm_stream_lock_irq(substream);
+ ret = do_pcm_hwsync(substream);
+ if (!ret)
+ ret = rewind_appl_ptr(substream, frames,
+ snd_pcm_playback_hw_avail(runtime));
snd_pcm_stream_unlock_irq(substream);
return ret;
}
@@ -2480,46 +2628,16 @@ static snd_pcm_sframes_t snd_pcm_capture_rewind(struct snd_pcm_substream *substr
snd_pcm_uframes_t frames)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- snd_pcm_sframes_t appl_ptr;
snd_pcm_sframes_t ret;
- snd_pcm_sframes_t hw_avail;
if (frames == 0)
return 0;
snd_pcm_stream_lock_irq(substream);
- switch (runtime->status->state) {
- case SNDRV_PCM_STATE_PREPARED:
- case SNDRV_PCM_STATE_DRAINING:
- break;
- case SNDRV_PCM_STATE_RUNNING:
- if (snd_pcm_update_hw_ptr(substream) >= 0)
- break;
- /* Fall through */
- case SNDRV_PCM_STATE_XRUN:
- ret = -EPIPE;
- goto __end;
- case SNDRV_PCM_STATE_SUSPENDED:
- ret = -ESTRPIPE;
- goto __end;
- default:
- ret = -EBADFD;
- goto __end;
- }
-
- hw_avail = snd_pcm_capture_hw_avail(runtime);
- if (hw_avail <= 0) {
- ret = 0;
- goto __end;
- }
- if (frames > (snd_pcm_uframes_t)hw_avail)
- frames = hw_avail;
- appl_ptr = runtime->control->appl_ptr - frames;
- if (appl_ptr < 0)
- appl_ptr += runtime->boundary;
- runtime->control->appl_ptr = appl_ptr;
- ret = frames;
- __end:
+ ret = do_pcm_hwsync(substream);
+ if (!ret)
+ ret = rewind_appl_ptr(substream, frames,
+ snd_pcm_capture_hw_avail(runtime));
snd_pcm_stream_unlock_irq(substream);
return ret;
}
@@ -2528,47 +2646,16 @@ static snd_pcm_sframes_t snd_pcm_playback_forward(struct snd_pcm_substream *subs
snd_pcm_uframes_t frames)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- snd_pcm_sframes_t appl_ptr;
snd_pcm_sframes_t ret;
- snd_pcm_sframes_t avail;
if (frames == 0)
return 0;
snd_pcm_stream_lock_irq(substream);
- switch (runtime->status->state) {
- case SNDRV_PCM_STATE_PREPARED:
- case SNDRV_PCM_STATE_PAUSED:
- break;
- case SNDRV_PCM_STATE_DRAINING:
- case SNDRV_PCM_STATE_RUNNING:
- if (snd_pcm_update_hw_ptr(substream) >= 0)
- break;
- /* Fall through */
- case SNDRV_PCM_STATE_XRUN:
- ret = -EPIPE;
- goto __end;
- case SNDRV_PCM_STATE_SUSPENDED:
- ret = -ESTRPIPE;
- goto __end;
- default:
- ret = -EBADFD;
- goto __end;
- }
-
- avail = snd_pcm_playback_avail(runtime);
- if (avail <= 0) {
- ret = 0;
- goto __end;
- }
- if (frames > (snd_pcm_uframes_t)avail)
- frames = avail;
- appl_ptr = runtime->control->appl_ptr + frames;
- if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
- appl_ptr -= runtime->boundary;
- runtime->control->appl_ptr = appl_ptr;
- ret = frames;
- __end:
+ ret = do_pcm_hwsync(substream);
+ if (!ret)
+ ret = forward_appl_ptr(substream, frames,
+ snd_pcm_playback_avail(runtime));
snd_pcm_stream_unlock_irq(substream);
return ret;
}
@@ -2577,123 +2664,47 @@ static snd_pcm_sframes_t snd_pcm_capture_forward(struct snd_pcm_substream *subst
snd_pcm_uframes_t frames)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- snd_pcm_sframes_t appl_ptr;
snd_pcm_sframes_t ret;
- snd_pcm_sframes_t avail;
if (frames == 0)
return 0;
snd_pcm_stream_lock_irq(substream);
- switch (runtime->status->state) {
- case SNDRV_PCM_STATE_PREPARED:
- case SNDRV_PCM_STATE_DRAINING:
- case SNDRV_PCM_STATE_PAUSED:
- break;
- case SNDRV_PCM_STATE_RUNNING:
- if (snd_pcm_update_hw_ptr(substream) >= 0)
- break;
- /* Fall through */
- case SNDRV_PCM_STATE_XRUN:
- ret = -EPIPE;
- goto __end;
- case SNDRV_PCM_STATE_SUSPENDED:
- ret = -ESTRPIPE;
- goto __end;
- default:
- ret = -EBADFD;
- goto __end;
- }
-
- avail = snd_pcm_capture_avail(runtime);
- if (avail <= 0) {
- ret = 0;
- goto __end;
- }
- if (frames > (snd_pcm_uframes_t)avail)
- frames = avail;
- appl_ptr = runtime->control->appl_ptr + frames;
- if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
- appl_ptr -= runtime->boundary;
- runtime->control->appl_ptr = appl_ptr;
- ret = frames;
- __end:
+ ret = do_pcm_hwsync(substream);
+ if (!ret)
+ ret = forward_appl_ptr(substream, frames,
+ snd_pcm_capture_avail(runtime));
snd_pcm_stream_unlock_irq(substream);
return ret;
}
static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
{
- struct snd_pcm_runtime *runtime = substream->runtime;
int err;
snd_pcm_stream_lock_irq(substream);
- switch (runtime->status->state) {
- case SNDRV_PCM_STATE_DRAINING:
- if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
- goto __badfd;
- /* Fall through */
- case SNDRV_PCM_STATE_RUNNING:
- if ((err = snd_pcm_update_hw_ptr(substream)) < 0)
- break;
- /* Fall through */
- case SNDRV_PCM_STATE_PREPARED:
- err = 0;
- break;
- case SNDRV_PCM_STATE_SUSPENDED:
- err = -ESTRPIPE;
- break;
- case SNDRV_PCM_STATE_XRUN:
- err = -EPIPE;
- break;
- default:
- __badfd:
- err = -EBADFD;
- break;
- }
+ err = do_pcm_hwsync(substream);
snd_pcm_stream_unlock_irq(substream);
return err;
}
-static int snd_pcm_delay(struct snd_pcm_substream *substream,
- snd_pcm_sframes_t __user *res)
+static snd_pcm_sframes_t snd_pcm_delay(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
snd_pcm_sframes_t n = 0;
snd_pcm_stream_lock_irq(substream);
- switch (runtime->status->state) {
- case SNDRV_PCM_STATE_DRAINING:
- if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
- goto __badfd;
- /* Fall through */
- case SNDRV_PCM_STATE_RUNNING:
- if ((err = snd_pcm_update_hw_ptr(substream)) < 0)
- break;
- /* Fall through */
- case SNDRV_PCM_STATE_PREPARED:
- case SNDRV_PCM_STATE_SUSPENDED:
- err = 0;
+ err = do_pcm_hwsync(substream);
+ if (!err) {
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
n = snd_pcm_playback_hw_avail(runtime);
else
n = snd_pcm_capture_avail(runtime);
n += runtime->delay;
- break;
- case SNDRV_PCM_STATE_XRUN:
- err = -EPIPE;
- break;
- default:
- __badfd:
- err = -EBADFD;
- break;
}
snd_pcm_stream_unlock_irq(substream);
- if (!err)
- if (put_user(n, res))
- err = -EFAULT;
- return err;
+ return err < 0 ? err : n;
}
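snd_pcm_delay() now returns the frame count directly (or a negative error), and the ioctl handler does the copy to user space. From an application the same value is normally obtained through alsa-lib; a brief sketch assuming an already-configured handle `pcm`:

    #include <alsa/asoundlib.h>

    /* Returns the estimated delay in frames, or a negative errno value. */
    static snd_pcm_sframes_t query_delay(snd_pcm_t *pcm)
    {
            snd_pcm_sframes_t delay;
            int err = snd_pcm_delay(pcm, &delay);

            return err < 0 ? err : delay;
    }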
static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
@@ -2718,10 +2729,16 @@ static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
return err;
}
snd_pcm_stream_lock_irq(substream);
- if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL))
- control->appl_ptr = sync_ptr.c.control.appl_ptr;
- else
+ if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL)) {
+ err = pcm_lib_apply_appl_ptr(substream,
+ sync_ptr.c.control.appl_ptr);
+ if (err < 0) {
+ snd_pcm_stream_unlock_irq(substream);
+ return err;
+ }
+ } else {
sync_ptr.c.control.appl_ptr = control->appl_ptr;
+ }
if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
control->avail_min = sync_ptr.c.control.avail_min;
else
@@ -2749,10 +2766,12 @@ static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg)
return 0;
}
-static int snd_pcm_common_ioctl1(struct file *file,
+static int snd_pcm_common_ioctl(struct file *file,
struct snd_pcm_substream *substream,
unsigned int cmd, void __user *arg)
{
+ struct snd_pcm_file *pcm_file = file->private_data;
+
switch (cmd) {
case SNDRV_PCM_IOCTL_PVERSION:
return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? -EFAULT : 0;
@@ -2762,6 +2781,11 @@ static int snd_pcm_common_ioctl1(struct file *file,
return 0;
case SNDRV_PCM_IOCTL_TTSTAMP:
return snd_pcm_tstamp(substream, arg);
+ case SNDRV_PCM_IOCTL_USER_PVERSION:
+ if (get_user(pcm_file->user_pversion,
+ (unsigned int __user *)arg))
+ return -EFAULT;
+ return 0;
case SNDRV_PCM_IOCTL_HW_REFINE:
return snd_pcm_hw_refine_user(substream, arg);
case SNDRV_PCM_IOCTL_HW_PARAMS:
@@ -2781,7 +2805,7 @@ static int snd_pcm_common_ioctl1(struct file *file,
case SNDRV_PCM_IOCTL_RESET:
return snd_pcm_reset(substream);
case SNDRV_PCM_IOCTL_START:
- return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream, SNDRV_PCM_STATE_RUNNING);
+ return snd_pcm_start_lock_irq(substream);
case SNDRV_PCM_IOCTL_LINK:
return snd_pcm_link(substream, (int)(unsigned long) arg);
case SNDRV_PCM_IOCTL_UNLINK:
@@ -2793,7 +2817,16 @@ static int snd_pcm_common_ioctl1(struct file *file,
case SNDRV_PCM_IOCTL_HWSYNC:
return snd_pcm_hwsync(substream);
case SNDRV_PCM_IOCTL_DELAY:
- return snd_pcm_delay(substream, arg);
+ {
+ snd_pcm_sframes_t delay = snd_pcm_delay(substream);
+ snd_pcm_sframes_t __user *res = arg;
+
+ if (delay < 0)
+ return delay;
+ if (put_user(delay, res))
+ return -EFAULT;
+ return 0;
+ }
case SNDRV_PCM_IOCTL_SYNC_PTR:
return snd_pcm_sync_ptr(substream, arg);
#ifdef CONFIG_SND_SUPPORT_OLD_API
@@ -2807,23 +2840,34 @@ static int snd_pcm_common_ioctl1(struct file *file,
case SNDRV_PCM_IOCTL_DROP:
return snd_pcm_drop(substream);
case SNDRV_PCM_IOCTL_PAUSE:
- {
- int res;
- snd_pcm_stream_lock_irq(substream);
- res = snd_pcm_pause(substream, (int)(unsigned long)arg);
- snd_pcm_stream_unlock_irq(substream);
- return res;
- }
+ return snd_pcm_action_lock_irq(&snd_pcm_action_pause,
+ substream,
+ (int)(unsigned long)arg);
}
pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd);
return -ENOTTY;
}
+static int snd_pcm_common_ioctl1(struct file *file,
+ struct snd_pcm_substream *substream,
+ unsigned int cmd, void __user *arg)
+{
+ struct snd_card *card = substream->pcm->card;
+ int res;
+
+ snd_power_lock(card);
+ res = snd_power_wait(card, SNDRV_CTL_POWER_D0);
+ if (res >= 0)
+ res = snd_pcm_common_ioctl(file, substream, cmd, arg);
+ snd_power_unlock(card);
+ return res;
+}
+
static int snd_pcm_playback_ioctl1(struct file *file,
struct snd_pcm_substream *substream,
unsigned int cmd, void __user *arg)
{
- if (snd_BUG_ON(!substream))
+ if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
if (snd_BUG_ON(substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
return -EINVAL;
@@ -2903,7 +2947,7 @@ static int snd_pcm_capture_ioctl1(struct file *file,
struct snd_pcm_substream *substream,
unsigned int cmd, void __user *arg)
{
- if (snd_BUG_ON(!substream))
+ if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
if (snd_BUG_ON(substream->stream != SNDRV_PCM_STREAM_CAPTURE))
return -EINVAL;
@@ -3007,30 +3051,55 @@ static long snd_pcm_capture_ioctl(struct file *file, unsigned int cmd,
(void __user *)arg);
}
+/**
+ * snd_pcm_kernel_ioctl - Execute PCM ioctl in the kernel-space
+ * @substream: PCM substream
+ * @cmd: IOCTL cmd
+ * @arg: IOCTL argument
+ *
+ * The function is provided primarily for the OSS layer and USB gadget drivers,
+ * and it allows only a limited set of ioctls (hw_params, sw_params,
+ * prepare, start, drain, drop, forward, delay).
+ */
int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg)
{
- mm_segment_t fs;
- int result;
+ snd_pcm_uframes_t *frames = arg;
+ snd_pcm_sframes_t result;
- fs = snd_enter_user();
- switch (substream->stream) {
- case SNDRV_PCM_STREAM_PLAYBACK:
- result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
- (void __user *)arg);
- break;
- case SNDRV_PCM_STREAM_CAPTURE:
- result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
- (void __user *)arg);
- break;
+ switch (cmd) {
+ case SNDRV_PCM_IOCTL_FORWARD:
+ {
+ /* provided only for OSS; capture-only and no value returned */
+ if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
+ return -EINVAL;
+ result = snd_pcm_capture_forward(substream, *frames);
+ return result < 0 ? result : 0;
+ }
+ case SNDRV_PCM_IOCTL_HW_PARAMS:
+ return snd_pcm_hw_params(substream, arg);
+ case SNDRV_PCM_IOCTL_SW_PARAMS:
+ return snd_pcm_sw_params(substream, arg);
+ case SNDRV_PCM_IOCTL_PREPARE:
+ return snd_pcm_prepare(substream, NULL);
+ case SNDRV_PCM_IOCTL_START:
+ return snd_pcm_start_lock_irq(substream);
+ case SNDRV_PCM_IOCTL_DRAIN:
+ return snd_pcm_drain(substream, NULL);
+ case SNDRV_PCM_IOCTL_DROP:
+ return snd_pcm_drop(substream);
+ case SNDRV_PCM_IOCTL_DELAY:
+ {
+ result = snd_pcm_delay(substream);
+ if (result < 0)
+ return result;
+ *frames = result;
+ return 0;
+ }
default:
- result = -EINVAL;
- break;
+ return -EINVAL;
}
- snd_leave_user(fs);
- return result;
}
-
EXPORT_SYMBOL(snd_pcm_kernel_ioctl);
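A hedged sketch of how an in-kernel caller (for instance the OSS emulation layer or a USB audio gadget) might use the reworked snd_pcm_kernel_ioctl(); the helper name is invented and the `substream` pointer is assumed to come from the surrounding driver code. Only the commands listed in the switch above are accepted:

    #include <sound/core.h>
    #include <sound/pcm.h>

    /* Stop the stream and prepare it again for a fresh start. */
    static int example_restart(struct snd_pcm_substream *substream)
    {
            int err;

            err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
            if (err < 0)
                    return err;
            return snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_PREPARE, NULL);
    }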
static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count,
@@ -3314,10 +3383,41 @@ static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file
area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
return 0;
}
+
+static bool pcm_status_mmap_allowed(struct snd_pcm_file *pcm_file)
+{
+ if (pcm_file->no_compat_mmap)
+ return false;
+ /* See pcm_control_mmap_allowed() below.
+ * Since older alsa-lib requires both status and control mmaps to be
+ * coupled, we have to disable the status mmap for old alsa-lib, too.
+ */
+ if (pcm_file->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 14) &&
+ (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR))
+ return false;
+ return true;
+}
+
+static bool pcm_control_mmap_allowed(struct snd_pcm_file *pcm_file)
+{
+ if (pcm_file->no_compat_mmap)
+ return false;
+ /* Disallow the control mmap when the SYNC_APPLPTR flag is set;
+ * this forces user-space to fall back to snd_pcm_sync_ptr(),
+ * which effectively guarantees that appl_ptr is updated under
+ * kernel control.
+ */
+ if (pcm_file->substream->runtime->hw.info & SNDRV_PCM_INFO_SYNC_APPLPTR)
+ return false;
+ return true;
+}
+
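For applications this means the control-record mmap may now fail with -ENXIO once they have declared a recent protocol version and the hardware sets SNDRV_PCM_INFO_SYNC_APPLPTR. A hedged raw-ioctl sketch of the expected fallback; the device path is arbitrary, the constants come from <sound/asound.h>, and alsa-lib performs the equivalent dance internally:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/mman.h>
    #include <sys/ioctl.h>
    #include <sound/asound.h>

    int main(void)
    {
            int fd = open("/dev/snd/pcmC0D0p", O_RDWR);     /* arbitrary device */
            int version = SNDRV_PCM_VERSION;
            struct snd_pcm_mmap_control *control;
            long pagesize = sysconf(_SC_PAGESIZE);

            if (fd < 0)
                    return 1;
            /* Declare which protocol we speak; older kernels lack this ioctl. */
            ioctl(fd, SNDRV_PCM_IOCTL_USER_PVERSION, &version);

            control = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
                           MAP_SHARED, fd, SNDRV_PCM_MMAP_OFFSET_CONTROL);
            if (control == MAP_FAILED) {
                    /* SYNC_APPLPTR hardware: read appl_ptr via SYNC_PTR instead. */
                    struct snd_pcm_sync_ptr sync_ptr = {
                            .flags = SNDRV_PCM_SYNC_PTR_APPL |
                                     SNDRV_PCM_SYNC_PTR_AVAIL_MIN,
                    };

                    ioctl(fd, SNDRV_PCM_IOCTL_SYNC_PTR, &sync_ptr);
                    printf("control mmap refused, using SYNC_PTR fallback\n");
            } else {
                    munmap(control, pagesize);
            }
            close(fd);
            return 0;
    }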
#else /* ! coherent mmap */
/*
* don't support mmap for status and control records.
*/
+#define pcm_status_mmap_allowed(pcm_file) false
+#define pcm_control_mmap_allowed(pcm_file) false
+
static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
struct vm_area_struct *area)
{
@@ -3437,7 +3537,6 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
}
-
EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
#endif /* SNDRV_PCM_INFO_MMAP */
@@ -3486,7 +3585,6 @@ int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file,
atomic_inc(&substream->mmap_count);
return err;
}
-
EXPORT_SYMBOL(snd_pcm_mmap_data);
static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area)
@@ -3503,11 +3601,11 @@ static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area)
offset = area->vm_pgoff << PAGE_SHIFT;
switch (offset) {
case SNDRV_PCM_MMAP_OFFSET_STATUS:
- if (pcm_file->no_compat_mmap)
+ if (!pcm_status_mmap_allowed(pcm_file))
return -ENXIO;
return snd_pcm_mmap_status(substream, file, area);
case SNDRV_PCM_MMAP_OFFSET_CONTROL:
- if (pcm_file->no_compat_mmap)
+ if (!pcm_control_mmap_allowed(pcm_file))
return -ENXIO;
return snd_pcm_mmap_control(substream, file, area);
default:
@@ -3603,12 +3701,17 @@ static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
}
snd_pcm_hw_convert_from_old_params(params, oparams);
err = snd_pcm_hw_refine(substream, params);
- snd_pcm_hw_convert_to_old_params(oparams, params);
- if (copy_to_user(_oparams, oparams, sizeof(*oparams))) {
- if (!err)
- err = -EFAULT;
- }
+ if (err < 0)
+ goto out_old;
+ err = fixup_unreferenced_params(substream, params);
+ if (err < 0)
+ goto out_old;
+
+ snd_pcm_hw_convert_to_old_params(oparams, params);
+ if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
+ err = -EFAULT;
+out_old:
kfree(oparams);
out:
kfree(params);
@@ -3631,14 +3734,16 @@ static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
err = PTR_ERR(oparams);
goto out;
}
+
snd_pcm_hw_convert_from_old_params(params, oparams);
err = snd_pcm_hw_params(substream, params);
- snd_pcm_hw_convert_to_old_params(oparams, params);
- if (copy_to_user(_oparams, oparams, sizeof(*oparams))) {
- if (!err)
- err = -EFAULT;
- }
+ if (err < 0)
+ goto out_old;
+ snd_pcm_hw_convert_to_old_params(oparams, params);
+ if (copy_to_user(_oparams, oparams, sizeof(*oparams)))
+ err = -EFAULT;
+out_old:
kfree(oparams);
out:
kfree(params);
diff --git a/sound/core/pcm_param_trace.h b/sound/core/pcm_param_trace.h
new file mode 100644
index 000000000000..86c8d658a25c
--- /dev/null
+++ b/sound/core/pcm_param_trace.h
@@ -0,0 +1,142 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM snd_pcm
+
+#if !defined(_PCM_PARAMS_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _PCM_PARAMS_TRACE_H
+
+#include <linux/tracepoint.h>
+
+#define HW_PARAM_ENTRY(param) {SNDRV_PCM_HW_PARAM_##param, #param}
+#define hw_param_labels \
+ HW_PARAM_ENTRY(ACCESS), \
+ HW_PARAM_ENTRY(FORMAT), \
+ HW_PARAM_ENTRY(SUBFORMAT), \
+ HW_PARAM_ENTRY(SAMPLE_BITS), \
+ HW_PARAM_ENTRY(FRAME_BITS), \
+ HW_PARAM_ENTRY(CHANNELS), \
+ HW_PARAM_ENTRY(RATE), \
+ HW_PARAM_ENTRY(PERIOD_TIME), \
+ HW_PARAM_ENTRY(PERIOD_SIZE), \
+ HW_PARAM_ENTRY(PERIOD_BYTES), \
+ HW_PARAM_ENTRY(PERIODS), \
+ HW_PARAM_ENTRY(BUFFER_TIME), \
+ HW_PARAM_ENTRY(BUFFER_SIZE), \
+ HW_PARAM_ENTRY(BUFFER_BYTES), \
+ HW_PARAM_ENTRY(TICK_TIME)
+
+TRACE_EVENT(hw_mask_param,
+ TP_PROTO(struct snd_pcm_substream *substream, snd_pcm_hw_param_t type, int index, const struct snd_mask *prev, const struct snd_mask *curr),
+ TP_ARGS(substream, type, index, prev, curr),
+ TP_STRUCT__entry(
+ __field(int, card)
+ __field(int, device)
+ __field(int, subdevice)
+ __field(int, direction)
+ __field(snd_pcm_hw_param_t, type)
+ __field(int, index)
+ __field(int, total)
+ __array(__u32, prev_bits, 8)
+ __array(__u32, curr_bits, 8)
+ ),
+ TP_fast_assign(
+ __entry->card = substream->pcm->card->number;
+ __entry->device = substream->pcm->device;
+ __entry->subdevice = substream->number;
+ __entry->direction = substream->stream;
+ __entry->type = type;
+ __entry->index = index;
+ __entry->total = substream->runtime->hw_constraints.rules_num;
+ memcpy(__entry->prev_bits, prev->bits, sizeof(__u32) * 8);
+ memcpy(__entry->curr_bits, curr->bits, sizeof(__u32) * 8);
+ ),
+ TP_printk("pcmC%dD%d%s:%d %03d/%03d %s %08x%08x%08x%08x %08x%08x%08x%08x",
+ __entry->card,
+ __entry->device,
+ __entry->direction ? "c" : "p",
+ __entry->subdevice,
+ __entry->index,
+ __entry->total,
+ __print_symbolic(__entry->type, hw_param_labels),
+ __entry->prev_bits[3], __entry->prev_bits[2],
+ __entry->prev_bits[1], __entry->prev_bits[0],
+ __entry->curr_bits[3], __entry->curr_bits[2],
+ __entry->curr_bits[1], __entry->curr_bits[0]
+ )
+);
+
+TRACE_EVENT(hw_interval_param,
+ TP_PROTO(struct snd_pcm_substream *substream, snd_pcm_hw_param_t type, int index, const struct snd_interval *prev, const struct snd_interval *curr),
+ TP_ARGS(substream, type, index, prev, curr),
+ TP_STRUCT__entry(
+ __field(int, card)
+ __field(int, device)
+ __field(int, subdevice)
+ __field(int, direction)
+ __field(snd_pcm_hw_param_t, type)
+ __field(int, index)
+ __field(int, total)
+ __field(unsigned int, prev_min)
+ __field(unsigned int, prev_max)
+ __field(unsigned int, prev_openmin)
+ __field(unsigned int, prev_openmax)
+ __field(unsigned int, prev_integer)
+ __field(unsigned int, prev_empty)
+ __field(unsigned int, curr_min)
+ __field(unsigned int, curr_max)
+ __field(unsigned int, curr_openmin)
+ __field(unsigned int, curr_openmax)
+ __field(unsigned int, curr_integer)
+ __field(unsigned int, curr_empty)
+ ),
+ TP_fast_assign(
+ __entry->card = substream->pcm->card->number;
+ __entry->device = substream->pcm->device;
+ __entry->subdevice = substream->number;
+ __entry->direction = substream->stream;
+ __entry->type = type;
+ __entry->index = index;
+ __entry->total = substream->runtime->hw_constraints.rules_num;
+ __entry->prev_min = prev->min;
+ __entry->prev_max = prev->max;
+ __entry->prev_openmin = prev->openmin;
+ __entry->prev_openmax = prev->openmax;
+ __entry->prev_integer = prev->integer;
+ __entry->prev_empty = prev->empty;
+ __entry->curr_min = curr->min;
+ __entry->curr_max = curr->max;
+ __entry->curr_openmin = curr->openmin;
+ __entry->curr_openmax = curr->openmax;
+ __entry->curr_integer = curr->integer;
+ __entry->curr_empty = curr->empty;
+ ),
+ TP_printk("pcmC%dD%d%s:%d %03d/%03d %s %d %d %s%u %u%s %d %d %s%u %u%s",
+ __entry->card,
+ __entry->device,
+ __entry->direction ? "c" : "p",
+ __entry->subdevice,
+ __entry->index,
+ __entry->total,
+ __print_symbolic(__entry->type, hw_param_labels),
+ __entry->prev_empty,
+ __entry->prev_integer,
+ __entry->prev_openmin ? "(" : "[",
+ __entry->prev_min,
+ __entry->prev_max,
+ __entry->prev_openmax ? ")" : "]",
+ __entry->curr_empty,
+ __entry->curr_integer,
+ __entry->curr_openmin ? "(" : "[",
+ __entry->curr_min,
+ __entry->curr_max,
+ __entry->curr_openmax ? ")" : "]"
+ )
+);
+
+#endif /* _PCM_PARAMS_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE pcm_param_trace
+#include <trace/define_trace.h>
diff --git a/sound/core/pcm_timer.c b/sound/core/pcm_timer.c
index 20ecd8f18080..11389f13de73 100644
--- a/sound/core/pcm_timer.c
+++ b/sound/core/pcm_timer.c
@@ -25,6 +25,8 @@
#include <sound/pcm.h>
#include <sound/timer.h>
+#include "pcm_local.h"
+
/*
* Timer functions
*/
@@ -33,8 +35,8 @@ void snd_pcm_timer_resolution_change(struct snd_pcm_substream *substream)
{
unsigned long rate, mult, fsize, l, post;
struct snd_pcm_runtime *runtime = substream->runtime;
-
- mult = 1000000000;
+
+ mult = 1000000000;
rate = runtime->rate;
if (snd_BUG_ON(!rate))
return;
@@ -65,7 +67,7 @@ void snd_pcm_timer_resolution_change(struct snd_pcm_substream *substream)
static unsigned long snd_pcm_timer_resolution(struct snd_timer * timer)
{
struct snd_pcm_substream *substream;
-
+
substream = timer->private_data;
return substream->runtime ? substream->runtime->timer_resolution : 0;
}
@@ -73,7 +75,7 @@ static unsigned long snd_pcm_timer_resolution(struct snd_timer * timer)
static int snd_pcm_timer_start(struct snd_timer * timer)
{
struct snd_pcm_substream *substream;
-
+
substream = snd_timer_chip(timer);
substream->timer_running = 1;
return 0;
@@ -82,7 +84,7 @@ static int snd_pcm_timer_start(struct snd_timer * timer)
static int snd_pcm_timer_stop(struct snd_timer * timer)
{
struct snd_pcm_substream *substream;
-
+
substream = snd_timer_chip(timer);
substream->timer_running = 0;
return 0;
@@ -112,7 +114,7 @@ void snd_pcm_timer_init(struct snd_pcm_substream *substream)
{
struct snd_timer_id tid;
struct snd_timer *timer;
-
+
tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
tid.dev_class = SNDRV_TIMER_CLASS_PCM;
tid.card = substream->pcm->card->number;
diff --git a/sound/core/pcm_trace.h b/sound/core/pcm_trace.h
index b63b654da5ff..3ddec1b8ae46 100644
--- a/sound/core/pcm_trace.h
+++ b/sound/core/pcm_trace.h
@@ -34,9 +34,9 @@ TRACE_EVENT(hwptr,
__entry->old_hw_ptr = (substream)->runtime->status->hw_ptr;
__entry->hw_ptr_base = (substream)->runtime->hw_ptr_base;
),
- TP_printk("pcmC%dD%d%c/sub%d: %s: pos=%lu, old=%lu, base=%lu, period=%lu, buf=%lu",
+ TP_printk("pcmC%dD%d%s/sub%d: %s: pos=%lu, old=%lu, base=%lu, period=%lu, buf=%lu",
__entry->card, __entry->device,
- __entry->stream == SNDRV_PCM_STREAM_PLAYBACK ? 'p' : 'c',
+ __entry->stream == SNDRV_PCM_STREAM_PLAYBACK ? "p" : "c",
__entry->number,
__entry->in_interrupt ? "IRQ" : "POS",
(unsigned long)__entry->pos,
@@ -69,9 +69,9 @@ TRACE_EVENT(xrun,
__entry->old_hw_ptr = (substream)->runtime->status->hw_ptr;
__entry->hw_ptr_base = (substream)->runtime->hw_ptr_base;
),
- TP_printk("pcmC%dD%d%c/sub%d: XRUN: old=%lu, base=%lu, period=%lu, buf=%lu",
+ TP_printk("pcmC%dD%d%s/sub%d: XRUN: old=%lu, base=%lu, period=%lu, buf=%lu",
__entry->card, __entry->device,
- __entry->stream == SNDRV_PCM_STREAM_PLAYBACK ? 'p' : 'c',
+ __entry->stream == SNDRV_PCM_STREAM_PLAYBACK ? "p" : "c",
__entry->number,
(unsigned long)__entry->old_hw_ptr,
(unsigned long)__entry->hw_ptr_base,
@@ -96,12 +96,50 @@ TRACE_EVENT(hw_ptr_error,
__entry->stream = (substream)->stream;
__entry->reason = (why);
),
- TP_printk("pcmC%dD%d%c/sub%d: ERROR: %s",
+ TP_printk("pcmC%dD%d%s/sub%d: ERROR: %s",
__entry->card, __entry->device,
- __entry->stream == SNDRV_PCM_STREAM_PLAYBACK ? 'p' : 'c',
+ __entry->stream == SNDRV_PCM_STREAM_PLAYBACK ? "p" : "c",
__entry->number, __entry->reason)
);
+TRACE_EVENT(applptr,
+ TP_PROTO(struct snd_pcm_substream *substream, snd_pcm_uframes_t prev, snd_pcm_uframes_t curr),
+ TP_ARGS(substream, prev, curr),
+ TP_STRUCT__entry(
+ __field( unsigned int, card )
+ __field( unsigned int, device )
+ __field( unsigned int, number )
+ __field( unsigned int, stream )
+ __field( snd_pcm_uframes_t, prev )
+ __field( snd_pcm_uframes_t, curr )
+ __field( snd_pcm_uframes_t, avail )
+ __field( snd_pcm_uframes_t, period_size )
+ __field( snd_pcm_uframes_t, buffer_size )
+ ),
+ TP_fast_assign(
+ __entry->card = (substream)->pcm->card->number;
+ __entry->device = (substream)->pcm->device;
+ __entry->number = (substream)->number;
+ __entry->stream = (substream)->stream;
+ __entry->prev = (prev);
+ __entry->curr = (curr);
+ __entry->avail = (substream)->stream ? snd_pcm_capture_avail(substream->runtime) : snd_pcm_playback_avail(substream->runtime);
+ __entry->period_size = (substream)->runtime->period_size;
+ __entry->buffer_size = (substream)->runtime->buffer_size;
+ ),
+ TP_printk("pcmC%dD%d%s/sub%d: prev=%lu, curr=%lu, avail=%lu, period=%lu, buf=%lu",
+ __entry->card,
+ __entry->device,
+ __entry->stream ? "c" : "p",
+ __entry->number,
+ __entry->prev,
+ __entry->curr,
+ __entry->avail,
+ __entry->period_size,
+ __entry->buffer_size
+ )
+);
+
#endif /* _PCM_TRACE_H */
/* This part must be outside protection */
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 32588ad05653..b3b353d72527 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -1610,7 +1610,7 @@ static int snd_rawmidi_dev_free(struct snd_device *device)
return snd_rawmidi_free(rmidi);
}
-#if IS_REACHABLE(CONFIG_SND_SEQUENCER)
+#if IS_ENABLED(CONFIG_SND_SEQUENCER)
static void snd_rawmidi_dev_seq_free(struct snd_seq_device *device)
{
struct snd_rawmidi *rmidi = device->private_data;
@@ -1691,7 +1691,7 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
}
}
rmidi->proc_entry = entry;
-#if IS_REACHABLE(CONFIG_SND_SEQUENCER)
+#if IS_ENABLED(CONFIG_SND_SEQUENCER)
if (!rmidi->ops || !rmidi->ops->dev_register) { /* own registration mechanism */
if (snd_seq_device_new(rmidi->card, rmidi->device, SNDRV_SEQ_DEV_ID_MIDISYNTH, 0, &rmidi->seq_dev) >= 0) {
rmidi->seq_dev->private_data = rmidi;
diff --git a/sound/core/seq/Kconfig b/sound/core/seq/Kconfig
index b851fd890a89..a536760a94c2 100644
--- a/sound/core/seq/Kconfig
+++ b/sound/core/seq/Kconfig
@@ -1,16 +1,62 @@
-# define SND_XXX_SEQ to min(SND_SEQUENCER,SND_XXX)
+config SND_SEQUENCER
+ tristate "Sequencer support"
+ select SND_TIMER
+ select SND_SEQ_DEVICE
+ help
+ Say Y or M to enable MIDI sequencer and router support. This
+ feature allows routing and enqueueing of MIDI events. Events
+ can be scheduled to be processed at a given time.
-config SND_RAWMIDI_SEQ
- def_tristate SND_SEQUENCER && SND_RAWMIDI
+ Many programs require this feature, so you should enable it
+ unless you know what you're doing.
-config SND_OPL3_LIB_SEQ
- def_tristate SND_SEQUENCER && SND_OPL3_LIB
+if SND_SEQUENCER
-config SND_OPL4_LIB_SEQ
- def_tristate SND_SEQUENCER && SND_OPL4_LIB
+config SND_SEQ_DUMMY
+ tristate "Sequencer dummy client"
+ help
+ Say Y here to enable the dummy sequencer client. This client
+ is a simple MIDI-through client: all normal input events are
+ redirected to the output port immediately.
-config SND_SBAWE_SEQ
- def_tristate SND_SEQUENCER && SND_SBAWE
+ You don't need this unless you want to connect many MIDI
+ devices or applications together.
-config SND_EMU10K1_SEQ
- def_tristate SND_SEQUENCER && SND_EMU10K1
+ To compile this driver as a module, choose M here: the module
+ will be called snd-seq-dummy.
+
+config SND_SEQUENCER_OSS
+ tristate "OSS Sequencer API"
+ depends on SND_OSSEMUL
+ select SND_SEQ_MIDI_EVENT
+ help
+ Say Y here to enable OSS sequencer emulation (both
+ /dev/sequencer and /dev/music interfaces).
+
+ Many programs still use the OSS API, so say Y.
+
+ To compile this driver as a module, choose M here: the module
+ will be called snd-seq-oss.
+
+config SND_SEQ_HRTIMER_DEFAULT
+ bool "Use HR-timer as default sequencer timer"
+ depends on SND_HRTIMER
+ default y
+ help
+ Say Y here to use the HR-timer backend as the default sequencer
+ timer.
+
+config SND_SEQ_MIDI_EVENT
+ def_tristate SND_RAWMIDI
+
+config SND_SEQ_MIDI
+ tristate
+ select SND_SEQ_MIDI_EVENT
+
+config SND_SEQ_MIDI_EMUL
+ tristate
+
+config SND_SEQ_VIRMIDI
+ tristate
+
+endif # SND_SEQUENCER
diff --git a/sound/core/seq/Makefile b/sound/core/seq/Makefile
index b65fa5a1943b..68fd367ac39c 100644
--- a/sound/core/seq/Makefile
+++ b/sound/core/seq/Makefile
@@ -3,7 +3,6 @@
# Copyright (c) 1999 by Jaroslav Kysela <[email protected]>
#
-snd-seq-device-objs := seq_device.o
snd-seq-objs := seq.o seq_lock.o seq_clientmgr.o seq_memory.o seq_queue.o \
seq_fifo.o seq_prioq.o seq_timer.o \
seq_system.o seq_ports.o
@@ -14,17 +13,11 @@ snd-seq-midi-event-objs := seq_midi_event.o
snd-seq-dummy-objs := seq_dummy.o
snd-seq-virmidi-objs := seq_virmidi.o
-obj-$(CONFIG_SND_SEQUENCER) += snd-seq.o snd-seq-device.o
-ifeq ($(CONFIG_SND_SEQUENCER_OSS),y)
- obj-$(CONFIG_SND_SEQUENCER) += snd-seq-midi-event.o
- obj-$(CONFIG_SND_SEQUENCER) += oss/
-endif
-obj-$(CONFIG_SND_SEQ_DUMMY) += snd-seq-dummy.o
+obj-$(CONFIG_SND_SEQUENCER) += snd-seq.o
+obj-$(CONFIG_SND_SEQUENCER_OSS) += oss/
-# Toplevel Module Dependency
-obj-$(CONFIG_SND_VIRMIDI) += snd-seq-virmidi.o snd-seq-midi-event.o
-obj-$(CONFIG_SND_RAWMIDI_SEQ) += snd-seq-midi.o snd-seq-midi-event.o
-obj-$(CONFIG_SND_OPL3_LIB_SEQ) += snd-seq-midi-event.o snd-seq-midi-emul.o
-obj-$(CONFIG_SND_OPL4_LIB_SEQ) += snd-seq-midi-event.o snd-seq-midi-emul.o
-obj-$(CONFIG_SND_SBAWE_SEQ) += snd-seq-midi-emul.o snd-seq-virmidi.o
-obj-$(CONFIG_SND_EMU10K1_SEQ) += snd-seq-midi-emul.o snd-seq-virmidi.o
+obj-$(CONFIG_SND_SEQ_DUMMY) += snd-seq-dummy.o
+obj-$(CONFIG_SND_SEQ_MIDI) += snd-seq-midi.o
+obj-$(CONFIG_SND_SEQ_MIDI_EMUL) += snd-seq-midi-emul.o
+obj-$(CONFIG_SND_SEQ_MIDI_EVENT) += snd-seq-midi-event.o
+obj-$(CONFIG_SND_SEQ_VIRMIDI) += snd-seq-virmidi.o
diff --git a/sound/core/seq/oss/Makefile b/sound/core/seq/oss/Makefile
index b38406b8463c..4ea4e3eea6b7 100644
--- a/sound/core/seq/oss/Makefile
+++ b/sound/core/seq/oss/Makefile
@@ -7,4 +7,4 @@ snd-seq-oss-objs := seq_oss.o seq_oss_init.o seq_oss_timer.o seq_oss_ioctl.o \
seq_oss_event.o seq_oss_rw.o seq_oss_synth.o \
seq_oss_midi.o seq_oss_readq.o seq_oss_writeq.o
-obj-$(CONFIG_SND_SEQUENCER) += snd-seq-oss.o
+obj-$(CONFIG_SND_SEQUENCER_OSS) += snd-seq-oss.o
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index f3b1d7f50b81..272c55fe17c8 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1668,7 +1668,6 @@ int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo)
return -EPERM;
return snd_seq_queue_timer_set_tempo(tempo->queue, client, tempo);
}
-
EXPORT_SYMBOL(snd_seq_set_queue_tempo);
static int snd_seq_ioctl_set_queue_tempo(struct snd_seq_client *client,
@@ -2200,7 +2199,6 @@ int snd_seq_create_kernel_client(struct snd_card *card, int client_index,
/* return client number to caller */
return client->number;
}
-
EXPORT_SYMBOL(snd_seq_create_kernel_client);
/* exported to kernel modules */
@@ -2219,7 +2217,6 @@ int snd_seq_delete_kernel_client(int client)
kfree(ptr);
return 0;
}
-
EXPORT_SYMBOL(snd_seq_delete_kernel_client);
/* skeleton to enqueue event, called from snd_seq_kernel_client_enqueue
@@ -2269,7 +2266,6 @@ int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event * ev,
{
return kernel_client_enqueue(client, ev, NULL, 0, atomic, hop);
}
-
EXPORT_SYMBOL(snd_seq_kernel_client_enqueue);
/*
@@ -2283,7 +2279,6 @@ int snd_seq_kernel_client_enqueue_blocking(int client, struct snd_seq_event * ev
{
return kernel_client_enqueue(client, ev, file, 1, atomic, hop);
}
-
EXPORT_SYMBOL(snd_seq_kernel_client_enqueue_blocking);
/*
@@ -2321,7 +2316,6 @@ int snd_seq_kernel_client_dispatch(int client, struct snd_seq_event * ev,
snd_seq_client_unlock(cptr);
return result;
}
-
EXPORT_SYMBOL(snd_seq_kernel_client_dispatch);
/**
@@ -2354,7 +2348,6 @@ int snd_seq_kernel_client_ctl(int clientid, unsigned int cmd, void *arg)
cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
return -ENOTTY;
}
-
EXPORT_SYMBOL(snd_seq_kernel_client_ctl);
/* exported (for OSS emulator) */
@@ -2372,7 +2365,6 @@ int snd_seq_kernel_client_write_poll(int clientid, struct file *file, poll_table
return 1;
return 0;
}
-
EXPORT_SYMBOL(snd_seq_kernel_client_write_poll);
/*---------------------------------------------------------------------------*/
diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
index 12ba83367b1b..0ff7926a5a69 100644
--- a/sound/core/seq/seq_lock.c
+++ b/sound/core/seq/seq_lock.c
@@ -40,7 +40,6 @@ void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
schedule_timeout_uninterruptible(1);
}
}
-
EXPORT_SYMBOL(snd_use_lock_sync_helper);
#endif
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index d6e9aacdc36b..f763682584a8 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -118,7 +118,6 @@ int snd_seq_dump_var_event(const struct snd_seq_event *event,
}
return 0;
}
-
EXPORT_SYMBOL(snd_seq_dump_var_event);
@@ -169,7 +168,6 @@ int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char
&buf);
return err < 0 ? err : newlen;
}
-
EXPORT_SYMBOL(snd_seq_expand_var_event);
/*
diff --git a/sound/core/seq/seq_midi_emul.c b/sound/core/seq/seq_midi_emul.c
index 7ba937399ac7..9e2912e3e80f 100644
--- a/sound/core/seq/seq_midi_emul.c
+++ b/sound/core/seq/seq_midi_emul.c
@@ -236,6 +236,7 @@ snd_midi_process_event(struct snd_midi_op *ops,
break;
}
}
+EXPORT_SYMBOL(snd_midi_process_event);
/*
@@ -409,6 +410,7 @@ snd_midi_channel_set_clear(struct snd_midi_channel_set *chset)
chan->drum_channel = 0;
}
}
+EXPORT_SYMBOL(snd_midi_channel_set_clear);
/*
* Process a rpn message.
@@ -701,6 +703,7 @@ struct snd_midi_channel_set *snd_midi_channel_alloc_set(int n)
}
return chset;
}
+EXPORT_SYMBOL(snd_midi_channel_alloc_set);
/*
* Reset the midi controllers on a particular channel to default values.
@@ -724,6 +727,7 @@ void snd_midi_channel_free_set(struct snd_midi_channel_set *chset)
kfree(chset->channels);
kfree(chset);
}
+EXPORT_SYMBOL(snd_midi_channel_free_set);
static int __init alsa_seq_midi_emul_init(void)
{
@@ -736,8 +740,3 @@ static void __exit alsa_seq_midi_emul_exit(void)
module_init(alsa_seq_midi_emul_init)
module_exit(alsa_seq_midi_emul_exit)
-
-EXPORT_SYMBOL(snd_midi_process_event);
-EXPORT_SYMBOL(snd_midi_channel_set_clear);
-EXPORT_SYMBOL(snd_midi_channel_alloc_set);
-EXPORT_SYMBOL(snd_midi_channel_free_set);
diff --git a/sound/core/seq/seq_midi_event.c b/sound/core/seq/seq_midi_event.c
index 37db7ba492a6..90bbbdbeba03 100644
--- a/sound/core/seq/seq_midi_event.c
+++ b/sound/core/seq/seq_midi_event.c
@@ -134,6 +134,7 @@ int snd_midi_event_new(int bufsize, struct snd_midi_event **rdev)
*rdev = dev;
return 0;
}
+EXPORT_SYMBOL(snd_midi_event_new);
void snd_midi_event_free(struct snd_midi_event *dev)
{
@@ -142,6 +143,7 @@ void snd_midi_event_free(struct snd_midi_event *dev)
kfree(dev);
}
}
+EXPORT_SYMBOL(snd_midi_event_free);
/*
* initialize record
@@ -161,6 +163,7 @@ void snd_midi_event_reset_encode(struct snd_midi_event *dev)
reset_encode(dev);
spin_unlock_irqrestore(&dev->lock, flags);
}
+EXPORT_SYMBOL(snd_midi_event_reset_encode);
void snd_midi_event_reset_decode(struct snd_midi_event *dev)
{
@@ -170,6 +173,7 @@ void snd_midi_event_reset_decode(struct snd_midi_event *dev)
dev->lastcmd = 0xff;
spin_unlock_irqrestore(&dev->lock, flags);
}
+EXPORT_SYMBOL(snd_midi_event_reset_decode);
#if 0
void snd_midi_event_init(struct snd_midi_event *dev)
@@ -183,6 +187,7 @@ void snd_midi_event_no_status(struct snd_midi_event *dev, int on)
{
dev->nostat = on ? 1 : 0;
}
+EXPORT_SYMBOL(snd_midi_event_no_status);
/*
* resize buffer
@@ -232,6 +237,7 @@ long snd_midi_event_encode(struct snd_midi_event *dev, unsigned char *buf, long
return result;
}
+EXPORT_SYMBOL(snd_midi_event_encode);
/*
* read one byte and encode to sequencer event:
@@ -307,6 +313,7 @@ int snd_midi_event_encode_byte(struct snd_midi_event *dev, int c,
spin_unlock_irqrestore(&dev->lock, flags);
return rc;
}
+EXPORT_SYMBOL(snd_midi_event_encode_byte);
/* encode note event */
static void note_event(struct snd_midi_event *dev, struct snd_seq_event *ev)
@@ -408,6 +415,7 @@ long snd_midi_event_decode(struct snd_midi_event *dev, unsigned char *buf, long
return qlen;
}
}
+EXPORT_SYMBOL(snd_midi_event_decode);
/* decode note event */
@@ -524,19 +532,6 @@ static int extra_decode_xrpn(struct snd_midi_event *dev, unsigned char *buf,
return idx;
}
-/*
- * exports
- */
-
-EXPORT_SYMBOL(snd_midi_event_new);
-EXPORT_SYMBOL(snd_midi_event_free);
-EXPORT_SYMBOL(snd_midi_event_reset_encode);
-EXPORT_SYMBOL(snd_midi_event_reset_decode);
-EXPORT_SYMBOL(snd_midi_event_no_status);
-EXPORT_SYMBOL(snd_midi_event_encode);
-EXPORT_SYMBOL(snd_midi_event_encode_byte);
-EXPORT_SYMBOL(snd_midi_event_decode);
-
static int __init alsa_seq_midi_event_init(void)
{
return 0;
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index fe686ee41c6d..0a7020c82bfc 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -685,7 +685,6 @@ int snd_seq_event_port_attach(int client,
return ret;
}
-
EXPORT_SYMBOL(snd_seq_event_port_attach);
/*
@@ -706,5 +705,4 @@ int snd_seq_event_port_detach(int client, int port)
return err;
}
-
EXPORT_SYMBOL(snd_seq_event_port_detach);
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 52f31f1498f9..8d93a4021c78 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -534,6 +534,7 @@ int snd_virmidi_new(struct snd_card *card, int device, struct snd_rawmidi **rrmi
*rrmidi = rmidi;
return 0;
}
+EXPORT_SYMBOL(snd_virmidi_new);
/*
* ENTRY functions
@@ -550,5 +551,3 @@ static void __exit alsa_virmidi_exit(void)
module_init(alsa_virmidi_init)
module_exit(alsa_virmidi_exit)
-
-EXPORT_SYMBOL(snd_virmidi_new);
diff --git a/sound/core/seq/seq_device.c b/sound/core/seq_device.c
index c4acf17e9f5e..c4acf17e9f5e 100644
--- a/sound/core/seq/seq_device.c
+++ b/sound/core/seq_device.c
diff --git a/sound/core/sound.c b/sound/core/sound.c
index 175f9e4e01c8..b30f027eb0fe 100644
--- a/sound/core/sound.c
+++ b/sound/core/sound.c
@@ -74,7 +74,6 @@ void snd_request_card(int card)
return;
request_module("snd-card-%i", card);
}
-
EXPORT_SYMBOL(snd_request_card);
static void snd_request_other(int minor)
@@ -124,7 +123,6 @@ void *snd_lookup_minor_data(unsigned int minor, int type)
mutex_unlock(&sound_mutex);
return private_data;
}
-
EXPORT_SYMBOL(snd_lookup_minor_data);
#ifdef CONFIG_MODULES
diff --git a/sound/core/sound_oss.c b/sound/core/sound_oss.c
index 0ca9d72b2273..0a5c66229a22 100644
--- a/sound/core/sound_oss.c
+++ b/sound/core/sound_oss.c
@@ -55,7 +55,6 @@ void *snd_lookup_oss_minor_data(unsigned int minor, int type)
mutex_unlock(&sound_oss_mutex);
return private_data;
}
-
EXPORT_SYMBOL(snd_lookup_oss_minor_data);
static int snd_oss_kernel_minor(int type, struct snd_card *card, int dev)
@@ -159,7 +158,6 @@ int snd_register_oss_device(int type, struct snd_card *card, int dev,
kfree(preg);
return -EBUSY;
}
-
EXPORT_SYMBOL(snd_register_oss_device);
int snd_unregister_oss_device(int type, struct snd_card *card, int dev)
@@ -200,7 +198,6 @@ int snd_unregister_oss_device(int type, struct snd_card *card, int dev)
kfree(mptr);
return 0;
}
-
EXPORT_SYMBOL(snd_unregister_oss_device);
/*
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 884c3066b028..a9b9a277e00c 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -319,6 +319,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
*ti = timeri;
return 0;
}
+EXPORT_SYMBOL(snd_timer_open);
/*
* close a timer instance
@@ -384,6 +385,7 @@ int snd_timer_close(struct snd_timer_instance *timeri)
mutex_unlock(&register_mutex);
return 0;
}
+EXPORT_SYMBOL(snd_timer_close);
unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
{
@@ -398,6 +400,7 @@ unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
}
return 0;
}
+EXPORT_SYMBOL(snd_timer_resolution);
static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
{
@@ -589,6 +592,7 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
else
return snd_timer_start1(timeri, true, ticks);
}
+EXPORT_SYMBOL(snd_timer_start);
/*
* stop the timer instance.
@@ -602,6 +606,7 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
else
return snd_timer_stop1(timeri, true);
}
+EXPORT_SYMBOL(snd_timer_stop);
/*
* start again.. the tick is kept.
@@ -617,6 +622,7 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
else
return snd_timer_start1(timeri, false, 0);
}
+EXPORT_SYMBOL(snd_timer_continue);
/*
* pause.. remember the ticks left
@@ -628,6 +634,7 @@ int snd_timer_pause(struct snd_timer_instance * timeri)
else
return snd_timer_stop1(timeri, false);
}
+EXPORT_SYMBOL(snd_timer_pause);
/*
* reschedule the timer
@@ -809,6 +816,7 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
if (use_tasklet)
tasklet_schedule(&timer->task_queue);
}
+EXPORT_SYMBOL(snd_timer_interrupt);
/*
@@ -859,6 +867,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
*rtimer = timer;
return 0;
}
+EXPORT_SYMBOL(snd_timer_new);
static int snd_timer_free(struct snd_timer *timer)
{
@@ -978,6 +987,7 @@ void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstam
}
spin_unlock_irqrestore(&timer->lock, flags);
}
+EXPORT_SYMBOL(snd_timer_notify);
/*
* exported functions for global timers
@@ -993,11 +1003,13 @@ int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer)
tid.subdevice = 0;
return snd_timer_new(NULL, id, &tid, rtimer);
}
+EXPORT_SYMBOL(snd_timer_global_new);
int snd_timer_global_free(struct snd_timer *timer)
{
return snd_timer_free(timer);
}
+EXPORT_SYMBOL(snd_timer_global_free);
int snd_timer_global_register(struct snd_timer *timer)
{
@@ -1007,6 +1019,7 @@ int snd_timer_global_register(struct snd_timer *timer)
dev.device_data = timer;
return snd_timer_dev_register(&dev);
}
+EXPORT_SYMBOL(snd_timer_global_register);
/*
* System timer
@@ -1327,6 +1340,33 @@ static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
wake_up(&tu->qchange_sleep);
}
+static int realloc_user_queue(struct snd_timer_user *tu, int size)
+{
+ struct snd_timer_read *queue = NULL;
+ struct snd_timer_tread *tqueue = NULL;
+
+ if (tu->tread) {
+ tqueue = kcalloc(size, sizeof(*tqueue), GFP_KERNEL);
+ if (!tqueue)
+ return -ENOMEM;
+ } else {
+ queue = kcalloc(size, sizeof(*queue), GFP_KERNEL);
+ if (!queue)
+ return -ENOMEM;
+ }
+
+ spin_lock_irq(&tu->qlock);
+ kfree(tu->queue);
+ kfree(tu->tqueue);
+ tu->queue_size = size;
+ tu->queue = queue;
+ tu->tqueue = tqueue;
+ tu->qhead = tu->qtail = tu->qused = 0;
+ spin_unlock_irq(&tu->qlock);
+
+ return 0;
+}
+
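realloc_user_queue() above is called both when the queue is resized and, further down in this patch, when the SNDRV_TIMER_IOCTL_TREAD event format is switched. A hedged raw-ioctl sketch of the usual setup order from user space (timer id and queue size are arbitrary, error checking is omitted, and the ioctl and struct names are those from <sound/asound.h>):

    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sound/asound.h>

    int main(void)
    {
            int fd = open("/dev/snd/timer", O_RDONLY);
            int tread = 1;
            struct snd_timer_select sel;
            struct snd_timer_params params;

            if (fd < 0)
                    return 1;

            /* Request the extended (tread) event format before binding;
             * with this patch the queue is reallocated right here. */
            ioctl(fd, SNDRV_TIMER_IOCTL_TREAD, &tread);

            /* Bind to the global system timer. */
            memset(&sel, 0, sizeof(sel));
            sel.id.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
            sel.id.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
            sel.id.device = SNDRV_TIMER_GLOBAL_SYSTEM;
            ioctl(fd, SNDRV_TIMER_IOCTL_SELECT, &sel);

            /* Resize the event queue; realloc_user_queue() runs again here. */
            memset(&params, 0, sizeof(params));
            params.ticks = 1;
            params.queue_size = 64;
            ioctl(fd, SNDRV_TIMER_IOCTL_PARAMS, &params);

            close(fd);
            return 0;
    }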
static int snd_timer_user_open(struct inode *inode, struct file *file)
{
struct snd_timer_user *tu;
@@ -1343,10 +1383,7 @@ static int snd_timer_user_open(struct inode *inode, struct file *file)
init_waitqueue_head(&tu->qchange_sleep);
mutex_init(&tu->ioctl_lock);
tu->ticks = 1;
- tu->queue_size = 128;
- tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
- GFP_KERNEL);
- if (tu->queue == NULL) {
+ if (realloc_user_queue(tu, 128) < 0) {
kfree(tu);
return -ENOMEM;
}
@@ -1618,34 +1655,12 @@ static int snd_timer_user_tselect(struct file *file,
if (err < 0)
goto __err;
- tu->qhead = tu->qtail = tu->qused = 0;
- kfree(tu->queue);
- tu->queue = NULL;
- kfree(tu->tqueue);
- tu->tqueue = NULL;
- if (tu->tread) {
- tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread),
- GFP_KERNEL);
- if (tu->tqueue == NULL)
- err = -ENOMEM;
- } else {
- tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
- GFP_KERNEL);
- if (tu->queue == NULL)
- err = -ENOMEM;
- }
-
- if (err < 0) {
- snd_timer_close(tu->timeri);
- tu->timeri = NULL;
- } else {
- tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST;
- tu->timeri->callback = tu->tread
+ tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST;
+ tu->timeri->callback = tu->tread
? snd_timer_user_tinterrupt : snd_timer_user_interrupt;
- tu->timeri->ccallback = snd_timer_user_ccallback;
- tu->timeri->callback_data = (void *)tu;
- tu->timeri->disconnect = snd_timer_user_disconnect;
- }
+ tu->timeri->ccallback = snd_timer_user_ccallback;
+ tu->timeri->callback_data = (void *)tu;
+ tu->timeri->disconnect = snd_timer_user_disconnect;
__err:
return err;
@@ -1687,8 +1702,6 @@ static int snd_timer_user_params(struct file *file,
struct snd_timer_user *tu;
struct snd_timer_params params;
struct snd_timer *t;
- struct snd_timer_read *tr;
- struct snd_timer_tread *ttr;
int err;
tu = file->private_data;
@@ -1751,24 +1764,11 @@ static int snd_timer_user_params(struct file *file,
spin_unlock_irq(&t->lock);
if (params.queue_size > 0 &&
(unsigned int)tu->queue_size != params.queue_size) {
- if (tu->tread) {
- ttr = kmalloc(params.queue_size * sizeof(*ttr),
- GFP_KERNEL);
- if (ttr) {
- kfree(tu->tqueue);
- tu->queue_size = params.queue_size;
- tu->tqueue = ttr;
- }
- } else {
- tr = kmalloc(params.queue_size * sizeof(*tr),
- GFP_KERNEL);
- if (tr) {
- kfree(tu->queue);
- tu->queue_size = params.queue_size;
- tu->queue = tr;
- }
- }
+ err = realloc_user_queue(tu, params.queue_size);
+ if (err < 0)
+ goto _end;
}
+ spin_lock_irq(&tu->qlock);
tu->qhead = tu->qtail = tu->qused = 0;
if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
if (tu->tread) {
@@ -1789,6 +1789,7 @@ static int snd_timer_user_params(struct file *file,
}
tu->filter = params.filter;
tu->ticks = params.ticks;
+ spin_unlock_irq(&tu->qlock);
err = 0;
_end:
if (copy_to_user(_params, &params, sizeof(params)))
@@ -1891,13 +1892,19 @@ static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
return snd_timer_user_next_device(argp);
case SNDRV_TIMER_IOCTL_TREAD:
{
- int xarg;
+ int xarg, old_tread;
if (tu->timeri) /* too late */
return -EBUSY;
if (get_user(xarg, p))
return -EFAULT;
+ old_tread = tu->tread;
tu->tread = xarg ? 1 : 0;
+ if (tu->tread != old_tread &&
+ realloc_user_queue(tu, tu->queue_size) < 0) {
+ tu->tread = old_tread;
+ return -ENOMEM;
+ }
return 0;
}
case SNDRV_TIMER_IOCTL_GINFO:
@@ -2030,10 +2037,12 @@ static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait)
poll_wait(file, &tu->qchange_sleep, wait);
mask = 0;
+ spin_lock_irq(&tu->qlock);
if (tu->qused)
mask |= POLLIN | POLLRDNORM;
if (tu->disconnected)
mask |= POLLERR;
+ spin_unlock_irq(&tu->qlock);
return mask;
}
@@ -2117,17 +2126,3 @@ static void __exit alsa_timer_exit(void)
module_init(alsa_timer_init)
module_exit(alsa_timer_exit)
-
-EXPORT_SYMBOL(snd_timer_open);
-EXPORT_SYMBOL(snd_timer_close);
-EXPORT_SYMBOL(snd_timer_resolution);
-EXPORT_SYMBOL(snd_timer_start);
-EXPORT_SYMBOL(snd_timer_stop);
-EXPORT_SYMBOL(snd_timer_continue);
-EXPORT_SYMBOL(snd_timer_pause);
-EXPORT_SYMBOL(snd_timer_new);
-EXPORT_SYMBOL(snd_timer_notify);
-EXPORT_SYMBOL(snd_timer_global_new);
-EXPORT_SYMBOL(snd_timer_global_free);
-EXPORT_SYMBOL(snd_timer_global_register);
-EXPORT_SYMBOL(snd_timer_interrupt);
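The timer.c hunks above do two independent things: they move each EXPORT_SYMBOL() next to the function it exports instead of keeping them in a block at the end of the file, and they fold the duplicated user-queue allocation paths into the new realloc_user_queue() helper, which allocates the replacement array before taking tu->qlock and only swaps pointers while the lock is held. A minimal sketch of that allocate-then-swap pattern, with hypothetical names (struct my_ring and my_ring_resize() are illustration only, not ALSA code):

#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_ring {
	spinlock_t lock;
	void *buf;
	int size, head, tail, used;
};

static int my_ring_resize(struct my_ring *r, int size, size_t elem_size)
{
	/* kcalloc() may sleep, so allocate before taking the spinlock */
	void *nbuf = kcalloc(size, elem_size, GFP_KERNEL);

	if (!nbuf)
		return -ENOMEM;

	spin_lock_irq(&r->lock);
	kfree(r->buf);			/* kfree() is safe in atomic context */
	r->buf = nbuf;
	r->size = size;
	r->head = r->tail = r->used = 0;
	spin_unlock_irq(&r->lock);

	return 0;
}

The same ordering is why snd_timer_user_params() and the TREAD ioctl above can simply back out on -ENOMEM: the old queue is untouched until the replacement is known to exist.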
diff --git a/sound/drivers/Kconfig b/sound/drivers/Kconfig
index 8545da99b183..7144cc36e8ae 100644
--- a/sound/drivers/Kconfig
+++ b/sound/drivers/Kconfig
@@ -6,11 +6,24 @@ config SND_OPL3_LIB
tristate
select SND_TIMER
select SND_HWDEP
+ select SND_SEQ_DEVICE if SND_SEQUENCER != n
config SND_OPL4_LIB
tristate
select SND_TIMER
select SND_HWDEP
+ select SND_SEQ_DEVICE if SND_SEQUENCER != n
+
+# select SEQ stuff to min(SND_SEQUENCER,SND_XXX)
+config SND_OPL3_LIB_SEQ
+ def_tristate SND_SEQUENCER && SND_OPL3_LIB
+ select SND_SEQ_MIDI_EMUL
+ select SND_SEQ_MIDI_EVENT
+
+config SND_OPL4_LIB_SEQ
+ def_tristate SND_SEQUENCER && SND_OPL4_LIB
+ select SND_SEQ_MIDI_EMUL
+ select SND_SEQ_MIDI_EVENT
config SND_VX_LIB
tristate
@@ -99,6 +112,8 @@ config SND_VIRMIDI
depends on SND_SEQUENCER
select SND_TIMER
select SND_RAWMIDI
+ select SND_SEQ_VIRMIDI
+ select SND_SEQ_MIDI_EVENT
help
Say Y here to include the virtual MIDI driver. This driver
allows to connect applications using raw MIDI devices to
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index 172dacd925f5..dd5ed037adf2 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -644,15 +644,22 @@ static int alloc_fake_buffer(void)
}
static int dummy_pcm_copy(struct snd_pcm_substream *substream,
- int channel, snd_pcm_uframes_t pos,
- void __user *dst, snd_pcm_uframes_t count)
+ int channel, unsigned long pos,
+ void __user *dst, unsigned long bytes)
+{
+ return 0; /* do nothing */
+}
+
+static int dummy_pcm_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *dst, unsigned long bytes)
{
return 0; /* do nothing */
}
static int dummy_pcm_silence(struct snd_pcm_substream *substream,
- int channel, snd_pcm_uframes_t pos,
- snd_pcm_uframes_t count)
+ int channel, unsigned long pos,
+ unsigned long bytes)
{
return 0; /* do nothing */
}
@@ -683,8 +690,9 @@ static struct snd_pcm_ops dummy_pcm_ops_no_buf = {
.prepare = dummy_pcm_prepare,
.trigger = dummy_pcm_trigger,
.pointer = dummy_pcm_pointer,
- .copy = dummy_pcm_copy,
- .silence = dummy_pcm_silence,
+ .copy_user = dummy_pcm_copy,
+ .copy_kernel = dummy_pcm_copy_kernel,
+ .fill_silence = dummy_pcm_silence,
.page = dummy_pcm_page,
};
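The dummy driver above is switched from the old frame-based .copy/.silence callbacks to the byte-based .copy_user/.copy_kernel/.fill_silence trio, whose pos and bytes arguments are plain byte offsets and lengths. A hedged sketch of a non-trivial copy_user for a hypothetical driver that stages samples in its own interleaved buffer (struct my_chip and its my_dma_area field are made up for illustration):

#include <linux/uaccess.h>
#include <sound/pcm.h>

struct my_chip {
	unsigned char *my_dma_area;	/* driver-private staging buffer */
};

static int my_pcm_copy_user(struct snd_pcm_substream *substream,
			    int channel, unsigned long pos,
			    void __user *buf, unsigned long bytes)
{
	struct my_chip *chip = snd_pcm_substream_chip(substream);

	/* pos and bytes are already bytes; no frames_to_bytes() needed */
	if (copy_from_user(chip->my_dma_area + pos, buf, bytes))
		return -EFAULT;
	return 0;
}

For the dummy driver both copy callbacks stay empty on purpose, since it discards playback data; the separate copy_kernel variant lets in-kernel callers copy without switching address limits via set_fs().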
diff --git a/sound/drivers/opl3/Makefile b/sound/drivers/opl3/Makefile
index 7f2c2a10c4e5..d72b1e7b51c4 100644
--- a/sound/drivers/opl3/Makefile
+++ b/sound/drivers/opl3/Makefile
@@ -5,7 +5,9 @@
snd-opl3-lib-objs := opl3_lib.o opl3_synth.o
snd-opl3-synth-y := opl3_seq.o opl3_midi.o opl3_drums.o
-snd-opl3-synth-$(CONFIG_SND_SEQUENCER_OSS) += opl3_oss.o
+ifneq ($(CONFIG_SND_SEQUENCER_OSS),)
+snd-opl3-synth-y += opl3_oss.o
+endif
obj-$(CONFIG_SND_OPL3_LIB) += snd-opl3-lib.o
obj-$(CONFIG_SND_OPL4_LIB) += snd-opl3-lib.o
diff --git a/sound/drivers/opl3/opl3_lib.c b/sound/drivers/opl3/opl3_lib.c
index cd9e9f31720f..d5e5b4657b4b 100644
--- a/sound/drivers/opl3/opl3_lib.c
+++ b/sound/drivers/opl3/opl3_lib.c
@@ -528,7 +528,7 @@ int snd_opl3_hwdep_new(struct snd_opl3 * opl3,
opl3->hwdep = hw;
opl3->seq_dev_num = seq_device;
-#if IS_REACHABLE(CONFIG_SND_SEQUENCER)
+#if IS_ENABLED(CONFIG_SND_SEQUENCER)
if (snd_seq_device_new(card, seq_device, SNDRV_SEQ_DEV_ID_OPL3,
sizeof(struct snd_opl3 *), &opl3->seq_dev) >= 0) {
strcpy(opl3->seq_dev->name, hw->name);
diff --git a/sound/drivers/opl3/opl3_oss.c b/sound/drivers/opl3/opl3_oss.c
index c1cb249acfaa..22c3e4bca220 100644
--- a/sound/drivers/opl3/opl3_oss.c
+++ b/sound/drivers/opl3/opl3_oss.c
@@ -27,20 +27,6 @@ static int snd_opl3_ioctl_seq_oss(struct snd_seq_oss_arg *arg, unsigned int cmd,
static int snd_opl3_load_patch_seq_oss(struct snd_seq_oss_arg *arg, int format, const char __user *buf, int offs, int count);
static int snd_opl3_reset_seq_oss(struct snd_seq_oss_arg *arg);
-/* */
-
-static inline mm_segment_t snd_enter_user(void)
-{
- mm_segment_t fs = get_fs();
- set_fs(get_ds());
- return fs;
-}
-
-static inline void snd_leave_user(mm_segment_t fs)
-{
- set_fs(fs);
-}
-
/* operators */
extern struct snd_midi_op opl3_ops;
diff --git a/sound/drivers/opl3/opl3_seq.c b/sound/drivers/opl3/opl3_seq.c
index fdae5d7f421f..d3e91be8b23a 100644
--- a/sound/drivers/opl3/opl3_seq.c
+++ b/sound/drivers/opl3/opl3_seq.c
@@ -252,7 +252,7 @@ static int snd_opl3_seq_probe(struct device *_dev)
spin_lock_init(&opl3->sys_timer_lock);
opl3->sys_timer_status = 0;
-#ifdef CONFIG_SND_SEQUENCER_OSS
+#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
snd_opl3_init_seq_oss(opl3, name);
#endif
return 0;
@@ -267,7 +267,7 @@ static int snd_opl3_seq_remove(struct device *_dev)
if (opl3 == NULL)
return -EINVAL;
-#ifdef CONFIG_SND_SEQUENCER_OSS
+#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
snd_opl3_free_seq_oss(opl3);
#endif
if (opl3->seq_client >= 0) {
diff --git a/sound/drivers/opl3/opl3_voice.h b/sound/drivers/opl3/opl3_voice.h
index a371c075ac87..eaef435e0528 100644
--- a/sound/drivers/opl3/opl3_voice.h
+++ b/sound/drivers/opl3/opl3_voice.h
@@ -44,9 +44,12 @@ void snd_opl3_load_drums(struct snd_opl3 *opl3);
void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int on_off, int vel, struct snd_midi_channel *chan);
/* Prototypes for opl3_oss.c */
-#ifdef CONFIG_SND_SEQUENCER_OSS
+#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
void snd_opl3_init_seq_oss(struct snd_opl3 *opl3, char *name);
void snd_opl3_free_seq_oss(struct snd_opl3 *opl3);
+#else
+#define snd_opl3_init_seq_oss(opl3, name) /* NOP */
+#define snd_opl3_free_seq_oss(opl3) /* NOP */
#endif
#endif
diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
index 89c7aa04b3bc..bc345d564f8d 100644
--- a/sound/drivers/opl4/opl4_lib.c
+++ b/sound/drivers/opl4/opl4_lib.c
@@ -153,7 +153,7 @@ static int snd_opl4_detect(struct snd_opl4 *opl4)
return 0;
}
-#if defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE))
+#if IS_ENABLED(CONFIG_SND_SEQUENCER)
static void snd_opl4_seq_dev_free(struct snd_seq_device *seq_dev)
{
struct snd_opl4 *opl4 = seq_dev->private_data;
@@ -249,7 +249,7 @@ int snd_opl4_create(struct snd_card *card,
snd_opl4_create_mixer(opl4);
snd_opl4_create_proc(opl4);
-#if defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE))
+#if IS_ENABLED(CONFIG_SND_SEQUENCER)
opl4->seq_client = -1;
if (opl4->hardware < OPL3_HW_OPL4_ML)
snd_opl4_create_seq_dev(opl4, seq_device);
diff --git a/sound/drivers/opl4/opl4_local.h b/sound/drivers/opl4/opl4_local.h
index 9a41bdebce6b..a16b4677c1e9 100644
--- a/sound/drivers/opl4/opl4_local.h
+++ b/sound/drivers/opl4/opl4_local.h
@@ -184,7 +184,7 @@ struct snd_opl4 {
#endif
struct mutex access_mutex;
-#if defined(CONFIG_SND_SEQUENCER) || defined(CONFIG_SND_SEQUENCER_MODULE)
+#if IS_ENABLED(CONFIG_SND_SEQUENCER)
int used;
int seq_dev_num;
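The OPL3/OPL4 hunks above replace the ad-hoc preprocessor tests, plain #ifdef CONFIG_SND_SEQUENCER_OSS and the long defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && ...) expressions, with the kconfig helper macros, which cover both built-in (=y) and modular (=m) builds. A short reminder sketch of their semantics (the my_init_seq_oss() helper is hypothetical):

#include <linux/kconfig.h>

/*
 * IS_BUILTIN(CONFIG_FOO)   - true only for CONFIG_FOO=y
 * IS_MODULE(CONFIG_FOO)    - true only for CONFIG_FOO=m
 * IS_ENABLED(CONFIG_FOO)   - true for =y or =m
 * IS_REACHABLE(CONFIG_FOO) - =y always; =m only if this code is modular too
 */
#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
void my_init_seq_oss(void);
#else
static inline void my_init_seq_oss(void) { }	/* compiles away when disabled */
#endif

Providing empty fallbacks, as opl3_voice.h now does with NOP macros, keeps the call sites free of #ifdef clutter.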
diff --git a/sound/drivers/pcsp/pcsp_lib.c b/sound/drivers/pcsp/pcsp_lib.c
index aca2d7d5f059..44b3632f6940 100644
--- a/sound/drivers/pcsp/pcsp_lib.c
+++ b/sound/drivers/pcsp/pcsp_lib.c
@@ -323,7 +323,7 @@ static int snd_pcsp_playback_open(struct snd_pcm_substream *substream)
return 0;
}
-static struct snd_pcm_ops snd_pcsp_playback_ops = {
+static const struct snd_pcm_ops snd_pcsp_playback_ops = {
.open = snd_pcsp_playback_open,
.close = snd_pcsp_playback_close,
.ioctl = snd_pcm_lib_ioctl,
diff --git a/sound/drivers/vx/vx_mixer.c b/sound/drivers/vx/vx_mixer.c
index be9477e30739..98a41ac40b60 100644
--- a/sound/drivers/vx/vx_mixer.c
+++ b/sound/drivers/vx/vx_mixer.c
@@ -455,7 +455,7 @@ static int vx_output_level_put(struct snd_kcontrol *kcontrol, struct snd_ctl_ele
return 0;
}
-static struct snd_kcontrol_new vx_control_output_level = {
+static const struct snd_kcontrol_new vx_control_output_level = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
@@ -514,7 +514,7 @@ static int vx_audio_src_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_v
return 0;
}
-static struct snd_kcontrol_new vx_control_audio_src = {
+static const struct snd_kcontrol_new vx_control_audio_src = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Capture Source",
.info = vx_audio_src_info,
@@ -558,7 +558,7 @@ static int vx_clock_mode_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
return 0;
}
-static struct snd_kcontrol_new vx_control_clock_mode = {
+static const struct snd_kcontrol_new vx_control_clock_mode = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Clock Mode",
.info = vx_clock_mode_info,
@@ -717,7 +717,7 @@ static int vx_monitor_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
static const DECLARE_TLV_DB_SCALE(db_scale_audio_gain, -10975, 25, 0);
-static struct snd_kcontrol_new vx_control_audio_gain = {
+static const struct snd_kcontrol_new vx_control_audio_gain = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
@@ -727,14 +727,14 @@ static struct snd_kcontrol_new vx_control_audio_gain = {
.put = vx_audio_gain_put,
.tlv = { .p = db_scale_audio_gain },
};
-static struct snd_kcontrol_new vx_control_output_switch = {
+static const struct snd_kcontrol_new vx_control_output_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Switch",
.info = vx_audio_sw_info,
.get = vx_audio_sw_get,
.put = vx_audio_sw_put
};
-static struct snd_kcontrol_new vx_control_monitor_gain = {
+static const struct snd_kcontrol_new vx_control_monitor_gain = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Monitoring Volume",
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
@@ -744,7 +744,7 @@ static struct snd_kcontrol_new vx_control_monitor_gain = {
.put = vx_audio_monitor_put,
.tlv = { .p = db_scale_audio_gain },
};
-static struct snd_kcontrol_new vx_control_monitor_switch = {
+static const struct snd_kcontrol_new vx_control_monitor_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Monitoring Switch",
.info = vx_audio_sw_info, /* shared */
@@ -805,7 +805,7 @@ static int vx_iec958_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_valu
return 0;
}
-static struct snd_kcontrol_new vx_control_iec958_mask = {
+static const struct snd_kcontrol_new vx_control_iec958_mask = {
.access = SNDRV_CTL_ELEM_ACCESS_READ,
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK),
@@ -813,7 +813,7 @@ static struct snd_kcontrol_new vx_control_iec958_mask = {
.get = vx_iec958_mask_get,
};
-static struct snd_kcontrol_new vx_control_iec958 = {
+static const struct snd_kcontrol_new vx_control_iec958 = {
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
.info = vx_iec958_info,
@@ -878,7 +878,7 @@ static int vx_saturation_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
return 0;
}
-static struct snd_kcontrol_new vx_control_vu_meter = {
+static const struct snd_kcontrol_new vx_control_vu_meter = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
/* name will be filled later */
@@ -886,7 +886,7 @@ static struct snd_kcontrol_new vx_control_vu_meter = {
.get = vx_vu_meter_get,
};
-static struct snd_kcontrol_new vx_control_peak_meter = {
+static const struct snd_kcontrol_new vx_control_peak_meter = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
/* name will be filled later */
@@ -894,7 +894,7 @@ static struct snd_kcontrol_new vx_control_peak_meter = {
.get = vx_peak_meter_get,
};
-static struct snd_kcontrol_new vx_control_saturation = {
+static const struct snd_kcontrol_new vx_control_saturation = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Input Saturation",
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
diff --git a/sound/drivers/vx/vx_pcm.c b/sound/drivers/vx/vx_pcm.c
index ea7b377f0378..d318a33b6cfb 100644
--- a/sound/drivers/vx/vx_pcm.c
+++ b/sound/drivers/vx/vx_pcm.c
@@ -873,7 +873,7 @@ static int vx_pcm_prepare(struct snd_pcm_substream *subs)
/*
* operators for PCM playback
*/
-static struct snd_pcm_ops vx_pcm_playback_ops = {
+static const struct snd_pcm_ops vx_pcm_playback_ops = {
.open = vx_pcm_playback_open,
.close = vx_pcm_playback_close,
.ioctl = snd_pcm_lib_ioctl,
@@ -1095,7 +1095,7 @@ static snd_pcm_uframes_t vx_pcm_capture_pointer(struct snd_pcm_substream *subs)
/*
* operators for PCM capture
*/
-static struct snd_pcm_ops vx_pcm_capture_ops = {
+static const struct snd_pcm_ops vx_pcm_capture_ops = {
.open = vx_pcm_capture_open,
.close = vx_pcm_capture_close,
.ioctl = snd_pcm_lib_ioctl,
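The pcsp and VX hunks above (and several firewire ones below) only add const to snd_pcm_ops and snd_kcontrol_new tables that are never written after registration, so they can be placed in .rodata. This works because the registration helpers already take const pointers; a minimal sketch with hypothetical my_* names:

#include <sound/pcm.h>

static int my_open(struct snd_pcm_substream *substream) { return 0; }
static int my_close(struct snd_pcm_substream *substream) { return 0; }

static const struct snd_pcm_ops my_playback_ops = {
	.open  = my_open,
	.close = my_close,
	.ioctl = snd_pcm_lib_ioctl,
};

static void my_set_ops(struct snd_pcm *pcm)
{
	/* snd_pcm_set_ops() accepts a const ops pointer */
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &my_playback_ops);
}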
diff --git a/sound/firewire/amdtp-am824.c b/sound/firewire/amdtp-am824.c
index bebddc60fde8..23ccddb20de1 100644
--- a/sound/firewire/amdtp-am824.c
+++ b/sound/firewire/amdtp-am824.c
@@ -38,10 +38,6 @@ struct amdtp_am824 {
u8 pcm_positions[AM824_MAX_CHANNELS_FOR_PCM];
u8 midi_position;
- void (*transfer_samples)(struct amdtp_stream *s,
- struct snd_pcm_substream *pcm,
- __be32 *buffer, unsigned int frames);
-
unsigned int frame_multiplier;
};
@@ -177,32 +173,6 @@ static void write_pcm_s32(struct amdtp_stream *s,
}
}
-static void write_pcm_s16(struct amdtp_stream *s,
- struct snd_pcm_substream *pcm,
- __be32 *buffer, unsigned int frames)
-{
- struct amdtp_am824 *p = s->protocol;
- struct snd_pcm_runtime *runtime = pcm->runtime;
- unsigned int channels, remaining_frames, i, c;
- const u16 *src;
-
- channels = p->pcm_channels;
- src = (void *)runtime->dma_area +
- frames_to_bytes(runtime, s->pcm_buffer_pointer);
- remaining_frames = runtime->buffer_size - s->pcm_buffer_pointer;
-
- for (i = 0; i < frames; ++i) {
- for (c = 0; c < channels; ++c) {
- buffer[p->pcm_positions[c]] =
- cpu_to_be32((*src << 8) | 0x42000000);
- src++;
- }
- buffer += s->data_block_quadlets;
- if (--remaining_frames == 0)
- src = (void *)runtime->dma_area;
- }
-}
-
static void read_pcm_s32(struct amdtp_stream *s,
struct snd_pcm_substream *pcm,
__be32 *buffer, unsigned int frames)
@@ -242,43 +212,6 @@ static void write_pcm_silence(struct amdtp_stream *s,
}
/**
- * amdtp_am824_set_pcm_format - set the PCM format
- * @s: the AMDTP stream to configure
- * @format: the format of the ALSA PCM device
- *
- * The sample format must be set after the other parameters (rate/PCM channels/
- * MIDI) and before the stream is started, and must not be changed while the
- * stream is running.
- */
-void amdtp_am824_set_pcm_format(struct amdtp_stream *s, snd_pcm_format_t format)
-{
- struct amdtp_am824 *p = s->protocol;
-
- if (WARN_ON(amdtp_stream_pcm_running(s)))
- return;
-
- switch (format) {
- default:
- WARN_ON(1);
- /* fall through */
- case SNDRV_PCM_FORMAT_S16:
- if (s->direction == AMDTP_OUT_STREAM) {
- p->transfer_samples = write_pcm_s16;
- break;
- }
- WARN_ON(1);
- /* fall through */
- case SNDRV_PCM_FORMAT_S32:
- if (s->direction == AMDTP_OUT_STREAM)
- p->transfer_samples = write_pcm_s32;
- else
- p->transfer_samples = read_pcm_s32;
- break;
- }
-}
-EXPORT_SYMBOL_GPL(amdtp_am824_set_pcm_format);
-
-/**
* amdtp_am824_add_pcm_hw_constraints - add hw constraints for PCM substream
* @s: the AMDTP stream for AM824 data block, must be initialized.
* @runtime: the PCM substream runtime
@@ -407,7 +340,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s, __be32 *buffe
unsigned int pcm_frames;
if (pcm) {
- p->transfer_samples(s, pcm, buffer, data_blocks);
+ write_pcm_s32(s, pcm, buffer, data_blocks);
pcm_frames = data_blocks * p->frame_multiplier;
} else {
write_pcm_silence(s, buffer, data_blocks);
@@ -428,7 +361,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s, __be32 *buffe
unsigned int pcm_frames;
if (pcm) {
- p->transfer_samples(s, pcm, buffer, data_blocks);
+ read_pcm_s32(s, pcm, buffer, data_blocks);
pcm_frames = data_blocks * p->frame_multiplier;
} else {
pcm_frames = 0;
diff --git a/sound/firewire/amdtp-am824.h b/sound/firewire/amdtp-am824.h
index 73b07b3109db..b56e61fc997d 100644
--- a/sound/firewire/amdtp-am824.h
+++ b/sound/firewire/amdtp-am824.h
@@ -8,8 +8,7 @@
#define AM824_IN_PCM_FORMAT_BITS SNDRV_PCM_FMTBIT_S32
-#define AM824_OUT_PCM_FORMAT_BITS (SNDRV_PCM_FMTBIT_S16 | \
- SNDRV_PCM_FMTBIT_S32)
+#define AM824_OUT_PCM_FORMAT_BITS SNDRV_PCM_FMTBIT_S32
/*
* This module supports maximum 64 PCM channels for one PCM stream
@@ -41,9 +40,6 @@ void amdtp_am824_set_midi_position(struct amdtp_stream *s,
int amdtp_am824_add_pcm_hw_constraints(struct amdtp_stream *s,
struct snd_pcm_runtime *runtime);
-void amdtp_am824_set_pcm_format(struct amdtp_stream *s,
- snd_pcm_format_t format);
-
void amdtp_am824_midi_trigger(struct amdtp_stream *s, unsigned int port,
struct snd_rawmidi_substream *midi);
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 1e26854b3425..3fc581a5ad62 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -148,8 +148,27 @@ EXPORT_SYMBOL(amdtp_rate_table);
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
struct snd_pcm_runtime *runtime)
{
+ struct snd_pcm_hardware *hw = &runtime->hw;
int err;
+ hw->info = SNDRV_PCM_INFO_BATCH |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_JOINT_DUPLEX |
+ SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID;
+
+ /* SNDRV_PCM_INFO_BATCH */
+ hw->periods_min = 2;
+ hw->periods_max = UINT_MAX;
+
+ /* bytes for a frame */
+ hw->period_bytes_min = 4 * hw->channels_max;
+
+ /* Just to prevent from allocating much pages. */
+ hw->period_bytes_max = hw->period_bytes_min * 2048;
+ hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
+
/*
* Currently firewire-lib processes 16 packets in one software
* interrupt callback. This equals to 2msec but actually the
@@ -933,6 +952,25 @@ unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s)
EXPORT_SYMBOL(amdtp_stream_pcm_pointer);
/**
+ * amdtp_stream_pcm_ack - acknowledge queued PCM frames
+ * @s: the AMDTP stream that transfers the PCM frames
+ *
+ * Returns zero always.
+ */
+int amdtp_stream_pcm_ack(struct amdtp_stream *s)
+{
+ /*
+ * Process isochronous packets for recent isochronous cycle to handle
+ * queued PCM frames.
+ */
+ if (amdtp_stream_running(s))
+ fw_iso_context_flush_completions(s->context);
+
+ return 0;
+}
+EXPORT_SYMBOL(amdtp_stream_pcm_ack);
+
+/**
* amdtp_stream_update - update the stream after a bus reset
* @s: the AMDTP stream
*/
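Two things happen in amdtp-stream.c above: the period/buffer limits that each firewire driver used to set in its own limit_period_and_buffer() helper are now applied centrally in amdtp_stream_add_pcm_hw_constraints(), and the new amdtp_stream_pcm_ack() lets the PCM .ack callbacks added in the drivers below process already-completed isochronous cycles as soon as the application commits new frames. A hedged arithmetic sketch of the centralized limits, plugging in a hypothetical channels_max of 18 (4 bytes per S32 sample); the numbers just follow the formulas in the hunk:

#include <sound/pcm.h>

static void sketch_amdtp_pcm_limits(struct snd_pcm_hardware *hw)
{
	/* illustration only, not an additional driver restriction */
	hw->channels_max = 18;
	hw->periods_min = 2;
	hw->period_bytes_min = 4 * hw->channels_max;			/* 72 bytes: one frame */
	hw->period_bytes_max = hw->period_bytes_min * 2048;		/* 147456 bytes */
	hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;	/* 294912 bytes */
}

Even at the maximum channel count the ring buffer is capped at a few hundred kilobytes, which is the "prevent from allocating much pages" comment expressed in numbers.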
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index ea1a91e99875..ed6eafd10992 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -168,6 +168,7 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
void amdtp_stream_pcm_prepare(struct amdtp_stream *s);
unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s);
+int amdtp_stream_pcm_ack(struct amdtp_stream *s);
void amdtp_stream_pcm_abort(struct amdtp_stream *s);
extern const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT];
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index 07e5abdbceb5..d10208f92edf 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -396,7 +396,7 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
return err;
}
-static struct snd_kcontrol_new special_clk_ctl = {
+static const struct snd_kcontrol_new special_clk_ctl = {
.name = "Clock Source",
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -429,7 +429,7 @@ static int special_sync_ctl_get(struct snd_kcontrol *kctl,
return 0;
}
-static struct snd_kcontrol_new special_sync_ctl = {
+static const struct snd_kcontrol_new special_sync_ctl = {
.name = "Sync Status",
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READ,
@@ -521,7 +521,7 @@ end:
mutex_unlock(&bebob->mutex);
return err;
}
-static struct snd_kcontrol_new special_dig_in_iface_ctl = {
+static const struct snd_kcontrol_new special_dig_in_iface_ctl = {
.name = "Digital Input Interface",
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -577,7 +577,7 @@ static int special_dig_out_iface_ctl_set(struct snd_kcontrol *kctl,
mutex_unlock(&bebob->mutex);
return err;
}
-static struct snd_kcontrol_new special_dig_out_iface_ctl = {
+static const struct snd_kcontrol_new special_dig_out_iface_ctl = {
.name = "Digital Output Interface",
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
diff --git a/sound/firewire/bebob/bebob_pcm.c b/sound/firewire/bebob/bebob_pcm.c
index 9e27eb8e1dd4..e6adab3ef42e 100644
--- a/sound/firewire/bebob/bebob_pcm.c
+++ b/sound/firewire/bebob/bebob_pcm.c
@@ -92,19 +92,6 @@ limit_channels_and_rates(struct snd_pcm_hardware *hw,
}
}
-static void
-limit_period_and_buffer(struct snd_pcm_hardware *hw)
-{
- hw->periods_min = 2; /* SNDRV_PCM_INFO_BATCH */
- hw->periods_max = UINT_MAX;
-
- hw->period_bytes_min = 4 * hw->channels_max; /* bytes for a frame */
-
- /* Just to prevent from allocating much pages. */
- hw->period_bytes_max = hw->period_bytes_min * 2048;
- hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
-}
-
static int
pcm_init_hw_params(struct snd_bebob *bebob,
struct snd_pcm_substream *substream)
@@ -114,13 +101,6 @@ pcm_init_hw_params(struct snd_bebob *bebob,
struct snd_bebob_stream_formation *formations;
int err;
- runtime->hw.info = SNDRV_PCM_INFO_BATCH |
- SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_JOINT_DUPLEX |
- SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID;
-
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
runtime->hw.formats = AM824_IN_PCM_FORMAT_BITS;
s = &bebob->tx_stream;
@@ -132,7 +112,6 @@ pcm_init_hw_params(struct snd_bebob *bebob,
}
limit_channels_and_rates(&runtime->hw, formations);
- limit_period_and_buffer(&runtime->hw);
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
hw_rule_channels, formations,
@@ -224,8 +203,6 @@ pcm_capture_hw_params(struct snd_pcm_substream *substream,
mutex_unlock(&bebob->mutex);
}
- amdtp_am824_set_pcm_format(&bebob->tx_stream, params_format(hw_params));
-
return 0;
}
static int
@@ -246,8 +223,6 @@ pcm_playback_hw_params(struct snd_pcm_substream *substream,
mutex_unlock(&bebob->mutex);
}
- amdtp_am824_set_pcm_format(&bebob->rx_stream, params_format(hw_params));
-
return 0;
}
@@ -359,6 +334,20 @@ pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
return amdtp_stream_pcm_pointer(&bebob->rx_stream);
}
+static int pcm_capture_ack(struct snd_pcm_substream *substream)
+{
+ struct snd_bebob *bebob = substream->private_data;
+
+ return amdtp_stream_pcm_ack(&bebob->tx_stream);
+}
+
+static int pcm_playback_ack(struct snd_pcm_substream *substream)
+{
+ struct snd_bebob *bebob = substream->private_data;
+
+ return amdtp_stream_pcm_ack(&bebob->rx_stream);
+}
+
int snd_bebob_create_pcm_devices(struct snd_bebob *bebob)
{
static const struct snd_pcm_ops capture_ops = {
@@ -370,6 +359,7 @@ int snd_bebob_create_pcm_devices(struct snd_bebob *bebob)
.prepare = pcm_capture_prepare,
.trigger = pcm_capture_trigger,
.pointer = pcm_capture_pointer,
+ .ack = pcm_capture_ack,
.page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_ops = {
@@ -381,6 +371,7 @@ int snd_bebob_create_pcm_devices(struct snd_bebob *bebob)
.prepare = pcm_playback_prepare,
.trigger = pcm_playback_trigger,
.pointer = pcm_playback_pointer,
+ .ack = pcm_playback_ack,
.page = snd_pcm_lib_get_vmalloc_page,
.mmap = snd_pcm_lib_mmap_vmalloc,
};
diff --git a/sound/firewire/dice/dice-pcm.c b/sound/firewire/dice/dice-pcm.c
index 6074fe1f00f7..7cb9e9713ac3 100644
--- a/sound/firewire/dice/dice-pcm.c
+++ b/sound/firewire/dice/dice-pcm.c
@@ -51,18 +51,6 @@ static int limit_channels_and_rates(struct snd_dice *dice,
return 0;
}
-static void limit_period_and_buffer(struct snd_pcm_hardware *hw)
-{
- hw->periods_min = 2; /* SNDRV_PCM_INFO_BATCH */
- hw->periods_max = UINT_MAX;
-
- hw->period_bytes_min = 4 * hw->channels_max; /* byte for a frame */
-
- /* Just to prevent from allocating much pages. */
- hw->period_bytes_max = hw->period_bytes_min * 2048;
- hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
-}
-
static int init_hw_info(struct snd_dice *dice,
struct snd_pcm_substream *substream)
{
@@ -74,13 +62,6 @@ static int init_hw_info(struct snd_dice *dice,
unsigned int count, size;
int err;
- hw->info = SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_BATCH |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_JOINT_DUPLEX |
- SNDRV_PCM_INFO_BLOCK_TRANSFER;
-
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
hw->formats = AM824_IN_PCM_FORMAT_BITS;
dir = AMDTP_IN_STREAM;
@@ -107,7 +88,6 @@ static int init_hw_info(struct snd_dice *dice,
substream->pcm->device, size);
if (err < 0)
return err;
- limit_period_and_buffer(hw);
return amdtp_am824_add_pcm_hw_constraints(stream, runtime);
}
@@ -146,7 +126,6 @@ static int capture_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct snd_dice *dice = substream->private_data;
- struct amdtp_stream *stream = &dice->tx_stream[substream->pcm->device];
int err;
err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
@@ -160,15 +139,12 @@ static int capture_hw_params(struct snd_pcm_substream *substream,
mutex_unlock(&dice->mutex);
}
- amdtp_am824_set_pcm_format(stream, params_format(hw_params));
-
return 0;
}
static int playback_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct snd_dice *dice = substream->private_data;
- struct amdtp_stream *stream = &dice->rx_stream[substream->pcm->device];
int err;
err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
@@ -182,8 +158,6 @@ static int playback_hw_params(struct snd_pcm_substream *substream,
mutex_unlock(&dice->mutex);
}
- amdtp_am824_set_pcm_format(stream, params_format(hw_params));
-
return 0;
}
@@ -300,6 +274,22 @@ static snd_pcm_uframes_t playback_pointer(struct snd_pcm_substream *substream)
return amdtp_stream_pcm_pointer(stream);
}
+static int capture_ack(struct snd_pcm_substream *substream)
+{
+ struct snd_dice *dice = substream->private_data;
+ struct amdtp_stream *stream = &dice->tx_stream[substream->pcm->device];
+
+ return amdtp_stream_pcm_ack(stream);
+}
+
+static int playback_ack(struct snd_pcm_substream *substream)
+{
+ struct snd_dice *dice = substream->private_data;
+ struct amdtp_stream *stream = &dice->rx_stream[substream->pcm->device];
+
+ return amdtp_stream_pcm_ack(stream);
+}
+
int snd_dice_create_pcm(struct snd_dice *dice)
{
static const struct snd_pcm_ops capture_ops = {
@@ -311,6 +301,7 @@ int snd_dice_create_pcm(struct snd_dice *dice)
.prepare = capture_prepare,
.trigger = capture_trigger,
.pointer = capture_pointer,
+ .ack = capture_ack,
.page = snd_pcm_lib_get_vmalloc_page,
.mmap = snd_pcm_lib_mmap_vmalloc,
};
@@ -323,6 +314,7 @@ int snd_dice_create_pcm(struct snd_dice *dice)
.prepare = playback_prepare,
.trigger = playback_trigger,
.pointer = playback_pointer,
+ .ack = playback_ack,
.page = snd_pcm_lib_get_vmalloc_page,
.mmap = snd_pcm_lib_mmap_vmalloc,
};
diff --git a/sound/firewire/digi00x/amdtp-dot.c b/sound/firewire/digi00x/amdtp-dot.c
index a4688545339c..1453c34ce99f 100644
--- a/sound/firewire/digi00x/amdtp-dot.c
+++ b/sound/firewire/digi00x/amdtp-dot.c
@@ -48,10 +48,6 @@ struct amdtp_dot {
struct snd_rawmidi_substream *midi[MAX_MIDI_PORTS];
int midi_fifo_used[MAX_MIDI_PORTS];
int midi_fifo_limit;
-
- void (*transfer_samples)(struct amdtp_stream *s,
- struct snd_pcm_substream *pcm,
- __be32 *buffer, unsigned int frames);
};
/*
@@ -173,32 +169,6 @@ static void write_pcm_s32(struct amdtp_stream *s, struct snd_pcm_substream *pcm,
}
}
-static void write_pcm_s16(struct amdtp_stream *s, struct snd_pcm_substream *pcm,
- __be32 *buffer, unsigned int frames)
-{
- struct amdtp_dot *p = s->protocol;
- struct snd_pcm_runtime *runtime = pcm->runtime;
- unsigned int channels, remaining_frames, i, c;
- const u16 *src;
-
- channels = p->pcm_channels;
- src = (void *)runtime->dma_area +
- frames_to_bytes(runtime, s->pcm_buffer_pointer);
- remaining_frames = runtime->buffer_size - s->pcm_buffer_pointer;
-
- buffer++;
- for (i = 0; i < frames; ++i) {
- for (c = 0; c < channels; ++c) {
- buffer[c] = cpu_to_be32((*src << 8) | 0x40000000);
- dot_encode_step(&p->state, &buffer[c]);
- src++;
- }
- buffer += s->data_block_quadlets;
- if (--remaining_frames == 0)
- src = (void *)runtime->dma_area;
- }
-}
-
static void read_pcm_s32(struct amdtp_stream *s, struct snd_pcm_substream *pcm,
__be32 *buffer, unsigned int frames)
{
@@ -351,33 +321,6 @@ int amdtp_dot_add_pcm_hw_constraints(struct amdtp_stream *s,
return amdtp_stream_add_pcm_hw_constraints(s, runtime);
}
-void amdtp_dot_set_pcm_format(struct amdtp_stream *s, snd_pcm_format_t format)
-{
- struct amdtp_dot *p = s->protocol;
-
- if (WARN_ON(amdtp_stream_pcm_running(s)))
- return;
-
- switch (format) {
- default:
- WARN_ON(1);
- /* fall through */
- case SNDRV_PCM_FORMAT_S16:
- if (s->direction == AMDTP_OUT_STREAM) {
- p->transfer_samples = write_pcm_s16;
- break;
- }
- WARN_ON(1);
- /* fall through */
- case SNDRV_PCM_FORMAT_S32:
- if (s->direction == AMDTP_OUT_STREAM)
- p->transfer_samples = write_pcm_s32;
- else
- p->transfer_samples = read_pcm_s32;
- break;
- }
-}
-
void amdtp_dot_midi_trigger(struct amdtp_stream *s, unsigned int port,
struct snd_rawmidi_substream *midi)
{
@@ -392,13 +335,12 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
unsigned int data_blocks,
unsigned int *syt)
{
- struct amdtp_dot *p = (struct amdtp_dot *)s->protocol;
struct snd_pcm_substream *pcm;
unsigned int pcm_frames;
pcm = ACCESS_ONCE(s->pcm);
if (pcm) {
- p->transfer_samples(s, pcm, buffer, data_blocks);
+ read_pcm_s32(s, pcm, buffer, data_blocks);
pcm_frames = data_blocks;
} else {
pcm_frames = 0;
@@ -414,13 +356,12 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s,
unsigned int data_blocks,
unsigned int *syt)
{
- struct amdtp_dot *p = (struct amdtp_dot *)s->protocol;
struct snd_pcm_substream *pcm;
unsigned int pcm_frames;
pcm = ACCESS_ONCE(s->pcm);
if (pcm) {
- p->transfer_samples(s, pcm, buffer, data_blocks);
+ write_pcm_s32(s, pcm, buffer, data_blocks);
pcm_frames = data_blocks;
} else {
write_pcm_silence(s, buffer, data_blocks);
diff --git a/sound/firewire/digi00x/digi00x-pcm.c b/sound/firewire/digi00x/digi00x-pcm.c
index 68d1c52db051..796f4b4645f5 100644
--- a/sound/firewire/digi00x/digi00x-pcm.c
+++ b/sound/firewire/digi00x/digi00x-pcm.c
@@ -58,41 +58,29 @@ static int hw_rule_channels(struct snd_pcm_hw_params *params,
static int pcm_init_hw_params(struct snd_dg00x *dg00x,
struct snd_pcm_substream *substream)
{
- static const struct snd_pcm_hardware hardware = {
- .info = SNDRV_PCM_INFO_BATCH |
- SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_JOINT_DUPLEX |
- SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID,
- .rates = SNDRV_PCM_RATE_44100 |
- SNDRV_PCM_RATE_48000 |
- SNDRV_PCM_RATE_88200 |
- SNDRV_PCM_RATE_96000,
- .rate_min = 44100,
- .rate_max = 96000,
- .channels_min = 10,
- .channels_max = 18,
- .period_bytes_min = 4 * 18,
- .period_bytes_max = 4 * 18 * 2048,
- .buffer_bytes_max = 4 * 18 * 2048 * 2,
- .periods_min = 2,
- .periods_max = UINT_MAX,
- };
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_pcm_hardware *hw = &runtime->hw;
struct amdtp_stream *s;
int err;
- substream->runtime->hw = hardware;
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
substream->runtime->hw.formats = SNDRV_PCM_FMTBIT_S32;
s = &dg00x->tx_stream;
} else {
- substream->runtime->hw.formats = SNDRV_PCM_FMTBIT_S16 |
- SNDRV_PCM_FMTBIT_S32;
+ substream->runtime->hw.formats = SNDRV_PCM_FMTBIT_S32;
s = &dg00x->rx_stream;
}
+ hw->channels_min = 10;
+ hw->channels_max = 18;
+
+ hw->rates = SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000;
+ snd_pcm_limit_hw_rates(runtime);
+
err = snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_CHANNELS,
hw_rule_channels, NULL,
@@ -184,8 +172,6 @@ static int pcm_capture_hw_params(struct snd_pcm_substream *substream,
mutex_unlock(&dg00x->mutex);
}
- amdtp_dot_set_pcm_format(&dg00x->tx_stream, params_format(hw_params));
-
return 0;
}
@@ -206,8 +192,6 @@ static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
mutex_unlock(&dg00x->mutex);
}
- amdtp_dot_set_pcm_format(&dg00x->rx_stream, params_format(hw_params));
-
return 0;
}
@@ -329,6 +313,20 @@ static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
return amdtp_stream_pcm_pointer(&dg00x->rx_stream);
}
+static int pcm_capture_ack(struct snd_pcm_substream *substream)
+{
+ struct snd_dg00x *dg00x = substream->private_data;
+
+ return amdtp_stream_pcm_ack(&dg00x->tx_stream);
+}
+
+static int pcm_playback_ack(struct snd_pcm_substream *substream)
+{
+ struct snd_dg00x *dg00x = substream->private_data;
+
+ return amdtp_stream_pcm_ack(&dg00x->rx_stream);
+}
+
int snd_dg00x_create_pcm_devices(struct snd_dg00x *dg00x)
{
static const struct snd_pcm_ops capture_ops = {
@@ -340,6 +338,7 @@ int snd_dg00x_create_pcm_devices(struct snd_dg00x *dg00x)
.prepare = pcm_capture_prepare,
.trigger = pcm_capture_trigger,
.pointer = pcm_capture_pointer,
+ .ack = pcm_capture_ack,
.page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_ops = {
@@ -351,6 +350,7 @@ int snd_dg00x_create_pcm_devices(struct snd_dg00x *dg00x)
.prepare = pcm_playback_prepare,
.trigger = pcm_playback_trigger,
.pointer = pcm_playback_pointer,
+ .ack = pcm_playback_ack,
.page = snd_pcm_lib_get_vmalloc_page,
.mmap = snd_pcm_lib_mmap_vmalloc,
};
diff --git a/sound/firewire/digi00x/digi00x.h b/sound/firewire/digi00x/digi00x.h
index 1275a50956c0..4dd1bbf2ed3c 100644
--- a/sound/firewire/digi00x/digi00x.h
+++ b/sound/firewire/digi00x/digi00x.h
@@ -121,7 +121,6 @@ int amdtp_dot_set_parameters(struct amdtp_stream *s, unsigned int rate,
void amdtp_dot_reset(struct amdtp_stream *s);
int amdtp_dot_add_pcm_hw_constraints(struct amdtp_stream *s,
struct snd_pcm_runtime *runtime);
-void amdtp_dot_set_pcm_format(struct amdtp_stream *s, snd_pcm_format_t format);
void amdtp_dot_midi_trigger(struct amdtp_stream *s, unsigned int port,
struct snd_rawmidi_substream *midi);
diff --git a/sound/firewire/fireface/ff-midi.c b/sound/firewire/fireface/ff-midi.c
index 29ee0a7365c3..949ee56b4e0e 100644
--- a/sound/firewire/fireface/ff-midi.c
+++ b/sound/firewire/fireface/ff-midi.c
@@ -74,18 +74,6 @@ static void midi_playback_trigger(struct snd_rawmidi_substream *substream,
spin_unlock_irqrestore(&ff->lock, flags);
}
-static struct snd_rawmidi_ops midi_capture_ops = {
- .open = midi_capture_open,
- .close = midi_capture_close,
- .trigger = midi_capture_trigger,
-};
-
-static struct snd_rawmidi_ops midi_playback_ops = {
- .open = midi_playback_open,
- .close = midi_playback_close,
- .trigger = midi_playback_trigger,
-};
-
static void set_midi_substream_names(struct snd_rawmidi_str *stream,
const char *const name)
{
@@ -99,6 +87,16 @@ static void set_midi_substream_names(struct snd_rawmidi_str *stream,
int snd_ff_create_midi_devices(struct snd_ff *ff)
{
+ static const struct snd_rawmidi_ops midi_capture_ops = {
+ .open = midi_capture_open,
+ .close = midi_capture_close,
+ .trigger = midi_capture_trigger,
+ };
+ static const struct snd_rawmidi_ops midi_playback_ops = {
+ .open = midi_playback_open,
+ .close = midi_playback_close,
+ .trigger = midi_playback_trigger,
+ };
struct snd_rawmidi *rmidi;
struct snd_rawmidi_str *stream;
int err;
diff --git a/sound/firewire/fireface/ff-pcm.c b/sound/firewire/fireface/ff-pcm.c
index 93cee1978e8e..d12a0e3a4219 100644
--- a/sound/firewire/fireface/ff-pcm.c
+++ b/sound/firewire/fireface/ff-pcm.c
@@ -91,18 +91,6 @@ static void limit_channels_and_rates(struct snd_pcm_hardware *hw,
}
}
-static void limit_period_and_buffer(struct snd_pcm_hardware *hw)
-{
- hw->periods_min = 2; /* SNDRV_PCM_INFO_BATCH */
- hw->periods_max = UINT_MAX;
-
- hw->period_bytes_min = 4 * hw->channels_max; /* bytes for a frame */
-
- /* Just to prevent from allocating much pages. */
- hw->period_bytes_max = hw->period_bytes_min * 2048;
- hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
-}
-
static int pcm_init_hw_params(struct snd_ff *ff,
struct snd_pcm_substream *substream)
{
@@ -111,13 +99,6 @@ static int pcm_init_hw_params(struct snd_ff *ff,
const unsigned int *pcm_channels;
int err;
- runtime->hw.info = SNDRV_PCM_INFO_BATCH |
- SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_JOINT_DUPLEX |
- SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID;
-
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
runtime->hw.formats = SNDRV_PCM_FMTBIT_S32;
s = &ff->tx_stream;
@@ -128,9 +109,7 @@ static int pcm_init_hw_params(struct snd_ff *ff,
pcm_channels = ff->spec->pcm_playback_channels;
}
- /* limit rates */
limit_channels_and_rates(&runtime->hw, pcm_channels);
- limit_period_and_buffer(&runtime->hw);
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
hw_rule_channels, (void *)pcm_channels,
@@ -365,33 +344,47 @@ static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
return amdtp_stream_pcm_pointer(&ff->rx_stream);
}
-static struct snd_pcm_ops pcm_capture_ops = {
- .open = pcm_open,
- .close = pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = pcm_capture_hw_params,
- .hw_free = pcm_capture_hw_free,
- .prepare = pcm_capture_prepare,
- .trigger = pcm_capture_trigger,
- .pointer = pcm_capture_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
-};
-
-static struct snd_pcm_ops pcm_playback_ops = {
- .open = pcm_open,
- .close = pcm_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = pcm_playback_hw_params,
- .hw_free = pcm_playback_hw_free,
- .prepare = pcm_playback_prepare,
- .trigger = pcm_playback_trigger,
- .pointer = pcm_playback_pointer,
- .page = snd_pcm_lib_get_vmalloc_page,
- .mmap = snd_pcm_lib_mmap_vmalloc,
-};
+static int pcm_capture_ack(struct snd_pcm_substream *substream)
+{
+ struct snd_ff *ff = substream->private_data;
+
+ return amdtp_stream_pcm_ack(&ff->tx_stream);
+}
+
+static int pcm_playback_ack(struct snd_pcm_substream *substream)
+{
+ struct snd_ff *ff = substream->private_data;
+
+ return amdtp_stream_pcm_ack(&ff->rx_stream);
+}
int snd_ff_create_pcm_devices(struct snd_ff *ff)
{
+ static const struct snd_pcm_ops pcm_capture_ops = {
+ .open = pcm_open,
+ .close = pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = pcm_capture_hw_params,
+ .hw_free = pcm_capture_hw_free,
+ .prepare = pcm_capture_prepare,
+ .trigger = pcm_capture_trigger,
+ .pointer = pcm_capture_pointer,
+ .ack = pcm_capture_ack,
+ .page = snd_pcm_lib_get_vmalloc_page,
+ };
+ static const struct snd_pcm_ops pcm_playback_ops = {
+ .open = pcm_open,
+ .close = pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = pcm_playback_hw_params,
+ .hw_free = pcm_playback_hw_free,
+ .prepare = pcm_playback_prepare,
+ .trigger = pcm_playback_trigger,
+ .pointer = pcm_playback_pointer,
+ .ack = pcm_playback_ack,
+ .page = snd_pcm_lib_get_vmalloc_page,
+ .mmap = snd_pcm_lib_mmap_vmalloc,
+ };
struct snd_pcm *pcm;
int err;
diff --git a/sound/firewire/fireworks/fireworks_pcm.c b/sound/firewire/fireworks/fireworks_pcm.c
index 9171702f7d0b..40faed5e6968 100644
--- a/sound/firewire/fireworks/fireworks_pcm.c
+++ b/sound/firewire/fireworks/fireworks_pcm.c
@@ -129,19 +129,6 @@ limit_channels(struct snd_pcm_hardware *hw, unsigned int *pcm_channels)
}
}
-static void
-limit_period_and_buffer(struct snd_pcm_hardware *hw)
-{
- hw->periods_min = 2; /* SNDRV_PCM_INFO_BATCH */
- hw->periods_max = UINT_MAX;
-
- hw->period_bytes_min = 4 * hw->channels_max; /* bytes for a frame */
-
- /* Just to prevent from allocating much pages. */
- hw->period_bytes_max = hw->period_bytes_min * 2048;
- hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
-}
-
static int
pcm_init_hw_params(struct snd_efw *efw,
struct snd_pcm_substream *substream)
@@ -151,13 +138,6 @@ pcm_init_hw_params(struct snd_efw *efw,
unsigned int *pcm_channels;
int err;
- runtime->hw.info = SNDRV_PCM_INFO_BATCH |
- SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_JOINT_DUPLEX |
- SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID;
-
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
runtime->hw.formats = AM824_IN_PCM_FORMAT_BITS;
s = &efw->tx_stream;
@@ -173,7 +153,6 @@ pcm_init_hw_params(struct snd_efw *efw,
snd_pcm_limit_hw_rates(runtime);
limit_channels(&runtime->hw, pcm_channels);
- limit_period_and_buffer(&runtime->hw);
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
hw_rule_channels, pcm_channels,
@@ -257,8 +236,6 @@ static int pcm_capture_hw_params(struct snd_pcm_substream *substream,
mutex_unlock(&efw->mutex);
}
- amdtp_am824_set_pcm_format(&efw->tx_stream, params_format(hw_params));
-
return 0;
}
static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
@@ -278,8 +255,6 @@ static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
mutex_unlock(&efw->mutex);
}
- amdtp_am824_set_pcm_format(&efw->rx_stream, params_format(hw_params));
-
return 0;
}
@@ -383,6 +358,20 @@ static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
return amdtp_stream_pcm_pointer(&efw->rx_stream);
}
+static int pcm_capture_ack(struct snd_pcm_substream *substream)
+{
+ struct snd_efw *efw = substream->private_data;
+
+ return amdtp_stream_pcm_ack(&efw->tx_stream);
+}
+
+static int pcm_playback_ack(struct snd_pcm_substream *substream)
+{
+ struct snd_efw *efw = substream->private_data;
+
+ return amdtp_stream_pcm_ack(&efw->rx_stream);
+}
+
int snd_efw_create_pcm_devices(struct snd_efw *efw)
{
static const struct snd_pcm_ops capture_ops = {
@@ -394,6 +383,7 @@ int snd_efw_create_pcm_devices(struct snd_efw *efw)
.prepare = pcm_capture_prepare,
.trigger = pcm_capture_trigger,
.pointer = pcm_capture_pointer,
+ .ack = pcm_capture_ack,
.page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_ops = {
@@ -405,6 +395,7 @@ int snd_efw_create_pcm_devices(struct snd_efw *efw)
.prepare = pcm_playback_prepare,
.trigger = pcm_playback_trigger,
.pointer = pcm_playback_pointer,
+ .ack = pcm_playback_ack,
.page = snd_pcm_lib_get_vmalloc_page,
.mmap = snd_pcm_lib_mmap_vmalloc,
};
diff --git a/sound/firewire/motu/motu-pcm.c b/sound/firewire/motu/motu-pcm.c
index 94558f3d218b..a2b50df70874 100644
--- a/sound/firewire/motu/motu-pcm.c
+++ b/sound/firewire/motu/motu-pcm.c
@@ -96,18 +96,6 @@ static void limit_channels_and_rates(struct snd_motu *motu,
snd_pcm_limit_hw_rates(runtime);
}
-static void limit_period_and_buffer(struct snd_pcm_hardware *hw)
-{
- hw->periods_min = 2; /* SNDRV_PCM_INFO_BATCH */
- hw->periods_max = UINT_MAX;
-
- hw->period_bytes_min = 4 * hw->channels_max; /* byte for a frame */
-
- /* Just to prevent from allocating much pages. */
- hw->period_bytes_max = hw->period_bytes_min * 2048;
- hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
-}
-
static int init_hw_info(struct snd_motu *motu,
struct snd_pcm_substream *substream)
{
@@ -117,13 +105,6 @@ static int init_hw_info(struct snd_motu *motu,
struct snd_motu_packet_format *formats;
int err;
- hw->info = SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_BATCH |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_JOINT_DUPLEX |
- SNDRV_PCM_INFO_BLOCK_TRANSFER;
-
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
hw->formats = SNDRV_PCM_FMTBIT_S32;
stream = &motu->tx_stream;
@@ -135,7 +116,6 @@ static int init_hw_info(struct snd_motu *motu,
}
limit_channels_and_rates(motu, runtime, formats);
- limit_period_and_buffer(hw);
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
motu_rate_constraint, formats,
@@ -356,6 +336,20 @@ static snd_pcm_uframes_t playback_pointer(struct snd_pcm_substream *substream)
return amdtp_stream_pcm_pointer(&motu->rx_stream);
}
+static int capture_ack(struct snd_pcm_substream *substream)
+{
+ struct snd_motu *motu = substream->private_data;
+
+ return amdtp_stream_pcm_ack(&motu->tx_stream);
+}
+
+static int playback_ack(struct snd_pcm_substream *substream)
+{
+ struct snd_motu *motu = substream->private_data;
+
+ return amdtp_stream_pcm_ack(&motu->rx_stream);
+}
+
int snd_motu_create_pcm_devices(struct snd_motu *motu)
{
static struct snd_pcm_ops capture_ops = {
@@ -367,6 +361,7 @@ int snd_motu_create_pcm_devices(struct snd_motu *motu)
.prepare = capture_prepare,
.trigger = capture_trigger,
.pointer = capture_pointer,
+ .ack = capture_ack,
.page = snd_pcm_lib_get_vmalloc_page,
.mmap = snd_pcm_lib_mmap_vmalloc,
};
@@ -379,6 +374,7 @@ int snd_motu_create_pcm_devices(struct snd_motu *motu)
.prepare = playback_prepare,
.trigger = playback_trigger,
.pointer = playback_pointer,
+ .ack = playback_ack,
.page = snd_pcm_lib_get_vmalloc_page,
.mmap = snd_pcm_lib_mmap_vmalloc,
};
diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c
index f3530f89a025..3dd46285c0e2 100644
--- a/sound/firewire/oxfw/oxfw-pcm.c
+++ b/sound/firewire/oxfw/oxfw-pcm.c
@@ -106,18 +106,6 @@ static void limit_channels_and_rates(struct snd_pcm_hardware *hw, u8 **formats)
}
}
-static void limit_period_and_buffer(struct snd_pcm_hardware *hw)
-{
- hw->periods_min = 2; /* SNDRV_PCM_INFO_BATCH */
- hw->periods_max = UINT_MAX;
-
- hw->period_bytes_min = 4 * hw->channels_max; /* bytes for a frame */
-
- /* Just to prevent from allocating much pages. */
- hw->period_bytes_max = hw->period_bytes_min * 2048;
- hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
-}
-
static int init_hw_params(struct snd_oxfw *oxfw,
struct snd_pcm_substream *substream)
{
@@ -126,13 +114,6 @@ static int init_hw_params(struct snd_oxfw *oxfw,
struct amdtp_stream *stream;
int err;
- runtime->hw.info = SNDRV_PCM_INFO_BATCH |
- SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_JOINT_DUPLEX |
- SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID;
-
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
runtime->hw.formats = AM824_IN_PCM_FORMAT_BITS;
stream = &oxfw->tx_stream;
@@ -144,7 +125,6 @@ static int init_hw_params(struct snd_oxfw *oxfw,
}
limit_channels_and_rates(&runtime->hw, formats);
- limit_period_and_buffer(&runtime->hw);
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
hw_rule_channels, formats,
@@ -244,8 +224,6 @@ static int pcm_capture_hw_params(struct snd_pcm_substream *substream,
mutex_unlock(&oxfw->mutex);
}
- amdtp_am824_set_pcm_format(&oxfw->tx_stream, params_format(hw_params));
-
return 0;
}
static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
@@ -265,8 +243,6 @@ static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
mutex_unlock(&oxfw->mutex);
}
- amdtp_am824_set_pcm_format(&oxfw->rx_stream, params_format(hw_params));
-
return 0;
}
@@ -386,6 +362,20 @@ static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstm)
return amdtp_stream_pcm_pointer(&oxfw->rx_stream);
}
+static int pcm_capture_ack(struct snd_pcm_substream *substream)
+{
+ struct snd_oxfw *oxfw = substream->private_data;
+
+ return amdtp_stream_pcm_ack(&oxfw->tx_stream);
+}
+
+static int pcm_playback_ack(struct snd_pcm_substream *substream)
+{
+ struct snd_oxfw *oxfw = substream->private_data;
+
+ return amdtp_stream_pcm_ack(&oxfw->rx_stream);
+}
+
int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
{
static const struct snd_pcm_ops capture_ops = {
@@ -397,6 +387,7 @@ int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
.prepare = pcm_capture_prepare,
.trigger = pcm_capture_trigger,
.pointer = pcm_capture_pointer,
+ .ack = pcm_capture_ack,
.page = snd_pcm_lib_get_vmalloc_page,
.mmap = snd_pcm_lib_mmap_vmalloc,
};
@@ -409,6 +400,7 @@ int snd_oxfw_create_pcm(struct snd_oxfw *oxfw)
.prepare = pcm_playback_prepare,
.trigger = pcm_playback_trigger,
.pointer = pcm_playback_pointer,
+ .ack = pcm_playback_ack,
.page = snd_pcm_lib_get_vmalloc_page,
.mmap = snd_pcm_lib_mmap_vmalloc,
};
diff --git a/sound/firewire/tascam/amdtp-tascam.c b/sound/firewire/tascam/amdtp-tascam.c
index 9dd0fccd5ccc..6aff1fc1c72d 100644
--- a/sound/firewire/tascam/amdtp-tascam.c
+++ b/sound/firewire/tascam/amdtp-tascam.c
@@ -14,10 +14,6 @@
struct amdtp_tscm {
unsigned int pcm_channels;
-
- void (*transfer_samples)(struct amdtp_stream *s,
- struct snd_pcm_substream *pcm,
- __be32 *buffer, unsigned int frames);
};
int amdtp_tscm_set_parameters(struct amdtp_stream *s, unsigned int rate)
@@ -62,31 +58,6 @@ static void write_pcm_s32(struct amdtp_stream *s,
}
}
-static void write_pcm_s16(struct amdtp_stream *s,
- struct snd_pcm_substream *pcm,
- __be32 *buffer, unsigned int frames)
-{
- struct amdtp_tscm *p = s->protocol;
- struct snd_pcm_runtime *runtime = pcm->runtime;
- unsigned int channels, remaining_frames, i, c;
- const u16 *src;
-
- channels = p->pcm_channels;
- src = (void *)runtime->dma_area +
- frames_to_bytes(runtime, s->pcm_buffer_pointer);
- remaining_frames = runtime->buffer_size - s->pcm_buffer_pointer;
-
- for (i = 0; i < frames; ++i) {
- for (c = 0; c < channels; ++c) {
- buffer[c] = cpu_to_be32(*src << 16);
- src++;
- }
- buffer += s->data_block_quadlets;
- if (--remaining_frames == 0)
- src = (void *)runtime->dma_area;
- }
-}
-
static void read_pcm_s32(struct amdtp_stream *s,
struct snd_pcm_substream *pcm,
__be32 *buffer, unsigned int frames)
@@ -146,44 +117,16 @@ int amdtp_tscm_add_pcm_hw_constraints(struct amdtp_stream *s,
return amdtp_stream_add_pcm_hw_constraints(s, runtime);
}
-void amdtp_tscm_set_pcm_format(struct amdtp_stream *s, snd_pcm_format_t format)
-{
- struct amdtp_tscm *p = s->protocol;
-
- if (WARN_ON(amdtp_stream_pcm_running(s)))
- return;
-
- switch (format) {
- default:
- WARN_ON(1);
- /* fall through */
- case SNDRV_PCM_FORMAT_S16:
- if (s->direction == AMDTP_OUT_STREAM) {
- p->transfer_samples = write_pcm_s16;
- break;
- }
- WARN_ON(1);
- /* fall through */
- case SNDRV_PCM_FORMAT_S32:
- if (s->direction == AMDTP_OUT_STREAM)
- p->transfer_samples = write_pcm_s32;
- else
- p->transfer_samples = read_pcm_s32;
- break;
- }
-}
-
static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
__be32 *buffer,
unsigned int data_blocks,
unsigned int *syt)
{
- struct amdtp_tscm *p = (struct amdtp_tscm *)s->protocol;
struct snd_pcm_substream *pcm;
pcm = ACCESS_ONCE(s->pcm);
if (data_blocks > 0 && pcm)
- p->transfer_samples(s, pcm, buffer, data_blocks);
+ read_pcm_s32(s, pcm, buffer, data_blocks);
/* A place holder for control messages. */
@@ -195,7 +138,6 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s,
unsigned int data_blocks,
unsigned int *syt)
{
- struct amdtp_tscm *p = (struct amdtp_tscm *)s->protocol;
struct snd_pcm_substream *pcm;
/* This field is not used. */
@@ -203,7 +145,7 @@ static unsigned int process_rx_data_blocks(struct amdtp_stream *s,
pcm = ACCESS_ONCE(s->pcm);
if (pcm)
- p->transfer_samples(s, pcm, buffer, data_blocks);
+ write_pcm_s32(s, pcm, buffer, data_blocks);
else
write_pcm_silence(s, buffer, data_blocks);
diff --git a/sound/firewire/tascam/tascam-pcm.c b/sound/firewire/tascam/tascam-pcm.c
index f5dd6ce6b6f1..6ec8ec634d4d 100644
--- a/sound/firewire/tascam/tascam-pcm.c
+++ b/sound/firewire/tascam/tascam-pcm.c
@@ -8,48 +8,20 @@
#include "tascam.h"
-static void set_buffer_params(struct snd_pcm_hardware *hw)
-{
- hw->period_bytes_min = 4 * hw->channels_min;
- hw->period_bytes_max = hw->period_bytes_min * 2048;
- hw->buffer_bytes_max = hw->period_bytes_max * 2;
-
- hw->periods_min = 2;
- hw->periods_max = UINT_MAX;
-}
-
static int pcm_init_hw_params(struct snd_tscm *tscm,
struct snd_pcm_substream *substream)
{
- static const struct snd_pcm_hardware hardware = {
- .info = SNDRV_PCM_INFO_BATCH |
- SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_JOINT_DUPLEX |
- SNDRV_PCM_INFO_MMAP |
- SNDRV_PCM_INFO_MMAP_VALID,
- .rates = SNDRV_PCM_RATE_44100 |
- SNDRV_PCM_RATE_48000 |
- SNDRV_PCM_RATE_88200 |
- SNDRV_PCM_RATE_96000,
- .rate_min = 44100,
- .rate_max = 96000,
- .channels_min = 10,
- .channels_max = 18,
- };
struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_pcm_hardware *hw = &runtime->hw;
struct amdtp_stream *stream;
unsigned int pcm_channels;
- runtime->hw = hardware;
-
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
runtime->hw.formats = SNDRV_PCM_FMTBIT_S32;
stream = &tscm->tx_stream;
pcm_channels = tscm->spec->pcm_capture_analog_channels;
} else {
- runtime->hw.formats =
- SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S32;
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S32;
stream = &tscm->rx_stream;
pcm_channels = tscm->spec->pcm_playback_analog_channels;
}
@@ -60,7 +32,11 @@ static int pcm_init_hw_params(struct snd_tscm *tscm,
pcm_channels += 2;
runtime->hw.channels_min = runtime->hw.channels_max = pcm_channels;
- set_buffer_params(&runtime->hw);
+ hw->rates = SNDRV_PCM_RATE_44100 |
+ SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_96000;
+ snd_pcm_limit_hw_rates(runtime);
return amdtp_tscm_add_pcm_hw_constraints(stream, runtime);
}
@@ -125,8 +101,6 @@ static int pcm_capture_hw_params(struct snd_pcm_substream *substream,
mutex_unlock(&tscm->mutex);
}
- amdtp_tscm_set_pcm_format(&tscm->tx_stream, params_format(hw_params));
-
return 0;
}
@@ -147,8 +121,6 @@ static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
mutex_unlock(&tscm->mutex);
}
- amdtp_tscm_set_pcm_format(&tscm->rx_stream, params_format(hw_params));
-
return 0;
}
@@ -268,6 +240,20 @@ static snd_pcm_uframes_t pcm_playback_pointer(struct snd_pcm_substream *sbstrm)
return amdtp_stream_pcm_pointer(&tscm->rx_stream);
}
+static int pcm_capture_ack(struct snd_pcm_substream *substream)
+{
+ struct snd_tscm *tscm = substream->private_data;
+
+ return amdtp_stream_pcm_ack(&tscm->tx_stream);
+}
+
+static int pcm_playback_ack(struct snd_pcm_substream *substream)
+{
+ struct snd_tscm *tscm = substream->private_data;
+
+ return amdtp_stream_pcm_ack(&tscm->rx_stream);
+}
+
int snd_tscm_create_pcm_devices(struct snd_tscm *tscm)
{
static const struct snd_pcm_ops capture_ops = {
@@ -279,6 +265,7 @@ int snd_tscm_create_pcm_devices(struct snd_tscm *tscm)
.prepare = pcm_capture_prepare,
.trigger = pcm_capture_trigger,
.pointer = pcm_capture_pointer,
+ .ack = pcm_capture_ack,
.page = snd_pcm_lib_get_vmalloc_page,
};
static const struct snd_pcm_ops playback_ops = {
@@ -290,6 +277,7 @@ int snd_tscm_create_pcm_devices(struct snd_tscm *tscm)
.prepare = pcm_playback_prepare,
.trigger = pcm_playback_trigger,
.pointer = pcm_playback_pointer,
+ .ack = pcm_playback_ack,
.page = snd_pcm_lib_get_vmalloc_page,
.mmap = snd_pcm_lib_mmap_vmalloc,
};
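
The tascam-pcm.c hunks above settle on S32-only PCM formats, set the rate mask and let snd_pcm_limit_hw_rates() derive rate_min/rate_max, and register new .ack callbacks so the AMDTP layer is poked from the PCM core's ack path. A minimal sketch of that callback shape, with hypothetical my_* names standing in for the driver's own types, is:

	/* Illustrative only: an .ack handler forwards to the stream layer so
	 * newly appended samples are transferred promptly.  "my_dev" and
	 * "my_stream_ack" are placeholder names, not part of the patch. */
	static int my_pcm_playback_ack(struct snd_pcm_substream *substream)
	{
		struct my_dev *dev = substream->private_data;

		/* Return 0 or a negative errno, as the PCM core expects. */
		return my_stream_ack(&dev->rx_stream);
	}
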
diff --git a/sound/firewire/tascam/tascam.h b/sound/firewire/tascam/tascam.h
index 08ecfae5c584..a5bd167eb5d9 100644
--- a/sound/firewire/tascam/tascam.h
+++ b/sound/firewire/tascam/tascam.h
@@ -131,7 +131,6 @@ int amdtp_tscm_init(struct amdtp_stream *s, struct fw_unit *unit,
int amdtp_tscm_set_parameters(struct amdtp_stream *s, unsigned int rate);
int amdtp_tscm_add_pcm_hw_constraints(struct amdtp_stream *s,
struct snd_pcm_runtime *runtime);
-void amdtp_tscm_set_pcm_format(struct amdtp_stream *s, snd_pcm_format_t format);
int snd_tscm_stream_get_rate(struct snd_tscm *tscm, unsigned int *rate);
int snd_tscm_stream_get_clock(struct snd_tscm *tscm,
diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c
index 0e81ea89a596..714a51721a31 100644
--- a/sound/hda/hdac_bus.c
+++ b/sound/hda/hdac_bus.c
@@ -212,5 +212,6 @@ void snd_hdac_bus_remove_device(struct hdac_bus *bus,
bus->caddr_tbl[codec->addr] = NULL;
clear_bit(codec->addr, &bus->codec_powered);
bus->num_codecs--;
+ flush_work(&bus->unsol_work);
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_remove_device);
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index 03c9872c31cf..19deb306facb 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -159,6 +159,7 @@ void snd_hdac_device_unregister(struct hdac_device *codec)
if (device_is_registered(&codec->dev)) {
hda_widget_sysfs_exit(codec);
device_del(&codec->dev);
+ snd_hdac_bus_remove_device(codec->bus, codec);
}
}
EXPORT_SYMBOL_GPL(snd_hdac_device_unregister);
diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c
index 2183e9ebaa6d..4099e6062d3c 100644
--- a/sound/i2c/other/ak4113.c
+++ b/sound/i2c/other/ak4113.c
@@ -199,12 +199,11 @@ static int snd_ak4113_in_error_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct ak4113 *chip = snd_kcontrol_chip(kcontrol);
- long *ptr;
spin_lock_irq(&chip->lock);
- ptr = (long *)(((char *)chip) + kcontrol->private_value);
- ucontrol->value.integer.value[0] = *ptr;
- *ptr = 0;
+ ucontrol->value.integer.value[0] =
+ chip->errors[kcontrol->private_value];
+ chip->errors[kcontrol->private_value] = 0;
spin_unlock_irq(&chip->lock);
return 0;
}
@@ -373,7 +372,7 @@ static struct snd_kcontrol_new snd_ak4113_iec958_controls[] = {
SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = snd_ak4113_in_error_info,
.get = snd_ak4113_in_error_get,
- .private_value = offsetof(struct ak4113, parity_errors),
+ .private_value = AK4113_PARITY_ERRORS,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -382,7 +381,7 @@ static struct snd_kcontrol_new snd_ak4113_iec958_controls[] = {
SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = snd_ak4113_in_error_info,
.get = snd_ak4113_in_error_get,
- .private_value = offsetof(struct ak4113, v_bit_errors),
+ .private_value = AK4113_V_BIT_ERRORS,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -391,7 +390,7 @@ static struct snd_kcontrol_new snd_ak4113_iec958_controls[] = {
SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = snd_ak4113_in_error_info,
.get = snd_ak4113_in_error_get,
- .private_value = offsetof(struct ak4113, ccrc_errors),
+ .private_value = AK4113_CCRC_ERRORS,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -400,7 +399,7 @@ static struct snd_kcontrol_new snd_ak4113_iec958_controls[] = {
SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = snd_ak4113_in_error_info,
.get = snd_ak4113_in_error_get,
- .private_value = offsetof(struct ak4113, qcrc_errors),
+ .private_value = AK4113_QCRC_ERRORS,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -551,13 +550,13 @@ int snd_ak4113_check_rate_and_errors(struct ak4113 *ak4113, unsigned int flags)
rcs2 = reg_read(ak4113, AK4113_REG_RCS2);
spin_lock_irqsave(&ak4113->lock, _flags);
if (rcs0 & AK4113_PAR)
- ak4113->parity_errors++;
+ ak4113->errors[AK4113_PARITY_ERRORS]++;
if (rcs0 & AK4113_V)
- ak4113->v_bit_errors++;
+ ak4113->errors[AK4113_V_BIT_ERRORS]++;
if (rcs2 & AK4113_CCRC)
- ak4113->ccrc_errors++;
+ ak4113->errors[AK4113_CCRC_ERRORS]++;
if (rcs2 & AK4113_QCRC)
- ak4113->qcrc_errors++;
+ ak4113->errors[AK4113_QCRC_ERRORS]++;
c0 = (ak4113->rcs0 & (AK4113_QINT | AK4113_CINT | AK4113_STC |
AK4113_AUDION | AK4113_AUTO | AK4113_UNLCK)) ^
(rcs0 & (AK4113_QINT | AK4113_CINT | AK4113_STC |
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
index d53c9bb36281..7fb1aeb46915 100644
--- a/sound/i2c/other/ak4114.c
+++ b/sound/i2c/other/ak4114.c
@@ -194,12 +194,11 @@ static int snd_ak4114_in_error_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct ak4114 *chip = snd_kcontrol_chip(kcontrol);
- long *ptr;
spin_lock_irq(&chip->lock);
- ptr = (long *)(((char *)chip) + kcontrol->private_value);
- ucontrol->value.integer.value[0] = *ptr;
- *ptr = 0;
+ ucontrol->value.integer.value[0] =
+ chip->errors[kcontrol->private_value];
+ chip->errors[kcontrol->private_value] = 0;
spin_unlock_irq(&chip->lock);
return 0;
}
@@ -341,7 +340,7 @@ static struct snd_kcontrol_new snd_ak4114_iec958_controls[] = {
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = snd_ak4114_in_error_info,
.get = snd_ak4114_in_error_get,
- .private_value = offsetof(struct ak4114, parity_errors),
+ .private_value = AK4114_PARITY_ERRORS,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -349,7 +348,7 @@ static struct snd_kcontrol_new snd_ak4114_iec958_controls[] = {
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = snd_ak4114_in_error_info,
.get = snd_ak4114_in_error_get,
- .private_value = offsetof(struct ak4114, v_bit_errors),
+ .private_value = AK4114_V_BIT_ERRORS,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -357,7 +356,7 @@ static struct snd_kcontrol_new snd_ak4114_iec958_controls[] = {
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = snd_ak4114_in_error_info,
.get = snd_ak4114_in_error_get,
- .private_value = offsetof(struct ak4114, ccrc_errors),
+ .private_value = AK4114_CCRC_ERRORS,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -365,7 +364,7 @@ static struct snd_kcontrol_new snd_ak4114_iec958_controls[] = {
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = snd_ak4114_in_error_info,
.get = snd_ak4114_in_error_get,
- .private_value = offsetof(struct ak4114, qcrc_errors),
+ .private_value = AK4114_QCRC_ERRORS,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -581,13 +580,13 @@ int snd_ak4114_check_rate_and_errors(struct ak4114 *ak4114, unsigned int flags)
rcs0 = reg_read(ak4114, AK4114_REG_RCS0);
spin_lock_irqsave(&ak4114->lock, _flags);
if (rcs0 & AK4114_PAR)
- ak4114->parity_errors++;
+ ak4114->errors[AK4114_PARITY_ERRORS]++;
if (rcs1 & AK4114_V)
- ak4114->v_bit_errors++;
+ ak4114->errors[AK4114_V_BIT_ERRORS]++;
if (rcs1 & AK4114_CCRC)
- ak4114->ccrc_errors++;
+ ak4114->errors[AK4114_CCRC_ERRORS]++;
if (rcs1 & AK4114_QCRC)
- ak4114->qcrc_errors++;
+ ak4114->errors[AK4114_QCRC_ERRORS]++;
c0 = (ak4114->rcs0 & (AK4114_QINT | AK4114_CINT | AK4114_PEM | AK4114_AUDION | AK4114_AUTO | AK4114_UNLCK)) ^
(rcs0 & (AK4114_QINT | AK4114_CINT | AK4114_PEM | AK4114_AUDION | AK4114_AUTO | AK4114_UNLCK));
c1 = (ak4114->rcs1 & 0xf0) ^ (rcs1 & 0xf0);
diff --git a/sound/i2c/other/ak4117.c b/sound/i2c/other/ak4117.c
index 0702f0552d19..3ab099fb8c15 100644
--- a/sound/i2c/other/ak4117.c
+++ b/sound/i2c/other/ak4117.c
@@ -168,12 +168,11 @@ static int snd_ak4117_in_error_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct ak4117 *chip = snd_kcontrol_chip(kcontrol);
- long *ptr;
spin_lock_irq(&chip->lock);
- ptr = (long *)(((char *)chip) + kcontrol->private_value);
- ucontrol->value.integer.value[0] = *ptr;
- *ptr = 0;
+ ucontrol->value.integer.value[0] =
+ chip->errors[kcontrol->private_value];
+ chip->errors[kcontrol->private_value] = 0;
spin_unlock_irq(&chip->lock);
return 0;
}
@@ -328,7 +327,7 @@ static struct snd_kcontrol_new snd_ak4117_iec958_controls[] = {
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = snd_ak4117_in_error_info,
.get = snd_ak4117_in_error_get,
- .private_value = offsetof(struct ak4117, parity_errors),
+ .private_value = AK4117_PARITY_ERRORS,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -336,7 +335,7 @@ static struct snd_kcontrol_new snd_ak4117_iec958_controls[] = {
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = snd_ak4117_in_error_info,
.get = snd_ak4117_in_error_get,
- .private_value = offsetof(struct ak4117, v_bit_errors),
+ .private_value = AK4117_V_BIT_ERRORS,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -344,7 +343,7 @@ static struct snd_kcontrol_new snd_ak4117_iec958_controls[] = {
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = snd_ak4117_in_error_info,
.get = snd_ak4117_in_error_get,
- .private_value = offsetof(struct ak4117, ccrc_errors),
+ .private_value = AK4117_CCRC_ERRORS,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -352,7 +351,7 @@ static struct snd_kcontrol_new snd_ak4117_iec958_controls[] = {
.access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
.info = snd_ak4117_in_error_info,
.get = snd_ak4117_in_error_get,
- .private_value = offsetof(struct ak4117, qcrc_errors),
+ .private_value = AK4117_QCRC_ERRORS,
},
{
.iface = SNDRV_CTL_ELEM_IFACE_PCM,
@@ -470,13 +469,13 @@ int snd_ak4117_check_rate_and_errors(struct ak4117 *ak4117, unsigned int flags)
// printk(KERN_DEBUG "AK IRQ: rcs0 = 0x%x, rcs1 = 0x%x, rcs2 = 0x%x\n", rcs0, rcs1, rcs2);
spin_lock_irqsave(&ak4117->lock, _flags);
if (rcs0 & AK4117_PAR)
- ak4117->parity_errors++;
+ ak4117->errors[AK4117_PARITY_ERRORS]++;
if (rcs0 & AK4117_V)
- ak4117->v_bit_errors++;
+ ak4117->errors[AK4117_V_BIT_ERRORS]++;
if (rcs2 & AK4117_CCRC)
- ak4117->ccrc_errors++;
+ ak4117->errors[AK4117_CCRC_ERRORS]++;
if (rcs2 & AK4117_QCRC)
- ak4117->qcrc_errors++;
+ ak4117->errors[AK4117_QCRC_ERRORS]++;
c0 = (ak4117->rcs0 & (AK4117_QINT | AK4117_CINT | AK4117_STC | AK4117_AUDION | AK4117_AUTO | AK4117_UNLCK)) ^
(rcs0 & (AK4117_QINT | AK4117_CINT | AK4117_STC | AK4117_AUDION | AK4117_AUTO | AK4117_UNLCK));
c1 = (ak4117->rcs1 & (AK4117_DTSCD | AK4117_NPCM | AK4117_PEM | 0x0f)) ^
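
The ak4113/ak4114/ak4117 hunks above stop storing an offsetof() into the chip struct in kcontrol->private_value and instead store an index into a per-chip errors[] array, so the read-and-clear control handler no longer does pointer arithmetic on the struct. A sketch of the resulting access pattern, using a hypothetical my_chip type and MY_NUM_ERRORS bound, looks like:

	/* Sketch, not the patch itself: private_value is now a plain array
	 * index, read and reset atomically under the chip lock. */
	static int my_in_error_get(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
	{
		struct my_chip *chip = snd_kcontrol_chip(kcontrol);
		unsigned int idx = kcontrol->private_value; /* e.g. AK4113_PARITY_ERRORS */

		if (idx >= MY_NUM_ERRORS)	/* MY_NUM_ERRORS is hypothetical */
			return -EINVAL;
		spin_lock_irq(&chip->lock);
		ucontrol->value.integer.value[0] = chip->errors[idx];
		chip->errors[idx] = 0;		/* read-and-clear semantics */
		spin_unlock_irq(&chip->lock);
		return 0;
	}
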
diff --git a/sound/isa/Kconfig b/sound/isa/Kconfig
index 37adcc6cbe6b..cb54d9c0a77f 100644
--- a/sound/isa/Kconfig
+++ b/sound/isa/Kconfig
@@ -377,6 +377,7 @@ config SND_SBAWE
select SND_OPL3_LIB
select SND_MPU401_UART
select SND_SB16_DSP
+ select SND_SEQ_DEVICE if SND_SEQUENCER != n
help
Say Y here to include support for Sound Blaster AWE soundcards
(including the Plug and Play version).
@@ -384,6 +385,13 @@ config SND_SBAWE
To compile this driver as a module, choose M here: the module
will be called snd-sbawe.
+# select SEQ stuff to min(SND_SEQUENCER,SND_XXX)
+config SND_SBAWE_SEQ
+ def_tristate SND_SEQUENCER && SND_SBAWE
+ select SND_SEQ_MIDI_EMUL
+ select SND_SEQ_VIRMIDI
+ select SND_SYNTH_EMUX
+
config SND_SB16_CSP
bool "Sound Blaster 16/AWE CSP support"
depends on (SND_SB16 || SND_SBAWE) && (BROKEN || !PPC)
diff --git a/sound/isa/cmi8328.c b/sound/isa/cmi8328.c
index 8e1756c3b9bb..d09e456107ad 100644
--- a/sound/isa/cmi8328.c
+++ b/sound/isa/cmi8328.c
@@ -26,7 +26,7 @@ MODULE_AUTHOR("Ondrej Zary <[email protected]>");
MODULE_DESCRIPTION("C-Media CMI8328");
MODULE_LICENSE("GPL");
-#if defined(CONFIG_GAMEPORT) || defined(CONFIG_GAMEPORT_MODULE)
+#if IS_ENABLED(CONFIG_GAMEPORT)
#define SUPPORT_JOYSTICK 1
#endif
diff --git a/sound/isa/cs423x/cs4236_lib.c b/sound/isa/cs423x/cs4236_lib.c
index 2b7cc596f4c6..2012936f6756 100644
--- a/sound/isa/cs423x/cs4236_lib.c
+++ b/sound/isa/cs423x/cs4236_lib.c
@@ -138,7 +138,7 @@ static unsigned char snd_cs4236_ctrl_in(struct snd_wss *chip, unsigned char reg)
#define CLOCKS 8
-static struct snd_ratnum clocks[CLOCKS] = {
+static const struct snd_ratnum clocks[CLOCKS] = {
{ .num = 16934400, .den_min = 353, .den_max = 353, .den_step = 1 },
{ .num = 16934400, .den_min = 529, .den_max = 529, .den_step = 1 },
{ .num = 16934400, .den_min = 617, .den_max = 617, .den_step = 1 },
@@ -149,7 +149,7 @@ static struct snd_ratnum clocks[CLOCKS] = {
{ .num = 16934400/16, .den_min = 21, .den_max = 192, .den_step = 1 }
};
-static struct snd_pcm_hw_constraint_ratnums hw_constraints_clocks = {
+static const struct snd_pcm_hw_constraint_ratnums hw_constraints_clocks = {
.nrats = CLOCKS,
.rats = clocks,
};
diff --git a/sound/isa/es1688/es1688_lib.c b/sound/isa/es1688/es1688_lib.c
index 81cf26fa28d6..5d3df96c5e5b 100644
--- a/sound/isa/es1688/es1688_lib.c
+++ b/sound/isa/es1688/es1688_lib.c
@@ -290,7 +290,7 @@ static int snd_es1688_init(struct snd_es1688 * chip, int enable)
*/
-static struct snd_ratnum clocks[2] = {
+static const struct snd_ratnum clocks[2] = {
{
.num = 795444,
.den_min = 1,
@@ -305,7 +305,7 @@ static struct snd_ratnum clocks[2] = {
}
};
-static struct snd_pcm_hw_constraint_ratnums hw_constraints_clocks = {
+static const struct snd_pcm_hw_constraint_ratnums hw_constraints_clocks = {
.nrats = 2,
.rats = clocks,
};
diff --git a/sound/isa/es18xx.c b/sound/isa/es18xx.c
index 0cabe2b8974f..ae17a6584061 100644
--- a/sound/isa/es18xx.c
+++ b/sound/isa/es18xx.c
@@ -369,7 +369,7 @@ static int snd_es18xx_reset_fifo(struct snd_es18xx *chip)
return 0;
}
-static struct snd_ratnum new_clocks[2] = {
+static const struct snd_ratnum new_clocks[2] = {
{
.num = 793800,
.den_min = 1,
@@ -384,12 +384,12 @@ static struct snd_ratnum new_clocks[2] = {
}
};
-static struct snd_pcm_hw_constraint_ratnums new_hw_constraints_clocks = {
+static const struct snd_pcm_hw_constraint_ratnums new_hw_constraints_clocks = {
.nrats = 2,
.rats = new_clocks,
};
-static struct snd_ratnum old_clocks[2] = {
+static const struct snd_ratnum old_clocks[2] = {
{
.num = 795444,
.den_min = 1,
@@ -404,7 +404,7 @@ static struct snd_ratnum old_clocks[2] = {
}
};
-static struct snd_pcm_hw_constraint_ratnums old_hw_constraints_clocks = {
+static const struct snd_pcm_hw_constraint_ratnums old_hw_constraints_clocks = {
.nrats = 2,
.rats = old_clocks,
};
diff --git a/sound/isa/gus/gus_main.c b/sound/isa/gus/gus_main.c
index 4490ee442ff4..3cf9b13c780a 100644
--- a/sound/isa/gus/gus_main.c
+++ b/sound/isa/gus/gus_main.c
@@ -82,7 +82,7 @@ static int snd_gus_joystick_put(struct snd_kcontrol *kcontrol, struct snd_ctl_el
return change;
}
-static struct snd_kcontrol_new snd_gus_joystick_control = {
+static const struct snd_kcontrol_new snd_gus_joystick_control = {
.iface = SNDRV_CTL_ELEM_IFACE_CARD,
.name = "Joystick Speed",
.info = snd_gus_joystick_info,
diff --git a/sound/isa/gus/gus_pcm.c b/sound/isa/gus/gus_pcm.c
index 06505999155f..6b3da01a93b7 100644
--- a/sound/isa/gus/gus_pcm.c
+++ b/sound/isa/gus/gus_pcm.c
@@ -61,8 +61,6 @@ struct gus_pcm_private {
int final_volume;
};
-static int snd_gf1_pcm_use_dma = 1;
-
static void snd_gf1_pcm_block_change_ack(struct snd_gus_card * gus, void *private_data)
{
struct gus_pcm_private *pcmp = private_data;
@@ -355,66 +353,83 @@ static int snd_gf1_pcm_poke_block(struct snd_gus_card *gus, unsigned char *buf,
return 0;
}
-static int snd_gf1_pcm_playback_copy(struct snd_pcm_substream *substream,
- int voice,
- snd_pcm_uframes_t pos,
- void __user *src,
- snd_pcm_uframes_t count)
+static int get_bpos(struct gus_pcm_private *pcmp, int voice, unsigned int pos,
+ unsigned int len)
{
- struct snd_pcm_runtime *runtime = substream->runtime;
- struct gus_pcm_private *pcmp = runtime->private_data;
- unsigned int bpos, len;
-
- bpos = samples_to_bytes(runtime, pos) + (voice * (pcmp->dma_size / 2));
- len = samples_to_bytes(runtime, count);
+ unsigned int bpos = pos + (voice * (pcmp->dma_size / 2));
if (snd_BUG_ON(bpos > pcmp->dma_size))
return -EIO;
if (snd_BUG_ON(bpos + len > pcmp->dma_size))
return -EIO;
+ return bpos;
+}
+
+static int playback_copy_ack(struct snd_pcm_substream *substream,
+ unsigned int bpos, unsigned int len)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct gus_pcm_private *pcmp = runtime->private_data;
+ struct snd_gus_card *gus = pcmp->gus;
+ int w16, invert;
+
+ if (len > 32)
+ return snd_gf1_pcm_block_change(substream, bpos,
+ pcmp->memory + bpos, len);
+
+ w16 = (snd_pcm_format_width(runtime->format) == 16);
+ invert = snd_pcm_format_unsigned(runtime->format);
+ return snd_gf1_pcm_poke_block(gus, runtime->dma_area + bpos,
+ pcmp->memory + bpos, len, w16, invert);
+}
+
+static int snd_gf1_pcm_playback_copy(struct snd_pcm_substream *substream,
+ int voice, unsigned long pos,
+ void __user *src, unsigned long count)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct gus_pcm_private *pcmp = runtime->private_data;
+ unsigned int len = count;
+ int bpos;
+
+ bpos = get_bpos(pcmp, voice, pos, len);
+ if (bpos < 0)
+ return pos;
if (copy_from_user(runtime->dma_area + bpos, src, len))
return -EFAULT;
- if (snd_gf1_pcm_use_dma && len > 32) {
- return snd_gf1_pcm_block_change(substream, bpos, pcmp->memory + bpos, len);
- } else {
- struct snd_gus_card *gus = pcmp->gus;
- int err, w16, invert;
+ return playback_copy_ack(substream, bpos, len);
+}
- w16 = (snd_pcm_format_width(runtime->format) == 16);
- invert = snd_pcm_format_unsigned(runtime->format);
- if ((err = snd_gf1_pcm_poke_block(gus, runtime->dma_area + bpos, pcmp->memory + bpos, len, w16, invert)) < 0)
- return err;
- }
- return 0;
+static int snd_gf1_pcm_playback_copy_kernel(struct snd_pcm_substream *substream,
+ int voice, unsigned long pos,
+ void *src, unsigned long count)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct gus_pcm_private *pcmp = runtime->private_data;
+ unsigned int len = count;
+ int bpos;
+
+ bpos = get_bpos(pcmp, voice, pos, len);
+ if (bpos < 0)
+ return pos;
+ memcpy(runtime->dma_area + bpos, src, len);
+ return playback_copy_ack(substream, bpos, len);
}
static int snd_gf1_pcm_playback_silence(struct snd_pcm_substream *substream,
- int voice,
- snd_pcm_uframes_t pos,
- snd_pcm_uframes_t count)
+ int voice, unsigned long pos,
+ unsigned long count)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct gus_pcm_private *pcmp = runtime->private_data;
- unsigned int bpos, len;
+ unsigned int len = count;
+ int bpos;
- bpos = samples_to_bytes(runtime, pos) + (voice * (pcmp->dma_size / 2));
- len = samples_to_bytes(runtime, count);
- if (snd_BUG_ON(bpos > pcmp->dma_size))
- return -EIO;
- if (snd_BUG_ON(bpos + len > pcmp->dma_size))
- return -EIO;
- snd_pcm_format_set_silence(runtime->format, runtime->dma_area + bpos, count);
- if (snd_gf1_pcm_use_dma && len > 32) {
- return snd_gf1_pcm_block_change(substream, bpos, pcmp->memory + bpos, len);
- } else {
- struct snd_gus_card *gus = pcmp->gus;
- int err, w16, invert;
-
- w16 = (snd_pcm_format_width(runtime->format) == 16);
- invert = snd_pcm_format_unsigned(runtime->format);
- if ((err = snd_gf1_pcm_poke_block(gus, runtime->dma_area + bpos, pcmp->memory + bpos, len, w16, invert)) < 0)
- return err;
- }
- return 0;
+ bpos = get_bpos(pcmp, voice, pos, len);
+ if (bpos < 0)
+ return pos;
+ snd_pcm_format_set_silence(runtime->format, runtime->dma_area + bpos,
+ bytes_to_samples(runtime, count));
+ return playback_copy_ack(substream, bpos, len);
}
static int snd_gf1_pcm_playback_hw_params(struct snd_pcm_substream *substream,
@@ -547,14 +562,14 @@ static snd_pcm_uframes_t snd_gf1_pcm_playback_pointer(struct snd_pcm_substream *
return pos;
}
-static struct snd_ratnum clock = {
+static const struct snd_ratnum clock = {
.num = 9878400/16,
.den_min = 2,
.den_max = 257,
.den_step = 1,
};
-static struct snd_pcm_hw_constraint_ratnums hw_constraints_clocks = {
+static const struct snd_pcm_hw_constraint_ratnums hw_constraints_clocks = {
.nrats = 1,
.rats = &clock,
};
@@ -809,7 +824,7 @@ static int snd_gf1_pcm_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_
return change;
}
-static struct snd_kcontrol_new snd_gf1_pcm_volume_control =
+static const struct snd_kcontrol_new snd_gf1_pcm_volume_control =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Volume",
@@ -818,7 +833,7 @@ static struct snd_kcontrol_new snd_gf1_pcm_volume_control =
.put = snd_gf1_pcm_volume_put
};
-static struct snd_kcontrol_new snd_gf1_pcm_volume_control1 =
+static const struct snd_kcontrol_new snd_gf1_pcm_volume_control1 =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "GPCM Playback Volume",
@@ -836,8 +851,9 @@ static struct snd_pcm_ops snd_gf1_pcm_playback_ops = {
.prepare = snd_gf1_pcm_playback_prepare,
.trigger = snd_gf1_pcm_playback_trigger,
.pointer = snd_gf1_pcm_playback_pointer,
- .copy = snd_gf1_pcm_playback_copy,
- .silence = snd_gf1_pcm_playback_silence,
+ .copy_user = snd_gf1_pcm_playback_copy,
+ .copy_kernel = snd_gf1_pcm_playback_copy_kernel,
+ .fill_silence = snd_gf1_pcm_playback_silence,
};
static struct snd_pcm_ops snd_gf1_pcm_capture_ops = {
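
The gus_pcm.c conversion above is the pattern repeated across this series: the old frames-based .copy/.silence callbacks become byte-based .copy_user/.copy_kernel/.fill_silence callbacks that share a bounds-check helper and a common completion path. Roughly, with placeholder my_* names and bounds checking plus the hardware-specific transfer kick elided, the trio reduces to:

	/* Illustrative shape only; "my_commit" is a hypothetical stand-in for
	 * the driver's DMA/PIO kick (snd_gf1_pcm_block_change/poke above). */
	static int my_copy_user(struct snd_pcm_substream *ss, int ch,
				unsigned long pos, void __user *src,
				unsigned long bytes)
	{
		if (copy_from_user(ss->runtime->dma_area + pos, src, bytes))
			return -EFAULT;
		return my_commit(ss, pos, bytes);
	}

	static int my_copy_kernel(struct snd_pcm_substream *ss, int ch,
				  unsigned long pos, void *src,
				  unsigned long bytes)
	{
		memcpy(ss->runtime->dma_area + pos, src, bytes);
		return my_commit(ss, pos, bytes);
	}

	static int my_fill_silence(struct snd_pcm_substream *ss, int ch,
				   unsigned long pos, unsigned long bytes)
	{
		snd_pcm_format_set_silence(ss->runtime->format,
					   ss->runtime->dma_area + pos,
					   bytes_to_samples(ss->runtime, bytes));
		return my_commit(ss, pos, bytes);
	}

Note that the new callbacks receive pos and count in bytes rather than frames, which is why the samples_to_bytes() conversions disappear from the copy paths above.
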
diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c
index ec180708f160..d56973b770c7 100644
--- a/sound/isa/sb/emu8000.c
+++ b/sound/isa/sb/emu8000.c
@@ -1138,7 +1138,7 @@ snd_emu8000_new(struct snd_card *card, int index, long port, int seq_ports,
snd_emu8000_free(hw);
return err;
}
-#if defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE))
+#if IS_ENABLED(CONFIG_SND_SEQUENCER)
if (snd_seq_device_new(card, index, SNDRV_SEQ_DEV_ID_EMU8000,
sizeof(struct snd_emu8000*), &awe) >= 0) {
strcpy(awe->name, "EMU-8000");
diff --git a/sound/isa/sb/emu8000_callback.c b/sound/isa/sb/emu8000_callback.c
index 72a9ac5efb40..d28d712f99f4 100644
--- a/sound/isa/sb/emu8000_callback.c
+++ b/sound/isa/sb/emu8000_callback.c
@@ -36,7 +36,7 @@ static void reset_voice(struct snd_emux *emu, int ch);
static void terminate_voice(struct snd_emux_voice *vp);
static void sysex(struct snd_emux *emu, char *buf, int len, int parsed,
struct snd_midi_channel_set *chset);
-#ifdef CONFIG_SND_SEQUENCER_OSS
+#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
static int oss_ioctl(struct snd_emux *emu, int cmd, int p1, int p2);
#endif
static int load_fx(struct snd_emux *emu, int type, int mode,
@@ -76,7 +76,7 @@ static struct snd_emux_operators emu8000_ops = {
.sample_reset = snd_emu8000_sample_reset,
.load_fx = load_fx,
.sysex = sysex,
-#ifdef CONFIG_SND_SEQUENCER_OSS
+#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
.oss_ioctl = oss_ioctl,
#endif
};
@@ -477,7 +477,7 @@ sysex(struct snd_emux *emu, char *buf, int len, int parsed, struct snd_midi_chan
}
-#ifdef CONFIG_SND_SEQUENCER_OSS
+#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
/*
* OSS ioctl callback
*/
diff --git a/sound/isa/sb/emu8000_pcm.c b/sound/isa/sb/emu8000_pcm.c
index 32f234f494e5..2ee8d67871ec 100644
--- a/sound/isa/sb/emu8000_pcm.c
+++ b/sound/isa/sb/emu8000_pcm.c
@@ -422,143 +422,148 @@ do { \
return -EAGAIN;\
} while (0)
+enum {
+ COPY_USER, COPY_KERNEL, FILL_SILENCE,
+};
+
+#define GET_VAL(sval, buf, mode) \
+ do { \
+ switch (mode) { \
+ case FILL_SILENCE: \
+ sval = 0; \
+ break; \
+ case COPY_KERNEL: \
+ sval = *buf++; \
+ break; \
+ default: \
+ if (get_user(sval, (unsigned short __user *)buf)) \
+ return -EFAULT; \
+ buf++; \
+ break; \
+ } \
+ } while (0)
#ifdef USE_NONINTERLEAVE
+
+#define LOOP_WRITE(rec, offset, _buf, count, mode) \
+ do { \
+ struct snd_emu8000 *emu = (rec)->emu; \
+ unsigned short *buf = (unsigned short *)(_buf); \
+ snd_emu8000_write_wait(emu, 1); \
+ EMU8000_SMALW_WRITE(emu, offset); \
+ while (count > 0) { \
+ unsigned short sval; \
+ CHECK_SCHEDULER(); \
+ GET_VAL(sval, buf, mode); \
+ EMU8000_SMLD_WRITE(emu, sval); \
+ count--; \
+ } \
+ } while (0)
+
/* copy one channel block */
-static int emu8k_transfer_block(struct snd_emu8000 *emu, int offset, unsigned short *buf, int count)
+static int emu8k_pcm_copy(struct snd_pcm_substream *subs,
+ int voice, unsigned long pos,
+ void __user *src, unsigned long count)
{
- EMU8000_SMALW_WRITE(emu, offset);
- while (count > 0) {
- unsigned short sval;
- CHECK_SCHEDULER();
- if (get_user(sval, buf))
- return -EFAULT;
- EMU8000_SMLD_WRITE(emu, sval);
- buf++;
- count--;
- }
+ struct snd_emu8k_pcm *rec = subs->runtime->private_data;
+
+ /* convert to word unit */
+ pos = (pos << 1) + rec->loop_start[voice];
+ count <<= 1;
+	LOOP_WRITE(rec, pos, src, count, COPY_USER);
return 0;
}
-static int emu8k_pcm_copy(struct snd_pcm_substream *subs,
- int voice,
- snd_pcm_uframes_t pos,
- void *src,
- snd_pcm_uframes_t count)
+static int emu8k_pcm_copy_kernel(struct snd_pcm_substream *subs,
+ int voice, unsigned long pos,
+ void *src, unsigned long count)
{
struct snd_emu8k_pcm *rec = subs->runtime->private_data;
- struct snd_emu8000 *emu = rec->emu;
-
- snd_emu8000_write_wait(emu, 1);
- if (voice == -1) {
- unsigned short *buf = src;
- int i, err;
- count /= rec->voices;
- for (i = 0; i < rec->voices; i++) {
- err = emu8k_transfer_block(emu, pos + rec->loop_start[i], buf, count);
- if (err < 0)
- return err;
- buf += count;
- }
- return 0;
- } else {
- return emu8k_transfer_block(emu, pos + rec->loop_start[voice], src, count);
- }
-}
-/* make a channel block silence */
-static int emu8k_silence_block(struct snd_emu8000 *emu, int offset, int count)
-{
- EMU8000_SMALW_WRITE(emu, offset);
- while (count > 0) {
- CHECK_SCHEDULER();
- EMU8000_SMLD_WRITE(emu, 0);
- count--;
- }
+ /* convert to word unit */
+ pos = (pos << 1) + rec->loop_start[voice];
+ count <<= 1;
+ LOOP_WRITE(rec, pos, src, count, COPY_KERNEL);
return 0;
}
+/* make a channel block silence */
static int emu8k_pcm_silence(struct snd_pcm_substream *subs,
- int voice,
- snd_pcm_uframes_t pos,
- snd_pcm_uframes_t count)
+ int voice, unsigned long pos, unsigned long count)
{
struct snd_emu8k_pcm *rec = subs->runtime->private_data;
- struct snd_emu8000 *emu = rec->emu;
-
- snd_emu8000_write_wait(emu, 1);
- if (voice == -1 && rec->voices == 1)
- voice = 0;
- if (voice == -1) {
- int err;
- err = emu8k_silence_block(emu, pos + rec->loop_start[0], count / 2);
- if (err < 0)
- return err;
- return emu8k_silence_block(emu, pos + rec->loop_start[1], count / 2);
- } else {
- return emu8k_silence_block(emu, pos + rec->loop_start[voice], count);
- }
+
+ /* convert to word unit */
+ pos = (pos << 1) + rec->loop_start[voice];
+ count <<= 1;
+ LOOP_WRITE(rec, pos, NULL, count, FILL_SILENCE);
+ return 0;
}
#else /* interleave */
+#define LOOP_WRITE(rec, pos, _buf, count, mode) \
+ do { \
+ struct snd_emu8000 *emu = rec->emu; \
+ unsigned short *buf = (unsigned short *)(_buf); \
+ snd_emu8000_write_wait(emu, 1); \
+ EMU8000_SMALW_WRITE(emu, pos + rec->loop_start[0]); \
+ if (rec->voices > 1) \
+ EMU8000_SMARW_WRITE(emu, pos + rec->loop_start[1]); \
+ while (count > 0) { \
+ unsigned short sval; \
+ CHECK_SCHEDULER(); \
+ GET_VAL(sval, buf, mode); \
+ EMU8000_SMLD_WRITE(emu, sval); \
+ if (rec->voices > 1) { \
+ CHECK_SCHEDULER(); \
+ GET_VAL(sval, buf, mode); \
+ EMU8000_SMRD_WRITE(emu, sval); \
+ } \
+ count--; \
+ } \
+ } while (0)
+
+
/*
 * copying the interleaved data can be done easily by using the
 * DMA "left" and "right" channels on the emu8k engine.
*/
static int emu8k_pcm_copy(struct snd_pcm_substream *subs,
- int voice,
- snd_pcm_uframes_t pos,
- void __user *src,
- snd_pcm_uframes_t count)
+ int voice, unsigned long pos,
+ void __user *src, unsigned long count)
{
struct snd_emu8k_pcm *rec = subs->runtime->private_data;
- struct snd_emu8000 *emu = rec->emu;
- unsigned short __user *buf = src;
- snd_emu8000_write_wait(emu, 1);
- EMU8000_SMALW_WRITE(emu, pos + rec->loop_start[0]);
- if (rec->voices > 1)
- EMU8000_SMARW_WRITE(emu, pos + rec->loop_start[1]);
-
- while (count-- > 0) {
- unsigned short sval;
- CHECK_SCHEDULER();
- if (get_user(sval, buf))
- return -EFAULT;
- EMU8000_SMLD_WRITE(emu, sval);
- buf++;
- if (rec->voices > 1) {
- CHECK_SCHEDULER();
- if (get_user(sval, buf))
- return -EFAULT;
- EMU8000_SMRD_WRITE(emu, sval);
- buf++;
- }
- }
+ /* convert to frames */
+ pos = bytes_to_frames(subs->runtime, pos);
+ count = bytes_to_frames(subs->runtime, count);
+ LOOP_WRITE(rec, pos, src, count, COPY_USER);
+ return 0;
+}
+
+static int emu8k_pcm_copy_kernel(struct snd_pcm_substream *subs,
+ int voice, unsigned long pos,
+ void *src, unsigned long count)
+{
+ struct snd_emu8k_pcm *rec = subs->runtime->private_data;
+
+ /* convert to frames */
+ pos = bytes_to_frames(subs->runtime, pos);
+ count = bytes_to_frames(subs->runtime, count);
+ LOOP_WRITE(rec, pos, src, count, COPY_KERNEL);
return 0;
}
static int emu8k_pcm_silence(struct snd_pcm_substream *subs,
- int voice,
- snd_pcm_uframes_t pos,
- snd_pcm_uframes_t count)
+ int voice, unsigned long pos, unsigned long count)
{
struct snd_emu8k_pcm *rec = subs->runtime->private_data;
- struct snd_emu8000 *emu = rec->emu;
- snd_emu8000_write_wait(emu, 1);
- EMU8000_SMALW_WRITE(emu, rec->loop_start[0] + pos);
- if (rec->voices > 1)
- EMU8000_SMARW_WRITE(emu, rec->loop_start[1] + pos);
- while (count-- > 0) {
- CHECK_SCHEDULER();
- EMU8000_SMLD_WRITE(emu, 0);
- if (rec->voices > 1) {
- CHECK_SCHEDULER();
- EMU8000_SMRD_WRITE(emu, 0);
- }
- }
+ /* convert to frames */
+ pos = bytes_to_frames(subs->runtime, pos);
+ count = bytes_to_frames(subs->runtime, count);
+ LOOP_WRITE(rec, pos, NULL, count, FILL_SILENCE);
return 0;
}
#endif
@@ -674,8 +679,9 @@ static struct snd_pcm_ops emu8k_pcm_ops = {
.prepare = emu8k_pcm_prepare,
.trigger = emu8k_pcm_trigger,
.pointer = emu8k_pcm_pointer,
- .copy = emu8k_pcm_copy,
- .silence = emu8k_pcm_silence,
+ .copy_user = emu8k_pcm_copy,
+ .copy_kernel = emu8k_pcm_copy_kernel,
+ .fill_silence = emu8k_pcm_silence,
};
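
In emu8000_pcm.c the same conversion folds the three cases into one LOOP_WRITE macro whose GET_VAL step either reads from user space, reads from a kernel buffer, or yields zero for silence. The same idea without macros could be written, purely as an illustration, as:

	enum copy_mode { MODE_USER, MODE_KERNEL, MODE_SILENCE };

	/* Hypothetical, macro-free rendering of GET_VAL: fetch sample i of
	 * the source buffer according to the copy mode. */
	static int get_sample(unsigned short *sval, const void *buf,
			      unsigned long i, enum copy_mode mode)
	{
		switch (mode) {
		case MODE_SILENCE:
			*sval = 0;
			return 0;
		case MODE_KERNEL:
			*sval = ((const unsigned short *)buf)[i];
			return 0;
		default:
			return get_user(*sval,
					(const unsigned short __user *)buf + i) ?
				-EFAULT : 0;
		}
	}

Keeping it as a macro in the driver avoids a call per sample on this hot path; the COPY_USER, COPY_KERNEL and FILL_SILENCE enum values in the hunks above play exactly this role.
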
diff --git a/sound/isa/sb/sb16.c b/sound/isa/sb/sb16.c
index 3b2e4f405ff2..917a93d696c3 100644
--- a/sound/isa/sb/sb16.c
+++ b/sound/isa/sb/sb16.c
@@ -62,7 +62,7 @@ MODULE_SUPPORTED_DEVICE("{{Creative Labs,SB AWE 32},"
#define SNDRV_DEBUG_IRQ
#endif
-#if defined(SNDRV_SBAWE) && (defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE)))
+#if defined(SNDRV_SBAWE) && IS_ENABLED(CONFIG_SND_SEQUENCER)
#define SNDRV_SBAWE_EMU8000
#endif
diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c
index 48da2276683d..fa5780bb0c68 100644
--- a/sound/isa/sb/sb16_csp.c
+++ b/sound/isa/sb/sb16_csp.c
@@ -1029,7 +1029,7 @@ static int snd_sb_qsound_space_put(struct snd_kcontrol *kcontrol, struct snd_ctl
return change;
}
-static struct snd_kcontrol_new snd_sb_qsound_switch = {
+static const struct snd_kcontrol_new snd_sb_qsound_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "3D Control - Switch",
.info = snd_sb_qsound_switch_info,
@@ -1037,7 +1037,7 @@ static struct snd_kcontrol_new snd_sb_qsound_switch = {
.put = snd_sb_qsound_switch_put
};
-static struct snd_kcontrol_new snd_sb_qsound_space = {
+static const struct snd_kcontrol_new snd_sb_qsound_space = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "3D Control - Space",
.info = snd_sb_qsound_space_info,
diff --git a/sound/isa/sb/sb16_main.c b/sound/isa/sb/sb16_main.c
index 8b2d6c6bfe97..4be1350f6649 100644
--- a/sound/isa/sb/sb16_main.c
+++ b/sound/isa/sb/sb16_main.c
@@ -737,7 +737,7 @@ static int snd_sb16_dma_control_put(struct snd_kcontrol *kcontrol, struct snd_ct
return change;
}
-static struct snd_kcontrol_new snd_sb16_dma_control = {
+static const struct snd_kcontrol_new snd_sb16_dma_control = {
.iface = SNDRV_CTL_ELEM_IFACE_CARD,
.name = "16-bit DMA Allocation",
.info = snd_sb16_dma_control_info,
diff --git a/sound/isa/sb/sb8_main.c b/sound/isa/sb/sb8_main.c
index 9043397fe62f..0f302be4fb6d 100644
--- a/sound/isa/sb/sb8_main.c
+++ b/sound/isa/sb/sb8_main.c
@@ -46,19 +46,19 @@ MODULE_LICENSE("GPL");
#define SB8_DEN(v) ((SB8_CLOCK + (v) / 2) / (v))
#define SB8_RATE(v) (SB8_CLOCK / SB8_DEN(v))
-static struct snd_ratnum clock = {
+static const struct snd_ratnum clock = {
.num = SB8_CLOCK,
.den_min = 1,
.den_max = 256,
.den_step = 1,
};
-static struct snd_pcm_hw_constraint_ratnums hw_constraints_clock = {
+static const struct snd_pcm_hw_constraint_ratnums hw_constraints_clock = {
.nrats = 1,
.rats = &clock,
};
-static struct snd_ratnum stereo_clocks[] = {
+static const struct snd_ratnum stereo_clocks[] = {
{
.num = SB8_CLOCK,
.den_min = SB8_DEN(22050),
diff --git a/sound/isa/sscape.c b/sound/isa/sscape.c
index 54f5758a1bb3..1cd2908e4f12 100644
--- a/sound/isa/sscape.c
+++ b/sound/isa/sscape.c
@@ -671,7 +671,7 @@ __skip_change:
return change;
}
-static struct snd_kcontrol_new midi_mixer_ctl = {
+static const struct snd_kcontrol_new midi_mixer_ctl = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "MIDI",
.info = sscape_midi_info,
diff --git a/sound/isa/wss/wss_lib.c b/sound/isa/wss/wss_lib.c
index 913b731d2236..9f4e2e34cbe1 100644
--- a/sound/isa/wss/wss_lib.c
+++ b/sound/isa/wss/wss_lib.c
@@ -69,12 +69,12 @@ static unsigned char freq_bits[14] = {
/* 48000 */ 0x0C | CS4231_XTAL1
};
-static unsigned int rates[14] = {
+static const unsigned int rates[14] = {
5510, 6620, 8000, 9600, 11025, 16000, 18900, 22050,
27042, 32000, 33075, 37800, 44100, 48000
};
-static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
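
A large share of the hunks in this area only add const to rate tables, snd_ratnum clocks and snd_kcontrol_new templates; the data is immutable and the constraint APIs take const pointers. For reference, such a list is typically wired up from a driver's .open callback roughly like this (illustrative my_* names, not taken from any one file above):

	static const unsigned int my_rates[] = {
		8000, 11025, 16000, 22050, 44100, 48000
	};

	static const struct snd_pcm_hw_constraint_list my_rate_constraints = {
		.count = ARRAY_SIZE(my_rates),
		.list  = my_rates,
		.mask  = 0,
	};

	static int my_pcm_open(struct snd_pcm_substream *substream)
	{
		/* Restrict the runtime to the rates the hardware clock supports. */
		return snd_pcm_hw_constraint_list(substream->runtime, 0,
						  SNDRV_PCM_HW_PARAM_RATE,
						  &my_rate_constraints);
	}
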
diff --git a/sound/mips/hal2.c b/sound/mips/hal2.c
index 00fc9241d266..3318c15e324a 100644
--- a/sound/mips/hal2.c
+++ b/sound/mips/hal2.c
@@ -264,7 +264,7 @@ static int hal2_gain_put(struct snd_kcontrol *kcontrol,
return old != new;
}
-static struct snd_kcontrol_new hal2_ctrl_headphone = {
+static const struct snd_kcontrol_new hal2_ctrl_headphone = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Headphone Playback Volume",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -274,7 +274,7 @@ static struct snd_kcontrol_new hal2_ctrl_headphone = {
.put = hal2_gain_put,
};
-static struct snd_kcontrol_new hal2_ctrl_mic = {
+static const struct snd_kcontrol_new hal2_ctrl_mic = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Mic Capture Volume",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -461,15 +461,15 @@ static int hal2_alloc_dmabuf(struct hal2_codec *codec)
int count = H2_BUF_SIZE / H2_BLOCK_SIZE;
int i;
- codec->buffer = dma_alloc_noncoherent(NULL, H2_BUF_SIZE,
- &buffer_dma, GFP_KERNEL);
+ codec->buffer = dma_alloc_attrs(NULL, H2_BUF_SIZE, &buffer_dma,
+ GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
if (!codec->buffer)
return -ENOMEM;
- desc = dma_alloc_noncoherent(NULL, count * sizeof(struct hal2_desc),
- &desc_dma, GFP_KERNEL);
+ desc = dma_alloc_attrs(NULL, count * sizeof(struct hal2_desc),
+ &desc_dma, GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
if (!desc) {
- dma_free_noncoherent(NULL, H2_BUF_SIZE,
- codec->buffer, buffer_dma);
+ dma_free_attrs(NULL, H2_BUF_SIZE, codec->buffer, buffer_dma,
+ DMA_ATTR_NON_CONSISTENT);
return -ENOMEM;
}
codec->buffer_dma = buffer_dma;
@@ -490,10 +490,10 @@ static int hal2_alloc_dmabuf(struct hal2_codec *codec)
static void hal2_free_dmabuf(struct hal2_codec *codec)
{
- dma_free_noncoherent(NULL, codec->desc_count * sizeof(struct hal2_desc),
- codec->desc, codec->desc_dma);
- dma_free_noncoherent(NULL, H2_BUF_SIZE, codec->buffer,
- codec->buffer_dma);
+ dma_free_attrs(NULL, codec->desc_count * sizeof(struct hal2_desc),
+ codec->desc, codec->desc_dma, DMA_ATTR_NON_CONSISTENT);
+ dma_free_attrs(NULL, H2_BUF_SIZE, codec->buffer, codec->buffer_dma,
+ DMA_ATTR_NON_CONSISTENT);
}
static struct snd_pcm_hardware hal2_pcm_hw = {
@@ -616,10 +616,9 @@ static int hal2_playback_ack(struct snd_pcm_substream *substream)
struct hal2_codec *dac = &hal2->dac;
dac->pcm_indirect.hw_queue_size = H2_BUF_SIZE / 2;
- snd_pcm_indirect_playback_transfer(substream,
- &dac->pcm_indirect,
- hal2_playback_transfer);
- return 0;
+ return snd_pcm_indirect_playback_transfer(substream,
+ &dac->pcm_indirect,
+ hal2_playback_transfer);
}
static int hal2_capture_open(struct snd_pcm_substream *substream)
@@ -707,10 +706,9 @@ static int hal2_capture_ack(struct snd_pcm_substream *substream)
struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream);
struct hal2_codec *adc = &hal2->adc;
- snd_pcm_indirect_capture_transfer(substream,
- &adc->pcm_indirect,
- hal2_capture_transfer);
- return 0;
+ return snd_pcm_indirect_capture_transfer(substream,
+ &adc->pcm_indirect,
+ hal2_capture_transfer);
}
static struct snd_pcm_ops hal2_playback_ops = {
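
The hal2.c ack hunks above propagate the int result of snd_pcm_indirect_playback_transfer()/snd_pcm_indirect_capture_transfer() instead of discarding it; the same pattern appears in cs46xx and emu10k1 further down. A playback ack then collapses to, schematically:

	/* Schematic only; "my_dev" and "my_transfer" are placeholders. */
	static int my_playback_ack(struct snd_pcm_substream *substream)
	{
		struct my_dev *dev = snd_pcm_substream_chip(substream);

		/* Any error reported by the indirect-transfer helper is now
		 * passed back to the PCM core instead of being dropped. */
		return snd_pcm_indirect_playback_transfer(substream,
							  &dev->pcm_indirect,
							  my_transfer);
	}
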
diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
index f07aa3993f83..0ebc1c3727df 100644
--- a/sound/mips/sgio2audio.c
+++ b/sound/mips/sgio2audio.c
@@ -230,7 +230,7 @@ static int sgio2audio_source_put(struct snd_kcontrol *kcontrol,
}
/* dac1/pcm0 mixer control */
-static struct snd_kcontrol_new sgio2audio_ctrl_pcm0 = {
+static const struct snd_kcontrol_new sgio2audio_ctrl_pcm0 = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Volume",
.index = 0,
@@ -242,7 +242,7 @@ static struct snd_kcontrol_new sgio2audio_ctrl_pcm0 = {
};
/* dac2/pcm1 mixer control */
-static struct snd_kcontrol_new sgio2audio_ctrl_pcm1 = {
+static const struct snd_kcontrol_new sgio2audio_ctrl_pcm1 = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Volume",
.index = 1,
@@ -254,7 +254,7 @@ static struct snd_kcontrol_new sgio2audio_ctrl_pcm1 = {
};
/* record level mixer control */
-static struct snd_kcontrol_new sgio2audio_ctrl_reclevel = {
+static const struct snd_kcontrol_new sgio2audio_ctrl_reclevel = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Capture Volume",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -265,7 +265,7 @@ static struct snd_kcontrol_new sgio2audio_ctrl_reclevel = {
};
/* record level source control */
-static struct snd_kcontrol_new sgio2audio_ctrl_recsource = {
+static const struct snd_kcontrol_new sgio2audio_ctrl_recsource = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Capture Source",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
@@ -275,7 +275,7 @@ static struct snd_kcontrol_new sgio2audio_ctrl_recsource = {
};
/* line mixer control */
-static struct snd_kcontrol_new sgio2audio_ctrl_line = {
+static const struct snd_kcontrol_new sgio2audio_ctrl_line = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Line Playback Volume",
.index = 0,
@@ -287,7 +287,7 @@ static struct snd_kcontrol_new sgio2audio_ctrl_line = {
};
/* cd mixer control */
-static struct snd_kcontrol_new sgio2audio_ctrl_cd = {
+static const struct snd_kcontrol_new sgio2audio_ctrl_cd = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Line Playback Volume",
.index = 1,
@@ -299,7 +299,7 @@ static struct snd_kcontrol_new sgio2audio_ctrl_cd = {
};
/* mic mixer control */
-static struct snd_kcontrol_new sgio2audio_ctrl_mic = {
+static const struct snd_kcontrol_new sgio2audio_ctrl_mic = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Mic Playback Volume",
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
diff --git a/sound/parisc/harmony.c b/sound/parisc/harmony.c
index 99b64cb3cef8..5911eb35c2a3 100644
--- a/sound/parisc/harmony.c
+++ b/sound/parisc/harmony.c
@@ -83,14 +83,14 @@ MODULE_DEVICE_TABLE(parisc, snd_harmony_devtable);
#define NAME "harmony"
#define PFX NAME ": "
-static unsigned int snd_harmony_rates[] = {
+static const unsigned int snd_harmony_rates[] = {
5512, 6615, 8000, 9600,
11025, 16000, 18900, 22050,
27428, 32000, 33075, 37800,
44100, 48000
};
-static unsigned int rate_bits[14] = {
+static const unsigned int rate_bits[14] = {
HARMONY_SR_5KHZ, HARMONY_SR_6KHZ, HARMONY_SR_8KHZ,
HARMONY_SR_9KHZ, HARMONY_SR_11KHZ, HARMONY_SR_16KHZ,
HARMONY_SR_18KHZ, HARMONY_SR_22KHZ, HARMONY_SR_27KHZ,
@@ -98,7 +98,7 @@ static unsigned int rate_bits[14] = {
HARMONY_SR_44KHZ, HARMONY_SR_48KHZ
};
-static struct snd_pcm_hw_constraint_list hw_constraint_rates = {
+static const struct snd_pcm_hw_constraint_list hw_constraint_rates = {
.count = ARRAY_SIZE(snd_harmony_rates),
.list = snd_harmony_rates,
.mask = 0,
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 32151d8c6bb8..d9f3fdb777e4 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -465,6 +465,7 @@ config SND_EMU10K1
select SND_RAWMIDI
select SND_AC97_CODEC
select SND_TIMER
+ select SND_SEQ_DEVICE if SND_SEQUENCER != n
depends on ZONE_DMA
help
Say Y to include support for Sound Blaster PCI 512, Live!,
@@ -477,6 +478,13 @@ config SND_EMU10K1
To compile this driver as a module, choose M here: the module
will be called snd-emu10k1.
+# select SEQ stuff to min(SND_SEQUENCER,SND_XXX)
+config SND_EMU10K1_SEQ
+ def_tristate SND_SEQUENCER && SND_EMU10K1
+ select SND_SEQ_MIDI_EMUL
+ select SND_SEQ_VIRMIDI
+ select SND_SYNTH_EMUX
+
config SND_EMU10K1X
tristate "Emu10k1X (Dell OEM Version)"
select SND_AC97_CODEC
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index 34bbc2e730a6..8567f1e5b9cf 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -1602,8 +1602,8 @@ static struct snd_pcm_hardware snd_ali_modem =
static int snd_ali_modem_open(struct snd_pcm_substream *substream, int rec,
int channel)
{
- static unsigned int rates[] = {8000, 9600, 12000, 16000};
- static struct snd_pcm_hw_constraint_list hw_constraint_rates = {
+ static const unsigned int rates[] = {8000, 9600, 12000, 16000};
+ static const struct snd_pcm_hw_constraint_list hw_constraint_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
index 40152feef1e7..52e0ea7b9b80 100644
--- a/sound/pci/atiixp_modem.c
+++ b/sound/pci/atiixp_modem.c
@@ -860,8 +860,8 @@ static int snd_atiixp_pcm_open(struct snd_pcm_substream *substream,
struct atiixp_modem *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
- static unsigned int rates[] = { 8000, 9600, 12000, 16000 };
- static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
+ static const unsigned int rates[] = { 8000, 9600, 12000, 16000 };
+ static const struct snd_pcm_hw_constraint_list hw_constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
diff --git a/sound/pci/au88x0/au88x0_pcm.c b/sound/pci/au88x0/au88x0_pcm.c
index 335979a753fe..1aa97012451d 100644
--- a/sound/pci/au88x0/au88x0_pcm.c
+++ b/sound/pci/au88x0/au88x0_pcm.c
@@ -112,11 +112,11 @@ static struct snd_pcm_hardware snd_vortex_playback_hw_wt = {
};
#endif
#ifdef CHIP_AU8830
-static unsigned int au8830_channels[3] = {
+static const unsigned int au8830_channels[3] = {
1, 2, 4,
};
-static struct snd_pcm_hw_constraint_list hw_constraints_au8830_channels = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_au8830_channels = {
.count = ARRAY_SIZE(au8830_channels),
.list = au8830_channels,
.mask = 0,
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index 79b2e6b7d88b..fc18c29a8173 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -2014,7 +2014,7 @@ static const struct snd_pcm_hardware snd_azf3328_hardware =
};
-static unsigned int snd_azf3328_fixed_rates[] = {
+static const unsigned int snd_azf3328_fixed_rates[] = {
AZF_FREQ_4000,
AZF_FREQ_4800,
AZF_FREQ_5512,
@@ -2031,7 +2031,7 @@ static unsigned int snd_azf3328_fixed_rates[] = {
AZF_FREQ_66200
};
-static struct snd_pcm_hw_constraint_list snd_azf3328_hw_constraints_rates = {
+static const struct snd_pcm_hw_constraint_list snd_azf3328_hw_constraints_rates = {
.count = ARRAY_SIZE(snd_azf3328_fixed_rates),
.list = snd_azf3328_fixed_rates,
.mask = 0,
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index 099efb046b1c..de0234294c25 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -401,13 +401,13 @@ static int snd_bt87x_set_digital_hw(struct snd_bt87x *chip, struct snd_pcm_runti
static int snd_bt87x_set_analog_hw(struct snd_bt87x *chip, struct snd_pcm_runtime *runtime)
{
- static struct snd_ratnum analog_clock = {
+ static const struct snd_ratnum analog_clock = {
.num = ANALOG_CLOCK,
.den_min = CLOCK_DIV_MIN,
.den_max = CLOCK_DIV_MAX,
.den_step = 1
};
- static struct snd_pcm_hw_constraint_ratnums constraint_rates = {
+ static const struct snd_pcm_hw_constraint_ratnums constraint_rates = {
.nrats = 1,
.rats = &analog_clock
};
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index 745a0a3743b4..a460cb63e971 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -719,18 +719,18 @@ static int snd_cmipci_hw_free(struct snd_pcm_substream *substream)
/*
*/
-static unsigned int hw_channels[] = {1, 2, 4, 6, 8};
-static struct snd_pcm_hw_constraint_list hw_constraints_channels_4 = {
+static const unsigned int hw_channels[] = {1, 2, 4, 6, 8};
+static const struct snd_pcm_hw_constraint_list hw_constraints_channels_4 = {
.count = 3,
.list = hw_channels,
.mask = 0,
};
-static struct snd_pcm_hw_constraint_list hw_constraints_channels_6 = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_channels_6 = {
.count = 4,
.list = hw_channels,
.mask = 0,
};
-static struct snd_pcm_hw_constraint_list hw_constraints_channels_8 = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_channels_8 = {
.count = 5,
.list = hw_channels,
.mask = 0,
@@ -1597,9 +1597,9 @@ static struct snd_pcm_hardware snd_cmipci_capture_spdif =
.fifo_size = 0,
};
-static unsigned int rate_constraints[] = { 5512, 8000, 11025, 16000, 22050,
+static const unsigned int rate_constraints[] = { 5512, 8000, 11025, 16000, 22050,
32000, 44100, 48000, 88200, 96000, 128000 };
-static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_rates = {
.count = ARRAY_SIZE(rate_constraints),
.list = rate_constraints,
.mask = 0,
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
index f870697aca67..ee7ba4b0b47b 100644
--- a/sound/pci/cs4281.c
+++ b/sound/pci/cs4281.c
@@ -1296,7 +1296,7 @@ static void snd_cs4281_free_gameport(struct cs4281 *chip)
#else
static inline int snd_cs4281_create_gameport(struct cs4281 *chip) { return -ENOSYS; }
static inline void snd_cs4281_free_gameport(struct cs4281 *chip) { }
-#endif /* CONFIG_GAMEPORT || (MODULE && CONFIG_GAMEPORT_MODULE) */
+#endif /* IS_REACHABLE(CONFIG_GAMEPORT) */
static int snd_cs4281_free(struct cs4281 *chip)
{
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index e4cf3187b4dd..709fb1a503b7 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -887,8 +887,8 @@ static int snd_cs46xx_playback_transfer(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_cs46xx_pcm * cpcm = runtime->private_data;
- snd_pcm_indirect_playback_transfer(substream, &cpcm->pcm_rec, snd_cs46xx_pb_trans_copy);
- return 0;
+ return snd_pcm_indirect_playback_transfer(substream, &cpcm->pcm_rec,
+ snd_cs46xx_pb_trans_copy);
}
static void snd_cs46xx_cp_trans_copy(struct snd_pcm_substream *substream,
@@ -903,8 +903,8 @@ static void snd_cs46xx_cp_trans_copy(struct snd_pcm_substream *substream,
static int snd_cs46xx_capture_transfer(struct snd_pcm_substream *substream)
{
struct snd_cs46xx *chip = snd_pcm_substream_chip(substream);
- snd_pcm_indirect_capture_transfer(substream, &chip->capt.pcm_rec, snd_cs46xx_cp_trans_copy);
- return 0;
+ return snd_pcm_indirect_capture_transfer(substream, &chip->capt.pcm_rec,
+ snd_cs46xx_cp_trans_copy);
}
static snd_pcm_uframes_t snd_cs46xx_playback_direct_pointer(struct snd_pcm_substream *substream)
@@ -1482,9 +1482,9 @@ static struct snd_pcm_hardware snd_cs46xx_capture =
#ifdef CONFIG_SND_CS46XX_NEW_DSP
-static unsigned int period_sizes[] = { 32, 64, 128, 256, 512, 1024, 2048 };
+static const unsigned int period_sizes[] = { 32, 64, 128, 256, 512, 1024, 2048 };
-static struct snd_pcm_hw_constraint_list hw_constraints_period_sizes = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_period_sizes = {
.count = ARRAY_SIZE(period_sizes),
.list = period_sizes,
.mask = 0
@@ -2371,7 +2371,7 @@ static int snd_cs46xx_front_dup_put(struct snd_kcontrol *kcontrol,
ucontrol->value.integer.value[0] ? 0 : 0x200);
}
-static struct snd_kcontrol_new snd_cs46xx_front_dup_ctl = {
+static const struct snd_kcontrol_new snd_cs46xx_front_dup_ctl = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Duplicate Front",
.info = snd_mixer_boolean_info,
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index 6a0e49ac5273..d3203df50a1a 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -37,7 +37,7 @@ MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Creative Labs,SB Live!/PCI512/E-mu APS},"
"{Creative Labs,SB Audigy}}");
-#if IS_REACHABLE(CONFIG_SND_SEQUENCER)
+#if IS_ENABLED(CONFIG_SND_SEQUENCER)
#define ENABLE_SYNTH
#include <sound/emu10k1_synth.h>
#endif
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 56fc47bd6dba..dc585959ca32 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -311,21 +311,6 @@ static const u32 onoff_table[2] = {
};
/*
- */
-
-static inline mm_segment_t snd_enter_user(void)
-{
- mm_segment_t fs = get_fs();
- set_fs(get_ds());
- return fs;
-}
-
-static inline void snd_leave_user(mm_segment_t fs)
-{
- set_fs(fs);
-}
-
-/*
* controls
*/
@@ -538,7 +523,8 @@ unsigned int snd_emu10k1_efx_read(struct snd_emu10k1 *emu, unsigned int pc)
}
static int snd_emu10k1_gpr_poke(struct snd_emu10k1 *emu,
- struct snd_emu10k1_fx8010_code *icode)
+ struct snd_emu10k1_fx8010_code *icode,
+ bool in_kernel)
{
int gpr;
u32 val;
@@ -546,7 +532,9 @@ static int snd_emu10k1_gpr_poke(struct snd_emu10k1 *emu,
for (gpr = 0; gpr < (emu->audigy ? 0x200 : 0x100); gpr++) {
if (!test_bit(gpr, icode->gpr_valid))
continue;
- if (get_user(val, &icode->gpr_map[gpr]))
+ if (in_kernel)
+ val = *(u32 *)&icode->gpr_map[gpr];
+ else if (get_user(val, &icode->gpr_map[gpr]))
return -EFAULT;
snd_emu10k1_ptr_write(emu, emu->gpr_base + gpr, 0, val);
}
@@ -569,7 +557,8 @@ static int snd_emu10k1_gpr_peek(struct snd_emu10k1 *emu,
}
static int snd_emu10k1_tram_poke(struct snd_emu10k1 *emu,
- struct snd_emu10k1_fx8010_code *icode)
+ struct snd_emu10k1_fx8010_code *icode,
+ bool in_kernel)
{
int tram;
u32 addr, val;
@@ -577,9 +566,14 @@ static int snd_emu10k1_tram_poke(struct snd_emu10k1 *emu,
for (tram = 0; tram < (emu->audigy ? 0x100 : 0xa0); tram++) {
if (!test_bit(tram, icode->tram_valid))
continue;
- if (get_user(val, &icode->tram_data_map[tram]) ||
- get_user(addr, &icode->tram_addr_map[tram]))
- return -EFAULT;
+ if (in_kernel) {
+ val = *(u32 *)&icode->tram_data_map[tram];
+ addr = *(u32 *)&icode->tram_addr_map[tram];
+ } else {
+ if (get_user(val, &icode->tram_data_map[tram]) ||
+ get_user(addr, &icode->tram_addr_map[tram]))
+ return -EFAULT;
+ }
snd_emu10k1_ptr_write(emu, TANKMEMDATAREGBASE + tram, 0, val);
if (!emu->audigy) {
snd_emu10k1_ptr_write(emu, TANKMEMADDRREGBASE + tram, 0, addr);
@@ -615,16 +609,22 @@ static int snd_emu10k1_tram_peek(struct snd_emu10k1 *emu,
}
static int snd_emu10k1_code_poke(struct snd_emu10k1 *emu,
- struct snd_emu10k1_fx8010_code *icode)
+ struct snd_emu10k1_fx8010_code *icode,
+ bool in_kernel)
{
u32 pc, lo, hi;
for (pc = 0; pc < (emu->audigy ? 2*1024 : 2*512); pc += 2) {
if (!test_bit(pc / 2, icode->code_valid))
continue;
- if (get_user(lo, &icode->code[pc + 0]) ||
- get_user(hi, &icode->code[pc + 1]))
- return -EFAULT;
+ if (in_kernel) {
+ lo = *(u32 *)&icode->code[pc + 0];
+ hi = *(u32 *)&icode->code[pc + 1];
+ } else {
+ if (get_user(lo, &icode->code[pc + 0]) ||
+ get_user(hi, &icode->code[pc + 1]))
+ return -EFAULT;
+ }
snd_emu10k1_efx_write(emu, pc + 0, lo);
snd_emu10k1_efx_write(emu, pc + 1, hi);
}
@@ -665,14 +665,16 @@ snd_emu10k1_look_for_ctl(struct snd_emu10k1 *emu, struct snd_ctl_elem_id *id)
#define MAX_TLV_SIZE 256
-static unsigned int *copy_tlv(const unsigned int __user *_tlv)
+static unsigned int *copy_tlv(const unsigned int __user *_tlv, bool in_kernel)
{
unsigned int data[2];
unsigned int *tlv;
if (!_tlv)
return NULL;
- if (copy_from_user(data, _tlv, sizeof(data)))
+ if (in_kernel)
+ memcpy(data, (void *)_tlv, sizeof(data));
+ else if (copy_from_user(data, _tlv, sizeof(data)))
return NULL;
if (data[1] >= MAX_TLV_SIZE)
return NULL;
@@ -680,7 +682,9 @@ static unsigned int *copy_tlv(const unsigned int __user *_tlv)
if (!tlv)
return NULL;
memcpy(tlv, data, sizeof(data));
- if (copy_from_user(tlv + 2, _tlv + 2, data[1])) {
+ if (in_kernel) {
+ memcpy(tlv + 2, (void *)(_tlv + 2), data[1]);
+ } else if (copy_from_user(tlv + 2, _tlv + 2, data[1])) {
kfree(tlv);
return NULL;
}
@@ -690,7 +694,7 @@ static unsigned int *copy_tlv(const unsigned int __user *_tlv)
static int copy_gctl(struct snd_emu10k1 *emu,
struct snd_emu10k1_fx8010_control_gpr *gctl,
struct snd_emu10k1_fx8010_control_gpr __user *_gctl,
- int idx)
+ int idx, bool in_kernel)
{
struct snd_emu10k1_fx8010_control_old_gpr __user *octl;
@@ -718,7 +722,8 @@ static int copy_gctl_to_user(struct snd_emu10k1 *emu,
}
static int snd_emu10k1_verify_controls(struct snd_emu10k1 *emu,
- struct snd_emu10k1_fx8010_code *icode)
+ struct snd_emu10k1_fx8010_code *icode,
+ bool in_kernel)
{
unsigned int i;
struct snd_ctl_elem_id __user *_id;
@@ -728,7 +733,9 @@ static int snd_emu10k1_verify_controls(struct snd_emu10k1 *emu,
for (i = 0, _id = icode->gpr_del_controls;
i < icode->gpr_del_control_count; i++, _id++) {
- if (copy_from_user(&id, _id, sizeof(id)))
+ if (in_kernel)
+ id = *(struct snd_ctl_elem_id *)_id;
+ else if (copy_from_user(&id, _id, sizeof(id)))
return -EFAULT;
if (snd_emu10k1_look_for_ctl(emu, &id) == NULL)
return -ENOENT;
@@ -738,7 +745,8 @@ static int snd_emu10k1_verify_controls(struct snd_emu10k1 *emu,
return -ENOMEM;
err = 0;
for (i = 0; i < icode->gpr_add_control_count; i++) {
- if (copy_gctl(emu, gctl, icode->gpr_add_controls, i)) {
+ if (copy_gctl(emu, gctl, icode->gpr_add_controls, i,
+ in_kernel)) {
err = -EFAULT;
goto __error;
}
@@ -759,7 +767,8 @@ static int snd_emu10k1_verify_controls(struct snd_emu10k1 *emu,
}
for (i = 0; i < icode->gpr_list_control_count; i++) {
/* FIXME: we need to check the WRITE access */
- if (copy_gctl(emu, gctl, icode->gpr_list_controls, i)) {
+ if (copy_gctl(emu, gctl, icode->gpr_list_controls, i,
+ in_kernel)) {
err = -EFAULT;
goto __error;
}
@@ -781,7 +790,8 @@ static void snd_emu10k1_ctl_private_free(struct snd_kcontrol *kctl)
}
static int snd_emu10k1_add_controls(struct snd_emu10k1 *emu,
- struct snd_emu10k1_fx8010_code *icode)
+ struct snd_emu10k1_fx8010_code *icode,
+ bool in_kernel)
{
unsigned int i, j;
struct snd_emu10k1_fx8010_control_gpr *gctl;
@@ -800,7 +810,8 @@ static int snd_emu10k1_add_controls(struct snd_emu10k1 *emu,
}
for (i = 0; i < icode->gpr_add_control_count; i++) {
- if (copy_gctl(emu, gctl, icode->gpr_add_controls, i)) {
+ if (copy_gctl(emu, gctl, icode->gpr_add_controls, i,
+ in_kernel)) {
err = -EFAULT;
goto __error;
}
@@ -821,7 +832,7 @@ static int snd_emu10k1_add_controls(struct snd_emu10k1 *emu,
knew.device = gctl->id.device;
knew.subdevice = gctl->id.subdevice;
knew.info = snd_emu10k1_gpr_ctl_info;
- knew.tlv.p = copy_tlv(gctl->tlv);
+ knew.tlv.p = copy_tlv(gctl->tlv, in_kernel);
if (knew.tlv.p)
knew.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ;
@@ -873,7 +884,8 @@ static int snd_emu10k1_add_controls(struct snd_emu10k1 *emu,
}
static int snd_emu10k1_del_controls(struct snd_emu10k1 *emu,
- struct snd_emu10k1_fx8010_code *icode)
+ struct snd_emu10k1_fx8010_code *icode,
+ bool in_kernel)
{
unsigned int i;
struct snd_ctl_elem_id id;
@@ -883,7 +895,9 @@ static int snd_emu10k1_del_controls(struct snd_emu10k1 *emu,
for (i = 0, _id = icode->gpr_del_controls;
i < icode->gpr_del_control_count; i++, _id++) {
- if (copy_from_user(&id, _id, sizeof(id)))
+ if (in_kernel)
+ id = *(struct snd_ctl_elem_id *)_id;
+ else if (copy_from_user(&id, _id, sizeof(id)))
return -EFAULT;
down_write(&card->controls_rwsem);
ctl = snd_emu10k1_look_for_ctl(emu, &id);
@@ -941,12 +955,14 @@ static int snd_emu10k1_list_controls(struct snd_emu10k1 *emu,
}
static int snd_emu10k1_icode_poke(struct snd_emu10k1 *emu,
- struct snd_emu10k1_fx8010_code *icode)
+ struct snd_emu10k1_fx8010_code *icode,
+ bool in_kernel)
{
int err = 0;
mutex_lock(&emu->fx8010.lock);
- if ((err = snd_emu10k1_verify_controls(emu, icode)) < 0)
+ err = snd_emu10k1_verify_controls(emu, icode, in_kernel);
+ if (err < 0)
goto __error;
strlcpy(emu->fx8010.name, icode->name, sizeof(emu->fx8010.name));
/* stop FX processor - this may be dangerous, but it's better to miss
@@ -956,11 +972,20 @@ static int snd_emu10k1_icode_poke(struct snd_emu10k1 *emu,
else
snd_emu10k1_ptr_write(emu, DBG, 0, emu->fx8010.dbg | EMU10K1_DBG_SINGLE_STEP);
/* ok, do the main job */
- if ((err = snd_emu10k1_del_controls(emu, icode)) < 0 ||
- (err = snd_emu10k1_gpr_poke(emu, icode)) < 0 ||
- (err = snd_emu10k1_tram_poke(emu, icode)) < 0 ||
- (err = snd_emu10k1_code_poke(emu, icode)) < 0 ||
- (err = snd_emu10k1_add_controls(emu, icode)) < 0)
+ err = snd_emu10k1_del_controls(emu, icode, in_kernel);
+ if (err < 0)
+ goto __error;
+ err = snd_emu10k1_gpr_poke(emu, icode, in_kernel);
+ if (err < 0)
+ goto __error;
+ err = snd_emu10k1_tram_poke(emu, icode, in_kernel);
+ if (err < 0)
+ goto __error;
+ err = snd_emu10k1_code_poke(emu, icode, in_kernel);
+ if (err < 0)
+ goto __error;
+ err = snd_emu10k1_add_controls(emu, icode, in_kernel);
+ if (err < 0)
goto __error;
/* start FX processor when the DSP code is updated */
if (emu->audigy)
@@ -1179,7 +1204,6 @@ static int _snd_emu10k1_audigy_init_efx(struct snd_emu10k1 *emu)
struct snd_emu10k1_fx8010_code *icode = NULL;
struct snd_emu10k1_fx8010_control_gpr *controls = NULL, *ctl;
u32 *gpr_map;
- mm_segment_t seg;
err = -ENOMEM;
icode = kzalloc(sizeof(*icode), GFP_KERNEL);
@@ -1739,13 +1763,11 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
while (ptr < 0x400)
A_OP(icode, &ptr, 0x0f, 0xc0, 0xc0, 0xcf, 0xc0);
- seg = snd_enter_user();
icode->gpr_add_control_count = nctl;
icode->gpr_add_controls = (struct snd_emu10k1_fx8010_control_gpr __user *)controls;
emu->support_tlv = 1; /* support TLV */
- err = snd_emu10k1_icode_poke(emu, icode);
+ err = snd_emu10k1_icode_poke(emu, icode, true);
emu->support_tlv = 0; /* clear again */
- snd_leave_user(seg);
__err:
kfree(controls);
@@ -1817,7 +1839,6 @@ static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
struct snd_emu10k1_fx8010_pcm_rec *ipcm = NULL;
struct snd_emu10k1_fx8010_control_gpr *controls = NULL, *ctl;
u32 *gpr_map;
- mm_segment_t seg;
err = -ENOMEM;
icode = kzalloc(sizeof(*icode), GFP_KERNEL);
@@ -2368,13 +2389,11 @@ static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
if ((err = snd_emu10k1_fx8010_tram_setup(emu, ipcm->buffer_size)) < 0)
goto __err;
- seg = snd_enter_user();
icode->gpr_add_control_count = i;
icode->gpr_add_controls = (struct snd_emu10k1_fx8010_control_gpr __user *)controls;
emu->support_tlv = 1; /* support TLV */
- err = snd_emu10k1_icode_poke(emu, icode);
+ err = snd_emu10k1_icode_poke(emu, icode, true);
emu->support_tlv = 0; /* clear again */
- snd_leave_user(seg);
if (err >= 0)
err = snd_emu10k1_ipcm_poke(emu, ipcm);
__err:
@@ -2537,7 +2556,7 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un
icode = memdup_user(argp, sizeof(*icode));
if (IS_ERR(icode))
return PTR_ERR(icode);
- res = snd_emu10k1_icode_poke(emu, icode);
+ res = snd_emu10k1_icode_poke(emu, icode, false);
kfree(icode);
return res;
case SNDRV_EMU10K1_IOCTL_CODE_PEEK:
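The emufx.c hunks above replace the old snd_enter_user()/set_fs() trick with an explicit in_kernel flag that is threaded from the ioctl path (false) and the built-in init_efx paths (true) down to every copy helper: user requests go through copy_from_user(), while the driver's own kernel-resident control tables are copied with a plain assignment or memcpy(). A minimal sketch of that pattern, with hypothetical names rather than the driver's own helpers:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/*
 * Copy one element either from a kernel-resident table or from a
 * user-space buffer, depending on where the caller's data lives.
 * Returns 0 or -EFAULT, mirroring the patched helpers' convention.
 */
static int copy_elem(void *dst, const void *src, size_t size, bool in_kernel)
{
	if (in_kernel)
		memcpy(dst, src, size);
	else if (copy_from_user(dst, (const void __user *)src, size))
		return -EFAULT;
	return 0;
}

As a side effect, the long "if ((err = ...)) < 0 || ..." chain in snd_emu10k1_icode_poke() is unrolled into one call per statement with an early goto on error, which is also the more conventional kernel style.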
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index ef1cf530c929..5c9054a9f69e 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -164,7 +164,7 @@ static int snd_emu10k1_pcm_channel_alloc(struct snd_emu10k1_pcm * epcm, int voic
return 0;
}
-static unsigned int capture_period_sizes[31] = {
+static const unsigned int capture_period_sizes[31] = {
384, 448, 512, 640,
384*2, 448*2, 512*2, 640*2,
384*4, 448*4, 512*4, 640*4,
@@ -175,17 +175,17 @@ static unsigned int capture_period_sizes[31] = {
384*128,448*128,512*128
};
-static struct snd_pcm_hw_constraint_list hw_constraints_capture_period_sizes = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_capture_period_sizes = {
.count = 31,
.list = capture_period_sizes,
.mask = 0
};
-static unsigned int capture_rates[8] = {
+static const unsigned int capture_rates[8] = {
8000, 11025, 16000, 22050, 24000, 32000, 44100, 48000
};
-static struct snd_pcm_hw_constraint_list hw_constraints_capture_rates = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_capture_rates = {
.count = 8,
.list = capture_rates,
.mask = 0
@@ -1632,8 +1632,8 @@ static int snd_emu10k1_fx8010_playback_transfer(struct snd_pcm_substream *substr
struct snd_emu10k1 *emu = snd_pcm_substream_chip(substream);
struct snd_emu10k1_fx8010_pcm *pcm = &emu->fx8010.pcm[substream->number];
- snd_pcm_indirect_playback_transfer(substream, &pcm->pcm_rec, fx8010_pb_trans_copy);
- return 0;
+ return snd_pcm_indirect_playback_transfer(substream, &pcm->pcm_rec,
+ fx8010_pb_trans_copy);
}
static int snd_emu10k1_fx8010_playback_hw_params(struct snd_pcm_substream *substream,
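The emupcm.c changes constify the capture rate and period-size tables together with their snd_pcm_hw_constraint_list wrappers; the ALSA core only reads these, so they can live in read-only data. A hedged sketch of how such a list is typically declared and attached in an open callback (values and names here are illustrative, not the driver's):

#include <linux/kernel.h>
#include <sound/pcm.h>

static const unsigned int fixed_rates[] = { 8000, 16000, 48000 };

static const struct snd_pcm_hw_constraint_list rate_constraints = {
	.count = ARRAY_SIZE(fixed_rates),
	.list  = fixed_rates,
	.mask  = 0,
};

/* typically called from the substream's .open callback */
static int apply_rate_constraint(struct snd_pcm_runtime *runtime)
{
	return snd_pcm_hw_constraint_list(runtime, 0,
					  SNDRV_PCM_HW_PARAM_RATE,
					  &rate_constraints);
}

The remaining hunk simply propagates the return value of snd_pcm_indirect_playback_transfer(), which can now report an error instead of being ignored.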
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 09a63ef41ef2..f0d978e3b274 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -467,41 +467,41 @@ MODULE_DEVICE_TABLE(pci, snd_audiopci_ids);
#define POLL_COUNT 0xa000
#ifdef CHIP1370
-static unsigned int snd_es1370_fixed_rates[] =
+static const unsigned int snd_es1370_fixed_rates[] =
{5512, 11025, 22050, 44100};
-static struct snd_pcm_hw_constraint_list snd_es1370_hw_constraints_rates = {
+static const struct snd_pcm_hw_constraint_list snd_es1370_hw_constraints_rates = {
.count = 4,
.list = snd_es1370_fixed_rates,
.mask = 0,
};
-static struct snd_ratnum es1370_clock = {
+static const struct snd_ratnum es1370_clock = {
.num = ES_1370_SRCLOCK,
.den_min = 29,
.den_max = 353,
.den_step = 1,
};
-static struct snd_pcm_hw_constraint_ratnums snd_es1370_hw_constraints_clock = {
+static const struct snd_pcm_hw_constraint_ratnums snd_es1370_hw_constraints_clock = {
.nrats = 1,
.rats = &es1370_clock,
};
#else
-static struct snd_ratden es1371_dac_clock = {
+static const struct snd_ratden es1371_dac_clock = {
.num_min = 3000 * (1 << 15),
.num_max = 48000 * (1 << 15),
.num_step = 3000,
.den = 1 << 15,
};
-static struct snd_pcm_hw_constraint_ratdens snd_es1371_hw_constraints_dac_clock = {
+static const struct snd_pcm_hw_constraint_ratdens snd_es1371_hw_constraints_dac_clock = {
.nrats = 1,
.rats = &es1371_dac_clock,
};
-static struct snd_ratnum es1371_adc_clock = {
+static const struct snd_ratnum es1371_adc_clock = {
.num = 48000 << 15,
.den_min = 32768,
.den_max = 393216,
.den_step = 1,
};
-static struct snd_pcm_hw_constraint_ratnums snd_es1371_hw_constraints_adc_clock = {
+static const struct snd_pcm_hw_constraint_ratnums snd_es1371_hw_constraints_adc_clock = {
.nrats = 1,
.rats = &es1371_adc_clock,
};
diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c
index e8d943071a8c..069902a06f00 100644
--- a/sound/pci/es1938.c
+++ b/sound/pci/es1938.c
@@ -436,7 +436,7 @@ static void snd_es1938_reset_fifo(struct es1938 *chip)
outb(0, SLSB_REG(chip, RESET));
}
-static struct snd_ratnum clocks[2] = {
+static const struct snd_ratnum clocks[2] = {
{
.num = 793800,
.den_min = 1,
@@ -451,7 +451,7 @@ static struct snd_ratnum clocks[2] = {
}
};
-static struct snd_pcm_hw_constraint_ratnums hw_constraints_clocks = {
+static const struct snd_pcm_hw_constraint_ratnums hw_constraints_clocks = {
.nrats = 2,
.rats = clocks,
};
@@ -839,15 +839,12 @@ static snd_pcm_uframes_t snd_es1938_playback_pointer(struct snd_pcm_substream *s
}
static int snd_es1938_capture_copy(struct snd_pcm_substream *substream,
- int channel,
- snd_pcm_uframes_t pos,
- void __user *dst,
- snd_pcm_uframes_t count)
+ int channel, unsigned long pos,
+ void __user *dst, unsigned long count)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct es1938 *chip = snd_pcm_substream_chip(substream);
- pos <<= chip->dma1_shift;
- count <<= chip->dma1_shift;
+
if (snd_BUG_ON(pos + count > chip->dma1_size))
return -EINVAL;
if (pos + count < chip->dma1_size) {
@@ -856,12 +853,31 @@ static int snd_es1938_capture_copy(struct snd_pcm_substream *substream,
} else {
if (copy_to_user(dst, runtime->dma_area + pos + 1, count - 1))
return -EFAULT;
- if (put_user(runtime->dma_area[0], ((unsigned char __user *)dst) + count - 1))
+ if (put_user(runtime->dma_area[0],
+ ((unsigned char __user *)dst) + count - 1))
return -EFAULT;
}
return 0;
}
+static int snd_es1938_capture_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *dst, unsigned long count)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct es1938 *chip = snd_pcm_substream_chip(substream);
+
+ if (snd_BUG_ON(pos + count > chip->dma1_size))
+ return -EINVAL;
+ if (pos + count < chip->dma1_size) {
+ memcpy(dst, runtime->dma_area + pos + 1, count);
+ } else {
+ memcpy(dst, runtime->dma_area + pos + 1, count - 1);
+ runtime->dma_area[0] = *((unsigned char *)dst + count - 1);
+ }
+ return 0;
+}
+
/*
* buffer management
*/
@@ -1012,7 +1028,8 @@ static const struct snd_pcm_ops snd_es1938_capture_ops = {
.prepare = snd_es1938_capture_prepare,
.trigger = snd_es1938_capture_trigger,
.pointer = snd_es1938_capture_pointer,
- .copy = snd_es1938_capture_copy,
+ .copy_user = snd_es1938_capture_copy,
+ .copy_kernel = snd_es1938_capture_copy_kernel,
};
static int snd_es1938_new_pcm(struct es1938 *chip, int device)
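es1938 is converted to the split PCM copy API: the old .copy op becomes .copy_user, positions and counts now arrive as unsigned long byte offsets (hence the dropped dma1_shift conversions), and a new .copy_kernel variant handles in-kernel buffers with memcpy() instead of copy_to_user(). A stripped-down sketch of such a kernel-space copy callback, ignoring the chip-specific one-byte rotation the real driver performs (names are placeholders):

#include <linux/string.h>
#include <sound/pcm.h>

/* Hypothetical in-kernel copy callback: same layout logic as the
 * __user variant, but plain memcpy() instead of copy_to_user().
 * Wired up via the .copy_kernel field of struct snd_pcm_ops. */
static int example_capture_copy_kernel(struct snd_pcm_substream *substream,
				       int channel, unsigned long pos,
				       void *dst, unsigned long count)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	memcpy(dst, runtime->dma_area + pos, count);
	return 0;
}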
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index c47287d79306..2e402ece4c86 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -334,23 +334,23 @@ static unsigned short snd_fm801_codec_read(struct snd_ac97 *ac97, unsigned short
return fm801_readw(chip, AC97_DATA);
}
-static unsigned int rates[] = {
+static const unsigned int rates[] = {
5500, 8000, 9600, 11025,
16000, 19200, 22050, 32000,
38400, 44100, 48000
};
-static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
};
-static unsigned int channels[] = {
+static const unsigned int channels[] = {
2, 4, 6
};
-static struct snd_pcm_hw_constraint_list hw_constraints_channels = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_channels = {
.count = ARRAY_SIZE(channels),
.list = channels,
.mask = 0,
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 70bb365a08d2..821aad374a06 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -19,13 +19,11 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/module.h>
-#include <linux/async.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <sound/core.h>
@@ -1477,18 +1475,8 @@ int snd_hda_mixer_amp_volume_put(struct snd_kcontrol *kcontrol,
}
EXPORT_SYMBOL_GPL(snd_hda_mixer_amp_volume_put);
-/**
- * snd_hda_mixer_amp_volume_put - TLV callback for a standard AMP mixer volume
- * @kcontrol: ctl element
- * @op_flag: operation flag
- * @size: byte size of input TLV
- * @_tlv: TLV data
- *
- * The control element is supposed to have the private_value field
- * set up via HDA_COMPOSE_AMP_VAL*() or related macros.
- */
-int snd_hda_mixer_amp_tlv(struct snd_kcontrol *kcontrol, int op_flag,
- unsigned int size, unsigned int __user *_tlv)
+/* inquiry the amp caps and convert to TLV */
+static void get_ctl_amp_tlv(struct snd_kcontrol *kcontrol, unsigned int *tlv)
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
hda_nid_t nid = get_amp_nid(kcontrol);
@@ -1497,8 +1485,6 @@ int snd_hda_mixer_amp_tlv(struct snd_kcontrol *kcontrol, int op_flag,
bool min_mute = get_amp_min_mute(kcontrol);
u32 caps, val1, val2;
- if (size < 4 * sizeof(unsigned int))
- return -ENOMEM;
caps = query_amp_caps(codec, nid, dir);
val2 = (caps & AC_AMPCAP_STEP_SIZE) >> AC_AMPCAP_STEP_SIZE_SHIFT;
val2 = (val2 + 1) * 25;
@@ -1507,13 +1493,31 @@ int snd_hda_mixer_amp_tlv(struct snd_kcontrol *kcontrol, int op_flag,
val1 = ((int)val1) * ((int)val2);
if (min_mute || (caps & AC_AMPCAP_MIN_MUTE))
val2 |= TLV_DB_SCALE_MUTE;
- if (put_user(SNDRV_CTL_TLVT_DB_SCALE, _tlv))
- return -EFAULT;
- if (put_user(2 * sizeof(unsigned int), _tlv + 1))
- return -EFAULT;
- if (put_user(val1, _tlv + 2))
- return -EFAULT;
- if (put_user(val2, _tlv + 3))
+ tlv[0] = SNDRV_CTL_TLVT_DB_SCALE;
+ tlv[1] = 2 * sizeof(unsigned int);
+ tlv[2] = val1;
+ tlv[3] = val2;
+}
+
+/**
+ * snd_hda_mixer_amp_tlv - TLV callback for a standard AMP mixer volume
+ * @kcontrol: ctl element
+ * @op_flag: operation flag
+ * @size: byte size of input TLV
+ * @_tlv: TLV data
+ *
+ * The control element is supposed to have the private_value field
+ * set up via HDA_COMPOSE_AMP_VAL*() or related macros.
+ */
+int snd_hda_mixer_amp_tlv(struct snd_kcontrol *kcontrol, int op_flag,
+ unsigned int size, unsigned int __user *_tlv)
+{
+ unsigned int tlv[4];
+
+ if (size < 4 * sizeof(unsigned int))
+ return -ENOMEM;
+ get_ctl_amp_tlv(kcontrol, tlv);
+ if (copy_to_user(_tlv, tlv, sizeof(tlv)))
return -EFAULT;
return 0;
}
@@ -1807,13 +1811,10 @@ static int get_kctl_0dB_offset(struct hda_codec *codec,
const int *tlv = NULL;
int val = -1;
- if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
- /* FIXME: set_fs() hack for obtaining user-space TLV data */
- mm_segment_t fs = get_fs();
- set_fs(get_ds());
- if (!kctl->tlv.c(kctl, 0, sizeof(_tlv), _tlv))
- tlv = _tlv;
- set_fs(fs);
+ if ((kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
+ kctl->tlv.c == snd_hda_mixer_amp_tlv) {
+ get_ctl_amp_tlv(kctl, _tlv);
+ tlv = _tlv;
} else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ)
tlv = kctl->tlv.p;
if (tlv && tlv[0] == SNDRV_CTL_TLVT_DB_SCALE) {
@@ -2118,196 +2119,6 @@ int snd_hda_mixer_amp_switch_put(struct snd_kcontrol *kcontrol,
EXPORT_SYMBOL_GPL(snd_hda_mixer_amp_switch_put);
/*
- * bound volume controls
- *
- * bind multiple volumes (# indices, from 0)
- */
-
-#define AMP_VAL_IDX_SHIFT 19
-#define AMP_VAL_IDX_MASK (0x0f<<19)
-
-/**
- * snd_hda_mixer_bind_switch_get - Get callback for a bound volume control
- * @kcontrol: ctl element
- * @ucontrol: pointer to get/store the data
- *
- * The control element is supposed to have the private_value field
- * set up via HDA_BIND_MUTE*() macros.
- */
-int snd_hda_mixer_bind_switch_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- unsigned long pval;
- int err;
-
- mutex_lock(&codec->control_mutex);
- pval = kcontrol->private_value;
- kcontrol->private_value = pval & ~AMP_VAL_IDX_MASK; /* index 0 */
- err = snd_hda_mixer_amp_switch_get(kcontrol, ucontrol);
- kcontrol->private_value = pval;
- mutex_unlock(&codec->control_mutex);
- return err;
-}
-EXPORT_SYMBOL_GPL(snd_hda_mixer_bind_switch_get);
-
-/**
- * snd_hda_mixer_bind_switch_put - Put callback for a bound volume control
- * @kcontrol: ctl element
- * @ucontrol: pointer to get/store the data
- *
- * The control element is supposed to have the private_value field
- * set up via HDA_BIND_MUTE*() macros.
- */
-int snd_hda_mixer_bind_switch_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- unsigned long pval;
- int i, indices, err = 0, change = 0;
-
- mutex_lock(&codec->control_mutex);
- pval = kcontrol->private_value;
- indices = (pval & AMP_VAL_IDX_MASK) >> AMP_VAL_IDX_SHIFT;
- for (i = 0; i < indices; i++) {
- kcontrol->private_value = (pval & ~AMP_VAL_IDX_MASK) |
- (i << AMP_VAL_IDX_SHIFT);
- err = snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
- if (err < 0)
- break;
- change |= err;
- }
- kcontrol->private_value = pval;
- mutex_unlock(&codec->control_mutex);
- return err < 0 ? err : change;
-}
-EXPORT_SYMBOL_GPL(snd_hda_mixer_bind_switch_put);
-
-/**
- * snd_hda_mixer_bind_ctls_info - Info callback for a generic bound control
- * @kcontrol: referred ctl element
- * @uinfo: pointer to get/store the data
- *
- * The control element is supposed to have the private_value field
- * set up via HDA_BIND_VOL() or HDA_BIND_SW() macros.
- */
-int snd_hda_mixer_bind_ctls_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct hda_bind_ctls *c;
- int err;
-
- mutex_lock(&codec->control_mutex);
- c = (struct hda_bind_ctls *)kcontrol->private_value;
- kcontrol->private_value = *c->values;
- err = c->ops->info(kcontrol, uinfo);
- kcontrol->private_value = (long)c;
- mutex_unlock(&codec->control_mutex);
- return err;
-}
-EXPORT_SYMBOL_GPL(snd_hda_mixer_bind_ctls_info);
-
-/**
- * snd_hda_mixer_bind_ctls_get - Get callback for a generic bound control
- * @kcontrol: ctl element
- * @ucontrol: pointer to get/store the data
- *
- * The control element is supposed to have the private_value field
- * set up via HDA_BIND_VOL() or HDA_BIND_SW() macros.
- */
-int snd_hda_mixer_bind_ctls_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct hda_bind_ctls *c;
- int err;
-
- mutex_lock(&codec->control_mutex);
- c = (struct hda_bind_ctls *)kcontrol->private_value;
- kcontrol->private_value = *c->values;
- err = c->ops->get(kcontrol, ucontrol);
- kcontrol->private_value = (long)c;
- mutex_unlock(&codec->control_mutex);
- return err;
-}
-EXPORT_SYMBOL_GPL(snd_hda_mixer_bind_ctls_get);
-
-/**
- * snd_hda_mixer_bind_ctls_put - Put callback for a generic bound control
- * @kcontrol: ctl element
- * @ucontrol: pointer to get/store the data
- *
- * The control element is supposed to have the private_value field
- * set up via HDA_BIND_VOL() or HDA_BIND_SW() macros.
- */
-int snd_hda_mixer_bind_ctls_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct hda_bind_ctls *c;
- unsigned long *vals;
- int err = 0, change = 0;
-
- mutex_lock(&codec->control_mutex);
- c = (struct hda_bind_ctls *)kcontrol->private_value;
- for (vals = c->values; *vals; vals++) {
- kcontrol->private_value = *vals;
- err = c->ops->put(kcontrol, ucontrol);
- if (err < 0)
- break;
- change |= err;
- }
- kcontrol->private_value = (long)c;
- mutex_unlock(&codec->control_mutex);
- return err < 0 ? err : change;
-}
-EXPORT_SYMBOL_GPL(snd_hda_mixer_bind_ctls_put);
-
-/**
- * snd_hda_mixer_bind_tlv - TLV callback for a generic bound control
- * @kcontrol: ctl element
- * @op_flag: operation flag
- * @size: byte size of input TLV
- * @tlv: TLV data
- *
- * The control element is supposed to have the private_value field
- * set up via HDA_BIND_VOL() macro.
- */
-int snd_hda_mixer_bind_tlv(struct snd_kcontrol *kcontrol, int op_flag,
- unsigned int size, unsigned int __user *tlv)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct hda_bind_ctls *c;
- int err;
-
- mutex_lock(&codec->control_mutex);
- c = (struct hda_bind_ctls *)kcontrol->private_value;
- kcontrol->private_value = *c->values;
- err = c->ops->tlv(kcontrol, op_flag, size, tlv);
- kcontrol->private_value = (long)c;
- mutex_unlock(&codec->control_mutex);
- return err;
-}
-EXPORT_SYMBOL_GPL(snd_hda_mixer_bind_tlv);
-
-struct hda_ctl_ops snd_hda_bind_vol = {
- .info = snd_hda_mixer_amp_volume_info,
- .get = snd_hda_mixer_amp_volume_get,
- .put = snd_hda_mixer_amp_volume_put,
- .tlv = snd_hda_mixer_amp_tlv
-};
-EXPORT_SYMBOL_GPL(snd_hda_bind_vol);
-
-struct hda_ctl_ops snd_hda_bind_sw = {
- .info = snd_hda_mixer_amp_switch_info,
- .get = snd_hda_mixer_amp_switch_get,
- .put = snd_hda_mixer_amp_switch_put,
- .tlv = snd_hda_mixer_amp_tlv
-};
-EXPORT_SYMBOL_GPL(snd_hda_bind_sw);
-
-/*
* SPDIF out controls
*/
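In hda_codec.c the TLV generation is split in two: get_ctl_amp_tlv() fills a four-word kernel array, the exported snd_hda_mixer_amp_tlv() copies that array out with a single copy_to_user(), and get_kctl_0dB_offset() calls the kernel-side helper directly (only when the control's TLV callback is known to be snd_hda_mixer_amp_tlv), so the old set_fs(get_ds()) detour can go away. A small sketch of the "build in kernel memory, copy out once" shape, with hypothetical function names and made-up dB values:

#include <linux/errno.h>
#include <linux/uaccess.h>
#include <sound/tlv.h>

static void fill_db_scale_tlv(unsigned int *tlv, int min_db, int step_db)
{
	tlv[0] = SNDRV_CTL_TLVT_DB_SCALE;
	tlv[1] = 2 * sizeof(unsigned int);
	tlv[2] = min_db;
	tlv[3] = step_db;
}

static int example_tlv_callback(unsigned int size, unsigned int __user *_tlv)
{
	unsigned int tlv[4];

	if (size < sizeof(tlv))
		return -ENOMEM;
	fill_db_scale_tlv(tlv, -6000, 100);	/* illustrative values only */
	if (copy_to_user(_tlv, tlv, sizeof(tlv)))
		return -EFAULT;
	return 0;
}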
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 1c60beb5b70a..d1eb14842340 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -1345,6 +1345,9 @@ int azx_codec_configure(struct azx *chip)
list_for_each_codec_safe(codec, next, &chip->bus) {
snd_hda_codec_configure(codec);
}
+
+ if (!azx_bus(chip)->num_codecs)
+ return -ENODEV;
return 0;
}
EXPORT_SYMBOL_GPL(azx_codec_configure);
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index 35a9ab2cac46..a68e75b00ea3 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -32,7 +32,11 @@
#define AZX_DCAPS_NO_MSI (1 << 9) /* No MSI support */
#define AZX_DCAPS_SNOOP_MASK (3 << 10) /* snoop type mask */
#define AZX_DCAPS_SNOOP_OFF (1 << 12) /* snoop default off */
-/* 13 unused */
+#ifdef CONFIG_SND_HDA_I915
+#define AZX_DCAPS_I915_COMPONENT (1 << 13) /* bind with i915 gfx */
+#else
+#define AZX_DCAPS_I915_COMPONENT 0 /* NOP */
+#endif
/* 14 unused */
#define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
#define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
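The new AZX_DCAPS_I915_COMPONENT flag reuses the previously unused bit 13 and is defined to 0 when CONFIG_SND_HDA_I915 is off, so tests like "chip->driver_caps & AZX_DCAPS_I915_COMPONENT" fold to constant false without any #ifdef at the call sites. The same compile-out trick in isolation, with a hypothetical config symbol and flag name:

#ifdef CONFIG_EXAMPLE_FEATURE
#define EXAMPLE_CAP_FEATURE	(1 << 13)	/* feature available */
#else
#define EXAMPLE_CAP_FEATURE	0		/* tests compile to false */
#endif

static inline bool example_has_feature(unsigned int caps)
{
	return caps & EXAMPLE_CAP_FEATURE;
}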
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 71545b56b4c8..28e265a88383 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -948,6 +948,8 @@ static void resume_path_from_idx(struct hda_codec *codec, int path_idx)
static int hda_gen_mixer_mute_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
+static int hda_gen_bind_mute_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
static int hda_gen_bind_mute_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
@@ -970,7 +972,7 @@ static const struct snd_kcontrol_new control_templates[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.info = snd_hda_mixer_amp_switch_info,
- .get = snd_hda_mixer_bind_switch_get,
+ .get = hda_gen_bind_mute_get,
.put = hda_gen_bind_mute_put, /* replaced */
.private_value = HDA_COMPOSE_AMP_VAL(0, 3, 0, 0),
},
@@ -1101,11 +1103,51 @@ static int hda_gen_mixer_mute_put(struct snd_kcontrol *kcontrol,
return snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
}
+/*
+ * Bound mute controls
+ */
+#define AMP_VAL_IDX_SHIFT 19
+#define AMP_VAL_IDX_MASK (0x0f<<19)
+
+static int hda_gen_bind_mute_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ unsigned long pval;
+ int err;
+
+ mutex_lock(&codec->control_mutex);
+ pval = kcontrol->private_value;
+ kcontrol->private_value = pval & ~AMP_VAL_IDX_MASK; /* index 0 */
+ err = snd_hda_mixer_amp_switch_get(kcontrol, ucontrol);
+ kcontrol->private_value = pval;
+ mutex_unlock(&codec->control_mutex);
+ return err;
+}
+
static int hda_gen_bind_mute_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ unsigned long pval;
+ int i, indices, err = 0, change = 0;
+
sync_auto_mute_bits(kcontrol, ucontrol);
- return snd_hda_mixer_bind_switch_put(kcontrol, ucontrol);
+
+ mutex_lock(&codec->control_mutex);
+ pval = kcontrol->private_value;
+ indices = (pval & AMP_VAL_IDX_MASK) >> AMP_VAL_IDX_SHIFT;
+ for (i = 0; i < indices; i++) {
+ kcontrol->private_value = (pval & ~AMP_VAL_IDX_MASK) |
+ (i << AMP_VAL_IDX_SHIFT);
+ err = snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
+ if (err < 0)
+ break;
+ change |= err;
+ }
+ kcontrol->private_value = pval;
+ mutex_unlock(&codec->control_mutex);
+ return err < 0 ? err : change;
}
/* any ctl assigned to the path with the given index? */
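With the generic snd_hda_mixer_bind_* helpers removed (see the hda_codec.c and hda_local.h hunks), hda_generic.c carries its own bound-mute get/put: the number of bound amp indices is encoded above bit 19 of private_value, and the put callback loops over them, temporarily rewriting private_value under control_mutex so the ordinary single-amp switch put can be reused per index. A condensed sketch of that loop, with the per-index operation abstracted away (snd_hda_mixer_amp_switch_put() in the real code):

#include <linux/mutex.h>
#include <sound/control.h>

#define AMP_VAL_IDX_SHIFT	19
#define AMP_VAL_IDX_MASK	(0x0f << 19)

static int apply_to_bound_amps(struct snd_kcontrol *kc, struct mutex *lock,
			       int (*op)(struct snd_kcontrol *kc))
{
	unsigned long pval;
	int i, indices, err = 0, change = 0;

	mutex_lock(lock);
	pval = kc->private_value;
	indices = (pval & AMP_VAL_IDX_MASK) >> AMP_VAL_IDX_SHIFT;
	for (i = 0; i < indices; i++) {
		kc->private_value = (pval & ~AMP_VAL_IDX_MASK) |
				    (i << AMP_VAL_IDX_SHIFT);
		err = op(kc);
		if (err < 0)
			break;
		change |= err;
	}
	kc->private_value = pval;
	mutex_unlock(lock);
	return err < 0 ? err : change;
}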
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 01eb1dc7b5b3..5ae8ddab6412 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -263,6 +263,7 @@ enum {
AZX_DRIVER_ICH,
AZX_DRIVER_PCH,
AZX_DRIVER_SCH,
+ AZX_DRIVER_SKL,
AZX_DRIVER_HDMI,
AZX_DRIVER_ATI,
AZX_DRIVER_ATIHDMI,
@@ -292,38 +293,43 @@ enum {
(AZX_DCAPS_NO_ALIGN_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY |\
AZX_DCAPS_SNOOP_TYPE(SCH))
-/* PCH up to IVB; no runtime PM */
+/* PCH up to IVB; no runtime PM; bind with i915 gfx */
#define AZX_DCAPS_INTEL_PCH_NOPM \
- (AZX_DCAPS_INTEL_PCH_BASE)
+ (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_I915_COMPONENT)
/* PCH for HSW/BDW; with runtime PM */
+/* no i915 binding for this as HSW/BDW has another controller for HDMI */
#define AZX_DCAPS_INTEL_PCH \
(AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME)
/* HSW HDMI */
#define AZX_DCAPS_INTEL_HASWELL \
(/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_COUNT_LPIB_DELAY |\
- AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_POWERWELL |\
- AZX_DCAPS_SNOOP_TYPE(SCH))
+ AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\
+ AZX_DCAPS_I915_POWERWELL | AZX_DCAPS_SNOOP_TYPE(SCH))
/* Broadwell HDMI can't use position buffer reliably, force to use LPIB */
#define AZX_DCAPS_INTEL_BROADWELL \
(/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_POSFIX_LPIB |\
- AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_POWERWELL |\
- AZX_DCAPS_SNOOP_TYPE(SCH))
+ AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\
+ AZX_DCAPS_I915_POWERWELL | AZX_DCAPS_SNOOP_TYPE(SCH))
#define AZX_DCAPS_INTEL_BAYTRAIL \
- (AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_I915_POWERWELL)
+ (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_I915_COMPONENT |\
+ AZX_DCAPS_I915_POWERWELL)
#define AZX_DCAPS_INTEL_BRASWELL \
- (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_I915_POWERWELL)
+ (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
+ AZX_DCAPS_I915_COMPONENT | AZX_DCAPS_I915_POWERWELL)
#define AZX_DCAPS_INTEL_SKYLAKE \
- (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG |\
+ (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
+ AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT |\
AZX_DCAPS_I915_POWERWELL)
#define AZX_DCAPS_INTEL_BROXTON \
- (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG |\
+ (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
+ AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT |\
AZX_DCAPS_I915_POWERWELL)
/* quirks for ATI SB / AMD Hudson */
@@ -364,23 +370,13 @@ enum {
((pci)->device == 0x0d0c) || \
((pci)->device == 0x160c))
-#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
-#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
-#define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
-#define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
-#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
-#define IS_BXT_T(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x1a98)
-#define IS_GLK(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x3198)
-#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \
- IS_BXT_T(pci) || IS_KBL(pci) || IS_KBL_LP(pci) || \
- IS_KBL_H(pci) || IS_GLK(pci) || IS_CFL(pci))
static char *driver_short_names[] = {
[AZX_DRIVER_ICH] = "HDA Intel",
[AZX_DRIVER_PCH] = "HDA Intel PCH",
[AZX_DRIVER_SCH] = "HDA Intel MID",
+ [AZX_DRIVER_SKL] = "HDA Intel PCH", /* kept old name for compatibility */
[AZX_DRIVER_HDMI] = "HDA Intel HDMI",
[AZX_DRIVER_ATI] = "HDA ATI SB",
[AZX_DRIVER_ATIHDMI] = "HDA ATI HDMI",
@@ -644,13 +640,13 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
snd_hdac_set_codec_wakeup(bus, true);
- if (IS_SKL_PLUS(pci)) {
+ if (chip->driver_type == AZX_DRIVER_SKL) {
pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
val = val & ~INTEL_HDA_CGCTL_MISCBDCGE;
pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
}
azx_init_chip(chip, full_reset);
- if (IS_SKL_PLUS(pci)) {
+ if (chip->driver_type == AZX_DRIVER_SKL) {
pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
val = val | INTEL_HDA_CGCTL_MISCBDCGE;
pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
@@ -1017,7 +1013,7 @@ static int azx_suspend(struct device *dev)
if (chip->msi)
pci_disable_msi(chip->pci);
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
+ if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
&& hda->need_i915_power)
snd_hdac_display_power(bus, false);
@@ -1075,9 +1071,11 @@ static int azx_resume(struct device *dev)
*/
static int azx_freeze_noirq(struct device *dev)
{
+ struct snd_card *card = dev_get_drvdata(dev);
+ struct azx *chip = card->private_data;
struct pci_dev *pci = to_pci_dev(dev);
- if (IS_SKL_PLUS(pci))
+ if (chip->driver_type == AZX_DRIVER_SKL)
pci_set_power_state(pci, PCI_D3hot);
return 0;
@@ -1085,9 +1083,11 @@ static int azx_freeze_noirq(struct device *dev)
static int azx_thaw_noirq(struct device *dev)
{
+ struct snd_card *card = dev_get_drvdata(dev);
+ struct azx *chip = card->private_data;
struct pci_dev *pci = to_pci_dev(dev);
- if (IS_SKL_PLUS(pci))
+ if (chip->driver_type == AZX_DRIVER_SKL)
pci_set_power_state(pci, PCI_D0);
return 0;
@@ -1119,7 +1119,7 @@ static int azx_runtime_suspend(struct device *dev)
azx_stop_chip(chip);
azx_enter_link_reset(chip);
azx_clear_irq_pending(chip);
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
+ if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
&& hda->need_i915_power)
snd_hdac_display_power(azx_bus(chip), false);
@@ -1384,8 +1384,9 @@ static int azx_free(struct azx *chip)
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
if (hda->need_i915_power)
snd_hdac_display_power(bus, false);
- snd_hdac_i915_exit(bus);
}
+ if (chip->driver_caps & AZX_DCAPS_I915_COMPONENT)
+ snd_hdac_i915_exit(bus);
kfree(hda);
return 0;
@@ -1497,7 +1498,7 @@ static int check_position_fix(struct azx *chip, int fix)
dev_dbg(chip->card->dev, "Using LPIB position fix\n");
return POS_FIX_LPIB;
}
- if (IS_SKL_PLUS(chip->pci)) {
+ if (chip->driver_type == AZX_DRIVER_SKL) {
dev_dbg(chip->card->dev, "Using SKL position fix\n");
return POS_FIX_SKL;
}
@@ -1798,7 +1799,7 @@ static int azx_first_init(struct azx *chip)
return -ENXIO;
}
- if (IS_SKL_PLUS(pci))
+ if (chip->driver_type == AZX_DRIVER_SKL)
snd_hdac_bus_parse_capabilities(bus);
/*
@@ -2201,16 +2202,8 @@ static int azx_probe_continue(struct azx *chip)
hda->probe_continued = 1;
- /* Request display power well for the HDA controller or codec. For
- * Haswell/Broadwell, both the display HDA controller and codec need
- * this power. For other platforms, like Baytrail/Braswell, only the
- * display codec needs the power and it can be released after probe.
- */
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
- /* HSW/BDW controllers need this power */
- if (CONTROLLER_IN_GPU(pci))
- hda->need_i915_power = 1;
-
+ /* bind with i915 if needed */
+ if (chip->driver_caps & AZX_DCAPS_I915_COMPONENT) {
err = snd_hdac_i915_init(bus);
if (err < 0) {
/* if the controller is bound only with HDMI/DP
@@ -2222,9 +2215,23 @@ static int azx_probe_continue(struct azx *chip)
dev_err(chip->card->dev,
"HSW/BDW HD-audio HDMI/DP requires binding with gfx driver\n");
goto out_free;
- } else
- goto skip_i915;
+ } else {
+ /* don't bother any longer */
+ chip->driver_caps &=
+ ~(AZX_DCAPS_I915_COMPONENT | AZX_DCAPS_I915_POWERWELL);
+ }
}
+ }
+
+ /* Request display power well for the HDA controller or codec. For
+ * Haswell/Broadwell, both the display HDA controller and codec need
+ * this power. For other platforms, like Baytrail/Braswell, only the
+ * display codec needs the power and it can be released after probe.
+ */
+ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+ /* HSW/BDW controllers need this power */
+ if (CONTROLLER_IN_GPU(pci))
+ hda->need_i915_power = 1;
err = snd_hdac_display_power(bus, true);
if (err < 0) {
@@ -2234,7 +2241,6 @@ static int azx_probe_continue(struct azx *chip)
}
}
- skip_i915:
err = azx_first_init(chip);
if (err < 0)
goto out_free;
@@ -2277,7 +2283,7 @@ static int azx_probe_continue(struct azx *chip)
pm_runtime_put_autosuspend(&pci->dev);
out_free:
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
+ if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
&& !hda->need_i915_power)
snd_hdac_display_power(bus, false);
@@ -2367,31 +2373,31 @@ static const struct pci_device_id azx_ids[] = {
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* Sunrise Point */
{ PCI_DEVICE(0x8086, 0xa170),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE },
/* Sunrise Point-LP */
{ PCI_DEVICE(0x8086, 0x9d70),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE },
/* Kabylake */
{ PCI_DEVICE(0x8086, 0xa171),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE },
/* Kabylake-LP */
{ PCI_DEVICE(0x8086, 0x9d71),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE },
/* Kabylake-H */
{ PCI_DEVICE(0x8086, 0xa2f0),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE },
/* Coffelake */
{ PCI_DEVICE(0x8086, 0xa348),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE},
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Broxton-P(Apollolake) */
{ PCI_DEVICE(0x8086, 0x5a98),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON },
/* Broxton-T */
{ PCI_DEVICE(0x8086, 0x1a98),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON },
/* Gemini-Lake */
{ PCI_DEVICE(0x8086, 0x3198),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON },
/* Haswell */
{ PCI_DEVICE(0x8086, 0x0a0c),
.driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
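hda_intel.c drops the IS_SKL()/IS_KBL()/IS_SKL_PLUS() PCI-ID macros in favour of a dedicated AZX_DRIVER_SKL driver type: the driver type and the AZX_DCAPS_* bits are both packed into the PCI table's driver_data, so the Skylake-class quirks key off chip->driver_type instead of an ever-growing device-ID list. A rough sketch of that encode/decode pattern; the masks and IDs below are illustrative, not the driver's actual layout:

#include <linux/pci.h>

#define EX_DRIVER_TYPE_MASK	0x0fUL		/* low bits: driver type */
#define EX_DCAPS_MASK		(~0x0fUL)	/* remaining bits: capability flags */

static const struct pci_device_id ex_ids[] = {
	/* driver type 3, capability bit 4 set - placeholder values */
	{ PCI_DEVICE(0x8086, 0xa170), .driver_data = 0x3 | (1UL << 4) },
	{ }
};

static void ex_decode(unsigned long driver_data,
		      unsigned int *type, unsigned long *caps)
{
	*type = driver_data & EX_DRIVER_TYPE_MASK;
	*caps = driver_data & EX_DCAPS_MASK;
}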
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index d0e066e4c985..5b5c324c99b9 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -178,67 +178,6 @@ void snd_hda_sync_vmaster_hook(struct hda_vmaster_mute_hook *hook);
#define HDA_AMP_UNMUTE 0x00
#define HDA_AMP_VOLMASK 0x7f
-/* mono switch binding multiple inputs */
-#define HDA_BIND_MUTE_MONO(xname, nid, channel, indices, direction) \
- { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = 0, \
- .info = snd_hda_mixer_amp_switch_info, \
- .get = snd_hda_mixer_bind_switch_get, \
- .put = snd_hda_mixer_bind_switch_put, \
- .private_value = HDA_COMPOSE_AMP_VAL(nid, channel, indices, direction) }
-
-/* stereo switch binding multiple inputs */
-#define HDA_BIND_MUTE(xname,nid,indices,dir) \
- HDA_BIND_MUTE_MONO(xname,nid,3,indices,dir)
-
-int snd_hda_mixer_bind_switch_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
-int snd_hda_mixer_bind_switch_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
-
-/* more generic bound controls */
-struct hda_ctl_ops {
- snd_kcontrol_info_t *info;
- snd_kcontrol_get_t *get;
- snd_kcontrol_put_t *put;
- snd_kcontrol_tlv_rw_t *tlv;
-};
-
-extern struct hda_ctl_ops snd_hda_bind_vol; /* for bind-volume with TLV */
-extern struct hda_ctl_ops snd_hda_bind_sw; /* for bind-switch */
-
-struct hda_bind_ctls {
- struct hda_ctl_ops *ops;
- unsigned long values[];
-};
-
-int snd_hda_mixer_bind_ctls_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo);
-int snd_hda_mixer_bind_ctls_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
-int snd_hda_mixer_bind_ctls_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
-int snd_hda_mixer_bind_tlv(struct snd_kcontrol *kcontrol, int op_flag,
- unsigned int size, unsigned int __user *tlv);
-
-#define HDA_BIND_VOL(xname, bindrec) \
- { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
- .name = xname, \
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE |\
- SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
- SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,\
- .info = snd_hda_mixer_bind_ctls_info,\
- .get = snd_hda_mixer_bind_ctls_get,\
- .put = snd_hda_mixer_bind_ctls_put,\
- .tlv = { .c = snd_hda_mixer_bind_tlv },\
- .private_value = (long) (bindrec) }
-#define HDA_BIND_SW(xname, bindrec) \
- { .iface = SNDRV_CTL_ELEM_IFACE_MIXER,\
- .name = xname, \
- .info = snd_hda_mixer_bind_ctls_info,\
- .get = snd_hda_mixer_bind_ctls_get,\
- .put = snd_hda_mixer_bind_ctls_put,\
- .private_value = (long) (bindrec) }
-
/*
* SPDIF I/O
*/
diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
index 9739fce9e032..9b7efece4484 100644
--- a/sound/pci/hda/hda_sysfs.c
+++ b/sound/pci/hda/hda_sysfs.c
@@ -761,7 +761,7 @@ static struct attribute *hda_dev_attrs[] = {
NULL
};
-static struct attribute_group hda_dev_attr_group = {
+static const struct attribute_group hda_dev_attr_group = {
.attrs = hda_dev_attrs,
};
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 90e4ff87445e..76c85f08bea6 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -174,7 +174,6 @@ struct hdmi_spec {
/* i915/powerwell (Haswell+/Valleyview+) specific */
bool use_acomp_notifier; /* use i915 eld_notify callback for hotplug */
struct i915_audio_component_audio_ops i915_audio_ops;
- bool i915_bound; /* was i915 bound in this driver? */
struct hdac_chmap chmap;
hda_nid_t vendor_nid;
@@ -2234,8 +2233,6 @@ static void generic_spec_free(struct hda_codec *codec)
struct hdmi_spec *spec = codec->spec;
if (spec) {
- if (spec->i915_bound)
- snd_hdac_i915_exit(&codec->bus->core);
hdmi_array_free(spec);
kfree(spec);
codec->spec = NULL;
@@ -2506,19 +2503,41 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec,
}
}
-/* Intel Haswell and onwards; audio component with eld notifier */
-static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid)
+/* precondition and allocation for Intel codecs */
+static int alloc_intel_hdmi(struct hda_codec *codec)
{
- struct hdmi_spec *spec;
- int err;
-
- /* HSW+ requires i915 binding */
+ /* requires i915 binding */
if (!codec->bus->core.audio_component) {
codec_info(codec, "No i915 binding for Intel HDMI/DP codec\n");
return -ENODEV;
}
- err = alloc_generic_hdmi(codec);
+ return alloc_generic_hdmi(codec);
+}
+
+/* parse and post-process for Intel codecs */
+static int parse_intel_hdmi(struct hda_codec *codec)
+{
+ int err;
+
+ err = hdmi_parse_codec(codec);
+ if (err < 0) {
+ generic_spec_free(codec);
+ return err;
+ }
+
+ generic_hdmi_init_per_pins(codec);
+ register_i915_notifier(codec);
+ return 0;
+}
+
+/* Intel Haswell and onwards; audio component with eld notifier */
+static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid)
+{
+ struct hdmi_spec *spec;
+ int err;
+
+ err = alloc_intel_hdmi(codec);
if (err < 0)
return err;
spec = codec->spec;
@@ -2542,15 +2561,7 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid)
spec->ops.setup_stream = i915_hsw_setup_stream;
spec->ops.pin_cvt_fixup = i915_pin_cvt_fixup;
- err = hdmi_parse_codec(codec);
- if (err < 0) {
- generic_spec_free(codec);
- return err;
- }
-
- generic_hdmi_init_per_pins(codec);
- register_i915_notifier(codec);
- return 0;
+ return parse_intel_hdmi(codec);
}
static int patch_i915_hsw_hdmi(struct hda_codec *codec)
@@ -2569,13 +2580,7 @@ static int patch_i915_byt_hdmi(struct hda_codec *codec)
struct hdmi_spec *spec;
int err;
- /* requires i915 binding */
- if (!codec->bus->core.audio_component) {
- codec_info(codec, "No i915 binding for Intel HDMI/DP codec\n");
- return -ENODEV;
- }
-
- err = alloc_generic_hdmi(codec);
+ err = alloc_intel_hdmi(codec);
if (err < 0)
return err;
spec = codec->spec;
@@ -2590,49 +2595,18 @@ static int patch_i915_byt_hdmi(struct hda_codec *codec)
spec->ops.pin_cvt_fixup = i915_pin_cvt_fixup;
- err = hdmi_parse_codec(codec);
- if (err < 0) {
- generic_spec_free(codec);
- return err;
- }
-
- generic_hdmi_init_per_pins(codec);
- register_i915_notifier(codec);
- return 0;
+ return parse_intel_hdmi(codec);
}
/* Intel IronLake, SandyBridge and IvyBridge; with eld notifier */
static int patch_i915_cpt_hdmi(struct hda_codec *codec)
{
- struct hdmi_spec *spec;
int err;
- /* no i915 component should have been bound before this */
- if (WARN_ON(codec->bus->core.audio_component))
- return -EBUSY;
-
- err = alloc_generic_hdmi(codec);
+ err = alloc_intel_hdmi(codec);
if (err < 0)
return err;
- spec = codec->spec;
-
- /* Try to bind with i915 now */
- err = snd_hdac_i915_init(&codec->bus->core);
- if (err < 0)
- goto error;
- spec->i915_bound = true;
-
- err = hdmi_parse_codec(codec);
- if (err < 0)
- goto error;
-
- generic_hdmi_init_per_pins(codec);
- register_i915_notifier(codec);
- return 0;
-
- error:
- generic_spec_free(codec);
- return err;
+ return parse_intel_hdmi(codec);
}
/*
@@ -2782,21 +2756,21 @@ static int nvhdmi_7x_init_8ch(struct hda_codec *codec)
return 0;
}
-static unsigned int channels_2_6_8[] = {
+static const unsigned int channels_2_6_8[] = {
2, 6, 8
};
-static unsigned int channels_2_8[] = {
+static const unsigned int channels_2_8[] = {
2, 8
};
-static struct snd_pcm_hw_constraint_list hw_constraints_2_6_8_channels = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_2_6_8_channels = {
.count = ARRAY_SIZE(channels_2_6_8),
.list = channels_2_6_8,
.mask = 0,
};
-static struct snd_pcm_hw_constraint_list hw_constraints_2_8_channels = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_2_8_channels = {
.count = ARRAY_SIZE(channels_2_8),
.list = channels_2_8,
.mask = 0,
@@ -2807,7 +2781,7 @@ static int simple_playback_pcm_open(struct hda_pcm_stream *hinfo,
struct snd_pcm_substream *substream)
{
struct hdmi_spec *spec = codec->spec;
- struct snd_pcm_hw_constraint_list *hw_constraints_channels = NULL;
+ const struct snd_pcm_hw_constraint_list *hw_constraints_channels = NULL;
switch (codec->preset->vendor_id) {
case 0x10de0002:
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index cbeebc0a9711..cd6987b5c6d9 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -41,9 +41,6 @@
/* keep halting ALC5505 DSP, for power saving */
#define HALT_REALTEK_ALC5505
-/* for GPIO Poll */
-#define GPIO_MASK 0x03
-
/* extra amp-initialization sequence types */
enum {
ALC_INIT_NONE,
@@ -327,6 +324,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0292:
alc_update_coef_idx(codec, 0x4, 1<<15, 0);
break;
+ case 0x10ec0215:
case 0x10ec0225:
case 0x10ec0233:
case 0x10ec0255:
@@ -335,12 +333,13 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0283:
case 0x10ec0286:
case 0x10ec0288:
+ case 0x10ec0285:
case 0x10ec0295:
case 0x10ec0298:
+ case 0x10ec0289:
case 0x10ec0299:
alc_update_coef_idx(codec, 0x10, 1<<9, 0);
break;
- case 0x10ec0285:
case 0x10ec0293:
alc_update_coef_idx(codec, 0xa, 1<<13, 0);
break;
@@ -2575,18 +2574,37 @@ static int patch_alc262(struct hda_codec *codec)
* ALC268
*/
/* bind Beep switches of both NID 0x0f and 0x10 */
-static const struct hda_bind_ctls alc268_bind_beep_sw = {
- .ops = &snd_hda_bind_sw,
- .values = {
- HDA_COMPOSE_AMP_VAL(0x0f, 3, 1, HDA_INPUT),
- HDA_COMPOSE_AMP_VAL(0x10, 3, 1, HDA_INPUT),
- 0
- },
-};
+static int alc268_beep_switch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ unsigned long pval;
+ int err;
+
+ mutex_lock(&codec->control_mutex);
+ pval = kcontrol->private_value;
+ kcontrol->private_value = (pval & ~0xff) | 0x0f;
+ err = snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
+ if (err >= 0) {
+ kcontrol->private_value = (pval & ~0xff) | 0x10;
+ err = snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
+ }
+ kcontrol->private_value = pval;
+ mutex_unlock(&codec->control_mutex);
+ return err;
+}
static const struct snd_kcontrol_new alc268_beep_mixer[] = {
HDA_CODEC_VOLUME("Beep Playback Volume", 0x1d, 0x0, HDA_INPUT),
- HDA_BIND_SW("Beep Playback Switch", &alc268_bind_beep_sw),
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Beep Playback Switch",
+ .subdevice = HDA_SUBDEV_AMP_FLAG,
+ .info = snd_hda_mixer_amp_switch_info,
+ .get = snd_hda_mixer_amp_switch_get,
+ .put = alc268_beep_switch_put,
+ .private_value = HDA_COMPOSE_AMP_VAL(0x0f, 3, 1, HDA_INPUT)
+ },
{ }
};
@@ -2719,11 +2737,12 @@ enum {
ALC269_TYPE_ALC282,
ALC269_TYPE_ALC283,
ALC269_TYPE_ALC284,
- ALC269_TYPE_ALC285,
+ ALC269_TYPE_ALC293,
ALC269_TYPE_ALC286,
ALC269_TYPE_ALC298,
ALC269_TYPE_ALC255,
ALC269_TYPE_ALC256,
+ ALC269_TYPE_ALC215,
ALC269_TYPE_ALC225,
ALC269_TYPE_ALC294,
ALC269_TYPE_ALC700,
@@ -2745,7 +2764,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
case ALC269_TYPE_ALC269VC:
case ALC269_TYPE_ALC280:
case ALC269_TYPE_ALC284:
- case ALC269_TYPE_ALC285:
+ case ALC269_TYPE_ALC293:
ssids = alc269va_ssids;
break;
case ALC269_TYPE_ALC269VB:
@@ -2756,6 +2775,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
case ALC269_TYPE_ALC298:
case ALC269_TYPE_ALC255:
case ALC269_TYPE_ALC256:
+ case ALC269_TYPE_ALC215:
case ALC269_TYPE_ALC225:
case ALC269_TYPE_ALC294:
case ALC269_TYPE_ALC700:
@@ -3043,6 +3063,135 @@ static void alc283_shutup(struct hda_codec *codec)
alc_write_coef_idx(codec, 0x43, 0x9614);
}
+static void alc256_init(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ bool hp_pin_sense;
+
+ if (!hp_pin)
+ return;
+
+ msleep(30);
+
+ hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+
+ if (hp_pin_sense)
+ msleep(2);
+
+ alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
+
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+ if (hp_pin_sense)
+ msleep(85);
+
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+
+ if (hp_pin_sense)
+ msleep(100);
+
+ alc_update_coef_idx(codec, 0x46, 3 << 12, 0);
+ alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */
+}
+
+static void alc256_shutup(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ bool hp_pin_sense;
+
+ if (!hp_pin) {
+ alc269_shutup(codec);
+ return;
+ }
+
+ hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+
+ if (hp_pin_sense)
+ msleep(2);
+
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+ if (hp_pin_sense)
+ msleep(85);
+
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+ alc_update_coef_idx(codec, 0x46, 0, 3 << 12); /* 3k pull low control for Headset jack. */
+
+ if (hp_pin_sense)
+ msleep(100);
+
+ alc_auto_setup_eapd(codec, false);
+ snd_hda_shutup_pins(codec);
+}
+
+static void alc_default_init(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ bool hp_pin_sense;
+
+ if (!hp_pin)
+ return;
+
+ msleep(30);
+
+ hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+
+ if (hp_pin_sense)
+ msleep(2);
+
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+ if (hp_pin_sense)
+ msleep(85);
+
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+
+ if (hp_pin_sense)
+ msleep(100);
+}
+
+static void alc_default_shutup(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ bool hp_pin_sense;
+
+ if (!hp_pin) {
+ alc269_shutup(codec);
+ return;
+ }
+
+ hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+
+ if (hp_pin_sense)
+ msleep(2);
+
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+ if (hp_pin_sense)
+ msleep(85);
+
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+ if (hp_pin_sense)
+ msleep(100);
+
+ alc_auto_setup_eapd(codec, false);
+ snd_hda_shutup_pins(codec);
+}
+
static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg,
unsigned int val)
{
@@ -3754,6 +3903,16 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
UPDATE_COEF(0x4a, 3<<10, 0),
{}
};
+ static struct coef_fw coef0274[] = {
+ UPDATE_COEF(0x4a, 0x0100, 0),
+ UPDATE_COEFEX(0x57, 0x05, 0x4000, 0),
+ UPDATE_COEF(0x6b, 0xf000, 0x5000),
+ UPDATE_COEF(0x4a, 0x0010, 0),
+ UPDATE_COEF(0x4a, 0x0c00, 0x0c00),
+ WRITE_COEF(0x45, 0x5289),
+ UPDATE_COEF(0x4a, 0x0c00, 0),
+ {}
+ };
switch (codec->core.vendor_id) {
case 0x10ec0255:
@@ -3764,6 +3923,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
alc_process_coef_fw(codec, coef0256);
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0234:
+ case 0x10ec0274:
+ case 0x10ec0294:
+ alc_process_coef_fw(codec, coef0274);
+ break;
case 0x10ec0233:
case 0x10ec0283:
alc_process_coef_fw(codec, coef0233);
@@ -3841,7 +4005,12 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
UPDATE_COEF(0x63, 3<<14, 0),
{}
};
-
+ static struct coef_fw coef0274[] = {
+ UPDATE_COEFEX(0x57, 0x05, 0x4000, 0x4000),
+ UPDATE_COEF(0x4a, 0x0010, 0),
+ UPDATE_COEF(0x6b, 0xf000, 0),
+ {}
+ };
switch (codec->core.vendor_id) {
case 0x10ec0255:
@@ -3851,6 +4020,14 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
alc_process_coef_fw(codec, coef0255);
snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
break;
+ case 0x10ec0234:
+ case 0x10ec0274:
+ case 0x10ec0294:
+ alc_write_coef_idx(codec, 0x45, 0x4689);
+ snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
+ alc_process_coef_fw(codec, coef0274);
+ snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
+ break;
case 0x10ec0233:
case 0x10ec0283:
alc_write_coef_idx(codec, 0x45, 0xc429);
@@ -3948,6 +4125,13 @@ static void alc_headset_mode_default(struct hda_codec *codec)
WRITE_COEF(0xb7, 0x802b),
{}
};
+ static struct coef_fw coef0274[] = {
+ WRITE_COEF(0x45, 0x4289),
+ UPDATE_COEF(0x4a, 0x0010, 0x0010),
+ UPDATE_COEF(0x6b, 0x0f00, 0),
+ UPDATE_COEF(0x49, 0x0300, 0x0300),
+ {}
+ };
switch (codec->core.vendor_id) {
case 0x10ec0225:
@@ -3959,6 +4143,11 @@ static void alc_headset_mode_default(struct hda_codec *codec)
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0234:
+ case 0x10ec0274:
+ case 0x10ec0294:
+ alc_process_coef_fw(codec, coef0274);
+ break;
case 0x10ec0233:
case 0x10ec0283:
alc_process_coef_fw(codec, coef0233);
@@ -4044,6 +4233,11 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
break;
+ case 0x10ec0234:
+ case 0x10ec0274:
+ case 0x10ec0294:
+ alc_write_coef_idx(codec, 0x45, 0xd689);
+ break;
case 0x10ec0233:
case 0x10ec0283:
alc_process_coef_fw(codec, coef0233);
@@ -4138,6 +4332,11 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
break;
+ case 0x10ec0234:
+ case 0x10ec0274:
+ case 0x10ec0294:
+ alc_write_coef_idx(codec, 0x45, 0xe689);
+ break;
case 0x10ec0233:
case 0x10ec0283:
alc_process_coef_fw(codec, coef0233);
@@ -4201,6 +4400,13 @@ static void alc_determine_headset_type(struct hda_codec *codec)
UPDATE_COEF(0x49, 1<<8, 1<<8),
{}
};
+ static struct coef_fw coef0274[] = {
+ UPDATE_COEF(0x4a, 0x0010, 0),
+ UPDATE_COEF(0x4a, 0x8000, 0),
+ WRITE_COEF(0x45, 0xd289),
+ UPDATE_COEF(0x49, 0x0300, 0x0300),
+ {}
+ };
switch (codec->core.vendor_id) {
case 0x10ec0255:
@@ -4210,6 +4416,14 @@ static void alc_determine_headset_type(struct hda_codec *codec)
val = alc_read_coef_idx(codec, 0x46);
is_ctia = (val & 0x0070) == 0x0070;
break;
+ case 0x10ec0234:
+ case 0x10ec0274:
+ case 0x10ec0294:
+ alc_process_coef_fw(codec, coef0274);
+ msleep(80);
+ val = alc_read_coef_idx(codec, 0x46);
+ is_ctia = (val & 0x00f0) == 0x00f0;
+ break;
case 0x10ec0233:
case 0x10ec0283:
alc_write_coef_idx(codec, 0x45, 0xd029);
@@ -4892,6 +5106,7 @@ enum {
ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
+ ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
ALC269_FIXUP_HEADSET_MODE,
ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
ALC269_FIXUP_ASPIRE_HEADSET_MIC,
@@ -5192,6 +5407,16 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
},
+ [ALC269_FIXUP_DELL4_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+ { 0x1b, 0x01a1913d }, /* use as headphone mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE
+ },
[ALC269_FIXUP_HEADSET_MODE] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_headset_mode,
@@ -6322,6 +6547,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x17, 0x90170110},
{0x1a, 0x03011020},
{0x21, 0x03211030}),
+ SND_HDA_PIN_QUIRK(0x10ec0299, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+ ALC225_STANDARD_PINS,
+ {0x12, 0xb7a60130},
+ {0x13, 0xb8a61140},
+ {0x17, 0x90170110}),
{}
};
@@ -6384,7 +6614,8 @@ static int patch_alc269(struct hda_codec *codec)
codec->patch_ops.suspend = alc269_suspend;
codec->patch_ops.resume = alc269_resume;
#endif
- spec->shutup = alc269_shutup;
+ spec->shutup = alc_default_shutup;
+ spec->init_hook = alc_default_init;
snd_hda_pick_fixup(codec, alc269_fixup_models,
alc269_fixup_tbl, alc269_fixups);
@@ -6424,6 +6655,7 @@ static int patch_alc269(struct hda_codec *codec)
}
if (err < 0)
goto error;
+ spec->shutup = alc269_shutup;
spec->init_hook = alc269_fill_coef;
alc269_fill_coef(codec);
break;
@@ -6447,9 +6679,8 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0292:
spec->codec_variant = ALC269_TYPE_ALC284;
break;
- case 0x10ec0285:
case 0x10ec0293:
- spec->codec_variant = ALC269_TYPE_ALC285;
+ spec->codec_variant = ALC269_TYPE_ALC293;
break;
case 0x10ec0286:
case 0x10ec0288:
@@ -6464,9 +6695,17 @@ static int patch_alc269(struct hda_codec *codec)
break;
case 0x10ec0256:
spec->codec_variant = ALC269_TYPE_ALC256;
+ spec->shutup = alc256_shutup;
+ spec->init_hook = alc256_init;
spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
break;
+ case 0x10ec0215:
+ case 0x10ec0285:
+ case 0x10ec0289:
+ spec->codec_variant = ALC269_TYPE_ALC215;
+ spec->gen.mixer_nid = 0;
+ break;
case 0x10ec0225:
case 0x10ec0295:
spec->codec_variant = ALC269_TYPE_ALC225;
@@ -6479,6 +6718,8 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0274:
case 0x10ec0294:
spec->codec_variant = ALC269_TYPE_ALC294;
+ spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
+ alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
break;
case 0x10ec0700:
case 0x10ec0701:
@@ -7495,6 +7736,7 @@ static int patch_alc680(struct hda_codec *codec)
* patch entries
*/
static const struct hda_device_id snd_hda_id_realtek[] = {
+ HDA_CODEC_ENTRY(0x10ec0215, "ALC215", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
@@ -7520,6 +7762,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0285, "ALC285", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0286, "ALC286", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0288, "ALC288", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0289, "ALC289", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0290, "ALC290", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0292, "ALC292", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0293, "ALC293", patch_alc269),
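The new ALC215/ALC285/ALC289 IDs and the headset-mode support for ALC234/274/294 rely on the driver's coef_fw tables: a static array of WRITE_COEF()/UPDATE_COEF() entries describing full writes and read-modify-writes of vendor coefficients, replayed by alc_process_coef_fw(). A minimal illustration of how such a table is declared and applied, using the macros and helper visible in the patch inside patch_realtek.c; the indices and values are only examples:

/* hypothetical coefficient sequence; indices/values are placeholders */
static struct coef_fw example_coefs[] = {
	WRITE_COEF(0x45, 0x5289),		/* full write of coef 0x45 */
	UPDATE_COEF(0x4a, 0x0c00, 0x0c00),	/* set bits 10..11 of coef 0x4a */
	UPDATE_COEF(0x4a, 0x0010, 0),		/* clear bit 4 of coef 0x4a */
	{}					/* terminator */
};

static void example_apply(struct hda_codec *codec)
{
	alc_process_coef_fw(codec, example_coefs);
}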
diff --git a/sound/pci/hda/patch_si3054.c b/sound/pci/hda/patch_si3054.c
index ffda38c45509..f63acb1b965c 100644
--- a/sound/pci/hda/patch_si3054.c
+++ b/sound/pci/hda/patch_si3054.c
@@ -169,8 +169,8 @@ static int si3054_pcm_open(struct hda_pcm_stream *hinfo,
struct hda_codec *codec,
struct snd_pcm_substream *substream)
{
- static unsigned int rates[] = { 8000, 9600, 16000 };
- static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
+ static const unsigned int rates[] = { 8000, 9600, 16000 };
+ static const struct snd_pcm_hw_constraint_list hw_constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index 1d8612cabb9e..0e66afa403a3 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -932,10 +932,10 @@ static int snd_ice1712_pcm_ds(struct snd_ice1712 *ice, int device)
* PCM code - professional part (multitrack)
*/
-static unsigned int rates[] = { 8000, 9600, 11025, 12000, 16000, 22050, 24000,
+static const unsigned int rates[] = { 8000, 9600, 11025, 12000, 16000, 22050, 24000,
32000, 44100, 48000, 64000, 88200, 96000 };
-static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
@@ -1401,7 +1401,7 @@ static struct snd_kcontrol_new snd_ice1712_multi_playback_ctrls[] = {
},
};
-static struct snd_kcontrol_new snd_ice1712_multi_capture_analog_switch = {
+static const struct snd_kcontrol_new snd_ice1712_multi_capture_analog_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "H/W Multi Capture Switch",
.info = snd_ice1712_pro_mixer_switch_info,
@@ -1420,7 +1420,7 @@ static const struct snd_kcontrol_new snd_ice1712_multi_capture_spdif_switch = {
.count = 2,
};
-static struct snd_kcontrol_new snd_ice1712_multi_capture_analog_volume = {
+static const struct snd_kcontrol_new snd_ice1712_multi_capture_analog_volume = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
@@ -2165,7 +2165,7 @@ static int snd_ice1712_pro_route_spdif_put(struct snd_kcontrol *kcontrol,
return change;
}
-static struct snd_kcontrol_new snd_ice1712_mixer_pro_analog_route = {
+static const struct snd_kcontrol_new snd_ice1712_mixer_pro_analog_route = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "H/W Playback Route",
.info = snd_ice1712_pro_route_info,
diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
index 58f8f2ae758d..5cfba09c9761 100644
--- a/sound/pci/ice1712/ice1712.h
+++ b/sound/pci/ice1712/ice1712.h
@@ -348,7 +348,7 @@ struct snd_ice1712 {
struct mutex open_mutex;
struct snd_pcm_substream *pcm_reserved[4];
- struct snd_pcm_hw_constraint_list *hw_rates; /* card-specific rate constraints */
+ const struct snd_pcm_hw_constraint_list *hw_rates; /* card-specific rate constraints */
unsigned int akm_codecs;
struct snd_akm4xxx *akm;
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index 9cd6e55c0642..057c2f394ea7 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -521,25 +521,25 @@ static irqreturn_t snd_vt1724_interrupt(int irq, void *dev_id)
* PCM code - professional part (multitrack)
*/
-static unsigned int rates[] = {
+static const unsigned int rates[] = {
8000, 9600, 11025, 12000, 16000, 22050, 24000,
32000, 44100, 48000, 64000, 88200, 96000,
176400, 192000,
};
-static struct snd_pcm_hw_constraint_list hw_constraints_rates_96 = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_rates_96 = {
.count = ARRAY_SIZE(rates) - 2, /* up to 96000 */
.list = rates,
.mask = 0,
};
-static struct snd_pcm_hw_constraint_list hw_constraints_rates_48 = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_rates_48 = {
.count = ARRAY_SIZE(rates) - 5, /* up to 48000 */
.list = rates,
.mask = 0,
};
-static struct snd_pcm_hw_constraint_list hw_constraints_rates_192 = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_rates_192 = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
@@ -2142,7 +2142,7 @@ static int snd_vt1724_pro_route_spdif_put(struct snd_kcontrol *kcontrol,
digital_route_shift(idx));
}
-static struct snd_kcontrol_new snd_vt1724_mixer_pro_analog_route =
+static const struct snd_kcontrol_new snd_vt1724_mixer_pro_analog_route =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "H/W Playback Route",
diff --git a/sound/pci/ice1712/juli.c b/sound/pci/ice1712/juli.c
index 4f0213427152..5bb146703738 100644
--- a/sound/pci/ice1712/juli.c
+++ b/sound/pci/ice1712/juli.c
@@ -133,19 +133,19 @@ struct juli_spec {
/*
* Initial setup of the conversion array GPIO <-> rate
*/
-static unsigned int juli_rates[] = {
+static const unsigned int juli_rates[] = {
16000, 22050, 24000, 32000,
44100, 48000, 64000, 88200,
96000, 176400, 192000,
};
-static unsigned int gpio_vals[] = {
+static const unsigned int gpio_vals[] = {
GPIO_RATE_16000, GPIO_RATE_22050, GPIO_RATE_24000, GPIO_RATE_32000,
GPIO_RATE_44100, GPIO_RATE_48000, GPIO_RATE_64000, GPIO_RATE_88200,
GPIO_RATE_96000, GPIO_RATE_176400, GPIO_RATE_192000,
};
-static struct snd_pcm_hw_constraint_list juli_rates_info = {
+static const struct snd_pcm_hw_constraint_list juli_rates_info = {
.count = ARRAY_SIZE(juli_rates),
.list = juli_rates,
.mask = 0,
diff --git a/sound/pci/ice1712/maya44.c b/sound/pci/ice1712/maya44.c
index 7de25c4807fd..0e30419f6bbd 100644
--- a/sound/pci/ice1712/maya44.c
+++ b/sound/pci/ice1712/maya44.c
@@ -661,12 +661,12 @@ static void set_rate(struct snd_ice1712 *ice, unsigned int rate)
* supported sample rates (to override the default one)
*/
-static unsigned int rates[] = {
+static const unsigned int rates[] = {
32000, 44100, 48000, 64000, 88200, 96000, 176400, 192000
};
/* playback rates: 32..192 kHz */
-static struct snd_pcm_hw_constraint_list dac_rates = {
+static const struct snd_pcm_hw_constraint_list dac_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0
diff --git a/sound/pci/ice1712/quartet.c b/sound/pci/ice1712/quartet.c
index 7c387b04067e..f1b3732cc6d2 100644
--- a/sound/pci/ice1712/quartet.c
+++ b/sound/pci/ice1712/quartet.c
@@ -231,17 +231,17 @@ static char *get_binary(char *buffer, int value)
/*
* Initial setup of the conversion array GPIO <-> rate
*/
-static unsigned int qtet_rates[] = {
+static const unsigned int qtet_rates[] = {
44100, 48000, 88200,
96000, 176400, 192000,
};
-static unsigned int cks_vals[] = {
+static const unsigned int cks_vals[] = {
CPLD_CKS_44100HZ, CPLD_CKS_48000HZ, CPLD_CKS_88200HZ,
CPLD_CKS_96000HZ, CPLD_CKS_176400HZ, CPLD_CKS_192000HZ,
};
-static struct snd_pcm_hw_constraint_list qtet_rates_info = {
+static const struct snd_pcm_hw_constraint_list qtet_rates_info = {
.count = ARRAY_SIZE(qtet_rates),
.list = qtet_rates,
.mask = 0,
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 6d17b171c17b..a8d7092e93dd 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -1136,31 +1136,31 @@ static struct snd_pcm_hardware snd_intel8x0_stream =
.fifo_size = 0,
};
-static unsigned int channels4[] = {
+static const unsigned int channels4[] = {
2, 4,
};
-static struct snd_pcm_hw_constraint_list hw_constraints_channels4 = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_channels4 = {
.count = ARRAY_SIZE(channels4),
.list = channels4,
.mask = 0,
};
-static unsigned int channels6[] = {
+static const unsigned int channels6[] = {
2, 4, 6,
};
-static struct snd_pcm_hw_constraint_list hw_constraints_channels6 = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_channels6 = {
.count = ARRAY_SIZE(channels6),
.list = channels6,
.mask = 0,
};
-static unsigned int channels8[] = {
+static const unsigned int channels8[] = {
2, 4, 6, 8,
};
-static struct snd_pcm_hw_constraint_list hw_constraints_channels8 = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_channels8 = {
.count = ARRAY_SIZE(channels8),
.list = channels8,
.mask = 0,
diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
index 1bc98c867133..18ff668ce7a5 100644
--- a/sound/pci/intel8x0m.c
+++ b/sound/pci/intel8x0m.c
@@ -635,8 +635,8 @@ static struct snd_pcm_hardware snd_intel8x0m_stream =
static int snd_intel8x0m_pcm_open(struct snd_pcm_substream *substream, struct ichdev *ichdev)
{
- static unsigned int rates[] = { 8000, 9600, 12000, 16000 };
- static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
+ static const unsigned int rates[] = { 8000, 9600, 12000, 16000 };
+ static const struct snd_pcm_hw_constraint_list hw_constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
index 1e25095fd144..b28fe4914d6b 100644
--- a/sound/pci/korg1212/korg1212.c
+++ b/sound/pci/korg1212/korg1212.c
@@ -1299,13 +1299,21 @@ static int snd_korg1212_silence(struct snd_korg1212 *korg1212, int pos, int coun
return 0;
}
-static int snd_korg1212_copy_to(struct snd_korg1212 *korg1212, void __user *dst, int pos, int count, int offset, int size)
+static int snd_korg1212_copy_to(struct snd_pcm_substream *substream,
+ void __user *dst, int pos, int count,
+ bool in_kernel)
{
- struct KorgAudioFrame * src = korg1212->recordDataBufsPtr[0].bufferData + pos;
- int i, rc;
-
- K1212_DEBUG_PRINTK_VERBOSE("K1212_DEBUG: snd_korg1212_copy_to pos=%d offset=%d size=%d\n",
- pos, offset, size);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_korg1212 *korg1212 = snd_pcm_substream_chip(substream);
+ struct KorgAudioFrame *src;
+ int i, size;
+
+ pos = bytes_to_frames(runtime, pos);
+ count = bytes_to_frames(runtime, count);
+ size = korg1212->channels * 2;
+ src = korg1212->recordDataBufsPtr[0].bufferData + pos;
+ K1212_DEBUG_PRINTK_VERBOSE("K1212_DEBUG: snd_korg1212_copy_to pos=%d size=%d count=%d\n",
+ pos, size, count);
if (snd_BUG_ON(pos + count > K1212_MAX_SAMPLES))
return -EINVAL;
@@ -1317,11 +1325,10 @@ static int snd_korg1212_copy_to(struct snd_korg1212 *korg1212, void __user *dst,
return -EFAULT;
}
#endif
- rc = copy_to_user(dst + offset, src, size);
- if (rc) {
- K1212_DEBUG_PRINTK("K1212_DEBUG: snd_korg1212_copy_to USER EFAULT src=%p dst=%p iter=%d\n", src, dst, i);
+ if (in_kernel)
+ memcpy((void *)dst, src, size);
+ else if (copy_to_user(dst, src, size))
return -EFAULT;
- }
src++;
dst += size;
}
@@ -1329,13 +1336,22 @@ static int snd_korg1212_copy_to(struct snd_korg1212 *korg1212, void __user *dst,
return 0;
}
-static int snd_korg1212_copy_from(struct snd_korg1212 *korg1212, void __user *src, int pos, int count, int offset, int size)
+static int snd_korg1212_copy_from(struct snd_pcm_substream *substream,
+ void __user *src, int pos, int count,
+ bool in_kernel)
{
- struct KorgAudioFrame * dst = korg1212->playDataBufsPtr[0].bufferData + pos;
- int i, rc;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_korg1212 *korg1212 = snd_pcm_substream_chip(substream);
+ struct KorgAudioFrame *dst;
+ int i, size;
- K1212_DEBUG_PRINTK_VERBOSE("K1212_DEBUG: snd_korg1212_copy_from pos=%d offset=%d size=%d count=%d\n",
- pos, offset, size, count);
+ pos = bytes_to_frames(runtime, pos);
+ count = bytes_to_frames(runtime, count);
+ size = korg1212->channels * 2;
+ dst = korg1212->playDataBufsPtr[0].bufferData + pos;
+
+ K1212_DEBUG_PRINTK_VERBOSE("K1212_DEBUG: snd_korg1212_copy_from pos=%d size=%d count=%d\n",
+ pos, size, count);
if (snd_BUG_ON(pos + count > K1212_MAX_SAMPLES))
return -EINVAL;
@@ -1348,11 +1364,10 @@ static int snd_korg1212_copy_from(struct snd_korg1212 *korg1212, void __user *sr
return -EFAULT;
}
#endif
- rc = copy_from_user((void*) dst + offset, src, size);
- if (rc) {
- K1212_DEBUG_PRINTK("K1212_DEBUG: snd_korg1212_copy_from USER EFAULT src=%p dst=%p iter=%d\n", src, dst, i);
+ if (in_kernel)
+ memcpy((void *)dst, src, size);
+ else if (copy_from_user(dst, src, size))
return -EFAULT;
- }
dst++;
src += size;
}
@@ -1640,45 +1655,46 @@ static snd_pcm_uframes_t snd_korg1212_capture_pointer(struct snd_pcm_substream *
}
static int snd_korg1212_playback_copy(struct snd_pcm_substream *substream,
- int channel, /* not used (interleaved data) */
- snd_pcm_uframes_t pos,
- void __user *src,
- snd_pcm_uframes_t count)
+ int channel, unsigned long pos,
+ void __user *src, unsigned long count)
{
- struct snd_korg1212 *korg1212 = snd_pcm_substream_chip(substream);
-
- K1212_DEBUG_PRINTK_VERBOSE("K1212_DEBUG: snd_korg1212_playback_copy [%s] %ld %ld\n",
- stateName[korg1212->cardState], pos, count);
-
- return snd_korg1212_copy_from(korg1212, src, pos, count, 0, korg1212->channels * 2);
+ return snd_korg1212_copy_from(substream, src, pos, count, false);
+}
+static int snd_korg1212_playback_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *src, unsigned long count)
+{
+ return snd_korg1212_copy_from(substream, (void __user *)src,
+ pos, count, true);
}
static int snd_korg1212_playback_silence(struct snd_pcm_substream *substream,
int channel, /* not used (interleaved data) */
- snd_pcm_uframes_t pos,
- snd_pcm_uframes_t count)
+ unsigned long pos,
+ unsigned long count)
{
+ struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_korg1212 *korg1212 = snd_pcm_substream_chip(substream);
- K1212_DEBUG_PRINTK_VERBOSE("K1212_DEBUG: snd_korg1212_playback_silence [%s]\n",
- stateName[korg1212->cardState]);
-
- return snd_korg1212_silence(korg1212, pos, count, 0, korg1212->channels * 2);
+ return snd_korg1212_silence(korg1212, bytes_to_frames(runtime, pos),
+ bytes_to_frames(runtime, count),
+ 0, korg1212->channels * 2);
}
static int snd_korg1212_capture_copy(struct snd_pcm_substream *substream,
- int channel, /* not used (interleaved data) */
- snd_pcm_uframes_t pos,
- void __user *dst,
- snd_pcm_uframes_t count)
+ int channel, unsigned long pos,
+ void __user *dst, unsigned long count)
{
- struct snd_korg1212 *korg1212 = snd_pcm_substream_chip(substream);
-
- K1212_DEBUG_PRINTK_VERBOSE("K1212_DEBUG: snd_korg1212_capture_copy [%s] %ld %ld\n",
- stateName[korg1212->cardState], pos, count);
+ return snd_korg1212_copy_to(substream, dst, pos, count, false);
+}
- return snd_korg1212_copy_to(korg1212, dst, pos, count, 0, korg1212->channels * 2);
+static int snd_korg1212_capture_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *dst, unsigned long count)
+{
+ return snd_korg1212_copy_to(substream, (void __user *)dst,
+ pos, count, true);
}
static const struct snd_pcm_ops snd_korg1212_playback_ops = {
@@ -1689,8 +1705,9 @@ static const struct snd_pcm_ops snd_korg1212_playback_ops = {
.prepare = snd_korg1212_prepare,
.trigger = snd_korg1212_trigger,
.pointer = snd_korg1212_playback_pointer,
- .copy = snd_korg1212_playback_copy,
- .silence = snd_korg1212_playback_silence,
+ .copy_user = snd_korg1212_playback_copy,
+ .copy_kernel = snd_korg1212_playback_copy_kernel,
+ .fill_silence = snd_korg1212_playback_silence,
};
static const struct snd_pcm_ops snd_korg1212_capture_ops = {
@@ -1701,7 +1718,8 @@ static const struct snd_pcm_ops snd_korg1212_capture_ops = {
.prepare = snd_korg1212_prepare,
.trigger = snd_korg1212_trigger,
.pointer = snd_korg1212_capture_pointer,
- .copy = snd_korg1212_capture_copy,
+ .copy_user = snd_korg1212_capture_copy,
+ .copy_kernel = snd_korg1212_capture_copy_kernel,
};
/*
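
The korg1212 hunks above show the general shape of the new PCM copy API: pos and count arrive in bytes rather than frames, the callbacks return 0 on success instead of a frame count, and in-kernel copies get their own copy_kernel entry next to copy_user and fill_silence. A minimal sketch of the three callbacks for a hypothetical driver with an ordinary interleaved RAM buffer (the my_* names and struct layout are assumptions, not part of the patch):

/* Sketch only: byte-based copy/silence callbacks for an interleaved buffer. */
#include <sound/pcm.h>
#include <linux/uaccess.h>

struct my_chip {
	unsigned char *dma_area;	/* interleaved sample buffer */
};

static int my_copy_user(struct snd_pcm_substream *substream, int channel,
			unsigned long pos, void __user *src,
			unsigned long count)
{
	struct my_chip *chip = snd_pcm_substream_chip(substream);

	/* pos and count are already byte offsets/sizes with the new API */
	if (copy_from_user(chip->dma_area + pos, src, count))
		return -EFAULT;
	return 0;
}

static int my_copy_kernel(struct snd_pcm_substream *substream, int channel,
			  unsigned long pos, void *src, unsigned long count)
{
	struct my_chip *chip = snd_pcm_substream_chip(substream);

	memcpy(chip->dma_area + pos, src, count);
	return 0;
}

static int my_fill_silence(struct snd_pcm_substream *substream, int channel,
			   unsigned long pos, unsigned long count)
{
	struct my_chip *chip = snd_pcm_substream_chip(substream);

	memset(chip->dma_area + pos, 0, count);
	return 0;
}

static const struct snd_pcm_ops my_playback_ops = {
	/* open/close/ioctl/hw_params/prepare/trigger/pointer omitted */
	.copy_user	= my_copy_user,
	.copy_kernel	= my_copy_kernel,
	.fill_silence	= my_fill_silence,
};
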
diff --git a/sound/pci/mixart/mixart_mixer.c b/sound/pci/mixart/mixart_mixer.c
index 4a4616aac787..2b9496a66c77 100644
--- a/sound/pci/mixart/mixart_mixer.c
+++ b/sound/pci/mixart/mixart_mixer.c
@@ -404,7 +404,7 @@ static int mixart_analog_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e
static const DECLARE_TLV_DB_SCALE(db_scale_analog, -9600, 50, 0);
-static struct snd_kcontrol_new mixart_control_analog_level = {
+static const struct snd_kcontrol_new mixart_control_analog_level = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
@@ -897,7 +897,7 @@ static int mixart_pcm_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem
static const DECLARE_TLV_DB_SCALE(db_scale_digital, -10950, 50, 0);
-static struct snd_kcontrol_new snd_mixart_pcm_vol =
+static const struct snd_kcontrol_new snd_mixart_pcm_vol =
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
@@ -951,7 +951,7 @@ static int mixart_pcm_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
return changed;
}
-static struct snd_kcontrol_new mixart_control_pcm_switch = {
+static const struct snd_kcontrol_new mixart_control_pcm_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
/* name will be filled later */
.count = MIXART_PLAYBACK_STREAMS,
diff --git a/sound/pci/nm256/nm256.c b/sound/pci/nm256/nm256.c
index 103fe311e5a9..0ef8054c3936 100644
--- a/sound/pci/nm256/nm256.c
+++ b/sound/pci/nm256/nm256.c
@@ -398,10 +398,10 @@ snd_nm256_load_coefficient(struct nm256 *chip, int stream, int number)
/* The actual rates supported by the card. */
-static unsigned int samplerates[8] = {
+static const unsigned int samplerates[8] = {
8000, 11025, 16000, 22050, 24000, 32000, 44100, 48000,
};
-static struct snd_pcm_hw_constraint_list constraints_rates = {
+static const struct snd_pcm_hw_constraint_list constraints_rates = {
.count = ARRAY_SIZE(samplerates),
.list = samplerates,
.mask = 0,
@@ -695,53 +695,68 @@ snd_nm256_capture_pointer(struct snd_pcm_substream *substream)
*/
static int
snd_nm256_playback_silence(struct snd_pcm_substream *substream,
- int channel, /* not used (interleaved data) */
- snd_pcm_uframes_t pos,
- snd_pcm_uframes_t count)
+ int channel, unsigned long pos, unsigned long count)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct nm256_stream *s = runtime->private_data;
- count = frames_to_bytes(runtime, count);
- pos = frames_to_bytes(runtime, pos);
+
memset_io(s->bufptr + pos, 0, count);
return 0;
}
static int
snd_nm256_playback_copy(struct snd_pcm_substream *substream,
- int channel, /* not used (interleaved data) */
- snd_pcm_uframes_t pos,
- void __user *src,
- snd_pcm_uframes_t count)
+ int channel, unsigned long pos,
+ void __user *src, unsigned long count)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct nm256_stream *s = runtime->private_data;
- count = frames_to_bytes(runtime, count);
- pos = frames_to_bytes(runtime, pos);
+
if (copy_from_user_toio(s->bufptr + pos, src, count))
return -EFAULT;
return 0;
}
+static int
+snd_nm256_playback_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *src, unsigned long count)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct nm256_stream *s = runtime->private_data;
+
+ memcpy_toio(s->bufptr + pos, src, count);
+ return 0;
+}
+
/*
* copy to user
*/
static int
snd_nm256_capture_copy(struct snd_pcm_substream *substream,
- int channel, /* not used (interleaved data) */
- snd_pcm_uframes_t pos,
- void __user *dst,
- snd_pcm_uframes_t count)
+ int channel, unsigned long pos,
+ void __user *dst, unsigned long count)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct nm256_stream *s = runtime->private_data;
- count = frames_to_bytes(runtime, count);
- pos = frames_to_bytes(runtime, pos);
+
if (copy_to_user_fromio(dst, s->bufptr + pos, count))
return -EFAULT;
return 0;
}
+static int
+snd_nm256_capture_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *dst, unsigned long count)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct nm256_stream *s = runtime->private_data;
+
+ memcpy_fromio(dst, s->bufptr + pos, count);
+ return 0;
+}
+
#endif /* !__i386__ */
@@ -911,8 +926,9 @@ static const struct snd_pcm_ops snd_nm256_playback_ops = {
.trigger = snd_nm256_playback_trigger,
.pointer = snd_nm256_playback_pointer,
#ifndef __i386__
- .copy = snd_nm256_playback_copy,
- .silence = snd_nm256_playback_silence,
+ .copy_user = snd_nm256_playback_copy,
+ .copy_kernel = snd_nm256_playback_copy_kernel,
+ .fill_silence = snd_nm256_playback_silence,
#endif
.mmap = snd_pcm_lib_mmap_iomem,
};
@@ -926,7 +942,8 @@ static const struct snd_pcm_ops snd_nm256_capture_ops = {
.trigger = snd_nm256_capture_trigger,
.pointer = snd_nm256_capture_pointer,
#ifndef __i386__
- .copy = snd_nm256_capture_copy,
+ .copy_user = snd_nm256_capture_copy,
+ .copy_kernel = snd_nm256_capture_copy_kernel,
#endif
.mmap = snd_pcm_lib_mmap_iomem,
};
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index 96d15db65dfd..e4cdef94e4a2 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -254,39 +254,46 @@ static inline unsigned int snd_rme32_pcm_byteptr(struct rme32 * rme32)
}
/* silence callback for halfduplex mode */
-static int snd_rme32_playback_silence(struct snd_pcm_substream *substream, int channel, /* not used (interleaved data) */
- snd_pcm_uframes_t pos,
- snd_pcm_uframes_t count)
+static int snd_rme32_playback_silence(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ unsigned long count)
{
struct rme32 *rme32 = snd_pcm_substream_chip(substream);
- count <<= rme32->playback_frlog;
- pos <<= rme32->playback_frlog;
+
memset_io(rme32->iobase + RME32_IO_DATA_BUFFER + pos, 0, count);
return 0;
}
/* copy callback for halfduplex mode */
-static int snd_rme32_playback_copy(struct snd_pcm_substream *substream, int channel, /* not used (interleaved data) */
- snd_pcm_uframes_t pos,
- void __user *src, snd_pcm_uframes_t count)
+static int snd_rme32_playback_copy(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void __user *src, unsigned long count)
{
struct rme32 *rme32 = snd_pcm_substream_chip(substream);
- count <<= rme32->playback_frlog;
- pos <<= rme32->playback_frlog;
+
if (copy_from_user_toio(rme32->iobase + RME32_IO_DATA_BUFFER + pos,
- src, count))
+ src, count))
return -EFAULT;
return 0;
}
+static int snd_rme32_playback_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *src, unsigned long count)
+{
+ struct rme32 *rme32 = snd_pcm_substream_chip(substream);
+
+ memcpy_toio(rme32->iobase + RME32_IO_DATA_BUFFER + pos, src, count);
+ return 0;
+}
+
/* copy callback for halfduplex mode */
-static int snd_rme32_capture_copy(struct snd_pcm_substream *substream, int channel, /* not used (interleaved data) */
- snd_pcm_uframes_t pos,
- void __user *dst, snd_pcm_uframes_t count)
+static int snd_rme32_capture_copy(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void __user *dst, unsigned long count)
{
struct rme32 *rme32 = snd_pcm_substream_chip(substream);
- count <<= rme32->capture_frlog;
- pos <<= rme32->capture_frlog;
+
if (copy_to_user_fromio(dst,
rme32->iobase + RME32_IO_DATA_BUFFER + pos,
count))
@@ -294,6 +301,16 @@ static int snd_rme32_capture_copy(struct snd_pcm_substream *substream, int chann
return 0;
}
+static int snd_rme32_capture_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *dst, unsigned long count)
+{
+ struct rme32 *rme32 = snd_pcm_substream_chip(substream);
+
+ memcpy_fromio(dst, rme32->iobase + RME32_IO_DATA_BUFFER + pos, count);
+ return 0;
+}
+
/*
* SPDIF I/O capabilities (half-duplex mode)
*/
@@ -819,10 +836,9 @@ static irqreturn_t snd_rme32_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static unsigned int period_bytes[] = { RME32_BLOCK_SIZE };
-
+static const unsigned int period_bytes[] = { RME32_BLOCK_SIZE };
-static struct snd_pcm_hw_constraint_list hw_constraints_period_bytes = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_period_bytes = {
.count = ARRAY_SIZE(period_bytes),
.list = period_bytes,
.mask = 0
@@ -1157,9 +1173,8 @@ static int snd_rme32_playback_fd_ack(struct snd_pcm_substream *substream)
if (rme32->running & (1 << SNDRV_PCM_STREAM_CAPTURE))
rec->hw_queue_size -= cprec->hw_ready;
spin_unlock(&rme32->lock);
- snd_pcm_indirect_playback_transfer(substream, rec,
- snd_rme32_pb_trans_copy);
- return 0;
+ return snd_pcm_indirect_playback_transfer(substream, rec,
+ snd_rme32_pb_trans_copy);
}
static void snd_rme32_cp_trans_copy(struct snd_pcm_substream *substream,
@@ -1174,9 +1189,8 @@ static void snd_rme32_cp_trans_copy(struct snd_pcm_substream *substream,
static int snd_rme32_capture_fd_ack(struct snd_pcm_substream *substream)
{
struct rme32 *rme32 = snd_pcm_substream_chip(substream);
- snd_pcm_indirect_capture_transfer(substream, &rme32->capture_pcm,
- snd_rme32_cp_trans_copy);
- return 0;
+ return snd_pcm_indirect_capture_transfer(substream, &rme32->capture_pcm,
+ snd_rme32_cp_trans_copy);
}
static snd_pcm_uframes_t
@@ -1205,8 +1219,9 @@ static const struct snd_pcm_ops snd_rme32_playback_spdif_ops = {
.prepare = snd_rme32_playback_prepare,
.trigger = snd_rme32_pcm_trigger,
.pointer = snd_rme32_playback_pointer,
- .copy = snd_rme32_playback_copy,
- .silence = snd_rme32_playback_silence,
+ .copy_user = snd_rme32_playback_copy,
+ .copy_kernel = snd_rme32_playback_copy_kernel,
+ .fill_silence = snd_rme32_playback_silence,
.mmap = snd_pcm_lib_mmap_iomem,
};
@@ -1219,7 +1234,8 @@ static const struct snd_pcm_ops snd_rme32_capture_spdif_ops = {
.prepare = snd_rme32_capture_prepare,
.trigger = snd_rme32_pcm_trigger,
.pointer = snd_rme32_capture_pointer,
- .copy = snd_rme32_capture_copy,
+ .copy_user = snd_rme32_capture_copy,
+ .copy_kernel = snd_rme32_capture_copy_kernel,
.mmap = snd_pcm_lib_mmap_iomem,
};
@@ -1231,8 +1247,9 @@ static const struct snd_pcm_ops snd_rme32_playback_adat_ops = {
.prepare = snd_rme32_playback_prepare,
.trigger = snd_rme32_pcm_trigger,
.pointer = snd_rme32_playback_pointer,
- .copy = snd_rme32_playback_copy,
- .silence = snd_rme32_playback_silence,
+ .copy_user = snd_rme32_playback_copy,
+ .copy_kernel = snd_rme32_playback_copy_kernel,
+ .fill_silence = snd_rme32_playback_silence,
.mmap = snd_pcm_lib_mmap_iomem,
};
@@ -1244,7 +1261,8 @@ static const struct snd_pcm_ops snd_rme32_capture_adat_ops = {
.prepare = snd_rme32_capture_prepare,
.trigger = snd_rme32_pcm_trigger,
.pointer = snd_rme32_capture_pointer,
- .copy = snd_rme32_capture_copy,
+ .copy_user = snd_rme32_capture_copy,
+ .copy_kernel = snd_rme32_capture_copy_kernel,
.mmap = snd_pcm_lib_mmap_iomem,
};
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 05b9da30990d..2e19ba55e754 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -327,13 +327,10 @@ snd_rme96_capture_ptr(struct rme96 *rme96)
static int
snd_rme96_playback_silence(struct snd_pcm_substream *substream,
- int channel, /* not used (interleaved data) */
- snd_pcm_uframes_t pos,
- snd_pcm_uframes_t count)
+ int channel, unsigned long pos, unsigned long count)
{
struct rme96 *rme96 = snd_pcm_substream_chip(substream);
- count <<= rme96->playback_frlog;
- pos <<= rme96->playback_frlog;
+
memset_io(rme96->iobase + RME96_IO_PLAY_BUFFER + pos,
0, count);
return 0;
@@ -341,32 +338,49 @@ snd_rme96_playback_silence(struct snd_pcm_substream *substream,
static int
snd_rme96_playback_copy(struct snd_pcm_substream *substream,
- int channel, /* not used (interleaved data) */
- snd_pcm_uframes_t pos,
- void __user *src,
- snd_pcm_uframes_t count)
+ int channel, unsigned long pos,
+ void __user *src, unsigned long count)
{
struct rme96 *rme96 = snd_pcm_substream_chip(substream);
- count <<= rme96->playback_frlog;
- pos <<= rme96->playback_frlog;
- return copy_from_user_toio(rme96->iobase + RME96_IO_PLAY_BUFFER + pos, src,
- count);
+
+ return copy_from_user_toio(rme96->iobase + RME96_IO_PLAY_BUFFER + pos,
+ src, count);
+}
+
+static int
+snd_rme96_playback_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *src, unsigned long count)
+{
+ struct rme96 *rme96 = snd_pcm_substream_chip(substream);
+
+ memcpy_toio(rme96->iobase + RME96_IO_PLAY_BUFFER + pos, src, count);
+ return 0;
}
static int
snd_rme96_capture_copy(struct snd_pcm_substream *substream,
- int channel, /* not used (interleaved data) */
- snd_pcm_uframes_t pos,
- void __user *dst,
- snd_pcm_uframes_t count)
+ int channel, unsigned long pos,
+ void __user *dst, unsigned long count)
{
struct rme96 *rme96 = snd_pcm_substream_chip(substream);
- count <<= rme96->capture_frlog;
- pos <<= rme96->capture_frlog;
- return copy_to_user_fromio(dst, rme96->iobase + RME96_IO_REC_BUFFER + pos,
+
+ return copy_to_user_fromio(dst,
+ rme96->iobase + RME96_IO_REC_BUFFER + pos,
count);
}
+static int
+snd_rme96_capture_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *dst, unsigned long count)
+{
+ struct rme96 *rme96 = snd_pcm_substream_chip(substream);
+
+ memcpy_fromio(dst, rme96->iobase + RME96_IO_REC_BUFFER + pos, count);
+ return 0;
+}
+
/*
* Digital output capabilities (S/PDIF)
*/
@@ -1149,9 +1163,9 @@ snd_rme96_interrupt(int irq,
return IRQ_HANDLED;
}
-static unsigned int period_bytes[] = { RME96_SMALL_BLOCK_SIZE, RME96_LARGE_BLOCK_SIZE };
+static const unsigned int period_bytes[] = { RME96_SMALL_BLOCK_SIZE, RME96_LARGE_BLOCK_SIZE };
-static struct snd_pcm_hw_constraint_list hw_constraints_period_bytes = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_period_bytes = {
.count = ARRAY_SIZE(period_bytes),
.list = period_bytes,
.mask = 0
@@ -1513,8 +1527,9 @@ static const struct snd_pcm_ops snd_rme96_playback_spdif_ops = {
.prepare = snd_rme96_playback_prepare,
.trigger = snd_rme96_playback_trigger,
.pointer = snd_rme96_playback_pointer,
- .copy = snd_rme96_playback_copy,
- .silence = snd_rme96_playback_silence,
+ .copy_user = snd_rme96_playback_copy,
+ .copy_kernel = snd_rme96_playback_copy_kernel,
+ .fill_silence = snd_rme96_playback_silence,
.mmap = snd_pcm_lib_mmap_iomem,
};
@@ -1526,7 +1541,8 @@ static const struct snd_pcm_ops snd_rme96_capture_spdif_ops = {
.prepare = snd_rme96_capture_prepare,
.trigger = snd_rme96_capture_trigger,
.pointer = snd_rme96_capture_pointer,
- .copy = snd_rme96_capture_copy,
+ .copy_user = snd_rme96_capture_copy,
+ .copy_kernel = snd_rme96_capture_copy_kernel,
.mmap = snd_pcm_lib_mmap_iomem,
};
@@ -1538,8 +1554,9 @@ static const struct snd_pcm_ops snd_rme96_playback_adat_ops = {
.prepare = snd_rme96_playback_prepare,
.trigger = snd_rme96_playback_trigger,
.pointer = snd_rme96_playback_pointer,
- .copy = snd_rme96_playback_copy,
- .silence = snd_rme96_playback_silence,
+ .copy_user = snd_rme96_playback_copy,
+ .copy_kernel = snd_rme96_playback_copy_kernel,
+ .fill_silence = snd_rme96_playback_silence,
.mmap = snd_pcm_lib_mmap_iomem,
};
@@ -1551,7 +1568,8 @@ static const struct snd_pcm_ops snd_rme96_capture_adat_ops = {
.prepare = snd_rme96_capture_prepare,
.trigger = snd_rme96_capture_trigger,
.pointer = snd_rme96_capture_pointer,
- .copy = snd_rme96_capture_copy,
+ .copy_user = snd_rme96_capture_copy,
+ .copy_kernel = snd_rme96_capture_copy_kernel,
.mmap = snd_pcm_lib_mmap_iomem,
};
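
For devices such as rme32, rme96 and nm256, whose sample buffer is memory-mapped I/O rather than regular RAM, the same split uses the io-aware helpers instead, and the ops keep .mmap = snd_pcm_lib_mmap_iomem as in the hunks above. A hypothetical pair along those lines (the my_* names, the iobase field and MY_BUF_OFFSET are assumptions):

#include <sound/core.h>
#include <sound/pcm.h>
#include <linux/io.h>

struct my_iomem_chip {
	void __iomem *iobase;		/* BAR mapping of the card */
};

#define MY_BUF_OFFSET	0x4000		/* assumed on-card buffer offset */

static int my_iomem_copy_user(struct snd_pcm_substream *substream, int channel,
			      unsigned long pos, void __user *src,
			      unsigned long count)
{
	struct my_iomem_chip *chip = snd_pcm_substream_chip(substream);

	/* copy_from_user_toio() combines the user access with MMIO writes */
	if (copy_from_user_toio(chip->iobase + MY_BUF_OFFSET + pos, src, count))
		return -EFAULT;
	return 0;
}

static int my_iomem_copy_kernel(struct snd_pcm_substream *substream, int channel,
				unsigned long pos, void *src,
				unsigned long count)
{
	struct my_iomem_chip *chip = snd_pcm_substream_chip(substream);

	memcpy_toio(chip->iobase + MY_BUF_OFFSET + pos, src, count);
	return 0;
}
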
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index fc0face6cdc6..fe36d44d16c6 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -3913,42 +3913,73 @@ static char *hdsp_channel_buffer_location(struct hdsp *hdsp,
return hdsp->playback_buffer + (mapped_channel * HDSP_CHANNEL_BUFFER_BYTES);
}
-static int snd_hdsp_playback_copy(struct snd_pcm_substream *substream, int channel,
- snd_pcm_uframes_t pos, void __user *src, snd_pcm_uframes_t count)
+static int snd_hdsp_playback_copy(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void __user *src, unsigned long count)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
char *channel_buf;
- if (snd_BUG_ON(pos + count > HDSP_CHANNEL_BUFFER_BYTES / 4))
+ if (snd_BUG_ON(pos + count > HDSP_CHANNEL_BUFFER_BYTES))
return -EINVAL;
channel_buf = hdsp_channel_buffer_location (hdsp, substream->pstr->stream, channel);
if (snd_BUG_ON(!channel_buf))
return -EIO;
- if (copy_from_user(channel_buf + pos * 4, src, count * 4))
+ if (copy_from_user(channel_buf + pos, src, count))
return -EFAULT;
- return count;
+ return 0;
}
-static int snd_hdsp_capture_copy(struct snd_pcm_substream *substream, int channel,
- snd_pcm_uframes_t pos, void __user *dst, snd_pcm_uframes_t count)
+static int snd_hdsp_playback_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *src, unsigned long count)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
char *channel_buf;
- if (snd_BUG_ON(pos + count > HDSP_CHANNEL_BUFFER_BYTES / 4))
+ channel_buf = hdsp_channel_buffer_location(hdsp, substream->pstr->stream, channel);
+ if (snd_BUG_ON(!channel_buf))
+ return -EIO;
+ memcpy(channel_buf + pos, src, count);
+ return 0;
+}
+
+static int snd_hdsp_capture_copy(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void __user *dst, unsigned long count)
+{
+ struct hdsp *hdsp = snd_pcm_substream_chip(substream);
+ char *channel_buf;
+
+ if (snd_BUG_ON(pos + count > HDSP_CHANNEL_BUFFER_BYTES))
return -EINVAL;
channel_buf = hdsp_channel_buffer_location (hdsp, substream->pstr->stream, channel);
if (snd_BUG_ON(!channel_buf))
return -EIO;
- if (copy_to_user(dst, channel_buf + pos * 4, count * 4))
+ if (copy_to_user(dst, channel_buf + pos, count))
return -EFAULT;
- return count;
+ return 0;
}
-static int snd_hdsp_hw_silence(struct snd_pcm_substream *substream, int channel,
- snd_pcm_uframes_t pos, snd_pcm_uframes_t count)
+static int snd_hdsp_capture_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *dst, unsigned long count)
+{
+ struct hdsp *hdsp = snd_pcm_substream_chip(substream);
+ char *channel_buf;
+
+ channel_buf = hdsp_channel_buffer_location(hdsp, substream->pstr->stream, channel);
+ if (snd_BUG_ON(!channel_buf))
+ return -EIO;
+ memcpy(dst, channel_buf + pos, count);
+ return 0;
+}
+
+static int snd_hdsp_hw_silence(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ unsigned long count)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
char *channel_buf;
@@ -3956,8 +3987,8 @@ static int snd_hdsp_hw_silence(struct snd_pcm_substream *substream, int channel,
channel_buf = hdsp_channel_buffer_location (hdsp, substream->pstr->stream, channel);
if (snd_BUG_ON(!channel_buf))
return -EIO;
- memset(channel_buf + pos * 4, 0, count * 4);
- return count;
+ memset(channel_buf + pos, 0, count);
+ return 0;
}
static int snd_hdsp_reset(struct snd_pcm_substream *substream)
@@ -4239,17 +4270,17 @@ static struct snd_pcm_hardware snd_hdsp_capture_subinfo =
.fifo_size = 0
};
-static unsigned int hdsp_period_sizes[] = { 64, 128, 256, 512, 1024, 2048, 4096, 8192 };
+static const unsigned int hdsp_period_sizes[] = { 64, 128, 256, 512, 1024, 2048, 4096, 8192 };
-static struct snd_pcm_hw_constraint_list hdsp_hw_constraints_period_sizes = {
+static const struct snd_pcm_hw_constraint_list hdsp_hw_constraints_period_sizes = {
.count = ARRAY_SIZE(hdsp_period_sizes),
.list = hdsp_period_sizes,
.mask = 0
};
-static unsigned int hdsp_9632_sample_rates[] = { 32000, 44100, 48000, 64000, 88200, 96000, 128000, 176400, 192000 };
+static const unsigned int hdsp_9632_sample_rates[] = { 32000, 44100, 48000, 64000, 88200, 96000, 128000, 176400, 192000 };
-static struct snd_pcm_hw_constraint_list hdsp_hw_constraints_9632_sample_rates = {
+static const struct snd_pcm_hw_constraint_list hdsp_hw_constraints_9632_sample_rates = {
.count = ARRAY_SIZE(hdsp_9632_sample_rates),
.list = hdsp_9632_sample_rates,
.mask = 0
@@ -4869,8 +4900,9 @@ static const struct snd_pcm_ops snd_hdsp_playback_ops = {
.prepare = snd_hdsp_prepare,
.trigger = snd_hdsp_trigger,
.pointer = snd_hdsp_hw_pointer,
- .copy = snd_hdsp_playback_copy,
- .silence = snd_hdsp_hw_silence,
+ .copy_user = snd_hdsp_playback_copy,
+ .copy_kernel = snd_hdsp_playback_copy_kernel,
+ .fill_silence = snd_hdsp_hw_silence,
};
static const struct snd_pcm_ops snd_hdsp_capture_ops = {
@@ -4881,7 +4913,8 @@ static const struct snd_pcm_ops snd_hdsp_capture_ops = {
.prepare = snd_hdsp_prepare,
.trigger = snd_hdsp_trigger,
.pointer = snd_hdsp_hw_pointer,
- .copy = snd_hdsp_capture_copy,
+ .copy_user = snd_hdsp_capture_copy,
+ .copy_kernel = snd_hdsp_capture_copy_kernel,
};
static int snd_hdsp_create_hwdep(struct snd_card *card, struct hdsp *hdsp)
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index c48acdb0e186..254c3d040118 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6040,11 +6040,11 @@ static int snd_hdspm_hw_rule_out_channels(struct snd_pcm_hw_params *params,
}
-static unsigned int hdspm_aes32_sample_rates[] = {
+static const unsigned int hdspm_aes32_sample_rates[] = {
32000, 44100, 48000, 64000, 88200, 96000, 128000, 176400, 192000
};
-static struct snd_pcm_hw_constraint_list
+static const struct snd_pcm_hw_constraint_list
hdspm_hw_constraints_aes32_sample_rates = {
.count = ARRAY_SIZE(hdspm_aes32_sample_rates),
.list = hdspm_aes32_sample_rates,
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index 55172c689991..150d08898db8 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -1883,13 +1883,14 @@ static char *rme9652_channel_buffer_location(struct snd_rme9652 *rme9652,
}
}
-static int snd_rme9652_playback_copy(struct snd_pcm_substream *substream, int channel,
- snd_pcm_uframes_t pos, void __user *src, snd_pcm_uframes_t count)
+static int snd_rme9652_playback_copy(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void __user *src, unsigned long count)
{
struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream);
char *channel_buf;
- if (snd_BUG_ON(pos + count > RME9652_CHANNEL_BUFFER_BYTES / 4))
+ if (snd_BUG_ON(pos + count > RME9652_CHANNEL_BUFFER_BYTES))
return -EINVAL;
channel_buf = rme9652_channel_buffer_location (rme9652,
@@ -1897,18 +1898,35 @@ static int snd_rme9652_playback_copy(struct snd_pcm_substream *substream, int ch
channel);
if (snd_BUG_ON(!channel_buf))
return -EIO;
- if (copy_from_user(channel_buf + pos * 4, src, count * 4))
+ if (copy_from_user(channel_buf + pos, src, count))
return -EFAULT;
- return count;
+ return 0;
}
-static int snd_rme9652_capture_copy(struct snd_pcm_substream *substream, int channel,
- snd_pcm_uframes_t pos, void __user *dst, snd_pcm_uframes_t count)
+static int snd_rme9652_playback_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *src, unsigned long count)
{
struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream);
char *channel_buf;
- if (snd_BUG_ON(pos + count > RME9652_CHANNEL_BUFFER_BYTES / 4))
+ channel_buf = rme9652_channel_buffer_location(rme9652,
+ substream->pstr->stream,
+ channel);
+ if (snd_BUG_ON(!channel_buf))
+ return -EIO;
+ memcpy(channel_buf + pos, src, count);
+ return 0;
+}
+
+static int snd_rme9652_capture_copy(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void __user *dst, unsigned long count)
+{
+ struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream);
+ char *channel_buf;
+
+ if (snd_BUG_ON(pos + count > RME9652_CHANNEL_BUFFER_BYTES))
return -EINVAL;
channel_buf = rme9652_channel_buffer_location (rme9652,
@@ -1916,13 +1934,30 @@ static int snd_rme9652_capture_copy(struct snd_pcm_substream *substream, int cha
channel);
if (snd_BUG_ON(!channel_buf))
return -EIO;
- if (copy_to_user(dst, channel_buf + pos * 4, count * 4))
+ if (copy_to_user(dst, channel_buf + pos, count))
return -EFAULT;
- return count;
+ return 0;
}
-static int snd_rme9652_hw_silence(struct snd_pcm_substream *substream, int channel,
- snd_pcm_uframes_t pos, snd_pcm_uframes_t count)
+static int snd_rme9652_capture_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *dst, unsigned long count)
+{
+ struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream);
+ char *channel_buf;
+
+ channel_buf = rme9652_channel_buffer_location(rme9652,
+ substream->pstr->stream,
+ channel);
+ if (snd_BUG_ON(!channel_buf))
+ return -EIO;
+ memcpy(dst, channel_buf + pos, count);
+ return 0;
+}
+
+static int snd_rme9652_hw_silence(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ unsigned long count)
{
struct snd_rme9652 *rme9652 = snd_pcm_substream_chip(substream);
char *channel_buf;
@@ -1932,8 +1967,8 @@ static int snd_rme9652_hw_silence(struct snd_pcm_substream *substream, int chann
channel);
if (snd_BUG_ON(!channel_buf))
return -EIO;
- memset(channel_buf + pos * 4, 0, count * 4);
- return count;
+ memset(channel_buf + pos, 0, count);
+ return 0;
}
static int snd_rme9652_reset(struct snd_pcm_substream *substream)
@@ -2193,9 +2228,9 @@ static struct snd_pcm_hardware snd_rme9652_capture_subinfo =
.fifo_size = 0,
};
-static unsigned int period_sizes[] = { 64, 128, 256, 512, 1024, 2048, 4096, 8192 };
+static const unsigned int period_sizes[] = { 64, 128, 256, 512, 1024, 2048, 4096, 8192 };
-static struct snd_pcm_hw_constraint_list hw_constraints_period_sizes = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_period_sizes = {
.count = ARRAY_SIZE(period_sizes),
.list = period_sizes,
.mask = 0
@@ -2376,8 +2411,9 @@ static const struct snd_pcm_ops snd_rme9652_playback_ops = {
.prepare = snd_rme9652_prepare,
.trigger = snd_rme9652_trigger,
.pointer = snd_rme9652_hw_pointer,
- .copy = snd_rme9652_playback_copy,
- .silence = snd_rme9652_hw_silence,
+ .copy_user = snd_rme9652_playback_copy,
+ .copy_kernel = snd_rme9652_playback_copy_kernel,
+ .fill_silence = snd_rme9652_hw_silence,
};
static const struct snd_pcm_ops snd_rme9652_capture_ops = {
@@ -2388,7 +2424,8 @@ static const struct snd_pcm_ops snd_rme9652_capture_ops = {
.prepare = snd_rme9652_prepare,
.trigger = snd_rme9652_trigger,
.pointer = snd_rme9652_hw_pointer,
- .copy = snd_rme9652_capture_copy,
+ .copy_user = snd_rme9652_capture_copy,
+ .copy_kernel = snd_rme9652_capture_copy_kernel,
};
static int snd_rme9652_create_pcm(struct snd_card *card,
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index 8e3d4ec39c35..784d762f18a7 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -248,13 +248,13 @@ static const struct pci_device_id snd_sonic_ids[] = {
MODULE_DEVICE_TABLE(pci, snd_sonic_ids);
-static struct snd_ratden sonicvibes_adc_clock = {
+static const struct snd_ratden sonicvibes_adc_clock = {
.num_min = 4000 * 65536,
.num_max = 48000UL * 65536,
.num_step = 1,
.den = 65536,
};
-static struct snd_pcm_hw_constraint_ratdens snd_sonicvibes_hw_constraints_adc_clock = {
+static const struct snd_pcm_hw_constraint_ratdens snd_sonicvibes_hw_constraints_adc_clock = {
.nrats = 1,
.rats = &sonicvibes_adc_clock,
};
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index b6c84d15b10b..c767b8664359 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -1286,10 +1286,10 @@ static int snd_via8233_multi_open(struct snd_pcm_substream *substream)
/* channels constraint for VIA8233A
* 3 and 5 channels are not supported
*/
- static unsigned int channels[] = {
+ static const unsigned int channels[] = {
1, 2, 4, 6
};
- static struct snd_pcm_hw_constraint_list hw_constraints_channels = {
+ static const struct snd_pcm_hw_constraint_list hw_constraints_channels = {
.count = ARRAY_SIZE(channels),
.list = channels,
.mask = 0,
diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c
index 2f6d40f10618..55f79b2599e7 100644
--- a/sound/pci/via82xx_modem.c
+++ b/sound/pci/via82xx_modem.c
@@ -744,8 +744,8 @@ static int snd_via82xx_modem_pcm_open(struct via82xx_modem *chip, struct viadev
{
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
- static unsigned int rates[] = { 8000, 9600, 12000, 16000 };
- static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
+ static const unsigned int rates[] = { 8000, 9600, 12000, 16000 };
+ static const struct snd_pcm_hw_constraint_list hw_constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
diff --git a/sound/pci/vx222/vx222.c b/sound/pci/vx222/vx222.c
index ecbaf473fc1e..55861849d7df 100644
--- a/sound/pci/vx222/vx222.c
+++ b/sound/pci/vx222/vx222.c
@@ -116,7 +116,7 @@ static struct snd_vx_hardware vx222_mic_hw = {
*/
static int snd_vx222_free(struct vx_core *chip)
{
- struct snd_vx222 *vx = (struct snd_vx222 *)chip;
+ struct snd_vx222 *vx = to_vx222(chip);
if (chip->irq >= 0)
free_irq(chip->irq, (void*)chip);
@@ -158,7 +158,7 @@ static int snd_vx222_create(struct snd_card *card, struct pci_dev *pci,
pci_disable_device(pci);
return -ENOMEM;
}
- vx = (struct snd_vx222 *)chip;
+ vx = to_vx222(chip);
vx->pci = pci;
if ((err = pci_request_regions(pci, CARD_NAME)) < 0) {
diff --git a/sound/pci/vx222/vx222.h b/sound/pci/vx222/vx222.h
index 2f0d78f609a6..cae355c8ed28 100644
--- a/sound/pci/vx222/vx222.h
+++ b/sound/pci/vx222/vx222.h
@@ -39,6 +39,8 @@ struct snd_vx222 {
int mic_level; /* mic level for vx222 mic */
};
+#define to_vx222(x) container_of(x, struct snd_vx222, core)
+
/* we use a lookup table with 148 values, see vx_mixer.c */
#define VX2_AKM_LEVEL_MAX 0x93
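
The to_vx222()/to_vxpocket() macros added here replace open-coded pointer casts with container_of(), which stays correct even if the embedded vx_core member ever stops being the first field of the containing struct. A tiny standalone illustration of the pattern (my_core/my_card are made-up names):

#include <linux/kernel.h>	/* container_of() */

struct my_core {
	int irq;
};

struct my_card {
	int mic_level;
	struct my_core core;	/* embedded base object, not necessarily first */
};

#define to_my_card(x) container_of(x, struct my_card, core)

static int my_card_mic_level(struct my_core *core)
{
	/* Unlike a plain (struct my_card *) cast, this keeps working
	 * regardless of where 'core' sits inside struct my_card. */
	return to_my_card(core)->mic_level;
}
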
diff --git a/sound/pci/vx222/vx222_ops.c b/sound/pci/vx222/vx222_ops.c
index 7df1663ea510..d4298af6d3ee 100644
--- a/sound/pci/vx222/vx222_ops.c
+++ b/sound/pci/vx222/vx222_ops.c
@@ -86,7 +86,7 @@ static int vx2_reg_index[VX_REG_MAX] = {
static inline unsigned long vx2_reg_addr(struct vx_core *_chip, int reg)
{
- struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
+ struct snd_vx222 *chip = to_vx222(_chip);
return chip->port[vx2_reg_index[reg]] + vx2_reg_offset[reg];
}
@@ -159,7 +159,7 @@ static void vx2_outl(struct vx_core *chip, int offset, unsigned int val)
static void vx2_reset_dsp(struct vx_core *_chip)
{
- struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
+ struct snd_vx222 *chip = to_vx222(_chip);
/* set the reset dsp bit to 0 */
vx_outl(chip, CDSP, chip->regCDSP & ~VX_CDSP_DSP_RESET_MASK);
@@ -174,7 +174,7 @@ static void vx2_reset_dsp(struct vx_core *_chip)
static int vx2_test_xilinx(struct vx_core *_chip)
{
- struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
+ struct snd_vx222 *chip = to_vx222(_chip);
unsigned int data;
dev_dbg(_chip->card->dev, "testing xilinx...\n");
@@ -479,7 +479,7 @@ static int vx2_test_and_ack(struct vx_core *chip)
*/
static void vx2_validate_irq(struct vx_core *_chip, int enable)
{
- struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
+ struct snd_vx222 *chip = to_vx222(_chip);
/* Set the interrupt enable bit to 1 in CDSP register */
if (enable) {
@@ -730,7 +730,7 @@ static void vx2_old_write_codec_bit(struct vx_core *chip, int codec, unsigned in
*/
static void vx2_reset_codec(struct vx_core *_chip)
{
- struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
+ struct snd_vx222 *chip = to_vx222(_chip);
/* Set the reset CODEC bit to 0. */
vx_outl(chip, CDSP, chip->regCDSP &~ VX_CDSP_CODEC_RESET_MASK);
@@ -772,7 +772,7 @@ static void vx2_reset_codec(struct vx_core *_chip)
*/
static void vx2_change_audio_source(struct vx_core *_chip, int src)
{
- struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
+ struct snd_vx222 *chip = to_vx222(_chip);
switch (src) {
case VX_AUDIO_SRC_DIGITAL:
@@ -791,7 +791,7 @@ static void vx2_change_audio_source(struct vx_core *_chip, int src)
*/
static void vx2_set_clock_source(struct vx_core *_chip, int source)
{
- struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
+ struct snd_vx222 *chip = to_vx222(_chip);
if (source == INTERNAL_QUARTZ)
chip->regCFG &= ~VX_CFG_CLOCKIN_SEL_MASK;
@@ -805,7 +805,7 @@ static void vx2_set_clock_source(struct vx_core *_chip, int source)
*/
static void vx2_reset_board(struct vx_core *_chip, int cold_reset)
{
- struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
+ struct snd_vx222 *chip = to_vx222(_chip);
/* initialize the register values */
chip->regCDSP = VX_CDSP_CODEC_RESET_MASK | VX_CDSP_DSP_RESET_MASK ;
@@ -878,7 +878,7 @@ static int vx_input_level_info(struct snd_kcontrol *kcontrol, struct snd_ctl_ele
static int vx_input_level_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct vx_core *_chip = snd_kcontrol_chip(kcontrol);
- struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
+ struct snd_vx222 *chip = to_vx222(_chip);
mutex_lock(&_chip->mixer_mutex);
ucontrol->value.integer.value[0] = chip->input_level[0];
ucontrol->value.integer.value[1] = chip->input_level[1];
@@ -889,7 +889,7 @@ static int vx_input_level_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem
static int vx_input_level_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct vx_core *_chip = snd_kcontrol_chip(kcontrol);
- struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
+ struct snd_vx222 *chip = to_vx222(_chip);
if (ucontrol->value.integer.value[0] < 0 ||
ucontrol->value.integer.value[0] > MIC_LEVEL_MAX)
return -EINVAL;
@@ -922,7 +922,7 @@ static int vx_mic_level_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
static int vx_mic_level_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct vx_core *_chip = snd_kcontrol_chip(kcontrol);
- struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
+ struct snd_vx222 *chip = to_vx222(_chip);
ucontrol->value.integer.value[0] = chip->mic_level;
return 0;
}
@@ -930,7 +930,7 @@ static int vx_mic_level_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_v
static int vx_mic_level_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct vx_core *_chip = snd_kcontrol_chip(kcontrol);
- struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
+ struct snd_vx222 *chip = to_vx222(_chip);
if (ucontrol->value.integer.value[0] < 0 ||
ucontrol->value.integer.value[0] > MIC_LEVEL_MAX)
return -EINVAL;
@@ -973,7 +973,7 @@ static const struct snd_kcontrol_new vx_control_mic_level = {
static int vx2_add_mic_controls(struct vx_core *_chip)
{
- struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
+ struct snd_vx222 *chip = to_vx222(_chip);
int err;
if (_chip->type != VX_TYPE_MIC)
diff --git a/sound/pcmcia/vx/vxp_mixer.c b/sound/pcmcia/vx/vxp_mixer.c
index a4a664259f0d..304b153005a5 100644
--- a/sound/pcmcia/vx/vxp_mixer.c
+++ b/sound/pcmcia/vx/vxp_mixer.c
@@ -43,7 +43,7 @@ static int vx_mic_level_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
static int vx_mic_level_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct vx_core *_chip = snd_kcontrol_chip(kcontrol);
- struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
+ struct snd_vxpocket *chip = to_vxpocket(_chip);
ucontrol->value.integer.value[0] = chip->mic_level;
return 0;
}
@@ -51,7 +51,7 @@ static int vx_mic_level_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_v
static int vx_mic_level_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct vx_core *_chip = snd_kcontrol_chip(kcontrol);
- struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
+ struct snd_vxpocket *chip = to_vxpocket(_chip);
unsigned int val = ucontrol->value.integer.value[0];
if (val > MIC_LEVEL_MAX)
@@ -69,7 +69,7 @@ static int vx_mic_level_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_v
static const DECLARE_TLV_DB_SCALE(db_scale_mic, -21, 3, 0);
-static struct snd_kcontrol_new vx_control_mic_level = {
+static const struct snd_kcontrol_new vx_control_mic_level = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ),
@@ -88,7 +88,7 @@ static struct snd_kcontrol_new vx_control_mic_level = {
static int vx_mic_boost_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct vx_core *_chip = snd_kcontrol_chip(kcontrol);
- struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
+ struct snd_vxpocket *chip = to_vxpocket(_chip);
ucontrol->value.integer.value[0] = chip->mic_level;
return 0;
}
@@ -96,7 +96,7 @@ static int vx_mic_boost_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_v
static int vx_mic_boost_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct vx_core *_chip = snd_kcontrol_chip(kcontrol);
- struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
+ struct snd_vxpocket *chip = to_vxpocket(_chip);
int val = !!ucontrol->value.integer.value[0];
mutex_lock(&_chip->mixer_mutex);
if (chip->mic_level != val) {
@@ -109,7 +109,7 @@ static int vx_mic_boost_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_v
return 0;
}
-static struct snd_kcontrol_new vx_control_mic_boost = {
+static const struct snd_kcontrol_new vx_control_mic_boost = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Mic Boost",
.info = vx_mic_boost_info,
@@ -120,7 +120,7 @@ static struct snd_kcontrol_new vx_control_mic_boost = {
int vxp_add_mic_controls(struct vx_core *_chip)
{
- struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
+ struct snd_vxpocket *chip = to_vxpocket(_chip);
int err;
/* mute input levels */
diff --git a/sound/pcmcia/vx/vxp_ops.c b/sound/pcmcia/vx/vxp_ops.c
index 5f97791f00d7..8cde40226355 100644
--- a/sound/pcmcia/vx/vxp_ops.c
+++ b/sound/pcmcia/vx/vxp_ops.c
@@ -50,7 +50,7 @@ static int vxp_reg_offset[VX_REG_MAX] = {
static inline unsigned long vxp_reg_addr(struct vx_core *_chip, int reg)
{
- struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
+ struct snd_vxpocket *chip = to_vxpocket(_chip);
return chip->port + vxp_reg_offset[reg];
}
@@ -110,7 +110,7 @@ static int vx_check_magic(struct vx_core *chip)
static void vxp_reset_dsp(struct vx_core *_chip)
{
- struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
+ struct snd_vxpocket *chip = to_vxpocket(_chip);
/* set the reset dsp bit to 1 */
vx_outb(chip, CDSP, chip->regCDSP | VXP_CDSP_DSP_RESET_MASK);
@@ -128,7 +128,7 @@ static void vxp_reset_dsp(struct vx_core *_chip)
*/
static void vxp_reset_codec(struct vx_core *_chip)
{
- struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
+ struct snd_vxpocket *chip = to_vxpocket(_chip);
/* Set the reset CODEC bit to 1. */
vx_outb(chip, CDSP, chip->regCDSP | VXP_CDSP_CODEC_RESET_MASK);
@@ -147,7 +147,7 @@ static void vxp_reset_codec(struct vx_core *_chip)
*/
static int vxp_load_xilinx_binary(struct vx_core *_chip, const struct firmware *fw)
{
- struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
+ struct snd_vxpocket *chip = to_vxpocket(_chip);
unsigned int i;
int c;
int regCSUER, regRUER;
@@ -280,7 +280,7 @@ static int vxp_load_dsp(struct vx_core *vx, int index, const struct firmware *fw
*/
static int vxp_test_and_ack(struct vx_core *_chip)
{
- struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
+ struct snd_vxpocket *chip = to_vxpocket(_chip);
/* not booted yet? */
if (! (_chip->chip_status & VX_STAT_XILINX_LOADED))
@@ -307,7 +307,7 @@ static int vxp_test_and_ack(struct vx_core *_chip)
*/
static void vxp_validate_irq(struct vx_core *_chip, int enable)
{
- struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
+ struct snd_vxpocket *chip = to_vxpocket(_chip);
/* Set the interrupt enable bit to 1 in CDSP register */
if (enable)
@@ -323,7 +323,7 @@ static void vxp_validate_irq(struct vx_core *_chip, int enable)
*/
static void vx_setup_pseudo_dma(struct vx_core *_chip, int do_write)
{
- struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
+ struct snd_vxpocket *chip = to_vxpocket(_chip);
/* Interrupt mode and HREQ pin enabled for host transmit / receive data transfers */
vx_outb(chip, ICR, do_write ? ICR_TREQ : ICR_RREQ);
@@ -343,7 +343,7 @@ static void vx_setup_pseudo_dma(struct vx_core *_chip, int do_write)
*/
static void vx_release_pseudo_dma(struct vx_core *_chip)
{
- struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
+ struct snd_vxpocket *chip = to_vxpocket(_chip);
/* Disable DMA and 16-bit accesses */
chip->regDIALOG &= ~(VXP_DLG_DMAWRITE_SEL_MASK|
@@ -403,7 +403,7 @@ static void vxp_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
static void vxp_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
struct vx_pipe *pipe, int count)
{
- struct snd_vxpocket *pchip = (struct snd_vxpocket *)chip;
+ struct snd_vxpocket *pchip = to_vxpocket(chip);
long port = vxp_reg_addr(chip, VX_DMA);
int offset = pipe->hw_ptr;
unsigned short *addr = (unsigned short *)(runtime->dma_area + offset);
@@ -467,7 +467,7 @@ static void vxp_write_codec_reg(struct vx_core *chip, int codec, unsigned int da
*/
void vx_set_mic_boost(struct vx_core *chip, int boost)
{
- struct snd_vxpocket *pchip = (struct snd_vxpocket *)chip;
+ struct snd_vxpocket *pchip = to_vxpocket(chip);
if (chip->chip_status & VX_STAT_IS_STALE)
return;
@@ -509,7 +509,7 @@ static int vx_compute_mic_level(int level)
*/
void vx_set_mic_level(struct vx_core *chip, int level)
{
- struct snd_vxpocket *pchip = (struct snd_vxpocket *)chip;
+ struct snd_vxpocket *pchip = to_vxpocket(chip);
if (chip->chip_status & VX_STAT_IS_STALE)
return;
@@ -528,7 +528,7 @@ void vx_set_mic_level(struct vx_core *chip, int level)
*/
static void vxp_change_audio_source(struct vx_core *_chip, int src)
{
- struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
+ struct snd_vxpocket *chip = to_vxpocket(_chip);
switch (src) {
case VX_AUDIO_SRC_DIGITAL:
@@ -568,7 +568,7 @@ static void vxp_change_audio_source(struct vx_core *_chip, int src)
*/
static void vxp_set_clock_source(struct vx_core *_chip, int source)
{
- struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
+ struct snd_vxpocket *chip = to_vxpocket(_chip);
if (source == INTERNAL_QUARTZ)
chip->regCDSP &= ~VXP_CDSP_CLOCKIN_SEL_MASK;
@@ -583,7 +583,7 @@ static void vxp_set_clock_source(struct vx_core *_chip, int source)
*/
static void vxp_reset_board(struct vx_core *_chip, int cold_reset)
{
- struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
+ struct snd_vxpocket *chip = to_vxpocket(_chip);
chip->regCDSP = 0;
chip->regDIALOG = 0;
diff --git a/sound/pcmcia/vx/vxpocket.c b/sound/pcmcia/vx/vxpocket.c
index b16f42deed67..ca0d19e723fd 100644
--- a/sound/pcmcia/vx/vxpocket.c
+++ b/sound/pcmcia/vx/vxpocket.c
@@ -155,7 +155,7 @@ static int snd_vxpocket_new(struct snd_card *card, int ibl,
}
chip->ibl.size = ibl;
- vxp = (struct snd_vxpocket *)chip;
+ vxp = to_vxpocket(chip);
vxp->p_dev = link;
link->priv = chip;
@@ -187,7 +187,7 @@ static int snd_vxpocket_assign_resources(struct vx_core *chip, int port, int irq
{
int err;
struct snd_card *card = chip->card;
- struct snd_vxpocket *vxp = (struct snd_vxpocket *)chip;
+ struct snd_vxpocket *vxp = to_vxpocket(chip);
snd_printdd(KERN_DEBUG "vxpocket assign resources: port = 0x%x, irq = %d\n", port, irq);
vxp->port = port;
diff --git a/sound/pcmcia/vx/vxpocket.h b/sound/pcmcia/vx/vxpocket.h
index 13d658c1a216..26f4255e132e 100644
--- a/sound/pcmcia/vx/vxpocket.h
+++ b/sound/pcmcia/vx/vxpocket.h
@@ -43,6 +43,8 @@ struct snd_vxpocket {
struct pcmcia_device *p_dev;
};
+#define to_vxpocket(x) container_of(x, struct snd_vxpocket, core)
+
extern struct snd_vx_ops snd_vxpocket_ops;
void vx_set_mic_boost(struct vx_core *chip, int boost);
diff --git a/sound/ppc/awacs.c b/sound/ppc/awacs.c
index 1468e4b7bf93..d1e4ef1c5c30 100644
--- a/sound/ppc/awacs.c
+++ b/sound/ppc/awacs.c
@@ -514,7 +514,7 @@ static struct snd_kcontrol_new snd_pmac_awacs_amp_vol[] = {
},
};
-static struct snd_kcontrol_new snd_pmac_awacs_amp_hp_sw = {
+static const struct snd_kcontrol_new snd_pmac_awacs_amp_hp_sw = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Headphone Playback Switch",
.info = snd_pmac_boolean_stereo_info,
@@ -523,7 +523,7 @@ static struct snd_kcontrol_new snd_pmac_awacs_amp_hp_sw = {
.private_value = AMP_CH_HD,
};
-static struct snd_kcontrol_new snd_pmac_awacs_amp_spk_sw = {
+static const struct snd_kcontrol_new snd_pmac_awacs_amp_spk_sw = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Speaker Playback Switch",
.info = snd_pmac_boolean_stereo_info,
diff --git a/sound/ppc/beep.c b/sound/ppc/beep.c
index d3524f9fa05d..f19eb3e39937 100644
--- a/sound/ppc/beep.c
+++ b/sound/ppc/beep.c
@@ -206,7 +206,7 @@ static int snd_pmac_put_beep(struct snd_kcontrol *kcontrol,
return oval != chip->beep->volume;
}
-static struct snd_kcontrol_new snd_pmac_beep_mixer = {
+static const struct snd_kcontrol_new snd_pmac_beep_mixer = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Beep Playback Volume",
.info = snd_pmac_info_beep,
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index 58ee8089bbf9..0779a2912237 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -897,7 +897,7 @@ static struct snd_kcontrol_new snapper_mixers[] = {
},
};
-static struct snd_kcontrol_new tumbler_hp_sw = {
+static const struct snd_kcontrol_new tumbler_hp_sw = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Headphone Playback Switch",
.info = snd_pmac_boolean_mono_info,
@@ -905,7 +905,7 @@ static struct snd_kcontrol_new tumbler_hp_sw = {
.put = tumbler_put_mute_switch,
.private_value = TUMBLER_MUTE_HP,
};
-static struct snd_kcontrol_new tumbler_speaker_sw = {
+static const struct snd_kcontrol_new tumbler_speaker_sw = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Speaker Playback Switch",
.info = snd_pmac_boolean_mono_info,
@@ -913,7 +913,7 @@ static struct snd_kcontrol_new tumbler_speaker_sw = {
.put = tumbler_put_mute_switch,
.private_value = TUMBLER_MUTE_AMP,
};
-static struct snd_kcontrol_new tumbler_lineout_sw = {
+static const struct snd_kcontrol_new tumbler_lineout_sw = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Line Out Playback Switch",
.info = snd_pmac_boolean_mono_info,
@@ -921,7 +921,7 @@ static struct snd_kcontrol_new tumbler_lineout_sw = {
.put = tumbler_put_mute_switch,
.private_value = TUMBLER_MUTE_LINE,
};
-static struct snd_kcontrol_new tumbler_drc_sw = {
+static const struct snd_kcontrol_new tumbler_drc_sw = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "DRC Switch",
.info = snd_pmac_boolean_mono_info,
diff --git a/sound/sh/aica.c b/sound/sh/aica.c
index fbbc25279559..ab4802df62e1 100644
--- a/sound/sh/aica.c
+++ b/sound/sh/aica.c
@@ -535,7 +535,7 @@ static int aica_pcmvolume_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static struct snd_kcontrol_new snd_aica_pcmswitch_control = {
+static const struct snd_kcontrol_new snd_aica_pcmswitch_control = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Switch",
.index = 0,
@@ -544,7 +544,7 @@ static struct snd_kcontrol_new snd_aica_pcmswitch_control = {
.put = aica_pcmswitch_put
};
-static struct snd_kcontrol_new snd_aica_pcmvolume_control = {
+static const struct snd_kcontrol_new snd_aica_pcmvolume_control = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Volume",
.index = 0,
diff --git a/sound/sh/sh_dac_audio.c b/sound/sh/sh_dac_audio.c
index 461b310c7872..c1e00ed715ee 100644
--- a/sound/sh/sh_dac_audio.c
+++ b/sound/sh/sh_dac_audio.c
@@ -184,23 +184,36 @@ static int snd_sh_dac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return 0;
}
-static int snd_sh_dac_pcm_copy(struct snd_pcm_substream *substream, int channel,
- snd_pcm_uframes_t pos, void __user *src, snd_pcm_uframes_t count)
+static int snd_sh_dac_pcm_copy(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void __user *src, unsigned long count)
{
/* channel is not used (interleaved data) */
struct snd_sh_dac *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
- ssize_t b_count = frames_to_bytes(runtime , count);
- ssize_t b_pos = frames_to_bytes(runtime , pos);
- if (count < 0)
- return -EINVAL;
+ if (copy_from_user_toio(chip->data_buffer + pos, src, count))
+ return -EFAULT;
+ chip->buffer_end = chip->data_buffer + pos + count;
- if (!count)
- return 0;
+ if (chip->empty) {
+ chip->empty = 0;
+ dac_audio_start_timer(chip);
+ }
+
+ return 0;
+}
- memcpy_toio(chip->data_buffer + b_pos, src, b_count);
- chip->buffer_end = chip->data_buffer + b_pos + b_count;
+static int snd_sh_dac_pcm_copy_kernel(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *src, unsigned long count)
+{
+ /* channel is not used (interleaved data) */
+ struct snd_sh_dac *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ memcpy_toio(chip->data_buffer + pos, src, count);
+ chip->buffer_end = chip->data_buffer + pos + count;
if (chip->empty) {
chip->empty = 0;
@@ -211,23 +224,15 @@ static int snd_sh_dac_pcm_copy(struct snd_pcm_substream *substream, int channel,
}
static int snd_sh_dac_pcm_silence(struct snd_pcm_substream *substream,
- int channel, snd_pcm_uframes_t pos,
- snd_pcm_uframes_t count)
+ int channel, unsigned long pos,
+ unsigned long count)
{
/* channel is not used (interleaved data) */
struct snd_sh_dac *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
- ssize_t b_count = frames_to_bytes(runtime , count);
- ssize_t b_pos = frames_to_bytes(runtime , pos);
-
- if (count < 0)
- return -EINVAL;
-
- if (!count)
- return 0;
- memset_io(chip->data_buffer + b_pos, 0, b_count);
- chip->buffer_end = chip->data_buffer + b_pos + b_count;
+ memset_io(chip->data_buffer + pos, 0, count);
+ chip->buffer_end = chip->data_buffer + pos + count;
if (chip->empty) {
chip->empty = 0;
@@ -256,8 +261,9 @@ static struct snd_pcm_ops snd_sh_dac_pcm_ops = {
.prepare = snd_sh_dac_pcm_prepare,
.trigger = snd_sh_dac_pcm_trigger,
.pointer = snd_sh_dac_pcm_pointer,
- .copy = snd_sh_dac_pcm_copy,
- .silence = snd_sh_dac_pcm_silence,
+ .copy_user = snd_sh_dac_pcm_copy,
+ .copy_kernel = snd_sh_dac_pcm_copy_kernel,
+ .fill_silence = snd_sh_dac_pcm_silence,
.mmap = snd_pcm_lib_mmap_iomem,
};
diff --git a/sound/soc/atmel/atmel-pcm.h b/sound/soc/atmel/atmel-pcm.h
index 6eaf081cad50..4b27aed40a51 100644
--- a/sound/soc/atmel/atmel-pcm.h
+++ b/sound/soc/atmel/atmel-pcm.h
@@ -83,8 +83,7 @@ struct atmel_pcm_dma_params {
#define ssc_readx(base, reg) (__raw_readl((base) + (reg)))
#define ssc_writex(base, reg, value) __raw_writel((value), (base) + (reg))
-#if defined(CONFIG_SND_ATMEL_SOC_PDC) || \
- defined(CONFIG_SND_ATMEL_SOC_PDC_MODULE)
+#if IS_ENABLED(CONFIG_SND_ATMEL_SOC_PDC)
int atmel_pcm_pdc_platform_register(struct device *dev);
void atmel_pcm_pdc_platform_unregister(struct device *dev);
#else
@@ -97,8 +96,7 @@ static inline void atmel_pcm_pdc_platform_unregister(struct device *dev)
}
#endif
-#if defined(CONFIG_SND_ATMEL_SOC_DMA) || \
- defined(CONFIG_SND_ATMEL_SOC_DMA_MODULE)
+#if IS_ENABLED(CONFIG_SND_ATMEL_SOC_DMA)
int atmel_pcm_dma_platform_register(struct device *dev);
void atmel_pcm_dma_platform_unregister(struct device *dev);
#else
diff --git a/sound/soc/atmel/tse850-pcm5142.c b/sound/soc/atmel/tse850-pcm5142.c
index a72c7d642026..3a1393283156 100644
--- a/sound/soc/atmel/tse850-pcm5142.c
+++ b/sound/soc/atmel/tse850-pcm5142.c
@@ -227,7 +227,7 @@ int tse850_put_ana(struct snd_kcontrol *kctrl,
static const char * const mux_text[] = { "Mixer", "Loop" };
static const struct soc_enum mux_enum =
- SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, 2, mux_text);
+ SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(mux_text), mux_text);
static const struct snd_kcontrol_new mux1 =
SOC_DAPM_ENUM_EXT("MUX1", mux_enum, tse850_get_mux1, tse850_put_mux1);
@@ -252,7 +252,7 @@ static const char * const ana_text[] = {
};
static const struct soc_enum ana_enum =
- SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, 9, ana_text);
+ SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, ARRAY_SIZE(ana_text), ana_text);
static const struct snd_kcontrol_new out =
SOC_DAPM_ENUM_EXT("ANA", ana_enum, tse850_get_ana, tse850_put_ana);
diff --git a/sound/soc/blackfin/bf5xx-ac97-pcm.c b/sound/soc/blackfin/bf5xx-ac97-pcm.c
index 02ad2606fa19..913e29275f4e 100644
--- a/sound/soc/blackfin/bf5xx-ac97-pcm.c
+++ b/sound/soc/blackfin/bf5xx-ac97-pcm.c
@@ -279,23 +279,33 @@ static int bf5xx_pcm_mmap(struct snd_pcm_substream *substream,
return 0 ;
}
#else
-static int bf5xx_pcm_copy(struct snd_pcm_substream *substream, int channel,
- snd_pcm_uframes_t pos,
- void __user *buf, snd_pcm_uframes_t count)
+static int bf5xx_pcm_copy(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *buf, unsigned long count)
{
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned int chan_mask = ac97_chan_mask[runtime->channels - 1];
+ struct ac97_frame *dst;
+
pr_debug("%s copy pos:0x%lx count:0x%lx\n",
substream->stream ? "Capture" : "Playback", pos, count);
+ dst = (struct ac97_frame *)runtime->dma_area +
+ bytes_to_frames(runtime, pos);
+ count = bytes_to_frames(runtime, count);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- bf5xx_pcm_to_ac97((struct ac97_frame *)runtime->dma_area + pos,
- (__u16 *)buf, count, chan_mask);
+ bf5xx_pcm_to_ac97(dst, buf, count, chan_mask);
else
- bf5xx_ac97_to_pcm((struct ac97_frame *)runtime->dma_area + pos,
- (__u16 *)buf, count);
+ bf5xx_ac97_to_pcm(dst, buf, count);
return 0;
}
+
+static int bf5xx_pcm_copy_user(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void __user *buf, unsigned long count)
+{
+ return bf5xx_pcm_copy(substream, channel, pos, (void *)buf, count);
+}
#endif
static struct snd_pcm_ops bf5xx_pcm_ac97_ops = {
@@ -309,7 +319,8 @@ static struct snd_pcm_ops bf5xx_pcm_ac97_ops = {
#if defined(CONFIG_SND_BF5XX_MMAP_SUPPORT)
.mmap = bf5xx_pcm_mmap,
#else
- .copy = bf5xx_pcm_copy,
+ .copy_user = bf5xx_pcm_copy_user,
+ .copy_kernel = bf5xx_pcm_copy,
#endif
};
diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
index 6cba211da32e..470d99abf6f6 100644
--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
+++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
@@ -225,8 +225,9 @@ static int bf5xx_pcm_mmap(struct snd_pcm_substream *substream,
return 0 ;
}
-static int bf5xx_pcm_copy(struct snd_pcm_substream *substream, int channel,
- snd_pcm_uframes_t pos, void *buf, snd_pcm_uframes_t count)
+static int bf5xx_pcm_copy(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void *buf, unsigned long count)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -238,6 +239,8 @@ static int bf5xx_pcm_copy(struct snd_pcm_substream *substream, int channel,
dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
if (dma_data->tdm_mode) {
+ pos = bytes_to_frames(runtime, pos);
+ count = bytes_to_frames(runtime, count);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
src = buf;
dst = runtime->dma_area;
@@ -269,21 +272,29 @@ static int bf5xx_pcm_copy(struct snd_pcm_substream *substream, int channel,
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
src = buf;
dst = runtime->dma_area;
- dst += frames_to_bytes(runtime, pos);
+ dst += pos;
} else {
src = runtime->dma_area;
- src += frames_to_bytes(runtime, pos);
+ src += pos;
dst = buf;
}
- memcpy(dst, src, frames_to_bytes(runtime, count));
+ memcpy(dst, src, count);
}
return 0;
}
+static int bf5xx_pcm_copy_user(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ void __user *buf, unsigned long count)
+{
+ return bf5xx_pcm_copy(substream, channel, pos, (void *)buf, count);
+}
+
static int bf5xx_pcm_silence(struct snd_pcm_substream *substream,
- int channel, snd_pcm_uframes_t pos, snd_pcm_uframes_t count)
+ int channel, unsigned long pos,
+ unsigned long count)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -295,11 +306,11 @@ static int bf5xx_pcm_silence(struct snd_pcm_substream *substream,
dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
if (dma_data->tdm_mode) {
- offset = pos * 8 * sample_size;
- samples = count * 8;
+ offset = bytes_to_frames(runtime, pos) * 8 * sample_size;
+ samples = bytes_to_frames(runtime, count) * 8;
} else {
- offset = frames_to_bytes(runtime, pos);
- samples = count * runtime->channels;
+ offset = pos;
+ samples = bytes_to_samples(runtime, count);
}
snd_pcm_format_set_silence(runtime->format, buf + offset, samples);
@@ -316,8 +327,9 @@ static struct snd_pcm_ops bf5xx_pcm_i2s_ops = {
.trigger = bf5xx_pcm_trigger,
.pointer = bf5xx_pcm_pointer,
.mmap = bf5xx_pcm_mmap,
- .copy = bf5xx_pcm_copy,
- .silence = bf5xx_pcm_silence,
+ .copy_user = bf5xx_pcm_copy_user,
+ .copy_kernel = bf5xx_pcm_copy,
+ .fill_silence = bf5xx_pcm_silence,
};
static int bf5xx_pcm_i2s_new(struct snd_soc_pcm_runtime *rtd)
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 883ed4c8a551..6c78b0b49b81 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -72,6 +72,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_DA9055 if I2C
select SND_SOC_DIO2125
select SND_SOC_DMIC
+ select SND_SOC_ES8316 if I2C
select SND_SOC_ES8328_SPI if SPI_MASTER
select SND_SOC_ES8328_I2C if I2C
select SND_SOC_ES7134
@@ -543,6 +544,10 @@ config SND_SOC_HDMI_CODEC
config SND_SOC_ES7134
tristate "Everest Semi ES7134 CODEC"
+config SND_SOC_ES8316
+ tristate "Everest Semi ES8316 CODEC"
+ depends on I2C
+
config SND_SOC_ES8328
tristate
@@ -1114,6 +1119,11 @@ config SND_SOC_WM9713
tristate
select REGMAP_AC97
+config SND_SOC_ZX_AUD96P22
+ tristate "ZTE ZX AUD96P22 CODEC"
+ depends on I2C
+ select REGMAP_I2C
+
# Amp
config SND_SOC_LM4857
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 28a63fdaf982..1755a54e3dc9 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -65,6 +65,7 @@ snd-soc-da732x-objs := da732x.o
snd-soc-da9055-objs := da9055.o
snd-soc-dmic-objs := dmic.o
snd-soc-es7134-objs := es7134.o
+snd-soc-es8316-objs := es8316.o
snd-soc-es8328-objs := es8328.o
snd-soc-es8328-i2c-objs := es8328-i2c.o
snd-soc-es8328-spi-objs := es8328-spi.o
@@ -224,6 +225,7 @@ snd-soc-wm9705-objs := wm9705.o
snd-soc-wm9712-objs := wm9712.o
snd-soc-wm9713-objs := wm9713.o
snd-soc-wm-hubs-objs := wm_hubs.o
+snd-soc-zx-aud96p22-objs := zx_aud96p22.o
# Amp
snd-soc-dio2125-objs := dio2125.o
snd-soc-max9877-objs := max9877.o
@@ -300,6 +302,7 @@ obj-$(CONFIG_SND_SOC_DA732X) += snd-soc-da732x.o
obj-$(CONFIG_SND_SOC_DA9055) += snd-soc-da9055.o
obj-$(CONFIG_SND_SOC_DMIC) += snd-soc-dmic.o
obj-$(CONFIG_SND_SOC_ES7134) += snd-soc-es7134.o
+obj-$(CONFIG_SND_SOC_ES8316) += snd-soc-es8316.o
obj-$(CONFIG_SND_SOC_ES8328) += snd-soc-es8328.o
obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o
obj-$(CONFIG_SND_SOC_ES8328_SPI)+= snd-soc-es8328-spi.o
@@ -455,6 +458,7 @@ obj-$(CONFIG_SND_SOC_WM9712) += snd-soc-wm9712.o
obj-$(CONFIG_SND_SOC_WM9713) += snd-soc-wm9713.o
obj-$(CONFIG_SND_SOC_WM_ADSP) += snd-soc-wm-adsp.o
obj-$(CONFIG_SND_SOC_WM_HUBS) += snd-soc-wm-hubs.o
+obj-$(CONFIG_SND_SOC_ZX_AUD96P22) += snd-soc-zx-aud96p22.o
# Amp
obj-$(CONFIG_SND_SOC_DIO2125) += snd-soc-dio2125.o
diff --git a/sound/soc/codecs/ak4613.c b/sound/soc/codecs/ak4613.c
index b2dfddead227..690edebf029e 100644
--- a/sound/soc/codecs/ak4613.c
+++ b/sound/soc/codecs/ak4613.c
@@ -94,6 +94,8 @@ struct ak4613_interface {
struct ak4613_priv {
struct mutex lock;
const struct ak4613_interface *iface;
+ struct snd_pcm_hw_constraint_list constraint;
+ unsigned int sysclk;
unsigned int fmt;
u8 oc;
@@ -139,9 +141,7 @@ static const struct reg_default ak4613_reg[] = {
#define AUDIO_IFACE(b, fmt) { b, SND_SOC_DAIFMT_##fmt }
static const struct ak4613_interface ak4613_iface[] = {
/* capture */ /* playback */
- [0] = { AUDIO_IFACE(24, LEFT_J), AUDIO_IFACE(16, RIGHT_J) },
- [1] = { AUDIO_IFACE(24, LEFT_J), AUDIO_IFACE(20, RIGHT_J) },
- [2] = { AUDIO_IFACE(24, LEFT_J), AUDIO_IFACE(24, RIGHT_J) },
+ /* [0] - [2] are not supported */
[3] = { AUDIO_IFACE(24, LEFT_J), AUDIO_IFACE(24, LEFT_J) },
[4] = { AUDIO_IFACE(24, I2S), AUDIO_IFACE(24, I2S) },
};
@@ -254,6 +254,74 @@ static void ak4613_dai_shutdown(struct snd_pcm_substream *substream,
mutex_unlock(&priv->lock);
}
+static void ak4613_hw_constraints(struct ak4613_priv *priv,
+ struct snd_pcm_runtime *runtime)
+{
+ static const unsigned int ak4613_rates[] = {
+ 32000,
+ 44100,
+ 48000,
+ 64000,
+ 88200,
+ 96000,
+ 176400,
+ 192000,
+ };
+ struct snd_pcm_hw_constraint_list *constraint = &priv->constraint;
+ unsigned int fs;
+ int i;
+
+ constraint->list = ak4613_rates;
+ constraint->mask = 0;
+ constraint->count = 0;
+
+ /*
+ * Slave Mode
+ * Normal: [32kHz, 48kHz] : 256fs,384fs or 512fs
+ * Double: [64kHz, 96kHz] : 256fs
+ * Quad : [128kHz,192kHz]: 128fs
+ *
+ * Master mode
+ * Normal: [32kHz, 48kHz] : 256fs or 512fs
+ * Double: [64kHz, 96kHz] : 256fs
+ * Quad : [128kHz,192kHz]: 128fs
+ */
+ for (i = 0; i < ARRAY_SIZE(ak4613_rates); i++) {
+ /* minimum fs on each range */
+ fs = (ak4613_rates[i] <= 96000) ? 256 : 128;
+
+ if (priv->sysclk >= ak4613_rates[i] * fs)
+ constraint->count = i + 1;
+ }
+
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, constraint);
+}
+
+static int ak4613_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct ak4613_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+ priv->cnt++;
+
+ ak4613_hw_constraints(priv, substream->runtime);
+
+ return 0;
+}
+
+static int ak4613_dai_set_sysclk(struct snd_soc_dai *codec_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct ak4613_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+ priv->sysclk = freq;
+
+ return 0;
+}
+
static int ak4613_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
struct snd_soc_codec *codec = dai->codec;
@@ -262,11 +330,9 @@ static int ak4613_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
fmt &= SND_SOC_DAIFMT_FORMAT_MASK;
switch (fmt) {
- case SND_SOC_DAIFMT_RIGHT_J:
case SND_SOC_DAIFMT_LEFT_J:
case SND_SOC_DAIFMT_I2S:
priv->fmt = fmt;
-
break;
default:
return -EINVAL;
@@ -286,13 +352,8 @@ static bool ak4613_dai_fmt_matching(const struct ak4613_interface *iface,
if (fmts->fmt != fmt)
return false;
- if (fmt == SND_SOC_DAIFMT_RIGHT_J) {
- if (fmts->width != width)
- return false;
- } else {
- if (fmts->width < width)
- return false;
- }
+ if (fmts->width != width)
+ return false;
return true;
}
@@ -319,6 +380,7 @@ static int ak4613_dai_hw_params(struct snd_pcm_substream *substream,
case 48000:
ctrl2 = DFS_NORMAL_SPEED;
break;
+ case 64000:
case 88200:
case 96000:
ctrl2 = DFS_DOUBLE_SPEED;
@@ -345,7 +407,7 @@ static int ak4613_dai_hw_params(struct snd_pcm_substream *substream,
if (ak4613_dai_fmt_matching(priv->iface, is_play, fmt, width))
iface = priv->iface;
} else {
- for (i = ARRAY_SIZE(ak4613_iface); i >= 0; i--) {
+ for (i = ARRAY_SIZE(ak4613_iface) - 1; i >= 0; i--) {
if (!ak4613_dai_fmt_matching(ak4613_iface + i,
is_play,
fmt, width))
@@ -358,7 +420,6 @@ static int ak4613_dai_hw_params(struct snd_pcm_substream *substream,
if ((priv->iface == NULL) ||
(priv->iface == iface)) {
priv->iface = iface;
- priv->cnt++;
ret = 0;
}
mutex_unlock(&priv->lock);
@@ -407,7 +468,9 @@ static int ak4613_set_bias_level(struct snd_soc_codec *codec,
}
static const struct snd_soc_dai_ops ak4613_dai_ops = {
+ .startup = ak4613_dai_startup,
.shutdown = ak4613_dai_shutdown,
+ .set_sysclk = ak4613_dai_set_sysclk,
.set_fmt = ak4613_dai_set_fmt,
.hw_params = ak4613_dai_hw_params,
};
@@ -420,8 +483,7 @@ static const struct snd_soc_dai_ops ak4613_dai_ops = {
SNDRV_PCM_RATE_96000 |\
SNDRV_PCM_RATE_176400 |\
SNDRV_PCM_RATE_192000)
-#define AK4613_PCM_FMTBIT (SNDRV_PCM_FMTBIT_S16_LE |\
- SNDRV_PCM_FMTBIT_S24_LE)
+#define AK4613_PCM_FMTBIT (SNDRV_PCM_FMTBIT_S24_LE)
static struct snd_soc_dai_driver ak4613_dai = {
.name = "ak4613-hifi",
@@ -527,6 +589,7 @@ static int ak4613_i2c_probe(struct i2c_client *i2c,
priv->iface = NULL;
priv->cnt = 0;
+ priv->sysclk = 0;
mutex_init(&priv->lock);
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 23ab9646c351..66de8a2013a6 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -433,7 +433,7 @@ static int ak4642_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
static int ak4642_set_mcko(struct snd_soc_codec *codec,
u32 frequency)
{
- u32 fs_list[] = {
+ static const u32 fs_list[] = {
[0] = 8000,
[1] = 12000,
[2] = 16000,
@@ -447,7 +447,7 @@ static int ak4642_set_mcko(struct snd_soc_codec *codec,
[14] = 29400,
[15] = 44100,
};
- u32 ps_list[] = {
+ static const u32 ps_list[] = {
[0] = 256,
[1] = 128,
[2] = 64,
diff --git a/sound/soc/codecs/cs35l34.c b/sound/soc/codecs/cs35l34.c
index 7c5d1510cf2c..0a747c66cc6c 100644
--- a/sound/soc/codecs/cs35l34.c
+++ b/sound/soc/codecs/cs35l34.c
@@ -567,12 +567,12 @@ static int cs35l34_pcm_hw_params(struct snd_pcm_substream *substream,
return ret;
}
-static unsigned int cs35l34_src_rates[] = {
+static const unsigned int cs35l34_src_rates[] = {
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
};
-static struct snd_pcm_hw_constraint_list cs35l34_constraints = {
+static const struct snd_pcm_hw_constraint_list cs35l34_constraints = {
.count = ARRAY_SIZE(cs35l34_src_rates),
.list = cs35l34_src_rates,
};
diff --git a/sound/soc/codecs/cs35l35.c b/sound/soc/codecs/cs35l35.c
index f8aef5869b03..f1ee184ecab2 100644
--- a/sound/soc/codecs/cs35l35.c
+++ b/sound/soc/codecs/cs35l35.c
@@ -162,6 +162,14 @@ static bool cs35l35_precious_register(struct device *dev, unsigned int reg)
}
}
+static void cs35l35_reset(struct cs35l35_private *cs35l35)
+{
+ gpiod_set_value_cansleep(cs35l35->reset_gpio, 0);
+ usleep_range(2000, 2100);
+ gpiod_set_value_cansleep(cs35l35->reset_gpio, 1);
+ usleep_range(1000, 1100);
+}
+
static int cs35l35_wait_for_pdn(struct cs35l35_private *cs35l35)
{
int ret;
@@ -756,6 +764,76 @@ static int cs35l35_codec_set_sysclk(struct snd_soc_codec *codec,
return ret;
}
+static int cs35l35_boost_inductor(struct cs35l35_private *cs35l35,
+ int inductor)
+{
+ struct regmap *regmap = cs35l35->regmap;
+ unsigned int bst_ipk = 0;
+
+ /*
+ * Digital Boost Converter Configuration for feedback,
+ * ramping, switching frequency, and estimation block seeding.
+ */
+
+ regmap_update_bits(regmap, CS35L35_BST_CONV_SW_FREQ,
+ CS35L35_BST_CONV_SWFREQ_MASK, 0x00);
+
+ regmap_read(regmap, CS35L35_BST_PEAK_I, &bst_ipk);
+ bst_ipk &= CS35L35_BST_IPK_MASK;
+
+ switch (inductor) {
+ case 1000: /* 1 uH */
+ regmap_write(regmap, CS35L35_BST_CONV_COEF_1, 0x24);
+ regmap_write(regmap, CS35L35_BST_CONV_COEF_2, 0x24);
+ regmap_update_bits(regmap, CS35L35_BST_CONV_SW_FREQ,
+ CS35L35_BST_CONV_LBST_MASK, 0x00);
+
+ if (bst_ipk < 0x04)
+ regmap_write(regmap, CS35L35_BST_CONV_SLOPE_COMP, 0x1B);
+ else
+ regmap_write(regmap, CS35L35_BST_CONV_SLOPE_COMP, 0x4E);
+ break;
+ case 1200: /* 1.2 uH */
+ regmap_write(regmap, CS35L35_BST_CONV_COEF_1, 0x20);
+ regmap_write(regmap, CS35L35_BST_CONV_COEF_2, 0x20);
+ regmap_update_bits(regmap, CS35L35_BST_CONV_SW_FREQ,
+ CS35L35_BST_CONV_LBST_MASK, 0x01);
+
+ if (bst_ipk < 0x04)
+ regmap_write(regmap, CS35L35_BST_CONV_SLOPE_COMP, 0x1B);
+ else
+ regmap_write(regmap, CS35L35_BST_CONV_SLOPE_COMP, 0x47);
+ break;
+ case 1500: /* 1.5uH */
+ regmap_write(regmap, CS35L35_BST_CONV_COEF_1, 0x20);
+ regmap_write(regmap, CS35L35_BST_CONV_COEF_2, 0x20);
+ regmap_update_bits(regmap, CS35L35_BST_CONV_SW_FREQ,
+ CS35L35_BST_CONV_LBST_MASK, 0x02);
+
+ if (bst_ipk < 0x04)
+ regmap_write(regmap, CS35L35_BST_CONV_SLOPE_COMP, 0x1B);
+ else
+ regmap_write(regmap, CS35L35_BST_CONV_SLOPE_COMP, 0x3C);
+ break;
+ case 2200: /* 2.2uH */
+ regmap_write(regmap, CS35L35_BST_CONV_COEF_1, 0x19);
+ regmap_write(regmap, CS35L35_BST_CONV_COEF_2, 0x25);
+ regmap_update_bits(regmap, CS35L35_BST_CONV_SW_FREQ,
+ CS35L35_BST_CONV_LBST_MASK, 0x03);
+
+ if (bst_ipk < 0x04)
+ regmap_write(regmap, CS35L35_BST_CONV_SLOPE_COMP, 0x1B);
+ else
+ regmap_write(regmap, CS35L35_BST_CONV_SLOPE_COMP, 0x23);
+ break;
+ default:
+ dev_err(cs35l35->dev, "Invalid Inductor Value %d uH\n",
+ inductor);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int cs35l35_codec_probe(struct snd_soc_codec *codec)
{
struct cs35l35_private *cs35l35 = snd_soc_codec_get_drvdata(codec);
@@ -775,6 +853,10 @@ static int cs35l35_codec_probe(struct snd_soc_codec *codec)
cs35l35->pdata.bst_ipk <<
CS35L35_BST_IPK_SHIFT);
+ ret = cs35l35_boost_inductor(cs35l35, cs35l35->pdata.boost_ind);
+ if (ret)
+ return ret;
+
if (cs35l35->pdata.gain_zc)
regmap_update_bits(cs35l35->regmap, CS35L35_PROTECT_CTL,
CS35L35_AMP_GAIN_ZC_MASK,
@@ -1195,7 +1277,15 @@ static int cs35l35_handle_of_data(struct i2c_client *i2c_client,
return -EINVAL;
}
- pdata->bst_ipk = (val32 - 1680) / 110;
+ pdata->bst_ipk = ((val32 - 1680) / 110) | CS35L35_VALID_PDATA;
+ }
+
+ ret = of_property_read_u32(np, "cirrus,boost-ind-nanohenry", &val32);
+ if (ret >= 0) {
+ pdata->boost_ind = val32;
+ } else {
+ dev_err(&i2c_client->dev, "Inductor not specified.\n");
+ return -EINVAL;
}
if (of_property_read_u32(np, "cirrus,sp-drv-strength", &val32) >= 0)
@@ -1454,7 +1544,7 @@ static int cs35l35_i2c_probe(struct i2c_client *i2c_client,
}
}
- gpiod_set_value_cansleep(cs35l35->reset_gpio, 1);
+ cs35l35_reset(cs35l35);
init_completion(&cs35l35->pdn_done);
diff --git a/sound/soc/codecs/cs35l35.h b/sound/soc/codecs/cs35l35.h
index 5a6e43a87c4d..621bfef70d03 100644
--- a/sound/soc/codecs/cs35l35.h
+++ b/sound/soc/codecs/cs35l35.h
@@ -200,6 +200,12 @@
#define CS35L35_SP_I2S_DRV_MASK 0x03
#define CS35L35_SP_I2S_DRV_SHIFT 0
+/* Boost Converter Config */
+#define CS35L35_BST_CONV_COEFF_MASK 0xFF
+#define CS35L35_BST_CONV_SLOPE_MASK 0xFF
+#define CS35L35_BST_CONV_LBST_MASK 0x03
+#define CS35L35_BST_CONV_SWFREQ_MASK 0xF0
+
/* Class H Algorithm Control */
#define CS35L35_CH_STEREO_MASK 0x40
#define CS35L35_CH_STEREO_SHIFT 6
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index e78b5f055f25..d8824773dc29 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -674,8 +674,6 @@ static int cs4271_common_probe(struct device *dev,
cs4271->gpio_nreset = cs4271plat->gpio_nreset;
if (gpio_is_valid(cs4271->gpio_nreset)) {
- int ret;
-
ret = devm_gpio_request(dev, cs4271->gpio_nreset,
"CS4271 Reset");
if (ret < 0)
diff --git a/sound/soc/codecs/cs53l30.c b/sound/soc/codecs/cs53l30.c
index 1e0d5973b758..06933a5d0a75 100644
--- a/sound/soc/codecs/cs53l30.c
+++ b/sound/soc/codecs/cs53l30.c
@@ -747,7 +747,7 @@ static unsigned int const cs53l30_src_rates[] = {
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
};
-static struct snd_pcm_hw_constraint_list src_constraints = {
+static const struct snd_pcm_hw_constraint_list src_constraints = {
.count = ARRAY_SIZE(cs53l30_src_rates),
.list = cs53l30_src_rates,
};
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 024d83fa6a7f..c3e11897f8ae 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -13,6 +13,8 @@
*/
#include <linux/acpi.h>
+#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/i2c.h>
@@ -1606,12 +1608,12 @@ static enum da7213_dmic_clk_rate
}
static struct da7213_platform_data
- *da7213_of_to_pdata(struct snd_soc_codec *codec)
+ *da7213_fw_to_pdata(struct snd_soc_codec *codec)
{
- struct device_node *np = codec->dev->of_node;
+ struct device *dev = codec->dev;
struct da7213_platform_data *pdata;
- const char *of_str;
- u32 of_val32;
+ const char *fw_str;
+ u32 fw_val32;
pdata = devm_kzalloc(codec->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
@@ -1619,29 +1621,29 @@ static struct da7213_platform_data
return NULL;
}
- if (of_property_read_u32(np, "dlg,micbias1-lvl", &of_val32) >= 0)
- pdata->micbias1_lvl = da7213_of_micbias_lvl(codec, of_val32);
+ if (device_property_read_u32(dev, "dlg,micbias1-lvl", &fw_val32) >= 0)
+ pdata->micbias1_lvl = da7213_of_micbias_lvl(codec, fw_val32);
else
pdata->micbias1_lvl = DA7213_MICBIAS_2_2V;
- if (of_property_read_u32(np, "dlg,micbias2-lvl", &of_val32) >= 0)
- pdata->micbias2_lvl = da7213_of_micbias_lvl(codec, of_val32);
+ if (device_property_read_u32(dev, "dlg,micbias2-lvl", &fw_val32) >= 0)
+ pdata->micbias2_lvl = da7213_of_micbias_lvl(codec, fw_val32);
else
pdata->micbias2_lvl = DA7213_MICBIAS_2_2V;
- if (!of_property_read_string(np, "dlg,dmic-data-sel", &of_str))
- pdata->dmic_data_sel = da7213_of_dmic_data_sel(codec, of_str);
+ if (!device_property_read_string(dev, "dlg,dmic-data-sel", &fw_str))
+ pdata->dmic_data_sel = da7213_of_dmic_data_sel(codec, fw_str);
else
pdata->dmic_data_sel = DA7213_DMIC_DATA_LRISE_RFALL;
- if (!of_property_read_string(np, "dlg,dmic-samplephase", &of_str))
+ if (!device_property_read_string(dev, "dlg,dmic-samplephase", &fw_str))
pdata->dmic_samplephase =
- da7213_of_dmic_samplephase(codec, of_str);
+ da7213_of_dmic_samplephase(codec, fw_str);
else
pdata->dmic_samplephase = DA7213_DMIC_SAMPLE_ON_CLKEDGE;
- if (of_property_read_u32(np, "dlg,dmic-clkrate", &of_val32) >= 0)
- pdata->dmic_clk_rate = da7213_of_dmic_clkrate(codec, of_val32);
+ if (device_property_read_u32(dev, "dlg,dmic-clkrate", &fw_val32) >= 0)
+ pdata->dmic_clk_rate = da7213_of_dmic_clkrate(codec, fw_val32);
else
pdata->dmic_clk_rate = DA7213_DMIC_CLK_3_0MHZ;
@@ -1713,10 +1715,9 @@ static int da7213_probe(struct snd_soc_codec *codec)
DA7213_LINE_AMP_OE, DA7213_LINE_AMP_OE);
/* Handle DT/Platform data */
- if (codec->dev->of_node)
- da7213->pdata = da7213_of_to_pdata(codec);
- else
- da7213->pdata = dev_get_platdata(codec->dev);
+ da7213->pdata = dev_get_platdata(codec->dev);
+ if (!da7213->pdata)
+ da7213->pdata = da7213_fw_to_pdata(codec);
/* Set platform data values */
if (da7213->pdata) {
diff --git a/sound/soc/codecs/da7218.c b/sound/soc/codecs/da7218.c
index d256ebf9e309..6e1940eb0653 100644
--- a/sound/soc/codecs/da7218.c
+++ b/sound/soc/codecs/da7218.c
@@ -1457,7 +1457,7 @@ static int da7218_dai_event(struct snd_soc_dapm_widget *w,
++i;
msleep(DA7218_SRM_CHECK_DELAY);
}
- } while ((i < DA7218_SRM_CHECK_TRIES) & (!success));
+ } while ((i < DA7218_SRM_CHECK_TRIES) && (!success));
if (!success)
dev_warn(codec->dev, "SRM failed to lock\n");
diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
index 6274d79c1353..1d1d10dd92ae 100644
--- a/sound/soc/codecs/da7219-aad.c
+++ b/sound/soc/codecs/da7219-aad.c
@@ -115,19 +115,21 @@ static void da7219_aad_hptest_work(struct work_struct *work)
struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
u16 tonegen_freq_hptest;
- u8 pll_srm_sts, gain_ramp_ctrl, accdet_cfg8;
+ u8 pll_srm_sts, pll_ctrl, gain_ramp_ctrl, accdet_cfg8;
int report = 0, ret = 0;
- /* Lock DAPM and any Kcontrols that are affected by this test */
+ /* Lock DAPM, Kcontrols affected by this test and the PLL */
snd_soc_dapm_mutex_lock(dapm);
- mutex_lock(&da7219->lock);
+ mutex_lock(&da7219->ctrl_lock);
+ mutex_lock(&da7219->pll_lock);
/* Ensure MCLK is available for HP test procedure */
if (da7219->mclk) {
ret = clk_prepare_enable(da7219->mclk);
if (ret) {
dev_err(codec->dev, "Failed to enable mclk - %d\n", ret);
- mutex_unlock(&da7219->lock);
+ mutex_unlock(&da7219->pll_lock);
+ mutex_unlock(&da7219->ctrl_lock);
snd_soc_dapm_mutex_unlock(dapm);
return;
}
@@ -136,12 +138,21 @@ static void da7219_aad_hptest_work(struct work_struct *work)
/*
* If MCLK not present, then we're using the internal oscillator and
* require different frequency settings to achieve the same result.
+ *
+ * If MCLK is present, but PLL is not enabled then we enable it here to
+ * ensure a consistent detection procedure.
*/
pll_srm_sts = snd_soc_read(codec, DA7219_PLL_SRM_STS);
- if (pll_srm_sts & DA7219_PLL_SRM_STS_MCLK)
+ if (pll_srm_sts & DA7219_PLL_SRM_STS_MCLK) {
tonegen_freq_hptest = cpu_to_le16(DA7219_AAD_HPTEST_RAMP_FREQ);
- else
+
+ pll_ctrl = snd_soc_read(codec, DA7219_PLL_CTRL);
+ if ((pll_ctrl & DA7219_PLL_MODE_MASK) == DA7219_PLL_MODE_BYPASS)
+ da7219_set_pll(codec, DA7219_SYSCLK_PLL,
+ DA7219_PLL_FREQ_OUT_98304);
+ } else {
tonegen_freq_hptest = cpu_to_le16(DA7219_AAD_HPTEST_RAMP_FREQ_INT_OSC);
+ }
/* Ensure gain ramping at fastest rate */
gain_ramp_ctrl = snd_soc_read(codec, DA7219_GAIN_RAMP_CTRL);
@@ -302,11 +313,17 @@ static void da7219_aad_hptest_work(struct work_struct *work)
snd_soc_update_bits(codec, DA7219_HP_R_CTRL, DA7219_HP_R_AMP_OE_MASK,
DA7219_HP_R_AMP_OE_MASK);
+ /* Restore PLL to previous configuration, if re-configured */
+ if ((pll_srm_sts & DA7219_PLL_SRM_STS_MCLK) &&
+ ((pll_ctrl & DA7219_PLL_MODE_MASK) == DA7219_PLL_MODE_BYPASS))
+ da7219_set_pll(codec, DA7219_SYSCLK_MCLK, 0);
+
/* Remove MCLK, if previously enabled */
if (da7219->mclk)
clk_disable_unprepare(da7219->mclk);
- mutex_unlock(&da7219->lock);
+ mutex_unlock(&da7219->pll_lock);
+ mutex_unlock(&da7219->ctrl_lock);
snd_soc_dapm_mutex_unlock(dapm);
/*
diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
index 99601627f83c..f71d72c22bfc 100644
--- a/sound/soc/codecs/da7219.c
+++ b/sound/soc/codecs/da7219.c
@@ -260,9 +260,9 @@ static int da7219_volsw_locked_get(struct snd_kcontrol *kcontrol,
struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
int ret;
- mutex_lock(&da7219->lock);
+ mutex_lock(&da7219->ctrl_lock);
ret = snd_soc_get_volsw(kcontrol, ucontrol);
- mutex_unlock(&da7219->lock);
+ mutex_unlock(&da7219->ctrl_lock);
return ret;
}
@@ -274,9 +274,9 @@ static int da7219_volsw_locked_put(struct snd_kcontrol *kcontrol,
struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
int ret;
- mutex_lock(&da7219->lock);
+ mutex_lock(&da7219->ctrl_lock);
ret = snd_soc_put_volsw(kcontrol, ucontrol);
- mutex_unlock(&da7219->lock);
+ mutex_unlock(&da7219->ctrl_lock);
return ret;
}
@@ -288,9 +288,9 @@ static int da7219_enum_locked_get(struct snd_kcontrol *kcontrol,
struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
int ret;
- mutex_lock(&da7219->lock);
+ mutex_lock(&da7219->ctrl_lock);
ret = snd_soc_get_enum_double(kcontrol, ucontrol);
- mutex_unlock(&da7219->lock);
+ mutex_unlock(&da7219->ctrl_lock);
return ret;
}
@@ -302,9 +302,9 @@ static int da7219_enum_locked_put(struct snd_kcontrol *kcontrol,
struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
int ret;
- mutex_lock(&da7219->lock);
+ mutex_lock(&da7219->ctrl_lock);
ret = snd_soc_put_enum_double(kcontrol, ucontrol);
- mutex_unlock(&da7219->lock);
+ mutex_unlock(&da7219->ctrl_lock);
return ret;
}
@@ -424,9 +424,9 @@ static int da7219_tonegen_freq_get(struct snd_kcontrol *kcontrol,
u16 val;
int ret;
- mutex_lock(&da7219->lock);
+ mutex_lock(&da7219->ctrl_lock);
ret = regmap_raw_read(da7219->regmap, reg, &val, sizeof(val));
- mutex_unlock(&da7219->lock);
+ mutex_unlock(&da7219->ctrl_lock);
if (ret)
return ret;
@@ -458,9 +458,9 @@ static int da7219_tonegen_freq_put(struct snd_kcontrol *kcontrol,
*/
val = cpu_to_le16(ucontrol->value.integer.value[0]);
- mutex_lock(&da7219->lock);
+ mutex_lock(&da7219->ctrl_lock);
ret = regmap_raw_write(da7219->regmap, reg, &val, sizeof(val));
- mutex_unlock(&da7219->lock);
+ mutex_unlock(&da7219->ctrl_lock);
return ret;
}
@@ -801,7 +801,7 @@ static int da7219_dai_event(struct snd_soc_dapm_widget *w,
++i;
msleep(50);
}
- } while ((i < DA7219_SRM_CHECK_RETRIES) & (!srm_lock));
+ } while ((i < DA7219_SRM_CHECK_RETRIES) && (!srm_lock));
if (!srm_lock)
dev_warn(codec->dev, "SRM failed to lock\n");
@@ -1129,6 +1129,8 @@ static int da7219_set_dai_sysclk(struct snd_soc_dai *codec_dai,
return -EINVAL;
}
+ mutex_lock(&da7219->pll_lock);
+
switch (clk_id) {
case DA7219_CLKSRC_MCLK_SQR:
snd_soc_update_bits(codec, DA7219_PLL_CTRL,
@@ -1141,6 +1143,7 @@ static int da7219_set_dai_sysclk(struct snd_soc_dai *codec_dai,
break;
default:
dev_err(codec_dai->dev, "Unknown clock source %d\n", clk_id);
+ mutex_unlock(&da7219->pll_lock);
return -EINVAL;
}
@@ -1152,19 +1155,20 @@ static int da7219_set_dai_sysclk(struct snd_soc_dai *codec_dai,
if (ret) {
dev_err(codec_dai->dev, "Failed to set clock rate %d\n",
freq);
+ mutex_unlock(&da7219->pll_lock);
return ret;
}
}
da7219->mclk_rate = freq;
+ mutex_unlock(&da7219->pll_lock);
+
return 0;
}
-static int da7219_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
- int source, unsigned int fref, unsigned int fout)
+int da7219_set_pll(struct snd_soc_codec *codec, int source, unsigned int fout)
{
- struct snd_soc_codec *codec = codec_dai->codec;
struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
u8 pll_ctrl, indiv_bits, indiv;
@@ -1237,6 +1241,20 @@ static int da7219_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
return 0;
}
+static int da7219_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
+ int source, unsigned int fref, unsigned int fout)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ mutex_lock(&da7219->pll_lock);
+ ret = da7219_set_pll(codec, source, fout);
+ mutex_unlock(&da7219->pll_lock);
+
+ return ret;
+}
+
static int da7219_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
@@ -1741,7 +1759,8 @@ static int da7219_probe(struct snd_soc_codec *codec)
unsigned int rev;
int ret;
- mutex_init(&da7219->lock);
+ mutex_init(&da7219->ctrl_lock);
+ mutex_init(&da7219->pll_lock);
/* Regulator configuration */
ret = da7219_handle_supplies(codec);
diff --git a/sound/soc/codecs/da7219.h b/sound/soc/codecs/da7219.h
index 6baba7455fa1..8d6c3c8c8026 100644
--- a/sound/soc/codecs/da7219.h
+++ b/sound/soc/codecs/da7219.h
@@ -810,7 +810,8 @@ struct da7219_priv {
bool wakeup_source;
struct regulator_bulk_data supplies[DA7219_NUM_SUPPLIES];
struct regmap *regmap;
- struct mutex lock;
+ struct mutex ctrl_lock;
+ struct mutex pll_lock;
struct clk *mclk;
unsigned int mclk_rate;
@@ -821,4 +822,6 @@ struct da7219_priv {
u8 gain_ramp_ctrl;
};
+int da7219_set_pll(struct snd_soc_codec *codec, int source, unsigned int fout);
+
#endif /* __DA7219_H */
diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
new file mode 100644
index 000000000000..ecc02449c569
--- /dev/null
+++ b/sound/soc/codecs/es8316.c
@@ -0,0 +1,637 @@
+/*
+ * es8316.c -- es8316 ALSA SoC audio driver
+ * Copyright Everest Semiconductor Co.,Ltd
+ *
+ * Authors: David Yang <[email protected]>,
+ * Daniel Drake <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regmap.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+#include "es8316.h"
+
+/* In slave mode at single speed, the codec is documented as accepting 5
+ * MCLK/LRCK ratios, but we also add ratio 400, which is commonly used on
+ * Intel Cherry Trail platforms (19.2MHz MCLK, 48kHz LRCK).
+ */
+#define NR_SUPPORTED_MCLK_LRCK_RATIOS 6
+static const unsigned int supported_mclk_lrck_ratios[] = {
+ 256, 384, 400, 512, 768, 1024
+};
+
+struct es8316_priv {
+ unsigned int sysclk;
+ unsigned int allowed_rates[NR_SUPPORTED_MCLK_LRCK_RATIOS];
+ struct snd_pcm_hw_constraint_list sysclk_constraints;
+};
+
+/*
+ * ES8316 controls
+ */
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(dac_vol_tlv, -9600, 50, 1);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(adc_vol_tlv, -9600, 50, 1);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_max_gain_tlv, -650, 150, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_min_gain_tlv, -1200, 150, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_target_tlv, -1650, 150, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(hpmixer_gain_tlv, -1200, 150, 0);
+
+static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(adc_pga_gain_tlv,
+ 0, 0, TLV_DB_SCALE_ITEM(-350, 0, 0),
+ 1, 1, TLV_DB_SCALE_ITEM(0, 0, 0),
+ 2, 2, TLV_DB_SCALE_ITEM(250, 0, 0),
+ 3, 3, TLV_DB_SCALE_ITEM(450, 0, 0),
+ 4, 4, TLV_DB_SCALE_ITEM(700, 0, 0),
+ 5, 5, TLV_DB_SCALE_ITEM(1000, 0, 0),
+ 6, 6, TLV_DB_SCALE_ITEM(1300, 0, 0),
+ 7, 7, TLV_DB_SCALE_ITEM(1600, 0, 0),
+ 8, 8, TLV_DB_SCALE_ITEM(1800, 0, 0),
+ 9, 9, TLV_DB_SCALE_ITEM(2100, 0, 0),
+ 10, 10, TLV_DB_SCALE_ITEM(2400, 0, 0),
+);
+
+static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpout_vol_tlv,
+ 0, 0, TLV_DB_SCALE_ITEM(-4800, 0, 0),
+ 1, 3, TLV_DB_SCALE_ITEM(-2400, 1200, 0),
+);
+
+static const char * const ng_type_txt[] =
+ { "Constant PGA Gain", "Mute ADC Output" };
+static const struct soc_enum ng_type =
+ SOC_ENUM_SINGLE(ES8316_ADC_ALC_NG, 6, 2, ng_type_txt);
+
+static const char * const adcpol_txt[] = { "Normal", "Invert" };
+static const struct soc_enum adcpol =
+ SOC_ENUM_SINGLE(ES8316_ADC_MUTE, 1, 2, adcpol_txt);
+static const char *const dacpol_txt[] =
+ { "Normal", "R Invert", "L Invert", "L + R Invert" };
+static const struct soc_enum dacpol =
+ SOC_ENUM_SINGLE(ES8316_DAC_SET1, 0, 4, dacpol_txt);
+
+static const struct snd_kcontrol_new es8316_snd_controls[] = {
+ SOC_DOUBLE_TLV("Headphone Playback Volume", ES8316_CPHP_ICAL_VOL,
+ 4, 0, 3, 1, hpout_vol_tlv),
+ SOC_DOUBLE_TLV("Headphone Mixer Volume", ES8316_HPMIX_VOL,
+ 0, 4, 7, 0, hpmixer_gain_tlv),
+
+ SOC_ENUM("Playback Polarity", dacpol),
+ SOC_DOUBLE_R_TLV("DAC Playback Volume", ES8316_DAC_VOLL,
+ ES8316_DAC_VOLR, 0, 0xc0, 1, dac_vol_tlv),
+ SOC_SINGLE("DAC Soft Ramp Switch", ES8316_DAC_SET1, 4, 1, 1),
+ SOC_SINGLE("DAC Soft Ramp Rate", ES8316_DAC_SET1, 2, 4, 0),
+ SOC_SINGLE("DAC Notch Filter Switch", ES8316_DAC_SET2, 6, 1, 0),
+ SOC_SINGLE("DAC Double Fs Switch", ES8316_DAC_SET2, 7, 1, 0),
+ SOC_SINGLE("DAC Stereo Enhancement", ES8316_DAC_SET3, 0, 7, 0),
+
+ SOC_ENUM("Capture Polarity", adcpol),
+ SOC_SINGLE("Mic Boost Switch", ES8316_ADC_D2SEPGA, 0, 1, 0),
+ SOC_SINGLE_TLV("ADC Capture Volume", ES8316_ADC_VOLUME,
+ 0, 0xc0, 1, adc_vol_tlv),
+ SOC_SINGLE_TLV("ADC PGA Gain Volume", ES8316_ADC_PGAGAIN,
+ 4, 10, 0, adc_pga_gain_tlv),
+ SOC_SINGLE("ADC Soft Ramp Switch", ES8316_ADC_MUTE, 4, 1, 0),
+ SOC_SINGLE("ADC Double Fs Switch", ES8316_ADC_DMIC, 4, 1, 0),
+
+ SOC_SINGLE("ALC Capture Switch", ES8316_ADC_ALC1, 6, 1, 0),
+ SOC_SINGLE_TLV("ALC Capture Max Volume", ES8316_ADC_ALC1, 0, 28, 0,
+ alc_max_gain_tlv),
+ SOC_SINGLE_TLV("ALC Capture Min Volume", ES8316_ADC_ALC2, 0, 28, 0,
+ alc_min_gain_tlv),
+ SOC_SINGLE_TLV("ALC Capture Target Volume", ES8316_ADC_ALC3, 4, 10, 0,
+ alc_target_tlv),
+ SOC_SINGLE("ALC Capture Hold Time", ES8316_ADC_ALC3, 0, 10, 0),
+ SOC_SINGLE("ALC Capture Decay Time", ES8316_ADC_ALC4, 4, 10, 0),
+ SOC_SINGLE("ALC Capture Attack Time", ES8316_ADC_ALC4, 0, 10, 0),
+ SOC_SINGLE("ALC Capture Noise Gate Switch", ES8316_ADC_ALC_NG,
+ 5, 1, 0),
+ SOC_SINGLE("ALC Capture Noise Gate Threshold", ES8316_ADC_ALC_NG,
+ 0, 31, 0),
+ SOC_ENUM("ALC Capture Noise Gate Type", ng_type),
+};
+
+/* Analog Input Mux */
+static const char * const es8316_analog_in_txt[] = {
+ "lin1-rin1",
+ "lin2-rin2",
+ "lin1-rin1 with 20db Boost",
+ "lin2-rin2 with 20db Boost"
+};
+static const unsigned int es8316_analog_in_values[] = { 0, 1, 2, 3 };
+static const struct soc_enum es8316_analog_input_enum =
+ SOC_VALUE_ENUM_SINGLE(ES8316_ADC_PDN_LINSEL, 4, 3,
+ ARRAY_SIZE(es8316_analog_in_txt),
+ es8316_analog_in_txt,
+ es8316_analog_in_values);
+static const struct snd_kcontrol_new es8316_analog_in_mux_controls =
+ SOC_DAPM_ENUM("Route", es8316_analog_input_enum);
+
+static const char * const es8316_dmic_txt[] = {
+ "dmic disable",
+ "dmic data at high level",
+ "dmic data at low level",
+};
+static const unsigned int es8316_dmic_values[] = { 0, 1, 2 };
+static const struct soc_enum es8316_dmic_src_enum =
+ SOC_VALUE_ENUM_SINGLE(ES8316_ADC_DMIC, 0, 3,
+ ARRAY_SIZE(es8316_dmic_txt),
+ es8316_dmic_txt,
+ es8316_dmic_values);
+static const struct snd_kcontrol_new es8316_dmic_src_controls =
+ SOC_DAPM_ENUM("Route", es8316_dmic_src_enum);
+
+/* hp mixer mux */
+static const char * const es8316_hpmux_texts[] = {
+ "lin1-rin1",
+ "lin2-rin2",
+ "lin-rin with Boost",
+ "lin-rin with Boost and PGA"
+};
+
+static const unsigned int es8316_hpmux_values[] = { 0, 1, 2, 3 };
+
+static SOC_ENUM_SINGLE_DECL(es8316_left_hpmux_enum, ES8316_HPMIX_SEL,
+ 4, es8316_hpmux_texts);
+
+static const struct snd_kcontrol_new es8316_left_hpmux_controls =
+ SOC_DAPM_ENUM("Route", es8316_left_hpmux_enum);
+
+static SOC_ENUM_SINGLE_DECL(es8316_right_hpmux_enum, ES8316_HPMIX_SEL,
+ 0, es8316_hpmux_texts);
+
+static const struct snd_kcontrol_new es8316_right_hpmux_controls =
+ SOC_DAPM_ENUM("Route", es8316_right_hpmux_enum);
+
+/* headphone Output Mixer */
+static const struct snd_kcontrol_new es8316_out_left_mix[] = {
+ SOC_DAPM_SINGLE("LLIN Switch", ES8316_HPMIX_SWITCH, 6, 1, 0),
+ SOC_DAPM_SINGLE("Left DAC Switch", ES8316_HPMIX_SWITCH, 7, 1, 0),
+};
+static const struct snd_kcontrol_new es8316_out_right_mix[] = {
+ SOC_DAPM_SINGLE("RLIN Switch", ES8316_HPMIX_SWITCH, 2, 1, 0),
+ SOC_DAPM_SINGLE("Right DAC Switch", ES8316_HPMIX_SWITCH, 3, 1, 0),
+};
+
+/* DAC data source mux */
+static const char * const es8316_dacsrc_texts[] = {
+ "LDATA TO LDAC, RDATA TO RDAC",
+ "LDATA TO LDAC, LDATA TO RDAC",
+ "RDATA TO LDAC, RDATA TO RDAC",
+ "RDATA TO LDAC, LDATA TO RDAC",
+};
+
+static const unsigned int es8316_dacsrc_values[] = { 0, 1, 2, 3 };
+
+static SOC_ENUM_SINGLE_DECL(es8316_dacsrc_mux_enum, ES8316_DAC_SET1,
+ 6, es8316_dacsrc_texts);
+
+static const struct snd_kcontrol_new es8316_dacsrc_mux_controls =
+ SOC_DAPM_ENUM("Route", es8316_dacsrc_mux_enum);
+
+static const struct snd_soc_dapm_widget es8316_dapm_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("Bias", ES8316_SYS_PDN, 3, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Analog power", ES8316_SYS_PDN, 4, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Mic Bias", ES8316_SYS_PDN, 5, 1, NULL, 0),
+
+ SND_SOC_DAPM_INPUT("DMIC"),
+ SND_SOC_DAPM_INPUT("MIC1"),
+ SND_SOC_DAPM_INPUT("MIC2"),
+
+ /* Input Mux */
+ SND_SOC_DAPM_MUX("Differential Mux", SND_SOC_NOPM, 0, 0,
+ &es8316_analog_in_mux_controls),
+
+ SND_SOC_DAPM_SUPPLY("ADC Vref", ES8316_SYS_PDN, 1, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC bias", ES8316_SYS_PDN, 2, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("ADC Clock", ES8316_CLKMGR_CLKSW, 3, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Line input PGA", ES8316_ADC_PDN_LINSEL,
+ 7, 1, NULL, 0),
+ SND_SOC_DAPM_ADC("Mono ADC", NULL, ES8316_ADC_PDN_LINSEL, 6, 1),
+ SND_SOC_DAPM_MUX("Digital Mic Mux", SND_SOC_NOPM, 0, 0,
+ &es8316_dmic_src_controls),
+
+ /* Digital Interface */
+ SND_SOC_DAPM_AIF_OUT("I2S OUT", "I2S1 Capture", 1,
+ ES8316_SERDATA_ADC, 6, 1),
+ SND_SOC_DAPM_AIF_IN("I2S IN", "I2S1 Playback", 0,
+ SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_MUX("DAC Source Mux", SND_SOC_NOPM, 0, 0,
+ &es8316_dacsrc_mux_controls),
+
+ SND_SOC_DAPM_SUPPLY("DAC Vref", ES8316_SYS_PDN, 0, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("DAC Clock", ES8316_CLKMGR_CLKSW, 2, 0, NULL, 0),
+ SND_SOC_DAPM_DAC("Right DAC", NULL, ES8316_DAC_PDN, 0, 1),
+ SND_SOC_DAPM_DAC("Left DAC", NULL, ES8316_DAC_PDN, 4, 1),
+
+ /* Headphone Output Side */
+ SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0,
+ &es8316_left_hpmux_controls),
+ SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0,
+ &es8316_right_hpmux_controls),
+ SND_SOC_DAPM_MIXER("Left Headphone Mixer", ES8316_HPMIX_PDN,
+ 5, 1, &es8316_out_left_mix[0],
+ ARRAY_SIZE(es8316_out_left_mix)),
+ SND_SOC_DAPM_MIXER("Right Headphone Mixer", ES8316_HPMIX_PDN,
+ 1, 1, &es8316_out_right_mix[0],
+ ARRAY_SIZE(es8316_out_right_mix)),
+ SND_SOC_DAPM_PGA("Left Headphone Mixer Out", ES8316_HPMIX_PDN,
+ 4, 1, NULL, 0),
+ SND_SOC_DAPM_PGA("Right Headphone Mixer Out", ES8316_HPMIX_PDN,
+ 0, 1, NULL, 0),
+
+ SND_SOC_DAPM_OUT_DRV("Left Headphone Charge Pump", ES8316_CPHP_OUTEN,
+ 6, 0, NULL, 0),
+ SND_SOC_DAPM_OUT_DRV("Right Headphone Charge Pump", ES8316_CPHP_OUTEN,
+ 2, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Headphone Charge Pump", ES8316_CPHP_PDN2,
+ 5, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Headphone Charge Pump Clock", ES8316_CLKMGR_CLKSW,
+ 4, 0, NULL, 0),
+
+ SND_SOC_DAPM_OUT_DRV("Left Headphone Driver", ES8316_CPHP_OUTEN,
+ 5, 0, NULL, 0),
+ SND_SOC_DAPM_OUT_DRV("Right Headphone Driver", ES8316_CPHP_OUTEN,
+ 1, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Headphone Out", ES8316_CPHP_PDN1, 2, 1, NULL, 0),
+
+ /* pdn_Lical and pdn_Rical bits are documented as Reserved, but must
+ * be explicitly unset in order to enable HP output
+ */
+ SND_SOC_DAPM_SUPPLY("Left Headphone ical", ES8316_CPHP_ICAL_VOL,
+ 7, 1, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Right Headphone ical", ES8316_CPHP_ICAL_VOL,
+ 3, 1, NULL, 0),
+
+ SND_SOC_DAPM_OUTPUT("HPOL"),
+ SND_SOC_DAPM_OUTPUT("HPOR"),
+};
+
+static const struct snd_soc_dapm_route es8316_dapm_routes[] = {
+ /* Recording */
+ {"MIC1", NULL, "Mic Bias"},
+ {"MIC2", NULL, "Mic Bias"},
+ {"MIC1", NULL, "Bias"},
+ {"MIC2", NULL, "Bias"},
+ {"MIC1", NULL, "Analog power"},
+ {"MIC2", NULL, "Analog power"},
+
+ {"Differential Mux", "lin1-rin1", "MIC1"},
+ {"Differential Mux", "lin2-rin2", "MIC2"},
+ {"Line input PGA", NULL, "Differential Mux"},
+
+ {"Mono ADC", NULL, "ADC Clock"},
+ {"Mono ADC", NULL, "ADC Vref"},
+ {"Mono ADC", NULL, "ADC bias"},
+ {"Mono ADC", NULL, "Line input PGA"},
+
+ /* It's not clear why, but to avoid recording only silence,
+ * the DAC clock must be running for the ADC to work.
+ */
+ {"Mono ADC", NULL, "DAC Clock"},
+
+ {"Digital Mic Mux", "dmic disable", "Mono ADC"},
+
+ {"I2S OUT", NULL, "Digital Mic Mux"},
+
+ /* Playback */
+ {"DAC Source Mux", "LDATA TO LDAC, RDATA TO RDAC", "I2S IN"},
+
+ {"Left DAC", NULL, "DAC Clock"},
+ {"Right DAC", NULL, "DAC Clock"},
+
+ {"Left DAC", NULL, "DAC Vref"},
+ {"Right DAC", NULL, "DAC Vref"},
+
+ {"Left DAC", NULL, "DAC Source Mux"},
+ {"Right DAC", NULL, "DAC Source Mux"},
+
+ {"Left Headphone Mux", "lin-rin with Boost and PGA", "Line input PGA"},
+ {"Right Headphone Mux", "lin-rin with Boost and PGA", "Line input PGA"},
+
+ {"Left Headphone Mixer", "LLIN Switch", "Left Headphone Mux"},
+ {"Left Headphone Mixer", "Left DAC Switch", "Left DAC"},
+
+ {"Right Headphone Mixer", "RLIN Switch", "Right Headphone Mux"},
+ {"Right Headphone Mixer", "Right DAC Switch", "Right DAC"},
+
+ {"Left Headphone Mixer Out", NULL, "Left Headphone Mixer"},
+ {"Right Headphone Mixer Out", NULL, "Right Headphone Mixer"},
+
+ {"Left Headphone Charge Pump", NULL, "Left Headphone Mixer Out"},
+ {"Right Headphone Charge Pump", NULL, "Right Headphone Mixer Out"},
+
+ {"Left Headphone Charge Pump", NULL, "Headphone Charge Pump"},
+ {"Right Headphone Charge Pump", NULL, "Headphone Charge Pump"},
+
+ {"Left Headphone Charge Pump", NULL, "Headphone Charge Pump Clock"},
+ {"Right Headphone Charge Pump", NULL, "Headphone Charge Pump Clock"},
+
+ {"Left Headphone Driver", NULL, "Left Headphone Charge Pump"},
+ {"Right Headphone Driver", NULL, "Right Headphone Charge Pump"},
+
+ {"HPOL", NULL, "Left Headphone Driver"},
+ {"HPOR", NULL, "Right Headphone Driver"},
+
+ {"HPOL", NULL, "Left Headphone ical"},
+ {"HPOR", NULL, "Right Headphone ical"},
+
+ {"Headphone Out", NULL, "Bias"},
+ {"Headphone Out", NULL, "Analog power"},
+ {"HPOL", NULL, "Headphone Out"},
+ {"HPOR", NULL, "Headphone Out"},
+};
+
+static int es8316_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct es8316_priv *es8316 = snd_soc_codec_get_drvdata(codec);
+ int i;
+ int count = 0;
+
+ es8316->sysclk = freq;
+
+ if (freq == 0)
+ return 0;
+
+ /* Limit supported sample rates to ones that can be autodetected
+ * by the codec running in slave mode.
+ */
+ for (i = 0; i < NR_SUPPORTED_MCLK_LRCK_RATIOS; i++) {
+ const unsigned int ratio = supported_mclk_lrck_ratios[i];
+
+ if (freq % ratio == 0)
+ es8316->allowed_rates[count++] = freq / ratio;
+ }
+
+ es8316->sysclk_constraints.list = es8316->allowed_rates;
+ es8316->sysclk_constraints.count = count;
+
+ return 0;
+}
+
+static int es8316_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ u8 serdata1 = 0;
+ u8 serdata2 = 0;
+ u8 clksw;
+ u8 mask;
+
+ if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) {
+ dev_err(codec->dev, "Codec driver only supports slave mode\n");
+ return -EINVAL;
+ }
+
+ if ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) != SND_SOC_DAIFMT_I2S) {
+ dev_err(codec->dev, "Codec driver only supports I2S format\n");
+ return -EINVAL;
+ }
+
+ /* Clock inversion */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ serdata1 |= ES8316_SERDATA1_BCLK_INV;
+ serdata2 |= ES8316_SERDATA2_ADCLRP;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ serdata1 |= ES8316_SERDATA1_BCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ serdata2 |= ES8316_SERDATA2_ADCLRP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mask = ES8316_SERDATA1_MASTER | ES8316_SERDATA1_BCLK_INV;
+ snd_soc_update_bits(codec, ES8316_SERDATA1, mask, serdata1);
+
+ mask = ES8316_SERDATA2_FMT_MASK | ES8316_SERDATA2_ADCLRP;
+ snd_soc_update_bits(codec, ES8316_SERDATA_ADC, mask, serdata2);
+ snd_soc_update_bits(codec, ES8316_SERDATA_DAC, mask, serdata2);
+
+ /* Enable BCLK and MCLK inputs in slave mode */
+ clksw = ES8316_CLKMGR_CLKSW_MCLK_ON | ES8316_CLKMGR_CLKSW_BCLK_ON;
+ snd_soc_update_bits(codec, ES8316_CLKMGR_CLKSW, clksw, clksw);
+
+ return 0;
+}
+
+static int es8316_pcm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct es8316_priv *es8316 = snd_soc_codec_get_drvdata(codec);
+
+ if (es8316->sysclk == 0) {
+ dev_err(codec->dev, "No sysclk provided\n");
+ return -EINVAL;
+ }
+
+ /* The set of sample rates that can be supported depends on the
+ * MCLK supplied to the CODEC.
+ */
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &es8316->sysclk_constraints);
+
+ return 0;
+}
+
+static int es8316_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct es8316_priv *es8316 = snd_soc_codec_get_drvdata(codec);
+ u8 wordlen = 0;
+
+ if (!es8316->sysclk) {
+ dev_err(codec->dev, "No MCLK configured\n");
+ return -EINVAL;
+ }
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ wordlen = ES8316_SERDATA2_LEN_16;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ wordlen = ES8316_SERDATA2_LEN_20;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ wordlen = ES8316_SERDATA2_LEN_24;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ wordlen = ES8316_SERDATA2_LEN_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, ES8316_SERDATA_DAC,
+ ES8316_SERDATA2_LEN_MASK, wordlen);
+ snd_soc_update_bits(codec, ES8316_SERDATA_ADC,
+ ES8316_SERDATA2_LEN_MASK, wordlen);
+ return 0;
+}
+
+static int es8316_mute(struct snd_soc_dai *dai, int mute)
+{
+ snd_soc_update_bits(dai->codec, ES8316_DAC_SET1, 0x20,
+ mute ? 0x20 : 0);
+ return 0;
+}
+
+#define ES8316_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+static struct snd_soc_dai_ops es8316_ops = {
+ .startup = es8316_pcm_startup,
+ .hw_params = es8316_pcm_hw_params,
+ .set_fmt = es8316_set_dai_fmt,
+ .set_sysclk = es8316_set_dai_sysclk,
+ .digital_mute = es8316_mute,
+};
+
+static struct snd_soc_dai_driver es8316_dai = {
+ .name = "ES8316 HiFi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = ES8316_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = ES8316_FORMATS,
+ },
+ .ops = &es8316_ops,
+ .symmetric_rates = 1,
+};
+
+static int es8316_probe(struct snd_soc_codec *codec)
+{
+ /* Reset codec and enable current state machine */
+ snd_soc_write(codec, ES8316_RESET, 0x3f);
+ usleep_range(5000, 5500);
+ snd_soc_write(codec, ES8316_RESET, ES8316_RESET_CSM_ON);
+ msleep(30);
+
+ /*
+ * Documentation is unclear, but this value from the vendor driver is
+ * needed otherwise audio output is silent.
+ */
+ snd_soc_write(codec, ES8316_SYS_VMIDSEL, 0xff);
+
+ /*
+ * Documentation for this register is unclear and incomplete,
+ * but here is a vendor-provided value that improves volume
+ * and quality for Intel CHT platforms.
+ */
+ snd_soc_write(codec, ES8316_CLKMGR_ADCOSR, 0x32);
+
+ return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_es8316 = {
+ .probe = es8316_probe,
+ .idle_bias_off = true,
+
+ .component_driver = {
+ .controls = es8316_snd_controls,
+ .num_controls = ARRAY_SIZE(es8316_snd_controls),
+ .dapm_widgets = es8316_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(es8316_dapm_widgets),
+ .dapm_routes = es8316_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(es8316_dapm_routes),
+ },
+};
+
+static const struct regmap_config es8316_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x53,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int es8316_i2c_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id)
+{
+ struct es8316_priv *es8316;
+ struct regmap *regmap;
+
+ es8316 = devm_kzalloc(&i2c_client->dev, sizeof(struct es8316_priv),
+ GFP_KERNEL);
+ if (es8316 == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c_client, es8316);
+
+ regmap = devm_regmap_init_i2c(i2c_client, &es8316_regmap);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return snd_soc_register_codec(&i2c_client->dev, &soc_codec_dev_es8316,
+ &es8316_dai, 1);
+}
+
+static int es8316_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+ return 0;
+}
+
+static const struct i2c_device_id es8316_i2c_id[] = {
+ {"es8316", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, es8316_i2c_id);
+
+static const struct of_device_id es8316_of_match[] = {
+ { .compatible = "everest,es8316", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, es8316_of_match);
+
+static const struct acpi_device_id es8316_acpi_match[] = {
+ {"ESSX8316", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, es8316_acpi_match);
+
+static struct i2c_driver es8316_i2c_driver = {
+ .driver = {
+ .name = "es8316",
+ .acpi_match_table = ACPI_PTR(es8316_acpi_match),
+ .of_match_table = of_match_ptr(es8316_of_match),
+ },
+ .probe = es8316_i2c_probe,
+ .remove = es8316_i2c_remove,
+ .id_table = es8316_i2c_id,
+};
+module_i2c_driver(es8316_i2c_driver);
+
+MODULE_DESCRIPTION("Everest Semi ES8316 ALSA SoC Codec Driver");
+MODULE_AUTHOR("David Yang <[email protected]>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/es8316.h b/sound/soc/codecs/es8316.h
new file mode 100644
index 000000000000..6bcdd63ea459
--- /dev/null
+++ b/sound/soc/codecs/es8316.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright Everest Semiconductor Co.,Ltd
+ *
+ * Author: David Yang <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _ES8316_H
+#define _ES8316_H
+
+/*
+ * ES8316 register space
+ */
+
+/* Reset Control */
+#define ES8316_RESET 0x00
+
+/* Clock Management */
+#define ES8316_CLKMGR_CLKSW 0x01
+#define ES8316_CLKMGR_CLKSEL 0x02
+#define ES8316_CLKMGR_ADCOSR 0x03
+#define ES8316_CLKMGR_ADCDIV1 0x04
+#define ES8316_CLKMGR_ADCDIV2 0x05
+#define ES8316_CLKMGR_DACDIV1 0x06
+#define ES8316_CLKMGR_DACDIV2 0x07
+#define ES8316_CLKMGR_CPDIV 0x08
+
+/* Serial Data Port Control */
+#define ES8316_SERDATA1 0x09
+#define ES8316_SERDATA_ADC 0x0a
+#define ES8316_SERDATA_DAC 0x0b
+
+/* System Control */
+#define ES8316_SYS_VMIDSEL 0x0c
+#define ES8316_SYS_PDN 0x0d
+#define ES8316_SYS_LP1 0x0e
+#define ES8316_SYS_LP2 0x0f
+#define ES8316_SYS_VMIDLOW 0x10
+#define ES8316_SYS_VSEL 0x11
+#define ES8316_SYS_REF 0x12
+
+/* Headphone Mixer */
+#define ES8316_HPMIX_SEL 0x13
+#define ES8316_HPMIX_SWITCH 0x14
+#define ES8316_HPMIX_PDN 0x15
+#define ES8316_HPMIX_VOL 0x16
+
+/* Charge Pump Headphone driver */
+#define ES8316_CPHP_OUTEN 0x17
+#define ES8316_CPHP_ICAL_VOL 0x18
+#define ES8316_CPHP_PDN1 0x19
+#define ES8316_CPHP_PDN2 0x1a
+#define ES8316_CPHP_LDOCTL 0x1b
+
+/* Calibration */
+#define ES8316_CAL_TYPE 0x1c
+#define ES8316_CAL_SET 0x1d
+#define ES8316_CAL_HPLIV 0x1e
+#define ES8316_CAL_HPRIV 0x1f
+#define ES8316_CAL_HPLMV 0x20
+#define ES8316_CAL_HPRMV 0x21
+
+/* ADC Control */
+#define ES8316_ADC_PDN_LINSEL 0x22
+#define ES8316_ADC_PGAGAIN 0x23
+#define ES8316_ADC_D2SEPGA 0x24
+#define ES8316_ADC_DMIC 0x25
+#define ES8316_ADC_MUTE 0x26
+#define ES8316_ADC_VOLUME 0x27
+#define ES8316_ADC_ALC1 0x29
+#define ES8316_ADC_ALC2 0x2a
+#define ES8316_ADC_ALC3 0x2b
+#define ES8316_ADC_ALC4 0x2c
+#define ES8316_ADC_ALC5 0x2d
+#define ES8316_ADC_ALC_NG 0x2e
+
+/* DAC Control */
+#define ES8316_DAC_PDN 0x2f
+#define ES8316_DAC_SET1 0x30
+#define ES8316_DAC_SET2 0x31
+#define ES8316_DAC_SET3 0x32
+#define ES8316_DAC_VOLL 0x33
+#define ES8316_DAC_VOLR 0x34
+
+/* GPIO */
+#define ES8316_GPIO_SEL 0x4d
+#define ES8316_GPIO_DEBOUNCE 0x4e
+#define ES8316_GPIO_FLAG 0x4f
+
+/* Test mode */
+#define ES8316_TESTMODE 0x50
+#define ES8316_TEST1 0x51
+#define ES8316_TEST2 0x52
+#define ES8316_TEST3 0x53
+
+/*
+ * Field definitions
+ */
+
+/* ES8316_RESET */
+#define ES8316_RESET_CSM_ON 0x80
+
+/* ES8316_CLKMGR_CLKSW */
+#define ES8316_CLKMGR_CLKSW_MCLK_ON 0x40
+#define ES8316_CLKMGR_CLKSW_BCLK_ON 0x20
+
+/* ES8316_SERDATA1 */
+#define ES8316_SERDATA1_MASTER 0x80
+#define ES8316_SERDATA1_BCLK_INV 0x20
+
+/* ES8316_SERDATA_ADC and _DAC */
+#define ES8316_SERDATA2_FMT_MASK 0x3
+#define ES8316_SERDATA2_FMT_I2S 0x00
+#define ES8316_SERDATA2_FMT_LEFTJ 0x01
+#define ES8316_SERDATA2_FMT_RIGHTJ 0x02
+#define ES8316_SERDATA2_FMT_PCM 0x03
+#define ES8316_SERDATA2_ADCLRP 0x20
+#define ES8316_SERDATA2_LEN_MASK 0x1c
+#define ES8316_SERDATA2_LEN_24 0x00
+#define ES8316_SERDATA2_LEN_20 0x04
+#define ES8316_SERDATA2_LEN_18 0x08
+#define ES8316_SERDATA2_LEN_16 0x0c
+#define ES8316_SERDATA2_LEN_32 0x10
+
+#endif
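
The new es8316 driver above registers a single DAI named "ES8316 HiFi". For orientation, a machine driver would typically bind to it through a DAI link along the lines of the hedged sketch below; everything except the DAI name and the ACPI/OF identifiers shown in this patch (the CPU DAI, platform name and the "i2c-ESSX8316:00" codec device name) is an illustrative assumption, not part of this change.

#include <sound/soc.h>

/*
 * Minimal sketch of a DAI link referencing the ES8316 codec DAI.
 * CPU/platform names and the ACPI-enumerated codec name are
 * assumptions for illustration only.
 */
static struct snd_soc_dai_link demo_es8316_dai_link = {
	.name		= "ES8316 Audio",
	.stream_name	= "Analog",
	.cpu_dai_name	= "ssp2-port",		/* platform specific */
	.platform_name	= "sst-mfld-platform",	/* platform specific */
	.codec_name	= "i2c-ESSX8316:00",	/* hypothetical ACPI I2C device */
	.codec_dai_name	= "ES8316 HiFi",
	.dai_fmt	= SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
			  SND_SOC_DAIFMT_CBS_CFS,
};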
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index 8c5ae1fc23a9..22ed0dc88f0a 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -25,17 +25,6 @@
#include <drm/drm_crtc.h> /* This is only to get MAX_ELD_BYTES */
-struct hdmi_device {
- struct device *dev;
- struct list_head list;
- int cnt;
-};
-#define pos_to_hdmi_device(pos) container_of((pos), struct hdmi_device, list)
-LIST_HEAD(hdmi_device_list);
-static DEFINE_MUTEX(hdmi_mutex);
-
-#define DAI_NAME_SIZE 16
-
#define HDMI_CODEC_CHMAP_IDX_UNKNOWN -1
struct hdmi_codec_channel_map_table {
@@ -293,7 +282,6 @@ struct hdmi_codec_priv {
struct hdmi_codec_daifmt daifmt[2];
struct mutex current_stream_lock;
struct snd_pcm_substream *current_stream;
- struct snd_pcm_hw_constraint_list ratec;
uint8_t eld[MAX_ELD_BYTES];
struct snd_pcm_chmap *chmap_info;
unsigned int chmap_idx;
@@ -702,6 +690,7 @@ static int hdmi_codec_pcm_new(struct snd_soc_pcm_runtime *rtd,
}
static struct snd_soc_dai_driver hdmi_i2s_dai = {
+ .name = "i2s-hifi",
.id = DAI_ID_I2S,
.playback = {
.stream_name = "Playback",
@@ -716,6 +705,7 @@ static struct snd_soc_dai_driver hdmi_i2s_dai = {
};
static const struct snd_soc_dai_driver hdmi_spdif_dai = {
+ .name = "spdif-hifi",
.id = DAI_ID_SPDIF,
.playback = {
.stream_name = "Playback",
@@ -728,30 +718,16 @@ static const struct snd_soc_dai_driver hdmi_spdif_dai = {
.pcm_new = hdmi_codec_pcm_new,
};
-static char hdmi_dai_name[][DAI_NAME_SIZE] = {
- "hdmi-hifi.0",
- "hdmi-hifi.1",
- "hdmi-hifi.2",
- "hdmi-hifi.3",
-};
-
-static int hdmi_of_xlate_dai_name(struct snd_soc_component *component,
- struct of_phandle_args *args,
- const char **dai_name)
+static int hdmi_of_xlate_dai_id(struct snd_soc_component *component,
+ struct device_node *endpoint)
{
- int id;
+ struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+ int ret = -ENOTSUPP; /* see snd_soc_get_dai_id() */
- if (args->args_count)
- id = args->args[0];
- else
- id = 0;
+ if (hcp->hcd.ops->get_dai_id)
+ ret = hcp->hcd.ops->get_dai_id(component, endpoint);
- if (id < ARRAY_SIZE(hdmi_dai_name)) {
- *dai_name = hdmi_dai_name[id];
- return 0;
- }
-
- return -EAGAIN;
+ return ret;
}
static struct snd_soc_codec_driver hdmi_codec = {
@@ -762,7 +738,7 @@ static struct snd_soc_codec_driver hdmi_codec = {
.num_dapm_widgets = ARRAY_SIZE(hdmi_widgets),
.dapm_routes = hdmi_routes,
.num_dapm_routes = ARRAY_SIZE(hdmi_routes),
- .of_xlate_dai_name = hdmi_of_xlate_dai_name,
+ .of_xlate_dai_id = hdmi_of_xlate_dai_id,
},
};
@@ -771,8 +747,6 @@ static int hdmi_codec_probe(struct platform_device *pdev)
struct hdmi_codec_pdata *hcd = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
struct hdmi_codec_priv *hcp;
- struct hdmi_device *hd;
- struct list_head *pos;
int dai_count, i = 0;
int ret;
@@ -794,35 +768,6 @@ static int hdmi_codec_probe(struct platform_device *pdev)
if (!hcp)
return -ENOMEM;
- hd = NULL;
- mutex_lock(&hdmi_mutex);
- list_for_each(pos, &hdmi_device_list) {
- struct hdmi_device *tmp = pos_to_hdmi_device(pos);
-
- if (tmp->dev == dev->parent) {
- hd = tmp;
- break;
- }
- }
-
- if (!hd) {
- hd = devm_kzalloc(dev, sizeof(*hd), GFP_KERNEL);
- if (!hd) {
- mutex_unlock(&hdmi_mutex);
- return -ENOMEM;
- }
-
- hd->dev = dev->parent;
-
- list_add_tail(&hd->list, &hdmi_device_list);
- }
- mutex_unlock(&hdmi_mutex);
-
- if (hd->cnt >= ARRAY_SIZE(hdmi_dai_name)) {
- dev_err(dev, "too many hdmi codec are deteced\n");
- return -EINVAL;
- }
-
hcp->hcd = *hcd;
mutex_init(&hcp->current_stream_lock);
@@ -835,14 +780,11 @@ static int hdmi_codec_probe(struct platform_device *pdev)
hcp->daidrv[i] = hdmi_i2s_dai;
hcp->daidrv[i].playback.channels_max =
hcd->max_i2s_channels;
- hcp->daidrv[i].name = hdmi_dai_name[hd->cnt++];
i++;
}
- if (hcd->spdif) {
+ if (hcd->spdif)
hcp->daidrv[i] = hdmi_spdif_dai;
- hcp->daidrv[i].name = hdmi_dai_name[hd->cnt++];
- }
ret = snd_soc_register_codec(dev, &hdmi_codec, hcp->daidrv,
dai_count);
@@ -859,20 +801,8 @@ static int hdmi_codec_probe(struct platform_device *pdev)
static int hdmi_codec_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct list_head *pos;
struct hdmi_codec_priv *hcp;
- mutex_lock(&hdmi_mutex);
- list_for_each(pos, &hdmi_device_list) {
- struct hdmi_device *tmp = pos_to_hdmi_device(pos);
-
- if (tmp->dev == dev->parent) {
- list_del(pos);
- break;
- }
- }
- mutex_unlock(&hdmi_mutex);
-
hcp = dev_get_drvdata(dev);
kfree(hcp->chmap_info);
snd_soc_unregister_codec(dev);
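
The hdmi-codec change above drops the global "hdmi-hifi.N" name bookkeeping: the I2S and S/PDIF DAIs now carry fixed names, and DAI lookup from a device-tree endpoint is delegated to the attached video driver through hcd.ops->get_dai_id via hdmi_of_xlate_dai_id(). A hedged sketch of what such a callback could look like in a bridge driver is shown below; the port numbering (sound endpoint on port 2) is an assumption for illustration and not defined by this patch.

#include <linux/of_graph.h>
#include <sound/soc.h>

/* Hypothetical bridge-driver implementation of hdmi_codec_ops.get_dai_id */
static int demo_hdmi_get_dai_id(struct snd_soc_component *component,
				struct device_node *endpoint)
{
	struct of_endpoint of_ep;
	int ret;

	ret = of_graph_parse_endpoint(endpoint, &of_ep);
	if (ret < 0)
		return ret;

	/* Assumed layout: the HDMI sound input sits on port@2 */
	if (of_ep.port == 2)
		return 0;	/* first (I2S) DAI */

	return -EINVAL;
}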
diff --git a/sound/soc/codecs/max9867.c b/sound/soc/codecs/max9867.c
index 0247edc9c84e..2a40a69a7513 100644
--- a/sound/soc/codecs/max9867.c
+++ b/sound/soc/codecs/max9867.c
@@ -132,7 +132,7 @@ enum rates {
pcm_rate_48, max_pcm_rate,
};
-struct ni_div_rates {
+static const struct ni_div_rates {
u32 mclk;
u16 ni[max_pcm_rate];
} ni_div[] = {
diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
index d8e8590746af..a78802920c3c 100644
--- a/sound/soc/codecs/msm8916-wcd-analog.c
+++ b/sound/soc/codecs/msm8916-wcd-analog.c
@@ -223,8 +223,8 @@ struct pm8916_wcd_analog_priv {
u16 codec_version;
struct clk *mclk;
struct regulator_bulk_data supplies[ARRAY_SIZE(supply_names)];
- bool micbias1_cap_mode;
- bool micbias2_cap_mode;
+ unsigned int micbias1_cap_mode;
+ unsigned int micbias2_cap_mode;
};
static const char *const adc2_mux_text[] = { "ZERO", "INP2", "INP3" };
@@ -285,7 +285,7 @@ static void pm8916_wcd_analog_micbias_enable(struct snd_soc_codec *codec)
static int pm8916_wcd_analog_enable_micbias_ext(struct snd_soc_codec
*codec, int event,
- int reg, u32 cap_mode)
+ int reg, unsigned int cap_mode)
{
switch (event) {
case SND_SOC_DAPM_POST_PMU:
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
index cca974d26136..3a309b18035e 100644
--- a/sound/soc/codecs/nau8824.c
+++ b/sound/soc/codecs/nau8824.c
@@ -1125,6 +1125,57 @@ static int nau8824_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
}
/**
+ * nau8824_set_tdm_slot - configure DAI TDM.
+ * @dai: DAI
+ * @tx_mask: Bitmask representing active TX slots. Ex.
+ * 0xf for normal 4-channel TDM.
+ * 0xf0 for shifted 4-channel TDM.
+ * @rx_mask: Bitmask [0:1] representing active DACR RX slots.
+ * Bitmask [2:3] representing active DACL RX slots.
+ * 00=CH0,01=CH1,10=CH2,11=CH3. Ex.
+ * 0xf for DACL/R selecting TDM CH3.
+ * 0xf0 for DACL/R selecting shifted TDM CH3.
+ * @slots: Number of slots in use.
+ * @slot_width: Width in bits for each slot.
+ *
+ * Configures a DAI for TDM operation. Only 4-slot TDM is supported.
+ */
+static int nau8824_set_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct nau8824 *nau8824 = snd_soc_codec_get_drvdata(codec);
+ unsigned int tslot_l = 0, ctrl_val = 0;
+
+ if (slots > 4 || ((tx_mask & 0xf0) && (tx_mask & 0xf)) ||
+ ((rx_mask & 0xf0) && (rx_mask & 0xf)) ||
+ ((rx_mask & 0xf0) && (tx_mask & 0xf)) ||
+ ((rx_mask & 0xf) && (tx_mask & 0xf0)))
+ return -EINVAL;
+
+ ctrl_val |= (NAU8824_TDM_MODE | NAU8824_TDM_OFFSET_EN);
+ if (tx_mask & 0xf0) {
+ tslot_l = 4 * slot_width;
+ ctrl_val |= (tx_mask >> 4);
+ } else {
+ ctrl_val |= tx_mask;
+ }
+ if (rx_mask & 0xf0)
+ ctrl_val |= ((rx_mask >> 4) << NAU8824_TDM_DACR_RX_SFT);
+ else
+ ctrl_val |= (rx_mask << NAU8824_TDM_DACR_RX_SFT);
+
+ regmap_update_bits(nau8824->regmap, NAU8824_REG_TDM_CTRL,
+ NAU8824_TDM_MODE | NAU8824_TDM_OFFSET_EN |
+ NAU8824_TDM_DACL_RX_MASK | NAU8824_TDM_DACR_RX_MASK |
+ NAU8824_TDM_TX_MASK, ctrl_val);
+ regmap_update_bits(nau8824->regmap, NAU8824_REG_PORT0_LEFT_TIME_SLOT,
+ NAU8824_TSLOT_L_MASK, tslot_l);
+
+ return 0;
+}
+
+/**
* nau8824_calc_fll_param - Calculate FLL parameters.
* @fll_in: external clock provided to codec.
* @fs: sampling rate.
@@ -1440,6 +1491,7 @@ static struct snd_soc_codec_driver nau8824_codec_driver = {
static const struct snd_soc_dai_ops nau8824_dai_ops = {
.hw_params = nau8824_hw_params,
.set_fmt = nau8824_set_fmt,
+ .set_tdm_slot = nau8824_set_tdm_slot,
};
#define NAU8824_RATES SNDRV_PCM_RATE_8000_192000
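
Following the kernel-doc for nau8824_set_tdm_slot() above, a machine driver selecting the shifted 4-channel layout would pass the 0xf0 masks through the standard helper, roughly as in the hedged sketch below; the 32-bit slot width and the choice of the shifted layout are illustrative assumptions.

#include <sound/soc.h>

/* Sketch of a machine-driver hook using the new nau8824 TDM support */
static int demo_nau8824_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;

	/* Shifted 4-channel TDM; DACL/R pick the shifted CH3 selection */
	return snd_soc_dai_set_tdm_slot(codec_dai, 0xf0, 0xf0, 4, 32);
}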
diff --git a/sound/soc/codecs/nau8824.h b/sound/soc/codecs/nau8824.h
index 87ac9a382aed..21eae2431c83 100644
--- a/sound/soc/codecs/nau8824.h
+++ b/sound/soc/codecs/nau8824.h
@@ -258,6 +258,18 @@
#define NAU8824_I2S_MS_SLAVE (0 << NAU8824_I2S_MS_SFT)
#define NAU8824_I2S_BLK_DIV_MASK 0x7
+/* PORT0_LEFT_TIME_SLOT (0x1E) */
+#define NAU8824_TSLOT_L_MASK 0x3ff
+
+/* TDM_CTRL (0x20) */
+#define NAU8824_TDM_MODE (0x1 << 15)
+#define NAU8824_TDM_OFFSET_EN (0x1 << 14)
+#define NAU8824_TDM_DACL_RX_SFT 6
+#define NAU8824_TDM_DACL_RX_MASK (0x3 << NAU8824_TDM_DACL_RX_SFT)
+#define NAU8824_TDM_DACR_RX_SFT 4
+#define NAU8824_TDM_DACR_RX_MASK (0x3 << NAU8824_TDM_DACR_RX_SFT)
+#define NAU8824_TDM_TX_MASK 0xf
+
/* ADC_FILTER_CTRL (0x24) */
#define NAU8824_ADC_SYNC_DOWN_MASK 0x3
#define NAU8824_ADC_SYNC_DOWN_32 0
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index 97fbeba9498f..46a30eaa7ace 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -1612,7 +1612,6 @@ static int nau8825_jack_insert(struct nau8825 *nau8825)
snd_soc_dapm_sync(dapm);
break;
case 2:
- case 3:
dev_dbg(nau8825->dev, "CTIA (micgnd2) mic connected\n");
type = SND_JACK_HEADSET;
@@ -1632,6 +1631,11 @@ static int nau8825_jack_insert(struct nau8825 *nau8825)
snd_soc_dapm_force_enable_pin(dapm, "SAR");
snd_soc_dapm_sync(dapm);
break;
+ case 3:
+ /* detect error case */
+ dev_err(nau8825->dev, "detection error; disable mic function\n");
+ type = SND_JACK_HEADPHONE;
+ break;
}
/* Leaving HPOL/R grounded after jack insert by default. They will be
@@ -1682,7 +1686,7 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
} else if (active_irq & NAU8825_HEADSET_COMPLETION_IRQ) {
if (nau8825_is_jack_inserted(regmap)) {
event |= nau8825_jack_insert(nau8825);
- if (!nau8825->high_imped) {
+ if (!nau8825->xtalk_bypass && !nau8825->high_imped) {
/* Apply the cross talk suppression in the
* headset without high impedance.
*/
@@ -2328,6 +2332,13 @@ static int nau8825_set_bias_level(struct snd_soc_codec *codec,
break;
case SND_SOC_BIAS_OFF:
+ /* Reset the configuration of jack type for detection */
+ /* Detach 2kOhm Resistors from MICBIAS to MICGND1/2 */
+ regmap_update_bits(nau8825->regmap, NAU8825_REG_MIC_BIAS,
+ NAU8825_MICBIAS_JKSLV | NAU8825_MICBIAS_JKR2, 0);
+ /* ground HPL/HPR, MICGRND1/2 */
+ regmap_update_bits(nau8825->regmap,
+ NAU8825_REG_HSD_CTRL, 0xf, 0xf);
 /* Cancel and reset cross talk detection function */
nau8825_xtalk_cancel(nau8825);
/* Turn off all interruptions before system shutdown. Keep the
@@ -2351,6 +2362,10 @@ static int __maybe_unused nau8825_suspend(struct snd_soc_codec *codec)
disable_irq(nau8825->irq);
snd_soc_codec_force_bias_level(codec, SND_SOC_BIAS_OFF);
+ /* Power down codec power; don't support button wakeup */
+ snd_soc_dapm_disable_pin(nau8825->dapm, "SAR");
+ snd_soc_dapm_disable_pin(nau8825->dapm, "MICBIAS");
+ snd_soc_dapm_sync(nau8825->dapm);
regcache_cache_only(nau8825->regmap, true);
regcache_mark_dirty(nau8825->regmap);
@@ -2425,10 +2440,13 @@ static void nau8825_print_device_properties(struct nau8825 *nau8825)
nau8825->jack_insert_debounce);
dev_dbg(dev, "jack-eject-debounce: %d\n",
nau8825->jack_eject_debounce);
+ dev_dbg(dev, "crosstalk-bypass: %d\n",
+ nau8825->xtalk_bypass);
}
static int nau8825_read_device_properties(struct device *dev,
struct nau8825 *nau8825) {
+ int ret;
nau8825->jkdet_enable = device_property_read_bool(dev,
"nuvoton,jkdet-enable");
@@ -2436,30 +2454,60 @@ static int nau8825_read_device_properties(struct device *dev,
"nuvoton,jkdet-pull-enable");
nau8825->jkdet_pull_up = device_property_read_bool(dev,
"nuvoton,jkdet-pull-up");
- device_property_read_u32(dev, "nuvoton,jkdet-polarity",
+ ret = device_property_read_u32(dev, "nuvoton,jkdet-polarity",
&nau8825->jkdet_polarity);
- device_property_read_u32(dev, "nuvoton,micbias-voltage",
+ if (ret)
+ nau8825->jkdet_polarity = 1;
+ ret = device_property_read_u32(dev, "nuvoton,micbias-voltage",
&nau8825->micbias_voltage);
- device_property_read_u32(dev, "nuvoton,vref-impedance",
+ if (ret)
+ nau8825->micbias_voltage = 6;
+ ret = device_property_read_u32(dev, "nuvoton,vref-impedance",
&nau8825->vref_impedance);
- device_property_read_u32(dev, "nuvoton,sar-threshold-num",
+ if (ret)
+ nau8825->vref_impedance = 2;
+ ret = device_property_read_u32(dev, "nuvoton,sar-threshold-num",
&nau8825->sar_threshold_num);
- device_property_read_u32_array(dev, "nuvoton,sar-threshold",
+ if (ret)
+ nau8825->sar_threshold_num = 4;
+ ret = device_property_read_u32_array(dev, "nuvoton,sar-threshold",
nau8825->sar_threshold, nau8825->sar_threshold_num);
- device_property_read_u32(dev, "nuvoton,sar-hysteresis",
+ if (ret) {
+ nau8825->sar_threshold[0] = 0x08;
+ nau8825->sar_threshold[1] = 0x12;
+ nau8825->sar_threshold[2] = 0x26;
+ nau8825->sar_threshold[3] = 0x73;
+ }
+ ret = device_property_read_u32(dev, "nuvoton,sar-hysteresis",
&nau8825->sar_hysteresis);
- device_property_read_u32(dev, "nuvoton,sar-voltage",
+ if (ret)
+ nau8825->sar_hysteresis = 0;
+ ret = device_property_read_u32(dev, "nuvoton,sar-voltage",
&nau8825->sar_voltage);
- device_property_read_u32(dev, "nuvoton,sar-compare-time",
+ if (ret)
+ nau8825->sar_voltage = 6;
+ ret = device_property_read_u32(dev, "nuvoton,sar-compare-time",
&nau8825->sar_compare_time);
- device_property_read_u32(dev, "nuvoton,sar-sampling-time",
+ if (ret)
+ nau8825->sar_compare_time = 1;
+ ret = device_property_read_u32(dev, "nuvoton,sar-sampling-time",
&nau8825->sar_sampling_time);
- device_property_read_u32(dev, "nuvoton,short-key-debounce",
+ if (ret)
+ nau8825->sar_sampling_time = 1;
+ ret = device_property_read_u32(dev, "nuvoton,short-key-debounce",
&nau8825->key_debounce);
- device_property_read_u32(dev, "nuvoton,jack-insert-debounce",
+ if (ret)
+ nau8825->key_debounce = 3;
+ ret = device_property_read_u32(dev, "nuvoton,jack-insert-debounce",
&nau8825->jack_insert_debounce);
- device_property_read_u32(dev, "nuvoton,jack-eject-debounce",
+ if (ret)
+ nau8825->jack_insert_debounce = 7;
+ ret = device_property_read_u32(dev, "nuvoton,jack-eject-debounce",
&nau8825->jack_eject_debounce);
+ if (ret)
+ nau8825->jack_eject_debounce = 0;
+ nau8825->xtalk_bypass = device_property_read_bool(dev,
+ "nuvoton,crosstalk-bypass");
nau8825->mclk = devm_clk_get(dev, "mclk");
if (PTR_ERR(nau8825->mclk) == -EPROBE_DEFER) {
diff --git a/sound/soc/codecs/nau8825.h b/sound/soc/codecs/nau8825.h
index 514fd13c2f46..8aee5c8647ae 100644
--- a/sound/soc/codecs/nau8825.h
+++ b/sound/soc/codecs/nau8825.h
@@ -476,6 +476,7 @@ struct nau8825 {
int xtalk_event_mask;
bool xtalk_protect;
int imp_rms[NAU8825_XTALK_IMM];
+ int xtalk_bypass;
};
int nau8825_enable_jack_detect(struct snd_soc_codec *codec,
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index f91221b1ddf0..1b6796c4c471 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/acpi.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -395,14 +396,14 @@ static const char * const rt5514_dmic_src[] = {
"DMIC1", "DMIC2"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5514_stereo1_dmic_enum, RT5514_DIG_SOURCE_CTRL,
RT5514_AD0_DMIC_INPUT_SEL_SFT, rt5514_dmic_src);
static const struct snd_kcontrol_new rt5514_sto1_dmic_mux =
SOC_DAPM_ENUM("Stereo1 DMIC Source", rt5514_stereo1_dmic_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5514_stereo2_dmic_enum, RT5514_DIG_SOURCE_CTRL,
RT5514_AD1_DMIC_INPUT_SEL_SFT, rt5514_dmic_src);
@@ -906,9 +907,23 @@ static int rt5514_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
if (rx_mask || tx_mask)
val |= RT5514_TDM_MODE;
- if (slots == 4)
+ switch (slots) {
+ case 4:
val |= RT5514_TDMSLOT_SEL_RX_4CH | RT5514_TDMSLOT_SEL_TX_4CH;
+ break;
+
+ case 6:
+ val |= RT5514_TDMSLOT_SEL_RX_6CH | RT5514_TDMSLOT_SEL_TX_6CH;
+ break;
+ case 8:
+ val |= RT5514_TDMSLOT_SEL_RX_8CH | RT5514_TDMSLOT_SEL_TX_8CH;
+ break;
+
+ case 2:
+ default:
+ break;
+ }
switch (slot_width) {
case 20:
@@ -919,6 +934,10 @@ static int rt5514_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
val |= RT5514_CH_LEN_RX_24 | RT5514_CH_LEN_TX_24;
break;
+ case 25:
+ val |= RT5514_TDM_MODE2;
+ break;
+
case 32:
val |= RT5514_CH_LEN_RX_32 | RT5514_CH_LEN_TX_32;
break;
@@ -930,7 +949,8 @@ static int rt5514_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
regmap_update_bits(rt5514->regmap, RT5514_I2S_CTRL1, RT5514_TDM_MODE |
RT5514_TDMSLOT_SEL_RX_MASK | RT5514_TDMSLOT_SEL_TX_MASK |
- RT5514_CH_LEN_RX_MASK | RT5514_CH_LEN_TX_MASK, val);
+ RT5514_CH_LEN_RX_MASK | RT5514_CH_LEN_TX_MASK |
+ RT5514_TDM_MODE2, val);
return 0;
}
@@ -1076,6 +1096,14 @@ static const struct of_device_id rt5514_of_match[] = {
MODULE_DEVICE_TABLE(of, rt5514_of_match);
#endif
+#ifdef CONFIG_ACPI
+static struct acpi_device_id rt5514_acpi_match[] = {
+ { "10EC5514", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, rt5514_acpi_match);
+#endif
+
static int rt5514_parse_dt(struct rt5514_priv *rt5514, struct device *dev)
{
device_property_read_u32(dev, "realtek,dmic-init-delay-ms",
@@ -1179,6 +1207,7 @@ static const struct dev_pm_ops rt5514_i2_pm_ops = {
static struct i2c_driver rt5514_i2c_driver = {
.driver = {
.name = "rt5514",
+ .acpi_match_table = ACPI_PTR(rt5514_acpi_match),
.of_match_table = of_match_ptr(rt5514_of_match),
.pm = &rt5514_i2_pm_ops,
},
diff --git a/sound/soc/codecs/rt5514.h b/sound/soc/codecs/rt5514.h
index 5d343fb6d125..02bc212a86d9 100644
--- a/sound/soc/codecs/rt5514.h
+++ b/sound/soc/codecs/rt5514.h
@@ -117,6 +117,8 @@
#define RT5514_POW_ADCFEDL_BIT 0
/* RT5514_I2S_CTRL1 (0x2010) */
+#define RT5514_TDM_MODE2 (0x1 << 30)
+#define RT5514_TDM_MODE2_SFT 30
#define RT5514_TDM_MODE (0x1 << 28)
#define RT5514_TDM_MODE_SFT 28
#define RT5514_I2S_LR_MASK (0x1 << 26)
@@ -136,6 +138,8 @@
#define RT5514_TDMSLOT_SEL_RX_MASK (0x3 << 10)
#define RT5514_TDMSLOT_SEL_RX_SFT 10
#define RT5514_TDMSLOT_SEL_RX_4CH (0x1 << 10)
+#define RT5514_TDMSLOT_SEL_RX_6CH (0x2 << 10)
+#define RT5514_TDMSLOT_SEL_RX_8CH (0x3 << 10)
#define RT5514_CH_LEN_RX_MASK (0x3 << 8)
#define RT5514_CH_LEN_RX_SFT 8
#define RT5514_CH_LEN_RX_16 (0x0 << 8)
@@ -145,6 +149,8 @@
#define RT5514_TDMSLOT_SEL_TX_MASK (0x3 << 6)
#define RT5514_TDMSLOT_SEL_TX_SFT 6
#define RT5514_TDMSLOT_SEL_TX_4CH (0x1 << 6)
+#define RT5514_TDMSLOT_SEL_TX_6CH (0x2 << 6)
+#define RT5514_TDMSLOT_SEL_TX_8CH (0x3 << 6)
#define RT5514_CH_LEN_TX_MASK (0x3 << 4)
#define RT5514_CH_LEN_TX_SFT 4
#define RT5514_CH_LEN_TX_16 (0x0 << 4)
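
With the RX/TX slot-select fields above extended to 6- and 8-channel values (and the new 25-bit TDM_MODE2 case in rt5514_set_tdm_slot()), a machine driver can now request the wider layouts through the usual DAI helper. The sketch below is a hedged usage example only; the 8-slot, 32-bit configuration and the mask values are illustrative assumptions.

#include <sound/soc.h>

/* Hypothetical 8-channel TDM setup against the widened rt5514 driver */
static int demo_rt5514_tdm_setup(struct snd_soc_dai *codec_dai)
{
	/* Eight active TX/RX slots, 32 bits per slot */
	return snd_soc_dai_set_tdm_slot(codec_dai, 0xff, 0xff, 8, 32);
}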
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 87844a45886a..9ec58166f7c4 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -34,6 +34,17 @@
#include "rl6231.h"
#include "rt5645.h"
+#define QUIRK_INV_JD1_1(q) ((q) & 1)
+#define QUIRK_LEVEL_IRQ(q) (((q) >> 1) & 1)
+#define QUIRK_IN2_DIFF(q) (((q) >> 2) & 1)
+#define QUIRK_JD_MODE(q) (((q) >> 4) & 7)
+#define QUIRK_DMIC1_DATA_PIN(q) (((q) >> 8) & 3)
+#define QUIRK_DMIC2_DATA_PIN(q) (((q) >> 12) & 3)
+
+static unsigned int quirk = -1;
+module_param(quirk, uint, 0444);
+MODULE_PARM_DESC(quirk, "RT5645 pdata quirk override");
+
#define RT5645_DEVICE_ID 0x6308
#define RT5650_DEVICE_ID 0x6419
@@ -59,7 +70,7 @@ static const struct regmap_range_cfg rt5645_ranges[] = {
static const struct reg_sequence init_list[] = {
{RT5645_PR_BASE + 0x3d, 0x3600},
- {RT5645_PR_BASE + 0x1c, 0xfd20},
+ {RT5645_PR_BASE + 0x1c, 0xfd70},
{RT5645_PR_BASE + 0x20, 0x611f},
{RT5645_PR_BASE + 0x21, 0x4040},
{RT5645_PR_BASE + 0x23, 0x0004},
@@ -3151,7 +3162,7 @@ static int rt5645_jack_detect(struct snd_soc_codec *codec, int jack_insert)
snd_soc_dapm_sync(dapm);
rt5645->jack_type = SND_JACK_HEADPHONE;
}
- if (rt5645->pdata.jd_invert)
+ if (rt5645->pdata.level_trigger_irq)
regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
RT5645_JD_1_1_MASK, RT5645_JD_1_1_NOR);
} else { /* jack out */
@@ -3172,7 +3183,7 @@ static int rt5645_jack_detect(struct snd_soc_codec *codec, int jack_insert)
snd_soc_dapm_disable_pin(dapm, "LDO2");
snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
snd_soc_dapm_sync(dapm);
- if (rt5645->pdata.jd_invert)
+ if (rt5645->pdata.level_trigger_irq)
regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
RT5645_JD_1_1_MASK, RT5645_JD_1_1_INV);
}
@@ -3238,24 +3249,16 @@ static void rt5645_jack_detect_work(struct work_struct *work)
snd_soc_jack_report(rt5645->mic_jack,
report, SND_JACK_MICROPHONE);
return;
- case 1: /* 2 port */
- val = snd_soc_read(rt5645->codec, RT5645_A_JD_CTRL1) & 0x0070;
- break;
- default: /* 1 port */
- val = snd_soc_read(rt5645->codec, RT5645_A_JD_CTRL1) & 0x0020;
+ default: /* read rt5645 jd1_1 status */
+ val = snd_soc_read(rt5645->codec, RT5645_INT_IRQ_ST) & 0x1000;
break;
}
- switch (val) {
- /* jack in */
- case 0x30: /* 2 port */
- case 0x0: /* 1 port or 2 port */
- if (rt5645->jack_type == 0) {
- report = rt5645_jack_detect(rt5645->codec, 1);
- /* for push button and jack out */
- break;
- }
+ if (!val && (rt5645->jack_type == 0)) { /* jack in */
+ report = rt5645_jack_detect(rt5645->codec, 1);
+ } else if (!val && rt5645->jack_type != 0) {
+ /* for push button and jack out */
btn_type = 0;
if (snd_soc_read(rt5645->codec, RT5645_INT_IRQ_ST) & 0x4) {
/* button pressed */
@@ -3302,19 +3305,12 @@ static void rt5645_jack_detect_work(struct work_struct *work)
mod_timer(&rt5645->btn_check_timer,
msecs_to_jiffies(100));
}
-
- break;
- /* jack out */
- case 0x70: /* 2 port */
- case 0x10: /* 2 port */
- case 0x20: /* 1 port */
+ } else {
+ /* jack out */
report = 0;
snd_soc_update_bits(rt5645->codec,
RT5645_INT_IRQ_ST, 0x1, 0x0);
rt5645_jack_detect(rt5645->codec, 0);
- break;
- default:
- break;
}
snd_soc_jack_report(rt5645->hp_jack, report, SND_JACK_HEADPHONE);
@@ -3601,7 +3597,7 @@ static struct rt5645_platform_data buddy_platform_data = {
.dmic1_data_pin = RT5645_DMIC_DATA_GPIO5,
.dmic2_data_pin = RT5645_DMIC_DATA_IN2P,
.jd_mode = 3,
- .jd_invert = true,
+ .level_trigger_irq = true,
};
static struct dmi_system_id dmi_platform_intel_broadwell[] = {
@@ -3614,6 +3610,33 @@ static struct dmi_system_id dmi_platform_intel_broadwell[] = {
{ }
};
+static struct rt5645_platform_data gpd_win_platform_data = {
+ .jd_mode = 3,
+ .inv_jd1_1 = true,
+};
+
+static const struct dmi_system_id dmi_platform_gpd_win[] = {
+ {
+ /*
+ * Match for the GPDwin which unfortunately uses somewhat
+ * generic dmi strings, which is why we test for 4 strings.
+ * Comparing against 23 other byt/cht boards, board_vendor
+ * and board_name are unique to the GPDwin, whereas only one
+ * other board has the same board_serial and 3 others have
+ * the same default product_name. Also the GPDwin is the
+ * only device to have both board_ and product_name not set.
+ */
+ .ident = "GPD Win",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Default string"),
+ DMI_MATCH(DMI_BOARD_SERIAL, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ },
+ },
+ {}
+};
+
static bool rt5645_check_dp(struct device *dev)
{
if (device_property_present(dev, "realtek,in2-differential") ||
@@ -3664,6 +3687,17 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
rt5645_parse_dt(rt5645, &i2c->dev);
else if (dmi_check_system(dmi_platform_intel_braswell))
rt5645->pdata = general_platform_data;
+ else if (dmi_check_system(dmi_platform_gpd_win))
+ rt5645->pdata = gpd_win_platform_data;
+
+ if (quirk != -1) {
+ rt5645->pdata.in2_diff = QUIRK_IN2_DIFF(quirk);
+ rt5645->pdata.level_trigger_irq = QUIRK_LEVEL_IRQ(quirk);
+ rt5645->pdata.inv_jd1_1 = QUIRK_INV_JD1_1(quirk);
+ rt5645->pdata.jd_mode = QUIRK_JD_MODE(quirk);
+ rt5645->pdata.dmic1_data_pin = QUIRK_DMIC1_DATA_PIN(quirk);
+ rt5645->pdata.dmic2_data_pin = QUIRK_DMIC2_DATA_PIN(quirk);
+ }
rt5645->gpiod_hp_det = devm_gpiod_get_optional(&i2c->dev, "hp-detect",
GPIOD_IN);
@@ -3745,6 +3779,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
ret);
}
+ regmap_update_bits(rt5645->regmap, RT5645_CLSD_OUT_CTRL, 0xc0, 0xc0);
+
if (rt5645->pdata.in2_diff)
regmap_update_bits(rt5645->regmap, RT5645_IN2_CTRL,
RT5645_IN_DF2, RT5645_IN_DF2);
@@ -3848,12 +3884,16 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
default:
break;
}
+ if (rt5645->pdata.inv_jd1_1) {
+ regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
+ RT5645_JD_1_1_MASK, RT5645_JD_1_1_INV);
+ }
}
regmap_update_bits(rt5645->regmap, RT5645_ADDA_CLK1,
RT5645_I2S_PD1_MASK, RT5645_I2S_PD1_2);
- if (rt5645->pdata.jd_invert) {
+ if (rt5645->pdata.level_trigger_irq) {
regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
RT5645_JD_1_1_MASK, RT5645_JD_1_1_INV);
}
@@ -3897,6 +3937,7 @@ static int rt5645_i2c_remove(struct i2c_client *i2c)
cancel_delayed_work_sync(&rt5645->jack_detect_work);
cancel_delayed_work_sync(&rt5645->rcclock_work);
+ del_timer_sync(&rt5645->btn_check_timer);
snd_soc_unregister_codec(&i2c->dev);
regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies);
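
The new "quirk" module parameter introduced above packs the platform-data overrides into a single integer, decoded by the QUIRK_* helpers at the top of rt5645.c (bit 0: inv_jd1_1, bit 1: level_trigger_irq, bit 2: in2_diff, bits 4-6: jd_mode, bits 8-9 and 12-13: DMIC data pins). The example value below is only an illustration of how a board setting could be composed; the specific combination and the module name in the comment are assumptions, not mandated by this patch.

/* Example quirk value: jd_mode 3, level-triggered IRQ, inverted JD1_1 */
#define DEMO_RT5645_QUIRK	((3 << 4) |	/* jd_mode = 3       */ \
				 (1 << 1) |	/* level_trigger_irq */ \
				 (1 << 0))	/* inv_jd1_1         */
/* i.e. 0x33, loaded e.g. as: modprobe snd-soc-rt5645 quirk=0x33 */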
diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
index f5d34153e21f..db05b60d5002 100644
--- a/sound/soc/codecs/rt5651.c
+++ b/sound/soc/codecs/rt5651.c
@@ -586,44 +586,6 @@ static const struct snd_kcontrol_new hpo_r_mute_control =
SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5651_HP_VOL,
RT5651_R_MUTE_SFT, 1, 1);
-/* INL/R source */
-static const char * const rt5651_inl_src[] = {"IN2P", "HPOVOLLP"};
-
-static SOC_ENUM_SINGLE_DECL(
- rt5651_inl_enum, RT5651_INL1_INR1_VOL,
- RT5651_INL_SEL_SFT, rt5651_inl_src);
-
-static const struct snd_kcontrol_new rt5651_inl1_mux =
- SOC_DAPM_ENUM("INL1 source", rt5651_inl_enum);
-
-static const char * const rt5651_inr1_src[] = {"IN2N", "HPOVOLRP"};
-
-static SOC_ENUM_SINGLE_DECL(
- rt5651_inr1_enum, RT5651_INL1_INR1_VOL,
- RT5651_INR_SEL_SFT, rt5651_inr1_src);
-
-static const struct snd_kcontrol_new rt5651_inr1_mux =
- SOC_DAPM_ENUM("INR1 source", rt5651_inr1_enum);
-
-static const char * const rt5651_inl2_src[] = {"IN3P", "OUTVOLLP"};
-
-static SOC_ENUM_SINGLE_DECL(
- rt5651_inl2_enum, RT5651_INL2_INR2_VOL,
- RT5651_INL_SEL_SFT, rt5651_inl2_src);
-
-static const struct snd_kcontrol_new rt5651_inl2_mux =
- SOC_DAPM_ENUM("INL2 source", rt5651_inl2_enum);
-
-static const char * const rt5651_inr2_src[] = {"IN3N", "OUTVOLRP"};
-
-static SOC_ENUM_SINGLE_DECL(
- rt5651_inr2_enum, RT5651_INL2_INR2_VOL,
- RT5651_INR_SEL_SFT, rt5651_inr2_src);
-
-static const struct snd_kcontrol_new rt5651_inr2_mux =
- SOC_DAPM_ENUM("INR2 source", rt5651_inr2_enum);
-
-
/* Stereo ADC source */
static const char * const rt5651_stereo1_adc1_src[] = {"DD MIX", "ADC"};
@@ -955,11 +917,7 @@ static const struct snd_soc_dapm_widget rt5651_dapm_widgets[] = {
RT5651_PWR_IN2_L_BIT, 0, NULL, 0),
SND_SOC_DAPM_PGA("INR2 VOL", RT5651_PWR_VOL,
RT5651_PWR_IN2_R_BIT, 0, NULL, 0),
- /* IN Mux */
- SND_SOC_DAPM_MUX("INL1 Mux", SND_SOC_NOPM, 0, 0, &rt5651_inl1_mux),
- SND_SOC_DAPM_MUX("INR1 Mux", SND_SOC_NOPM, 0, 0, &rt5651_inr1_mux),
- SND_SOC_DAPM_MUX("INL2 Mux", SND_SOC_NOPM, 0, 0, &rt5651_inl2_mux),
- SND_SOC_DAPM_MUX("INR2 Mux", SND_SOC_NOPM, 0, 0, &rt5651_inr2_mux),
+
/* REC Mixer */
SND_SOC_DAPM_MIXER("RECMIXL", RT5651_PWR_MIXER, RT5651_PWR_RM_L_BIT, 0,
rt5651_rec_l_mix, ARRAY_SIZE(rt5651_rec_l_mix)),
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c
index a32508d7dcfd..a33202affeb1 100644
--- a/sound/soc/codecs/rt5663.c
+++ b/sound/soc/codecs/rt5663.c
@@ -2847,6 +2847,8 @@ static int rt5663_resume(struct snd_soc_codec *codec)
regcache_cache_only(rt5663->regmap, false);
regcache_sync(rt5663->regmap);
+ rt5663_irq(0, rt5663);
+
return 0;
}
#else
@@ -3141,7 +3143,7 @@ static int rt5663_i2c_probe(struct i2c_client *i2c,
regmap_update_bits(rt5663->regmap, RT5663_DIG_MISC,
RT5663_DIG_GATE_CTRL_MASK, RT5663_DIG_GATE_CTRL_EN);
regmap_update_bits(rt5663->regmap, RT5663_AUTO_1MRC_CLK,
- RT5663_IRQ_POW_SAV_MASK, RT5663_IRQ_POW_SAV_EN);
+ RT5663_IRQ_MANUAL_MASK, RT5663_IRQ_MANUAL_EN);
regmap_update_bits(rt5663->regmap, RT5663_IRQ_1,
RT5663_EN_IRQ_JD1_MASK, RT5663_EN_IRQ_JD1_EN);
regmap_update_bits(rt5663->regmap, RT5663_GPIO_1,
diff --git a/sound/soc/codecs/rt5663.h b/sound/soc/codecs/rt5663.h
index d77fae619f2f..4621812c94d8 100644
--- a/sound/soc/codecs/rt5663.h
+++ b/sound/soc/codecs/rt5663.h
@@ -590,6 +590,10 @@
#define RT5663_IRQ_POW_SAV_JD1_SHIFT 14
#define RT5663_IRQ_POW_SAV_JD1_DIS (0x0 << 14)
#define RT5663_IRQ_POW_SAV_JD1_EN (0x1 << 14)
+#define RT5663_IRQ_MANUAL_MASK (0x1 << 8)
+#define RT5663_IRQ_MANUAL_SHIFT 8
+#define RT5663_IRQ_MANUAL_DIS (0x0 << 8)
+#define RT5663_IRQ_MANUAL_EN (0x1 << 8)
/* IRQ Control 1 (0x00b6) */
#define RT5663_EN_CB_JD_MASK (0x1 << 3)
diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
index 8cd22307f5b6..370ed54d1e15 100644
--- a/sound/soc/codecs/rt5665.c
+++ b/sound/soc/codecs/rt5665.c
@@ -70,6 +70,7 @@ struct rt5665_priv {
int jack_type;
int irq_work_delay_time;
unsigned int sar_adc_value;
+ bool calibration_done;
};
static const struct reg_default rt5665_reg[] = {
@@ -912,46 +913,46 @@ static const char * const rt5665_data_select[] = {
"L/R", "R/L", "L/L", "R/R"
};
-static const SOC_ENUM_SINGLE_DECL(rt5665_if1_1_01_adc_enum,
+static SOC_ENUM_SINGLE_DECL(rt5665_if1_1_01_adc_enum,
RT5665_TDM_CTRL_2, RT5665_I2S1_1_DS_ADC_SLOT01_SFT, rt5665_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5665_if1_1_23_adc_enum,
+static SOC_ENUM_SINGLE_DECL(rt5665_if1_1_23_adc_enum,
RT5665_TDM_CTRL_2, RT5665_I2S1_1_DS_ADC_SLOT23_SFT, rt5665_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5665_if1_1_45_adc_enum,
+static SOC_ENUM_SINGLE_DECL(rt5665_if1_1_45_adc_enum,
RT5665_TDM_CTRL_2, RT5665_I2S1_1_DS_ADC_SLOT45_SFT, rt5665_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5665_if1_1_67_adc_enum,
+static SOC_ENUM_SINGLE_DECL(rt5665_if1_1_67_adc_enum,
RT5665_TDM_CTRL_2, RT5665_I2S1_1_DS_ADC_SLOT67_SFT, rt5665_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5665_if1_2_01_adc_enum,
+static SOC_ENUM_SINGLE_DECL(rt5665_if1_2_01_adc_enum,
RT5665_TDM_CTRL_2, RT5665_I2S1_2_DS_ADC_SLOT01_SFT, rt5665_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5665_if1_2_23_adc_enum,
+static SOC_ENUM_SINGLE_DECL(rt5665_if1_2_23_adc_enum,
RT5665_TDM_CTRL_2, RT5665_I2S1_2_DS_ADC_SLOT23_SFT, rt5665_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5665_if1_2_45_adc_enum,
+static SOC_ENUM_SINGLE_DECL(rt5665_if1_2_45_adc_enum,
RT5665_TDM_CTRL_2, RT5665_I2S1_2_DS_ADC_SLOT45_SFT, rt5665_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5665_if1_2_67_adc_enum,
+static SOC_ENUM_SINGLE_DECL(rt5665_if1_2_67_adc_enum,
RT5665_TDM_CTRL_2, RT5665_I2S1_2_DS_ADC_SLOT67_SFT, rt5665_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5665_if2_1_dac_enum,
+static SOC_ENUM_SINGLE_DECL(rt5665_if2_1_dac_enum,
RT5665_DIG_INF2_DATA, RT5665_IF2_1_DAC_SEL_SFT, rt5665_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5665_if2_1_adc_enum,
+static SOC_ENUM_SINGLE_DECL(rt5665_if2_1_adc_enum,
RT5665_DIG_INF2_DATA, RT5665_IF2_1_ADC_SEL_SFT, rt5665_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5665_if2_2_dac_enum,
+static SOC_ENUM_SINGLE_DECL(rt5665_if2_2_dac_enum,
RT5665_DIG_INF2_DATA, RT5665_IF2_2_DAC_SEL_SFT, rt5665_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5665_if2_2_adc_enum,
+static SOC_ENUM_SINGLE_DECL(rt5665_if2_2_adc_enum,
RT5665_DIG_INF2_DATA, RT5665_IF2_2_ADC_SEL_SFT, rt5665_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5665_if3_dac_enum,
+static SOC_ENUM_SINGLE_DECL(rt5665_if3_dac_enum,
RT5665_DIG_INF3_DATA, RT5665_IF3_DAC_SEL_SFT, rt5665_data_select);
-static const SOC_ENUM_SINGLE_DECL(rt5665_if3_adc_enum,
+static SOC_ENUM_SINGLE_DECL(rt5665_if3_adc_enum,
RT5665_DIG_INF3_DATA, RT5665_IF3_ADC_SEL_SFT, rt5665_data_select);
static const struct snd_kcontrol_new rt5665_if1_1_01_adc_swap_mux =
@@ -1305,6 +1306,11 @@ static void rt5665_jack_detect_handler(struct work_struct *work)
usleep_range(10000, 15000);
}
+ while (!rt5665->calibration_done) {
+ pr_debug("%s calibration not ready\n", __func__);
+ usleep_range(10000, 15000);
+ }
+
mutex_lock(&rt5665->calibrate_mutex);
val = snd_soc_read(rt5665->codec, RT5665_AJD1_CTRL) & 0x0010;
@@ -1819,14 +1825,14 @@ static const char * const rt5665_dac2_src[] = {
"IF1 DAC2", "IF2_1 DAC", "IF2_2 DAC", "IF3 DAC", "Mono ADC MIX"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_dac_l2_enum, RT5665_DAC2_CTRL,
RT5665_DAC_L2_SEL_SFT, rt5665_dac2_src);
static const struct snd_kcontrol_new rt5665_dac_l2_mux =
SOC_DAPM_ENUM("Digital DAC L2 Source", rt5665_dac_l2_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_dac_r2_enum, RT5665_DAC2_CTRL,
RT5665_DAC_R2_SEL_SFT, rt5665_dac2_src);
@@ -1839,14 +1845,14 @@ static const char * const rt5665_dac3_src[] = {
"IF1 DAC2", "IF2_1 DAC", "IF2_2 DAC", "IF3 DAC", "STO2 ADC MIX"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_dac_l3_enum, RT5665_DAC3_CTRL,
RT5665_DAC_L3_SEL_SFT, rt5665_dac3_src);
static const struct snd_kcontrol_new rt5665_dac_l3_mux =
SOC_DAPM_ENUM("Digital DAC L3 Source", rt5665_dac_l3_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_dac_r3_enum, RT5665_DAC3_CTRL,
RT5665_DAC_R3_SEL_SFT, rt5665_dac3_src);
@@ -1859,14 +1865,14 @@ static const char * const rt5665_sto1_adc1_src[] = {
"DD Mux", "ADC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_sto1_adc1l_enum, RT5665_STO1_ADC_MIXER,
RT5665_STO1_ADC1L_SRC_SFT, rt5665_sto1_adc1_src);
static const struct snd_kcontrol_new rt5665_sto1_adc1l_mux =
SOC_DAPM_ENUM("Stereo1 ADC1L Source", rt5665_sto1_adc1l_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_sto1_adc1r_enum, RT5665_STO1_ADC_MIXER,
RT5665_STO1_ADC1R_SRC_SFT, rt5665_sto1_adc1_src);
@@ -1879,14 +1885,14 @@ static const char * const rt5665_sto1_adc_src[] = {
"ADC1 L", "ADC1 R", "ADC2 L", "ADC2 R"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_sto1_adcl_enum, RT5665_STO1_ADC_MIXER,
RT5665_STO1_ADCL_SRC_SFT, rt5665_sto1_adc_src);
static const struct snd_kcontrol_new rt5665_sto1_adcl_mux =
SOC_DAPM_ENUM("Stereo1 ADCL Source", rt5665_sto1_adcl_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_sto1_adcr_enum, RT5665_STO1_ADC_MIXER,
RT5665_STO1_ADCR_SRC_SFT, rt5665_sto1_adc_src);
@@ -1899,14 +1905,14 @@ static const char * const rt5665_sto1_adc2_src[] = {
"DAC MIX", "DMIC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_sto1_adc2l_enum, RT5665_STO1_ADC_MIXER,
RT5665_STO1_ADC2L_SRC_SFT, rt5665_sto1_adc2_src);
static const struct snd_kcontrol_new rt5665_sto1_adc2l_mux =
SOC_DAPM_ENUM("Stereo1 ADC2L Source", rt5665_sto1_adc2l_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_sto1_adc2r_enum, RT5665_STO1_ADC_MIXER,
RT5665_STO1_ADC2R_SRC_SFT, rt5665_sto1_adc2_src);
@@ -1919,7 +1925,7 @@ static const char * const rt5665_sto1_dmic_src[] = {
"DMIC1", "DMIC2"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_sto1_dmic_enum, RT5665_STO1_ADC_MIXER,
RT5665_STO1_DMIC_SRC_SFT, rt5665_sto1_dmic_src);
@@ -1931,7 +1937,7 @@ static const char * const rt5665_sto1_dd_l_src[] = {
"STO2 DAC", "MONO DAC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_sto1_dd_l_enum, RT5665_STO1_ADC_MIXER,
RT5665_STO1_DD_L_SRC_SFT, rt5665_sto1_dd_l_src);
@@ -1943,7 +1949,7 @@ static const char * const rt5665_sto1_dd_r_src[] = {
"STO2 DAC", "MONO DAC", "AEC REF"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_sto1_dd_r_enum, RT5665_STO1_ADC_MIXER,
RT5665_STO1_DD_R_SRC_SFT, rt5665_sto1_dd_r_src);
@@ -1956,7 +1962,7 @@ static const char * const rt5665_mono_adc_l2_src[] = {
"DAC MIXL", "DMIC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_mono_adc_l2_enum, RT5665_MONO_ADC_MIXER,
RT5665_MONO_ADC_L2_SRC_SFT, rt5665_mono_adc_l2_src);
@@ -1970,7 +1976,7 @@ static const char * const rt5665_mono_adc_l1_src[] = {
"DD Mux", "ADC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_mono_adc_l1_enum, RT5665_MONO_ADC_MIXER,
RT5665_MONO_ADC_L1_SRC_SFT, rt5665_mono_adc_l1_src);
@@ -1982,14 +1988,14 @@ static const char * const rt5665_mono_dd_src[] = {
"STO2 DAC", "MONO DAC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_mono_dd_l_enum, RT5665_MONO_ADC_MIXER,
RT5665_MONO_DD_L_SRC_SFT, rt5665_mono_dd_src);
static const struct snd_kcontrol_new rt5665_mono_dd_l_mux =
SOC_DAPM_ENUM("Mono DD L Source", rt5665_mono_dd_l_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_mono_dd_r_enum, RT5665_MONO_ADC_MIXER,
RT5665_MONO_DD_R_SRC_SFT, rt5665_mono_dd_src);
@@ -2002,14 +2008,14 @@ static const char * const rt5665_mono_adc_src[] = {
"ADC1 L", "ADC1 R", "ADC2 L", "ADC2 R"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_mono_adc_l_enum, RT5665_MONO_ADC_MIXER,
RT5665_MONO_ADC_L_SRC_SFT, rt5665_mono_adc_src);
static const struct snd_kcontrol_new rt5665_mono_adc_l_mux =
SOC_DAPM_ENUM("Mono ADC L Source", rt5665_mono_adc_l_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_mono_adcr_enum, RT5665_MONO_ADC_MIXER,
RT5665_MONO_ADC_R_SRC_SFT, rt5665_mono_adc_src);
@@ -2022,7 +2028,7 @@ static const char * const rt5665_mono_dmic_l_src[] = {
"DMIC1 L", "DMIC2 L"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_mono_dmic_l_enum, RT5665_MONO_ADC_MIXER,
RT5665_MONO_DMIC_L_SRC_SFT, rt5665_mono_dmic_l_src);
@@ -2035,7 +2041,7 @@ static const char * const rt5665_mono_adc_r2_src[] = {
"DAC MIXR", "DMIC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_mono_adc_r2_enum, RT5665_MONO_ADC_MIXER,
RT5665_MONO_ADC_R2_SRC_SFT, rt5665_mono_adc_r2_src);
@@ -2048,7 +2054,7 @@ static const char * const rt5665_mono_adc_r1_src[] = {
"DD Mux", "ADC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_mono_adc_r1_enum, RT5665_MONO_ADC_MIXER,
RT5665_MONO_ADC_R1_SRC_SFT, rt5665_mono_adc_r1_src);
@@ -2061,7 +2067,7 @@ static const char * const rt5665_mono_dmic_r_src[] = {
"DMIC1 R", "DMIC2 R"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_mono_dmic_r_enum, RT5665_MONO_ADC_MIXER,
RT5665_MONO_DMIC_R_SRC_SFT, rt5665_mono_dmic_r_src);
@@ -2075,14 +2081,14 @@ static const char * const rt5665_sto2_adc1_src[] = {
"DD Mux", "ADC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_sto2_adc1l_enum, RT5665_STO2_ADC_MIXER,
RT5665_STO2_ADC1L_SRC_SFT, rt5665_sto2_adc1_src);
static const struct snd_kcontrol_new rt5665_sto2_adc1l_mux =
SOC_DAPM_ENUM("Stereo2 ADC1L Source", rt5665_sto2_adc1l_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_sto2_adc1r_enum, RT5665_STO2_ADC_MIXER,
RT5665_STO2_ADC1R_SRC_SFT, rt5665_sto2_adc1_src);
@@ -2095,14 +2101,14 @@ static const char * const rt5665_sto2_adc_src[] = {
"ADC1 L", "ADC1 R", "ADC2 L"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_sto2_adcl_enum, RT5665_STO2_ADC_MIXER,
RT5665_STO2_ADCL_SRC_SFT, rt5665_sto2_adc_src);
static const struct snd_kcontrol_new rt5665_sto2_adcl_mux =
SOC_DAPM_ENUM("Stereo2 ADCL Source", rt5665_sto2_adcl_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_sto2_adcr_enum, RT5665_STO2_ADC_MIXER,
RT5665_STO2_ADCR_SRC_SFT, rt5665_sto2_adc_src);
@@ -2115,14 +2121,14 @@ static const char * const rt5665_sto2_adc2_src[] = {
"DAC MIX", "DMIC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_sto2_adc2l_enum, RT5665_STO2_ADC_MIXER,
RT5665_STO2_ADC2L_SRC_SFT, rt5665_sto2_adc2_src);
static const struct snd_kcontrol_new rt5665_sto2_adc2l_mux =
SOC_DAPM_ENUM("Stereo2 ADC2L Source", rt5665_sto2_adc2l_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_sto2_adc2r_enum, RT5665_STO2_ADC_MIXER,
RT5665_STO2_ADC2R_SRC_SFT, rt5665_sto2_adc2_src);
@@ -2135,7 +2141,7 @@ static const char * const rt5665_sto2_dmic_src[] = {
"DMIC1", "DMIC2"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_sto2_dmic_enum, RT5665_STO2_ADC_MIXER,
RT5665_STO2_DMIC_SRC_SFT, rt5665_sto2_dmic_src);
@@ -2147,7 +2153,7 @@ static const char * const rt5665_sto2_dd_l_src[] = {
"STO2 DAC", "MONO DAC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_sto2_dd_l_enum, RT5665_STO2_ADC_MIXER,
RT5665_STO2_DD_L_SRC_SFT, rt5665_sto2_dd_l_src);
@@ -2159,7 +2165,7 @@ static const char * const rt5665_sto2_dd_r_src[] = {
"STO2 DAC", "MONO DAC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_sto2_dd_r_enum, RT5665_STO2_ADC_MIXER,
RT5665_STO2_DD_R_SRC_SFT, rt5665_sto2_dd_r_src);
@@ -2172,14 +2178,14 @@ static const char * const rt5665_dac1_src[] = {
"IF1 DAC1", "IF2_1 DAC", "IF2_2 DAC", "IF3 DAC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_dac_r1_enum, RT5665_AD_DA_MIXER,
RT5665_DAC1_R_SEL_SFT, rt5665_dac1_src);
static const struct snd_kcontrol_new rt5665_dac_r1_mux =
SOC_DAPM_ENUM("DAC R1 Source", rt5665_dac_r1_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_dac_l1_enum, RT5665_AD_DA_MIXER,
RT5665_DAC1_L_SEL_SFT, rt5665_dac1_src);
@@ -2192,14 +2198,14 @@ static const char * const rt5665_dig_dac_mix_src[] = {
"Stereo1 DAC Mixer", "Stereo2 DAC Mixer", "Mono DAC Mixer"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_dig_dac_mixl_enum, RT5665_A_DAC1_MUX,
RT5665_DAC_MIX_L_SFT, rt5665_dig_dac_mix_src);
static const struct snd_kcontrol_new rt5665_dig_dac_mixl_mux =
SOC_DAPM_ENUM("DAC Digital Mixer L Source", rt5665_dig_dac_mixl_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_dig_dac_mixr_enum, RT5665_A_DAC1_MUX,
RT5665_DAC_MIX_R_SFT, rt5665_dig_dac_mix_src);
@@ -2212,14 +2218,14 @@ static const char * const rt5665_alg_dac1_src[] = {
"Stereo1 DAC Mixer", "DAC1", "DMIC1"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_alg_dac_l1_enum, RT5665_A_DAC1_MUX,
RT5665_A_DACL1_SFT, rt5665_alg_dac1_src);
static const struct snd_kcontrol_new rt5665_alg_dac_l1_mux =
SOC_DAPM_ENUM("Analog DAC L1 Source", rt5665_alg_dac_l1_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_alg_dac_r1_enum, RT5665_A_DAC1_MUX,
RT5665_A_DACR1_SFT, rt5665_alg_dac1_src);
@@ -2232,14 +2238,14 @@ static const char * const rt5665_alg_dac2_src[] = {
"Mono DAC Mixer", "DAC2"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_alg_dac_l2_enum, RT5665_A_DAC2_MUX,
RT5665_A_DACL2_SFT, rt5665_alg_dac2_src);
static const struct snd_kcontrol_new rt5665_alg_dac_l2_mux =
SOC_DAPM_ENUM("Analog DAC L2 Source", rt5665_alg_dac_l2_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_alg_dac_r2_enum, RT5665_A_DAC2_MUX,
RT5665_A_DACR2_SFT, rt5665_alg_dac2_src);
@@ -2253,7 +2259,7 @@ static const char * const rt5665_if2_1_adc_in_src[] = {
"IF1 DAC2", "IF2_2 DAC", "IF3 DAC", "DAC1 MIX"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_if2_1_adc_in_enum, RT5665_DIG_INF2_DATA,
RT5665_IF2_1_ADC_IN_SFT, rt5665_if2_1_adc_in_src);
@@ -2266,7 +2272,7 @@ static const char * const rt5665_if2_2_adc_in_src[] = {
"IF1 DAC2", "IF2_1 DAC", "IF3 DAC", "DAC1 MIX"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_if2_2_adc_in_enum, RT5665_DIG_INF2_DATA,
RT5665_IF2_2_ADC_IN_SFT, rt5665_if2_2_adc_in_src);
@@ -2280,7 +2286,7 @@ static const char * const rt5665_if3_adc_in_src[] = {
"IF1 DAC2", "IF2_1 DAC", "IF2_2 DAC", "DAC1 MIX"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_if3_adc_in_enum, RT5665_DIG_INF3_DATA,
RT5665_IF3_ADC_IN_SFT, rt5665_if3_adc_in_src);
@@ -2293,14 +2299,14 @@ static const char * const rt5665_pdm_src[] = {
"Stereo1 DAC", "Stereo2 DAC", "Mono DAC"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_pdm_l_enum, RT5665_PDM_OUT_CTRL,
RT5665_PDM1_L_SFT, rt5665_pdm_src);
static const struct snd_kcontrol_new rt5665_pdm_l_mux =
SOC_DAPM_ENUM("PDM L Source", rt5665_pdm_l_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_pdm_r_enum, RT5665_PDM_OUT_CTRL,
RT5665_PDM1_R_SFT, rt5665_pdm_src);
@@ -2314,7 +2320,7 @@ static const char * const rt5665_if1_1_adc1_data_src[] = {
"STO1 ADC", "IF2_1 DAC",
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_if1_1_adc1_data_enum, RT5665_TDM_CTRL_3,
RT5665_IF1_ADC1_SEL_SFT, rt5665_if1_1_adc1_data_src);
@@ -2326,7 +2332,7 @@ static const char * const rt5665_if1_1_adc2_data_src[] = {
"STO2 ADC", "IF2_2 DAC",
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_if1_1_adc2_data_enum, RT5665_TDM_CTRL_3,
RT5665_IF1_ADC2_SEL_SFT, rt5665_if1_1_adc2_data_src);
@@ -2338,7 +2344,7 @@ static const char * const rt5665_if1_1_adc3_data_src[] = {
"MONO ADC", "IF3 DAC",
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_if1_1_adc3_data_enum, RT5665_TDM_CTRL_3,
RT5665_IF1_ADC3_SEL_SFT, rt5665_if1_1_adc3_data_src);
@@ -2350,7 +2356,7 @@ static const char * const rt5665_if1_2_adc1_data_src[] = {
"STO1 ADC", "IF1 DAC",
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_if1_2_adc1_data_enum, RT5665_TDM_CTRL_4,
RT5665_IF1_ADC1_SEL_SFT, rt5665_if1_2_adc1_data_src);
@@ -2362,7 +2368,7 @@ static const char * const rt5665_if1_2_adc2_data_src[] = {
"STO2 ADC", "IF2_1 DAC",
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_if1_2_adc2_data_enum, RT5665_TDM_CTRL_4,
RT5665_IF1_ADC2_SEL_SFT, rt5665_if1_2_adc2_data_src);
@@ -2374,7 +2380,7 @@ static const char * const rt5665_if1_2_adc3_data_src[] = {
"MONO ADC", "IF2_2 DAC",
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_if1_2_adc3_data_enum, RT5665_TDM_CTRL_4,
RT5665_IF1_ADC3_SEL_SFT, rt5665_if1_2_adc3_data_src);
@@ -2386,7 +2392,7 @@ static const char * const rt5665_if1_2_adc4_data_src[] = {
"DAC1", "IF3 DAC",
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_if1_2_adc4_data_enum, RT5665_TDM_CTRL_4,
RT5665_IF1_ADC4_SEL_SFT, rt5665_if1_2_adc4_data_src);
@@ -2401,14 +2407,14 @@ static const char * const rt5665_tdm_adc_data_src[] = {
"4123", "4132", "4213", "4231", "4312", "4321"
};
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_tdm1_adc_data_enum, RT5665_TDM_CTRL_3,
RT5665_TDM_ADC_SEL_SFT, rt5665_tdm_adc_data_src);
static const struct snd_kcontrol_new rt5665_tdm1_adc_mux =
SOC_DAPM_ENUM("TDM1 ADC Mux", rt5665_tdm1_adc_data_enum);
-static const SOC_ENUM_SINGLE_DECL(
+static SOC_ENUM_SINGLE_DECL(
rt5665_tdm2_adc_data_enum, RT5665_TDM_CTRL_4,
RT5665_TDM_ADC_SEL_SFT, rt5665_tdm_adc_data_src);
@@ -2607,7 +2613,7 @@ static int rt5665_i2s_pin_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
- unsigned int val1, val2, mask1, mask2 = 0;
+ unsigned int val1, val2, mask1 = 0, mask2 = 0;
switch (w->shift) {
case RT5665_PWR_I2S2_1_BIT:
@@ -2635,13 +2641,17 @@ static int rt5665_i2s_pin_event(struct snd_soc_dapm_widget *w,
}
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- snd_soc_update_bits(codec, RT5665_GPIO_CTRL_1, mask1, val1);
+ if (mask1)
+ snd_soc_update_bits(codec, RT5665_GPIO_CTRL_1,
+ mask1, val1);
if (mask2)
snd_soc_update_bits(codec, RT5665_GPIO_CTRL_2,
mask2, val2);
break;
case SND_SOC_DAPM_POST_PMD:
- snd_soc_update_bits(codec, RT5665_GPIO_CTRL_1, mask1, 0);
+ if (mask1)
+ snd_soc_update_bits(codec, RT5665_GPIO_CTRL_1,
+ mask1, 0);
if (mask2)
snd_soc_update_bits(codec, RT5665_GPIO_CTRL_2,
mask2, 0);
@@ -2684,6 +2694,8 @@ static const struct snd_soc_dapm_widget rt5665_dapm_widgets[] = {
RT5665_DAC_MONO_R_ASRC_SFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY_S("ADC STO1 ASRC", 1, RT5665_ASRC_1,
RT5665_ADC_STO1_ASRC_SFT, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY_S("ADC STO2 ASRC", 1, RT5665_ASRC_1,
+ RT5665_ADC_STO2_ASRC_SFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY_S("ADC Mono L ASRC", 1, RT5665_ASRC_1,
RT5665_ADC_MONO_L_ASRC_SFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY_S("ADC Mono R ASRC", 1, RT5665_ASRC_1,
@@ -3227,6 +3239,7 @@ static const struct snd_soc_dapm_route rt5665_dapm_routes[] = {
/*ASRC*/
{"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc},
+ {"ADC Stereo2 Filter", NULL, "ADC STO2 ASRC", is_using_asrc},
{"ADC Mono Left Filter", NULL, "ADC Mono L ASRC", is_using_asrc},
{"ADC Mono Right Filter", NULL, "ADC Mono R ASRC", is_using_asrc},
{"DAC Mono Left Filter", NULL, "DAC Mono L ASRC", is_using_asrc},
@@ -4688,6 +4701,7 @@ static void rt5665_calibrate(struct rt5665_priv *rt5665)
regmap_write(rt5665->regmap, RT5665_ASRC_8, 0x0120);
out_unlock:
+ rt5665->calibration_done = true;
mutex_unlock(&rt5665->calibrate_mutex);
}
@@ -4922,7 +4936,7 @@ static struct acpi_device_id rt5665_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, rt5665_acpi_match);
#endif
-struct i2c_driver rt5665_i2c_driver = {
+static struct i2c_driver rt5665_i2c_driver = {
.driver = {
.name = "rt5665",
.of_match_table = of_match_ptr(rt5665_of_match),
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index e27c5a4a0a15..0ec7985ed306 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -1717,7 +1717,6 @@ static const struct snd_soc_dapm_widget rt5670_dapm_widgets[] = {
SND_SOC_DAPM_PGA("IF1_ADC1", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("IF1_ADC2", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_PGA("IF1_ADC3", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_PGA("IF1_ADC4", SND_SOC_NOPM, 0, 0, NULL, 0),
/* DSP */
SND_SOC_DAPM_PGA("TxDP_ADC", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -2023,7 +2022,6 @@ static const struct snd_soc_dapm_route rt5670_dapm_routes[] = {
{ "Stereo1 ADC MIXL", NULL, "Sto1 ADC MIXL" },
{ "Stereo1 ADC MIXL", NULL, "ADC Stereo1 Filter" },
- { "ADC Stereo1 Filter", NULL, "PLL1", is_sys_clk_from_pll },
{ "Stereo1 ADC MIXR", NULL, "Sto1 ADC MIXR" },
{ "Stereo1 ADC MIXR", NULL, "ADC Stereo1 Filter" },
@@ -2062,7 +2060,6 @@ static const struct snd_soc_dapm_route rt5670_dapm_routes[] = {
{ "Stereo2 ADC MIXL", NULL, "Stereo2 ADC LR Mux" },
{ "Stereo2 ADC MIXL", NULL, "ADC Stereo2 Filter" },
- { "ADC Stereo2 Filter", NULL, "PLL1", is_sys_clk_from_pll },
{ "Stereo2 ADC MIXR", NULL, "Sto2 ADC MIXR" },
{ "Stereo2 ADC MIXR", NULL, "ADC Stereo2 Filter" },
@@ -2086,13 +2083,13 @@ static const struct snd_soc_dapm_route rt5670_dapm_routes[] = {
{ "IF1 ADC1 IN1 Mux", "IF1_ADC3", "IF1_ADC3" },
{ "IF1 ADC1 IN2 Mux", "IF1_ADC1_IN1", "IF1 ADC1 IN1 Mux" },
- { "IF1 ADC1 IN2 Mux", "IF1_ADC4", "IF1_ADC4" },
+ { "IF1 ADC1 IN2 Mux", "IF1_ADC4", "TxDP_ADC" },
{ "IF1 ADC2 IN Mux", "IF_ADC2", "IF_ADC2" },
{ "IF1 ADC2 IN Mux", "VAD_ADC", "VAD_ADC" },
{ "IF1 ADC2 IN1 Mux", "IF1_ADC2_IN", "IF1 ADC2 IN Mux" },
- { "IF1 ADC2 IN1 Mux", "IF1_ADC4", "IF1_ADC4" },
+ { "IF1 ADC2 IN1 Mux", "IF1_ADC4", "TxDP_ADC" },
{ "IF1_ADC1" , NULL, "IF1 ADC1 IN2 Mux" },
{ "IF1_ADC2" , NULL, "IF1 ADC2 IN1 Mux" },
@@ -2445,10 +2442,9 @@ static int rt5670_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return 0;
}
-static int rt5670_set_dai_sysclk(struct snd_soc_dai *dai,
- int clk_id, unsigned int freq, int dir)
+static int rt5670_set_codec_sysclk(struct snd_soc_codec *codec, int clk_id,
+ int source, unsigned int freq, int dir)
{
- struct snd_soc_codec *codec = dai->codec;
struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec);
unsigned int reg_val = 0;
@@ -2472,7 +2468,7 @@ static int rt5670_set_dai_sysclk(struct snd_soc_dai *dai,
if (clk_id != RT5670_SCLK_S_RCCLK)
rt5670->sysclk_src = clk_id;
- dev_dbg(dai->dev, "Sysclk is %dHz and clock id is %d\n", freq, clk_id);
+ dev_dbg(codec->dev, "Sysclk : %dHz clock id : %d\n", freq, clk_id);
return 0;
}
@@ -2724,7 +2720,6 @@ static int rt5670_resume(struct snd_soc_codec *codec)
static const struct snd_soc_dai_ops rt5670_aif_dai_ops = {
.hw_params = rt5670_hw_params,
.set_fmt = rt5670_set_dai_fmt,
- .set_sysclk = rt5670_set_dai_sysclk,
.set_tdm_slot = rt5670_set_tdm_slot,
.set_pll = rt5670_set_dai_pll,
};
@@ -2777,6 +2772,7 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5670 = {
.resume = rt5670_resume,
.set_bias_level = rt5670_set_bias_level,
.idle_bias_off = true,
+ .set_sysclk = rt5670_set_codec_sysclk,
.component_driver = {
.controls = rt5670_snd_controls,
.num_controls = ARRAY_SIZE(rt5670_snd_controls),
@@ -2849,6 +2845,10 @@ static const struct dmi_system_id dmi_platform_intel_braswell[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Tablet B"),
},
},
+ {}
+};
+
+static const struct dmi_system_id dmi_platform_intel_bytcht_jdmode2[] = {
{
.ident = "Lenovo Thinkpad Tablet 10",
.matches = {
@@ -2883,6 +2883,11 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
rt5670->pdata.dmic1_data_pin = RT5670_DMIC_DATA_IN2P;
rt5670->pdata.dev_gpio = true;
rt5670->pdata.jd_mode = 1;
+ } else if (dmi_check_system(dmi_platform_intel_bytcht_jdmode2)) {
+ rt5670->pdata.dmic_en = true;
+ rt5670->pdata.dmic1_data_pin = RT5670_DMIC_DATA_IN2P;
+ rt5670->pdata.dev_gpio = true;
+ rt5670->pdata.jd_mode = 2;
}
rt5670->regmap = devm_regmap_init_i2c(i2c, &rt5670_regmap);
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 65ac4518ad06..36e530a36c82 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -41,15 +41,6 @@
#define RT5677_PR_BASE (RT5677_PR_RANGE_BASE + (0 * RT5677_PR_SPACING))
-/* GPIO indexes defined by ACPI */
-enum {
- RT5677_GPIO_PLUG_DET = 0,
- RT5677_GPIO_MIC_PRESENT_L = 1,
- RT5677_GPIO_HOTWORD_DET_L = 2,
- RT5677_GPIO_DSP_INT = 3,
- RT5677_GPIO_HP_AMP_SHDN_L = 4,
-};
-
static const struct regmap_range_cfg rt5677_ranges[] = {
{
.name = "PR",
@@ -5030,7 +5021,6 @@ static const struct regmap_config rt5677_regmap = {
static const struct i2c_device_id rt5677_i2c_id[] = {
{ "rt5677", RT5677 },
{ "rt5676", RT5676 },
- { "RT5677CE:00", RT5677 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id);
@@ -5041,28 +5031,19 @@ static const struct of_device_id rt5677_of_match[] = {
};
MODULE_DEVICE_TABLE(of, rt5677_of_match);
-static const struct acpi_gpio_params plug_det_gpio = { RT5677_GPIO_PLUG_DET, 0, false };
-static const struct acpi_gpio_params mic_present_gpio = { RT5677_GPIO_MIC_PRESENT_L, 0, false };
-static const struct acpi_gpio_params headphone_enable_gpio = { RT5677_GPIO_HP_AMP_SHDN_L, 0, false };
-
-static const struct acpi_gpio_mapping bdw_rt5677_gpios[] = {
- { "plug-det-gpios", &plug_det_gpio, 1 },
- { "mic-present-gpios", &mic_present_gpio, 1 },
- { "headphone-enable-gpios", &headphone_enable_gpio, 1 },
- { NULL },
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id rt5677_acpi_match[] = {
+ { "RT5677CE", RT5677 },
+ { }
};
+MODULE_DEVICE_TABLE(acpi, rt5677_acpi_match);
+#endif
static void rt5677_read_acpi_properties(struct rt5677_priv *rt5677,
struct device *dev)
{
- int ret;
u32 val;
- ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev),
- bdw_rt5677_gpios);
- if (ret)
- dev_warn(dev, "Failed to add driver gpios\n");
-
if (!device_property_read_u32(dev, "DCLK", &val))
rt5677->pdata.dmic2_clk_pin = val;
@@ -5301,6 +5282,7 @@ static struct i2c_driver rt5677_i2c_driver = {
.driver = {
.name = "rt5677",
.of_match_table = rt5677_of_match,
+ .acpi_match_table = ACPI_PTR(rt5677_acpi_match),
},
.probe = rt5677_i2c_probe,
.remove = rt5677_i2c_remove,
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 5a2702edeb77..8f6814c1eb6b 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -74,6 +74,20 @@ static const struct reg_default sgtl5000_reg_defaults[] = {
{ SGTL5000_DAP_AVC_DECAY, 0x0050 },
};
+/* AVC: Threshold dB -> register: pre-calculated values */
+static const u16 avc_thr_db2reg[97] = {
+ 0x5168, 0x488E, 0x40AA, 0x39A1, 0x335D, 0x2DC7, 0x28CC, 0x245D, 0x2068,
+ 0x1CE2, 0x19BE, 0x16F1, 0x1472, 0x1239, 0x103E, 0x0E7A, 0x0CE6, 0x0B7F,
+ 0x0A3F, 0x0922, 0x0824, 0x0741, 0x0677, 0x05C3, 0x0522, 0x0493, 0x0414,
+ 0x03A2, 0x033D, 0x02E3, 0x0293, 0x024B, 0x020B, 0x01D2, 0x019F, 0x0172,
+ 0x014A, 0x0126, 0x0106, 0x00E9, 0x00D0, 0x00B9, 0x00A5, 0x0093, 0x0083,
+ 0x0075, 0x0068, 0x005D, 0x0052, 0x0049, 0x0041, 0x003A, 0x0034, 0x002E,
+ 0x0029, 0x0025, 0x0021, 0x001D, 0x001A, 0x0017, 0x0014, 0x0012, 0x0010,
+ 0x000E, 0x000D, 0x000B, 0x000A, 0x0009, 0x0008, 0x0007, 0x0006, 0x0005,
+ 0x0005, 0x0004, 0x0004, 0x0003, 0x0003, 0x0002, 0x0002, 0x0002, 0x0002,
+ 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000};
+
/* regulator supplies for sgtl5000, VDDD is an optional external supply */
enum sgtl5000_regulator_supplies {
VDDA,
@@ -382,6 +396,65 @@ static int dac_put_volsw(struct snd_kcontrol *kcontrol,
return 0;
}
+/*
+ * custom function to get AVC threshold
+ *
+ * The threshold dB is calculated by rearranging the calculation from the
+ * avc_put_threshold function: register_value = 10^(dB/20) * 0.636 * 2^15 ==>
+ * dB = ( fls(register_value) - 14.347 ) * 6.02
+ *
+ * As this calculation is expensive and the threshold dB values do not exceed
+ * the range 0 to 96, we use pre-calculated values.
+ */
+static int avc_get_threshold(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ int db, i;
+ u16 reg = snd_soc_read(codec, SGTL5000_DAP_AVC_THRESHOLD);
+
+ /* register value 0 => -96dB */
+ if (!reg) {
+ ucontrol->value.integer.value[0] = 96;
+ ucontrol->value.integer.value[1] = 96;
+ return 0;
+ }
+
+ /* get dB from register value (rounded down) */
+ for (i = 0; avc_thr_db2reg[i] > reg; i++)
+ ;
+ db = i;
+
+ ucontrol->value.integer.value[0] = db;
+ ucontrol->value.integer.value[1] = db;
+
+ return 0;
+}
+
+/*
+ * custom function to put AVC threshold
+ *
+ * The register value is calculated by following formula:
+ * register_value = 10^(dB/20) * 0.636 * 2^15
+ * As this calculation is expensive and the threshold dB values do not exceed
+ * the range 0 to 96, we use pre-calculated values.
+ */
+static int avc_put_threshold(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ int db;
+ u16 reg;
+
+ db = (int)ucontrol->value.integer.value[0];
+ if (db < 0 || db > 96)
+ return -EINVAL;
+ reg = avc_thr_db2reg[db];
+ snd_soc_write(codec, SGTL5000_DAP_AVC_THRESHOLD, reg);
+
+ return 0;
+}
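
Editor's aside (not part of the patch): the avc_thr_db2reg[] table above can be regenerated offline from the formula in the comments. The sketch below assumes the control value is an attenuation in dB below full scale, i.e. reg = 10^(-dB/20) * 0.636 * 2^15 truncated to an integer (note the sign relative to the comment); it is an illustration only.

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* 0.636 * 2^15 = 20840.4...; the control range is 0..96 dB */
	for (int db = 0; db <= 96; db++) {
		unsigned int reg =
			(unsigned int)(pow(10.0, -db / 20.0) * 0.636 * 32768.0);
		printf("0x%04X%s", reg, (db % 9 == 8 || db == 96) ? "\n" : ", ");
	}
	return 0;
}

Built with "gcc avc_table.c -lm" (filename illustrative), this reproduces the 97 entries, from 0x5168 at 0 dB down to 0x0000 above roughly 90 dB, which is why the driver can use a plain table lookup in both the get and put paths instead of computing powers at runtime.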
+
static const DECLARE_TLV_DB_SCALE(capture_6db_attenuate, -600, 600, 0);
/* tlv for mic gain, 0db 20db 30db 40db */
@@ -396,6 +469,12 @@ static const DECLARE_TLV_DB_SCALE(headphone_volume, -5150, 50, 0);
/* tlv for lineout volume, 31 steps of .5db each */
static const DECLARE_TLV_DB_SCALE(lineout_volume, -1550, 50, 0);
+/* tlv for dap avc max gain, 0db, 6db, 12db */
+static const DECLARE_TLV_DB_SCALE(avc_max_gain, 0, 600, 0);
+
+/* tlv for dap avc threshold */
+static const DECLARE_TLV_DB_MINMAX(avc_threshold, 0, 9600);
+
static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
/* SOC_DOUBLE_S8_TLV with invert */
{
@@ -434,6 +513,16 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
0x1f, 1,
lineout_volume),
SOC_SINGLE("Lineout Playback Switch", SGTL5000_CHIP_ANA_CTRL, 8, 1, 1),
+
+ /* Automatic Volume Control (DAP AVC) */
+ SOC_SINGLE("AVC Switch", SGTL5000_DAP_AVC_CTRL, 0, 1, 0),
+ SOC_SINGLE("AVC Hard Limiter Switch", SGTL5000_DAP_AVC_CTRL, 5, 1, 0),
+ SOC_SINGLE_TLV("AVC Max Gain Volume", SGTL5000_DAP_AVC_CTRL, 12, 2, 0,
+ avc_max_gain),
+ SOC_SINGLE("AVC Integrator Response", SGTL5000_DAP_AVC_CTRL, 8, 3, 0),
+ SOC_SINGLE_EXT_TLV("AVC Threshold Volume", SGTL5000_DAP_AVC_THRESHOLD,
+ 0, 96, 0, avc_get_threshold, avc_put_threshold,
+ avc_threshold),
};
/* mute the codec used by alsa core */
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index f8a90ba8cd71..d7d03c92cb8a 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -1210,7 +1210,7 @@ static const struct snd_soc_dai_ops aic31xx_dai_ops = {
static struct snd_soc_dai_driver dac31xx_dai_driver[] = {
{
- .name = "tlv32dac31xx-hifi",
+ .name = "tlv320dac31xx-hifi",
.playback = {
.stream_name = "Playback",
.channels_min = 2,
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 20695b691aff..65c059b5ffd7 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -482,8 +482,6 @@ struct wm_coeff_ctl_ops {
struct snd_ctl_elem_value *ucontrol);
int (*xput)(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
- int (*xinfo)(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo);
};
struct wm_coeff_ctl {
@@ -1890,7 +1888,7 @@ static void *wm_adsp_read_algs(struct wm_adsp *dsp, size_t n_algs,
}
if (be32_to_cpu(val) != 0xbedead)
- adsp_warn(dsp, "Algorithm list end %x 0x%x != 0xbeadead\n",
+ adsp_warn(dsp, "Algorithm list end %x 0x%x != 0xbedead\n",
pos + len, be32_to_cpu(val));
alg = kzalloc(len * 2, GFP_KERNEL | GFP_DMA);
@@ -2654,7 +2652,7 @@ int wm_adsp2_preloader_put(struct snd_kcontrol *kcontrol,
(struct soc_mixer_control *)kcontrol->private_value;
char preload[32];
- snprintf(preload, ARRAY_SIZE(preload), "DSP%d Preload", mc->shift);
+ snprintf(preload, ARRAY_SIZE(preload), "DSP%u Preload", mc->shift);
dsp->preloaded = ucontrol->value.integer.value[0];
diff --git a/sound/soc/codecs/zx_aud96p22.c b/sound/soc/codecs/zx_aud96p22.c
new file mode 100644
index 000000000000..032fb7cf6cbd
--- /dev/null
+++ b/sound/soc/codecs/zx_aud96p22.c
@@ -0,0 +1,403 @@
+/*
+ * Copyright (C) 2017 Sanechips Technology Co., Ltd.
+ * Copyright 2017 Linaro Ltd.
+ *
+ * Author: Baoyou Xie <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+#include <sound/tlv.h>
+
+#define AUD96P22_RESET 0x00
+#define RST_DAC_DPZ BIT(0)
+#define RST_ADC_DPZ BIT(1)
+#define AUD96P22_I2S1_CONFIG_0 0x03
+#define I2S1_MS_MODE BIT(3)
+#define I2S1_MODE_MASK 0x7
+#define I2S1_MODE_RIGHT_J 0x0
+#define I2S1_MODE_I2S 0x1
+#define I2S1_MODE_LEFT_J 0x2
+#define AUD96P22_PD_0 0x15
+#define AUD96P22_PD_1 0x16
+#define AUD96P22_PD_3 0x18
+#define AUD96P22_PD_4 0x19
+#define AUD96P22_MUTE_0 0x1d
+#define AUD96P22_MUTE_2 0x1f
+#define AUD96P22_MUTE_4 0x21
+#define AUD96P22_RECVOL_0 0x24
+#define AUD96P22_RECVOL_1 0x25
+#define AUD96P22_PGA1VOL_0 0x26
+#define AUD96P22_PGA1VOL_1 0x27
+#define AUD96P22_LMVOL_0 0x34
+#define AUD96P22_LMVOL_1 0x35
+#define AUD96P22_HS1VOL_0 0x38
+#define AUD96P22_HS1VOL_1 0x39
+#define AUD96P22_PGA1SEL_0 0x47
+#define AUD96P22_PGA1SEL_1 0x48
+#define AUD96P22_LDR1SEL_0 0x59
+#define AUD96P22_LDR1SEL_1 0x60
+#define AUD96P22_LDR2SEL_0 0x5d
+#define AUD96P22_REG_MAX 0xfb
+
+struct aud96p22_priv {
+ struct regmap *regmap;
+};
+
+static int aud96p22_adc_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct aud96p22_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct regmap *regmap = priv->regmap;
+
+ if (event != SND_SOC_DAPM_POST_PMU)
+ return -EINVAL;
+
+ /* Assert/de-assert the bit to reset ADC data path */
+ regmap_update_bits(regmap, AUD96P22_RESET, RST_ADC_DPZ, 0);
+ regmap_update_bits(regmap, AUD96P22_RESET, RST_ADC_DPZ, RST_ADC_DPZ);
+
+ return 0;
+}
+
+static int aud96p22_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct aud96p22_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct regmap *regmap = priv->regmap;
+
+ if (event != SND_SOC_DAPM_POST_PMU)
+ return -EINVAL;
+
+ /* Assert/de-assert the bit to reset DAC data path */
+ regmap_update_bits(regmap, AUD96P22_RESET, RST_DAC_DPZ, 0);
+ regmap_update_bits(regmap, AUD96P22_RESET, RST_DAC_DPZ, RST_DAC_DPZ);
+
+ return 0;
+}
+
+static const DECLARE_TLV_DB_SCALE(lm_tlv, -11550, 50, 0);
+static const DECLARE_TLV_DB_SCALE(hs_tlv, -3900, 300, 0);
+static const DECLARE_TLV_DB_SCALE(rec_tlv, -9550, 50, 0);
+static const DECLARE_TLV_DB_SCALE(pga_tlv, -1800, 100, 0);
+
+static const struct snd_kcontrol_new aud96p22_snd_controls[] = {
+ /* Volume control */
+ SOC_DOUBLE_R_TLV("Master Playback Volume", AUD96P22_LMVOL_0,
+ AUD96P22_LMVOL_1, 0, 0xff, 0, lm_tlv),
+ SOC_DOUBLE_R_TLV("Headphone Volume", AUD96P22_HS1VOL_0,
+ AUD96P22_HS1VOL_1, 0, 0xf, 0, hs_tlv),
+ SOC_DOUBLE_R_TLV("Master Capture Volume", AUD96P22_RECVOL_0,
+ AUD96P22_RECVOL_1, 0, 0xff, 0, rec_tlv),
+ SOC_DOUBLE_R_TLV("Analogue Capture Volume", AUD96P22_PGA1VOL_0,
+ AUD96P22_PGA1VOL_1, 0, 0x37, 0, pga_tlv),
+
+ /* Mute control */
+ SOC_DOUBLE("Master Playback Switch", AUD96P22_MUTE_2, 0, 1, 1, 1),
+ SOC_DOUBLE("Headphone Switch", AUD96P22_MUTE_2, 4, 5, 1, 1),
+ SOC_DOUBLE("Line Out Switch", AUD96P22_MUTE_4, 0, 1, 1, 1),
+ SOC_DOUBLE("Speaker Switch", AUD96P22_MUTE_4, 2, 3, 1, 1),
+ SOC_DOUBLE("Master Capture Switch", AUD96P22_MUTE_0, 0, 1, 1, 1),
+ SOC_DOUBLE("Analogue Capture Switch", AUD96P22_MUTE_0, 2, 3, 1, 1),
+};
+
+/* Input mux kcontrols */
+static const unsigned int ain_mux_values[] = {
+ 0, 1, 3, 4, 5,
+};
+
+static const char * const ainl_mux_texts[] = {
+ "AINL1 differential",
+ "AINL1 single-ended",
+ "AINL3 single-ended",
+ "AINL2 differential",
+ "AINL2 single-ended",
+};
+
+static const char * const ainr_mux_texts[] = {
+ "AINR1 differential",
+ "AINR1 single-ended",
+ "AINR3 single-ended",
+ "AINR2 differential",
+ "AINR2 single-ended",
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(ainl_mux_enum, AUD96P22_PGA1SEL_0,
+ 0, 0x7, ainl_mux_texts, ain_mux_values);
+static SOC_VALUE_ENUM_SINGLE_DECL(ainr_mux_enum, AUD96P22_PGA1SEL_1,
+ 0, 0x7, ainr_mux_texts, ain_mux_values);
+
+static const struct snd_kcontrol_new ainl_mux_kcontrol =
+ SOC_DAPM_ENUM("AINL Mux", ainl_mux_enum);
+static const struct snd_kcontrol_new ainr_mux_kcontrol =
+ SOC_DAPM_ENUM("AINR Mux", ainr_mux_enum);
+
+/* Output mixer kcontrols */
+static const struct snd_kcontrol_new ld1_left_kcontrols[] = {
+ SOC_DAPM_SINGLE("DACL LD1L Switch", AUD96P22_LDR1SEL_0, 0, 1, 0),
+ SOC_DAPM_SINGLE("AINL LD1L Switch", AUD96P22_LDR1SEL_0, 1, 1, 0),
+ SOC_DAPM_SINGLE("AINR LD1L Switch", AUD96P22_LDR1SEL_0, 2, 1, 0),
+};
+
+static const struct snd_kcontrol_new ld1_right_kcontrols[] = {
+ SOC_DAPM_SINGLE("DACR LD1R Switch", AUD96P22_LDR1SEL_1, 8, 1, 0),
+ SOC_DAPM_SINGLE("AINR LD1R Switch", AUD96P22_LDR1SEL_1, 9, 1, 0),
+ SOC_DAPM_SINGLE("AINL LD1R Switch", AUD96P22_LDR1SEL_1, 10, 1, 0),
+};
+
+static const struct snd_kcontrol_new ld2_kcontrols[] = {
+ SOC_DAPM_SINGLE("DACL LD2 Switch", AUD96P22_LDR2SEL_0, 0, 1, 0),
+ SOC_DAPM_SINGLE("AINL LD2 Switch", AUD96P22_LDR2SEL_0, 1, 1, 0),
+ SOC_DAPM_SINGLE("DACR LD2 Switch", AUD96P22_LDR2SEL_0, 2, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget aud96p22_dapm_widgets[] = {
+ /* Overall power bit */
+ SND_SOC_DAPM_SUPPLY("POWER", AUD96P22_PD_0, 0, 0, NULL, 0),
+
+ /* Input pins */
+ SND_SOC_DAPM_INPUT("AINL1P"),
+ SND_SOC_DAPM_INPUT("AINL2P"),
+ SND_SOC_DAPM_INPUT("AINL3"),
+ SND_SOC_DAPM_INPUT("AINL1N"),
+ SND_SOC_DAPM_INPUT("AINL2N"),
+ SND_SOC_DAPM_INPUT("AINR2N"),
+ SND_SOC_DAPM_INPUT("AINR1N"),
+ SND_SOC_DAPM_INPUT("AINR3"),
+ SND_SOC_DAPM_INPUT("AINR2P"),
+ SND_SOC_DAPM_INPUT("AINR1P"),
+
+ /* Input muxes */
+ SND_SOC_DAPM_MUX("AINLMUX", AUD96P22_PD_1, 2, 0, &ainl_mux_kcontrol),
+ SND_SOC_DAPM_MUX("AINRMUX", AUD96P22_PD_1, 3, 0, &ainr_mux_kcontrol),
+
+ /* ADCs */
+ SND_SOC_DAPM_ADC_E("ADCL", "Capture Left", AUD96P22_PD_1, 0, 0,
+ aud96p22_adc_event, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_ADC_E("ADCR", "Capture Right", AUD96P22_PD_1, 1, 0,
+ aud96p22_adc_event, SND_SOC_DAPM_POST_PMU),
+
+ /* DACs */
+ SND_SOC_DAPM_DAC_E("DACL", "Playback Left", AUD96P22_PD_3, 0, 0,
+ aud96p22_dac_event, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_DAC_E("DACR", "Playback Right", AUD96P22_PD_3, 1, 0,
+ aud96p22_dac_event, SND_SOC_DAPM_POST_PMU),
+
+ /* Output mixers */
+ SND_SOC_DAPM_MIXER("LD1L", AUD96P22_PD_3, 6, 0, ld1_left_kcontrols,
+ ARRAY_SIZE(ld1_left_kcontrols)),
+ SND_SOC_DAPM_MIXER("LD1R", AUD96P22_PD_3, 7, 0, ld1_right_kcontrols,
+ ARRAY_SIZE(ld1_right_kcontrols)),
+ SND_SOC_DAPM_MIXER("LD2", AUD96P22_PD_4, 2, 0, ld2_kcontrols,
+ ARRAY_SIZE(ld2_kcontrols)),
+
+ /* Headset power switch */
+ SND_SOC_DAPM_SUPPLY("HS1L", AUD96P22_PD_3, 4, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("HS1R", AUD96P22_PD_3, 5, 0, NULL, 0),
+
+ /* Output pins */
+ SND_SOC_DAPM_OUTPUT("HSOUTL"),
+ SND_SOC_DAPM_OUTPUT("LINEOUTL"),
+ SND_SOC_DAPM_OUTPUT("LINEOUTMP"),
+ SND_SOC_DAPM_OUTPUT("LINEOUTMN"),
+ SND_SOC_DAPM_OUTPUT("LINEOUTR"),
+ SND_SOC_DAPM_OUTPUT("HSOUTR"),
+};
+
+static const struct snd_soc_dapm_route aud96p22_dapm_routes[] = {
+ { "AINLMUX", "AINL1 differential", "AINL1N" },
+ { "AINLMUX", "AINL1 single-ended", "AINL1P" },
+ { "AINLMUX", "AINL3 single-ended", "AINL3" },
+ { "AINLMUX", "AINL2 differential", "AINL2N" },
+ { "AINLMUX", "AINL2 single-ended", "AINL2P" },
+
+ { "AINRMUX", "AINR1 differential", "AINR1N" },
+ { "AINRMUX", "AINR1 single-ended", "AINR1P" },
+ { "AINRMUX", "AINR3 single-ended", "AINR3" },
+ { "AINRMUX", "AINR2 differential", "AINR2N" },
+ { "AINRMUX", "AINR2 single-ended", "AINR2P" },
+
+ { "ADCL", NULL, "AINLMUX" },
+ { "ADCR", NULL, "AINRMUX" },
+
+ { "ADCL", NULL, "POWER" },
+ { "ADCR", NULL, "POWER" },
+ { "DACL", NULL, "POWER" },
+ { "DACR", NULL, "POWER" },
+
+ { "LD1L", "DACL LD1L Switch", "DACL" },
+ { "LD1L", "AINL LD1L Switch", "AINLMUX" },
+ { "LD1L", "AINR LD1L Switch", "AINRMUX" },
+
+ { "LD1R", "DACR LD1R Switch", "DACR" },
+ { "LD1R", "AINR LD1R Switch", "AINRMUX" },
+ { "LD1R", "AINL LD1R Switch", "AINLMUX" },
+
+ { "LD2", "DACL LD2 Switch", "DACL" },
+ { "LD2", "AINL LD2 Switch", "AINLMUX" },
+ { "LD2", "DACR LD2 Switch", "DACR" },
+
+ { "HSOUTL", NULL, "LD1L" },
+ { "HSOUTR", NULL, "LD1R" },
+ { "HSOUTL", NULL, "HS1L" },
+ { "HSOUTR", NULL, "HS1R" },
+
+ { "LINEOUTL", NULL, "LD1L" },
+ { "LINEOUTR", NULL, "LD1R" },
+
+ { "LINEOUTMP", NULL, "LD2" },
+ { "LINEOUTMN", NULL, "LD2" },
+};
+
+static struct snd_soc_codec_driver aud96p22_driver = {
+ .component_driver = {
+ .controls = aud96p22_snd_controls,
+ .num_controls = ARRAY_SIZE(aud96p22_snd_controls),
+ .dapm_widgets = aud96p22_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(aud96p22_dapm_widgets),
+ .dapm_routes = aud96p22_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(aud96p22_dapm_routes),
+ },
+};
+
+static int aud96p22_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct aud96p22_priv *priv = snd_soc_codec_get_drvdata(dai->codec);
+ struct regmap *regmap = priv->regmap;
+ unsigned int val;
+
+ /* Master/slave mode */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ val = 0;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ val = I2S1_MS_MODE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(regmap, AUD96P22_I2S1_CONFIG_0, I2S1_MS_MODE, val);
+
+ /* Audio format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_RIGHT_J:
+ val = I2S1_MODE_RIGHT_J;
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ val = I2S1_MODE_I2S;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ val = I2S1_MODE_LEFT_J;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(regmap, AUD96P22_I2S1_CONFIG_0, I2S1_MODE_MASK, val);
+
+ return 0;
+}
+
+static struct snd_soc_dai_ops aud96p22_dai_ops = {
+ .set_fmt = aud96p22_set_fmt,
+};
+
+#define AUD96P22_RATES SNDRV_PCM_RATE_8000_192000
+#define AUD96P22_FORMATS (\
+ SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE)
+
+static struct snd_soc_dai_driver aud96p22_dai = {
+ .name = "aud96p22-dai",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = AUD96P22_RATES,
+ .formats = AUD96P22_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = AUD96P22_RATES,
+ .formats = AUD96P22_FORMATS,
+ },
+ .ops = &aud96p22_dai_ops,
+};
+
+static const struct regmap_config aud96p22_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = AUD96P22_REG_MAX,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int aud96p22_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &i2c->dev;
+ struct aud96p22_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init_i2c(i2c, &aud96p22_regmap);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(dev, "failed to init i2c regmap: %d\n", ret);
+ return ret;
+ }
+
+ i2c_set_clientdata(i2c, priv);
+
+ ret = snd_soc_register_codec(dev, &aud96p22_driver, &aud96p22_dai, 1);
+ if (ret) {
+ dev_err(dev, "failed to register codec: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aud96p22_i2c_remove(struct i2c_client *i2c)
+{
+ snd_soc_unregister_codec(&i2c->dev);
+ return 0;
+}
+
+const struct of_device_id aud96p22_dt_ids[] = {
+ { .compatible = "zte,zx-aud96p22", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, aud96p22_dt_ids);
+
+static struct i2c_driver aud96p22_i2c_driver = {
+ .driver = {
+ .name = "zx_aud96p22",
+ .of_match_table = aud96p22_dt_ids,
+ },
+ .probe = aud96p22_i2c_probe,
+ .remove = aud96p22_i2c_remove,
+};
+module_i2c_driver(aud96p22_i2c_driver);
+
+MODULE_DESCRIPTION("ZTE ASoC AUD96P22 CODEC driver");
+MODULE_AUTHOR("Baoyou Xie <[email protected]>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 3c5a9804d3f5..56ec1d301ac2 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -629,7 +629,7 @@ static int davinci_mcasp_ch_constraint(struct davinci_mcasp *mcasp, int stream,
if (mcasp->tdm_mask[stream])
slots = hweight32(mcasp->tdm_mask[stream]);
- for (i = 2; i <= slots; i++)
+ for (i = 1; i <= slots; i++)
list[count++] = i;
for (i = 2; i <= serializers; i++)
@@ -1297,7 +1297,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
snd_pcm_hw_constraint_minmax(substream->runtime,
SNDRV_PCM_HW_PARAM_CHANNELS,
- 2, max_channels);
+ 0, max_channels);
snd_pcm_hw_constraint_list(substream->runtime,
0, SNDRV_PCM_HW_PARAM_CHANNELS,
@@ -1459,13 +1459,13 @@ static struct snd_soc_dai_driver davinci_mcasp_dai[] = {
.suspend = davinci_mcasp_suspend,
.resume = davinci_mcasp_resume,
.playback = {
- .channels_min = 2,
+ .channels_min = 1,
.channels_max = 32 * 16,
.rates = DAVINCI_MCASP_RATES,
.formats = DAVINCI_MCASP_PCM_FMTS,
},
.capture = {
- .channels_min = 2,
+ .channels_min = 1,
.channels_max = 32 * 16,
.rates = DAVINCI_MCASP_RATES,
.formats = DAVINCI_MCASP_PCM_FMTS,
@@ -1971,12 +1971,12 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
*/
mcasp->chconstr[SNDRV_PCM_STREAM_PLAYBACK].list =
devm_kzalloc(mcasp->dev, sizeof(unsigned int) *
- (32 + mcasp->num_serializer - 2),
+ (32 + mcasp->num_serializer - 1),
GFP_KERNEL);
mcasp->chconstr[SNDRV_PCM_STREAM_CAPTURE].list =
devm_kzalloc(mcasp->dev, sizeof(unsigned int) *
- (32 + mcasp->num_serializer - 2),
+ (32 + mcasp->num_serializer - 1),
GFP_KERNEL);
if (!mcasp->chconstr[SNDRV_PCM_STREAM_PLAYBACK].list ||
diff --git a/sound/soc/dwc/dwc-i2s.c b/sound/soc/dwc/dwc-i2s.c
index 9c46e4112026..916067638180 100644
--- a/sound/soc/dwc/dwc-i2s.c
+++ b/sound/soc/dwc/dwc-i2s.c
@@ -496,6 +496,8 @@ static int dw_configure_dai(struct dw_i2s_dev *dev,
idx = COMP1_TX_WORDSIZE_0(comp1);
if (WARN_ON(idx >= ARRAY_SIZE(formats)))
return -EINVAL;
+ if (dev->quirks & DW_I2S_QUIRK_16BIT_IDX_OVERRIDE)
+ idx = 1;
dw_i2s_dai->playback.channels_min = MIN_CHANNEL_NUM;
dw_i2s_dai->playback.channels_max =
1 << (COMP1_TX_CHANNELS(comp1) + 1);
@@ -508,6 +510,8 @@ static int dw_configure_dai(struct dw_i2s_dev *dev,
idx = COMP2_RX_WORDSIZE_0(comp2);
if (WARN_ON(idx >= ARRAY_SIZE(formats)))
return -EINVAL;
+ if (dev->quirks & DW_I2S_QUIRK_16BIT_IDX_OVERRIDE)
+ idx = 1;
dw_i2s_dai->capture.channels_min = MIN_CHANNEL_NUM;
dw_i2s_dai->capture.channels_max =
1 << (COMP1_RX_CHANNELS(comp1) + 1);
@@ -543,6 +547,8 @@ static int dw_configure_dai_by_pd(struct dw_i2s_dev *dev,
if (ret < 0)
return ret;
+ if (dev->quirks & DW_I2S_QUIRK_16BIT_IDX_OVERRIDE)
+ idx = 1;
/* Set DMA slaves info */
dev->play_dma_data.pd.data = pdata->play_dma_data;
dev->capture_dma_data.pd.data = pdata->capture_dma_data;
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index 0b82e209b6e3..1f7e70bfbd55 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -302,7 +302,6 @@ static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
struct snd_card *card = rtd->card->snd_card;
struct snd_soc_dai *dai = rtd->cpu_dai;
struct snd_pcm *pcm = rtd->pcm;
- struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
size_t size = psc_dma_hardware.buffer_bytes_max;
int rc;
diff --git a/sound/soc/generic/Kconfig b/sound/soc/generic/Kconfig
index d023959b8cd6..c954be0a0f96 100644
--- a/sound/soc/generic/Kconfig
+++ b/sound/soc/generic/Kconfig
@@ -14,3 +14,20 @@ config SND_SIMPLE_SCU_CARD
help
This option enables generic simple SCU sound card support.
It supports DPCM of multi CPU single Codec system.
+
+config SND_AUDIO_GRAPH_CARD
+ tristate "ASoC Audio Graph sound card support"
+ depends on OF
+ select SND_SIMPLE_CARD_UTILS
+ help
+ This option enables generic simple sound card support
+ with OF-graph DT bindings.
+
+config SND_AUDIO_GRAPH_SCU_CARD
+ tristate "ASoC Audio Graph SCU sound card support"
+ depends on OF
+ select SND_SIMPLE_CARD_UTILS
+ help
+ This option enables generic simple SCU sound card support
+ with OF-graph DT bindings.
+ It supports DPCM of multi CPU single Codec system.
diff --git a/sound/soc/generic/Makefile b/sound/soc/generic/Makefile
index ee750f3023ba..9e000523a3b4 100644
--- a/sound/soc/generic/Makefile
+++ b/sound/soc/generic/Makefile
@@ -1,7 +1,11 @@
snd-soc-simple-card-utils-objs := simple-card-utils.o
snd-soc-simple-card-objs := simple-card.o
snd-soc-simple-scu-card-objs := simple-scu-card.o
+snd-soc-audio-graph-card-objs := audio-graph-card.o
+snd-soc-audio-graph-scu-card-objs := audio-graph-scu-card.o
obj-$(CONFIG_SND_SIMPLE_CARD_UTILS) += snd-soc-simple-card-utils.o
obj-$(CONFIG_SND_SIMPLE_CARD) += snd-soc-simple-card.o
obj-$(CONFIG_SND_SIMPLE_SCU_CARD) += snd-soc-simple-scu-card.o
+obj-$(CONFIG_SND_AUDIO_GRAPH_CARD) += snd-soc-audio-graph-card.o
+obj-$(CONFIG_SND_AUDIO_GRAPH_SCU_CARD) += snd-soc-audio-graph-scu-card.o
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
new file mode 100644
index 000000000000..105ec3a6e30d
--- /dev/null
+++ b/sound/soc/generic/audio-graph-card.c
@@ -0,0 +1,338 @@
+/*
+ * ASoC audio graph sound card support
+ *
+ * Copyright (C) 2016 Renesas Solutions Corp.
+ * Kuninori Morimoto <[email protected]>
+ *
+ * based on ${LINUX}/sound/soc/generic/simple-card.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <sound/jack.h>
+#include <sound/simple_card_utils.h>
+
+struct graph_card_data {
+ struct snd_soc_card snd_card;
+ struct graph_dai_props {
+ struct asoc_simple_dai cpu_dai;
+ struct asoc_simple_dai codec_dai;
+ } *dai_props;
+ struct snd_soc_dai_link *dai_link;
+ struct gpio_desc *pa_gpio;
+};
+
+static int asoc_graph_card_outdrv_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct graph_card_data *priv = snd_soc_card_get_drvdata(dapm->card);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ gpiod_set_value_cansleep(priv->pa_gpio, 1);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ gpiod_set_value_cansleep(priv->pa_gpio, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget asoc_graph_card_dapm_widgets[] = {
+ SND_SOC_DAPM_OUT_DRV_E("Amplifier", SND_SOC_NOPM,
+ 0, 0, NULL, 0, asoc_graph_card_outdrv_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+};
+
+#define graph_priv_to_card(priv) (&(priv)->snd_card)
+#define graph_priv_to_props(priv, i) ((priv)->dai_props + (i))
+#define graph_priv_to_dev(priv) (graph_priv_to_card(priv)->dev)
+#define graph_priv_to_link(priv, i) (graph_priv_to_card(priv)->dai_link + (i))
+
+static int asoc_graph_card_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
+ int ret;
+
+ ret = asoc_simple_card_clk_enable(&dai_props->cpu_dai);
+ if (ret)
+ return ret;
+
+ ret = asoc_simple_card_clk_enable(&dai_props->codec_dai);
+ if (ret)
+ asoc_simple_card_clk_disable(&dai_props->cpu_dai);
+
+ return ret;
+}
+
+static void asoc_graph_card_shutdown(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
+
+ asoc_simple_card_clk_disable(&dai_props->cpu_dai);
+
+ asoc_simple_card_clk_disable(&dai_props->codec_dai);
+}
+
+static struct snd_soc_ops asoc_graph_card_ops = {
+ .startup = asoc_graph_card_startup,
+ .shutdown = asoc_graph_card_shutdown,
+};
+
+static int asoc_graph_card_dai_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *codec = rtd->codec_dai;
+ struct snd_soc_dai *cpu = rtd->cpu_dai;
+ struct graph_dai_props *dai_props =
+ graph_priv_to_props(priv, rtd->num);
+ int ret;
+
+ ret = asoc_simple_card_init_dai(codec, &dai_props->codec_dai);
+ if (ret < 0)
+ return ret;
+
+ ret = asoc_simple_card_init_dai(cpu, &dai_props->cpu_dai);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int asoc_graph_card_dai_link_of(struct device_node *cpu_port,
+ struct graph_card_data *priv,
+ int idx)
+{
+ struct device *dev = graph_priv_to_dev(priv);
+ struct snd_soc_dai_link *dai_link = graph_priv_to_link(priv, idx);
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, idx);
+ struct asoc_simple_dai *cpu_dai = &dai_props->cpu_dai;
+ struct asoc_simple_dai *codec_dai = &dai_props->codec_dai;
+ struct device_node *cpu_ep = of_get_next_child(cpu_port, NULL);
+ struct device_node *codec_ep = of_graph_get_remote_endpoint(cpu_ep);
+ struct device_node *rcpu_ep = of_graph_get_remote_endpoint(codec_ep);
+ int ret;
+
+ if (rcpu_ep != cpu_ep) {
+ dev_err(dev, "remote-endpoint mismatch (%s/%s/%s)\n",
+ cpu_ep->name, codec_ep->name, rcpu_ep->name);
+ ret = -EINVAL;
+ goto dai_link_of_err;
+ }
+
+ ret = asoc_simple_card_parse_daifmt(dev, cpu_ep, codec_ep,
+ NULL, &dai_link->dai_fmt);
+ if (ret < 0)
+ goto dai_link_of_err;
+
+ /*
+ * we need to consider "mclk-fs" around here
+ * see simple-card
+ */
+
+ ret = asoc_simple_card_parse_graph_cpu(cpu_ep, dai_link);
+ if (ret < 0)
+ goto dai_link_of_err;
+
+ ret = asoc_simple_card_parse_graph_codec(codec_ep, dai_link);
+ if (ret < 0)
+ goto dai_link_of_err;
+
+ ret = asoc_simple_card_of_parse_tdm(cpu_ep, cpu_dai);
+ if (ret < 0)
+ goto dai_link_of_err;
+
+ ret = asoc_simple_card_of_parse_tdm(codec_ep, codec_dai);
+ if (ret < 0)
+ goto dai_link_of_err;
+
+ ret = asoc_simple_card_parse_clk_cpu(dev, cpu_ep, dai_link, cpu_dai);
+ if (ret < 0)
+ goto dai_link_of_err;
+
+ ret = asoc_simple_card_parse_clk_codec(dev, codec_ep, dai_link, codec_dai);
+ if (ret < 0)
+ goto dai_link_of_err;
+
+ ret = asoc_simple_card_canonicalize_dailink(dai_link);
+ if (ret < 0)
+ goto dai_link_of_err;
+
+ ret = asoc_simple_card_set_dailink_name(dev, dai_link,
+ "%s-%s",
+ dai_link->cpu_dai_name,
+ dai_link->codec_dai_name);
+ if (ret < 0)
+ goto dai_link_of_err;
+
+ dai_link->ops = &asoc_graph_card_ops;
+ dai_link->init = asoc_graph_card_dai_init;
+
+ asoc_simple_card_canonicalize_cpu(dai_link,
+ of_graph_get_endpoint_count(dai_link->cpu_of_node) == 1);
+
+dai_link_of_err:
+ of_node_put(cpu_ep);
+ of_node_put(rcpu_ep);
+ of_node_put(codec_ep);
+
+ return ret;
+}
+
+static int asoc_graph_card_parse_of(struct graph_card_data *priv)
+{
+ struct of_phandle_iterator it;
+ struct device *dev = graph_priv_to_dev(priv);
+ struct snd_soc_card *card = graph_priv_to_card(priv);
+ struct device_node *node = dev->of_node;
+ int rc, idx = 0;
+ int ret;
+
+ ret = asoc_simple_card_of_parse_widgets(card, NULL);
+ if (ret < 0)
+ return ret;
+
+ ret = asoc_simple_card_of_parse_routing(card, NULL, 1);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * we need to consider "mclk-fs" around here
+ * see simple-card
+ */
+
+ of_for_each_phandle(&it, rc, node, "dais", NULL, 0) {
+ ret = asoc_graph_card_dai_link_of(it.node, priv, idx++);
+ of_node_put(it.node);
+ if (ret < 0)
+ return ret;
+ }
+
+ return asoc_simple_card_parse_card_name(card, NULL);
+}
+
+static int asoc_graph_get_dais_count(struct device *dev)
+{
+ struct of_phandle_iterator it;
+ struct device_node *node = dev->of_node;
+ int count = 0;
+ int rc;
+
+ of_for_each_phandle(&it, rc, node, "dais", NULL, 0) {
+ count++;
+ of_node_put(it.node);
+ }
+
+ return count;
+}
+
+static int asoc_graph_card_probe(struct platform_device *pdev)
+{
+ struct graph_card_data *priv;
+ struct snd_soc_dai_link *dai_link;
+ struct graph_dai_props *dai_props;
+ struct device *dev = &pdev->dev;
+ struct snd_soc_card *card;
+ int num, ret;
+
+ /* Allocate the private data and the DAI link array */
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ num = asoc_graph_get_dais_count(dev);
+ if (num == 0)
+ return -EINVAL;
+
+ dai_props = devm_kzalloc(dev, sizeof(*dai_props) * num, GFP_KERNEL);
+ dai_link = devm_kzalloc(dev, sizeof(*dai_link) * num, GFP_KERNEL);
+ if (!dai_props || !dai_link)
+ return -ENOMEM;
+
+ priv->pa_gpio = devm_gpiod_get_optional(dev, "pa", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->pa_gpio)) {
+ ret = PTR_ERR(priv->pa_gpio);
+ dev_err(dev, "failed to get amplifier gpio: %d\n", ret);
+ return ret;
+ }
+
+ priv->dai_props = dai_props;
+ priv->dai_link = dai_link;
+
+ /* Init snd_soc_card */
+ card = graph_priv_to_card(priv);
+ card->owner = THIS_MODULE;
+ card->dev = dev;
+ card->dai_link = dai_link;
+ card->num_links = num;
+ card->dapm_widgets = asoc_graph_card_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(asoc_graph_card_dapm_widgets);
+
+ ret = asoc_graph_card_parse_of(priv);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "parse error %d\n", ret);
+ goto err;
+ }
+
+ snd_soc_card_set_drvdata(card, priv);
+
+ ret = devm_snd_soc_register_card(dev, card);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+err:
+ asoc_simple_card_clean_reference(card);
+
+ return ret;
+}
+
+static int asoc_graph_card_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ return asoc_simple_card_clean_reference(card);
+}
+
+static const struct of_device_id asoc_graph_of_match[] = {
+ { .compatible = "audio-graph-card", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, asoc_graph_of_match);
+
+static struct platform_driver asoc_graph_card = {
+ .driver = {
+ .name = "asoc-audio-graph-card",
+ .of_match_table = asoc_graph_of_match,
+ },
+ .probe = asoc_graph_card_probe,
+ .remove = asoc_graph_card_remove,
+};
+module_platform_driver(asoc_graph_card);
+
+MODULE_ALIAS("platform:asoc-audio-graph-card");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ASoC Audio Graph Sound Card");
+MODULE_AUTHOR("Kuninori Morimoto <[email protected]>");
diff --git a/sound/soc/generic/audio-graph-scu-card.c b/sound/soc/generic/audio-graph-scu-card.c
new file mode 100644
index 000000000000..dcd2df37bc3b
--- /dev/null
+++ b/sound/soc/generic/audio-graph-scu-card.c
@@ -0,0 +1,411 @@
+/*
+ * ASoC audio graph SCU sound card support
+ *
+ * Copyright (C) 2017 Renesas Solutions Corp.
+ * Kuninori Morimoto <[email protected]>
+ *
+ * based on
+ * ${LINUX}/sound/soc/generic/simple-scu-card.c
+ * ${LINUX}/sound/soc/generic/audio-graph-card.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <sound/jack.h>
+#include <sound/simple_card_utils.h>
+
+struct graph_card_data {
+ struct snd_soc_card snd_card;
+ struct snd_soc_codec_conf codec_conf;
+ struct asoc_simple_dai *dai_props;
+ struct snd_soc_dai_link *dai_link;
+ struct asoc_simple_card_data adata;
+};
+
+#define graph_priv_to_card(priv) (&(priv)->snd_card)
+#define graph_priv_to_props(priv, i) ((priv)->dai_props + (i))
+#define graph_priv_to_dev(priv) (graph_priv_to_card(priv)->dev)
+#define graph_priv_to_link(priv, i) (graph_priv_to_card(priv)->dai_link + (i))
+
+static int asoc_graph_card_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct asoc_simple_dai *dai_props = graph_priv_to_props(priv, rtd->num);
+
+ return asoc_simple_card_clk_enable(dai_props);
+}
+
+static void asoc_graph_card_shutdown(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct asoc_simple_dai *dai_props = graph_priv_to_props(priv, rtd->num);
+
+ asoc_simple_card_clk_disable(dai_props);
+}
+
+static struct snd_soc_ops asoc_graph_card_ops = {
+ .startup = asoc_graph_card_startup,
+ .shutdown = asoc_graph_card_shutdown,
+};
+
+static int asoc_graph_card_dai_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *dai;
+ struct snd_soc_dai_link *dai_link;
+ struct asoc_simple_dai *dai_props;
+ int num = rtd->num;
+
+ dai_link = graph_priv_to_link(priv, num);
+ dai_props = graph_priv_to_props(priv, num);
+ dai = dai_link->dynamic ?
+ rtd->cpu_dai :
+ rtd->codec_dai;
+
+ return asoc_simple_card_init_dai(dai, dai_props);
+}
+
+static int asoc_graph_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+
+ asoc_simple_card_convert_fixup(&priv->adata, params);
+
+ return 0;
+}
+
+static int asoc_graph_card_dai_link_of(struct device_node *ep,
+ struct graph_card_data *priv,
+ unsigned int daifmt,
+ int idx, int is_fe)
+{
+ struct device *dev = graph_priv_to_dev(priv);
+ struct snd_soc_dai_link *dai_link = graph_priv_to_link(priv, idx);
+ struct asoc_simple_dai *dai_props = graph_priv_to_props(priv, idx);
+ struct snd_soc_card *card = graph_priv_to_card(priv);
+ int ret;
+
+ if (is_fe) {
+ /* BE is dummy */
+ dai_link->codec_of_node = NULL;
+ dai_link->codec_dai_name = "snd-soc-dummy-dai";
+ dai_link->codec_name = "snd-soc-dummy";
+
+ /* FE settings */
+ dai_link->dynamic = 1;
+ dai_link->dpcm_merged_format = 1;
+
+ ret = asoc_simple_card_parse_graph_cpu(ep, dai_link);
+ if (ret)
+ return ret;
+
+ ret = asoc_simple_card_parse_clk_cpu(dev, ep, dai_link, dai_props);
+ if (ret < 0)
+ return ret;
+
+ ret = asoc_simple_card_set_dailink_name(dev, dai_link,
+ "fe.%s",
+ dai_link->cpu_dai_name);
+ if (ret < 0)
+ return ret;
+
+ /* card->num_links includes Codec */
+ asoc_simple_card_canonicalize_cpu(dai_link,
+ of_graph_get_endpoint_count(dai_link->cpu_of_node) == 1);
+ } else {
+ /* FE is dummy */
+ dai_link->cpu_of_node = NULL;
+ dai_link->cpu_dai_name = "snd-soc-dummy-dai";
+ dai_link->cpu_name = "snd-soc-dummy";
+
+ /* BE settings */
+ dai_link->no_pcm = 1;
+ dai_link->be_hw_params_fixup = asoc_graph_card_be_hw_params_fixup;
+
+ ret = asoc_simple_card_parse_graph_codec(ep, dai_link);
+ if (ret < 0)
+ return ret;
+
+ ret = asoc_simple_card_parse_clk_codec(dev, ep, dai_link, dai_props);
+ if (ret < 0)
+ return ret;
+
+ ret = asoc_simple_card_set_dailink_name(dev, dai_link,
+ "be.%s",
+ dai_link->codec_dai_name);
+ if (ret < 0)
+ return ret;
+
+ snd_soc_of_parse_audio_prefix(card,
+ &priv->codec_conf,
+ dai_link->codec_of_node,
+ "prefix");
+ }
+
+ ret = asoc_simple_card_of_parse_tdm(ep, dai_props);
+ if (ret)
+ return ret;
+
+ ret = asoc_simple_card_canonicalize_dailink(dai_link);
+ if (ret < 0)
+ return ret;
+
+ dai_link->dai_fmt = daifmt;
+ dai_link->dpcm_playback = 1;
+ dai_link->dpcm_capture = 1;
+ dai_link->ops = &asoc_graph_card_ops;
+ dai_link->init = asoc_graph_card_dai_init;
+
+ return 0;
+}
+
+static int asoc_graph_card_parse_of(struct graph_card_data *priv)
+{
+ struct of_phandle_iterator it;
+ struct device *dev = graph_priv_to_dev(priv);
+ struct snd_soc_card *card = graph_priv_to_card(priv);
+ struct device_node *node = dev->of_node;
+ struct device_node *cpu_port;
+ struct device_node *cpu_ep;
+ struct device_node *codec_ep;
+ struct device_node *rcpu_ep;
+ struct device_node *codec_port;
+ struct device_node *codec_port_old;
+ unsigned int daifmt = 0;
+ int dai_idx, ret;
+ int rc, codec;
+
+ if (!node)
+ return -EINVAL;
+
+ /*
+ * we need to consider "widgets", "mclk-fs" around here
+ * see simple-card
+ */
+
+ ret = asoc_simple_card_of_parse_routing(card, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ asoc_simple_card_parse_convert(dev, NULL, &priv->adata);
+
+ /*
+ * it supports multi CPU, single CODEC only here
+ * see asoc_graph_get_dais_count
+ */
+
+ /* find 1st codec */
+ of_for_each_phandle(&it, rc, node, "dais", NULL, 0) {
+ cpu_port = it.node;
+ cpu_ep = of_get_next_child(cpu_port, NULL);
+ codec_ep = of_graph_get_remote_endpoint(cpu_ep);
+ rcpu_ep = of_graph_get_remote_endpoint(codec_ep);
+
+ of_node_put(cpu_port);
+ of_node_put(cpu_ep);
+ of_node_put(codec_ep);
+ of_node_put(rcpu_ep);
+
+ if (!codec_ep)
+ continue;
+
+ if (rcpu_ep != cpu_ep) {
+ dev_err(dev, "remote-endpoint missmatch (%s/%s/%s)\n",
+ cpu_ep->name, codec_ep->name, rcpu_ep->name);
+ ret = -EINVAL;
+ goto parse_of_err;
+ }
+
+ ret = asoc_simple_card_parse_daifmt(dev, cpu_ep, codec_ep,
+ NULL, &daifmt);
+ if (ret < 0)
+ goto parse_of_err;
+ }
+
+ dai_idx = 0;
+ codec_port_old = NULL;
+ for (codec = 0; codec < 2; codec++) {
+ /*
+ * To enumerate the valid links in order,
+ * detect all CPU-dummy (FE) links first, and
+ * all dummy-Codec (BE) links second
+ */
+ of_for_each_phandle(&it, rc, node, "dais", NULL, 0) {
+ cpu_port = it.node;
+ cpu_ep = of_get_next_child(cpu_port, NULL);
+ codec_ep = of_graph_get_remote_endpoint(cpu_ep);
+ codec_port = of_graph_get_port_parent(codec_ep);
+
+ of_node_put(cpu_port);
+ of_node_put(cpu_ep);
+ of_node_put(codec_ep);
+ of_node_put(codec_port);
+
+ if (codec) {
+ if (!codec_port)
+ continue;
+
+ if (codec_port_old == codec_port)
+ continue;
+
+ codec_port_old = codec_port;
+
+ /* Back-End (= Codec) */
+ ret = asoc_graph_card_dai_link_of(codec_ep, priv, daifmt, dai_idx++, 0);
+ if (ret < 0)
+ goto parse_of_err;
+ } else {
+ /* Front-End (= CPU) */
+ ret = asoc_graph_card_dai_link_of(cpu_ep, priv, daifmt, dai_idx++, 1);
+ if (ret < 0)
+ goto parse_of_err;
+ }
+ }
+ }
+
+ ret = asoc_simple_card_parse_card_name(card, NULL);
+ if (ret)
+ goto parse_of_err;
+
+ ret = 0;
+
+parse_of_err:
+ return ret;
+}
+
+static int asoc_graph_get_dais_count(struct device *dev)
+{
+ struct of_phandle_iterator it;
+ struct device_node *node = dev->of_node;
+ struct device_node *cpu_port;
+ struct device_node *cpu_ep;
+ struct device_node *codec_ep;
+ struct device_node *codec_port;
+ struct device_node *codec_port_old;
+ int count = 0;
+ int rc;
+
+ codec_port_old = NULL;
+ of_for_each_phandle(&it, rc, node, "dais", NULL, 0) {
+ cpu_port = it.node;
+ cpu_ep = of_get_next_child(cpu_port, NULL);
+ codec_ep = of_graph_get_remote_endpoint(cpu_ep);
+ codec_port = of_graph_get_port_parent(codec_ep);
+
+ of_node_put(cpu_port);
+ of_node_put(cpu_ep);
+ of_node_put(codec_ep);
+ of_node_put(codec_port);
+
+ if (cpu_ep)
+ count++;
+
+ if (!codec_port)
+ continue;
+
+ if (codec_port_old == codec_port)
+ continue;
+
+ count++;
+ codec_port_old = codec_port;
+ }
+
+ return count;
+}
+
+static int asoc_graph_card_probe(struct platform_device *pdev)
+{
+ struct graph_card_data *priv;
+ struct snd_soc_dai_link *dai_link;
+ struct asoc_simple_dai *dai_props;
+ struct device *dev = &pdev->dev;
+ struct snd_soc_card *card;
+ int num, ret;
+
+ /* Allocate the private data and the DAI link array */
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ num = asoc_graph_get_dais_count(dev);
+ if (num == 0)
+ return -EINVAL;
+
+ dai_props = devm_kzalloc(dev, sizeof(*dai_props) * num, GFP_KERNEL);
+ dai_link = devm_kzalloc(dev, sizeof(*dai_link) * num, GFP_KERNEL);
+ if (!dai_props || !dai_link)
+ return -ENOMEM;
+
+ priv->dai_props = dai_props;
+ priv->dai_link = dai_link;
+
+ /* Init snd_soc_card */
+ card = graph_priv_to_card(priv);
+ card->owner = THIS_MODULE;
+ card->dev = dev;
+ card->dai_link = priv->dai_link;
+ card->num_links = num;
+ card->codec_conf = &priv->codec_conf;
+ card->num_configs = 1;
+
+ ret = asoc_graph_card_parse_of(priv);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "parse error %d\n", ret);
+ goto err;
+ }
+
+ snd_soc_card_set_drvdata(card, priv);
+
+ ret = devm_snd_soc_register_card(dev, card);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+err:
+ asoc_simple_card_clean_reference(card);
+
+ return ret;
+}
+
+static int asoc_graph_card_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ return asoc_simple_card_clean_reference(card);
+}
+
+static const struct of_device_id asoc_graph_of_match[] = {
+ { .compatible = "audio-graph-scu-card", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, asoc_graph_of_match);
+
+static struct platform_driver asoc_graph_card = {
+ .driver = {
+ .name = "asoc-audio-graph-scu-card",
+ .of_match_table = asoc_graph_of_match,
+ },
+ .probe = asoc_graph_card_probe,
+ .remove = asoc_graph_card_remove,
+};
+module_platform_driver(asoc_graph_card);
+
+MODULE_ALIAS("platform:asoc-audio-graph-scu-card");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ASoC Audio Graph SCU Sound Card");
+MODULE_AUTHOR("Kuninori Morimoto <[email protected]>");
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index 343b291fc372..26d64fa40c9c 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -10,8 +10,49 @@
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <sound/simple_card_utils.h>
+void asoc_simple_card_convert_fixup(struct asoc_simple_card_data *data,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ if (data->convert_rate)
+ rate->min =
+ rate->max = data->convert_rate;
+
+ if (data->convert_channels)
+ channels->min =
+ channels->max = data->convert_channels;
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_convert_fixup);
+
+void asoc_simple_card_parse_convert(struct device *dev, char *prefix,
+ struct asoc_simple_card_data *data)
+{
+ struct device_node *np = dev->of_node;
+ char prop[128];
+
+ if (!prefix)
+ prefix = "";
+
+ /* sampling rate convert */
+ snprintf(prop, sizeof(prop), "%s%s", prefix, "convert-rate");
+ of_property_read_u32(np, prop, &data->convert_rate);
+
+ /* channels transfer */
+ snprintf(prop, sizeof(prop), "%s%s", prefix, "convert-channels");
+ of_property_read_u32(np, prop, &data->convert_channels);
+
+ dev_dbg(dev, "convert_rate %d\n", data->convert_rate);
+ dev_dbg(dev, "convert_channels %d\n", data->convert_channels);
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_parse_convert);
+
int asoc_simple_card_parse_daifmt(struct device *dev,
struct device_node *node,
struct device_node *codec,
@@ -20,14 +61,13 @@ int asoc_simple_card_parse_daifmt(struct device *dev,
{
struct device_node *bitclkmaster = NULL;
struct device_node *framemaster = NULL;
- int prefix_len = prefix ? strlen(prefix) : 0;
unsigned int daifmt;
daifmt = snd_soc_of_parse_daifmt(node, prefix,
&bitclkmaster, &framemaster);
daifmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
- if (prefix_len && !bitclkmaster && !framemaster) {
+ if (!bitclkmaster && !framemaster) {
/*
* No dai-link level and master setting was not found from
* sound node level, revert back to legacy DT parsing and
@@ -51,6 +91,8 @@ int asoc_simple_card_parse_daifmt(struct device *dev,
*retfmt = daifmt;
+ dev_dbg(dev, "format : %04x\n", daifmt);
+
return 0;
}
EXPORT_SYMBOL_GPL(asoc_simple_card_parse_daifmt);
@@ -72,6 +114,8 @@ int asoc_simple_card_set_dailink_name(struct device *dev,
dai_link->name = name;
dai_link->stream_name = name;
+
+ dev_dbg(dev, "name : %s\n", name);
}
return ret;
@@ -81,27 +125,54 @@ EXPORT_SYMBOL_GPL(asoc_simple_card_set_dailink_name);
int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
char *prefix)
{
- char prop[128];
int ret;
- snprintf(prop, sizeof(prop), "%sname", prefix);
+ if (!prefix)
+ prefix = "";
/* Parse the card name from DT */
- ret = snd_soc_of_parse_card_name(card, prop);
- if (ret < 0)
- return ret;
+ ret = snd_soc_of_parse_card_name(card, "label");
+ if (ret < 0) {
+ char prop[128];
+
+ snprintf(prop, sizeof(prop), "%sname", prefix);
+ ret = snd_soc_of_parse_card_name(card, prop);
+ if (ret < 0)
+ return ret;
+ }
if (!card->name && card->dai_link)
card->name = card->dai_link->name;
+ dev_dbg(card->dev, "Card Name: %s\n", card->name ? card->name : "");
+
return 0;
}
EXPORT_SYMBOL_GPL(asoc_simple_card_parse_card_name);
+static void asoc_simple_card_clk_register(struct asoc_simple_dai *dai,
+ struct clk *clk)
+{
+ dai->clk = clk;
+}
+
+int asoc_simple_card_clk_enable(struct asoc_simple_dai *dai)
+{
+ return clk_prepare_enable(dai->clk);
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_clk_enable);
+
+void asoc_simple_card_clk_disable(struct asoc_simple_dai *dai)
+{
+ clk_disable_unprepare(dai->clk);
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_clk_disable);
+
int asoc_simple_card_parse_clk(struct device *dev,
struct device_node *node,
struct device_node *dai_of_node,
- struct asoc_simple_dai *simple_dai)
+ struct asoc_simple_dai *simple_dai,
+ const char *name)
{
struct clk *clk;
u32 val;
@@ -115,7 +186,8 @@ int asoc_simple_card_parse_clk(struct device *dev,
clk = devm_get_clk_from_child(dev, node, NULL);
if (!IS_ERR(clk)) {
simple_dai->sysclk = clk_get_rate(clk);
- simple_dai->clk = clk;
+
+ asoc_simple_card_clk_register(simple_dai, clk);
} else if (!of_property_read_u32(node, "system-clock-frequency", &val)) {
simple_dai->sysclk = val;
} else {
@@ -124,6 +196,8 @@ int asoc_simple_card_parse_clk(struct device *dev,
simple_dai->sysclk = clk_get_rate(clk);
}
+ dev_dbg(dev, "%s : sysclk = %d\n", name, simple_dai->sysclk);
+
return 0;
}
EXPORT_SYMBOL_GPL(asoc_simple_card_parse_clk);
@@ -165,6 +239,71 @@ int asoc_simple_card_parse_dai(struct device_node *node,
}
EXPORT_SYMBOL_GPL(asoc_simple_card_parse_dai);
+static int asoc_simple_card_get_dai_id(struct device_node *ep)
+{
+ struct device_node *node;
+ struct device_node *endpoint;
+ int i, id;
+ int ret;
+
+ ret = snd_soc_get_dai_id(ep);
+ if (ret != -ENOTSUPP)
+ return ret;
+
+ node = of_graph_get_port_parent(ep);
+
+ /*
+ * In the non-HDMI sound case, counting the port/endpoint position
+ * in its DT node is enough. Let's count it.
+ */
+ i = 0;
+ id = -1;
+ for_each_endpoint_of_node(node, endpoint) {
+ if (endpoint == ep)
+ id = i;
+ i++;
+ }
+ if (id < 0)
+ return -ENODEV;
+
+ return id;
+}
+
+int asoc_simple_card_parse_graph_dai(struct device_node *ep,
+ struct device_node **dai_of_node,
+ const char **dai_name)
+{
+ struct device_node *node;
+ struct of_phandle_args args;
+ int ret;
+
+ if (!ep)
+ return 0;
+ if (!dai_name)
+ return 0;
+
+ /*
+ * of_graph_get_port_parent() will call
+ * of_node_put(). So, call of_node_get() here
+ */
+ of_node_get(ep);
+ node = of_graph_get_port_parent(ep);
+
+ /* Get dai->name */
+ args.np = node;
+ args.args[0] = asoc_simple_card_get_dai_id(ep);
+ args.args_count = (of_graph_get_endpoint_count(node) > 1);
+
+ ret = snd_soc_get_dai_name(&args, dai_name);
+ if (ret < 0)
+ return ret;
+
+ *dai_of_node = node;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_parse_graph_dai);
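
Editor's aside (not part of the patch): for a component node with more than one endpoint, the helper above hands snd_soc_get_dai_name() the endpoint index computed by asoc_simple_card_get_dai_id() as a one-cell specifier; a single-endpoint component sets args_count to 0, so the lookup falls back to the component's only DAI. Roughly, as an illustration (field values are assumed, for a node whose second endpoint is being resolved):

	struct of_phandle_args args = {
		.np		= node,		/* the port parent, i.e. the component node */
		.args		= { 1 },	/* DAI id from asoc_simple_card_get_dai_id() */
		.args_count	= 1,		/* one cell, because the node has >1 endpoint */
	};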
+
int asoc_simple_card_init_dai(struct snd_soc_dai *dai,
struct asoc_simple_dai *simple_dai)
{
@@ -236,6 +375,47 @@ int asoc_simple_card_clean_reference(struct snd_soc_card *card)
}
EXPORT_SYMBOL_GPL(asoc_simple_card_clean_reference);
+int asoc_simple_card_of_parse_routing(struct snd_soc_card *card,
+ char *prefix,
+ int optional)
+{
+ struct device_node *node = card->dev->of_node;
+ char prop[128];
+
+ if (!prefix)
+ prefix = "";
+
+ snprintf(prop, sizeof(prop), "%s%s", prefix, "routing");
+
+ if (!of_property_read_bool(node, prop)) {
+ if (optional)
+ return 0;
+ return -EINVAL;
+ }
+
+ return snd_soc_of_parse_audio_routing(card, prop);
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_of_parse_routing);
+
+int asoc_simple_card_of_parse_widgets(struct snd_soc_card *card,
+ char *prefix)
+{
+ struct device_node *node = card->dev->of_node;
+ char prop[128];
+
+ if (!prefix)
+ prefix = "";
+
+ snprintf(prop, sizeof(prop), "%s%s", prefix, "widgets");
+
+ if (of_property_read_bool(node, prop))
+ return snd_soc_of_parse_audio_simple_widgets(card, prop);
+
+ /* having no widgets is not an error */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(asoc_simple_card_of_parse_widgets);
+
/* Module information */
MODULE_AUTHOR("Kuninori Morimoto <[email protected]>");
MODULE_DESCRIPTION("ALSA SoC Simple Card Utils");
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index bc136d2bd7cd..dfaf48ff88b0 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -118,13 +118,13 @@ static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
simple_priv_to_props(priv, rtd->num);
int ret;
- ret = clk_prepare_enable(dai_props->cpu_dai.clk);
+ ret = asoc_simple_card_clk_enable(&dai_props->cpu_dai);
if (ret)
return ret;
- ret = clk_prepare_enable(dai_props->codec_dai.clk);
+ ret = asoc_simple_card_clk_enable(&dai_props->codec_dai);
if (ret)
- clk_disable_unprepare(dai_props->cpu_dai.clk);
+ asoc_simple_card_clk_disable(&dai_props->cpu_dai);
return ret;
}
@@ -136,9 +136,9 @@ static void asoc_simple_card_shutdown(struct snd_pcm_substream *substream)
struct simple_dai_props *dai_props =
simple_priv_to_props(priv, rtd->num);
- clk_disable_unprepare(dai_props->cpu_dai.clk);
+ asoc_simple_card_clk_disable(&dai_props->cpu_dai);
- clk_disable_unprepare(dai_props->codec_dai.clk);
+ asoc_simple_card_clk_disable(&dai_props->codec_dai);
}
static int asoc_simple_card_hw_params(struct snd_pcm_substream *substream,
@@ -233,13 +233,19 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
snprintf(prop, sizeof(prop), "%scpu", prefix);
cpu = of_get_child_by_name(node, prop);
+ if (!cpu) {
+ ret = -EINVAL;
+ dev_err(dev, "%s: Can't find %s DT node\n", __func__, prop);
+ goto dai_link_of_err;
+ }
+
snprintf(prop, sizeof(prop), "%splat", prefix);
plat = of_get_child_by_name(node, prop);
snprintf(prop, sizeof(prop), "%scodec", prefix);
codec = of_get_child_by_name(node, prop);
- if (!cpu || !codec) {
+ if (!codec) {
ret = -EINVAL;
dev_err(dev, "%s: Can't find %s DT node\n", __func__, prop);
goto dai_link_of_err;
@@ -265,17 +271,11 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
if (ret < 0)
goto dai_link_of_err;
- ret = snd_soc_of_parse_tdm_slot(cpu, &cpu_dai->tx_slot_mask,
- &cpu_dai->rx_slot_mask,
- &cpu_dai->slots,
- &cpu_dai->slot_width);
+ ret = asoc_simple_card_of_parse_tdm(cpu, cpu_dai);
if (ret < 0)
goto dai_link_of_err;
- ret = snd_soc_of_parse_tdm_slot(codec, &codec_dai->tx_slot_mask,
- &codec_dai->rx_slot_mask,
- &codec_dai->slots,
- &codec_dai->slot_width);
+ ret = asoc_simple_card_of_parse_tdm(codec, codec_dai);
if (ret < 0)
goto dai_link_of_err;
@@ -301,15 +301,6 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
dai_link->ops = &asoc_simple_card_ops;
dai_link->init = asoc_simple_card_dai_init;
- dev_dbg(dev, "\tname : %s\n", dai_link->stream_name);
- dev_dbg(dev, "\tformat : %04x\n", dai_link->dai_fmt);
- dev_dbg(dev, "\tcpu : %s / %d\n",
- dai_link->cpu_dai_name,
- dai_props->cpu_dai.sysclk);
- dev_dbg(dev, "\tcodec : %s / %d\n",
- dai_link->codec_dai_name,
- dai_props->codec_dai.sysclk);
-
asoc_simple_card_canonicalize_cpu(dai_link, single_cpu);
dai_link_of_err:
@@ -350,12 +341,12 @@ static int asoc_simple_card_parse_aux_devs(struct device_node *node,
return 0;
}
-static int asoc_simple_card_parse_of(struct device_node *node,
- struct simple_card_data *priv)
+static int asoc_simple_card_parse_of(struct simple_card_data *priv)
{
struct device *dev = simple_priv_to_dev(priv);
struct snd_soc_card *card = simple_priv_to_card(priv);
struct device_node *dai_link;
+ struct device_node *node = dev->of_node;
int ret;
if (!node)
@@ -363,21 +354,13 @@ static int asoc_simple_card_parse_of(struct device_node *node,
dai_link = of_get_child_by_name(node, PREFIX "dai-link");
- /* The off-codec widgets */
- if (of_property_read_bool(node, PREFIX "widgets")) {
- ret = snd_soc_of_parse_audio_simple_widgets(card,
- PREFIX "widgets");
- if (ret)
- goto card_parse_end;
- }
+ ret = asoc_simple_card_of_parse_widgets(card, PREFIX);
+ if (ret < 0)
+ goto card_parse_end;
- /* DAPM routes */
- if (of_property_read_bool(node, PREFIX "routing")) {
- ret = snd_soc_of_parse_audio_routing(card,
- PREFIX "routing");
- if (ret)
- goto card_parse_end;
- }
+ ret = asoc_simple_card_of_parse_routing(card, PREFIX, 1);
+ if (ret < 0)
+ goto card_parse_end;
/* Factor to mclk, used in hw_params() */
of_property_read_u32(node, PREFIX "mclk-fs", &priv->mclk_fs);
@@ -454,7 +437,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
if (np && of_device_is_available(np)) {
- ret = asoc_simple_card_parse_of(np, priv);
+ ret = asoc_simple_card_parse_of(priv);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
dev_err(dev, "parse error %d\n", ret);
@@ -497,8 +480,10 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
snd_soc_card_set_drvdata(card, priv);
ret = devm_snd_soc_register_card(dev, card);
- if (ret >= 0)
- return ret;
+ if (ret < 0)
+ goto err;
+
+ return 0;
err:
asoc_simple_card_clean_reference(card);
diff --git a/sound/soc/generic/simple-scu-card.c b/sound/soc/generic/simple-scu-card.c
index dcbcab230d1b..a75b385455c4 100644
--- a/sound/soc/generic/simple-scu-card.c
+++ b/sound/soc/generic/simple-scu-card.c
@@ -27,8 +27,7 @@ struct simple_card_data {
struct snd_soc_codec_conf codec_conf;
struct asoc_simple_dai *dai_props;
struct snd_soc_dai_link *dai_link;
- u32 convert_rate;
- u32 convert_channels;
+ struct asoc_simple_card_data adata;
};
#define simple_priv_to_card(priv) (&(priv)->snd_card)
@@ -47,7 +46,7 @@ static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
struct asoc_simple_dai *dai_props =
simple_priv_to_props(priv, rtd->num);
- return clk_prepare_enable(dai_props->clk);
+ return asoc_simple_card_clk_enable(dai_props);
}
static void asoc_simple_card_shutdown(struct snd_pcm_substream *substream)
@@ -57,7 +56,7 @@ static void asoc_simple_card_shutdown(struct snd_pcm_substream *substream)
struct asoc_simple_dai *dai_props =
simple_priv_to_props(priv, rtd->num);
- clk_disable_unprepare(dai_props->clk);
+ asoc_simple_card_clk_disable(dai_props);
}
static const struct snd_soc_ops asoc_simple_card_ops = {
@@ -86,18 +85,8 @@ static int asoc_simple_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
{
struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct snd_interval *rate = hw_param_interval(params,
- SNDRV_PCM_HW_PARAM_RATE);
- struct snd_interval *channels = hw_param_interval(params,
- SNDRV_PCM_HW_PARAM_CHANNELS);
- if (priv->convert_rate)
- rate->min =
- rate->max = priv->convert_rate;
-
- if (priv->convert_channels)
- channels->min =
- channels->max = priv->convert_channels;
+ asoc_simple_card_convert_fixup(&priv->adata, params);
return 0;
}
@@ -171,11 +160,7 @@ static int asoc_simple_card_dai_link_of(struct device_node *np,
PREFIX "prefix");
}
- ret = snd_soc_of_parse_tdm_slot(np,
- &dai_props->tx_slot_mask,
- &dai_props->rx_slot_mask,
- &dai_props->slots,
- &dai_props->slot_width);
+ ret = asoc_simple_card_of_parse_tdm(np, dai_props);
if (ret)
return ret;
@@ -189,21 +174,16 @@ static int asoc_simple_card_dai_link_of(struct device_node *np,
dai_link->ops = &asoc_simple_card_ops;
dai_link->init = asoc_simple_card_dai_init;
- dev_dbg(dev, "\t%s / %04x / %d\n",
- dai_link->name,
- dai_link->dai_fmt,
- dai_props->sysclk);
-
return 0;
}
-static int asoc_simple_card_parse_of(struct device_node *node,
- struct simple_card_data *priv)
+static int asoc_simple_card_parse_of(struct simple_card_data *priv)
{
struct device *dev = simple_priv_to_dev(priv);
struct device_node *np;
struct snd_soc_card *card = simple_priv_to_card(priv);
+ struct device_node *node = dev->of_node;
unsigned int daifmt = 0;
bool is_fe;
int ret, i;
@@ -211,15 +191,11 @@ static int asoc_simple_card_parse_of(struct device_node *node,
if (!node)
return -EINVAL;
- ret = snd_soc_of_parse_audio_routing(card, PREFIX "routing");
+ ret = asoc_simple_card_of_parse_routing(card, PREFIX, 0);
if (ret < 0)
return ret;
- /* sampling rate convert */
- of_property_read_u32(node, PREFIX "convert-rate", &priv->convert_rate);
-
- /* channels transfer */
- of_property_read_u32(node, PREFIX "convert-channels", &priv->convert_channels);
+ asoc_simple_card_parse_convert(dev, PREFIX, &priv->adata);
/* find 1st codec */
np = of_get_child_by_name(node, PREFIX "codec");
@@ -246,11 +222,6 @@ static int asoc_simple_card_parse_of(struct device_node *node,
if (ret < 0)
return ret;
- dev_dbg(dev, "New card: %s\n",
- card->name ? card->name : "");
- dev_dbg(dev, "convert_rate %d\n", priv->convert_rate);
- dev_dbg(dev, "convert_channels %d\n", priv->convert_channels);
-
return 0;
}
@@ -288,7 +259,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
card->codec_conf = &priv->codec_conf;
card->num_configs = 1;
- ret = asoc_simple_card_parse_of(np, priv);
+ ret = asoc_simple_card_parse_of(priv);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
dev_err(dev, "parse error %d\n", ret);
@@ -298,8 +269,10 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
snd_soc_card_set_drvdata(card, priv);
ret = devm_snd_soc_register_card(dev, card);
- if (ret >= 0)
- return ret;
+ if (ret < 0)
+ goto err;
+
+ return 0;
err:
asoc_simple_card_clean_reference(card);
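For reference, a minimal sketch of what the new asoc_simple_card_convert_fixup() helper has to do, reconstructed from the open-coded version removed above; the asoc_simple_card_data field names (convert_rate, convert_channels) are assumed here, and example_convert_fixup() is an illustrative name, not a driver symbol:

    #include <sound/pcm_params.h>
    #include <sound/simple_card_utils.h>

    static int example_convert_fixup(struct asoc_simple_card_data *adata,
                                     struct snd_pcm_hw_params *params)
    {
            struct snd_interval *rate = hw_param_interval(params,
                                            SNDRV_PCM_HW_PARAM_RATE);
            struct snd_interval *channels = hw_param_interval(params,
                                            SNDRV_PCM_HW_PARAM_CHANNELS);

            /* force the back-end rate parsed from "convert-rate" */
            if (adata->convert_rate)
                    rate->min = rate->max = adata->convert_rate;

            /* force the back-end channel count parsed from "convert-channels" */
            if (adata->convert_channels)
                    channels->min = channels->max = adata->convert_channels;

            return 0;
    }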
diff --git a/sound/soc/hisilicon/hi6210-i2s.c b/sound/soc/hisilicon/hi6210-i2s.c
index 45163e5202f5..b193d3beb253 100644
--- a/sound/soc/hisilicon/hi6210-i2s.c
+++ b/sound/soc/hisilicon/hi6210-i2s.c
@@ -97,8 +97,8 @@ static inline u32 hi6210_read_reg(struct hi6210_i2s *i2s, int reg)
return readl(i2s->base + reg);
}
-int hi6210_i2s_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *cpu_dai)
+static int hi6210_i2s_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
{
struct hi6210_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
int ret, n;
@@ -175,8 +175,9 @@ int hi6210_i2s_startup(struct snd_pcm_substream *substream,
return 0;
}
-void hi6210_i2s_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *cpu_dai)
+
+static void hi6210_i2s_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
{
struct hi6210_i2s *i2s = dev_get_drvdata(cpu_dai->dev);
int n;
@@ -524,7 +525,7 @@ static struct snd_soc_dai_ops hi6210_i2s_dai_ops = {
.shutdown = hi6210_i2s_shutdown,
};
-struct snd_soc_dai_driver hi6210_i2s_dai_init = {
+static const struct snd_soc_dai_driver hi6210_i2s_dai_init = {
.probe = hi6210_i2s_dai_probe,
.playback = {
.channels_min = 2,
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 67968ef3bbda..b301bfff1c09 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -214,6 +214,18 @@ config SND_SOC_INTEL_BYT_CHT_DA7213_MACH
platforms with DA7212/7213 audio codec.
If unsure select "N".
+config SND_SOC_INTEL_BYT_CHT_ES8316_MACH
+ tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail with ES8316 codec"
+ depends on X86_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_ES8316
+ select SND_SST_ATOM_HIFI2_PLATFORM
+ select SND_SST_IPC_ACPI
+ select SND_SOC_INTEL_SST_MATCH if ACPI
+ help
+ This adds support for the ASoC machine driver for Intel(R) Baytrail &
+ Cherrytrail platforms with the ES8316 audio codec.
+ If unsure select "N".
+
config SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH
tristate "ASoC Audio driver for Intel Baytrail & Cherrytrail platform with no codec (MinnowBoard MAX, Up)"
depends on X86_INTEL_LPSS && I2C && ACPI
@@ -226,6 +238,36 @@ config SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH
connector
If unsure select "N".
+config SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH
+ tristate "ASoC Audio driver for KBL with RT5663 and MAX98927 in I2S Mode"
+ depends on X86_INTEL_LPSS && I2C
+ select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SKYLAKE
+ select SND_SOC_RT5663
+ select SND_SOC_MAX98927
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
+ help
+ This adds support for the ASoC Onboard Codec I2S machine driver. This will
+ create an ALSA sound card for RT5663 + MAX98927.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH
+ tristate "ASoC Audio driver for KBL with RT5663, RT5514 and MAX98927 in I2S Mode"
+ depends on X86_INTEL_LPSS && I2C
+ select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SKYLAKE
+ select SND_SOC_RT5663
+ select SND_SOC_RT5514
+ select SND_SOC_MAX98927
+ select SND_SOC_HDAC_HDMI
+ help
+ This adds support for the ASoC Onboard Codec I2S machine driver. This will
+ create an ALSA sound card for RT5663 + RT5514 + MAX98927.
+ Say Y if you have such a device.
+ If unsure select "N".
+
config SND_SOC_INTEL_SKYLAKE
tristate
select SND_HDA_EXT_CORE
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 21cac1c8dd4c..b082b31023d5 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -690,7 +690,7 @@ static int sst_pcm_new(struct snd_soc_pcm_runtime *rtd)
snd_dma_continuous_data(GFP_DMA),
SST_MIN_BUFFER, SST_MAX_BUFFER);
if (retval) {
- dev_err(rtd->dev, "dma buffer allocationf fail\n");
+ dev_err(rtd->dev, "dma buffer allocation failure\n");
return retval;
}
}
diff --git a/sound/soc/intel/atom/sst/sst.c b/sound/soc/intel/atom/sst/sst.c
index f9ba71315e33..8afdff457579 100644
--- a/sound/soc/intel/atom/sst/sst.c
+++ b/sound/soc/intel/atom/sst/sst.c
@@ -258,7 +258,7 @@ static ssize_t firmware_version_show(struct device *dev,
}
-DEVICE_ATTR_RO(firmware_version);
+static DEVICE_ATTR_RO(firmware_version);
static const struct attribute *sst_fw_version_attrs[] = {
&dev_attr_firmware_version.attr,
@@ -382,37 +382,6 @@ void sst_context_cleanup(struct intel_sst_drv *ctx)
}
EXPORT_SYMBOL_GPL(sst_context_cleanup);
-static inline void sst_save_shim64(struct intel_sst_drv *ctx,
- void __iomem *shim,
- struct sst_shim_regs64 *shim_regs)
-{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
-
- shim_regs->imrx = sst_shim_read64(shim, SST_IMRX);
- shim_regs->csr = sst_shim_read64(shim, SST_CSR);
-
-
- spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
-}
-
-static inline void sst_restore_shim64(struct intel_sst_drv *ctx,
- void __iomem *shim,
- struct sst_shim_regs64 *shim_regs)
-{
- unsigned long irq_flags;
-
- /*
- * we only need to restore IMRX for this case, rest will be
- * initialize by FW or driver when firmware is loaded
- */
- spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
- sst_shim_write64(shim, SST_IMRX, shim_regs->imrx);
- sst_shim_write64(shim, SST_CSR, shim_regs->csr);
- spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
-}
-
void sst_configure_runtime_pm(struct intel_sst_drv *ctx)
{
pm_runtime_set_autosuspend_delay(ctx->dev, SST_SUSPEND_DELAY);
@@ -432,8 +401,6 @@ void sst_configure_runtime_pm(struct intel_sst_drv *ctx)
pm_runtime_set_active(ctx->dev);
else
pm_runtime_put_noidle(ctx->dev);
-
- sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64);
}
EXPORT_SYMBOL_GPL(sst_configure_runtime_pm);
@@ -457,8 +424,6 @@ static int intel_sst_runtime_suspend(struct device *dev)
flush_workqueue(ctx->post_msg_wq);
ctx->ops->reset(ctx);
- /* save the shim registers because PMC doesn't save state */
- sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64);
return ret;
}
@@ -499,23 +464,23 @@ static int intel_sst_suspend(struct device *dev)
fw_save = kzalloc(sizeof(*fw_save), GFP_KERNEL);
if (!fw_save)
return -ENOMEM;
- fw_save->iram = kzalloc(ctx->iram_end - ctx->iram_base, GFP_KERNEL);
+ fw_save->iram = kvzalloc(ctx->iram_end - ctx->iram_base, GFP_KERNEL);
if (!fw_save->iram) {
ret = -ENOMEM;
goto iram;
}
- fw_save->dram = kzalloc(ctx->dram_end - ctx->dram_base, GFP_KERNEL);
+ fw_save->dram = kvzalloc(ctx->dram_end - ctx->dram_base, GFP_KERNEL);
if (!fw_save->dram) {
ret = -ENOMEM;
goto dram;
}
- fw_save->sram = kzalloc(SST_MAILBOX_SIZE, GFP_KERNEL);
+ fw_save->sram = kvzalloc(SST_MAILBOX_SIZE, GFP_KERNEL);
if (!fw_save->sram) {
ret = -ENOMEM;
goto sram;
}
- fw_save->ddr = kzalloc(ctx->ddr_end - ctx->ddr_base, GFP_KERNEL);
+ fw_save->ddr = kvzalloc(ctx->ddr_end - ctx->ddr_base, GFP_KERNEL);
if (!fw_save->ddr) {
ret = -ENOMEM;
goto ddr;
@@ -530,11 +495,11 @@ static int intel_sst_suspend(struct device *dev)
ctx->ops->reset(ctx);
return 0;
ddr:
- kfree(fw_save->sram);
+ kvfree(fw_save->sram);
sram:
- kfree(fw_save->dram);
+ kvfree(fw_save->dram);
dram:
- kfree(fw_save->iram);
+ kvfree(fw_save->iram);
iram:
kfree(fw_save);
return ret;
@@ -562,10 +527,10 @@ static int intel_sst_resume(struct device *dev)
memcpy32_toio(ctx->mailbox, fw_save->sram, SST_MAILBOX_SIZE);
memcpy32_toio(ctx->ddr, fw_save->ddr, ctx->ddr_end - ctx->ddr_base);
- kfree(fw_save->sram);
- kfree(fw_save->dram);
- kfree(fw_save->iram);
- kfree(fw_save->ddr);
+ kvfree(fw_save->sram);
+ kvfree(fw_save->dram);
+ kvfree(fw_save->iram);
+ kvfree(fw_save->ddr);
kfree(fw_save);
block = sst_create_block(ctx, 0, FW_DWNL_ID);
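The kvzalloc()/kvfree() switch above matters because the IRAM/DRAM/DDR save areas can be large enough that a physically contiguous kzalloc() fails under memory fragmentation; kvzalloc() falls back to vmalloc() and kvfree() releases either kind. A minimal sketch of the pattern, with a hypothetical save_region() helper that is not part of the driver:

    #include <linux/mm.h>
    #include <linux/io.h>

    static void *save_region(void __iomem *src, size_t size)
    {
            /* physically contiguous if possible, vmalloc-backed otherwise */
            void *buf = kvzalloc(size, GFP_KERNEL);

            if (!buf)
                    return NULL;
            memcpy_fromio(buf, src, size);  /* snapshot the region */
            return buf;                     /* caller releases with kvfree() */
    }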
diff --git a/sound/soc/intel/atom/sst/sst.h b/sound/soc/intel/atom/sst/sst.h
index 5c9a51cc77aa..e02e2b4cc08f 100644
--- a/sound/soc/intel/atom/sst/sst.h
+++ b/sound/soc/intel/atom/sst/sst.h
@@ -317,31 +317,11 @@ struct sst_ipc_reg {
int ipcd;
};
-struct sst_shim_regs64 {
- u64 csr;
- u64 pisr;
- u64 pimr;
- u64 isrx;
- u64 isrd;
- u64 imrx;
- u64 imrd;
- u64 ipcx;
- u64 ipcd;
- u64 isrsc;
- u64 isrlpesc;
- u64 imrsc;
- u64 imrlpesc;
- u64 ipcsc;
- u64 ipclpesc;
- u64 clkctl;
- u64 csr2;
-};
-
struct sst_fw_save {
- void *iram;
- void *dram;
- void *sram;
- void *ddr;
+ void *iram; /* allocated via kvmalloc() */
+ void *dram; /* allocated via kvmalloc() */
+ void *sram; /* allocated via kvmalloc() */
+ void *ddr; /* allocated via kvmalloc() */
};
/**
@@ -356,7 +336,6 @@ struct sst_fw_save {
* @dram : SST DRAM pointer
* @pdata : SST info passed as a part of pci platform data
* @shim_phy_add : SST shim phy addr
- * @shim_regs64: Struct to save shim registers
* @ipc_dispatch_list : ipc messages dispatched
* @rx_list : to copy the process_reply/process_msg from DSP
* @ipc_post_msg_wq : wq to post IPC messages context
@@ -398,7 +377,6 @@ struct intel_sst_drv {
unsigned int ddr_end;
unsigned int ddr_base;
unsigned int mailbox_recv_offset;
- struct sst_shim_regs64 *shim_regs64;
struct list_head block_list;
struct list_head ipc_dispatch_list;
struct sst_platform_info *pdata;
diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
index dd250b8b26f2..0e928d54305d 100644
--- a/sound/soc/intel/atom/sst/sst_acpi.c
+++ b/sound/soc/intel/atom/sst/sst_acpi.c
@@ -303,8 +303,6 @@ static int sst_acpi_probe(struct platform_device *pdev)
dev_err(dev, "No matching machine driver found\n");
return -ENODEV;
}
- if (mach->machine_quirk)
- mach = mach->machine_quirk(mach);
pdata = mach->pdata;
@@ -360,23 +358,9 @@ static int sst_acpi_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- /* need to save shim registers in BYT */
- ctx->shim_regs64 = devm_kzalloc(ctx->dev, sizeof(*ctx->shim_regs64),
- GFP_KERNEL);
- if (!ctx->shim_regs64) {
- ret = -ENOMEM;
- goto do_sst_cleanup;
- }
-
sst_configure_runtime_pm(ctx);
platform_set_drvdata(pdev, ctx);
return ret;
-
-do_sst_cleanup:
- sst_context_cleanup(ctx);
- platform_set_drvdata(pdev, NULL);
- dev_err(ctx->dev, "failed with %d\n", ret);
- return ret;
}
/**
@@ -453,12 +437,20 @@ static const struct dmi_system_id cht_table[] = {
static struct sst_acpi_mach cht_surface_mach = {
- "10EC5640", "cht-bsw-rt5645", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
- &chv_platform_data };
+ .id = "10EC5640",
+ .drv_name = "cht-bsw-rt5645",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "cht-bsw",
+ .pdata = &chv_platform_data,
+};
static struct sst_acpi_mach byt_thinkpad_10 = {
- "10EC5640", "cht-bsw-rt5672", "intel/fw_sst_0f28.bin", "cht-bsw", NULL,
- &byt_rvp_platform_data };
+ .id = "10EC5640",
+ .drv_name = "cht-bsw-rt5672",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "cht-bsw",
+ .pdata = &byt_rvp_platform_data,
+};
static struct sst_acpi_mach *cht_quirk(void *arg)
{
@@ -486,68 +478,182 @@ static struct sst_acpi_mach *byt_quirk(void *arg)
static struct sst_acpi_mach sst_acpi_bytcr[] = {
- {"10EC5640", "bytcr_rt5640", "intel/fw_sst_0f28.bin", "bytcr_rt5640", byt_quirk,
- &byt_rvp_platform_data },
- {"10EC5642", "bytcr_rt5640", "intel/fw_sst_0f28.bin", "bytcr_rt5640", NULL,
- &byt_rvp_platform_data },
- {"INTCCFFD", "bytcr_rt5640", "intel/fw_sst_0f28.bin", "bytcr_rt5640", NULL,
- &byt_rvp_platform_data },
- {"10EC5651", "bytcr_rt5651", "intel/fw_sst_0f28.bin", "bytcr_rt5651", NULL,
- &byt_rvp_platform_data },
- {"DLGS7212", "bytcht_da7213", "intel/fw_sst_0f28.bin", "bytcht_da7213", NULL,
- &byt_rvp_platform_data },
- {"DLGS7213", "bytcht_da7213", "intel/fw_sst_0f28.bin", "bytcht_da7213", NULL,
- &byt_rvp_platform_data },
+ {
+ .id = "10EC5640",
+ .drv_name = "bytcr_rt5640",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "bytcr_rt5640",
+ .machine_quirk = byt_quirk,
+ .pdata = &byt_rvp_platform_data,
+ },
+ {
+ .id = "10EC5642",
+ .drv_name = "bytcr_rt5640",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "bytcr_rt5640",
+ .pdata = &byt_rvp_platform_data
+ },
+ {
+ .id = "INTCCFFD",
+ .drv_name = "bytcr_rt5640",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "bytcr_rt5640",
+ .pdata = &byt_rvp_platform_data
+ },
+ {
+ .id = "10EC5651",
+ .drv_name = "bytcr_rt5651",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "bytcr_rt5651",
+ .pdata = &byt_rvp_platform_data
+ },
+ {
+ .id = "DLGS7212",
+ .drv_name = "bytcht_da7213",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "bytcht_da7213",
+ .pdata = &byt_rvp_platform_data
+ },
+ {
+ .id = "DLGS7213",
+ .drv_name = "bytcht_da7213",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "bytcht_da7213",
+ .pdata = &byt_rvp_platform_data
+ },
/* some Baytrail platforms rely on RT5645, use CHT machine driver */
- {"10EC5645", "cht-bsw-rt5645", "intel/fw_sst_0f28.bin", "cht-bsw", NULL,
- &byt_rvp_platform_data },
- {"10EC5648", "cht-bsw-rt5645", "intel/fw_sst_0f28.bin", "cht-bsw", NULL,
- &byt_rvp_platform_data },
+ {
+ .id = "10EC5645",
+ .drv_name = "cht-bsw-rt5645",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "cht-bsw",
+ .pdata = &byt_rvp_platform_data
+ },
+ {
+ .id = "10EC5648",
+ .drv_name = "cht-bsw-rt5645",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "cht-bsw",
+ .pdata = &byt_rvp_platform_data
+ },
#if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
/*
* This is always last in the table so that it is selected only when
* enabled explicitly and there is no codec-related information in SSDT
*/
- {"80860F28", "bytcht_nocodec", "intel/fw_sst_0f28.bin", "bytcht_nocodec", NULL,
- &byt_rvp_platform_data },
+ {
+ .id = "80860F28",
+ .drv_name = "bytcht_nocodec",
+ .fw_filename = "intel/fw_sst_0f28.bin",
+ .board = "bytcht_nocodec",
+ .pdata = &byt_rvp_platform_data
+ },
#endif
{},
};
/* Cherryview-based platforms: CherryTrail and Braswell */
static struct sst_acpi_mach sst_acpi_chv[] = {
- {"10EC5670", "cht-bsw-rt5672", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
- &chv_platform_data },
- {"10EC5672", "cht-bsw-rt5672", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
- &chv_platform_data },
- {"10EC5645", "cht-bsw-rt5645", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
- &chv_platform_data },
- {"10EC5650", "cht-bsw-rt5645", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
- &chv_platform_data },
- {"10EC3270", "cht-bsw-rt5645", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
- &chv_platform_data },
-
- {"193C9890", "cht-bsw-max98090", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
- &chv_platform_data },
- {"DLGS7212", "bytcht_da7213", "intel/fw_sst_22a8.bin", "bytcht_da7213", NULL,
- &chv_platform_data },
- {"DLGS7213", "bytcht_da7213", "intel/fw_sst_22a8.bin", "bytcht_da7213", NULL,
- &chv_platform_data },
+ {
+ .id = "10EC5670",
+ .drv_name = "cht-bsw-rt5672",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "cht-bsw",
+ .pdata = &chv_platform_data
+ },
+ {
+ .id = "10EC5672",
+ .drv_name = "cht-bsw-rt5672",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "cht-bsw",
+ .pdata = &chv_platform_data
+ },
+ {
+ .id = "10EC5645",
+ .drv_name = "cht-bsw-rt5645",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "cht-bsw",
+ .pdata = &chv_platform_data
+ },
+ {
+ .id = "10EC5650",
+ .drv_name = "cht-bsw-rt5645",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "cht-bsw",
+ .pdata = &chv_platform_data
+ },
+ {
+ .id = "10EC3270",
+ .drv_name = "cht-bsw-rt5645",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "cht-bsw",
+ .pdata = &chv_platform_data
+ },
+
+ {
+ .id = "193C9890",
+ .drv_name = "cht-bsw-max98090",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "cht-bsw",
+ .pdata = &chv_platform_data
+ },
+ {
+ .id = "DLGS7212",
+ .drv_name = "bytcht_da7213",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "bytcht_da7213",
+ .pdata = &chv_platform_data
+ },
+ {
+ .id = "DLGS7213",
+ .drv_name = "bytcht_da7213",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "bytcht_da7213",
+ .pdata = &chv_platform_data
+ },
+ {
+ .id = "ESSX8316",
+ .drv_name = "bytcht_es8316",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "bytcht_es8316",
+ .pdata = &chv_platform_data
+ },
/* some CHT-T platforms rely on RT5640, use Baytrail machine driver */
- {"10EC5640", "bytcr_rt5640", "intel/fw_sst_22a8.bin", "bytcr_rt5640", cht_quirk,
- &chv_platform_data },
- {"10EC3276", "bytcr_rt5640", "intel/fw_sst_22a8.bin", "bytcr_rt5640", NULL,
- &chv_platform_data },
+ {
+ .id = "10EC5640",
+ .drv_name = "bytcr_rt5640",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "bytcr_rt5640",
+ .machine_quirk = cht_quirk,
+ .pdata = &chv_platform_data
+ },
+ {
+ .id = "10EC3276",
+ .drv_name = "bytcr_rt5640",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "bytcr_rt5640",
+ .pdata = &chv_platform_data
+ },
/* some CHT-T platforms rely on RT5651, use Baytrail machine driver */
- {"10EC5651", "bytcr_rt5651", "intel/fw_sst_22a8.bin", "bytcr_rt5651", NULL,
- &chv_platform_data },
+ {
+ .id = "10EC5651",
+ .drv_name = "bytcr_rt5651",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "bytcr_rt5651",
+ .pdata = &chv_platform_data
+ },
#if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
/*
* This is always last in the table so that it is selected only when
* enabled explicitly and there is no codec-related information in SSDT
*/
- {"808622A8", "bytcht_nocodec", "intel/fw_sst_22a8.bin", "bytcht_nocodec", NULL,
- &chv_platform_data },
+ {
+ .id = "808622A8",
+ .drv_name = "bytcht_nocodec",
+ .fw_filename = "intel/fw_sst_22a8.bin",
+ .board = "bytcht_nocodec",
+ .pdata = &chv_platform_data
+ },
#endif
{},
};
diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
index 56896e09445d..a5c5bc5732a2 100644
--- a/sound/soc/intel/boards/Makefile
+++ b/sound/soc/intel/boards/Makefile
@@ -11,7 +11,10 @@ snd-soc-sst-cht-bsw-rt5672-objs := cht_bsw_rt5672.o
snd-soc-sst-cht-bsw-rt5645-objs := cht_bsw_rt5645.o
snd-soc-sst-cht-bsw-max98090_ti-objs := cht_bsw_max98090_ti.o
snd-soc-sst-byt-cht-da7213-objs := bytcht_da7213.o
+snd-soc-sst-byt-cht-es8316-objs := bytcht_es8316.o
snd-soc-sst-byt-cht-nocodec-objs := bytcht_nocodec.o
+snd-soc-kbl_rt5663_max98927-objs := kbl_rt5663_max98927.o
+snd-soc-kbl_rt5663_rt5514_max98927-objs := kbl_rt5663_rt5514_max98927.o
snd-soc-skl_rt286-objs := skl_rt286.o
snd-skl_nau88l25_max98357a-objs := skl_nau88l25_max98357a.o
snd-soc-skl_nau88l25_ssm4567-objs := skl_nau88l25_ssm4567.o
@@ -29,7 +32,10 @@ obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH) += snd-soc-sst-cht-bsw-rt5672.o
obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH) += snd-soc-sst-cht-bsw-rt5645.o
obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH) += snd-soc-sst-cht-bsw-max98090_ti.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH) += snd-soc-sst-byt-cht-da7213.o
+obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH) += snd-soc-sst-byt-cht-es8316.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH) += snd-soc-sst-byt-cht-nocodec.o
+obj-$(CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH) += snd-soc-kbl_rt5663_max98927.o
+obj-$(CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH) += snd-soc-kbl_rt5663_rt5514_max98927.o
obj-$(CONFIG_SND_SOC_INTEL_SKL_RT286_MACH) += snd-soc-skl_rt286.o
obj-$(CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH) += snd-skl_nau88l25_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH) += snd-soc-skl_nau88l25_ssm4567.o
diff --git a/sound/soc/intel/boards/bdw-rt5677.c b/sound/soc/intel/boards/bdw-rt5677.c
index 14d9693c1641..058b8ccedf02 100644
--- a/sound/soc/intel/boards/bdw-rt5677.c
+++ b/sound/soc/intel/boards/bdw-rt5677.c
@@ -16,6 +16,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
@@ -120,6 +121,26 @@ static struct snd_soc_jack_gpio mic_jack_gpio = {
.invert = 1,
};
+/* GPIO indexes defined by ACPI */
+enum {
+ RT5677_GPIO_PLUG_DET = 0,
+ RT5677_GPIO_MIC_PRESENT_L = 1,
+ RT5677_GPIO_HOTWORD_DET_L = 2,
+ RT5677_GPIO_DSP_INT = 3,
+ RT5677_GPIO_HP_AMP_SHDN_L = 4,
+};
+
+static const struct acpi_gpio_params plug_det_gpio = { RT5677_GPIO_PLUG_DET, 0, false };
+static const struct acpi_gpio_params mic_present_gpio = { RT5677_GPIO_MIC_PRESENT_L, 0, false };
+static const struct acpi_gpio_params headphone_enable_gpio = { RT5677_GPIO_HP_AMP_SHDN_L, 0, false };
+
+static const struct acpi_gpio_mapping bdw_rt5677_gpios[] = {
+ { "plug-det-gpios", &plug_det_gpio, 1 },
+ { "mic-present-gpios", &mic_present_gpio, 1 },
+ { "headphone-enable-gpios", &headphone_enable_gpio, 1 },
+ { NULL },
+};
+
static int broadwell_ssp0_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
{
@@ -184,6 +205,11 @@ static int bdw_rt5677_init(struct snd_soc_pcm_runtime *rtd)
snd_soc_card_get_drvdata(rtd->card);
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ int ret;
+
+ ret = devm_acpi_dev_add_driver_gpios(codec->dev, bdw_rt5677_gpios);
+ if (ret)
+ dev_warn(codec->dev, "Failed to add driver gpios\n");
/* Enable codec ASRC function for Stereo DAC/Stereo1 ADC/DMIC/I2S1.
* The ASRC clock source is clk_i2s1_asrc.
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index 3a8c4d954a91..ce35ec7884d1 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -89,11 +89,6 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
if (ret)
dev_err(card->dev, "failed to stop PLL: %d\n", ret);
} else if(SND_SOC_DAPM_EVENT_ON(event)) {
- ret = snd_soc_dai_set_sysclk(codec_dai,
- DA7219_CLKSRC_MCLK, 19200000, SND_SOC_CLOCK_IN);
- if (ret)
- dev_err(card->dev, "can't set codec sysclk configuration\n");
-
ret = snd_soc_dai_set_pll(codec_dai, 0,
DA7219_SYSCLK_PLL_SRM, 0, DA7219_PLL_FREQ_OUT_98304);
if (ret)
@@ -187,8 +182,17 @@ static int broxton_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
static int broxton_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
{
int ret;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_codec *codec = rtd->codec;
+ /* Configure sysclk for codec */
+ ret = snd_soc_dai_set_sysclk(codec_dai, DA7219_CLKSRC_MCLK, 19200000,
+ SND_SOC_CLOCK_IN);
+ if (ret) {
+ dev_err(rtd->dev, "can't set codec sysclk configuration\n");
+ return ret;
+ }
+
/*
* Headset buttons map to the google Reference headset.
* These can be configured by userspace.
@@ -238,31 +242,31 @@ static int broxton_da7219_fe_init(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static unsigned int rates[] = {
+static const unsigned int rates[] = {
48000,
};
-static struct snd_pcm_hw_constraint_list constraints_rates = {
+static const struct snd_pcm_hw_constraint_list constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
};
-static unsigned int channels[] = {
+static const unsigned int channels[] = {
DUAL_CHANNEL,
};
-static struct snd_pcm_hw_constraint_list constraints_channels = {
+static const struct snd_pcm_hw_constraint_list constraints_channels = {
.count = ARRAY_SIZE(channels),
.list = channels,
.mask = 0,
};
-static unsigned int channels_quad[] = {
+static const unsigned int channels_quad[] = {
QUAD_CHANNEL,
};
-static struct snd_pcm_hw_constraint_list constraints_channels_quad = {
+static const struct snd_pcm_hw_constraint_list constraints_channels_quad = {
.count = ARRAY_SIZE(channels_quad),
.list = channels_quad,
.mask = 0,
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index 1a68d043c803..0c3a3cbcb884 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -207,11 +207,11 @@ static const struct snd_soc_ops broxton_rt298_ops = {
.hw_params = broxton_rt298_hw_params,
};
-static unsigned int rates[] = {
+static const unsigned int rates[] = {
48000,
};
-static struct snd_pcm_hw_constraint_list constraints_rates = {
+static const struct snd_pcm_hw_constraint_list constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
@@ -222,19 +222,16 @@ static int broxton_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
{
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
- if (params_channels(params) == 2)
- channels->min = channels->max = 2;
- else
- channels->min = channels->max = 4;
+ channels->min = channels->max = 4;
return 0;
}
-static unsigned int channels_dmic[] = {
- 2, 4,
+static const unsigned int channels_dmic[] = {
+ 1, 2, 3, 4,
};
-static struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
+static const struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
.count = ARRAY_SIZE(channels_dmic),
.list = channels_dmic,
.mask = 0,
@@ -256,11 +253,11 @@ static const struct snd_soc_ops broxton_dmic_ops = {
.startup = broxton_dmic_startup,
};
-static unsigned int channels[] = {
+static const unsigned int channels[] = {
2,
};
-static struct snd_pcm_hw_constraint_list constraints_channels = {
+static const struct snd_pcm_hw_constraint_list constraints_channels = {
.count = ARRAY_SIZE(channels),
.list = channels,
.mask = 0,
diff --git a/sound/soc/intel/boards/byt-max98090.c b/sound/soc/intel/boards/byt-max98090.c
index d9f81b8d915d..047be7fa0ce9 100644
--- a/sound/soc/intel/boards/byt-max98090.c
+++ b/sound/soc/intel/boards/byt-max98090.c
@@ -67,20 +67,27 @@ static struct snd_soc_jack_pin hs_jack_pins[] = {
static struct snd_soc_jack_gpio hs_jack_gpios[] = {
{
- .name = "hp-gpio",
- .idx = 0,
+ .name = "hp",
.report = SND_JACK_HEADPHONE | SND_JACK_LINEOUT,
.debounce_time = 200,
},
{
- .name = "mic-gpio",
- .idx = 1,
+ .name = "mic",
.invert = 1,
.report = SND_JACK_MICROPHONE,
.debounce_time = 200,
},
};
+static const struct acpi_gpio_params hp_gpios = { 0, 0, false };
+static const struct acpi_gpio_params mic_gpios = { 1, 0, false };
+
+static const struct acpi_gpio_mapping acpi_byt_max98090_gpios[] = {
+ { "hp-gpios", &hp_gpios, 1 },
+ { "mic-gpios", &mic_gpios, 1 },
+ {},
+};
+
static int byt_max98090_init(struct snd_soc_pcm_runtime *runtime)
{
int ret;
@@ -140,8 +147,9 @@ static struct snd_soc_card byt_max98090_card = {
static int byt_max98090_probe(struct platform_device *pdev)
{
- int ret_val = 0;
+ struct device *dev = &pdev->dev;
struct byt_max98090_private *priv;
+ int ret_val;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_ATOMIC);
if (!priv) {
@@ -149,6 +157,10 @@ static int byt_max98090_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ ret_val = devm_acpi_dev_add_driver_gpios(dev->parent, acpi_byt_max98090_gpios);
+ if (ret_val)
+ dev_dbg(dev, "Unable to add GPIO mapping table\n");
+
byt_max98090_card.dev = &pdev->dev;
snd_soc_card_set_drvdata(&byt_max98090_card, priv);
ret_val = devm_snd_soc_register_card(&pdev->dev, &byt_max98090_card);
@@ -158,7 +170,7 @@ static int byt_max98090_probe(struct platform_device *pdev)
return ret_val;
}
- return ret_val;
+ return 0;
}
static int byt_max98090_remove(struct platform_device *pdev)
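Both GPIO hunks above (bdw-rt5677 and byt-max98090) register an acpi_gpio_mapping table so that gpiod lookups by connection ID resolve against the ACPI _CRS GPIO resources at the listed indexes. A minimal sketch of how such a mapping can be consumed; the helper name is illustrative, and the "hp" connection ID simply reuses the table above:

    #include <linux/acpi.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static int example_read_hp_gpio(struct device *dev,
                                    const struct acpi_gpio_mapping *map)
    {
            struct gpio_desc *hp;
            int ret;

            /* make "hp-gpios" et al. visible to gpiod lookups on this device */
            ret = devm_acpi_dev_add_driver_gpios(dev, map);
            if (ret)
                    return ret;

            hp = devm_gpiod_get(dev, "hp", GPIOD_IN);
            if (IS_ERR(hp))
                    return PTR_ERR(hp);

            return gpiod_get_value_cansleep(hp);    /* current jack state */
    }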
diff --git a/sound/soc/intel/boards/bytcht_es8316.c b/sound/soc/intel/boards/bytcht_es8316.c
new file mode 100644
index 000000000000..52635462dac6
--- /dev/null
+++ b/sound/soc/intel/boards/bytcht_es8316.c
@@ -0,0 +1,300 @@
+/*
+ * bytcht_es8316.c - ASoC machine driver for Intel Baytrail/Cherrytrail
+ * platforms with the Everest ES8316 audio codec
+ *
+ * Copyright (C) 2017 Endless Mobile, Inc.
+ * Authors: David Yang <[email protected]>,
+ * Daniel Drake <[email protected]>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <asm/platform_sst_audio.h>
+#include <linux/clk.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "../atom/sst-atom-controls.h"
+#include "../common/sst-acpi.h"
+#include "../common/sst-dsp.h"
+
+struct byt_cht_es8316_private {
+ struct clk *mclk;
+};
+
+#define CODEC_DAI1 "ES8316 HiFi"
+
+static inline struct snd_soc_dai *get_codec_dai(struct snd_soc_card *card)
+{
+ struct snd_soc_pcm_runtime *rtd;
+
+ list_for_each_entry(rtd, &card->rtd_list, list) {
+ if (!strncmp(rtd->codec_dai->name, CODEC_DAI1,
+ strlen(CODEC_DAI1)))
+ return rtd->codec_dai;
+ }
+ return NULL;
+}
+
+static const struct snd_soc_dapm_widget byt_cht_es8316_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone", NULL),
+
+ /*
+ * The codec supports two analog microphone inputs. I have only
+ * tested MIC1. A DMIC route could also potentially be added
+ * if such functionality is found on another platform.
+ */
+ SND_SOC_DAPM_MIC("Microphone 1", NULL),
+ SND_SOC_DAPM_MIC("Microphone 2", NULL),
+};
+
+static const struct snd_soc_dapm_route byt_cht_es8316_audio_map[] = {
+ {"MIC1", NULL, "Microphone 1"},
+ {"MIC2", NULL, "Microphone 2"},
+
+ {"Headphone", NULL, "HPOL"},
+ {"Headphone", NULL, "HPOR"},
+
+ {"Playback", NULL, "ssp2 Tx"},
+ {"ssp2 Tx", NULL, "codec_out0"},
+ {"ssp2 Tx", NULL, "codec_out1"},
+ {"codec_in0", NULL, "ssp2 Rx" },
+ {"codec_in1", NULL, "ssp2 Rx" },
+ {"ssp2 Rx", NULL, "Capture"},
+};
+
+static const struct snd_kcontrol_new byt_cht_es8316_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone"),
+ SOC_DAPM_PIN_SWITCH("Microphone 1"),
+ SOC_DAPM_PIN_SWITCH("Microphone 2"),
+};
+
+static int byt_cht_es8316_init(struct snd_soc_pcm_runtime *runtime)
+{
+ struct snd_soc_card *card = runtime->card;
+ struct byt_cht_es8316_private *priv = snd_soc_card_get_drvdata(card);
+ int ret;
+
+ card->dapm.idle_bias_off = true;
+
+ /*
+ * The firmware might enable the clock at boot (this information
+ * may or may not be reflected in the enable clock register).
+ * To change the rate we must disable the clock first to cover these
+ * cases. Due to common clock framework restrictions that do not allow
+ * to disable a clock that has not been enabled, we need to enable
+ * the clock first.
+ */
+ ret = clk_prepare_enable(priv->mclk);
+ if (!ret)
+ clk_disable_unprepare(priv->mclk);
+
+ ret = clk_set_rate(priv->mclk, 19200000);
+ if (ret)
+ dev_err(card->dev, "unable to set MCLK rate\n");
+
+ ret = clk_prepare_enable(priv->mclk);
+ if (ret)
+ dev_err(card->dev, "unable to enable MCLK\n");
+
+ ret = snd_soc_dai_set_sysclk(runtime->codec_dai, 0, 19200000,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(card->dev, "can't set codec clock %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_pcm_stream byt_cht_es8316_dai_params = {
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+};
+
+static int byt_cht_es8316_codec_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ int ret;
+
+ /* The DSP will convert the FE rate to 48k, stereo */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+
+ /* set SSP2 to 24-bit */
+ params_set_format(params, SNDRV_PCM_FORMAT_S24_LE);
+
+ /*
+ * Default mode for SSP configuration is TDM 4 slot, override config
+ * with explicit setting to I2S 2ch 24-bit. The word length is set with
+ * dai_set_tdm_slot() since there is no other API exposed
+ */
+ ret = snd_soc_dai_set_fmt(rtd->cpu_dai,
+ SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS
+ );
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set format to I2S, err %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2, 24);
+ if (ret < 0) {
+ dev_err(rtd->dev, "can't set I2S config, err %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int byt_cht_es8316_aif1_startup(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_hw_constraint_single(substream->runtime,
+ SNDRV_PCM_HW_PARAM_RATE, 48000);
+}
+
+static const struct snd_soc_ops byt_cht_es8316_aif1_ops = {
+ .startup = byt_cht_es8316_aif1_startup,
+};
+
+static struct snd_soc_dai_link byt_cht_es8316_dais[] = {
+ [MERR_DPCM_AUDIO] = {
+ .name = "Audio Port",
+ .stream_name = "Audio",
+ .cpu_dai_name = "media-cpu-dai",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .platform_name = "sst-mfld-platform",
+ .nonatomic = true,
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ops = &byt_cht_es8316_aif1_ops,
+ },
+
+ [MERR_DPCM_DEEP_BUFFER] = {
+ .name = "Deep-Buffer Audio Port",
+ .stream_name = "Deep-Buffer Audio",
+ .cpu_dai_name = "deepbuffer-cpu-dai",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .platform_name = "sst-mfld-platform",
+ .nonatomic = true,
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .ops = &byt_cht_es8316_aif1_ops,
+ },
+
+ [MERR_DPCM_COMPR] = {
+ .name = "Compressed Port",
+ .stream_name = "Compress",
+ .cpu_dai_name = "compress-cpu-dai",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ .platform_name = "sst-mfld-platform",
+ },
+
+ /* back ends */
+ {
+ /* Only SSP2 has been tested here, so BYT-CR platforms that
+ * require SSP0 will not work.
+ */
+ .name = "SSP2-Codec",
+ .id = 1,
+ .cpu_dai_name = "ssp2-port",
+ .platform_name = "sst-mfld-platform",
+ .no_pcm = 1,
+ .codec_dai_name = "ES8316 HiFi",
+ .codec_name = "i2c-ESSX8316:00",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBS_CFS,
+ .be_hw_params_fixup = byt_cht_es8316_codec_fixup,
+ .nonatomic = true,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .init = byt_cht_es8316_init,
+ },
+};
+
+
+/* SoC card */
+static struct snd_soc_card byt_cht_es8316_card = {
+ .name = "bytcht-es8316",
+ .owner = THIS_MODULE,
+ .dai_link = byt_cht_es8316_dais,
+ .num_links = ARRAY_SIZE(byt_cht_es8316_dais),
+ .dapm_widgets = byt_cht_es8316_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(byt_cht_es8316_widgets),
+ .dapm_routes = byt_cht_es8316_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(byt_cht_es8316_audio_map),
+ .controls = byt_cht_es8316_controls,
+ .num_controls = ARRAY_SIZE(byt_cht_es8316_controls),
+ .fully_routed = true,
+};
+
+static int snd_byt_cht_es8316_mc_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct byt_cht_es8316_private *priv;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_ATOMIC);
+ if (!priv)
+ return -ENOMEM;
+
+ /* register the soc card */
+ byt_cht_es8316_card.dev = &pdev->dev;
+ snd_soc_card_set_drvdata(&byt_cht_es8316_card, priv);
+
+ priv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+ if (IS_ERR(priv->mclk)) {
+ ret = PTR_ERR(priv->mclk);
+ dev_err(&pdev->dev,
+ "Failed to get MCLK from pmc_plt_clk_3: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = devm_snd_soc_register_card(&pdev->dev, &byt_cht_es8316_card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card failed %d\n", ret);
+ return ret;
+ }
+ platform_set_drvdata(pdev, &byt_cht_es8316_card);
+ return ret;
+}
+
+static struct platform_driver snd_byt_cht_es8316_mc_driver = {
+ .driver = {
+ .name = "bytcht_es8316",
+ },
+ .probe = snd_byt_cht_es8316_mc_probe,
+};
+
+module_platform_driver(snd_byt_cht_es8316_mc_driver);
+MODULE_DESCRIPTION("ASoC Intel(R) Baytrail/Cherrytrail Machine driver");
+MODULE_AUTHOR("David Yang <[email protected]>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:bytcht_es8316");
diff --git a/sound/soc/intel/boards/bytcht_nocodec.c b/sound/soc/intel/boards/bytcht_nocodec.c
index 89853eeaaf9d..1dd9441806fa 100644
--- a/sound/soc/intel/boards/bytcht_nocodec.c
+++ b/sound/soc/intel/boards/bytcht_nocodec.c
@@ -85,11 +85,11 @@ static int codec_fixup(struct snd_soc_pcm_runtime *rtd,
return 0;
}
-static unsigned int rates_48000[] = {
+static const unsigned int rates_48000[] = {
48000,
};
-static struct snd_pcm_hw_constraint_list constraints_48000 = {
+static const struct snd_pcm_hw_constraint_list constraints_48000 = {
.count = ARRAY_SIZE(rates_48000),
.list = rates_48000,
};
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index 8164bec63bf1..4a3516b38c2c 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -203,11 +203,11 @@ static int byt_rt5651_codec_fixup(struct snd_soc_pcm_runtime *rtd,
return 0;
}
-static unsigned int rates_48000[] = {
+static const unsigned int rates_48000[] = {
48000,
};
-static struct snd_pcm_hw_constraint_list constraints_48000 = {
+static const struct snd_pcm_hw_constraint_list constraints_48000 = {
.count = ARRAY_SIZE(rates_48000),
.list = rates_48000,
};
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index 742bc0d4e681..20755ecc7f9e 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -39,18 +39,6 @@ struct cht_mc_private {
bool ts3a227e_present;
};
-static inline struct snd_soc_dai *cht_get_codec_dai(struct snd_soc_card *card)
-{
- struct snd_soc_pcm_runtime *rtd;
-
- list_for_each_entry(rtd, &card->rtd_list, list) {
- if (!strncmp(rtd->codec_dai->name, CHT_CODEC_DAI,
- strlen(CHT_CODEC_DAI)))
- return rtd->codec_dai;
- }
- return NULL;
-}
-
static const struct snd_soc_dapm_widget cht_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index e4d46d4360d7..bc2a52de06a3 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -19,6 +19,8 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/clk.h>
+#include <asm/cpu_device_id.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -31,8 +33,11 @@
#define CHT_PLAT_CLK_3_HZ 19200000
#define CHT_CODEC_DAI "rt5670-aif1"
-static struct snd_soc_jack cht_bsw_headset;
-static char cht_bsw_codec_name[16];
+struct cht_mc_private {
+ struct snd_soc_jack headset;
+ char codec_name[16];
+ struct clk *mclk;
+};
/* Headset jack detection DAPM pins */
static struct snd_soc_jack_pin cht_bsw_headset_pins[] = {
@@ -64,6 +69,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
struct snd_soc_dapm_context *dapm = w->dapm;
struct snd_soc_card *card = dapm->card;
struct snd_soc_dai *codec_dai;
+ struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
int ret;
codec_dai = cht_get_codec_dai(card);
@@ -73,6 +79,15 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
}
if (SND_SOC_DAPM_EVENT_ON(event)) {
+ if (ctx->mclk) {
+ ret = clk_prepare_enable(ctx->mclk);
+ if (ret < 0) {
+ dev_err(card->dev,
+ "could not configure MCLK state");
+ return ret;
+ }
+ }
+
/* set codec PLL source to the 19.2MHz platform clock (MCLK) */
ret = snd_soc_dai_set_pll(codec_dai, 0, RT5670_PLL1_S_MCLK,
CHT_PLAT_CLK_3_HZ, 48000 * 512);
@@ -96,6 +111,9 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
*/
snd_soc_dai_set_sysclk(codec_dai, RT5670_SCLK_S_RCCLK,
48000 * 512, SND_SOC_CLOCK_IN);
+
+ if (ctx->mclk)
+ clk_disable_unprepare(ctx->mclk);
}
return 0;
}
@@ -171,6 +189,7 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
int ret;
struct snd_soc_dai *codec_dai = runtime->codec_dai;
struct snd_soc_codec *codec = codec_dai->codec;
+ struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card);
/* TDM 4 slots 24 bit, set Rx & Tx bitmask to 4 active slots */
ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xF, 0xF, 4, 24);
@@ -194,13 +213,37 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
RT5670_CLK_SEL_I2S1_ASRC);
ret = snd_soc_card_jack_new(runtime->card, "Headset",
- SND_JACK_HEADSET | SND_JACK_BTN_0 |
- SND_JACK_BTN_1 | SND_JACK_BTN_2, &cht_bsw_headset,
- cht_bsw_headset_pins, ARRAY_SIZE(cht_bsw_headset_pins));
+ SND_JACK_HEADSET | SND_JACK_BTN_0 |
+ SND_JACK_BTN_1 | SND_JACK_BTN_2,
+ &ctx->headset,
+ cht_bsw_headset_pins,
+ ARRAY_SIZE(cht_bsw_headset_pins));
if (ret)
return ret;
- rt5670_set_jack_detect(codec, &cht_bsw_headset);
+ rt5670_set_jack_detect(codec, &ctx->headset);
+ if (ctx->mclk) {
+ /*
+ * The firmware might enable the clock at
+ * boot (this information may or may not
+ * be reflected in the enable clock register).
+ * To change the rate we must disable the clock
+ * first to cover these cases. Due to common
+ * clock framework restrictions that do not allow
+ * to disable a clock that has not been enabled,
+ * we need to enable the clock first.
+ */
+ ret = clk_prepare_enable(ctx->mclk);
+ if (!ret)
+ clk_disable_unprepare(ctx->mclk);
+
+ ret = clk_set_rate(ctx->mclk, CHT_PLAT_CLK_3_HZ);
+
+ if (ret) {
+ dev_err(runtime->dev, "unable to set MCLK rate\n");
+ return ret;
+ }
+ }
return 0;
}
@@ -341,34 +384,62 @@ static struct snd_soc_card snd_soc_card_cht = {
.resume_post = cht_resume_post,
};
+static bool is_valleyview(void)
+{
+ static const struct x86_cpu_id cpu_ids[] = {
+ { X86_VENDOR_INTEL, 6, 55 }, /* Valleyview, Bay Trail */
+ {}
+ };
+
+ if (!x86_match_cpu(cpu_ids))
+ return false;
+ return true;
+}
+
#define RT5672_I2C_DEFAULT "i2c-10EC5670:00"
static int snd_cht_mc_probe(struct platform_device *pdev)
{
int ret_val = 0;
+ struct cht_mc_private *drv;
struct sst_acpi_mach *mach = pdev->dev.platform_data;
const char *i2c_name;
int i;
- strcpy(cht_bsw_codec_name, RT5672_I2C_DEFAULT);
+ drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_ATOMIC);
+ if (!drv)
+ return -ENOMEM;
+
+ strcpy(drv->codec_name, RT5672_I2C_DEFAULT);
/* fixup codec name based on HID */
if (mach) {
i2c_name = sst_acpi_find_name_from_hid(mach->id);
if (i2c_name) {
- snprintf(cht_bsw_codec_name, sizeof(cht_bsw_codec_name),
+ snprintf(drv->codec_name, sizeof(drv->codec_name),
"i2c-%s", i2c_name);
for (i = 0; i < ARRAY_SIZE(cht_dailink); i++) {
if (!strcmp(cht_dailink[i].codec_name,
RT5672_I2C_DEFAULT)) {
cht_dailink[i].codec_name =
- cht_bsw_codec_name;
+ drv->codec_name;
break;
}
}
}
}
+ if (is_valleyview()) {
+ drv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+ if (IS_ERR(drv->mclk)) {
+ dev_err(&pdev->dev,
+ "Failed to get MCLK from pmc_plt_clk_3: %ld\n",
+ PTR_ERR(drv->mclk));
+ return PTR_ERR(drv->mclk);
+ }
+ }
+ snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
+
/* register the soc card */
snd_soc_card_cht.dev = &pdev->dev;
ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_cht);
diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
new file mode 100644
index 000000000000..f9ba97788157
--- /dev/null
+++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
@@ -0,0 +1,687 @@
+/*
+ * Intel Kabylake I2S Machine Driver with MAXIM98927
+ * and RT5663 Codecs
+ *
+ * Copyright (C) 2017, Intel Corporation. All rights reserved.
+ *
+ * Modified from:
+ * Intel Skylake I2S Machine driver
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "../../codecs/rt5663.h"
+#include "../../codecs/hdac_hdmi.h"
+#include "../skylake/skl.h"
+
+#define KBL_REALTEK_CODEC_DAI "rt5663-aif"
+#define KBL_MAXIM_CODEC_DAI "max98927-aif1"
+#define DMIC_CH(p) p->list[p->count-1]
+#define MAXIM_DEV0_NAME "i2c-MX98927:00"
+#define MAXIM_DEV1_NAME "i2c-MX98927:01"
+
+static struct snd_soc_card kabylake_audio_card;
+static const struct snd_pcm_hw_constraint_list *dmic_constraints;
+static struct snd_soc_jack skylake_hdmi[3];
+
+struct kbl_hdmi_pcm {
+ struct list_head head;
+ struct snd_soc_dai *codec_dai;
+ int device;
+};
+
+struct kbl_rt5663_private {
+ struct snd_soc_jack kabylake_headset;
+ struct list_head hdmi_pcm_list;
+};
+
+enum {
+ KBL_DPCM_AUDIO_PB = 0,
+ KBL_DPCM_AUDIO_CP,
+ KBL_DPCM_AUDIO_REF_CP,
+ KBL_DPCM_AUDIO_DMIC_CP,
+ KBL_DPCM_AUDIO_HDMI1_PB,
+ KBL_DPCM_AUDIO_HDMI2_PB,
+ KBL_DPCM_AUDIO_HDMI3_PB,
+};
+
+static const struct snd_kcontrol_new kabylake_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Left Spk"),
+ SOC_DAPM_PIN_SWITCH("Right Spk"),
+};
+
+static const struct snd_soc_dapm_widget kabylake_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_SPK("Left Spk", NULL),
+ SND_SOC_DAPM_SPK("Right Spk", NULL),
+ SND_SOC_DAPM_MIC("SoC DMIC", NULL),
+ SND_SOC_DAPM_SPK("DP", NULL),
+ SND_SOC_DAPM_SPK("HDMI", NULL),
+
+};
+
+static const struct snd_soc_dapm_route kabylake_map[] = {
+ /* HP jack connectors - unknown if we have jack detection */
+ { "Headphone Jack", NULL, "HPOL" },
+ { "Headphone Jack", NULL, "HPOR" },
+
+ /* speaker */
+ { "Left Spk", NULL, "Left BE_OUT" },
+ { "Right Spk", NULL, "Right BE_OUT" },
+
+ /* other jacks */
+ { "IN1P", NULL, "Headset Mic" },
+ { "IN1N", NULL, "Headset Mic" },
+ { "DMic", NULL, "SoC DMIC" },
+
+ { "HDMI", NULL, "hif5 Output" },
+ { "DP", NULL, "hif6 Output" },
+
+ /* CODEC BE connections */
+ { "Left HiFi Playback", NULL, "ssp0 Tx" },
+ { "Right HiFi Playback", NULL, "ssp0 Tx" },
+ { "ssp0 Tx", NULL, "codec0_out" },
+
+ { "AIF Playback", NULL, "ssp1 Tx" },
+ { "ssp1 Tx", NULL, "codec1_out" },
+
+ { "codec0_in", NULL, "ssp1 Rx" },
+ { "ssp1 Rx", NULL, "AIF Capture" },
+
+ /* DMIC */
+ { "dmic01_hifi", NULL, "DMIC01 Rx" },
+ { "DMIC01 Rx", NULL, "DMIC AIF" },
+
+ { "hifi3", NULL, "iDisp3 Tx"},
+ { "iDisp3 Tx", NULL, "iDisp3_out"},
+ { "hifi2", NULL, "iDisp2 Tx"},
+ { "iDisp2 Tx", NULL, "iDisp2_out"},
+ { "hifi1", NULL, "iDisp1 Tx"},
+ { "iDisp1 Tx", NULL, "iDisp1_out"},
+};
+
+static struct snd_soc_codec_conf max98927_codec_conf[] = {
+ {
+ .dev_name = MAXIM_DEV0_NAME,
+ .name_prefix = "Right",
+ },
+ {
+ .dev_name = MAXIM_DEV1_NAME,
+ .name_prefix = "Left",
+ },
+};
+
+static struct snd_soc_dai_link_component max98927_codec_components[] = {
+ { /* Left */
+ .name = MAXIM_DEV0_NAME,
+ .dai_name = KBL_MAXIM_CODEC_DAI,
+ },
+ { /* Right */
+ .name = MAXIM_DEV1_NAME,
+ .dai_name = KBL_MAXIM_CODEC_DAI,
+ },
+};
+
+static int kabylake_rt5663_fe_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int ret;
+ struct snd_soc_dapm_context *dapm;
+ struct snd_soc_component *component = rtd->cpu_dai->component;
+
+ dapm = snd_soc_component_get_dapm(component);
+ ret = snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
+ if (ret) {
+ dev_err(rtd->dev, "Ref Cap ignore suspend failed %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int kabylake_rt5663_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int ret;
+ struct kbl_rt5663_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_codec *codec = rtd->codec;
+
+ /*
+ * Headset buttons map to the google Reference headset.
+ * These can be configured by userspace.
+ */
+ ret = snd_soc_card_jack_new(&kabylake_audio_card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3, &ctx->kabylake_headset,
+ NULL, 0);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack creation failed %d\n", ret);
+ return ret;
+ }
+
+ rt5663_set_jack_detect(codec, &ctx->kabylake_headset);
+ ret = snd_soc_dapm_ignore_suspend(&rtd->card->dapm, "SoC DMIC");
+ if (ret) {
+ dev_err(rtd->dev, "SoC DMIC ignore suspend failed %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int kabylake_hdmi1_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct kbl_rt5663_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *dai = rtd->codec_dai;
+ struct kbl_hdmi_pcm *pcm;
+
+ pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ pcm->device = KBL_DPCM_AUDIO_HDMI1_PB;
+ pcm->codec_dai = dai;
+
+ list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+ return 0;
+}
+
+static int kabylake_hdmi2_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct kbl_rt5663_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *dai = rtd->codec_dai;
+ struct kbl_hdmi_pcm *pcm;
+
+ pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ pcm->device = KBL_DPCM_AUDIO_HDMI2_PB;
+ pcm->codec_dai = dai;
+
+ list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+ return 0;
+}
+
+static int kabylake_hdmi3_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct kbl_rt5663_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *dai = rtd->codec_dai;
+ struct kbl_hdmi_pcm *pcm;
+
+ pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ pcm->device = KBL_DPCM_AUDIO_HDMI3_PB;
+ pcm->codec_dai = dai;
+
+ list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+ return 0;
+}
+
+static unsigned int rates[] = {
+ 48000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+};
+
+static unsigned int channels[] = {
+ 2,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+};
+
+static int kbl_fe_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ /*
+ * On this platform the supported PCM format is:
+ * 48kHz
+ * stereo
+ * 16-bit audio
+ */
+
+ runtime->hw.channels_max = 2;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
+
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+
+ return 0;
+}
+
+static const struct snd_soc_ops kabylake_rt5663_fe_ops = {
+ .startup = kbl_fe_startup,
+};
+
+static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+ /* The ADSP will convert the FE rate to 48k, stereo */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+ /* set SSP1 to 24 bit */
+ snd_mask_none(fmt);
+ snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+
+ return 0;
+}
+
+static int kabylake_rt5663_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int ret;
+
+ ret = snd_soc_dai_set_sysclk(codec_dai,
+ RT5663_SCLK_S_MCLK, 24576000, SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ dev_err(rtd->dev, "snd_soc_dai_set_sysclk err = %d\n", ret);
+
+ /* use ASRC for internal clocks, as the PLL rate isn't a multiple of BCLK */
+ rt5663_sel_asrc_clk_src(codec_dai->codec, RT5663_DA_STEREO_FILTER, 1);
+
+ return ret;
+}
+
+static struct snd_soc_ops kabylake_rt5663_ops = {
+ .hw_params = kabylake_rt5663_hw_params,
+};
+
+static int kabylake_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ if (params_channels(params) == 2 || DMIC_CH(dmic_constraints) == 2)
+ channels->min = channels->max = 2;
+ else
+ channels->min = channels->max = 4;
+
+ return 0;
+}
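A brief aside, not part of the patch: DMIC_CH() used in the fixup above simply picks the last, and therefore largest, entry of a hw-constraint list, so the back end only goes to 4 channels when the platform data allows it. A minimal sketch with a parenthesized variant of the same macro:

    #include <linux/kernel.h>
    #include <sound/pcm.h>

    #define DMIC_CH(p) ((p)->list[(p)->count - 1])

    /* mirrors constraints_dmic_channels below: {2, 4} */
    static const unsigned int example_dmic_channels[] = { 2, 4 };

    static const struct snd_pcm_hw_constraint_list example_dmic_constraints = {
        .count = ARRAY_SIZE(example_dmic_channels),
        .list  = example_dmic_channels,
    };

    /*
     * DMIC_CH(&example_dmic_constraints) evaluates to 4, so the fixup keeps
     * 4 channels unless the front end already negotiated stereo.
     */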
+
+static unsigned int channels_dmic[] = {
+ 2, 4,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
+ .count = ARRAY_SIZE(channels_dmic),
+ .list = channels_dmic,
+ .mask = 0,
+};
+
+static const unsigned int dmic_2ch[] = {
+ 2,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_dmic_2ch = {
+ .count = ARRAY_SIZE(dmic_2ch),
+ .list = dmic_2ch,
+ .mask = 0,
+};
+
+static int kabylake_dmic_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ runtime->hw.channels_max = DMIC_CH(dmic_constraints);
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ dmic_constraints);
+
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+}
+
+static struct snd_soc_ops kabylake_dmic_ops = {
+ .startup = kabylake_dmic_startup,
+};
+
+static unsigned int rates_16000[] = {
+ 16000,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_16000 = {
+ .count = ARRAY_SIZE(rates_16000),
+ .list = rates_16000,
+};
+
+static const unsigned int ch_mono[] = {
+ 1,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_refcap = {
+ .count = ARRAY_SIZE(ch_mono),
+ .list = ch_mono,
+};
+
+static int kabylake_refcap_startup(struct snd_pcm_substream *substream)
+{
+ substream->runtime->hw.channels_max = 1;
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_refcap);
+
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_16000);
+}
+
+static struct snd_soc_ops skylaye_refcap_ops = {
+ .startup = kabylake_refcap_startup,
+};
+
+/* kabylake digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link kabylake_dais[] = {
+ /* Front End DAI links */
+ [KBL_DPCM_AUDIO_PB] = {
+ .name = "Kbl Audio Port",
+ .stream_name = "Audio",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .init = kabylake_rt5663_fe_init,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .ops = &kabylake_rt5663_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_CP] = {
+ .name = "Kbl Audio Capture Port",
+ .stream_name = "Audio Record",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_capture = 1,
+ .ops = &kabylake_rt5663_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_REF_CP] = {
+ .name = "Kbl Audio Reference cap",
+ .stream_name = "Wake on Voice",
+ .cpu_dai_name = "Reference Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &skylaye_refcap_ops,
+ },
+ [KBL_DPCM_AUDIO_DMIC_CP] = {
+ .name = "Kbl Audio DMIC cap",
+ .stream_name = "dmiccap",
+ .cpu_dai_name = "DMIC Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &kabylake_dmic_ops,
+ },
+ [KBL_DPCM_AUDIO_HDMI1_PB] = {
+ .name = "Kbl HDMI Port1",
+ .stream_name = "Hdmi1",
+ .cpu_dai_name = "HDMI1 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI2_PB] = {
+ .name = "Kbl HDMI Port2",
+ .stream_name = "Hdmi2",
+ .cpu_dai_name = "HDMI2 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI3_PB] = {
+ .name = "Kbl HDMI Port3",
+ .stream_name = "Hdmi3",
+ .cpu_dai_name = "HDMI3 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .init = NULL,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+
+ /* Back End DAI links */
+ {
+ /* SSP0 - Codec */
+ .name = "SSP0-Codec",
+ .id = 0,
+ .cpu_dai_name = "SSP0 Pin",
+ .platform_name = "0000:00:1f.3",
+ .no_pcm = 1,
+ .codecs = max98927_codec_components,
+ .num_codecs = ARRAY_SIZE(max98927_codec_components),
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = kabylake_ssp_fixup,
+ .dpcm_playback = 1,
+ },
+ {
+ /* SSP1 - Codec */
+ .name = "SSP1-Codec",
+ .id = 1,
+ .cpu_dai_name = "SSP1 Pin",
+ .platform_name = "0000:00:1f.3",
+ .no_pcm = 1,
+ .codec_name = "i2c-10EC5663:00",
+ .codec_dai_name = KBL_REALTEK_CODEC_DAI,
+ .init = kabylake_rt5663_codec_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = kabylake_ssp_fixup,
+ .ops = &kabylake_rt5663_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ {
+ .name = "dmic01",
+ .id = 2,
+ .cpu_dai_name = "DMIC01 Pin",
+ .codec_name = "dmic-codec",
+ .codec_dai_name = "dmic-hifi",
+ .platform_name = "0000:00:1f.3",
+ .be_hw_params_fixup = kabylake_dmic_fixup,
+ .ignore_suspend = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp1",
+ .id = 3,
+ .cpu_dai_name = "iDisp1 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi1",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = kabylake_hdmi1_init,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp2",
+ .id = 4,
+ .cpu_dai_name = "iDisp2 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi2",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi2_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp3",
+ .id = 5,
+ .cpu_dai_name = "iDisp3 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi3",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi3_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+};
+
+#define NAME_SIZE 32
+static int kabylake_card_late_probe(struct snd_soc_card *card)
+{
+ struct kbl_rt5663_private *ctx = snd_soc_card_get_drvdata(card);
+ struct kbl_hdmi_pcm *pcm;
+ int err, i = 0;
+ char jack_name[NAME_SIZE];
+
+ list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+ snprintf(jack_name, sizeof(jack_name),
+ "HDMI/DP, pcm=%d Jack", pcm->device);
+ err = snd_soc_card_jack_new(card, jack_name,
+ SND_JACK_AVOUT, &skylake_hdmi[i],
+ NULL, 0);
+
+ if (err)
+ return err;
+
+ err = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device,
+ &skylake_hdmi[i]);
+ if (err < 0)
+ return err;
+
+ i++;
+ }
+
+ return 0;
+}
+
+/* kabylake audio machine driver for RT5663 + MAX98927 */
+static struct snd_soc_card kabylake_audio_card = {
+ .name = "kblrt5663max",
+ .owner = THIS_MODULE,
+ .dai_link = kabylake_dais,
+ .num_links = ARRAY_SIZE(kabylake_dais),
+ .controls = kabylake_controls,
+ .num_controls = ARRAY_SIZE(kabylake_controls),
+ .dapm_widgets = kabylake_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(kabylake_widgets),
+ .dapm_routes = kabylake_map,
+ .num_dapm_routes = ARRAY_SIZE(kabylake_map),
+ .codec_conf = max98927_codec_conf,
+ .num_configs = ARRAY_SIZE(max98927_codec_conf),
+ .fully_routed = true,
+ .late_probe = kabylake_card_late_probe,
+};
+
+static int kabylake_audio_probe(struct platform_device *pdev)
+{
+ struct kbl_rt5663_private *ctx;
+ struct skl_machine_pdata *pdata;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+
+ kabylake_audio_card.dev = &pdev->dev;
+ snd_soc_card_set_drvdata(&kabylake_audio_card, ctx);
+
+ pdata = dev_get_drvdata(&pdev->dev);
+ if (pdata)
+ dmic_constraints = pdata->dmic_num == 2 ?
+ &constraints_dmic_2ch : &constraints_dmic_channels;
+
+ return devm_snd_soc_register_card(&pdev->dev, &kabylake_audio_card);
+}
+
+static const struct platform_device_id kbl_board_ids[] = {
+ { .name = "kbl_rt5663_m98927" },
+ { }
+};
+
+static struct platform_driver kabylake_audio = {
+ .probe = kabylake_audio_probe,
+ .driver = {
+ .name = "kbl_rt5663_m98927",
+ .pm = &snd_soc_pm_ops,
+ },
+ .id_table = kbl_board_ids,
+};
+
+module_platform_driver(kabylake_audio)
+
+/* Module information */
+MODULE_DESCRIPTION("Audio Machine driver-RT5663 & MAX98927 in I2S mode");
+MODULE_AUTHOR("Naveen M <[email protected]>");
+MODULE_AUTHOR("Harsha Priya <[email protected]>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kbl_rt5663_m98927");
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
new file mode 100644
index 000000000000..3fe4a0807095
--- /dev/null
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -0,0 +1,640 @@
+/*
+ * Intel Kabylake I2S Machine Driver with MAXIM98927
+ * RT5514 and RT5663 Codecs
+ *
+ * Copyright (C) 2017, Intel Corporation. All rights reserved.
+ *
+ * Modified from:
+ * Intel Kabylake I2S Machine driver supporting MAXIM98927 and
+ * RT5663 codecs
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "../../codecs/rt5514.h"
+#include "../../codecs/rt5663.h"
+#include "../../codecs/hdac_hdmi.h"
+#include "../skylake/skl.h"
+
+#define KBL_REALTEK_CODEC_DAI "rt5663-aif"
+#define KBL_REALTEK_DMIC_CODEC_DAI "rt5514-aif1"
+#define KBL_MAXIM_CODEC_DAI "max98927-aif1"
+#define MAXIM_DEV0_NAME "i2c-MX98927:00"
+#define MAXIM_DEV1_NAME "i2c-MX98927:01"
+#define RT5514_DEV_NAME "i2c-10EC5514:00"
+#define RT5663_DEV_NAME "i2c-10EC5663:00"
+#define RT5514_AIF1_BCLK_FREQ (48000 * 8 * 16)
+#define RT5514_AIF1_SYSCLK_FREQ 12288000
+#define NAME_SIZE 32
+
+#define DMIC_CH(p) p->list[p->count-1]
+
+static struct snd_soc_card kabylake_audio_card;
+static const struct snd_pcm_hw_constraint_list *dmic_constraints;
+
+struct kbl_hdmi_pcm {
+ struct list_head head;
+ struct snd_soc_dai *codec_dai;
+ int device;
+};
+
+struct kbl_codec_private {
+ struct snd_soc_jack kabylake_headset;
+ struct list_head hdmi_pcm_list;
+ struct snd_soc_jack kabylake_hdmi[2];
+};
+
+enum {
+ KBL_DPCM_AUDIO_PB = 0,
+ KBL_DPCM_AUDIO_CP,
+ KBL_DPCM_AUDIO_DMIC_CP,
+ KBL_DPCM_AUDIO_HDMI1_PB,
+ KBL_DPCM_AUDIO_HDMI2_PB,
+};
+
+static const struct snd_kcontrol_new kabylake_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Left Spk"),
+ SOC_DAPM_PIN_SWITCH("Right Spk"),
+ SOC_DAPM_PIN_SWITCH("DMIC"),
+};
+
+static const struct snd_soc_dapm_widget kabylake_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_SPK("Left Spk", NULL),
+ SND_SOC_DAPM_SPK("Right Spk", NULL),
+ SND_SOC_DAPM_MIC("DMIC", NULL),
+ SND_SOC_DAPM_SPK("DP", NULL),
+ SND_SOC_DAPM_SPK("HDMI", NULL),
+};
+
+static const struct snd_soc_dapm_route kabylake_map[] = {
+ /* Headphones */
+ { "Headphone Jack", NULL, "HPOL" },
+ { "Headphone Jack", NULL, "HPOR" },
+
+ /* speaker */
+ { "Left Spk", NULL, "Left BE_OUT" },
+ { "Right Spk", NULL, "Right BE_OUT" },
+
+ /* other jacks */
+ { "IN1P", NULL, "Headset Mic" },
+ { "IN1N", NULL, "Headset Mic" },
+
+ { "HDMI", NULL, "hif5 Output" },
+ { "DP", NULL, "hif6 Output" },
+
+ /* CODEC BE connections */
+ { "Left HiFi Playback", NULL, "ssp0 Tx" },
+ { "Right HiFi Playback", NULL, "ssp0 Tx" },
+ { "ssp0 Tx", NULL, "codec0_out" },
+
+ { "AIF Playback", NULL, "ssp1 Tx" },
+ { "ssp1 Tx", NULL, "codec1_out" },
+
+ { "codec0_in", NULL, "ssp1 Rx" },
+ { "ssp1 Rx", NULL, "AIF Capture" },
+
+ { "codec1_in", NULL, "ssp0 Rx" },
+ { "ssp0 Rx", NULL, "AIF1 Capture" },
+
+ /* DMIC */
+ { "DMIC1L", NULL, "DMIC" },
+ { "DMIC1R", NULL, "DMIC" },
+ { "DMIC2L", NULL, "DMIC" },
+ { "DMIC2R", NULL, "DMIC" },
+
+ { "hifi2", NULL, "iDisp2 Tx" },
+ { "iDisp2 Tx", NULL, "iDisp2_out" },
+ { "hifi1", NULL, "iDisp1 Tx" },
+ { "iDisp1 Tx", NULL, "iDisp1_out" },
+};
+
+static struct snd_soc_codec_conf max98927_codec_conf[] = {
+ {
+ .dev_name = MAXIM_DEV0_NAME,
+ .name_prefix = "Right",
+ },
+ {
+ .dev_name = MAXIM_DEV1_NAME,
+ .name_prefix = "Left",
+ },
+};
+
+static struct snd_soc_dai_link_component ssp0_codec_components[] = {
+ { /* Left */
+ .name = MAXIM_DEV0_NAME,
+ .dai_name = KBL_MAXIM_CODEC_DAI,
+ },
+ { /* Right */
+ .name = MAXIM_DEV1_NAME,
+ .dai_name = KBL_MAXIM_CODEC_DAI,
+ },
+ { /* dmic */
+ .name = RT5514_DEV_NAME,
+ .dai_name = KBL_REALTEK_DMIC_CODEC_DAI,
+ },
+};
+
+static int kabylake_rt5663_fe_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dapm_context *dapm;
+ struct snd_soc_component *component = rtd->cpu_dai->component;
+ int ret;
+
+ dapm = snd_soc_component_get_dapm(component);
+ ret = snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
+ if (ret)
+ dev_err(rtd->dev, "Ref Cap -Ignore suspend failed = %d\n", ret);
+
+ return ret;
+}
+
+static int kabylake_rt5663_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int ret;
+ struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_codec *codec = rtd->codec;
+
+ /*
+ * Headset buttons map to the Google Reference headset.
+ * These can be configured by userspace.
+ */
+ ret = snd_soc_card_jack_new(&kabylake_audio_card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3, &ctx->kabylake_headset,
+ NULL, 0);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack creation failed %d\n", ret);
+ return ret;
+ }
+
+ rt5663_set_jack_detect(codec, &ctx->kabylake_headset);
+
+ ret = snd_soc_dapm_ignore_suspend(&rtd->card->dapm, "DMIC");
+ if (ret)
+ dev_err(rtd->dev, "DMIC - Ignore suspend failed = %d\n", ret);
+
+ return ret;
+}
+
+static int kabylake_hdmi_init(struct snd_soc_pcm_runtime *rtd, int device)
+{
+ struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *dai = rtd->codec_dai;
+ struct kbl_hdmi_pcm *pcm;
+
+ pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ pcm->device = device;
+ pcm->codec_dai = dai;
+
+ list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+ return 0;
+}
+
+static int kabylake_hdmi1_init(struct snd_soc_pcm_runtime *rtd)
+{
+ return kabylake_hdmi_init(rtd, KBL_DPCM_AUDIO_HDMI1_PB);
+}
+
+static int kabylake_hdmi2_init(struct snd_soc_pcm_runtime *rtd)
+{
+ return kabylake_hdmi_init(rtd, KBL_DPCM_AUDIO_HDMI2_PB);
+}
+
+static const unsigned int rates[] = {
+ 48000,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+};
+
+static const unsigned int channels[] = {
+ 2,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+};
+
+static int kbl_fe_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ /*
+ * On this platform the PCM front end supports
+ * 48 kHz, stereo, 16-bit audio.
+ */
+
+ runtime->hw.channels_max = 2;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
+
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+
+ return 0;
+}
+
+static const struct snd_soc_ops kabylake_rt5663_fe_ops = {
+ .startup = kbl_fe_startup,
+};
+
+static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ struct snd_soc_dpcm *dpcm = container_of(
+ params, struct snd_soc_dpcm, hw_params);
+ struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link;
+ struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link;
+
+ /*
+ * The ADSP will convert the FE rate to 48k, stereo, 24 bit
+ */
+ if (!strcmp(fe_dai_link->name, "Kbl Audio Port") ||
+ !strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) {
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+ snd_mask_none(fmt);
+ snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ } else if (!strcmp(fe_dai_link->name, "Kbl Audio DMIC cap")) {
+ if (params_channels(params) == 2 ||
+ DMIC_CH(dmic_constraints) == 2)
+ channels->min = channels->max = 2;
+ else
+ channels->min = channels->max = 4;
+ }
+ /*
+ * The speakers on SSP0 support S16_LE and not S24_LE,
+ * so override the format mask here.
+ */
+ if (!strcmp(be_dai_link->name, "SSP0-Codec"))
+ snd_mask_set(fmt, SNDRV_PCM_FORMAT_S16_LE);
+
+ return 0;
+}
+
+static int kabylake_rt5663_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int ret;
+
+ /* use ASRC for internal clocks, as the PLL rate isn't a multiple of BCLK */
+ rt5663_sel_asrc_clk_src(codec_dai->codec, RT5663_DA_STEREO_FILTER, 1);
+
+ ret = snd_soc_dai_set_sysclk(codec_dai,
+ RT5663_SCLK_S_MCLK, 24576000, SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ dev_err(rtd->dev, "snd_soc_dai_set_sysclk err = %d\n", ret);
+
+ return ret;
+}
+
+static struct snd_soc_ops kabylake_rt5663_ops = {
+ .hw_params = kabylake_rt5663_hw_params,
+};
+
+static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int ret = 0, j;
+
+ for (j = 0; j < rtd->num_codecs; j++) {
+ struct snd_soc_dai *codec_dai = rtd->codec_dais[j];
+
+ if (!strcmp(codec_dai->component->name, RT5514_DEV_NAME)) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xF, 0, 8, 16);
+ if (ret < 0) {
+ dev_err(rtd->dev, "set TDM slot err:%d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ RT5514_PLL1_S_BCLK, RT5514_AIF1_BCLK_FREQ,
+ RT5514_AIF1_SYSCLK_FREQ);
+ if (ret < 0) {
+ dev_err(rtd->dev, "set bclk err: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_sysclk(codec_dai,
+ RT5514_SCLK_S_PLL1, RT5514_AIF1_SYSCLK_FREQ,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(rtd->dev, "set sclk err: %d\n", ret);
+ return ret;
+ }
+ }
+ if (!strcmp(codec_dai->component->name, MAXIM_DEV0_NAME) ||
+ !strcmp(codec_dai->component->name, MAXIM_DEV1_NAME)) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xF0, 3, 8, 16);
+ if (ret < 0) {
+ dev_err(rtd->dev, "set TDM slot err:%d\n", ret);
+ return ret;
+ }
+ }
+ }
+ return ret;
+}
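To make the TDM numbers above easier to follow: the RT5514 transmits on slots 0-3 (tx mask 0xF) and the two MAX98927 amplifiers take slots 4-7 (tx mask 0xF0, with rx mask 0x3 presumably for feedback capture), all within one 8-slot, 16-bit frame. A sketch of the implied clock arithmetic, assuming a 48 kHz frame rate; the macros are illustrative only:

    #define KBL_SSP0_FRAME_RATE  48000 /* frames per second */
    #define KBL_SSP0_TDM_SLOTS   8     /* slots per frame */
    #define KBL_SSP0_SLOT_WIDTH  16    /* bits per slot */

    /*
     * 48000 * 8 * 16 = 6144000 Hz, i.e. RT5514_AIF1_BCLK_FREQ; the RT5514
     * PLL then doubles it to the 12.288 MHz RT5514_AIF1_SYSCLK_FREQ used as
     * sysclk above.
     */
    #define KBL_SSP0_BCLK \
        (KBL_SSP0_FRAME_RATE * KBL_SSP0_TDM_SLOTS * KBL_SSP0_SLOT_WIDTH)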
+
+static struct snd_soc_ops kabylake_ssp0_ops = {
+ .hw_params = kabylake_ssp0_hw_params,
+};
+
+static const unsigned int channels_dmic[] = {
+ 4,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
+ .count = ARRAY_SIZE(channels_dmic),
+ .list = channels_dmic,
+ .mask = 0,
+};
+
+static const unsigned int dmic_2ch[] = {
+ 4,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_dmic_2ch = {
+ .count = ARRAY_SIZE(dmic_2ch),
+ .list = dmic_2ch,
+ .mask = 0,
+};
+
+static int kabylake_dmic_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ runtime->hw.channels_max = DMIC_CH(dmic_constraints);
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ dmic_constraints);
+
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+}
+
+static struct snd_soc_ops kabylake_dmic_ops = {
+ .startup = kabylake_dmic_startup,
+};
+
+/* kabylake digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link kabylake_dais[] = {
+ /* Front End DAI links */
+ [KBL_DPCM_AUDIO_PB] = {
+ .name = "Kbl Audio Port",
+ .stream_name = "Audio",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .init = kabylake_rt5663_fe_init,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .ops = &kabylake_rt5663_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_CP] = {
+ .name = "Kbl Audio Capture Port",
+ .stream_name = "Audio Record",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_capture = 1,
+ .ops = &kabylake_rt5663_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_DMIC_CP] = {
+ .name = "Kbl Audio DMIC cap",
+ .stream_name = "dmiccap",
+ .cpu_dai_name = "DMIC Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &kabylake_dmic_ops,
+ },
+ [KBL_DPCM_AUDIO_HDMI1_PB] = {
+ .name = "Kbl HDMI Port1",
+ .stream_name = "Hdmi1",
+ .cpu_dai_name = "HDMI1 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI2_PB] = {
+ .name = "Kbl HDMI Port2",
+ .stream_name = "Hdmi2",
+ .cpu_dai_name = "HDMI2 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ /* Back End DAI links */
+ /* single back end DAI for both the MAX98927 speakers and the RT5514 DMIC */
+ {
+ /* SSP0 - Codec */
+ .name = "SSP0-Codec",
+ .id = 0,
+ .cpu_dai_name = "SSP0 Pin",
+ .platform_name = "0000:00:1f.3",
+ .no_pcm = 1,
+ .codecs = ssp0_codec_components,
+ .num_codecs = ARRAY_SIZE(ssp0_codec_components),
+ .dai_fmt = SND_SOC_DAIFMT_DSP_B |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = kabylake_ssp_fixup,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ops = &kabylake_ssp0_ops,
+ },
+ {
+ .name = "SSP1-Codec",
+ .id = 1,
+ .cpu_dai_name = "SSP1 Pin",
+ .platform_name = "0000:00:1f.3",
+ .no_pcm = 1,
+ .codec_name = RT5663_DEV_NAME,
+ .codec_dai_name = KBL_REALTEK_CODEC_DAI,
+ .init = kabylake_rt5663_codec_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = kabylake_ssp_fixup,
+ .ops = &kabylake_rt5663_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ {
+ .name = "iDisp1",
+ .id = 3,
+ .cpu_dai_name = "iDisp1 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi1",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = kabylake_hdmi1_init,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp2",
+ .id = 4,
+ .cpu_dai_name = "iDisp2 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi2",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi2_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+};
+
+static int kabylake_card_late_probe(struct snd_soc_card *card)
+{
+ struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(card);
+ struct kbl_hdmi_pcm *pcm;
+ int err, i = 0;
+ char jack_name[NAME_SIZE];
+
+ list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+ snprintf(jack_name, sizeof(jack_name),
+ "HDMI/DP, pcm=%d Jack", pcm->device);
+ err = snd_soc_card_jack_new(card, jack_name,
+ SND_JACK_AVOUT, &ctx->kabylake_hdmi[i],
+ NULL, 0);
+
+ if (err)
+ return err;
+ err = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device,
+ &ctx->kabylake_hdmi[i]);
+ if (err < 0)
+ return err;
+ i++;
+ }
+
+ return 0;
+}
+
+/*
+ * kabylake audio machine driver for MAX98927 + RT5514 + RT5663
+ */
+static struct snd_soc_card kabylake_audio_card = {
+ .name = "kbl_r5514_5663_max",
+ .owner = THIS_MODULE,
+ .dai_link = kabylake_dais,
+ .num_links = ARRAY_SIZE(kabylake_dais),
+ .controls = kabylake_controls,
+ .num_controls = ARRAY_SIZE(kabylake_controls),
+ .dapm_widgets = kabylake_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(kabylake_widgets),
+ .dapm_routes = kabylake_map,
+ .num_dapm_routes = ARRAY_SIZE(kabylake_map),
+ .codec_conf = max98927_codec_conf,
+ .num_configs = ARRAY_SIZE(max98927_codec_conf),
+ .fully_routed = true,
+ .late_probe = kabylake_card_late_probe,
+};
+
+static int kabylake_audio_probe(struct platform_device *pdev)
+{
+ struct kbl_codec_private *ctx;
+ struct skl_machine_pdata *pdata;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+
+ kabylake_audio_card.dev = &pdev->dev;
+ snd_soc_card_set_drvdata(&kabylake_audio_card, ctx);
+
+ pdata = dev_get_drvdata(&pdev->dev);
+ if (pdata)
+ dmic_constraints = pdata->dmic_num == 2 ?
+ &constraints_dmic_2ch : &constraints_dmic_channels;
+
+ return devm_snd_soc_register_card(&pdev->dev, &kabylake_audio_card);
+}
+
+static const struct platform_device_id kbl_board_ids[] = {
+ { .name = "kbl_r5514_5663_max" },
+ { }
+};
+
+static struct platform_driver kabylake_audio = {
+ .probe = kabylake_audio_probe,
+ .driver = {
+ .name = "kbl_r5514_5663_max",
+ .pm = &snd_soc_pm_ops,
+ },
+ .id_table = kbl_board_ids,
+};
+
+module_platform_driver(kabylake_audio)
+
+/* Module information */
+MODULE_DESCRIPTION("Audio Machine driver-RT5663 RT5514 & MAX98927");
+MODULE_AUTHOR("Harsha Priya <[email protected]>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kbl_r5514_5663_max");
diff --git a/sound/soc/intel/boards/skl_nau88l25_max98357a.c b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
index 3b12bc1fa518..5ed0aa27b467 100644
--- a/sound/soc/intel/boards/skl_nau88l25_max98357a.c
+++ b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
@@ -266,21 +266,21 @@ static int skylake_nau8825_fe_init(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static unsigned int rates[] = {
+static const unsigned int rates[] = {
48000,
};
-static struct snd_pcm_hw_constraint_list constraints_rates = {
+static const struct snd_pcm_hw_constraint_list constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
};
-static unsigned int channels[] = {
+static const unsigned int channels[] = {
2,
};
-static struct snd_pcm_hw_constraint_list constraints_channels = {
+static const struct snd_pcm_hw_constraint_list constraints_channels = {
.count = ARRAY_SIZE(channels),
.list = channels,
.mask = 0,
@@ -348,11 +348,11 @@ static int skylake_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
return 0;
}
-static unsigned int channels_dmic[] = {
+static const unsigned int channels_dmic[] = {
2, 4,
};
-static struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
+static const struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
.count = ARRAY_SIZE(channels_dmic),
.list = channels_dmic,
.mask = 0,
@@ -384,11 +384,11 @@ static const struct snd_soc_ops skylake_dmic_ops = {
.startup = skylake_dmic_startup,
};
-static unsigned int rates_16000[] = {
+static const unsigned int rates_16000[] = {
16000,
};
-static struct snd_pcm_hw_constraint_list constraints_16000 = {
+static const struct snd_pcm_hw_constraint_list constraints_16000 = {
.count = ARRAY_SIZE(rates_16000),
.list = rates_16000,
};
diff --git a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
index eb7751b0599b..01b8b140bb08 100644
--- a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
+++ b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
@@ -297,21 +297,21 @@ static int skylake_nau8825_fe_init(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static unsigned int rates[] = {
+static const unsigned int rates[] = {
48000,
};
-static struct snd_pcm_hw_constraint_list constraints_rates = {
+static const struct snd_pcm_hw_constraint_list constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
};
-static unsigned int channels[] = {
+static const unsigned int channels[] = {
2,
};
-static struct snd_pcm_hw_constraint_list constraints_channels = {
+static const struct snd_pcm_hw_constraint_list constraints_channels = {
.count = ARRAY_SIZE(channels),
.list = channels,
.mask = 0,
@@ -397,11 +397,11 @@ static const struct snd_soc_ops skylake_nau8825_ops = {
.hw_params = skylake_nau8825_hw_params,
};
-static unsigned int channels_dmic[] = {
+static const unsigned int channels_dmic[] = {
2, 4,
};
-static struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
+static const struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
.count = ARRAY_SIZE(channels_dmic),
.list = channels_dmic,
.mask = 0,
@@ -433,11 +433,11 @@ static const struct snd_soc_ops skylake_dmic_ops = {
.startup = skylake_dmic_startup,
};
-static unsigned int rates_16000[] = {
+static const unsigned int rates_16000[] = {
16000,
};
-static struct snd_pcm_hw_constraint_list constraints_16000 = {
+static const struct snd_pcm_hw_constraint_list constraints_16000 = {
.count = ARRAY_SIZE(rates_16000),
.list = rates_16000,
};
diff --git a/sound/soc/intel/boards/skl_rt286.c b/sound/soc/intel/boards/skl_rt286.c
index f5ab7b8d51d1..2bc4cfca594e 100644
--- a/sound/soc/intel/boards/skl_rt286.c
+++ b/sound/soc/intel/boards/skl_rt286.c
@@ -43,6 +43,7 @@ struct skl_rt286_private {
enum {
SKL_DPCM_AUDIO_PB = 0,
+ SKL_DPCM_AUDIO_DB_PB,
SKL_DPCM_AUDIO_CP,
SKL_DPCM_AUDIO_REF_CP,
SKL_DPCM_AUDIO_DMIC_CP,
@@ -165,21 +166,21 @@ static int skylake_hdmi_init(struct snd_soc_pcm_runtime *rtd)
return 0;
}
-static unsigned int rates[] = {
+static const unsigned int rates[] = {
48000,
};
-static struct snd_pcm_hw_constraint_list constraints_rates = {
+static const struct snd_pcm_hw_constraint_list constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
};
-static unsigned int channels[] = {
+static const unsigned int channels[] = {
2,
};
-static struct snd_pcm_hw_constraint_list constraints_channels = {
+static const struct snd_pcm_hw_constraint_list constraints_channels = {
.count = ARRAY_SIZE(channels),
.list = channels,
.mask = 0,
@@ -264,11 +265,11 @@ static int skylake_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
return 0;
}
-static unsigned int channels_dmic[] = {
+static const unsigned int channels_dmic[] = {
2, 4,
};
-static struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
+static const struct snd_pcm_hw_constraint_list constraints_dmic_channels = {
.count = ARRAY_SIZE(channels_dmic),
.list = channels_dmic,
.mask = 0,
@@ -310,6 +311,23 @@ static struct snd_soc_dai_link skylake_rt286_dais[] = {
.dpcm_playback = 1,
.ops = &skylake_rt286_fe_ops,
},
+ [SKL_DPCM_AUDIO_DB_PB] = {
+ .name = "Skl Deepbuffer Port",
+ .stream_name = "Deep Buffer Audio",
+ .cpu_dai_name = "Deepbuffer Pin",
+ .platform_name = "0000:00:1f.3",
+ .nonatomic = 1,
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST
+ },
+ .dpcm_playback = 1,
+ .ops = &skylake_rt286_fe_ops,
+
+ },
[SKL_DPCM_AUDIO_CP] = {
.name = "Skl Audio Capture Port",
.stream_name = "Audio Record",
diff --git a/sound/soc/intel/common/sst-acpi.h b/sound/soc/intel/common/sst-acpi.h
index 214e000667ae..afe9b87b8bd5 100644
--- a/sound/soc/intel/common/sst-acpi.h
+++ b/sound/soc/intel/common/sst-acpi.h
@@ -43,6 +43,9 @@ static inline bool sst_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
/* acpi match */
struct sst_acpi_mach *sst_acpi_find_machine(struct sst_acpi_mach *machines);
+/* acpi check hid */
+bool sst_acpi_check_hid(const u8 hid[ACPI_ID_LEN]);
+
/* Descriptor for SST ASoC machine driver */
struct sst_acpi_mach {
/* ACPI ID for the matching machine driver. Audio codec for instance */
@@ -55,5 +58,25 @@ struct sst_acpi_mach {
/* board name */
const char *board;
struct sst_acpi_mach * (*machine_quirk)(void *arg);
+ const void *quirk_data;
void *pdata;
};
+
+#define SST_ACPI_MAX_CODECS 3
+
+/**
+ * struct sst_codecs: Structure holding the secondary codecs, in addition to
+ * the matched one; this data is passed to the quirk function to match
+ * against the ACPI-detected devices
+ *
+ * @num_codecs: number of secondary codecs used in the platform
+ * @codecs: holds the codec IDs
+ *
+ */
+struct sst_codecs {
+ int num_codecs;
+ u8 codecs[SST_ACPI_MAX_CODECS][ACPI_ID_LEN];
+};
+
+/* check all codecs */
+struct sst_acpi_mach *sst_acpi_codec_list(void *arg);
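For context, a hedged sketch of how a machine-table entry is expected to use the new quirk hook and quirk_data; the codec list mirrors the kbl_r5514_5663_max pairing added later in this series, while the table name and firmware filename here are illustrative assumptions:

    static struct sst_codecs kbl_5663_5514_codecs_example = {
        .num_codecs = 2,
        .codecs = {"10EC5663", "10EC5514"}
    };

    static struct sst_acpi_mach kbl_machines_example[] = {
        {
            .id = "10EC5663",
            .drv_name = "kbl_r5514_5663_max",
            .fw_filename = "intel/dsp_fw_kbl.bin", /* assumed filename */
            .machine_quirk = sst_acpi_codec_list,
            .quirk_data = &kbl_5663_5514_codecs_example,
        },
        {}
    };

sst_acpi_find_machine() only returns such an entry when sst_acpi_codec_list() confirms that every extra HID in the list is also present in the ACPI tables.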
diff --git a/sound/soc/intel/common/sst-dsp-priv.h b/sound/soc/intel/common/sst-dsp-priv.h
index d13c84364c3c..8734040d64d3 100644
--- a/sound/soc/intel/common/sst-dsp-priv.h
+++ b/sound/soc/intel/common/sst-dsp-priv.h
@@ -77,6 +77,10 @@ struct sst_addr {
u32 dram_offset;
u32 dsp_iram_offset;
u32 dsp_dram_offset;
+ u32 sram0_base;
+ u32 sram1_base;
+ u32 w0_stat_sz;
+ u32 w0_up_sz;
void __iomem *lpe;
void __iomem *shim;
void __iomem *pci_cfg;
diff --git a/sound/soc/intel/common/sst-match-acpi.c b/sound/soc/intel/common/sst-match-acpi.c
index 1070f3ad23e5..56d26f36a3cb 100644
--- a/sound/soc/intel/common/sst-match-acpi.c
+++ b/sound/soc/intel/common/sst-match-acpi.c
@@ -63,16 +63,33 @@ static acpi_status sst_acpi_mach_match(acpi_handle handle, u32 level,
return AE_OK;
}
+bool sst_acpi_check_hid(const u8 hid[ACPI_ID_LEN])
+{
+ acpi_status status;
+ bool found = false;
+
+ status = acpi_get_devices(hid, sst_acpi_mach_match, &found, NULL);
+
+ if (ACPI_FAILURE(status))
+ return false;
+
+ return found;
+}
+EXPORT_SYMBOL_GPL(sst_acpi_check_hid);
+
struct sst_acpi_mach *sst_acpi_find_machine(struct sst_acpi_mach *machines)
{
struct sst_acpi_mach *mach;
- bool found = false;
- for (mach = machines; mach->id[0]; mach++)
- if (ACPI_SUCCESS(acpi_get_devices(mach->id,
- sst_acpi_mach_match,
- &found, NULL)) && found)
- return mach;
+ for (mach = machines; mach->id[0]; mach++) {
+ if (sst_acpi_check_hid(mach->id) == true) {
+ if (mach->machine_quirk == NULL)
+ return mach;
+
+ if (mach->machine_quirk(mach) != NULL)
+ return mach;
+ }
+ }
return NULL;
}
EXPORT_SYMBOL_GPL(sst_acpi_find_machine);
@@ -134,5 +151,23 @@ bool sst_acpi_find_package_from_hid(const u8 hid[ACPI_ID_LEN],
}
EXPORT_SYMBOL_GPL(sst_acpi_find_package_from_hid);
+struct sst_acpi_mach *sst_acpi_codec_list(void *arg)
+{
+ struct sst_acpi_mach *mach = arg;
+ struct sst_codecs *codec_list = (struct sst_codecs *) mach->quirk_data;
+ int i;
+
+ if (mach->quirk_data == NULL)
+ return mach;
+
+ for (i = 0; i < codec_list->num_codecs; i++) {
+ if (sst_acpi_check_hid(codec_list->codecs[i]) != true)
+ return NULL;
+ }
+
+ return mach;
+}
+EXPORT_SYMBOL_GPL(sst_acpi_codec_list);
+
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Common ACPI Match module");
diff --git a/sound/soc/intel/skylake/Makefile b/sound/soc/intel/skylake/Makefile
index 60fbc9bbe473..e7d77722d560 100644
--- a/sound/soc/intel/skylake/Makefile
+++ b/sound/soc/intel/skylake/Makefile
@@ -1,6 +1,10 @@
snd-soc-skl-objs := skl.o skl-pcm.o skl-nhlt.o skl-messages.o \
skl-topology.o
+ifdef CONFIG_DEBUG_FS
+ snd-soc-skl-objs += skl-debug.o
+endif
+
obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl.o
# Skylake IPC Support
diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c
index f5e7dbb1ba39..cf11b84888b9 100644
--- a/sound/soc/intel/skylake/bxt-sst.c
+++ b/sound/soc/intel/skylake/bxt-sst.c
@@ -573,6 +573,10 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
sst->fw_ops = bxt_fw_ops;
sst->addr.lpe = mmio_base;
sst->addr.shim = mmio_base;
+ sst->addr.sram0_base = BXT_ADSP_SRAM0_BASE;
+ sst->addr.sram1_base = BXT_ADSP_SRAM1_BASE;
+ sst->addr.w0_stat_sz = SKL_ADSP_W0_STAT_SZ;
+ sst->addr.w0_up_sz = SKL_ADSP_W0_UP_SZ;
sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c
new file mode 100644
index 000000000000..dc20d91f62e6
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-debug.c
@@ -0,0 +1,261 @@
+/*
+ * skl-debug.c - Debugfs for skl driver
+ *
+ * Copyright (C) 2016-17 Intel Corp
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include "skl.h"
+#include "skl-sst-dsp.h"
+#include "skl-sst-ipc.h"
+#include "skl-tplg-interface.h"
+#include "skl-topology.h"
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+
+#define MOD_BUF PAGE_SIZE
+#define FW_REG_BUF PAGE_SIZE
+#define FW_REG_SIZE 0x60
+
+struct skl_debug {
+ struct skl *skl;
+ struct device *dev;
+
+ struct dentry *fs;
+ struct dentry *modules;
+ u8 fw_read_buff[FW_REG_BUF];
+};
+
+static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf,
+ int max_pin, ssize_t size, bool direction)
+{
+ int i;
+ ssize_t ret = 0;
+
+ for (i = 0; i < max_pin; i++)
+ ret += snprintf(buf + size, MOD_BUF - size,
+ "%s %d\n\tModule %d\n\tInstance %d\n\t"
+ "In-used %s\n\tType %s\n"
+ "\tState %d\n\tIndex %d\n",
+ direction ? "Input Pin:" : "Output Pin:",
+ i, m_pin[i].id.module_id,
+ m_pin[i].id.instance_id,
+ m_pin[i].in_use ? "Used" : "Unused",
+ m_pin[i].is_dynamic ? "Dynamic" : "Static",
+ m_pin[i].pin_state, i);
+ return ret;
+}
+
+static ssize_t skl_print_fmt(struct skl_module_fmt *fmt, char *buf,
+ ssize_t size, bool direction)
+{
+ return snprintf(buf + size, MOD_BUF - size,
+ "%s\n\tCh %d\n\tFreq %d\n\tBit depth %d\n\t"
+ "Valid bit depth %d\n\tCh config %#x\n\tInterleaving %d\n\t"
+ "Sample Type %d\n\tCh Map %#x\n",
+ direction ? "Input Format:" : "Output Format:",
+ fmt->channels, fmt->s_freq, fmt->bit_depth,
+ fmt->valid_bit_depth, fmt->ch_cfg,
+ fmt->interleaving_style, fmt->sample_type,
+ fmt->ch_map);
+}
+
+static ssize_t module_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct skl_module_cfg *mconfig = file->private_data;
+ char *buf;
+ ssize_t ret;
+
+ buf = kzalloc(MOD_BUF, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = snprintf(buf, MOD_BUF, "Module:\n\tUUID %pUL\n\tModule id %d\n"
+ "\tInstance id %d\n\tPvt_id %d\n", mconfig->guid,
+ mconfig->id.module_id, mconfig->id.instance_id,
+ mconfig->id.pvt_id);
+
+ ret += snprintf(buf + ret, MOD_BUF - ret,
+ "Resources:\n\tMCPS %#x\n\tIBS %#x\n\tOBS %#x\t\n",
+ mconfig->mcps, mconfig->ibs, mconfig->obs);
+
+ ret += snprintf(buf + ret, MOD_BUF - ret,
+ "Module data:\n\tCore %d\n\tIn queue %d\n\t"
+ "Out queue %d\n\tType %s\n",
+ mconfig->core_id, mconfig->max_in_queue,
+ mconfig->max_out_queue,
+ mconfig->is_loadable ? "loadable" : "inbuilt");
+
+ ret += skl_print_fmt(mconfig->in_fmt, buf, ret, true);
+ ret += skl_print_fmt(mconfig->out_fmt, buf, ret, false);
+
+ ret += snprintf(buf + ret, MOD_BUF - ret,
+ "Fixup:\n\tParams %#x\n\tConverter %#x\n",
+ mconfig->params_fixup, mconfig->converter);
+
+ ret += snprintf(buf + ret, MOD_BUF - ret,
+ "Module Gateway:\n\tType %#x\n\tVbus %#x\n\tHW conn %#x\n\tSlot %#x\n",
+ mconfig->dev_type, mconfig->vbus_id,
+ mconfig->hw_conn_type, mconfig->time_slot);
+
+ ret += snprintf(buf + ret, MOD_BUF - ret,
+ "Pipeline:\n\tID %d\n\tPriority %d\n\tConn Type %d\n\t"
+ "Pages %#x\n", mconfig->pipe->ppl_id,
+ mconfig->pipe->pipe_priority, mconfig->pipe->conn_type,
+ mconfig->pipe->memory_pages);
+
+ ret += snprintf(buf + ret, MOD_BUF - ret,
+ "\tParams:\n\t\tHost DMA %d\n\t\tLink DMA %d\n",
+ mconfig->pipe->p_params->host_dma_id,
+ mconfig->pipe->p_params->link_dma_id);
+
+ ret += snprintf(buf + ret, MOD_BUF - ret,
+ "\tPCM params:\n\t\tCh %d\n\t\tFreq %d\n\t\tFormat %d\n",
+ mconfig->pipe->p_params->ch,
+ mconfig->pipe->p_params->s_freq,
+ mconfig->pipe->p_params->s_fmt);
+
+ ret += snprintf(buf + ret, MOD_BUF - ret,
+ "\tLink %#x\n\tStream %#x\n",
+ mconfig->pipe->p_params->linktype,
+ mconfig->pipe->p_params->stream);
+
+ ret += snprintf(buf + ret, MOD_BUF - ret,
+ "\tState %d\n\tPassthru %s\n",
+ mconfig->pipe->state,
+ mconfig->pipe->passthru ? "true" : "false");
+
+ ret += skl_print_pins(mconfig->m_in_pin, buf,
+ mconfig->max_in_queue, ret, true);
+ ret += skl_print_pins(mconfig->m_out_pin, buf,
+ mconfig->max_out_queue, ret, false);
+
+ ret += snprintf(buf + ret, MOD_BUF - ret,
+ "Other:\n\tDomain %d\n\tHomogenous Input %s\n\t"
+ "Homogenous Output %s\n\tIn Queue Mask %d\n\t"
+ "Out Queue Mask %d\n\tDMA ID %d\n\tMem Pages %d\n\t"
+ "Module Type %d\n\tModule State %d\n",
+ mconfig->domain,
+ mconfig->homogenous_inputs ? "true" : "false",
+ mconfig->homogenous_outputs ? "true" : "false",
+ mconfig->in_queue_mask, mconfig->out_queue_mask,
+ mconfig->dma_id, mconfig->mem_pages, mconfig->m_type,
+ mconfig->m_state);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations mcfg_fops = {
+ .open = simple_open,
+ .read = module_read,
+ .llseek = default_llseek,
+};
+
+void skl_debug_init_module(struct skl_debug *d,
+ struct snd_soc_dapm_widget *w,
+ struct skl_module_cfg *mconfig)
+{
+ if (!debugfs_create_file(w->name, 0444,
+ d->modules, mconfig,
+ &mcfg_fops))
+ dev_err(d->dev, "%s: module debugfs init failed\n", w->name);
+}
+
+static ssize_t fw_softreg_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct skl_debug *d = file->private_data;
+ struct sst_dsp *sst = d->skl->skl_sst->dsp;
+ size_t w0_stat_sz = sst->addr.w0_stat_sz;
+ void __iomem *in_base = sst->mailbox.in_base;
+ void __iomem *fw_reg_addr;
+ unsigned int offset;
+ char *tmp;
+ ssize_t ret = 0;
+
+ tmp = kzalloc(FW_REG_BUF, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ fw_reg_addr = in_base - w0_stat_sz;
+ memset(d->fw_read_buff, 0, FW_REG_BUF);
+
+ if (w0_stat_sz > 0)
+ __iowrite32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
+
+ for (offset = 0; offset < FW_REG_SIZE; offset += 16) {
+ ret += snprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
+ hex_dump_to_buffer(d->fw_read_buff + offset, 16, 16, 4,
+ tmp + ret, FW_REG_BUF - ret, 0);
+ ret += strlen(tmp + ret);
+
+ /* print newline for each offset */
+ if (FW_REG_BUF - ret > 0)
+ tmp[ret++] = '\n';
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, tmp, ret);
+ kfree(tmp);
+
+ return ret;
+}
+
+static const struct file_operations soft_regs_ctrl_fops = {
+ .open = simple_open,
+ .read = fw_softreg_read,
+ .llseek = default_llseek,
+};
+
+struct skl_debug *skl_debugfs_init(struct skl *skl)
+{
+ struct skl_debug *d;
+
+ d = devm_kzalloc(&skl->pci->dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return NULL;
+
+ /* create the debugfs dir with platform component's debugfs as parent */
+ d->fs = debugfs_create_dir("dsp",
+ skl->platform->component.debugfs_root);
+ if (IS_ERR(d->fs) || !d->fs) {
+ dev_err(&skl->pci->dev, "debugfs root creation failed\n");
+ return NULL;
+ }
+
+ d->skl = skl;
+ d->dev = &skl->pci->dev;
+
+ /* now create the module dir */
+ d->modules = debugfs_create_dir("modules", d->fs);
+ if (IS_ERR(d->modules) || !d->modules) {
+ dev_err(&skl->pci->dev, "modules debugfs create failed\n");
+ goto err;
+ }
+
+ if (!debugfs_create_file("fw_soft_regs_rd", 0444, d->fs, d,
+ &soft_regs_ctrl_fops)) {
+ dev_err(d->dev, "fw soft regs control debugfs init failed\n");
+ goto err;
+ }
+
+ return d;
+
+err:
+ debugfs_remove_recursive(d->fs);
+ return NULL;
+}
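A short orientation note, not part of the patch: the objects created above land under the ASoC platform component's debugfs directory, roughly as sketched below (the exact parent path depends on how the card and component register with ASoC):

    /*
     * <debugfs>/asoc/.../<platform component>/dsp/
     *     fw_soft_regs_rd        - dump of the firmware register window
     *     modules/<widget name>  - one read-only file per topology module,
     *                              created by skl_debug_init_module()
     */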
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index ab1adc0c9cc3..eca85827dbd2 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -507,6 +507,8 @@ static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
struct skl_module_cfg *mconfig,
struct skl_cpr_cfg *cpr_mconfig)
{
+ u32 dma_io_buf;
+
cpr_mconfig->gtw_cfg.node_id = skl_get_node_id(ctx, mconfig);
if (cpr_mconfig->gtw_cfg.node_id == SKL_NON_GATEWAY_CPR_NODE_ID) {
@@ -514,10 +516,29 @@ static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
return;
}
- if (SKL_CONN_SOURCE == mconfig->hw_conn_type)
- cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->obs;
- else
- cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->ibs;
+ switch (mconfig->hw_conn_type) {
+ case SKL_CONN_SOURCE:
+ if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
+ dma_io_buf = mconfig->ibs;
+ else
+ dma_io_buf = mconfig->obs;
+ break;
+
+ case SKL_CONN_SINK:
+ if (mconfig->dev_type == SKL_DEVICE_HDAHOST)
+ dma_io_buf = mconfig->obs;
+ else
+ dma_io_buf = mconfig->ibs;
+ break;
+
+ default:
+ dev_warn(ctx->dev, "wrong connection type: %d\n",
+ mconfig->hw_conn_type);
+ return;
+ }
+
+ cpr_mconfig->gtw_cfg.dma_buffer_size =
+ mconfig->dma_buffer_size * dma_io_buf;
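A hedged worked example of the new sizing rule: assuming ibs/obs are the bytes consumed/produced per 1 ms scheduling period (which the old hard-coded factor of 2 suggests), a 48 kHz stereo stream in 32-bit containers gives 48 * 2 * 4 = 384 bytes per millisecond, so a topology that sets dma_buffer_size to 2 ms ends up with:

    /*
     * Sketch only:
     *   obs = 48 frames/ms * 2 ch * 4 bytes = 384 bytes/ms
     *   gtw_cfg.dma_buffer_size = dma_buffer_size * obs = 2 * 384 = 768 bytes
     */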
cpr_mconfig->cpr_feature_mask = 0;
cpr_mconfig->gtw_cfg.config_length = 0;
@@ -707,6 +728,7 @@ static u16 skl_get_module_param_size(struct skl_sst *ctx,
return param_size;
case SKL_MODULE_TYPE_BASE_OUTFMT:
+ case SKL_MODULE_TYPE_MIC_SELECT:
case SKL_MODULE_TYPE_KPB:
return sizeof(struct skl_base_outfmt_cfg);
@@ -761,6 +783,7 @@ static int skl_set_module_format(struct skl_sst *ctx,
break;
case SKL_MODULE_TYPE_BASE_OUTFMT:
+ case SKL_MODULE_TYPE_MIC_SELECT:
case SKL_MODULE_TYPE_KPB:
skl_set_base_outfmt_format(ctx, module_config, *param_data);
break;
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index e91bbcffc856..0ebea34a4988 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -1249,12 +1249,16 @@ static int skl_platform_soc_probe(struct snd_soc_platform *platform)
pm_runtime_get_sync(platform->dev);
if ((ebus_to_hbus(ebus))->ppcap) {
+ skl->platform = platform;
+
+ /* init debugfs */
+ skl->debugfs = skl_debugfs_init(skl);
+
ret = skl_tplg_init(platform, ebus);
if (ret < 0) {
dev_err(platform->dev, "Failed to init topology!\n");
return ret;
}
- skl->platform = platform;
/* load the firmwares, since all is set */
ops = skl_get_dsp_ops(skl->pci->device);
diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c
index 155e456b7a3a..aba9ea11ac74 100644
--- a/sound/soc/intel/skylake/skl-sst.c
+++ b/sound/soc/intel/skylake/skl-sst.c
@@ -553,6 +553,11 @@ int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
sst = skl->dsp;
sst->addr.lpe = mmio_base;
sst->addr.shim = mmio_base;
+ sst->addr.sram0_base = SKL_ADSP_SRAM0_BASE;
+ sst->addr.sram1_base = SKL_ADSP_SRAM1_BASE;
+ sst->addr.w0_stat_sz = SKL_ADSP_W0_STAT_SZ;
+ sst->addr.w0_up_sz = SKL_ADSP_W0_UP_SZ;
+
sst_dsp_mailbox_init(sst, (SKL_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
SKL_ADSP_W0_UP_SZ, SKL_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 64a0f8ed33e1..68c3f121efc3 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -36,6 +36,19 @@
#define SKL_IN_DIR_BIT_MASK BIT(0)
#define SKL_PIN_COUNT_MASK GENMASK(7, 4)
+static const int mic_mono_list[] = {
+0, 1, 2, 3,
+};
+static const int mic_stereo_list[][SKL_CH_STEREO] = {
+{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3},
+};
+static const int mic_trio_list[][SKL_CH_TRIO] = {
+{0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3},
+};
+static const int mic_quatro_list[][SKL_CH_QUATRO] = {
+{0, 1, 2, 3},
+};
+
void skl_tplg_d0i3_get(struct skl *skl, enum d0i3_capability caps)
{
struct skl_d0i3_data *d0i3 = &skl->skl_sst->d0i3;
@@ -1314,6 +1327,111 @@ static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
return 0;
}
+static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
+ struct skl_module_cfg *mconfig = w->priv;
+ struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+ u32 ch_type = *((u32 *)ec->dobj.private);
+
+ if (mconfig->dmic_ch_type == ch_type)
+ ucontrol->value.enumerated.item[0] =
+ mconfig->dmic_ch_combo_index;
+ else
+ ucontrol->value.enumerated.item[0] = 0;
+
+ return 0;
+}
+
+static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig,
+ struct skl_mic_sel_config *mic_cfg, struct device *dev)
+{
+ struct skl_specific_cfg *sp_cfg = &mconfig->formats_config;
+
+ sp_cfg->caps_size = sizeof(struct skl_mic_sel_config);
+ sp_cfg->set_params = SKL_PARAM_SET;
+ sp_cfg->param_id = 0x00;
+ if (!sp_cfg->caps) {
+ sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL);
+ if (!sp_cfg->caps)
+ return -ENOMEM;
+ }
+
+ mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH;
+ mic_cfg->flags = 0;
+ memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size);
+
+ return 0;
+}
+
+static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
+ struct skl_module_cfg *mconfig = w->priv;
+ struct skl_mic_sel_config mic_cfg = {0};
+ struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+ u32 ch_type = *((u32 *)ec->dobj.private);
+ const int *list;
+ u8 in_ch, out_ch, index;
+
+ mconfig->dmic_ch_type = ch_type;
+ mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0];
+
+ /* enum control index 0 is INVALID, so no channels to be set */
+ if (mconfig->dmic_ch_combo_index == 0)
+ return 0;
+
+ /* No valid channel selection map for index 0, so offset by 1 */
+ index = mconfig->dmic_ch_combo_index - 1;
+
+ switch (ch_type) {
+ case SKL_CH_MONO:
+ if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list))
+ return -EINVAL;
+
+ list = &mic_mono_list[index];
+ break;
+
+ case SKL_CH_STEREO:
+ if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list))
+ return -EINVAL;
+
+ list = mic_stereo_list[index];
+ break;
+
+ case SKL_CH_TRIO:
+ if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list))
+ return -EINVAL;
+
+ list = mic_trio_list[index];
+ break;
+
+ case SKL_CH_QUATRO:
+ if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list))
+ return -EINVAL;
+
+ list = mic_quatro_list[index];
+ break;
+
+ default:
+ dev_err(w->dapm->dev,
+ "Invalid channel %d for mic_select module\n",
+ ch_type);
+ return -EINVAL;
+
+ }
+
+ /* the channel type enum value maps to the number of channels for that type */
+ for (out_ch = 0; out_ch < ch_type; out_ch++) {
+ in_ch = list[out_ch];
+ mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN;
+ }
+
+ return skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev);
+}
+
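A worked example of the mapping above, illustrative rather than part of the patch: with ch_type == SKL_CH_STEREO and dmic_ch_combo_index == 4, index becomes 3 and mic_stereo_list[3] is {1, 2}, so the loop ends up doing:

    mic_cfg.blob[0][1] = SKL_DEFAULT_MIC_SEL_GAIN; /* output 0 <- DMIC 1 */
    mic_cfg.blob[1][2] = SKL_DEFAULT_MIC_SEL_GAIN; /* output 1 <- DMIC 2 */

Each output channel of the mic-select module is therefore fed from exactly one physical DMIC at the default gain (0x3FF), with all other matrix entries left at zero.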
/*
* Fill the dma id for host and link. In case of passthrough
* pipeline, this will both host and link in the same
@@ -1666,6 +1784,14 @@ static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
skl_tplg_tlv_control_set},
};
+static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
+ {
+ .id = SKL_CONTROL_TYPE_MIC_SELECT,
+ .get = skl_tplg_mic_control_get,
+ .put = skl_tplg_mic_control_set,
+ },
+};
+
static int skl_tplg_fill_pipe_tkn(struct device *dev,
struct skl_pipe *pipe, u32 tkn,
u32 tkn_val)
@@ -1995,7 +2121,7 @@ static int skl_tplg_get_token(struct device *dev,
mconfig->converter = tkn_elem->value;
break;
- case SKL_TKL_U32_D0I3_CAPS:
+ case SKL_TKN_U32_D0I3_CAPS:
mconfig->d0i3_caps = tkn_elem->value;
break;
@@ -2070,12 +2196,26 @@ static int skl_tplg_get_token(struct device *dev,
break;
+ case SKL_TKN_U32_CAPS_SET_PARAMS:
+ mconfig->formats_config.set_params =
+ tkn_elem->value;
+ break;
+
+ case SKL_TKN_U32_CAPS_PARAMS_ID:
+ mconfig->formats_config.param_id =
+ tkn_elem->value;
+ break;
+
case SKL_TKN_U32_PROC_DOMAIN:
mconfig->domain =
tkn_elem->value;
break;
+ case SKL_TKN_U32_DMA_BUF_SIZE:
+ mconfig->dma_buffer_size = tkn_elem->value;
+ break;
+
case SKL_TKN_U8_IN_PIN_TYPE:
case SKL_TKN_U8_OUT_PIN_TYPE:
case SKL_TKN_U8_CONN_TYPE:
@@ -2147,7 +2287,7 @@ static int skl_tplg_get_tokens(struct device *dev,
tuple_size += tkn_count * sizeof(*tkn_elem);
}
- return 0;
+ return off;
}
/*
@@ -2198,10 +2338,11 @@ static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
num_blocks = ret;
off += array->size;
- array = (struct snd_soc_tplg_vendor_array *)(tplg_w->priv.data + off);
-
/* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */
while (num_blocks > 0) {
+ array = (struct snd_soc_tplg_vendor_array *)
+ (tplg_w->priv.data + off);
+
ret = skl_tplg_get_desc_blocks(dev, array);
if (ret < 0)
@@ -2237,7 +2378,9 @@ static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w,
memcpy(mconfig->formats_config.caps, data,
mconfig->formats_config.caps_size);
--num_blocks;
+ ret = mconfig->formats_config.caps_size;
}
+ off += ret;
}
return 0;
@@ -2329,6 +2472,9 @@ static int skl_tplg_widget_load(struct snd_soc_component *cmpnt,
ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig);
if (ret < 0)
return ret;
+
+ skl_debug_init_module(skl->debugfs, w, mconfig);
+
bind_event:
if (tplg_w->event_type == 0) {
dev_dbg(bus->dev, "ASoC: No event handler required\n");
@@ -2377,14 +2523,34 @@ static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be,
return 0;
}
+static int skl_init_enum_data(struct device *dev, struct soc_enum *se,
+ struct snd_soc_tplg_enum_control *ec)
+{
+ void *data;
+
+ if (ec->priv.size) {
+ data = devm_kzalloc(dev, ec->priv.size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ memcpy(data, ec->priv.data, ec->priv.size);
+ se->dobj.private = data;
+ }
+
+ return 0;
+}
+
static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
struct snd_kcontrol_new *kctl,
struct snd_soc_tplg_ctl_hdr *hdr)
{
struct soc_bytes_ext *sb;
struct snd_soc_tplg_bytes_control *tplg_bc;
+ struct snd_soc_tplg_enum_control *tplg_ec;
struct hdac_ext_bus *ebus = snd_soc_component_get_drvdata(cmpnt);
struct hdac_bus *bus = ebus_to_hbus(ebus);
+ struct soc_enum *se;
switch (hdr->ops.info) {
case SND_SOC_TPLG_CTL_BYTES:
@@ -2398,6 +2564,17 @@ static int skl_tplg_control_load(struct snd_soc_component *cmpnt,
}
break;
+ case SND_SOC_TPLG_CTL_ENUM:
+ tplg_ec = container_of(hdr,
+ struct snd_soc_tplg_enum_control, hdr);
+ if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READWRITE) {
+ se = (struct soc_enum *)kctl->private_value;
+ if (tplg_ec->priv.size)
+ return skl_init_enum_data(bus->dev, se,
+ tplg_ec);
+ }
+ break;
+
default:
dev_warn(bus->dev, "Control load not supported %d:%d:%d\n",
hdr->ops.get, hdr->ops.put, hdr->ops.info);
@@ -2626,6 +2803,8 @@ static struct snd_soc_tplg_ops skl_tplg_ops = {
.control_load = skl_tplg_control_load,
.bytes_ext_ops = skl_tlv_ops,
.bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops),
+ .io_ops = skl_tplg_kcontrol_ops,
+ .io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops),
.manifest = skl_manifest_load,
};
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
index cc64d6bdb4f6..c25e8868b84e 100644
--- a/sound/soc/intel/skylake/skl-topology.h
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -39,6 +39,11 @@
#define MODULE_MAX_IN_PINS 8
#define MODULE_MAX_OUT_PINS 8
+#define SKL_MIC_CH_SUPPORT 4
+#define SKL_MIC_MAX_CH_SUPPORT 8
+#define SKL_DEFAULT_MIC_SEL_GAIN 0x3FF
+#define SKL_MIC_SEL_SWITCH 0x3
+
enum skl_channel_index {
SKL_CHANNEL_LEFT = 0,
SKL_CHANNEL_RIGHT = 1,
@@ -309,11 +314,14 @@ struct skl_module_cfg {
u8 dev_type;
u8 dma_id;
u8 time_slot;
+ u8 dmic_ch_combo_index;
+ u32 dmic_ch_type;
u32 params_fixup;
u32 converter;
u32 vbus_id;
u32 mem_pages;
enum d0i3_capability d0i3_caps;
+ u32 dma_buffer_size; /* in milli seconds */
struct skl_module_pin *m_in_pin;
struct skl_module_pin *m_out_pin;
enum skl_module_type m_type;
@@ -342,6 +350,19 @@ struct skl_module_deferred_bind {
struct list_head node;
};
+struct skl_mic_sel_config {
+ u16 mic_switch;
+ u16 flags;
+ u16 blob[SKL_MIC_MAX_CH_SUPPORT][SKL_MIC_MAX_CH_SUPPORT];
+} __packed;
+
+enum skl_channel {
+ SKL_CH_MONO = 1,
+ SKL_CH_STEREO = 2,
+ SKL_CH_TRIO = 3,
+ SKL_CH_QUATRO = 4,
+};
+
static inline struct skl *get_skl_ctx(struct device *dev)
{
struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
diff --git a/sound/soc/intel/skylake/skl-tplg-interface.h b/sound/soc/intel/skylake/skl-tplg-interface.h
index 7a2febf99019..f8d1749a2e0c 100644
--- a/sound/soc/intel/skylake/skl-tplg-interface.h
+++ b/sound/soc/intel/skylake/skl-tplg-interface.h
@@ -24,6 +24,7 @@
* SST types start at higher to avoid any overlapping in future
*/
#define SKL_CONTROL_TYPE_BYTE_TLV 0x100
+#define SKL_CONTROL_TYPE_MIC_SELECT 0x102
#define HDA_SST_CFG_MAX 900 /* size of copier cfg*/
#define MAX_IN_QUEUE 8
@@ -82,6 +83,7 @@ enum skl_module_type {
SKL_MODULE_TYPE_ALGO,
SKL_MODULE_TYPE_BASE_OUTFMT,
SKL_MODULE_TYPE_KPB,
+ SKL_MODULE_TYPE_MIC_SELECT,
};
enum skl_core_affinity {
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 4c9b5781282b..334917ee41cf 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -866,6 +866,7 @@ static void skl_remove(struct pci_dev *pci)
/* codec removal, invoke bus_device_remove */
snd_hdac_ext_bus_device_remove(ebus);
+ skl->debugfs = NULL;
skl_platform_unregister(&pci->dev);
skl_free_dsp(skl);
skl_machine_device_unregister(skl);
@@ -876,29 +877,120 @@ static void skl_remove(struct pci_dev *pci)
dev_set_drvdata(&pci->dev, NULL);
}
+static struct sst_codecs skl_codecs = {
+ .num_codecs = 1,
+ .codecs = {"NAU88L25"}
+};
+
+static struct sst_codecs kbl_codecs = {
+ .num_codecs = 1,
+ .codecs = {"NAU88L25"}
+};
+
+static struct sst_codecs bxt_codecs = {
+ .num_codecs = 1,
+ .codecs = {"MX98357A"}
+};
+
+static struct sst_codecs kbl_poppy_codecs = {
+ .num_codecs = 1,
+ .codecs = {"10EC5663"}
+};
+
+static struct sst_codecs kbl_5663_5514_codecs = {
+ .num_codecs = 2,
+ .codecs = {"10EC5663", "10EC5514"}
+};
+
+
static struct sst_acpi_mach sst_skl_devdata[] = {
- { "INT343A", "skl_alc286s_i2s", "intel/dsp_fw_release.bin", NULL, NULL, NULL },
- { "INT343B", "skl_n88l25_s4567", "intel/dsp_fw_release.bin",
- NULL, NULL, &skl_dmic_data },
- { "MX98357A", "skl_n88l25_m98357a", "intel/dsp_fw_release.bin",
- NULL, NULL, &skl_dmic_data },
+ {
+ .id = "INT343A",
+ .drv_name = "skl_alc286s_i2s",
+ .fw_filename = "intel/dsp_fw_release.bin",
+ },
+ {
+ .id = "INT343B",
+ .drv_name = "skl_n88l25_s4567",
+ .fw_filename = "intel/dsp_fw_release.bin",
+ .machine_quirk = sst_acpi_codec_list,
+ .quirk_data = &skl_codecs,
+ .pdata = &skl_dmic_data
+ },
+ {
+ .id = "MX98357A",
+ .drv_name = "skl_n88l25_m98357a",
+ .fw_filename = "intel/dsp_fw_release.bin",
+ .machine_quirk = sst_acpi_codec_list,
+ .quirk_data = &skl_codecs,
+ .pdata = &skl_dmic_data
+ },
{}
};
static struct sst_acpi_mach sst_bxtp_devdata[] = {
- { "INT343A", "bxt_alc298s_i2s", "intel/dsp_fw_bxtn.bin", NULL, NULL, NULL },
- { "DLGS7219", "bxt_da7219_max98357a_i2s", "intel/dsp_fw_bxtn.bin", NULL, NULL, NULL },
+ {
+ .id = "INT343A",
+ .drv_name = "bxt_alc298s_i2s",
+ .fw_filename = "intel/dsp_fw_bxtn.bin",
+ },
+ {
+ .id = "DLGS7219",
+ .drv_name = "bxt_da7219_max98357a_i2s",
+ .fw_filename = "intel/dsp_fw_bxtn.bin",
+ .machine_quirk = sst_acpi_codec_list,
+ .quirk_data = &bxt_codecs,
+ },
};
static struct sst_acpi_mach sst_kbl_devdata[] = {
- { "INT343A", "kbl_alc286s_i2s", "intel/dsp_fw_kbl.bin", NULL, NULL, NULL },
- { "INT343B", "kbl_n88l25_s4567", "intel/dsp_fw_kbl.bin", NULL, NULL, &skl_dmic_data },
- { "MX98357A", "kbl_n88l25_m98357a", "intel/dsp_fw_kbl.bin", NULL, NULL, &skl_dmic_data },
+ {
+ .id = "INT343A",
+ .drv_name = "kbl_alc286s_i2s",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ },
+ {
+ .id = "INT343B",
+ .drv_name = "kbl_n88l25_s4567",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ .machine_quirk = sst_acpi_codec_list,
+ .quirk_data = &kbl_codecs,
+ .pdata = &skl_dmic_data
+ },
+ {
+ .id = "MX98357A",
+ .drv_name = "kbl_n88l25_m98357a",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ .machine_quirk = sst_acpi_codec_list,
+ .quirk_data = &kbl_codecs,
+ .pdata = &skl_dmic_data
+ },
+ {
+ .id = "MX98927",
+ .drv_name = "kbl_r5514_5663_max",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ .machine_quirk = sst_acpi_codec_list,
+ .quirk_data = &kbl_5663_5514_codecs,
+ .pdata = &skl_dmic_data
+ },
+ {
+ .id = "MX98927",
+ .drv_name = "kbl_rt5663_m98927",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ .machine_quirk = sst_acpi_codec_list,
+ .quirk_data = &kbl_poppy_codecs,
+ .pdata = &skl_dmic_data
+ },
+
{}
};
static struct sst_acpi_mach sst_glk_devdata[] = {
- { "INT343A", "glk_alc298s_i2s", "intel/dsp_fw_glk.bin", NULL, NULL, NULL },
+ {
+ .id = "INT343A",
+ .drv_name = "glk_alc298s_i2s",
+ .fw_filename = "intel/dsp_fw_glk.bin",
+ },
};
/* PCI IDs */
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index 2a630fcb7f08..a6b134b4c037 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -23,6 +23,7 @@
#include <sound/hda_register.h>
#include <sound/hdaudio_ext.h>
+#include <sound/soc.h>
#include "skl-nhlt.h"
#define SKL_SUSPEND_DELAY 2000
@@ -42,6 +43,8 @@ struct skl_dsp_resource {
u32 mem;
};
+struct skl_debug;
+
struct skl {
struct hdac_ext_bus ebus;
struct pci_dev *pci;
@@ -66,6 +69,8 @@ struct skl {
int supend_active;
struct work_struct probe_work;
+
+ struct skl_debug *debugfs;
};
#define skl_to_ebus(s) (&(s)->ebus)
@@ -116,4 +121,22 @@ void skl_update_d0i3c(struct device *dev, bool enable);
int skl_nhlt_create_sysfs(struct skl *skl);
void skl_nhlt_remove_sysfs(struct skl *skl);
+struct skl_module_cfg;
+
+#ifdef CONFIG_DEBUG_FS
+struct skl_debug *skl_debugfs_init(struct skl *skl);
+void skl_debug_init_module(struct skl_debug *d,
+ struct snd_soc_dapm_widget *w,
+ struct skl_module_cfg *mconfig);
+#else
+static inline struct skl_debug *skl_debugfs_init(struct skl *skl)
+{
+ return NULL;
+}
+static inline void skl_debug_init_module(struct skl_debug *d,
+ struct snd_soc_dapm_widget *w,
+ struct skl_module_cfg *mconfig)
+{}
+#endif
+
#endif /* __SOUND_SOC_SKL_H */
diff --git a/sound/soc/mediatek/mt2701/mt2701-cs42448.c b/sound/soc/mediatek/mt2701/mt2701-cs42448.c
index aa5b31b121e3..70f61d53fe05 100644
--- a/sound/soc/mediatek/mt2701/mt2701-cs42448.c
+++ b/sound/soc/mediatek/mt2701/mt2701-cs42448.c
@@ -107,7 +107,7 @@ static const struct snd_kcontrol_new mt2701_cs42448_controls[] = {
static const unsigned int mt2701_cs42448_sampling_rates[] = {48000};
-static struct snd_pcm_hw_constraint_list mt2701_cs42448_constraints_rates = {
+static const struct snd_pcm_hw_constraint_list mt2701_cs42448_constraints_rates = {
.count = ARRAY_SIZE(mt2701_cs42448_sampling_rates),
.list = mt2701_cs42448_sampling_rates,
.mask = 0,
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index 06fec5699cc8..7a54e3083203 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -835,15 +835,11 @@ static ssize_t dma_op_mode_store(struct device *dev,
const char *buf, size_t size)
{
struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
- const char * const *s;
- int i = 0;
-
- for (s = &dma_op_modes[i]; i < ARRAY_SIZE(dma_op_modes); s++, i++)
- if (sysfs_streq(buf, *s))
- break;
+ int i;
- if (i == ARRAY_SIZE(dma_op_modes))
- return -EINVAL;
+ i = sysfs_match_string(dma_op_modes, buf);
+ if (i < 0)
+ return i;
spin_lock_irq(&mcbsp->lock);
if (!mcbsp->free) {
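Note on the dma_op_mode_store() hunk above: the open-coded scan over dma_op_modes[] is replaced by sysfs_match_string(), which returns the index of the matching entry or a negative errno. A minimal, userspace-compilable sketch of the same matching behaviour follows; the trailing-newline tolerance of sysfs_streq() is reproduced by hand and the mode names are illustrative, not taken from the driver.

#include <stdio.h>
#include <string.h>

/* Tolerant compare in the spirit of sysfs_streq(): equal, or equal except
 * for a single trailing newline on the user-supplied buffer. */
static int streq_sysfs(const char *buf, const char *token)
{
	size_t n = strlen(token);

	if (strncmp(buf, token, n))
		return 0;
	return buf[n] == '\0' || (buf[n] == '\n' && buf[n + 1] == '\0');
}

/* Return the index of buf in list[], or -1 when nothing matches. */
static int match_string_list(const char *const *list, size_t count,
			     const char *buf)
{
	size_t i;

	for (i = 0; i < count; i++)
		if (streq_sysfs(buf, list[i]))
			return (int)i;
	return -1;
}

int main(void)
{
	static const char *const dma_op_modes[] = {
		"element", "threshold", "frame",	/* illustrative values */
	};

	printf("%d\n", match_string_list(dma_op_modes, 3, "threshold\n")); /* 1 */
	printf("%d\n", match_string_list(dma_op_modes, 3, "bogus"));       /* -1 */
	return 0;
}

Returning the index directly lets the caller use one value both as the error check and as the selected mode, which is exactly what the simplified store function does.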
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 823b5a236d8d..960744e46edc 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -1,6 +1,6 @@
config SND_PXA2XX_SOC
tristate "SoC Audio for the Intel PXA2xx chip"
- depends on ARCH_PXA
+ depends on ARCH_PXA || COMPILE_TEST
select SND_PXA2XX_LIB
help
Say Y or M if you want to add support for codecs attached to
diff --git a/sound/soc/rockchip/Kconfig b/sound/soc/rockchip/Kconfig
index e3ca1e973de5..c84487805876 100644
--- a/sound/soc/rockchip/Kconfig
+++ b/sound/soc/rockchip/Kconfig
@@ -15,6 +15,15 @@ config SND_SOC_ROCKCHIP_I2S
Rockchip I2S device. The device supports upto maximum of
8 channels each for play and record.
+config SND_SOC_ROCKCHIP_PDM
+ tristate "Rockchip PDM Controller Driver"
+ depends on CLKDEV_LOOKUP && SND_SOC_ROCKCHIP
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ help
+ Say Y or M if you want to add support for PDM driver for
+ Rockchip PDM Controller. The Controller supports up to maximum of
+ 8 channels record.
+
config SND_SOC_ROCKCHIP_SPDIF
tristate "Rockchip SPDIF Device Driver"
depends on CLKDEV_LOOKUP && SND_SOC_ROCKCHIP
diff --git a/sound/soc/rockchip/Makefile b/sound/soc/rockchip/Makefile
index 991f91bea9f9..105f0e14a4ab 100644
--- a/sound/soc/rockchip/Makefile
+++ b/sound/soc/rockchip/Makefile
@@ -1,8 +1,10 @@
# ROCKCHIP Platform Support
snd-soc-rockchip-i2s-objs := rockchip_i2s.o
+snd-soc-rockchip-pdm-objs := rockchip_pdm.o
snd-soc-rockchip-spdif-objs := rockchip_spdif.o
obj-$(CONFIG_SND_SOC_ROCKCHIP_I2S) += snd-soc-rockchip-i2s.o
+obj-$(CONFIG_SND_SOC_ROCKCHIP_PDM) += snd-soc-rockchip-pdm.o
obj-$(CONFIG_SND_SOC_ROCKCHIP_SPDIF) += snd-soc-rockchip-spdif.o
snd-soc-rockchip-max98090-objs := rockchip_max98090.o
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 974915cb4c4f..199338fdeda0 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -116,6 +116,7 @@ static void rockchip_snd_txctrl(struct rk_i2s_dev *i2s, int on)
I2S_XFER_TXS_STOP |
I2S_XFER_RXS_STOP);
+ udelay(150);
regmap_update_bits(i2s->regmap, I2S_CLR,
I2S_CLR_TXC | I2S_CLR_RXC,
I2S_CLR_TXC | I2S_CLR_RXC);
@@ -162,6 +163,7 @@ static void rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on)
I2S_XFER_TXS_STOP |
I2S_XFER_RXS_STOP);
+ udelay(150);
regmap_update_bits(i2s->regmap, I2S_CLR,
I2S_CLR_TXC | I2S_CLR_RXC,
I2S_CLR_TXC | I2S_CLR_RXC);
@@ -204,7 +206,21 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
regmap_update_bits(i2s->regmap, I2S_CKR, mask, val);
- mask = I2S_TXCR_IBM_MASK;
+ mask = I2S_CKR_CKP_MASK;
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ val = I2S_CKR_CKP_NEG;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ val = I2S_CKR_CKP_POS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(i2s->regmap, I2S_CKR, mask, val);
+
+ mask = I2S_TXCR_IBM_MASK | I2S_TXCR_TFS_MASK | I2S_TXCR_PBM_MASK;
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_RIGHT_J:
val = I2S_TXCR_IBM_RSJM;
@@ -215,13 +231,19 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
case SND_SOC_DAIFMT_I2S:
val = I2S_TXCR_IBM_NORMAL;
break;
+ case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */
+ val = I2S_TXCR_TFS_PCM;
+ break;
+ case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */
+ val = I2S_TXCR_TFS_PCM | I2S_TXCR_PBM_MODE(1);
+ break;
default:
return -EINVAL;
}
regmap_update_bits(i2s->regmap, I2S_TXCR, mask, val);
- mask = I2S_RXCR_IBM_MASK;
+ mask = I2S_RXCR_IBM_MASK | I2S_RXCR_TFS_MASK | I2S_RXCR_PBM_MASK;
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_RIGHT_J:
val = I2S_RXCR_IBM_RSJM;
@@ -232,6 +254,12 @@ static int rockchip_i2s_set_fmt(struct snd_soc_dai *cpu_dai,
case SND_SOC_DAIFMT_I2S:
val = I2S_RXCR_IBM_NORMAL;
break;
+ case SND_SOC_DAIFMT_DSP_A: /* PCM no delay mode */
+ val = I2S_RXCR_TFS_PCM;
+ break;
+ case SND_SOC_DAIFMT_DSP_B: /* PCM delay 1 mode */
+ val = I2S_RXCR_TFS_PCM | I2S_RXCR_PBM_MODE(1);
+ break;
default:
return -EINVAL;
}
@@ -615,12 +643,13 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
goto err_pm_disable;
}
- soc_dai = devm_kzalloc(&pdev->dev,
+ soc_dai = devm_kmemdup(&pdev->dev, &rockchip_i2s_dai,
sizeof(*soc_dai), GFP_KERNEL);
- if (!soc_dai)
- return -ENOMEM;
+ if (!soc_dai) {
+ ret = -ENOMEM;
+ goto err_pm_disable;
+ }
- memcpy(soc_dai, &rockchip_i2s_dai, sizeof(*soc_dai));
if (!of_property_read_u32(node, "rockchip,playback-channels", &val)) {
if (val >= 2 && val <= 8)
soc_dai->playback.channels_max = val;
diff --git a/sound/soc/rockchip/rockchip_i2s.h b/sound/soc/rockchip/rockchip_i2s.h
index 31f11fd25393..a7b8527d8a73 100644
--- a/sound/soc/rockchip/rockchip_i2s.h
+++ b/sound/soc/rockchip/rockchip_i2s.h
@@ -41,6 +41,7 @@
#define I2S_TXCR_TFS_SHIFT 5
#define I2S_TXCR_TFS_I2S (0 << I2S_TXCR_TFS_SHIFT)
#define I2S_TXCR_TFS_PCM (1 << I2S_TXCR_TFS_SHIFT)
+#define I2S_TXCR_TFS_MASK (1 << I2S_TXCR_TFS_SHIFT)
#define I2S_TXCR_VDW_SHIFT 0
#define I2S_TXCR_VDW(x) ((x - 1) << I2S_TXCR_VDW_SHIFT)
#define I2S_TXCR_VDW_MASK (0x1f << I2S_TXCR_VDW_SHIFT)
@@ -70,6 +71,7 @@
#define I2S_RXCR_TFS_SHIFT 5
#define I2S_RXCR_TFS_I2S (0 << I2S_RXCR_TFS_SHIFT)
#define I2S_RXCR_TFS_PCM (1 << I2S_RXCR_TFS_SHIFT)
+#define I2S_RXCR_TFS_MASK (1 << I2S_RXCR_TFS_SHIFT)
#define I2S_RXCR_VDW_SHIFT 0
#define I2S_RXCR_VDW(x) ((x - 1) << I2S_RXCR_VDW_SHIFT)
#define I2S_RXCR_VDW_MASK (0x1f << I2S_RXCR_VDW_SHIFT)
@@ -91,6 +93,7 @@
#define I2S_CKR_CKP_SHIFT 26
#define I2S_CKR_CKP_NEG (0 << I2S_CKR_CKP_SHIFT)
#define I2S_CKR_CKP_POS (1 << I2S_CKR_CKP_SHIFT)
+#define I2S_CKR_CKP_MASK (1 << I2S_CKR_CKP_SHIFT)
#define I2S_CKR_RLP_SHIFT 25
#define I2S_CKR_RLP_NORMAL (0 << I2S_CKR_RLP_SHIFT)
#define I2S_CKR_RLP_OPPSITE (1 << I2S_CKR_RLP_SHIFT)
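Note on the rockchip_i2s_set_fmt() and header hunks above: the update masks are widened so the new TFS (I2S vs PCM frame sync) and PBM (bit-delay) fields are cleared together with the IBM field; with the previous IBM-only mask, switching from a DSP_A/DSP_B configuration back to plain I2S could leave stale TFS/PBM bits in TXCR/RXCR. The read-modify-write performed by regmap_update_bits() reduces to the arithmetic below, shown as a plain C sketch; the TFS shift matches the header above, while the PBM shift and width are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Read-modify-write of a register field set: only bits inside 'mask' change. */
static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);
}

#define TFS_SHIFT	5			/* matches I2S_TXCR_TFS_SHIFT above */
#define PBM_SHIFT	7			/* assumed for the sketch */
#define TFS_MASK	(1u << TFS_SHIFT)
#define PBM_MASK	(3u << PBM_SHIFT)	/* assumed 2-bit field */
#define TFS_PCM		(1u << TFS_SHIFT)
#define PBM_MODE(x)	((uint32_t)(x) << PBM_SHIFT)

int main(void)
{
	uint32_t txcr = TFS_PCM | PBM_MODE(1);	/* pretend DSP_B was configured */

	/* Switching back to plain I2S: the widened mask clears the stale
	 * PCM frame-sync and bit-delay settings in a single write. */
	txcr = update_bits(txcr, TFS_MASK | PBM_MASK, 0);
	printf("0x%08x\n", txcr);	/* prints 0x00000000 */
	return 0;
}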
diff --git a/sound/soc/rockchip/rockchip_pdm.c b/sound/soc/rockchip/rockchip_pdm.c
new file mode 100644
index 000000000000..c5ddeed97260
--- /dev/null
+++ b/sound/soc/rockchip/rockchip_pdm.c
@@ -0,0 +1,516 @@
+/*
+ * Rockchip PDM ALSA SoC Digital Audio Interface(DAI) driver
+ *
+ * Copyright (C) 2017 Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/pcm_params.h>
+
+#include "rockchip_pdm.h"
+
+#define PDM_DMA_BURST_SIZE (16) /* size * width: 16*4 = 64 bytes */
+
+struct rk_pdm_dev {
+ struct device *dev;
+ struct clk *clk;
+ struct clk *hclk;
+ struct regmap *regmap;
+ struct snd_dmaengine_dai_dma_data capture_dma_data;
+};
+
+struct rk_pdm_clkref {
+ unsigned int sr;
+ unsigned int clk;
+};
+
+static struct rk_pdm_clkref clkref[] = {
+ { 8000, 40960000 },
+ { 11025, 56448000 },
+ { 12000, 61440000 },
+};
+
+static unsigned int get_pdm_clk(unsigned int sr)
+{
+ unsigned int i, count, clk, div;
+
+ clk = 0;
+ if (!sr)
+ return clk;
+
+ count = ARRAY_SIZE(clkref);
+ for (i = 0; i < count; i++) {
+ if (sr % clkref[i].sr)
+ continue;
+ div = sr / clkref[i].sr;
+ if ((div & (div - 1)) == 0) {
+ clk = clkref[i].clk;
+ break;
+ }
+ }
+
+ return clk;
+}
+
+static inline struct rk_pdm_dev *to_info(struct snd_soc_dai *dai)
+{
+ return snd_soc_dai_get_drvdata(dai);
+}
+
+static void rockchip_pdm_rxctrl(struct rk_pdm_dev *pdm, int on)
+{
+ if (on) {
+ regmap_update_bits(pdm->regmap, PDM_DMA_CTRL,
+ PDM_DMA_RD_MSK, PDM_DMA_RD_EN);
+ regmap_update_bits(pdm->regmap, PDM_SYSCONFIG,
+ PDM_RX_MASK, PDM_RX_START);
+ } else {
+ regmap_update_bits(pdm->regmap, PDM_DMA_CTRL,
+ PDM_DMA_RD_MSK, PDM_DMA_RD_DIS);
+ regmap_update_bits(pdm->regmap, PDM_SYSCONFIG,
+ PDM_RX_MASK | PDM_RX_CLR_MASK,
+ PDM_RX_STOP | PDM_RX_CLR_WR);
+ }
+}
+
+static int rockchip_pdm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct rk_pdm_dev *pdm = to_info(dai);
+ unsigned int val = 0;
+ unsigned int clk_rate, clk_div, samplerate;
+ int ret;
+
+ samplerate = params_rate(params);
+ clk_rate = get_pdm_clk(samplerate);
+ if (!clk_rate)
+ return -EINVAL;
+
+ ret = clk_set_rate(pdm->clk, clk_rate);
+ if (ret)
+ return -EINVAL;
+
+ clk_div = DIV_ROUND_CLOSEST(clk_rate, samplerate);
+
+ switch (clk_div) {
+ case 320:
+ val = PDM_CLK_320FS;
+ break;
+ case 640:
+ val = PDM_CLK_640FS;
+ break;
+ case 1280:
+ val = PDM_CLK_1280FS;
+ break;
+ case 2560:
+ val = PDM_CLK_2560FS;
+ break;
+ case 5120:
+ val = PDM_CLK_5120FS;
+ break;
+ default:
+ dev_err(pdm->dev, "unsupported div: %d\n", clk_div);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(pdm->regmap, PDM_CLK_CTRL, PDM_DS_RATIO_MSK, val);
+ regmap_update_bits(pdm->regmap, PDM_HPF_CTRL,
+ PDM_HPF_CF_MSK, PDM_HPF_60HZ);
+ regmap_update_bits(pdm->regmap, PDM_HPF_CTRL,
+ PDM_HPF_LE | PDM_HPF_RE, PDM_HPF_LE | PDM_HPF_RE);
+ regmap_update_bits(pdm->regmap, PDM_CLK_CTRL, PDM_CLK_EN, PDM_CLK_EN);
+
+ val = 0;
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S8:
+ val |= PDM_VDW(8);
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ val |= PDM_VDW(16);
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ val |= PDM_VDW(20);
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ val |= PDM_VDW(24);
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ val |= PDM_VDW(32);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (params_channels(params)) {
+ case 8:
+ val |= PDM_PATH3_EN;
+ /* fallthrough */
+ case 6:
+ val |= PDM_PATH2_EN;
+ /* fallthrough */
+ case 4:
+ val |= PDM_PATH1_EN;
+ /* fallthrough */
+ case 2:
+ val |= PDM_PATH0_EN;
+ break;
+ default:
+ dev_err(pdm->dev, "invalid channel: %d\n",
+ params_channels(params));
+ return -EINVAL;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ regmap_update_bits(pdm->regmap, PDM_CTRL0,
+ PDM_PATH_MSK | PDM_VDW_MSK,
+ val);
+ regmap_update_bits(pdm->regmap, PDM_DMA_CTRL, PDM_DMA_RDL_MSK,
+ PDM_DMA_RDL(16));
+ regmap_update_bits(pdm->regmap, PDM_SYSCONFIG,
+ PDM_RX_MASK | PDM_RX_CLR_MASK,
+ PDM_RX_STOP | PDM_RX_CLR_WR);
+ }
+
+ return 0;
+}
+
+static int rockchip_pdm_set_fmt(struct snd_soc_dai *cpu_dai,
+ unsigned int fmt)
+{
+ struct rk_pdm_dev *pdm = to_info(cpu_dai);
+ unsigned int mask = 0, val = 0;
+
+ mask = PDM_CKP_MSK;
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ val = PDM_CKP_NORMAL;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ val = PDM_CKP_INVERTED;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(pdm->regmap, PDM_CLK_CTRL, mask, val);
+
+ return 0;
+}
+
+static int rockchip_pdm_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct rk_pdm_dev *pdm = to_info(dai);
+ int ret = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ rockchip_pdm_rxctrl(pdm, 1);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ rockchip_pdm_rxctrl(pdm, 0);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int rockchip_pdm_dai_probe(struct snd_soc_dai *dai)
+{
+ struct rk_pdm_dev *pdm = to_info(dai);
+
+ dai->capture_dma_data = &pdm->capture_dma_data;
+
+ return 0;
+}
+
+static struct snd_soc_dai_ops rockchip_pdm_dai_ops = {
+ .set_fmt = rockchip_pdm_set_fmt,
+ .trigger = rockchip_pdm_trigger,
+ .hw_params = rockchip_pdm_hw_params,
+};
+
+#define ROCKCHIP_PDM_RATES SNDRV_PCM_RATE_8000_192000
+#define ROCKCHIP_PDM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver rockchip_pdm_dai = {
+ .probe = rockchip_pdm_dai_probe,
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = ROCKCHIP_PDM_RATES,
+ .formats = ROCKCHIP_PDM_FORMATS,
+ },
+ .ops = &rockchip_pdm_dai_ops,
+ .symmetric_rates = 1,
+};
+
+static const struct snd_soc_component_driver rockchip_pdm_component = {
+ .name = "rockchip-pdm",
+};
+
+static int rockchip_pdm_runtime_suspend(struct device *dev)
+{
+ struct rk_pdm_dev *pdm = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(pdm->clk);
+ clk_disable_unprepare(pdm->hclk);
+
+ return 0;
+}
+
+static int rockchip_pdm_runtime_resume(struct device *dev)
+{
+ struct rk_pdm_dev *pdm = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(pdm->clk);
+ if (ret) {
+ dev_err(pdm->dev, "clock enable failed %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(pdm->hclk);
+ if (ret) {
+ dev_err(pdm->dev, "hclock enable failed %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool rockchip_pdm_wr_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case PDM_SYSCONFIG:
+ case PDM_CTRL0:
+ case PDM_CTRL1:
+ case PDM_CLK_CTRL:
+ case PDM_HPF_CTRL:
+ case PDM_FIFO_CTRL:
+ case PDM_DMA_CTRL:
+ case PDM_INT_EN:
+ case PDM_INT_CLR:
+ case PDM_DATA_VALID:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rockchip_pdm_rd_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case PDM_SYSCONFIG:
+ case PDM_CTRL0:
+ case PDM_CTRL1:
+ case PDM_CLK_CTRL:
+ case PDM_HPF_CTRL:
+ case PDM_FIFO_CTRL:
+ case PDM_DMA_CTRL:
+ case PDM_INT_EN:
+ case PDM_INT_CLR:
+ case PDM_INT_ST:
+ case PDM_DATA_VALID:
+ case PDM_VERSION:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool rockchip_pdm_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case PDM_SYSCONFIG:
+ case PDM_INT_CLR:
+ case PDM_INT_ST:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config rockchip_pdm_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = PDM_VERSION,
+ .writeable_reg = rockchip_pdm_wr_reg,
+ .readable_reg = rockchip_pdm_rd_reg,
+ .volatile_reg = rockchip_pdm_volatile_reg,
+ .cache_type = REGCACHE_FLAT,
+};
+
+static int rockchip_pdm_probe(struct platform_device *pdev)
+{
+ struct rk_pdm_dev *pdm;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ pdm = devm_kzalloc(&pdev->dev, sizeof(*pdm), GFP_KERNEL);
+ if (!pdm)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ pdm->regmap = devm_regmap_init_mmio(&pdev->dev, regs,
+ &rockchip_pdm_regmap_config);
+ if (IS_ERR(pdm->regmap))
+ return PTR_ERR(pdm->regmap);
+
+ pdm->capture_dma_data.addr = res->start + PDM_RXFIFO_DATA;
+ pdm->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ pdm->capture_dma_data.maxburst = PDM_DMA_BURST_SIZE;
+
+ pdm->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, pdm);
+
+ pdm->clk = devm_clk_get(&pdev->dev, "pdm_clk");
+ if (IS_ERR(pdm->clk))
+ return PTR_ERR(pdm->clk);
+
+ pdm->hclk = devm_clk_get(&pdev->dev, "pdm_hclk");
+ if (IS_ERR(pdm->hclk))
+ return PTR_ERR(pdm->hclk);
+
+ ret = clk_prepare_enable(pdm->hclk);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = rockchip_pdm_runtime_resume(&pdev->dev);
+ if (ret)
+ goto err_pm_disable;
+ }
+
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &rockchip_pdm_component,
+ &rockchip_pdm_dai, 1);
+
+ if (ret) {
+ dev_err(&pdev->dev, "could not register dai: %d\n", ret);
+ goto err_suspend;
+ }
+
+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "could not register pcm: %d\n", ret);
+ goto err_suspend;
+ }
+
+ return 0;
+
+err_suspend:
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ rockchip_pdm_runtime_suspend(&pdev->dev);
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+
+ clk_disable_unprepare(pdm->hclk);
+
+ return ret;
+}
+
+static int rockchip_pdm_remove(struct platform_device *pdev)
+{
+ struct rk_pdm_dev *pdm = dev_get_drvdata(&pdev->dev);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ rockchip_pdm_runtime_suspend(&pdev->dev);
+
+ clk_disable_unprepare(pdm->clk);
+ clk_disable_unprepare(pdm->hclk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rockchip_pdm_suspend(struct device *dev)
+{
+ struct rk_pdm_dev *pdm = dev_get_drvdata(dev);
+
+ regcache_mark_dirty(pdm->regmap);
+
+ return 0;
+}
+
+static int rockchip_pdm_resume(struct device *dev)
+{
+ struct rk_pdm_dev *pdm = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = regcache_sync(pdm->regmap);
+
+ pm_runtime_put(dev);
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops rockchip_pdm_pm_ops = {
+ SET_RUNTIME_PM_OPS(rockchip_pdm_runtime_suspend,
+ rockchip_pdm_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(rockchip_pdm_suspend, rockchip_pdm_resume)
+};
+
+static const struct of_device_id rockchip_pdm_match[] = {
+ { .compatible = "rockchip,pdm", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rockchip_pdm_match);
+
+static struct platform_driver rockchip_pdm_driver = {
+ .probe = rockchip_pdm_probe,
+ .remove = rockchip_pdm_remove,
+ .driver = {
+ .name = "rockchip-pdm",
+ .of_match_table = of_match_ptr(rockchip_pdm_match),
+ .pm = &rockchip_pdm_pm_ops,
+ },
+};
+
+module_platform_driver(rockchip_pdm_driver);
+
+MODULE_AUTHOR("Sugar <[email protected]>");
+MODULE_DESCRIPTION("Rockchip PDM Controller Driver");
+MODULE_LICENSE("GPL v2");
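Note on get_pdm_clk() and rockchip_pdm_hw_params() above: a reference clock is accepted only when the requested rate is a power-of-two multiple of one of the three base rates in clkref[], and the resulting clock/rate ratio is then mapped onto the supported 320/640/1280/2560/5120 FS dividers. A standalone sketch of that selection, reusing the clkref table values from the driver above; the rounding mirrors DIV_ROUND_CLOSEST().

#include <stdio.h>

struct clkref { unsigned int sr, clk; };

static const struct clkref clkref[] = {
	{  8000, 40960000 },
	{ 11025, 56448000 },
	{ 12000, 61440000 },
};

/* Pick a reference clock: the sample rate must be a power-of-two multiple
 * (x1, x2, x4, ...) of one of the base rates in the table. */
static unsigned int pick_pdm_clk(unsigned int sr)
{
	unsigned int i, div;

	for (i = 0; i < sizeof(clkref) / sizeof(clkref[0]); i++) {
		if (sr == 0 || sr % clkref[i].sr)
			continue;
		div = sr / clkref[i].sr;
		if ((div & (div - 1)) == 0)	/* power of two? */
			return clkref[i].clk;
	}
	return 0;
}

int main(void)
{
	static const unsigned int rates[] = { 8000, 16000, 44100, 48000, 96000 };
	unsigned int i, clk;

	for (i = 0; i < 5; i++) {
		clk = pick_pdm_clk(rates[i]);
		if (clk)
			printf("%6u Hz -> clk %u, ratio %uFS\n",
			       rates[i], clk, (clk + rates[i] / 2) / rates[i]);
		else
			printf("%6u Hz -> unsupported\n", rates[i]);
	}
	return 0;
}

Running it shows, for example, 48000 Hz pairing with the 61.44 MHz reference at a 1280 FS ratio, which lands in the PDM_CLK_1280FS case of hw_params().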
diff --git a/sound/soc/rockchip/rockchip_pdm.h b/sound/soc/rockchip/rockchip_pdm.h
new file mode 100644
index 000000000000..886b48d128fd
--- /dev/null
+++ b/sound/soc/rockchip/rockchip_pdm.h
@@ -0,0 +1,83 @@
+/*
+ * Rockchip PDM ALSA SoC Digital Audio Interface(DAI) driver
+ *
+ * Copyright (C) 2017 Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ROCKCHIP_PDM_H
+#define _ROCKCHIP_PDM_H
+
+/* PDM REGS */
+#define PDM_SYSCONFIG (0x0000)
+#define PDM_CTRL0 (0x0004)
+#define PDM_CTRL1 (0x0008)
+#define PDM_CLK_CTRL (0x000c)
+#define PDM_HPF_CTRL (0x0010)
+#define PDM_FIFO_CTRL (0x0014)
+#define PDM_DMA_CTRL (0x0018)
+#define PDM_INT_EN (0x001c)
+#define PDM_INT_CLR (0x0020)
+#define PDM_INT_ST (0x0024)
+#define PDM_RXFIFO_DATA (0x0030)
+#define PDM_DATA_VALID (0x0054)
+#define PDM_VERSION (0x0058)
+
+/* PDM_SYSCONFIG */
+#define PDM_RX_MASK (0x1 << 2)
+#define PDM_RX_START (0x1 << 2)
+#define PDM_RX_STOP (0x0 << 2)
+#define PDM_RX_CLR_MASK (0x1 << 0)
+#define PDM_RX_CLR_WR (0x1 << 0)
+#define PDM_RX_CLR_DONE (0x0 << 0)
+
+/* PDM CTRL0 */
+#define PDM_PATH_MSK (0xf << 27)
+#define PDM_PATH3_EN BIT(30)
+#define PDM_PATH2_EN BIT(29)
+#define PDM_PATH1_EN BIT(28)
+#define PDM_PATH0_EN BIT(27)
+#define PDM_HWT_EN BIT(26)
+#define PDM_VDW_MSK (0x1f << 0)
+#define PDM_VDW(X) ((X - 1) << 0)
+
+/* PDM CLK CTRL */
+#define PDM_CLK_MSK BIT(5)
+#define PDM_CLK_EN BIT(5)
+#define PDM_CLK_DIS (0x0 << 5)
+#define PDM_CKP_MSK BIT(3)
+#define PDM_CKP_NORMAL (0x0 << 3)
+#define PDM_CKP_INVERTED BIT(3)
+#define PDM_DS_RATIO_MSK (0x7 << 0)
+#define PDM_CLK_320FS (0x0 << 0)
+#define PDM_CLK_640FS (0x1 << 0)
+#define PDM_CLK_1280FS (0x2 << 0)
+#define PDM_CLK_2560FS (0x3 << 0)
+#define PDM_CLK_5120FS (0x4 << 0)
+
+/* PDM HPF CTRL */
+#define PDM_HPF_LE BIT(3)
+#define PDM_HPF_RE BIT(2)
+#define PDM_HPF_CF_MSK (0x3 << 0)
+#define PDM_HPF_3P79HZ (0x0 << 0)
+#define PDM_HPF_60HZ (0x1 << 0)
+#define PDM_HPF_243HZ (0x2 << 0)
+#define PDM_HPF_493HZ (0x3 << 0)
+
+/* PDM DMA CTRL */
+#define PDM_DMA_RD_MSK BIT(8)
+#define PDM_DMA_RD_EN BIT(8)
+#define PDM_DMA_RD_DIS (0x0 << 8)
+#define PDM_DMA_RDL_MSK (0x7f << 0)
+#define PDM_DMA_RDL(X) ((X - 1) << 0)
+
+#endif /* _ROCKCHIP_PDM_H */
diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c
index fa8101d1e16f..ee5055d47d13 100644
--- a/sound/soc/rockchip/rockchip_spdif.c
+++ b/sound/soc/rockchip/rockchip_spdif.c
@@ -49,8 +49,12 @@ static const struct of_device_id rk_spdif_match[] = {
.data = (void *)RK_SPDIF_RK3066 },
{ .compatible = "rockchip,rk3188-spdif",
.data = (void *)RK_SPDIF_RK3188 },
+ { .compatible = "rockchip,rk3228-spdif",
+ .data = (void *)RK_SPDIF_RK3366 },
{ .compatible = "rockchip,rk3288-spdif",
.data = (void *)RK_SPDIF_RK3288 },
+ { .compatible = "rockchip,rk3328-spdif",
+ .data = (void *)RK_SPDIF_RK3366 },
{ .compatible = "rockchip,rk3366-spdif",
.data = (void *)RK_SPDIF_RK3366 },
{ .compatible = "rockchip,rk3368-spdif",
diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c
index 81a78940967c..55538e333cc8 100644
--- a/sound/soc/samsung/s3c24xx_uda134x.c
+++ b/sound/soc/samsung/s3c24xx_uda134x.c
@@ -44,7 +44,7 @@ struct s3c24xx_uda134x {
static unsigned int rates[33 * 2];
#ifdef ENFORCE_RATES
-static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
.mask = 0,
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig
index 147ebecfed94..1aa5cd77ca24 100644
--- a/sound/soc/sh/Kconfig
+++ b/sound/soc/sh/Kconfig
@@ -38,7 +38,7 @@ config SND_SOC_RCAR
tristate "R-Car series SRU/SCU/SSIU/SSI support"
depends on COMMON_CLK
depends on OF || COMPILE_TEST
- select SND_SIMPLE_CARD
+ select SND_SIMPLE_CARD_UTILS
select REGMAP_MMIO
help
This option enables R-Car SRU/SCU/SSIU/SSI sound support
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index ead520182e26..7c4bdd82bb95 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -301,7 +301,12 @@ struct fsi_master {
spinlock_t lock;
};
-static int fsi_stream_is_play(struct fsi_priv *fsi, struct fsi_stream *io);
+static inline int fsi_stream_is_play(struct fsi_priv *fsi,
+ struct fsi_stream *io)
+{
+ return &fsi->playback == io;
+}
+
/*
* basic read write function
@@ -489,12 +494,6 @@ static void fsi_count_fifo_err(struct fsi_priv *fsi)
/*
* fsi_stream_xx() function
*/
-static inline int fsi_stream_is_play(struct fsi_priv *fsi,
- struct fsi_stream *io)
-{
- return &fsi->playback == io;
-}
-
static inline struct fsi_stream *fsi_stream_get(struct fsi_priv *fsi,
struct snd_pcm_substream *substream)
{
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index d3b0dc145a56..197cb3ec075f 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -308,23 +308,12 @@ static void rsnd_adg_set_ssi_clk(struct rsnd_mod *ssi_mod, u32 val)
}
}
-int rsnd_adg_ssi_clk_stop(struct rsnd_mod *ssi_mod)
-{
- rsnd_adg_set_ssi_clk(ssi_mod, 0);
-
- return 0;
-}
-
-int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *ssi_mod, unsigned int rate)
+int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate)
{
- struct rsnd_priv *priv = rsnd_mod_to_priv(ssi_mod);
struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
struct device *dev = rsnd_priv_to_dev(priv);
- struct rsnd_mod *adg_mod = rsnd_mod_get(adg);
struct clk *clk;
int i;
- u32 data;
- u32 ckr = 0;
int sel_table[] = {
[CLKA] = 0x1,
[CLKB] = 0x2,
@@ -338,30 +327,42 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *ssi_mod, unsigned int rate)
* find suitable clock from
* AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC/AUDIO_CLKI.
*/
- data = 0;
for_each_rsnd_clk(clk, adg, i) {
- if (rate == clk_get_rate(clk)) {
- data = sel_table[i];
- goto found_clock;
- }
+ if (rate == clk_get_rate(clk))
+ return sel_table[i];
}
/*
* find divided clock from BRGA/BRGB
*/
- if (rate == adg->rbga_rate_for_441khz) {
- data = 0x10;
- goto found_clock;
- }
+ if (rate == adg->rbga_rate_for_441khz)
+ return 0x10;
- if (rate == adg->rbgb_rate_for_48khz) {
- data = 0x20;
- goto found_clock;
- }
+ if (rate == adg->rbgb_rate_for_48khz)
+ return 0x20;
return -EIO;
+}
-found_clock:
+int rsnd_adg_ssi_clk_stop(struct rsnd_mod *ssi_mod)
+{
+ rsnd_adg_set_ssi_clk(ssi_mod, 0);
+
+ return 0;
+}
+
+int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *ssi_mod, unsigned int rate)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(ssi_mod);
+ struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct rsnd_mod *adg_mod = rsnd_mod_get(adg);
+ int data;
+ u32 ckr = 0;
+
+ data = rsnd_adg_clk_query(priv, rate);
+ if (data < 0)
+ return data;
rsnd_adg_set_ssi_clk(ssi_mod, data);
@@ -480,6 +481,9 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
if (req_rate[0] % 48000 == 0)
adg->flags = AUDIO_OUT_48;
+ if (of_get_property(np, "clkout-lr-asynchronous", NULL))
+ adg->flags = LRCLK_ASYNC;
+
/*
* This driver is assuming that AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC
* have 44.1kHz or 48kHz base clocks for now.
@@ -555,7 +559,6 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
clk = clk_register_fixed_rate(dev, clkout_name[i],
parent_clk_name, 0,
req_rate[0]);
- adg->clkout[i] = ERR_PTR(-ENOENT);
if (!IS_ERR(clk))
adg->clkout[i] = clk;
}
@@ -580,7 +583,6 @@ int rsnd_adg_probe(struct rsnd_priv *priv)
{
struct rsnd_adg *adg;
struct device *dev = rsnd_priv_to_dev(priv);
- struct device_node *np = dev->of_node;
int ret;
adg = devm_kzalloc(dev, sizeof(*adg), GFP_KERNEL);
@@ -597,9 +599,6 @@ int rsnd_adg_probe(struct rsnd_priv *priv)
rsnd_adg_get_clkin(priv, adg);
rsnd_adg_get_clkout(priv, adg);
- if (of_get_property(np, "clkout-lr-asynchronous", NULL))
- adg->flags = LRCLK_ASYNC;
-
priv->adg = adg;
rsnd_adg_clk_enable(priv);
diff --git a/sound/soc/sh/rcar/cmd.c b/sound/soc/sh/rcar/cmd.c
index d879c010cf03..f1d4fb566892 100644
--- a/sound/soc/sh/rcar/cmd.c
+++ b/sound/soc/sh/rcar/cmd.c
@@ -31,7 +31,7 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
struct device *dev = rsnd_priv_to_dev(priv);
u32 data;
- u32 path[] = {
+ static const u32 path[] = {
[1] = 1 << 0,
[5] = 1 << 8,
[6] = 1 << 12,
@@ -71,7 +71,7 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
} else {
struct rsnd_mod *src = rsnd_io_to_mod_src(io);
- u8 cmd_case[] = {
+ static const u8 cmd_case[] = {
[0] = 0x3,
[1] = 0x3,
[2] = 0x4,
@@ -82,6 +82,9 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
[9] = 0x2,
};
+ if (unlikely(!src))
+ return -EIO;
+
data = path[rsnd_mod_id(src)] |
cmd_case[rsnd_mod_id(src)] << 16;
}
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 8c1f4e2e0c4f..3f2ced26ed37 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -203,27 +203,6 @@ int rsnd_io_is_working(struct rsnd_dai_stream *io)
return !!io->substream;
}
-void rsnd_set_slot(struct rsnd_dai *rdai,
- int slots, int num)
-{
- rdai->slots = slots;
- rdai->slots_num = num;
-}
-
-int rsnd_get_slot(struct rsnd_dai_stream *io)
-{
- struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
-
- return rdai->slots;
-}
-
-int rsnd_get_slot_num(struct rsnd_dai_stream *io)
-{
- struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
-
- return rdai->slots_num;
-}
-
int rsnd_runtime_channel_original(struct rsnd_dai_stream *io)
{
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
@@ -248,13 +227,14 @@ int rsnd_runtime_channel_after_ctu(struct rsnd_dai_stream *io)
int rsnd_runtime_channel_for_ssi(struct rsnd_dai_stream *io)
{
+ struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
int chan = rsnd_io_is_play(io) ?
rsnd_runtime_channel_after_ctu(io) :
rsnd_runtime_channel_original(io);
/* Use Multi SSI */
if (rsnd_runtime_is_ssi_multi(io))
- chan /= rsnd_get_slot_num(io);
+ chan /= rsnd_rdai_ssi_lane_get(rdai);
/* TDM Extend Mode needs 8ch */
if (chan == 6)
@@ -265,12 +245,13 @@ int rsnd_runtime_channel_for_ssi(struct rsnd_dai_stream *io)
int rsnd_runtime_is_ssi_multi(struct rsnd_dai_stream *io)
{
- int slots = rsnd_get_slot_num(io);
+ struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
+ int lane = rsnd_rdai_ssi_lane_get(rdai);
int chan = rsnd_io_is_play(io) ?
rsnd_runtime_channel_after_ctu(io) :
rsnd_runtime_channel_original(io);
- return (chan >= 6) && (slots > 1);
+ return (chan > 2) && (lane > 1);
}
int rsnd_runtime_is_ssi_tdm(struct rsnd_dai_stream *io)
@@ -310,6 +291,24 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
u32 val = 0x76543210;
u32 mask = ~0;
+ /*
+ * *Hardware* L/R and *Software* L/R are inverted.
+ * We need to care about inversion timing to control
+ * Playback/Capture correctly.
+ * The point is [DVC] needs *Hardware* L/R, [MEM] needs *Software* L/R
+ *
+ * sL/R : software L/R
+ * hL/R : hardware L/R
+ * (*) : conversion timing
+ *
+ * Playback
+ * sL/R (*) hL/R hL/R hL/R hL/R hL/R
+ * [MEM] -> [SRC] -> [DVC] -> [CMD] -> [SSIU] -> [SSI] -> codec
+ *
+ * Capture
+ * hL/R hL/R hL/R hL/R hL/R (*) sL/R
+ * codec -> [SSI] -> [SSIU] -> [SRC] -> [DVC] -> [CMD] -> [MEM]
+ */
if (rsnd_io_is_play(io)) {
struct rsnd_mod *src = rsnd_io_to_mod_src(io);
@@ -470,8 +469,7 @@ static int rsnd_status_update(u32 *status,
#define rsnd_dai_call(fn, io, param...) \
({ \
- struct rsnd_priv *priv = rsnd_io_to_priv(io); \
- struct device *dev = rsnd_priv_to_dev(priv); \
+ struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io)); \
struct rsnd_mod *mod; \
int is_play = rsnd_io_is_play(io); \
int ret = 0, i; \
@@ -532,6 +530,24 @@ static void rsnd_dai_disconnect(struct rsnd_mod *mod,
io->mod[type] = NULL;
}
+int rsnd_rdai_channels_ctrl(struct rsnd_dai *rdai,
+ int max_channels)
+{
+ if (max_channels > 0)
+ rdai->max_channels = max_channels;
+
+ return rdai->max_channels;
+}
+
+int rsnd_rdai_ssi_lane_ctrl(struct rsnd_dai *rdai,
+ int ssi_lane)
+{
+ if (ssi_lane > 0)
+ rdai->ssi_lane = ssi_lane;
+
+ return rdai->ssi_lane;
+}
+
struct rsnd_dai *rsnd_rdai_get(struct rsnd_priv *priv, int id)
{
if ((id < 0) || (id >= rsnd_rdai_nr(priv)))
@@ -551,40 +567,6 @@ static struct rsnd_dai *rsnd_dai_to_rdai(struct snd_soc_dai *dai)
/*
* rsnd_soc_dai functions
*/
-int rsnd_dai_pointer_offset(struct rsnd_dai_stream *io, int additional)
-{
- struct snd_pcm_substream *substream = io->substream;
- struct snd_pcm_runtime *runtime = substream->runtime;
- int pos = io->byte_pos + additional;
-
- pos %= (runtime->periods * io->byte_per_period);
-
- return pos;
-}
-
-bool rsnd_dai_pointer_update(struct rsnd_dai_stream *io, int byte)
-{
- io->byte_pos += byte;
-
- if (io->byte_pos >= io->next_period_byte) {
- struct snd_pcm_substream *substream = io->substream;
- struct snd_pcm_runtime *runtime = substream->runtime;
-
- io->period_pos++;
- io->next_period_byte += io->byte_per_period;
-
- if (io->period_pos >= runtime->periods) {
- io->byte_pos = 0;
- io->period_pos = 0;
- io->next_period_byte = io->byte_per_period;
- }
-
- return true;
- }
-
- return false;
-}
-
void rsnd_dai_period_elapsed(struct rsnd_dai_stream *io)
{
struct snd_pcm_substream *substream = io->substream;
@@ -602,15 +584,7 @@ void rsnd_dai_period_elapsed(struct rsnd_dai_stream *io)
static void rsnd_dai_stream_init(struct rsnd_dai_stream *io,
struct snd_pcm_substream *substream)
{
- struct snd_pcm_runtime *runtime = substream->runtime;
-
io->substream = substream;
- io->byte_pos = 0;
- io->period_pos = 0;
- io->byte_per_period = runtime->period_size *
- runtime->channels *
- samples_to_bytes(runtime, 1);
- io->next_period_byte = io->byte_per_period;
}
static void rsnd_dai_stream_quit(struct rsnd_dai_stream *io)
@@ -749,9 +723,13 @@ static int rsnd_soc_set_dai_tdm_slot(struct snd_soc_dai *dai,
struct device *dev = rsnd_priv_to_dev(priv);
switch (slots) {
+ case 2:
case 6:
+ case 8:
+ case 16:
/* TDM Extend Mode */
- rsnd_set_slot(rdai, slots, 1);
+ rsnd_rdai_channels_set(rdai, slots);
+ rsnd_rdai_ssi_lane_set(rdai, 1);
break;
default:
dev_err(dev, "unsupported TDM slots (%d)\n", slots);
@@ -761,22 +739,177 @@ static int rsnd_soc_set_dai_tdm_slot(struct snd_soc_dai *dai,
return 0;
}
+static unsigned int rsnd_soc_hw_channels_list[] = {
+ 2, 6, 8, 16,
+};
+
+static unsigned int rsnd_soc_hw_rate_list[] = {
+ 8000,
+ 11025,
+ 16000,
+ 22050,
+ 32000,
+ 44100,
+ 48000,
+ 64000,
+ 88200,
+ 96000,
+ 176400,
+ 192000,
+};
+
+static int rsnd_soc_hw_rule(struct rsnd_priv *priv,
+ unsigned int *list, int list_num,
+ struct snd_interval *baseline, struct snd_interval *iv)
+{
+ struct snd_interval p;
+ unsigned int rate;
+ int i;
+
+ snd_interval_any(&p);
+ p.min = UINT_MAX;
+ p.max = 0;
+
+ for (i = 0; i < list_num; i++) {
+
+ if (!snd_interval_test(iv, list[i]))
+ continue;
+
+ rate = rsnd_ssi_clk_query(priv,
+ baseline->min, list[i], NULL);
+ if (rate > 0) {
+ p.min = min(p.min, list[i]);
+ p.max = max(p.max, list[i]);
+ }
+
+ rate = rsnd_ssi_clk_query(priv,
+ baseline->max, list[i], NULL);
+ if (rate > 0) {
+ p.min = min(p.min, list[i]);
+ p.max = max(p.max, list[i]);
+ }
+ }
+
+ return snd_interval_refine(iv, &p);
+}
+
+static int rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_interval *ic_ = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_interval *ir = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval ic;
+ struct snd_soc_dai *dai = rule->private;
+ struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+ struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
+
+ /*
+ * possible sampling rate limitation is same as
+ * 2ch if it supports multi ssi
+ */
+ ic = *ic_;
+ if (1 < rsnd_rdai_ssi_lane_get(rdai)) {
+ ic.min = 2;
+ ic.max = 2;
+ }
+
+ return rsnd_soc_hw_rule(priv, rsnd_soc_hw_rate_list,
+ ARRAY_SIZE(rsnd_soc_hw_rate_list),
+ &ic, ir);
+}
+
+
+static int rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_interval *ic_ = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_interval *ir = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval ic;
+ struct snd_soc_dai *dai = rule->private;
+ struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+ struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
+
+ /*
+ * possible sampling rate limitation is same as
+ * 2ch if it supports multi ssi
+ */
+ ic = *ic_;
+ if (1 < rsnd_rdai_ssi_lane_get(rdai)) {
+ ic.min = 2;
+ ic.max = 2;
+ }
+
+ return rsnd_soc_hw_rule(priv, rsnd_soc_hw_channels_list,
+ ARRAY_SIZE(rsnd_soc_hw_channels_list),
+ ir, &ic);
+}
+
+static void rsnd_soc_hw_constraint(struct snd_pcm_runtime *runtime,
+ struct snd_soc_dai *dai)
+{
+ struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+ struct snd_pcm_hw_constraint_list *constraint = &rdai->constraint;
+ unsigned int max_channels = rsnd_rdai_channels_get(rdai);
+ int i;
+
+ /*
+ * Channel Limitation
+ * It depends on Platform design
+ */
+ constraint->list = rsnd_soc_hw_channels_list;
+ constraint->count = 0;
+ constraint->mask = 0;
+
+ for (i = 0; i < ARRAY_SIZE(rsnd_soc_hw_channels_list); i++) {
+ if (rsnd_soc_hw_channels_list[i] > max_channels)
+ break;
+ constraint->count = i + 1;
+ }
+
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS, constraint);
+
+ /*
+ * Sampling Rate / Channel Limitation
+ * It depends on Clock Master Mode
+ */
+ if (!rsnd_rdai_is_clk_master(rdai))
+ return;
+
+ snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ rsnd_soc_hw_rule_rate, dai,
+ SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+ snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ rsnd_soc_hw_rule_channels, dai,
+ SNDRV_PCM_HW_PARAM_RATE, -1);
+}
+
static int rsnd_soc_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+ struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
+ int ret;
+
+ /* rsnd_io_to_runtime() is not yet enabled here */
+ rsnd_soc_hw_constraint(substream->runtime, dai);
/*
* call rsnd_dai_call without spinlock
*/
- return rsnd_dai_call(nolock_start, io, priv);
+ ret = rsnd_dai_call(nolock_start, io, priv);
+ if (ret < 0)
+ rsnd_dai_call(nolock_stop, io, priv);
+
+ return ret;
}
static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+ struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
/*
@@ -820,32 +953,132 @@ void rsnd_parse_connect_common(struct rsnd_dai *rdai,
of_node_put(node);
}
-static int rsnd_dai_probe(struct rsnd_priv *priv)
+static struct device_node *rsnd_dai_of_node(struct rsnd_priv *priv,
+ int *is_graph)
{
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct device_node *np = dev->of_node;
struct device_node *dai_node;
- struct device_node *dai_np;
+ struct device_node *ret;
+
+ *is_graph = 0;
+
+ /*
+ * parse both previous dai (= rcar_sound,dai), and
+ * graph dai (= ports/port)
+ */
+ dai_node = of_get_child_by_name(np, RSND_NODE_DAI);
+ if (dai_node) {
+ ret = dai_node;
+ goto of_node_compatible;
+ }
+
+ ret = np;
+
+ dai_node = of_graph_get_next_endpoint(np, NULL);
+ if (dai_node)
+ goto of_node_graph;
+
+ return NULL;
+
+of_node_graph:
+ *is_graph = 1;
+of_node_compatible:
+ of_node_put(dai_node);
+
+ return ret;
+}
+
+static void __rsnd_dai_probe(struct rsnd_priv *priv,
+ struct device_node *dai_np,
+ int dai_i, int is_graph)
+{
struct device_node *playback, *capture;
struct rsnd_dai_stream *io_playback;
struct rsnd_dai_stream *io_capture;
- struct snd_soc_dai_driver *rdrv, *drv;
+ struct snd_soc_dai_driver *drv;
struct rsnd_dai *rdai;
struct device *dev = rsnd_priv_to_dev(priv);
- int nr, dai_i, io_i;
- int ret;
+ int io_i;
+
+ rdai = rsnd_rdai_get(priv, dai_i);
+ drv = priv->daidrv + dai_i;
+ io_playback = &rdai->playback;
+ io_capture = &rdai->capture;
+
+ snprintf(rdai->name, RSND_DAI_NAME_SIZE, "rsnd-dai.%d", dai_i);
+
+ rdai->priv = priv;
+ drv->name = rdai->name;
+ drv->ops = &rsnd_soc_dai_ops;
+
+ snprintf(rdai->playback.name, RSND_DAI_NAME_SIZE,
+ "DAI%d Playback", dai_i);
+ drv->playback.rates = RSND_RATES;
+ drv->playback.formats = RSND_FMTS;
+ drv->playback.channels_min = 2;
+ drv->playback.channels_max = 16;
+ drv->playback.stream_name = rdai->playback.name;
+
+ snprintf(rdai->capture.name, RSND_DAI_NAME_SIZE,
+ "DAI%d Capture", dai_i);
+ drv->capture.rates = RSND_RATES;
+ drv->capture.formats = RSND_FMTS;
+ drv->capture.channels_min = 2;
+ drv->capture.channels_max = 16;
+ drv->capture.stream_name = rdai->capture.name;
+
+ rdai->playback.rdai = rdai;
+ rdai->capture.rdai = rdai;
+ rsnd_rdai_channels_set(rdai, 2); /* default 2ch */
+ rsnd_rdai_ssi_lane_set(rdai, 1); /* default 1lane */
+
+ for (io_i = 0;; io_i++) {
+ playback = of_parse_phandle(dai_np, "playback", io_i);
+ capture = of_parse_phandle(dai_np, "capture", io_i);
+
+ if (!playback && !capture)
+ break;
- dai_node = rsnd_dai_of_node(priv);
- nr = of_get_child_count(dai_node);
- if (!nr) {
- ret = -EINVAL;
- goto rsnd_dai_probe_done;
+ rsnd_parse_connect_ssi(rdai, playback, capture);
+ rsnd_parse_connect_src(rdai, playback, capture);
+ rsnd_parse_connect_ctu(rdai, playback, capture);
+ rsnd_parse_connect_mix(rdai, playback, capture);
+ rsnd_parse_connect_dvc(rdai, playback, capture);
+
+ of_node_put(playback);
+ of_node_put(capture);
}
+ dev_dbg(dev, "%s (%s/%s)\n", rdai->name,
+ rsnd_io_to_mod_ssi(io_playback) ? "play" : " -- ",
+ rsnd_io_to_mod_ssi(io_capture) ? "capture" : " -- ");
+}
+
+static int rsnd_dai_probe(struct rsnd_priv *priv)
+{
+ struct device_node *dai_node;
+ struct device_node *dai_np;
+ struct snd_soc_dai_driver *rdrv;
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct rsnd_dai *rdai;
+ int nr;
+ int is_graph;
+ int dai_i;
+
+ dai_node = rsnd_dai_of_node(priv, &is_graph);
+ if (is_graph)
+ nr = of_graph_get_endpoint_count(dai_node);
+ else
+ nr = of_get_child_count(dai_node);
+
+ if (!nr)
+ return -EINVAL;
+
rdrv = devm_kzalloc(dev, sizeof(*rdrv) * nr, GFP_KERNEL);
rdai = devm_kzalloc(dev, sizeof(*rdai) * nr, GFP_KERNEL);
- if (!rdrv || !rdai) {
- ret = -ENOMEM;
- goto rsnd_dai_probe_done;
- }
+ if (!rdrv || !rdai)
+ return -ENOMEM;
priv->rdai_nr = nr;
priv->daidrv = rdrv;
@@ -855,68 +1088,18 @@ static int rsnd_dai_probe(struct rsnd_priv *priv)
* parse all dai
*/
dai_i = 0;
- for_each_child_of_node(dai_node, dai_np) {
- rdai = rsnd_rdai_get(priv, dai_i);
- drv = rdrv + dai_i;
- io_playback = &rdai->playback;
- io_capture = &rdai->capture;
-
- snprintf(rdai->name, RSND_DAI_NAME_SIZE, "rsnd-dai.%d", dai_i);
-
- rdai->priv = priv;
- drv->name = rdai->name;
- drv->ops = &rsnd_soc_dai_ops;
-
- snprintf(rdai->playback.name, RSND_DAI_NAME_SIZE,
- "DAI%d Playback", dai_i);
- drv->playback.rates = RSND_RATES;
- drv->playback.formats = RSND_FMTS;
- drv->playback.channels_min = 2;
- drv->playback.channels_max = 6;
- drv->playback.stream_name = rdai->playback.name;
-
- snprintf(rdai->capture.name, RSND_DAI_NAME_SIZE,
- "DAI%d Capture", dai_i);
- drv->capture.rates = RSND_RATES;
- drv->capture.formats = RSND_FMTS;
- drv->capture.channels_min = 2;
- drv->capture.channels_max = 6;
- drv->capture.stream_name = rdai->capture.name;
-
- rdai->playback.rdai = rdai;
- rdai->capture.rdai = rdai;
- rsnd_set_slot(rdai, 2, 1); /* default */
-
- for (io_i = 0;; io_i++) {
- playback = of_parse_phandle(dai_np, "playback", io_i);
- capture = of_parse_phandle(dai_np, "capture", io_i);
-
- if (!playback && !capture)
- break;
-
- rsnd_parse_connect_ssi(rdai, playback, capture);
- rsnd_parse_connect_src(rdai, playback, capture);
- rsnd_parse_connect_ctu(rdai, playback, capture);
- rsnd_parse_connect_mix(rdai, playback, capture);
- rsnd_parse_connect_dvc(rdai, playback, capture);
-
- of_node_put(playback);
- of_node_put(capture);
+ if (is_graph) {
+ for_each_endpoint_of_node(dai_node, dai_np) {
+ __rsnd_dai_probe(priv, dai_np, dai_i, is_graph);
+ rsnd_ssi_parse_hdmi_connection(priv, dai_np, dai_i);
+ dai_i++;
}
-
- dai_i++;
-
- dev_dbg(dev, "%s (%s/%s)\n", rdai->name,
- rsnd_io_to_mod_ssi(io_playback) ? "play" : " -- ",
- rsnd_io_to_mod_ssi(io_capture) ? "capture" : " -- ");
+ } else {
+ for_each_child_of_node(dai_node, dai_np)
+ __rsnd_dai_probe(priv, dai_np, dai_i++, is_graph);
}
- ret = 0;
-
-rsnd_dai_probe_done:
- of_node_put(dai_node);
-
- return ret;
+ return 0;
}
/*
@@ -965,12 +1148,14 @@ static int rsnd_hw_params(struct snd_pcm_substream *substream,
static snd_pcm_uframes_t rsnd_pointer(struct snd_pcm_substream *substream)
{
- struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_dai *dai = rsnd_substream_to_dai(substream);
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
+ snd_pcm_uframes_t pointer = 0;
+
+ rsnd_dai_call(pointer, io, &pointer);
- return bytes_to_frames(runtime, io->byte_pos);
+ return pointer;
}
static struct snd_pcm_ops rsnd_pcm_ops = {
@@ -1033,6 +1218,9 @@ static int rsnd_kctrl_put(struct snd_kcontrol *kctrl,
struct rsnd_kctrl_cfg *cfg = kcontrol_to_cfg(kctrl);
int i, change = 0;
+ if (!cfg->accept(cfg->io))
+ return 0;
+
for (i = 0; i < cfg->size; i++) {
if (cfg->texts) {
change |= (uc->value.enumerated.item[i] != cfg->val[i]);
@@ -1049,6 +1237,18 @@ static int rsnd_kctrl_put(struct snd_kcontrol *kctrl,
return change;
}
+int rsnd_kctrl_accept_anytime(struct rsnd_dai_stream *io)
+{
+ return 1;
+}
+
+int rsnd_kctrl_accept_runtime(struct rsnd_dai_stream *io)
+{
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+
+ return !!runtime;
+}
+
struct rsnd_kctrl_cfg *rsnd_kctrl_init_m(struct rsnd_kctrl_cfg_m *cfg)
{
cfg->cfg.val = cfg->val;
@@ -1067,6 +1267,7 @@ int rsnd_kctrl_new(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct snd_soc_pcm_runtime *rtd,
const unsigned char *name,
+ int (*accept)(struct rsnd_dai_stream *io),
void (*update)(struct rsnd_dai_stream *io,
struct rsnd_mod *mod),
struct rsnd_kctrl_cfg *cfg,
@@ -1101,6 +1302,7 @@ int rsnd_kctrl_new(struct rsnd_mod *mod,
cfg->texts = texts;
cfg->max = max;
cfg->size = size;
+ cfg->accept = accept;
cfg->update = update;
cfg->card = card;
cfg->kctrl = kctrl;
@@ -1332,7 +1534,7 @@ static int rsnd_resume(struct device *dev)
return 0;
}
-static struct dev_pm_ops rsnd_pm_ops = {
+static const struct dev_pm_ops rsnd_pm_ops = {
.suspend = rsnd_suspend,
.resume = rsnd_resume,
};
diff --git a/sound/soc/sh/rcar/ctu.c b/sound/soc/sh/rcar/ctu.c
index 9dcc1f9db026..4ba8f2fe7a4c 100644
--- a/sound/soc/sh/rcar/ctu.c
+++ b/sound/soc/sh/rcar/ctu.c
@@ -279,12 +279,14 @@ static int rsnd_ctu_pcm_new(struct rsnd_mod *mod,
/* CTU Pass */
ret = rsnd_kctrl_new_m(mod, io, rtd, "CTU Pass",
+ rsnd_kctrl_accept_anytime,
NULL,
&ctu->pass, RSND_MAX_CHANNELS,
0xC);
/* ROW0 */
ret = rsnd_kctrl_new_m(mod, io, rtd, "CTU SV0",
+ rsnd_kctrl_accept_anytime,
NULL,
&ctu->sv0, RSND_MAX_CHANNELS,
0x00FFFFFF);
@@ -293,6 +295,7 @@ static int rsnd_ctu_pcm_new(struct rsnd_mod *mod,
/* ROW1 */
ret = rsnd_kctrl_new_m(mod, io, rtd, "CTU SV1",
+ rsnd_kctrl_accept_anytime,
NULL,
&ctu->sv1, RSND_MAX_CHANNELS,
0x00FFFFFF);
@@ -301,6 +304,7 @@ static int rsnd_ctu_pcm_new(struct rsnd_mod *mod,
/* ROW2 */
ret = rsnd_kctrl_new_m(mod, io, rtd, "CTU SV2",
+ rsnd_kctrl_accept_anytime,
NULL,
&ctu->sv2, RSND_MAX_CHANNELS,
0x00FFFFFF);
@@ -309,6 +313,7 @@ static int rsnd_ctu_pcm_new(struct rsnd_mod *mod,
/* ROW3 */
ret = rsnd_kctrl_new_m(mod, io, rtd, "CTU SV3",
+ rsnd_kctrl_accept_anytime,
NULL,
&ctu->sv3, RSND_MAX_CHANNELS,
0x00FFFFFF);
@@ -317,6 +322,7 @@ static int rsnd_ctu_pcm_new(struct rsnd_mod *mod,
/* Reset */
ret = rsnd_kctrl_new_s(mod, io, rtd, "CTU Reset",
+ rsnd_kctrl_accept_anytime,
rsnd_ctu_value_reset,
&ctu->reset, 1);
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 241cb3b08a07..60aa5e96a49f 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -25,6 +25,7 @@
struct rsnd_dmaen {
struct dma_chan *chan;
+ dma_cookie_t cookie;
dma_addr_t dma_buf;
unsigned int dma_len;
unsigned int dma_period;
@@ -103,10 +104,6 @@ static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
* In Gen2 case, it are Audio-DMAC, and Audio-DMAC-peri-peri.
* But, Audio-DMAC-peri-peri doesn't have interrupt,
* and this driver is assuming that here.
- *
- * If Audio-DMAC-peri-peri has interrpt,
- * rsnd_dai_pointer_update() will be called twice,
- * ant it will breaks io->byte_pos
*/
spin_lock_irqsave(&priv->lock, flags);
@@ -121,7 +118,7 @@ static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
*/
rsnd_dmaen_sync(dmaen, io, dmaen->dma_cnt + 2);
- elapsed = rsnd_dai_pointer_update(io, io->byte_per_period);
+ elapsed = true;
dmaen->dma_cnt++;
}
@@ -292,7 +289,8 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
for (i = 0; i < 2; i++)
rsnd_dmaen_sync(dmaen, io, i);
- if (dmaengine_submit(desc) < 0) {
+ dmaen->cookie = dmaengine_submit(desc);
+ if (dmaen->cookie < 0) {
dev_err(dev, "dmaengine_submit() fail\n");
return -EIO;
}
@@ -348,12 +346,34 @@ static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
return 0;
}
+static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ snd_pcm_uframes_t *pointer)
+{
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
+ struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
+ struct dma_tx_state state;
+ enum dma_status status;
+ unsigned int pos = 0;
+
+ status = dmaengine_tx_status(dmaen->chan, dmaen->cookie, &state);
+ if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
+ if (state.residue > 0 && state.residue <= dmaen->dma_len)
+ pos = dmaen->dma_len - state.residue;
+ }
+ *pointer = bytes_to_frames(runtime, pos);
+
+ return 0;
+}
+
static struct rsnd_mod_ops rsnd_dmaen_ops = {
.name = "audmac",
.nolock_start = rsnd_dmaen_nolock_start,
.nolock_stop = rsnd_dmaen_nolock_stop,
.start = rsnd_dmaen_start,
.stop = rsnd_dmaen_stop,
+ .pointer= rsnd_dmaen_pointer,
};
/*
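Note on the rsnd_dmaen_pointer() hunk above: the PCM pointer is now derived from the DMA engine's own bookkeeping. dmaengine_tx_status() reports the residue (bytes of the cyclic buffer still outstanding), so the position is dma_len minus residue, guarded against out-of-range reports and converted from bytes to frames. A minimal sketch of that arithmetic with the frame size written out explicitly; the channel count and sample width are example values, not taken from the driver.

#include <stdio.h>

/* Convert a DMA residue report into a PCM pointer, mirroring the
 * dma_len - residue calculation and its bounds check. */
static unsigned int residue_to_frames(unsigned int dma_len, unsigned int residue,
				      unsigned int channels, unsigned int sample_bytes)
{
	unsigned int pos_bytes = 0;

	if (residue > 0 && residue <= dma_len)
		pos_bytes = dma_len - residue;

	return pos_bytes / (channels * sample_bytes);	/* bytes_to_frames() */
}

int main(void)
{
	/* 64 KiB cyclic buffer, stereo S16_LE, 16 KiB still to transfer. */
	printf("%u frames\n", residue_to_frames(65536, 16384, 2, 2)); /* 12288 */
	return 0;
}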
diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c
index 463de8360985..99d2d9459e75 100644
--- a/sound/soc/sh/rcar/dvc.c
+++ b/sound/soc/sh/rcar/dvc.c
@@ -249,16 +249,18 @@ static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
struct snd_soc_pcm_runtime *rtd)
{
struct rsnd_dvc *dvc = rsnd_mod_to_dvc(mod);
+ struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
int is_play = rsnd_io_is_play(io);
- int slots = rsnd_get_slot(io);
+ int channels = rsnd_rdai_channels_get(rdai);
int ret;
/* Volume */
ret = rsnd_kctrl_new_m(mod, io, rtd,
is_play ?
"DVC Out Playback Volume" : "DVC In Capture Volume",
+ rsnd_kctrl_accept_anytime,
rsnd_dvc_volume_update,
- &dvc->volume, slots,
+ &dvc->volume, channels,
0x00800000 - 1);
if (ret < 0)
return ret;
@@ -267,8 +269,9 @@ static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
ret = rsnd_kctrl_new_m(mod, io, rtd,
is_play ?
"DVC Out Mute Switch" : "DVC In Mute Switch",
+ rsnd_kctrl_accept_anytime,
rsnd_dvc_volume_update,
- &dvc->mute, slots,
+ &dvc->mute, channels,
1);
if (ret < 0)
return ret;
@@ -277,6 +280,7 @@ static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
ret = rsnd_kctrl_new_s(mod, io, rtd,
is_play ?
"DVC Out Ramp Switch" : "DVC In Ramp Switch",
+ rsnd_kctrl_accept_anytime,
rsnd_dvc_volume_update,
&dvc->ren, 1);
if (ret < 0)
@@ -285,6 +289,7 @@ static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
ret = rsnd_kctrl_new_e(mod, io, rtd,
is_play ?
"DVC Out Ramp Up Rate" : "DVC In Ramp Up Rate",
+ rsnd_kctrl_accept_anytime,
rsnd_dvc_volume_update,
&dvc->rup,
dvc_ramp_rate);
@@ -294,6 +299,7 @@ static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
ret = rsnd_kctrl_new_e(mod, io, rtd,
is_play ?
"DVC Out Ramp Down Rate" : "DVC In Ramp Down Rate",
+ rsnd_kctrl_accept_anytime,
rsnd_dvc_volume_update,
&dvc->rdown,
dvc_ramp_rate);
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 4b0980728e13..ee00e3516911 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -219,6 +219,8 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
RSND_GEN_S_REG(SSI_SYS_STATUS5, 0x884),
RSND_GEN_S_REG(SSI_SYS_STATUS6, 0x888),
RSND_GEN_S_REG(SSI_SYS_STATUS7, 0x88c),
+ RSND_GEN_S_REG(HDMI0_SEL, 0x9e0),
+ RSND_GEN_S_REG(HDMI1_SEL, 0x9e4),
/* FIXME: it needs SSI_MODE2/3 in the future */
RSND_GEN_M_REG(SSI_BUSIF_MODE, 0x0, 0x80),
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 323af41ecfcb..99c57611df88 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -18,6 +18,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/sh_dma.h>
#include <linux/workqueue.h>
@@ -170,6 +171,8 @@ enum rsnd_reg {
RSND_REG_SSI_SYS_STATUS5,
RSND_REG_SSI_SYS_STATUS6,
RSND_REG_SSI_SYS_STATUS7,
+ RSND_REG_HDMI0_SEL,
+ RSND_REG_HDMI1_SEL,
/* SSI */
RSND_REG_SSICR,
@@ -268,6 +271,9 @@ struct rsnd_mod_ops {
struct rsnd_dai_stream *io,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params);
+ int (*pointer)(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ snd_pcm_uframes_t *pointer);
int (*fallback)(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv);
@@ -305,6 +311,7 @@ struct rsnd_mod {
* H 0: pcm_new
* H 0: fallback
* H 0: hw_params
+ * H 0: pointer
*/
#define __rsnd_mod_shift_nolock_start 0
#define __rsnd_mod_shift_nolock_stop 0
@@ -318,6 +325,7 @@ struct rsnd_mod {
#define __rsnd_mod_shift_pcm_new 28 /* always called */
#define __rsnd_mod_shift_fallback 28 /* always called */
#define __rsnd_mod_shift_hw_params 28 /* always called */
+#define __rsnd_mod_shift_pointer 28 /* always called */
#define __rsnd_mod_add_probe 0
#define __rsnd_mod_add_remove 0
@@ -331,6 +339,7 @@ struct rsnd_mod {
#define __rsnd_mod_add_pcm_new 0
#define __rsnd_mod_add_fallback 0
#define __rsnd_mod_add_hw_params 0
+#define __rsnd_mod_add_pointer 0
#define __rsnd_mod_call_probe 0
#define __rsnd_mod_call_remove 0
@@ -342,6 +351,7 @@ struct rsnd_mod {
#define __rsnd_mod_call_pcm_new 0
#define __rsnd_mod_call_fallback 0
#define __rsnd_mod_call_hw_params 0
+#define __rsnd_mod_call_pointer 0
#define __rsnd_mod_call_nolock_start 0
#define __rsnd_mod_call_nolock_stop 1
@@ -389,11 +399,6 @@ void rsnd_parse_connect_common(struct rsnd_dai *rdai,
struct device_node *playback,
struct device_node *capture);
-void rsnd_set_slot(struct rsnd_dai *rdai,
- int slots, int slots_total);
-int rsnd_get_slot(struct rsnd_dai_stream *io);
-int rsnd_get_slot_num(struct rsnd_dai_stream *io);
-
int rsnd_runtime_channel_original(struct rsnd_dai_stream *io);
int rsnd_runtime_channel_after_ctu(struct rsnd_dai_stream *io);
int rsnd_runtime_channel_for_ssi(struct rsnd_dai_stream *io);
@@ -420,13 +425,8 @@ struct rsnd_dai_stream {
char name[RSND_DAI_NAME_SIZE];
struct snd_pcm_substream *substream;
struct rsnd_mod *mod[RSND_MOD_MAX];
- struct rsnd_dai_path_info *info; /* rcar_snd.h */
struct rsnd_dai *rdai;
u32 parent_ssi_status;
- int byte_pos;
- int period_pos;
- int byte_per_period;
- int next_period_byte;
};
#define rsnd_io_to_mod(io, i) ((i) < RSND_MOD_MAX ? (io)->mod[(i)] : NULL)
#define rsnd_io_to_mod_ssi(io) rsnd_io_to_mod((io), RSND_MOD_SSI)
@@ -449,9 +449,10 @@ struct rsnd_dai {
struct rsnd_dai_stream playback;
struct rsnd_dai_stream capture;
struct rsnd_priv *priv;
+ struct snd_pcm_hw_constraint_list constraint;
- int slots;
- int slots_num;
+ int max_channels; /* 2ch - 16ch */
+ int ssi_lane; /* 1lane - 4lane */
unsigned int clk_master:1;
unsigned int bit_clk_inv:1;
@@ -471,13 +472,24 @@ struct rsnd_dai {
struct rsnd_dai *rsnd_rdai_get(struct rsnd_priv *priv, int id);
-bool rsnd_dai_pointer_update(struct rsnd_dai_stream *io, int cnt);
+#define rsnd_rdai_channels_set(rdai, max_channels) \
+ rsnd_rdai_channels_ctrl(rdai, max_channels)
+#define rsnd_rdai_channels_get(rdai) \
+ rsnd_rdai_channels_ctrl(rdai, 0)
+int rsnd_rdai_channels_ctrl(struct rsnd_dai *rdai,
+ int max_channels);
+
+#define rsnd_rdai_ssi_lane_set(rdai, ssi_lane) \
+ rsnd_rdai_ssi_lane_ctrl(rdai, ssi_lane)
+#define rsnd_rdai_ssi_lane_get(rdai) \
+ rsnd_rdai_ssi_lane_ctrl(rdai, 0)
+int rsnd_rdai_ssi_lane_ctrl(struct rsnd_dai *rdai,
+ int ssi_lane);
+
void rsnd_dai_period_elapsed(struct rsnd_dai_stream *io);
-int rsnd_dai_pointer_offset(struct rsnd_dai_stream *io, int additional);
int rsnd_dai_connect(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
enum rsnd_mod_type type);
-#define rsnd_dai_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_DAI)
/*
* R-Car Gen1/Gen2
@@ -491,6 +503,7 @@ phys_addr_t rsnd_gen_get_phy_addr(struct rsnd_priv *priv, int reg_id);
/*
* R-Car ADG
*/
+int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate);
int rsnd_adg_ssi_clk_stop(struct rsnd_mod *mod);
int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *mod, unsigned int rate);
int rsnd_adg_probe(struct rsnd_priv *priv);
@@ -596,6 +609,7 @@ struct rsnd_kctrl_cfg {
unsigned int size;
u32 *val;
const char * const *texts;
+ int (*accept)(struct rsnd_dai_stream *io);
void (*update)(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
struct rsnd_dai_stream *io;
struct snd_card *card;
@@ -613,12 +627,15 @@ struct rsnd_kctrl_cfg_s {
u32 val;
};
+int rsnd_kctrl_accept_anytime(struct rsnd_dai_stream *io);
+int rsnd_kctrl_accept_runtime(struct rsnd_dai_stream *io);
struct rsnd_kctrl_cfg *rsnd_kctrl_init_m(struct rsnd_kctrl_cfg_m *cfg);
struct rsnd_kctrl_cfg *rsnd_kctrl_init_s(struct rsnd_kctrl_cfg_s *cfg);
int rsnd_kctrl_new(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct snd_soc_pcm_runtime *rtd,
const unsigned char *name,
+ int (*accept)(struct rsnd_dai_stream *io),
void (*update)(struct rsnd_dai_stream *io,
struct rsnd_mod *mod),
struct rsnd_kctrl_cfg *cfg,
@@ -626,16 +643,16 @@ int rsnd_kctrl_new(struct rsnd_mod *mod,
int size,
u32 max);
-#define rsnd_kctrl_new_m(mod, io, rtd, name, update, cfg, size, max) \
- rsnd_kctrl_new(mod, io, rtd, name, update, rsnd_kctrl_init_m(cfg), \
+#define rsnd_kctrl_new_m(mod, io, rtd, name, accept, update, cfg, size, max) \
+ rsnd_kctrl_new(mod, io, rtd, name, accept, update, rsnd_kctrl_init_m(cfg), \
NULL, size, max)
-#define rsnd_kctrl_new_s(mod, io, rtd, name, update, cfg, max) \
- rsnd_kctrl_new(mod, io, rtd, name, update, rsnd_kctrl_init_s(cfg), \
+#define rsnd_kctrl_new_s(mod, io, rtd, name, accept, update, cfg, max) \
+ rsnd_kctrl_new(mod, io, rtd, name, accept, update, rsnd_kctrl_init_s(cfg), \
NULL, 1, max)
-#define rsnd_kctrl_new_e(mod, io, rtd, name, update, cfg, texts) \
- rsnd_kctrl_new(mod, io, rtd, name, update, rsnd_kctrl_init_s(cfg), \
+#define rsnd_kctrl_new_e(mod, io, rtd, name, accept, update, cfg, texts) \
+ rsnd_kctrl_new(mod, io, rtd, name, accept, update, rsnd_kctrl_init_s(cfg), \
texts, 1, ARRAY_SIZE(texts))
/*
@@ -648,6 +665,13 @@ int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod);
int rsnd_ssi_use_busif(struct rsnd_dai_stream *io);
u32 rsnd_ssi_multi_slaves_runtime(struct rsnd_dai_stream *io);
+#define RSND_SSI_HDMI_PORT0 0xf0
+#define RSND_SSI_HDMI_PORT1 0xf1
+int rsnd_ssi_hdmi_port(struct rsnd_dai_stream *io);
+void rsnd_ssi_parse_hdmi_connection(struct rsnd_priv *priv,
+ struct device_node *endpoint,
+ int dai_i);
+
#define rsnd_ssi_is_pin_sharing(io) \
__rsnd_ssi_is_pin_sharing(rsnd_io_to_mod_ssi(io))
int __rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod);
@@ -656,6 +680,8 @@ int __rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod);
void rsnd_parse_connect_ssi(struct rsnd_dai *rdai,
struct device_node *playback,
struct device_node *capture);
+unsigned int rsnd_ssi_clk_query(struct rsnd_priv *priv,
+ int param1, int param2, int *idx);
/*
* R-Car SSIU
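The kcontrol helpers in rsnd.h now take an accept callback so each control can decide when a write is allowed. The header only declares rsnd_kctrl_accept_anytime() and rsnd_kctrl_accept_runtime(); as a hedged sketch (an assumption about the policy, not the driver's actual implementation), the two could look like:

/* Sketch with assumed semantics: "anytime" always accepts, "runtime" only
 * accepts while a PCM runtime is attached to the stream.
 */
static int example_accept_anytime(struct rsnd_dai_stream *io)
{
	return 1;
}

static int example_accept_runtime(struct rsnd_dai_stream *io)
{
	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);

	return runtime ? 1 : 0;
}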
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 76a477a3ccb5..7aa239e28491 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -12,10 +12,6 @@
#define SRC_NAME "src"
-/* SRCx_STATUS */
-#define OUF_SRCO ((1 << 12) | (1 << 13))
-#define OUF_SRCI ((1 << 9) | (1 << 8))
-
/* SCU_SYSTEM_STATUS0/1 */
#define OUF_SRC(id) ((1 << (id + 16)) | (1 << id))
@@ -55,20 +51,6 @@ struct rsnd_src {
*
*/
-/*
- * src.c is caring...
- *
- * Gen1
- *
- * [mem] -> [SRU] -> [SSI]
- * |--------|
- *
- * Gen2
- *
- * [mem] -> [SRC] -> [SSIU] -> [SSI]
- * |-----------------|
- */
-
static void rsnd_src_activation(struct rsnd_mod *mod)
{
rsnd_mod_write(mod, SRC_SWRSR, 0);
@@ -515,6 +497,7 @@ static int rsnd_src_pcm_new(struct rsnd_mod *mod,
rsnd_io_is_play(io) ?
"SRC Out Rate Switch" :
"SRC In Rate Switch",
+ rsnd_kctrl_accept_anytime,
rsnd_src_set_convert_rate,
&src->sen, 1);
if (ret < 0)
@@ -524,6 +507,7 @@ static int rsnd_src_pcm_new(struct rsnd_mod *mod,
rsnd_io_is_play(io) ?
"SRC Out Rate" :
"SRC In Rate",
+ rsnd_kctrl_accept_runtime,
rsnd_src_set_convert_rate,
&src->sync, 192000);
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 91e5c07911b4..46feddd78ee2 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -11,6 +11,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <sound/simple_card_utils.h>
#include <linux/delay.h>
#include "rsnd.h"
#define RSND_SSI_NAME_SIZE 16
@@ -76,11 +77,18 @@ struct rsnd_ssi {
int rate;
int irq;
unsigned int usrcnt;
+
+ int byte_pos;
+ int period_pos;
+ int byte_per_period;
+ int next_period_byte;
};
/* flags */
#define RSND_SSI_CLK_PIN_SHARE (1 << 0)
#define RSND_SSI_NO_BUSIF (1 << 1) /* SSI+DMA without BUSIF */
+#define RSND_SSI_HDMI0 (1 << 2) /* for HDMI0 */
+#define RSND_SSI_HDMI1 (1 << 3) /* for HDMI1 */
#define for_each_rsnd_ssi(pos, priv, i) \
for (i = 0; \
@@ -99,6 +107,20 @@ struct rsnd_ssi {
#define rsnd_ssi_is_run_mods(mod, io) \
(rsnd_ssi_run_mods(io) & (1 << rsnd_mod_id(mod)))
+int rsnd_ssi_hdmi_port(struct rsnd_dai_stream *io)
+{
+ struct rsnd_mod *mod = rsnd_io_to_mod_ssi(io);
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+
+ if (rsnd_ssi_mode_flags(ssi) & RSND_SSI_HDMI0)
+ return RSND_SSI_HDMI_PORT0;
+
+ if (rsnd_ssi_mode_flags(ssi) & RSND_SSI_HDMI1)
+ return RSND_SSI_HDMI_PORT1;
+
+ return 0;
+}
+
int rsnd_ssi_use_busif(struct rsnd_dai_stream *io)
{
struct rsnd_mod *mod = rsnd_io_to_mod_ssi(io);
@@ -186,6 +208,46 @@ u32 rsnd_ssi_multi_slaves_runtime(struct rsnd_dai_stream *io)
return 0;
}
+unsigned int rsnd_ssi_clk_query(struct rsnd_priv *priv,
+ int param1, int param2, int *idx)
+{
+ int ssi_clk_mul_table[] = {
+ 1, 2, 4, 8, 16, 6, 12,
+ };
+ int j, ret;
+ unsigned int main_rate;
+
+ for (j = 0; j < ARRAY_SIZE(ssi_clk_mul_table); j++) {
+
+ /*
+ * It will set SSIWSR.CONT here, but SSICR.CKDV = 000
+ * with it is not allowed. (SSIWSR.WS_MODE with
+ * SSICR.CKDV = 000 is not allowed either).
+ * Skip it. See SSICR.CKDV
+ */
+ if (j == 0)
+ continue;
+
+ /*
+ * this driver is assuming that
+ * system word is 32bit x chan
+ * see rsnd_ssi_init()
+ */
+ main_rate = 32 * param1 * param2 * ssi_clk_mul_table[j];
+
+ ret = rsnd_adg_clk_query(priv, main_rate);
+ if (ret < 0)
+ continue;
+
+ if (idx)
+ *idx = j;
+
+ return main_rate;
+ }
+
+ return 0;
+}
+
static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
struct rsnd_dai_stream *io)
{
@@ -195,10 +257,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
struct rsnd_mod *ssi_parent_mod = rsnd_io_to_mod_ssip(io);
int chan = rsnd_runtime_channel_for_ssi(io);
- int j, ret;
- int ssi_clk_mul_table[] = {
- 1, 2, 4, 8, 16, 6, 12,
- };
+ int idx, ret;
unsigned int main_rate;
unsigned int rate = rsnd_io_is_play(io) ?
rsnd_src_get_out_rate(priv, io) :
@@ -222,45 +281,25 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
return 0;
}
- /*
- * Find best clock, and try to start ADG
- */
- for (j = 0; j < ARRAY_SIZE(ssi_clk_mul_table); j++) {
-
- /*
- * It will set SSIWSR.CONT here, but SSICR.CKDV = 000
- * with it is not allowed. (SSIWSR.WS_MODE with
- * SSICR.CKDV = 000 is not allowed either).
- * Skip it. See SSICR.CKDV
- */
- if (j == 0)
- continue;
-
- /*
- * this driver is assuming that
- * system word is 32bit x chan
- * see rsnd_ssi_init()
- */
- main_rate = rate * 32 * chan * ssi_clk_mul_table[j];
-
- ret = rsnd_adg_ssi_clk_try_start(mod, main_rate);
- if (0 == ret) {
- ssi->cr_clk = FORCE | SWL_32 |
- SCKD | SWSD | CKDV(j);
- ssi->wsr = CONT;
+ main_rate = rsnd_ssi_clk_query(priv, rate, chan, &idx);
+ if (!main_rate) {
+ dev_err(dev, "unsupported clock rate\n");
+ return -EIO;
+ }
- ssi->rate = rate;
+ ret = rsnd_adg_ssi_clk_try_start(mod, main_rate);
+ if (ret < 0)
+ return ret;
- dev_dbg(dev, "%s[%d] outputs %u Hz\n",
- rsnd_mod_name(mod),
- rsnd_mod_id(mod), rate);
+ ssi->cr_clk = FORCE | SWL_32 | SCKD | SWSD | CKDV(idx);
+ ssi->wsr = CONT;
+ ssi->rate = rate;
- return 0;
- }
- }
+ dev_dbg(dev, "%s[%d] outputs %u Hz\n",
+ rsnd_mod_name(mod),
+ rsnd_mod_id(mod), rate);
- dev_err(dev, "unsupported clock rate\n");
- return -EIO;
+ return 0;
}
static void rsnd_ssi_master_clk_stop(struct rsnd_mod *mod,
@@ -357,6 +396,59 @@ static void rsnd_ssi_register_setup(struct rsnd_mod *mod)
ssi->cr_mode); /* without EN */
}
+static void rsnd_ssi_pointer_init(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io)
+{
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+
+ ssi->byte_pos = 0;
+ ssi->period_pos = 0;
+ ssi->byte_per_period = runtime->period_size *
+ runtime->channels *
+ samples_to_bytes(runtime, 1);
+ ssi->next_period_byte = ssi->byte_per_period;
+}
+
+static int rsnd_ssi_pointer_offset(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ int additional)
+{
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ int pos = ssi->byte_pos + additional;
+
+ pos %= (runtime->periods * ssi->byte_per_period);
+
+ return pos;
+}
+
+static bool rsnd_ssi_pointer_update(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ int byte)
+{
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+
+ ssi->byte_pos += byte;
+
+ if (ssi->byte_pos >= ssi->next_period_byte) {
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+
+ ssi->period_pos++;
+ ssi->next_period_byte += ssi->byte_per_period;
+
+ if (ssi->period_pos >= runtime->periods) {
+ ssi->byte_pos = 0;
+ ssi->period_pos = 0;
+ ssi->next_period_byte = ssi->byte_per_period;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
/*
* SSI mod common functions
*/
@@ -370,6 +462,8 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
if (!rsnd_ssi_is_run_mods(mod, io))
return 0;
+ rsnd_ssi_pointer_init(mod, io);
+
ssi->usrcnt++;
rsnd_mod_power_on(mod);
@@ -549,7 +643,7 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
if (!is_dma && (status & DIRQ)) {
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
u32 *buf = (u32 *)(runtime->dma_area +
- rsnd_dai_pointer_offset(io, 0));
+ rsnd_ssi_pointer_offset(mod, io, 0));
int shift = 0;
switch (runtime->sample_bits) {
@@ -568,7 +662,7 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
else
*buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
- elapsed = rsnd_dai_pointer_update(io, sizeof(*buf));
+ elapsed = rsnd_ssi_pointer_update(mod, io, sizeof(*buf));
}
/* DMA only */
@@ -675,6 +769,18 @@ static int rsnd_ssi_common_probe(struct rsnd_mod *mod,
return ret;
}
+static int rsnd_ssi_pointer(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ snd_pcm_uframes_t *pointer)
+{
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+
+ *pointer = bytes_to_frames(runtime, ssi->byte_pos);
+
+ return 0;
+}
+
static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
.name = SSI_NAME,
.probe = rsnd_ssi_common_probe,
@@ -683,6 +789,7 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
.start = rsnd_ssi_start,
.stop = rsnd_ssi_stop,
.irq = rsnd_ssi_irq,
+ .pointer= rsnd_ssi_pointer,
.pcm_new = rsnd_ssi_pcm_new,
.hw_params = rsnd_ssi_hw_params,
};
@@ -787,13 +894,6 @@ int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod)
/*
- * Non SSI
- */
-static struct rsnd_mod_ops rsnd_ssi_non_ops = {
- .name = SSI_NAME,
-};
-
-/*
* ssi mod function
*/
static void rsnd_ssi_connect(struct rsnd_mod *mod,
@@ -814,7 +914,8 @@ static void rsnd_ssi_connect(struct rsnd_mod *mod,
type = types[i];
if (!rsnd_io_to_mod(io, type)) {
rsnd_dai_connect(mod, io, type);
- rsnd_set_slot(rdai, 2 * (i + 1), (i + 1));
+ rsnd_rdai_channels_set(rdai, (i + 1) * 2);
+ rsnd_rdai_ssi_lane_set(rdai, (i + 1));
return;
}
}
@@ -847,6 +948,47 @@ void rsnd_parse_connect_ssi(struct rsnd_dai *rdai,
of_node_put(node);
}
+static void __rsnd_ssi_parse_hdmi_connection(struct rsnd_priv *priv,
+ struct rsnd_dai_stream *io,
+ struct device_node *remote_ep)
+{
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct rsnd_mod *mod = rsnd_io_to_mod_ssi(io);
+ struct rsnd_ssi *ssi;
+
+ if (!mod)
+ return;
+
+ ssi = rsnd_mod_to_ssi(mod);
+
+ if (strstr(remote_ep->full_name, "hdmi0")) {
+ ssi->flags |= RSND_SSI_HDMI0;
+ dev_dbg(dev, "%s[%d] connected to HDMI0\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod));
+ }
+
+ if (strstr(remote_ep->full_name, "hdmi1")) {
+ ssi->flags |= RSND_SSI_HDMI1;
+ dev_dbg(dev, "%s[%d] connected to HDMI1\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod));
+ }
+}
+
+void rsnd_ssi_parse_hdmi_connection(struct rsnd_priv *priv,
+ struct device_node *endpoint,
+ int dai_i)
+{
+ struct rsnd_dai *rdai = rsnd_rdai_get(priv, dai_i);
+ struct device_node *remote_ep;
+
+ remote_ep = of_graph_get_remote_endpoint(endpoint);
+ if (!remote_ep)
+ return;
+
+ __rsnd_ssi_parse_hdmi_connection(priv, &rdai->playback, remote_ep);
+ __rsnd_ssi_parse_hdmi_connection(priv, &rdai->capture, remote_ep);
+}
+
struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id)
{
if (WARN_ON(id < 0 || id >= rsnd_ssi_nr(priv)))
@@ -952,7 +1094,6 @@ int rsnd_ssi_probe(struct rsnd_priv *priv)
goto rsnd_ssi_probe_done;
}
- ops = &rsnd_ssi_non_ops;
if (of_property_read_bool(np, "pio-transfer"))
ops = &rsnd_ssi_pio_ops;
else
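rsnd_ssi_clk_query() walks the multiplier table {1, 2, 4, 8, 16, 6, 12}, skips index 0 (SSICR.CKDV = 000 is not allowed), and asks rsnd_adg_clk_query() whether each candidate master rate is achievable. A worked sketch of the candidate formula with illustrative numbers (hypothetical helper; the ADG check is left out):

/* main_rate = rate * 32-bit system word * channels * multiplier.
 * Example: 48000 Hz, 2 channels, index 1 -> 48000 * 32 * 2 * 2 = 6144000 Hz.
 */
static unsigned int example_candidate_rate(unsigned int rate, int chan, int idx)
{
	static const unsigned int mul[] = { 1, 2, 4, 8, 16, 6, 12 };

	if (idx <= 0 || idx >= 7)	/* index 0 (CKDV = 000) is unusable */
		return 0;

	return rate * 32 * chan * mul[idx];
}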
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 512d238b79e2..bed2c9c0004b 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -123,6 +123,7 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv)
{
+ int hdmi = rsnd_ssi_hdmi_port(io);
int ret;
ret = rsnd_ssiu_init(mod, io, priv);
@@ -150,6 +151,42 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
rsnd_get_dalign(mod, io));
}
+ if (hdmi) {
+ enum rsnd_mod_type rsnd_ssi_array[] = {
+ RSND_MOD_SSIM1,
+ RSND_MOD_SSIM2,
+ RSND_MOD_SSIM3,
+ };
+ struct rsnd_mod *ssi_mod = rsnd_io_to_mod_ssi(io);
+ struct rsnd_mod *pos;
+ u32 val;
+ int i, shift;
+
+ i = rsnd_mod_id(ssi_mod);
+
+ /* output all same SSI as default */
+ val = i << 16 |
+ i << 20 |
+ i << 24 |
+ i << 28 |
+ i;
+
+ for_each_rsnd_mod_array(i, pos, io, rsnd_ssi_array) {
+ shift = (i * 4) + 16;
+ val = (val & ~(0xF << shift)) |
+ rsnd_mod_id(pos) << shift;
+ }
+
+ switch (hdmi) {
+ case RSND_SSI_HDMI_PORT0:
+ rsnd_mod_write(mod, HDMI0_SEL, val);
+ break;
+ case RSND_SSI_HDMI_PORT1:
+ rsnd_mod_write(mod, HDMI1_SEL, val);
+ break;
+ }
+ }
+
return 0;
}
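The HDMIx_SEL write above packs one SSI index per 4-bit field: the parent SSI in the low bits and the multi-SSI members starting at bit 16, with every field defaulting to the parent. A hedged sketch of that packing with hypothetical arguments:

/* Sketch only: build an HDMIx_SEL-style word from a parent SSI id and up to
 * four member SSI ids, one nibble per member starting at bit 16.
 */
static unsigned int example_hdmi_sel(unsigned int parent,
				     const unsigned int *members, int nr)
{
	unsigned int val = parent;	/* parent SSI in the low bits */
	int i, shift;

	/* default: every member field points at the parent SSI */
	for (i = 0; i < 4; i++)
		val |= parent << (16 + i * 4);

	for (i = 0; i < nr && i < 4; i++) {
		shift = 16 + i * 4;
		val = (val & ~(0xFU << shift)) | (members[i] << shift);
	}

	return val;
}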
diff --git a/sound/soc/sh/siu_dai.c b/sound/soc/sh/siu_dai.c
index 76b2ab8c2b4a..4a22aadac294 100644
--- a/sound/soc/sh/siu_dai.c
+++ b/sound/soc/sh/siu_dai.c
@@ -441,7 +441,7 @@ static int siu_dai_put_volume(struct snd_kcontrol *kctrl,
return 0;
}
-static struct snd_kcontrol_new playback_controls = {
+static const struct snd_kcontrol_new playback_controls = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Playback Volume",
.index = 0,
@@ -451,7 +451,7 @@ static struct snd_kcontrol_new playback_controls = {
.private_value = VOLUME_PLAYBACK,
};
-static struct snd_kcontrol_new capture_controls = {
+static const struct snd_kcontrol_new capture_controls = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "PCM Capture Volume",
.index = 0,
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index bfd71b873ca2..206f36bf43e8 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -81,7 +81,8 @@ out:
static int soc_compr_open_fe(struct snd_compr_stream *cstream)
{
struct snd_soc_pcm_runtime *fe = cstream->private_data;
- struct snd_pcm_substream *fe_substream = fe->pcm->streams[0].substream;
+ struct snd_pcm_substream *fe_substream =
+ fe->pcm->streams[cstream->direction].substream;
struct snd_soc_platform *platform = fe->platform;
struct snd_soc_dai *cpu_dai = fe->cpu_dai;
struct snd_soc_dpcm *dpcm;
@@ -467,7 +468,8 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
struct snd_compr_params *params)
{
struct snd_soc_pcm_runtime *fe = cstream->private_data;
- struct snd_pcm_substream *fe_substream = fe->pcm->streams[0].substream;
+ struct snd_pcm_substream *fe_substream =
+ fe->pcm->streams[cstream->direction].substream;
struct snd_soc_platform *platform = fe->platform;
struct snd_soc_dai *cpu_dai = fe->cpu_dai;
int ret = 0, stream;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 754e3ef8d7ae..921622a01944 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -34,6 +34,7 @@
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/dmi.h>
#include <sound/core.h>
#include <sound/jack.h>
@@ -68,6 +69,20 @@ static int pmdown_time = 5000;
module_param(pmdown_time, int, 0);
MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)");
+/* If a DMI field contains strings in this blacklist (e.g.
+ * "Type2 - Board Manufacturer" or "Type1 - TBD by OEM"), it will be taken
+ * as invalid and dropped when setting the card long name from DMI info.
+ */
+static const char * const dmi_blacklist[] = {
+ "To be filled by OEM",
+ "TBD by OEM",
+ "Default String",
+ "Board Manufacturer",
+ "Board Vendor Name",
+ "Board Product Name",
+ NULL, /* terminator */
+};
+
/* returns the minimum number of bytes needed to represent
* a particular given value */
static int min_bytes_needed(unsigned long val)
@@ -1933,6 +1948,22 @@ static void cleanup_dmi_name(char *name)
name[j] = '\0';
}
+/* Check if a DMI field is valid, i.e. not containing any string
+ * in the black list.
+ */
+static int is_dmi_valid(const char *field)
+{
+ int i = 0;
+
+ while (dmi_blacklist[i]) {
+ if (strstr(field, dmi_blacklist[i]))
+ return 0;
+ i++;
+ }
+
+ return 1;
+}
+
/**
* snd_soc_set_dmi_name() - Register DMI names to card
* @card: The card to register DMI names
@@ -1975,17 +2006,18 @@ int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour)
/* make up dmi long name as: vendor.product.version.board */
vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
- if (!vendor) {
+ if (!vendor || !is_dmi_valid(vendor)) {
dev_warn(card->dev, "ASoC: no DMI vendor name!\n");
return 0;
}
+
snprintf(card->dmi_longname, sizeof(card->snd_card->longname),
"%s", vendor);
cleanup_dmi_name(card->dmi_longname);
product = dmi_get_system_info(DMI_PRODUCT_NAME);
- if (product) {
+ if (product && is_dmi_valid(product)) {
len = strlen(card->dmi_longname);
snprintf(card->dmi_longname + len,
longname_buf_size - len,
@@ -1999,7 +2031,7 @@ int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour)
* name in the product version field
*/
product_version = dmi_get_system_info(DMI_PRODUCT_VERSION);
- if (product_version) {
+ if (product_version && is_dmi_valid(product_version)) {
len = strlen(card->dmi_longname);
snprintf(card->dmi_longname + len,
longname_buf_size - len,
@@ -2012,7 +2044,7 @@ int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour)
}
board = dmi_get_system_info(DMI_BOARD_NAME);
- if (board) {
+ if (board && is_dmi_valid(board)) {
len = strlen(card->dmi_longname);
snprintf(card->dmi_longname + len,
longname_buf_size - len,
@@ -3961,11 +3993,15 @@ unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
prefix = "";
/*
- * check "[prefix]format = xxx"
+ * check "dai-format = xxx"
+ * or "[prefix]format = xxx"
* SND_SOC_DAIFMT_FORMAT_MASK area
*/
- snprintf(prop, sizeof(prop), "%sformat", prefix);
- ret = of_property_read_string(np, prop, &str);
+ ret = of_property_read_string(np, "dai-format", &str);
+ if (ret < 0) {
+ snprintf(prop, sizeof(prop), "%sformat", prefix);
+ ret = of_property_read_string(np, prop, &str);
+ }
if (ret == 0) {
for (i = 0; i < ARRAY_SIZE(of_fmt_table); i++) {
if (strcmp(str, of_fmt_table[i].name) == 0) {
@@ -4045,6 +4081,42 @@ unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
}
EXPORT_SYMBOL_GPL(snd_soc_of_parse_daifmt);
+int snd_soc_get_dai_id(struct device_node *ep)
+{
+ struct snd_soc_component *pos;
+ struct device_node *node;
+ int ret;
+
+ node = of_graph_get_port_parent(ep);
+
+ /*
+ * In the HDMI case, for example, HDMI has both video and sound ports,
+ * but ALSA SoC needs the sound port number only.
+ * Thus, counting HDMI DT ports/endpoints doesn't work;
+ * such a component should provide .of_xlate_dai_id instead.
+ */
+ ret = -ENOTSUPP;
+ mutex_lock(&client_mutex);
+ list_for_each_entry(pos, &component_list, list) {
+ struct device_node *component_of_node = pos->dev->of_node;
+
+ if (!component_of_node && pos->dev->parent)
+ component_of_node = pos->dev->parent->of_node;
+
+ if (component_of_node != node)
+ continue;
+
+ if (pos->driver->of_xlate_dai_id)
+ ret = pos->driver->of_xlate_dai_id(pos, ep);
+
+ break;
+ }
+ mutex_unlock(&client_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_get_dai_id);
+
int snd_soc_get_dai_name(struct of_phandle_args *args,
const char **dai_name)
{
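is_dmi_valid() above simply checks each DMI field against the blacklist before it is appended to the "vendor.product.version.board" long name. A self-contained sketch of the same check (shortened, made-up blacklist; strstr() needs <string.h> outside the kernel):

#include <string.h>

/* A field is usable only if it contains none of the placeholder markers. */
static int example_use_dmi_field(const char *field)
{
	static const char * const junk[] = {
		"To be filled by OEM", "TBD by OEM", "Default String", NULL,
	};
	int i;

	if (!field)
		return 0;

	for (i = 0; junk[i]; i++)
		if (strstr(field, junk[i]))
			return 0;

	return 1;
}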
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index efc5831f205d..dcc5ece08668 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -2743,8 +2743,9 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
if (platform->driver->ops) {
rtd->ops.ack = platform->driver->ops->ack;
- rtd->ops.copy = platform->driver->ops->copy;
- rtd->ops.silence = platform->driver->ops->silence;
+ rtd->ops.copy_user = platform->driver->ops->copy_user;
+ rtd->ops.copy_kernel = platform->driver->ops->copy_kernel;
+ rtd->ops.fill_silence = platform->driver->ops->fill_silence;
rtd->ops.page = platform->driver->ops->page;
rtd->ops.mmap = platform->driver->ops->mmap;
}
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 002772e3ba2c..dd471d2c0266 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -242,6 +242,14 @@ static const struct soc_tplg_map dapm_map[] = {
{SND_SOC_TPLG_DAPM_DAI_IN, snd_soc_dapm_dai_in},
{SND_SOC_TPLG_DAPM_DAI_OUT, snd_soc_dapm_dai_out},
{SND_SOC_TPLG_DAPM_DAI_LINK, snd_soc_dapm_dai_link},
+ {SND_SOC_TPLG_DAPM_BUFFER, snd_soc_dapm_buffer},
+ {SND_SOC_TPLG_DAPM_SCHEDULER, snd_soc_dapm_scheduler},
+ {SND_SOC_TPLG_DAPM_EFFECT, snd_soc_dapm_effect},
+ {SND_SOC_TPLG_DAPM_SIGGEN, snd_soc_dapm_siggen},
+ {SND_SOC_TPLG_DAPM_SRC, snd_soc_dapm_src},
+ {SND_SOC_TPLG_DAPM_ASRC, snd_soc_dapm_asrc},
+ {SND_SOC_TPLG_DAPM_ENCODER, snd_soc_dapm_encoder},
+ {SND_SOC_TPLG_DAPM_DECODER, snd_soc_dapm_decoder},
};
static int tplc_chan_get_reg(struct soc_tplg *tplg,
@@ -344,6 +352,17 @@ static int soc_tplg_widget_load(struct soc_tplg *tplg,
return 0;
}
+/* optionally pass new dynamic widget to component driver. This is mainly for
+ * external widgets where we can assign private data/ops */
+static int soc_tplg_widget_ready(struct soc_tplg *tplg,
+ struct snd_soc_dapm_widget *w, struct snd_soc_tplg_dapm_widget *tplg_w)
+{
+ if (tplg->comp && tplg->ops && tplg->ops->widget_ready)
+ return tplg->ops->widget_ready(tplg->comp, w, tplg_w);
+
+ return 0;
+}
+
/* pass DAI configurations to component driver for extra initialization */
static int soc_tplg_dai_load(struct soc_tplg *tplg,
struct snd_soc_dai_driver *dai_drv)
@@ -1152,7 +1171,8 @@ static int soc_tplg_dapm_graph_elems_load(struct soc_tplg *tplg,
return -EINVAL;
}
- dev_dbg(tplg->dev, "ASoC: adding %d DAPM routes\n", count);
+ dev_dbg(tplg->dev, "ASoC: adding %d DAPM routes for index %d\n", count,
+ hdr->index);
for (i = 0; i < count; i++) {
elem = (struct snd_soc_tplg_dapm_graph_elem *)tplg->pos;
@@ -1465,6 +1485,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
if (template.id < 0)
return template.id;
+ /* strings are allocated here, but used and freed by the widget */
template.name = kstrdup(w->name, GFP_KERNEL);
if (!template.name)
return -ENOMEM;
@@ -1577,11 +1598,17 @@ widget:
widget->dobj.widget.kcontrol_type = kcontrol_type;
widget->dobj.ops = tplg->ops;
widget->dobj.index = tplg->index;
- kfree(template.sname);
- kfree(template.name);
list_add(&widget->dobj.list, &tplg->comp->dobj_list);
+
+ ret = soc_tplg_widget_ready(tplg, widget, w);
+ if (ret < 0)
+ goto ready_err;
+
return 0;
+ready_err:
+ snd_soc_tplg_widget_remove(widget);
+ snd_soc_dapm_free_widget(widget);
hdr_err:
kfree(template.sname);
err:
@@ -1628,7 +1655,7 @@ static int soc_tplg_dapm_complete(struct soc_tplg *tplg)
*/
if (!card || !card->instantiated) {
dev_warn(tplg->dev, "ASoC: Parent card not yet available,"
- "Do not add new widgets now\n");
+ " widget card binding deferred\n");
return 0;
}
@@ -2363,7 +2390,7 @@ static int soc_tplg_load_header(struct soc_tplg *tplg,
/* check for matching ID */
if (hdr->index != tplg->req_index &&
- hdr->index != SND_SOC_TPLG_INDEX_ALL)
+ tplg->req_index != SND_SOC_TPLG_INDEX_ALL)
return 0;
tplg->index = hdr->index;
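The header-matching fix changes which side of the comparison carries the wildcard: a load request for SND_SOC_TPLG_INDEX_ALL now accepts every header, while a request for a specific index only accepts headers with that index. The predicate after the fix is effectively:

/* Process a header if it matches the requested index, or if the caller asked
 * for all indexes. (The old code tested the wildcard on hdr->index instead,
 * so requesting all indexes did not actually match every header.)
 */
static int example_should_load(unsigned int hdr_index,
			       unsigned int req_index,
			       unsigned int index_all)
{
	return hdr_index == req_index || req_index == index_all;
}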
diff --git a/sound/soc/stm/Kconfig b/sound/soc/stm/Kconfig
index 972970f0890a..3398e6c57f37 100644
--- a/sound/soc/stm/Kconfig
+++ b/sound/soc/stm/Kconfig
@@ -1,8 +1,31 @@
-menuconfig SND_SOC_STM32
- tristate "STMicroelectronics STM32 SOC audio support"
+menu "STMicroelectronics STM32 SOC audio support"
+
+config SND_SOC_STM32_SAI
+ tristate "STM32 SAI interface (Serial Audio Interface) support"
depends on ARCH_STM32 || COMPILE_TEST
depends on SND_SOC
select SND_SOC_GENERIC_DMAENGINE_PCM
select REGMAP_MMIO
help
- Say Y if you want to enable ASoC-support for STM32
+ Say Y if you want to enable SAI for STM32
+
+config SND_SOC_STM32_I2S
+ tristate "STM32 I2S interface (SPI/I2S block) support"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on SND_SOC
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ select REGMAP_MMIO
+ help
+ Say Y if you want to enable I2S for STM32
+
+config SND_SOC_STM32_SPDIFRX
+ tristate "STM32 S/PDIF receiver (SPDIFRX) support"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on SND_SOC
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+ select REGMAP_MMIO
+ select SND_SOC_SPDIF
+ help
+ Say Y if you want to enable S/PDIF capture for STM32
+
+endmenu
diff --git a/sound/soc/stm/Makefile b/sound/soc/stm/Makefile
index e466a4759698..4ed22e648a9a 100644
--- a/sound/soc/stm/Makefile
+++ b/sound/soc/stm/Makefile
@@ -1,6 +1,14 @@
# SAI
snd-soc-stm32-sai-sub-objs := stm32_sai_sub.o
-obj-$(CONFIG_SND_SOC_STM32) += snd-soc-stm32-sai-sub.o
+obj-$(CONFIG_SND_SOC_STM32_SAI) += snd-soc-stm32-sai-sub.o
snd-soc-stm32-sai-objs := stm32_sai.o
-obj-$(CONFIG_SND_SOC_STM32) += snd-soc-stm32-sai.o
+obj-$(CONFIG_SND_SOC_STM32_SAI) += snd-soc-stm32-sai.o
+
+# I2S
+snd-soc-stm32-i2s-objs := stm32_i2s.o
+obj-$(CONFIG_SND_SOC_STM32_I2S) += snd-soc-stm32-i2s.o
+
+# SPDIFRX
+snd-soc-stm32-spdifrx-objs := stm32_spdifrx.o
+obj-$(CONFIG_SND_SOC_STM32_SPDIFRX) += snd-soc-stm32-spdifrx.o
diff --git a/sound/soc/stm/stm32_i2s.c b/sound/soc/stm/stm32_i2s.c
new file mode 100644
index 000000000000..8052629a89df
--- /dev/null
+++ b/sound/soc/stm/stm32_i2s.c
@@ -0,0 +1,946 @@
+/*
+ * STM32 ALSA SoC Digital Audio Interface (I2S) driver.
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Olivier Moysan <[email protected]> for STMicroelectronics.
+ *
+ * License terms: GPL V2.0.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#include <sound/dmaengine_pcm.h>
+#include <sound/pcm_params.h>
+
+#define STM32_I2S_CR1_REG 0x0
+#define STM32_I2S_CFG1_REG 0x08
+#define STM32_I2S_CFG2_REG 0x0C
+#define STM32_I2S_IER_REG 0x10
+#define STM32_I2S_SR_REG 0x14
+#define STM32_I2S_IFCR_REG 0x18
+#define STM32_I2S_TXDR_REG 0X20
+#define STM32_I2S_RXDR_REG 0x30
+#define STM32_I2S_CGFR_REG 0X50
+
+/* Bit definition for SPI2S_CR1 register */
+#define I2S_CR1_SPE BIT(0)
+#define I2S_CR1_CSTART BIT(9)
+#define I2S_CR1_CSUSP BIT(10)
+#define I2S_CR1_HDDIR BIT(11)
+#define I2S_CR1_SSI BIT(12)
+#define I2S_CR1_CRC33_17 BIT(13)
+#define I2S_CR1_RCRCI BIT(14)
+#define I2S_CR1_TCRCI BIT(15)
+
+/* Bit definition for SPI_CFG2 register */
+#define I2S_CFG2_IOSWP_SHIFT 15
+#define I2S_CFG2_IOSWP BIT(I2S_CFG2_IOSWP_SHIFT)
+#define I2S_CFG2_LSBFRST BIT(23)
+#define I2S_CFG2_AFCNTR BIT(31)
+
+/* Bit definition for SPI_CFG1 register */
+#define I2S_CFG1_FTHVL_SHIFT 5
+#define I2S_CFG1_FTHVL_MASK GENMASK(8, I2S_CFG1_FTHVL_SHIFT)
+#define I2S_CFG1_FTHVL_SET(x) ((x) << I2S_CFG1_FTHVL_SHIFT)
+
+#define I2S_CFG1_TXDMAEN BIT(15)
+#define I2S_CFG1_RXDMAEN BIT(14)
+
+/* Bit definition for SPI2S_IER register */
+#define I2S_IER_RXPIE BIT(0)
+#define I2S_IER_TXPIE BIT(1)
+#define I2S_IER_DPXPIE BIT(2)
+#define I2S_IER_EOTIE BIT(3)
+#define I2S_IER_TXTFIE BIT(4)
+#define I2S_IER_UDRIE BIT(5)
+#define I2S_IER_OVRIE BIT(6)
+#define I2S_IER_CRCEIE BIT(7)
+#define I2S_IER_TIFREIE BIT(8)
+#define I2S_IER_MODFIE BIT(9)
+#define I2S_IER_TSERFIE BIT(10)
+
+/* Bit definition for SPI2S_SR register */
+#define I2S_SR_RXP BIT(0)
+#define I2S_SR_TXP BIT(1)
+#define I2S_SR_DPXP BIT(2)
+#define I2S_SR_EOT BIT(3)
+#define I2S_SR_TXTF BIT(4)
+#define I2S_SR_UDR BIT(5)
+#define I2S_SR_OVR BIT(6)
+#define I2S_SR_CRCERR BIT(7)
+#define I2S_SR_TIFRE BIT(8)
+#define I2S_SR_MODF BIT(9)
+#define I2S_SR_TSERF BIT(10)
+#define I2S_SR_SUSP BIT(11)
+#define I2S_SR_TXC BIT(12)
+#define I2S_SR_RXPLVL GENMASK(14, 13)
+#define I2S_SR_RXWNE BIT(15)
+
+#define I2S_SR_MASK GENMASK(15, 0)
+
+/* Bit definition for SPI_IFCR register */
+#define I2S_IFCR_EOTC BIT(3)
+#define I2S_IFCR_TXTFC BIT(4)
+#define I2S_IFCR_UDRC BIT(5)
+#define I2S_IFCR_OVRC BIT(6)
+#define I2S_IFCR_CRCEC BIT(7)
+#define I2S_IFCR_TIFREC BIT(8)
+#define I2S_IFCR_MODFC BIT(9)
+#define I2S_IFCR_TSERFC BIT(10)
+#define I2S_IFCR_SUSPC BIT(11)
+
+#define I2S_IFCR_MASK GENMASK(11, 3)
+
+/* Bit definition for SPI_I2SCGFR register */
+#define I2S_CGFR_I2SMOD BIT(0)
+
+#define I2S_CGFR_I2SCFG_SHIFT 1
+#define I2S_CGFR_I2SCFG_MASK GENMASK(3, I2S_CGFR_I2SCFG_SHIFT)
+#define I2S_CGFR_I2SCFG_SET(x) ((x) << I2S_CGFR_I2SCFG_SHIFT)
+
+#define I2S_CGFR_I2SSTD_SHIFT 4
+#define I2S_CGFR_I2SSTD_MASK GENMASK(5, I2S_CGFR_I2SSTD_SHIFT)
+#define I2S_CGFR_I2SSTD_SET(x) ((x) << I2S_CGFR_I2SSTD_SHIFT)
+
+#define I2S_CGFR_PCMSYNC BIT(7)
+
+#define I2S_CGFR_DATLEN_SHIFT 8
+#define I2S_CGFR_DATLEN_MASK GENMASK(9, I2S_CGFR_DATLEN_SHIFT)
+#define I2S_CGFR_DATLEN_SET(x) ((x) << I2S_CGFR_DATLEN_SHIFT)
+
+#define I2S_CGFR_CHLEN_SHIFT 10
+#define I2S_CGFR_CHLEN BIT(I2S_CGFR_CHLEN_SHIFT)
+#define I2S_CGFR_CKPOL BIT(11)
+#define I2S_CGFR_FIXCH BIT(12)
+#define I2S_CGFR_WSINV BIT(13)
+#define I2S_CGFR_DATFMT BIT(14)
+
+#define I2S_CGFR_I2SDIV_SHIFT 16
+#define I2S_CGFR_I2SDIV_BIT_H 23
+#define I2S_CGFR_I2SDIV_MASK GENMASK(I2S_CGFR_I2SDIV_BIT_H,\
+ I2S_CGFR_I2SDIV_SHIFT)
+#define I2S_CGFR_I2SDIV_SET(x) ((x) << I2S_CGFR_I2SDIV_SHIFT)
+#define I2S_CGFR_I2SDIV_MAX ((1 << (I2S_CGFR_I2SDIV_BIT_H -\
+ I2S_CGFR_I2SDIV_SHIFT)) - 1)
+
+#define I2S_CGFR_ODD_SHIFT 24
+#define I2S_CGFR_ODD BIT(I2S_CGFR_ODD_SHIFT)
+#define I2S_CGFR_MCKOE BIT(25)
+
+enum i2s_master_mode {
+ I2S_MS_NOT_SET,
+ I2S_MS_MASTER,
+ I2S_MS_SLAVE,
+};
+
+enum i2s_mode {
+ I2S_I2SMOD_TX_SLAVE,
+ I2S_I2SMOD_RX_SLAVE,
+ I2S_I2SMOD_TX_MASTER,
+ I2S_I2SMOD_RX_MASTER,
+ I2S_I2SMOD_FD_SLAVE,
+ I2S_I2SMOD_FD_MASTER,
+};
+
+enum i2s_fifo_th {
+ I2S_FIFO_TH_NONE,
+ I2S_FIFO_TH_ONE_QUARTER,
+ I2S_FIFO_TH_HALF,
+ I2S_FIFO_TH_THREE_QUARTER,
+ I2S_FIFO_TH_FULL,
+};
+
+enum i2s_std {
+ I2S_STD_I2S,
+ I2S_STD_LEFT_J,
+ I2S_STD_RIGHT_J,
+ I2S_STD_DSP,
+};
+
+enum i2s_datlen {
+ I2S_I2SMOD_DATLEN_16,
+ I2S_I2SMOD_DATLEN_24,
+ I2S_I2SMOD_DATLEN_32,
+};
+
+#define STM32_I2S_DAI_NAME_SIZE 20
+#define STM32_I2S_FIFO_SIZE 16
+
+#define STM32_I2S_IS_MASTER(x) ((x)->ms_flg == I2S_MS_MASTER)
+#define STM32_I2S_IS_SLAVE(x) ((x)->ms_flg == I2S_MS_SLAVE)
+
+/**
+ * @regmap_conf: I2S register map configuration pointer
+ * @regmap: I2S register map pointer
+ * @pdev: device data pointer
+ * @dai_drv: DAI driver pointer
+ * @dma_data_tx: dma configuration data for tx channel
+ * @dma_data_rx: dma configuration data for rx channel
+ * @substream: PCM substream data pointer
+ * @i2sclk: kernel clock feeding the I2S clock generator
+ * @pclk: peripheral clock driving bus interface
+ * @x8kclk: I2S parent clock for sampling frequencies multiple of 8kHz
+ * @x11kclk: I2S parent clock for sampling frequencies multiple of 11kHz
+ * @base: mmio register base virtual address
+ * @phys_addr: I2S registers physical base address
+ * @lock_fd: lock to manage race conditions in full duplex mode
+ * @dais_name: DAI name
+ * @mclk_rate: master clock frequency (Hz)
+ * @fmt: DAI protocol
+ * @refcount: keep count of opened streams on I2S
+ * @ms_flg: master mode flag.
+ */
+struct stm32_i2s_data {
+ const struct regmap_config *regmap_conf;
+ struct regmap *regmap;
+ struct platform_device *pdev;
+ struct snd_soc_dai_driver *dai_drv;
+ struct snd_dmaengine_dai_dma_data dma_data_tx;
+ struct snd_dmaengine_dai_dma_data dma_data_rx;
+ struct snd_pcm_substream *substream;
+ struct clk *i2sclk;
+ struct clk *pclk;
+ struct clk *x8kclk;
+ struct clk *x11kclk;
+ void __iomem *base;
+ dma_addr_t phys_addr;
+ spinlock_t lock_fd; /* Manage race conditions for full duplex */
+ char dais_name[STM32_I2S_DAI_NAME_SIZE];
+ unsigned int mclk_rate;
+ unsigned int fmt;
+ int refcount;
+ int ms_flg;
+};
+
+static irqreturn_t stm32_i2s_isr(int irq, void *devid)
+{
+ struct stm32_i2s_data *i2s = (struct stm32_i2s_data *)devid;
+ struct platform_device *pdev = i2s->pdev;
+ u32 sr, ier;
+ unsigned long flags;
+ int err = 0;
+
+ regmap_read(i2s->regmap, STM32_I2S_SR_REG, &sr);
+ regmap_read(i2s->regmap, STM32_I2S_IER_REG, &ier);
+
+ flags = sr & ier;
+ if (!flags) {
+ dev_dbg(&pdev->dev, "Spurious IRQ sr=0x%08x, ier=0x%08x\n",
+ sr, ier);
+ return IRQ_NONE;
+ }
+
+ regmap_update_bits(i2s->regmap, STM32_I2S_IFCR_REG,
+ I2S_IFCR_MASK, flags);
+
+ if (flags & I2S_SR_OVR) {
+ dev_dbg(&pdev->dev, "Overrun\n");
+ err = 1;
+ }
+
+ if (flags & I2S_SR_UDR) {
+ dev_dbg(&pdev->dev, "Underrun\n");
+ err = 1;
+ }
+
+ if (flags & I2S_SR_TIFRE)
+ dev_dbg(&pdev->dev, "Frame error\n");
+
+ if (err)
+ snd_pcm_stop_xrun(i2s->substream);
+
+ return IRQ_HANDLED;
+}
+
+static bool stm32_i2s_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case STM32_I2S_CR1_REG:
+ case STM32_I2S_CFG1_REG:
+ case STM32_I2S_CFG2_REG:
+ case STM32_I2S_IER_REG:
+ case STM32_I2S_SR_REG:
+ case STM32_I2S_IFCR_REG:
+ case STM32_I2S_TXDR_REG:
+ case STM32_I2S_RXDR_REG:
+ case STM32_I2S_CGFR_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool stm32_i2s_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case STM32_I2S_TXDR_REG:
+ case STM32_I2S_RXDR_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool stm32_i2s_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case STM32_I2S_CR1_REG:
+ case STM32_I2S_CFG1_REG:
+ case STM32_I2S_CFG2_REG:
+ case STM32_I2S_IER_REG:
+ case STM32_I2S_IFCR_REG:
+ case STM32_I2S_TXDR_REG:
+ case STM32_I2S_CGFR_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int stm32_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
+{
+ struct stm32_i2s_data *i2s = snd_soc_dai_get_drvdata(cpu_dai);
+ u32 cgfr;
+ u32 cgfr_mask = I2S_CGFR_I2SSTD_MASK | I2S_CGFR_CKPOL |
+ I2S_CGFR_WSINV | I2S_CGFR_I2SCFG_MASK;
+
+ dev_dbg(cpu_dai->dev, "fmt %x\n", fmt);
+
+ /*
+ * winv = 0 : default behavior (high/low) for all standards
+ * ckpol = 0 for all standards.
+ */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ cgfr = I2S_CGFR_I2SSTD_SET(I2S_STD_I2S);
+ break;
+ case SND_SOC_DAIFMT_MSB:
+ cgfr = I2S_CGFR_I2SSTD_SET(I2S_STD_LEFT_J);
+ break;
+ case SND_SOC_DAIFMT_LSB:
+ cgfr = I2S_CGFR_I2SSTD_SET(I2S_STD_RIGHT_J);
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ cgfr = I2S_CGFR_I2SSTD_SET(I2S_STD_DSP);
+ break;
+ /* DSP_B not mapped on I2S PCM long format. 1 bit offset does not fit */
+ default:
+ dev_err(cpu_dai->dev, "Unsupported protocol %#x\n",
+ fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+ return -EINVAL;
+ }
+
+ /* DAI clock strobing */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ cgfr |= I2S_CGFR_CKPOL;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ cgfr |= I2S_CGFR_WSINV;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ cgfr |= I2S_CGFR_CKPOL;
+ cgfr |= I2S_CGFR_WSINV;
+ break;
+ default:
+ dev_err(cpu_dai->dev, "Unsupported strobing %#x\n",
+ fmt & SND_SOC_DAIFMT_INV_MASK);
+ return -EINVAL;
+ }
+
+ /* DAI clock master masks */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ i2s->ms_flg = I2S_MS_SLAVE;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ i2s->ms_flg = I2S_MS_MASTER;
+ break;
+ default:
+ dev_err(cpu_dai->dev, "Unsupported mode %#x\n",
+ fmt & SND_SOC_DAIFMT_MASTER_MASK);
+ return -EINVAL;
+ }
+
+ i2s->fmt = fmt;
+ return regmap_update_bits(i2s->regmap, STM32_I2S_CGFR_REG,
+ cgfr_mask, cgfr);
+}
+
+static int stm32_i2s_set_sysclk(struct snd_soc_dai *cpu_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct stm32_i2s_data *i2s = snd_soc_dai_get_drvdata(cpu_dai);
+
+ dev_dbg(cpu_dai->dev, "I2S MCLK frequency is %uHz\n", freq);
+
+ if ((dir == SND_SOC_CLOCK_OUT) && STM32_I2S_IS_MASTER(i2s)) {
+ i2s->mclk_rate = freq;
+
+ /* Enable master clock if master mode and mclk-fs are set */
+ return regmap_update_bits(i2s->regmap, STM32_I2S_CGFR_REG,
+ I2S_CGFR_MCKOE, I2S_CGFR_MCKOE);
+ }
+
+ return 0;
+}
+
+static int stm32_i2s_configure_clock(struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_hw_params *params)
+{
+ struct stm32_i2s_data *i2s = snd_soc_dai_get_drvdata(cpu_dai);
+ unsigned long i2s_clock_rate;
+ unsigned int tmp, div, real_div, nb_bits, frame_len;
+ unsigned int rate = params_rate(params);
+ int ret;
+ u32 cgfr, cgfr_mask;
+ bool odd;
+
+ if (!(rate % 11025))
+ clk_set_parent(i2s->i2sclk, i2s->x11kclk);
+ else
+ clk_set_parent(i2s->i2sclk, i2s->x8kclk);
+ i2s_clock_rate = clk_get_rate(i2s->i2sclk);
+
+ /*
+ * mclk = mclk_ratio x ws
+ * i2s mode : mclk_ratio = 256
+ * dsp mode : mclk_ratio = 128
+ *
+ * mclk on
+ * i2s mode : div = i2s_clk / (mclk_ratio * ws)
+ * dsp mode : div = i2s_clk / (mclk_ratio * ws)
+ * mclk off
+ * i2s mode : div = i2s_clk / (nb_bits x ws)
+ * dsp mode : div = i2s_clk / (nb_bits x ws)
+ */
+ if (i2s->mclk_rate) {
+ tmp = DIV_ROUND_CLOSEST(i2s_clock_rate, i2s->mclk_rate);
+ } else {
+ frame_len = 32;
+ if ((i2s->fmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
+ SND_SOC_DAIFMT_DSP_A)
+ frame_len = 16;
+
+ /* master clock not enabled */
+ ret = regmap_read(i2s->regmap, STM32_I2S_CGFR_REG, &cgfr);
+ if (ret < 0)
+ return ret;
+
+ nb_bits = frame_len * ((cgfr & I2S_CGFR_CHLEN) + 1);
+ tmp = DIV_ROUND_CLOSEST(i2s_clock_rate, (nb_bits * rate));
+ }
+
+ /* Check the parity of the divider */
+ odd = tmp & 0x1;
+
+ /* Compute the div prescaler */
+ div = tmp >> 1;
+
+ cgfr = I2S_CGFR_I2SDIV_SET(div) | (odd << I2S_CGFR_ODD_SHIFT);
+ cgfr_mask = I2S_CGFR_I2SDIV_MASK | I2S_CGFR_ODD;
+
+ real_div = ((2 * div) + odd);
+ dev_dbg(cpu_dai->dev, "I2S clk: %ld, SCLK: %d\n",
+ i2s_clock_rate, rate);
+ dev_dbg(cpu_dai->dev, "Divider: 2*%d(div)+%d(odd) = %d\n",
+ div, odd, real_div);
+
+ if (((div == 1) && odd) || (div > I2S_CGFR_I2SDIV_MAX)) {
+ dev_err(cpu_dai->dev, "Wrong divider setting\n");
+ return -EINVAL;
+ }
+
+ if (!div && !odd)
+ dev_warn(cpu_dai->dev, "real divider forced to 1\n");
+
+ ret = regmap_update_bits(i2s->regmap, STM32_I2S_CGFR_REG,
+ cgfr_mask, cgfr);
+ if (ret < 0)
+ return ret;
+
+ /* Set bitclock and frameclock to their inactive state */
+ return regmap_update_bits(i2s->regmap, STM32_I2S_CFG2_REG,
+ I2S_CFG2_AFCNTR, I2S_CFG2_AFCNTR);
+}
+
+static int stm32_i2s_configure(struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_hw_params *params,
+ struct snd_pcm_substream *substream)
+{
+ struct stm32_i2s_data *i2s = snd_soc_dai_get_drvdata(cpu_dai);
+ int format = params_width(params);
+ u32 cfgr, cfgr_mask, cfg1, cfg1_mask;
+ unsigned int fthlv;
+ int ret;
+
+ if ((params_channels(params) == 1) &&
+ ((i2s->fmt & SND_SOC_DAIFMT_FORMAT_MASK) != SND_SOC_DAIFMT_DSP_A)) {
+ dev_err(cpu_dai->dev, "Mono mode supported only by DSP_A\n");
+ return -EINVAL;
+ }
+
+ switch (format) {
+ case 16:
+ cfgr = I2S_CGFR_DATLEN_SET(I2S_I2SMOD_DATLEN_16);
+ cfgr_mask = I2S_CGFR_DATLEN_MASK;
+ break;
+ case 32:
+ cfgr = I2S_CGFR_DATLEN_SET(I2S_I2SMOD_DATLEN_32) |
+ I2S_CGFR_CHLEN;
+ cfgr_mask = I2S_CGFR_DATLEN_MASK | I2S_CGFR_CHLEN;
+ break;
+ default:
+ dev_err(cpu_dai->dev, "Unexpected format %d", format);
+ return -EINVAL;
+ }
+
+ if (STM32_I2S_IS_SLAVE(i2s)) {
+ cfgr |= I2S_CGFR_I2SCFG_SET(I2S_I2SMOD_FD_SLAVE);
+
+ /* As data length is either 16 or 32 bits, fixch is always set */
+ cfgr |= I2S_CGFR_FIXCH;
+ cfgr_mask |= I2S_CGFR_FIXCH;
+ } else {
+ cfgr |= I2S_CGFR_I2SCFG_SET(I2S_I2SMOD_FD_MASTER);
+ }
+ cfgr_mask |= I2S_CGFR_I2SCFG_MASK;
+
+ ret = regmap_update_bits(i2s->regmap, STM32_I2S_CGFR_REG,
+ cfgr_mask, cfgr);
+ if (ret < 0)
+ return ret;
+
+ cfg1 = I2S_CFG1_RXDMAEN | I2S_CFG1_TXDMAEN;
+ cfg1_mask = cfg1;
+
+ fthlv = STM32_I2S_FIFO_SIZE * I2S_FIFO_TH_ONE_QUARTER / 4;
+ cfg1 |= I2S_CFG1_FTHVL_SET(fthlv - 1);
+ cfg1_mask |= I2S_CFG1_FTHVL_MASK;
+
+ return regmap_update_bits(i2s->regmap, STM32_I2S_CFG1_REG,
+ cfg1_mask, cfg1);
+}
+
+static int stm32_i2s_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct stm32_i2s_data *i2s = snd_soc_dai_get_drvdata(cpu_dai);
+
+ i2s->substream = substream;
+
+ spin_lock(&i2s->lock_fd);
+ i2s->refcount++;
+ spin_unlock(&i2s->lock_fd);
+
+ return regmap_update_bits(i2s->regmap, STM32_I2S_IFCR_REG,
+ I2S_IFCR_MASK, I2S_IFCR_MASK);
+}
+
+static int stm32_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct stm32_i2s_data *i2s = snd_soc_dai_get_drvdata(cpu_dai);
+ int ret;
+
+ ret = stm32_i2s_configure(cpu_dai, params, substream);
+ if (ret < 0) {
+ dev_err(cpu_dai->dev, "Configuration returned error %d\n", ret);
+ return ret;
+ }
+
+ if (STM32_I2S_IS_MASTER(i2s))
+ ret = stm32_i2s_configure_clock(cpu_dai, params);
+
+ return ret;
+}
+
+static int stm32_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct stm32_i2s_data *i2s = snd_soc_dai_get_drvdata(cpu_dai);
+ bool playback_flg = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+ u32 cfg1_mask, ier;
+ int ret;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ /* Enable i2s */
+ dev_dbg(cpu_dai->dev, "start I2S\n");
+
+ ret = regmap_update_bits(i2s->regmap, STM32_I2S_CR1_REG,
+ I2S_CR1_SPE, I2S_CR1_SPE);
+ if (ret < 0) {
+ dev_err(cpu_dai->dev, "Error %d enabling I2S\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(i2s->regmap, STM32_I2S_CR1_REG,
+ I2S_CR1_CSTART, I2S_CR1_CSTART);
+ if (ret < 0) {
+ dev_err(cpu_dai->dev, "Error %d starting I2S\n", ret);
+ return ret;
+ }
+
+ regmap_update_bits(i2s->regmap, STM32_I2S_IFCR_REG,
+ I2S_IFCR_MASK, I2S_IFCR_MASK);
+
+ if (playback_flg) {
+ ier = I2S_IER_UDRIE;
+ } else {
+ ier = I2S_IER_OVRIE;
+
+ spin_lock(&i2s->lock_fd);
+ if (i2s->refcount == 1)
+ /* dummy write to trigger capture */
+ regmap_write(i2s->regmap,
+ STM32_I2S_TXDR_REG, 0);
+ spin_unlock(&i2s->lock_fd);
+ }
+
+ if (STM32_I2S_IS_SLAVE(i2s))
+ ier |= I2S_IER_TIFREIE;
+
+ regmap_update_bits(i2s->regmap, STM32_I2S_IER_REG, ier, ier);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ if (playback_flg)
+ regmap_update_bits(i2s->regmap, STM32_I2S_IER_REG,
+ I2S_IER_UDRIE,
+ (unsigned int)~I2S_IER_UDRIE);
+ else
+ regmap_update_bits(i2s->regmap, STM32_I2S_IER_REG,
+ I2S_IER_OVRIE,
+ (unsigned int)~I2S_IER_OVRIE);
+
+ spin_lock(&i2s->lock_fd);
+ i2s->refcount--;
+ if (i2s->refcount) {
+ spin_unlock(&i2s->lock_fd);
+ break;
+ }
+ spin_unlock(&i2s->lock_fd);
+
+ dev_dbg(cpu_dai->dev, "stop I2S\n");
+
+ ret = regmap_update_bits(i2s->regmap, STM32_I2S_CR1_REG,
+ I2S_CR1_SPE, 0);
+ if (ret < 0) {
+ dev_err(cpu_dai->dev, "Error %d disabling I2S\n", ret);
+ return ret;
+ }
+
+ cfg1_mask = I2S_CFG1_RXDMAEN | I2S_CFG1_TXDMAEN;
+ regmap_update_bits(i2s->regmap, STM32_I2S_CFG1_REG,
+ cfg1_mask, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void stm32_i2s_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct stm32_i2s_data *i2s = snd_soc_dai_get_drvdata(cpu_dai);
+
+ i2s->substream = NULL;
+
+ regmap_update_bits(i2s->regmap, STM32_I2S_CGFR_REG,
+ I2S_CGFR_MCKOE, (unsigned int)~I2S_CGFR_MCKOE);
+}
+
+static int stm32_i2s_dai_probe(struct snd_soc_dai *cpu_dai)
+{
+ struct stm32_i2s_data *i2s = dev_get_drvdata(cpu_dai->dev);
+ struct snd_dmaengine_dai_dma_data *dma_data_tx = &i2s->dma_data_tx;
+ struct snd_dmaengine_dai_dma_data *dma_data_rx = &i2s->dma_data_rx;
+
+ /* Buswidth will be set by framework */
+ dma_data_tx->addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ dma_data_tx->addr = (dma_addr_t)(i2s->phys_addr) + STM32_I2S_TXDR_REG;
+ dma_data_tx->maxburst = 1;
+ dma_data_rx->addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ dma_data_rx->addr = (dma_addr_t)(i2s->phys_addr) + STM32_I2S_RXDR_REG;
+ dma_data_rx->maxburst = 1;
+
+ snd_soc_dai_init_dma_data(cpu_dai, dma_data_tx, dma_data_rx);
+
+ return 0;
+}
+
+static const struct regmap_config stm32_h7_i2s_regmap_conf = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = STM32_I2S_CGFR_REG,
+ .readable_reg = stm32_i2s_readable_reg,
+ .volatile_reg = stm32_i2s_volatile_reg,
+ .writeable_reg = stm32_i2s_writeable_reg,
+ .fast_io = true,
+};
+
+static const struct snd_soc_dai_ops stm32_i2s_pcm_dai_ops = {
+ .set_sysclk = stm32_i2s_set_sysclk,
+ .set_fmt = stm32_i2s_set_dai_fmt,
+ .startup = stm32_i2s_startup,
+ .hw_params = stm32_i2s_hw_params,
+ .trigger = stm32_i2s_trigger,
+ .shutdown = stm32_i2s_shutdown,
+};
+
+static const struct snd_pcm_hardware stm32_i2s_pcm_hw = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP,
+ .buffer_bytes_max = 8 * PAGE_SIZE,
+ .period_bytes_max = 2048,
+ .periods_min = 2,
+ .periods_max = 8,
+};
+
+static const struct snd_dmaengine_pcm_config stm32_i2s_pcm_config = {
+ .pcm_hardware = &stm32_i2s_pcm_hw,
+ .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
+ .prealloc_buffer_size = PAGE_SIZE * 8,
+};
+
+static const struct snd_soc_component_driver stm32_i2s_component = {
+ .name = "stm32-i2s",
+};
+
+static void stm32_i2s_dai_init(struct snd_soc_pcm_stream *stream,
+ char *stream_name)
+{
+ stream->stream_name = stream_name;
+ stream->channels_min = 1;
+ stream->channels_max = 2;
+ stream->rates = SNDRV_PCM_RATE_8000_192000;
+ stream->formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S32_LE;
+}
+
+static int stm32_i2s_dais_init(struct platform_device *pdev,
+ struct stm32_i2s_data *i2s)
+{
+ struct snd_soc_dai_driver *dai_ptr;
+
+ dai_ptr = devm_kzalloc(&pdev->dev, sizeof(struct snd_soc_dai_driver),
+ GFP_KERNEL);
+ if (!dai_ptr)
+ return -ENOMEM;
+
+ snprintf(i2s->dais_name, STM32_I2S_DAI_NAME_SIZE,
+ "%s", dev_name(&pdev->dev));
+
+ dai_ptr->probe = stm32_i2s_dai_probe;
+ dai_ptr->ops = &stm32_i2s_pcm_dai_ops;
+ dai_ptr->name = i2s->dais_name;
+ dai_ptr->id = 1;
+ stm32_i2s_dai_init(&dai_ptr->playback, "playback");
+ stm32_i2s_dai_init(&dai_ptr->capture, "capture");
+ i2s->dai_drv = dai_ptr;
+
+ return 0;
+}
+
+static const struct of_device_id stm32_i2s_ids[] = {
+ {
+ .compatible = "st,stm32h7-i2s",
+ .data = &stm32_h7_i2s_regmap_conf
+ },
+ {},
+};
+
+static int stm32_i2s_parse_dt(struct platform_device *pdev,
+ struct stm32_i2s_data *i2s)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id;
+ struct reset_control *rst;
+ struct resource *res;
+ int irq, ret;
+
+ if (!np)
+ return -ENODEV;
+
+ of_id = of_match_device(stm32_i2s_ids, &pdev->dev);
+ if (of_id)
+ i2s->regmap_conf = (const struct regmap_config *)of_id->data;
+ else
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i2s->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(i2s->base))
+ return PTR_ERR(i2s->base);
+
+ i2s->phys_addr = res->start;
+
+ /* Get clocks */
+ i2s->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(i2s->pclk)) {
+ dev_err(&pdev->dev, "Could not get pclk\n");
+ return PTR_ERR(i2s->pclk);
+ }
+
+ i2s->i2sclk = devm_clk_get(&pdev->dev, "i2sclk");
+ if (IS_ERR(i2s->i2sclk)) {
+ dev_err(&pdev->dev, "Could not get i2sclk\n");
+ return PTR_ERR(i2s->i2sclk);
+ }
+
+ i2s->x8kclk = devm_clk_get(&pdev->dev, "x8k");
+ if (IS_ERR(i2s->x8kclk)) {
+ dev_err(&pdev->dev, "missing x8k parent clock\n");
+ return PTR_ERR(i2s->x8kclk);
+ }
+
+ i2s->x11kclk = devm_clk_get(&pdev->dev, "x11k");
+ if (IS_ERR(i2s->x11kclk)) {
+ dev_err(&pdev->dev, "missing x11k parent clock\n");
+ return PTR_ERR(i2s->x11kclk);
+ }
+
+ /* Get irqs */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
+ return -ENOENT;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, stm32_i2s_isr, IRQF_ONESHOT,
+ dev_name(&pdev->dev), i2s);
+ if (ret) {
+ dev_err(&pdev->dev, "irq request returned %d\n", ret);
+ return ret;
+ }
+
+ /* Reset */
+ rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (!IS_ERR(rst)) {
+ reset_control_assert(rst);
+ udelay(2);
+ reset_control_deassert(rst);
+ }
+
+ return 0;
+}
+
+static int stm32_i2s_probe(struct platform_device *pdev)
+{
+ struct stm32_i2s_data *i2s;
+ int ret;
+
+ i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL);
+ if (!i2s)
+ return -ENOMEM;
+
+ ret = stm32_i2s_parse_dt(pdev, i2s);
+ if (ret)
+ return ret;
+
+ i2s->pdev = pdev;
+ i2s->ms_flg = I2S_MS_NOT_SET;
+ spin_lock_init(&i2s->lock_fd);
+ platform_set_drvdata(pdev, i2s);
+
+ ret = stm32_i2s_dais_init(pdev, i2s);
+ if (ret)
+ return ret;
+
+ i2s->regmap = devm_regmap_init_mmio(&pdev->dev, i2s->base,
+ i2s->regmap_conf);
+ if (IS_ERR(i2s->regmap)) {
+ dev_err(&pdev->dev, "regmap init failed\n");
+ return PTR_ERR(i2s->regmap);
+ }
+
+ ret = clk_prepare_enable(i2s->pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Enable pclk failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(i2s->i2sclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Enable i2sclk failed: %d\n", ret);
+ goto err_pclk_disable;
+ }
+
+ ret = devm_snd_soc_register_component(&pdev->dev, &stm32_i2s_component,
+ i2s->dai_drv, 1);
+ if (ret)
+ goto err_clocks_disable;
+
+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev,
+ &stm32_i2s_pcm_config, 0);
+ if (ret)
+ goto err_clocks_disable;
+
+ /* Set SPI/I2S in i2s mode */
+ ret = regmap_update_bits(i2s->regmap, STM32_I2S_CGFR_REG,
+ I2S_CGFR_I2SMOD, I2S_CGFR_I2SMOD);
+ if (ret)
+ goto err_clocks_disable;
+
+ return ret;
+
+err_clocks_disable:
+ clk_disable_unprepare(i2s->i2sclk);
+err_pclk_disable:
+ clk_disable_unprepare(i2s->pclk);
+
+ return ret;
+}
+
+static int stm32_i2s_remove(struct platform_device *pdev)
+{
+ struct stm32_i2s_data *i2s = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(i2s->i2sclk);
+ clk_disable_unprepare(i2s->pclk);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, stm32_i2s_ids);
+
+static struct platform_driver stm32_i2s_driver = {
+ .driver = {
+ .name = "st,stm32-i2s",
+ .of_match_table = stm32_i2s_ids,
+ },
+ .probe = stm32_i2s_probe,
+ .remove = stm32_i2s_remove,
+};
+
+module_platform_driver(stm32_i2s_driver);
+
+MODULE_DESCRIPTION("STM32 Soc i2s Interface");
+MODULE_AUTHOR("Olivier Moysan, <[email protected]>");
+MODULE_ALIAS("platform:stm32-i2s");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/stm/stm32_sai.c b/sound/soc/stm/stm32_sai.c
index 2a27a26bf7a1..f7713314913b 100644
--- a/sound/soc/stm/stm32_sai.c
+++ b/sound/soc/stm/stm32_sai.c
@@ -27,8 +27,17 @@
#include "stm32_sai.h"
+static const struct stm32_sai_conf stm32_sai_conf_f4 = {
+ .version = SAI_STM32F4,
+};
+
+static const struct stm32_sai_conf stm32_sai_conf_h7 = {
+ .version = SAI_STM32H7,
+};
+
static const struct of_device_id stm32_sai_ids[] = {
- { .compatible = "st,stm32f4-sai", .data = (void *)SAI_STM32F4 },
+ { .compatible = "st,stm32f4-sai", .data = (void *)&stm32_sai_conf_f4 },
+ { .compatible = "st,stm32h7-sai", .data = (void *)&stm32_sai_conf_h7 },
{}
};
@@ -52,7 +61,7 @@ static int stm32_sai_probe(struct platform_device *pdev)
of_id = of_match_device(stm32_sai_ids, &pdev->dev);
if (of_id)
- sai->version = (enum stm32_sai_version)of_id->data;
+ sai->conf = (struct stm32_sai_conf *)of_id->data;
else
return -EINVAL;
@@ -110,6 +119,6 @@ static struct platform_driver stm32_sai_driver = {
module_platform_driver(stm32_sai_driver);
MODULE_DESCRIPTION("STM32 Soc SAI Interface");
-MODULE_AUTHOR("Olivier Moysan, <[email protected]>");
+MODULE_AUTHOR("Olivier Moysan <[email protected]>");
MODULE_ALIAS("platform:st,stm32-sai");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/stm/stm32_sai.h b/sound/soc/stm/stm32_sai.h
index a801fda5066f..889974dc62d9 100644
--- a/sound/soc/stm/stm32_sai.h
+++ b/sound/soc/stm/stm32_sai.h
@@ -31,6 +31,10 @@
#define STM_SAI_CLRFR_REGX 0x18
#define STM_SAI_DR_REGX 0x1C
+/* Sub-block A registers, relative to sub-block A address */
+#define STM_SAI_PDMCR_REGX 0x40
+#define STM_SAI_PDMLY_REGX 0x44
+
/******************** Bit definition for SAI_GCR register *******************/
#define SAI_GCR_SYNCIN_SHIFT 0
#define SAI_GCR_SYNCIN_MASK GENMASK(1, SAI_GCR_SYNCIN_SHIFT)
@@ -75,10 +79,11 @@
#define SAI_XCR1_NODIV BIT(SAI_XCR1_NODIV_SHIFT)
#define SAI_XCR1_MCKDIV_SHIFT 20
-#define SAI_XCR1_MCKDIV_WIDTH 4
-#define SAI_XCR1_MCKDIV_MASK GENMASK(24, SAI_XCR1_MCKDIV_SHIFT)
+#define SAI_XCR1_MCKDIV_WIDTH(x) (((x) == SAI_STM32F4) ? 4 : 6)
+#define SAI_XCR1_MCKDIV_MASK(x) GENMASK((SAI_XCR1_MCKDIV_SHIFT + (x) - 1),\
+ SAI_XCR1_MCKDIV_SHIFT)
#define SAI_XCR1_MCKDIV_SET(x) ((x) << SAI_XCR1_MCKDIV_SHIFT)
-#define SAI_XCR1_MCKDIV_MAX ((1 << SAI_XCR1_MCKDIV_WIDTH) - 1)
+#define SAI_XCR1_MCKDIV_MAX(x) ((1 << SAI_XCR1_MCKDIV_WIDTH(x)) - 1)
#define SAI_XCR1_OSR_SHIFT 26
#define SAI_XCR1_OSR BIT(SAI_XCR1_OSR_SHIFT)
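For illustration only (not part of the patch): with the MCKDIV field now 4 bits wide on STM32F4 and 6 bits wide on STM32H7, the mask and maximum divider are derived from the SAI version at run time. A minimal userspace sketch, re-implementing GENMASK() locally and assuming a 64-bit unsigned long, shows what the macros above resolve to:

#include <stdio.h>

#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (63 - (h))))

#define SAI_STM32F4	0
#define SAI_STM32H7	1

#define SAI_XCR1_MCKDIV_SHIFT	 20
#define SAI_XCR1_MCKDIV_WIDTH(x) (((x) == SAI_STM32F4) ? 4 : 6)
#define SAI_XCR1_MCKDIV_MASK(x)	 GENMASK((SAI_XCR1_MCKDIV_SHIFT + (x) - 1), \
					 SAI_XCR1_MCKDIV_SHIFT)
#define SAI_XCR1_MCKDIV_MAX(x)	 ((1 << SAI_XCR1_MCKDIV_WIDTH(x)) - 1)

int main(void)
{
	/* F4: 4-bit field -> mask 0x00f00000, max divider 15 */
	printf("F4: mask %#lx, max %d\n",
	       SAI_XCR1_MCKDIV_MASK(SAI_XCR1_MCKDIV_WIDTH(SAI_STM32F4)),
	       SAI_XCR1_MCKDIV_MAX(SAI_STM32F4));
	/* H7: 6-bit field -> mask 0x03f00000, max divider 63 */
	printf("H7: mask %#lx, max %d\n",
	       SAI_XCR1_MCKDIV_MASK(SAI_XCR1_MCKDIV_WIDTH(SAI_STM32H7)),
	       SAI_XCR1_MCKDIV_MAX(SAI_STM32H7));
	return 0;
}

Compiled stand-alone, this prints mask 0xf00000/max 15 for F4 and mask 0x3f00000/max 63 for H7.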
@@ -125,7 +130,6 @@
#define SAI_XFRCR_FSOFF BIT(SAI_XFRCR_FSOFF_SHIFT)
/****************** Bit definition for SAI_XSLOTR register ******************/
-
#define SAI_XSLOTR_FBOFF_SHIFT 0
#define SAI_XSLOTR_FBOFF_MASK GENMASK(4, SAI_XSLOTR_FBOFF_SHIFT)
#define SAI_XSLOTR_FBOFF_SET(x) ((x) << SAI_XSLOTR_FBOFF_SHIFT)
@@ -179,8 +183,65 @@
#define SAI_XCLRFR_SHIFT 0
#define SAI_XCLRFR_MASK GENMASK(6, SAI_XCLRFR_SHIFT)
+/****************** Bit definition for SAI_PDMCR register ******************/
+#define SAI_PDMCR_PDMEN BIT(0)
+
+#define SAI_PDMCR_MICNBR_SHIFT 4
+#define SAI_PDMCR_MICNBR_MASK GENMASK(5, SAI_PDMCR_MICNBR_SHIFT)
+#define SAI_PDMCR_MICNBR_SET(x) ((x) << SAI_PDMCR_MICNBR_SHIFT)
+
+#define SAI_PDMCR_CKEN1 BIT(8)
+#define SAI_PDMCR_CKEN2 BIT(9)
+#define SAI_PDMCR_CKEN3 BIT(10)
+#define SAI_PDMCR_CKEN4 BIT(11)
+
+/****************** Bit definition for SAI_PDMDLY register *****************/
+#define SAI_PDMDLY_1L_SHIFT 0
+#define SAI_PDMDLY_1L_MASK GENMASK(2, SAI_PDMDLY_1L_SHIFT)
+#define SAI_PDMDLY_1L_WIDTH 3
+
+#define SAI_PDMDLY_1R_SHIFT 4
+#define SAI_PDMDLY_1R_MASK GENMASK(6, SAI_PDMDLY_1R_SHIFT)
+#define SAI_PDMDLY_1R_WIDTH 3
+
+#define SAI_PDMDLY_2L_SHIFT 8
+#define SAI_PDMDLY_2L_MASK GENMASK(10, SAI_PDMDLY_2L_SHIFT)
+#define SAI_PDMDLY_2L_WIDTH 3
+
+#define SAI_PDMDLY_2R_SHIFT 12
+#define SAI_PDMDLY_2R_MASK GENMASK(14, SAI_PDMDLY_2R_SHIFT)
+#define SAI_PDMDLY_2R_WIDTH 3
+
+#define SAI_PDMDLY_3L_SHIFT 16
+#define SAI_PDMDLY_3L_MASK GENMASK(18, SAI_PDMDLY_3L_SHIFT)
+#define SAI_PDMDLY_3L_WIDTH 3
+
+#define SAI_PDMDLY_3R_SHIFT 20
+#define SAI_PDMDLY_3R_MASK GENMASK(22, SAI_PDMDLY_3R_SHIFT)
+#define SAI_PDMDLY_3R_WIDTH 3
+
+#define SAI_PDMDLY_4L_SHIFT 24
+#define SAI_PDMDLY_4L_MASK GENMASK(26, SAI_PDMDLY_4L_SHIFT)
+#define SAI_PDMDLY_4L_WIDTH 3
+
+#define SAI_PDMDLY_4R_SHIFT 28
+#define SAI_PDMDLY_4R_MASK GENMASK(30, SAI_PDMDLY_4R_SHIFT)
+#define SAI_PDMDLY_4R_WIDTH 3
+
+#define STM_SAI_IS_F4(ip) ((ip)->conf->version == SAI_STM32F4)
+#define STM_SAI_IS_H7(ip) ((ip)->conf->version == SAI_STM32H7)
+
enum stm32_sai_version {
- SAI_STM32F4
+ SAI_STM32F4,
+ SAI_STM32H7
+};
+
+/**
+ * struct stm32_sai_conf - SAI configuration
+ * @version: SAI version
+ */
+struct stm32_sai_conf {
+ int version;
};
/**
@@ -195,6 +256,6 @@ struct stm32_sai_data {
struct platform_device *pdev;
struct clk *clk_x8k;
struct clk *clk_x11k;
- int version;
+ struct stm32_sai_conf *conf;
int irq;
};
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index ae4706ca265b..90d439613899 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -51,12 +51,15 @@
#define STM_SAI_A_ID 0x0
#define STM_SAI_B_ID 0x1
+#define STM_SAI_IS_SUB_A(x) ((x)->id == STM_SAI_A_ID)
+#define STM_SAI_IS_SUB_B(x) ((x)->id == STM_SAI_B_ID)
#define STM_SAI_BLOCK_NAME(x) (((x)->id == STM_SAI_A_ID) ? "A" : "B")
/**
* struct stm32_sai_sub_data - private data of SAI sub block (block A or B)
* @pdev: device data pointer
* @regmap: SAI register map pointer
+ * @regmap_config: SAI sub block register map configuration pointer
* @dma_params: dma configuration data for rx or tx channel
* @cpu_dai_drv: DAI driver data pointer
* @cpu_dai: DAI runtime data pointer
@@ -79,6 +82,7 @@
struct stm32_sai_sub_data {
struct platform_device *pdev;
struct regmap *regmap;
+ const struct regmap_config *regmap_config;
struct snd_dmaengine_dai_dma_data dma_params;
struct snd_soc_dai_driver *cpu_dai_drv;
struct snd_soc_dai *cpu_dai;
@@ -118,6 +122,8 @@ static bool stm32_sai_sub_readable_reg(struct device *dev, unsigned int reg)
case STM_SAI_SR_REGX:
case STM_SAI_CLRFR_REGX:
case STM_SAI_DR_REGX:
+ case STM_SAI_PDMCR_REGX:
+ case STM_SAI_PDMLY_REGX:
return true;
default:
return false;
@@ -145,13 +151,15 @@ static bool stm32_sai_sub_writeable_reg(struct device *dev, unsigned int reg)
case STM_SAI_SR_REGX:
case STM_SAI_CLRFR_REGX:
case STM_SAI_DR_REGX:
+ case STM_SAI_PDMCR_REGX:
+ case STM_SAI_PDMLY_REGX:
return true;
default:
return false;
}
}
-static const struct regmap_config stm32_sai_sub_regmap_config = {
+static const struct regmap_config stm32_sai_sub_regmap_config_f4 = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
@@ -162,6 +170,17 @@ static const struct regmap_config stm32_sai_sub_regmap_config = {
.fast_io = true,
};
+static const struct regmap_config stm32_sai_sub_regmap_config_h7 = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = STM_SAI_PDMLY_REGX,
+ .readable_reg = stm32_sai_sub_readable_reg,
+ .volatile_reg = stm32_sai_sub_volatile_reg,
+ .writeable_reg = stm32_sai_sub_writeable_reg,
+ .fast_io = true,
+};
+
static irqreturn_t stm32_sai_isr(int irq, void *devid)
{
struct stm32_sai_sub_data *sai = (struct stm32_sai_sub_data *)devid;
@@ -181,29 +200,29 @@ static irqreturn_t stm32_sai_isr(int irq, void *devid)
SAI_XCLRFR_MASK);
if (flags & SAI_XIMR_OVRUDRIE) {
- dev_err(&pdev->dev, "IT %s\n",
+ dev_err(&pdev->dev, "IRQ %s\n",
STM_SAI_IS_PLAYBACK(sai) ? "underrun" : "overrun");
status = SNDRV_PCM_STATE_XRUN;
}
if (flags & SAI_XIMR_MUTEDETIE)
- dev_dbg(&pdev->dev, "IT mute detected\n");
+ dev_dbg(&pdev->dev, "IRQ mute detected\n");
if (flags & SAI_XIMR_WCKCFGIE) {
- dev_err(&pdev->dev, "IT wrong clock configuration\n");
+ dev_err(&pdev->dev, "IRQ wrong clock configuration\n");
status = SNDRV_PCM_STATE_DISCONNECTED;
}
if (flags & SAI_XIMR_CNRDYIE)
- dev_warn(&pdev->dev, "IT Codec not ready\n");
+ dev_err(&pdev->dev, "IRQ Codec not ready\n");
if (flags & SAI_XIMR_AFSDETIE) {
- dev_warn(&pdev->dev, "IT Anticipated frame synchro\n");
+ dev_err(&pdev->dev, "IRQ Anticipated frame synchro\n");
status = SNDRV_PCM_STATE_XRUN;
}
if (flags & SAI_XIMR_LFSDETIE) {
- dev_warn(&pdev->dev, "IT Late frame synchro\n");
+ dev_err(&pdev->dev, "IRQ Late frame synchro\n");
status = SNDRV_PCM_STATE_XRUN;
}
@@ -220,8 +239,15 @@ static int stm32_sai_set_sysclk(struct snd_soc_dai *cpu_dai,
int clk_id, unsigned int freq, int dir)
{
struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
+ int ret;
if ((dir == SND_SOC_CLOCK_OUT) && sai->master) {
+ ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
+ SAI_XCR1_NODIV,
+ (unsigned int)~SAI_XCR1_NODIV);
+ if (ret < 0)
+ return ret;
+
sai->mclk_rate = freq;
dev_dbg(cpu_dai->dev, "SAI MCLK frequency is %uHz\n", freq);
}
@@ -235,7 +261,7 @@ static int stm32_sai_set_dai_tdm_slot(struct snd_soc_dai *cpu_dai, u32 tx_mask,
struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
int slotr, slotr_mask, slot_size;
- dev_dbg(cpu_dai->dev, "masks tx/rx:%#x/%#x, slots:%d, width:%d\n",
+ dev_dbg(cpu_dai->dev, "Masks tx/rx:%#x/%#x, slots:%d, width:%d\n",
tx_mask, rx_mask, slots, slot_width);
switch (slot_width) {
@@ -356,6 +382,10 @@ static int stm32_sai_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
}
cr1_mask |= SAI_XCR1_SLAVE;
+ /* do not generate master by default */
+ cr1 |= SAI_XCR1_NODIV;
+ cr1_mask |= SAI_XCR1_NODIV;
+
ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, cr1_mask, cr1);
if (ret < 0) {
dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
@@ -377,7 +407,7 @@ static int stm32_sai_startup(struct snd_pcm_substream *substream,
ret = clk_prepare_enable(sai->sai_ck);
if (ret < 0) {
- dev_err(cpu_dai->dev, "failed to enable clock: %d\n", ret);
+ dev_err(cpu_dai->dev, "Failed to enable clock: %d\n", ret);
return ret;
}
@@ -497,7 +527,7 @@ static int stm32_sai_set_slots(struct snd_soc_dai *cpu_dai)
SAI_XSLOTR_SLOTEN_SET(sai->slot_mask));
}
- dev_dbg(cpu_dai->dev, "slots %d, slot width %d\n",
+ dev_dbg(cpu_dai->dev, "Slots %d, slot width %d\n",
sai->slots, sai->slot_width);
return 0;
@@ -521,7 +551,7 @@ static void stm32_sai_set_frame(struct snd_soc_dai *cpu_dai)
frcr |= SAI_XFRCR_FSALL_SET((fs_active - 1));
frcr_mask = SAI_XFRCR_FRL_MASK | SAI_XFRCR_FSALL_MASK;
- dev_dbg(cpu_dai->dev, "frame length %d, frame active %d\n",
+ dev_dbg(cpu_dai->dev, "Frame length %d, frame active %d\n",
sai->fs_length, fs_active);
regmap_update_bits(sai->regmap, STM_SAI_FRCR_REGX, frcr_mask, frcr);
@@ -540,7 +570,8 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
{
struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
int cr1, mask, div = 0;
- int sai_clk_rate, ret;
+ int sai_clk_rate, mclk_ratio, den, ret;
+ int version = sai->pdata->conf->version;
if (!sai->mclk_rate) {
dev_err(cpu_dai->dev, "Mclk rate is null\n");
@@ -553,21 +584,53 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
clk_set_parent(sai->sai_ck, sai->pdata->clk_x8k);
sai_clk_rate = clk_get_rate(sai->sai_ck);
- /*
- * mclk_rate = 256 * fs
- * MCKDIV = 0 if sai_ck < 3/2 * mclk_rate
- * MCKDIV = sai_ck / (2 * mclk_rate) otherwise
- */
- if (2 * sai_clk_rate >= 3 * sai->mclk_rate)
- div = DIV_ROUND_CLOSEST(sai_clk_rate, 2 * sai->mclk_rate);
-
- if (div > SAI_XCR1_MCKDIV_MAX) {
+ if (STM_SAI_IS_F4(sai->pdata)) {
+ /*
+ * mclk_rate = 256 * fs
+ * MCKDIV = 0 if sai_ck < 3/2 * mclk_rate
+ * MCKDIV = sai_ck / (2 * mclk_rate) otherwise
+ */
+ if (2 * sai_clk_rate >= 3 * sai->mclk_rate)
+ div = DIV_ROUND_CLOSEST(sai_clk_rate,
+ 2 * sai->mclk_rate);
+ } else {
+ /*
+ * TDM mode:
+ * mclk on
+ * MCKDIV = sai_ck / (ws x 256) (NOMCK=0, OSR=0)
+ * MCKDIV = sai_ck / (ws x 512) (NOMCK=0, OSR=1)
+ * mclk off
+ * MCKDIV = sai_ck / (frl x ws) (NOMCK=1)
+ * Note: NOMCK/NODIV correspond to the same bit.
+ */
+ if (sai->mclk_rate) {
+ mclk_ratio = sai->mclk_rate / params_rate(params);
+ if (mclk_ratio != 256) {
+ if (mclk_ratio == 512) {
+ mask = SAI_XCR1_OSR;
+ cr1 = SAI_XCR1_OSR;
+ } else {
+ dev_err(cpu_dai->dev,
+ "Wrong mclk ratio %d\n",
+ mclk_ratio);
+ return -EINVAL;
+ }
+ }
+ div = DIV_ROUND_CLOSEST(sai_clk_rate, sai->mclk_rate);
+ } else {
+ /* mclk-fs not set, master clock not active. NOMCK=1 */
+ den = sai->fs_length * params_rate(params);
+ div = DIV_ROUND_CLOSEST(sai_clk_rate, den);
+ }
+ }
+
+ if (div > SAI_XCR1_MCKDIV_MAX(version)) {
dev_err(cpu_dai->dev, "Divider %d out of range\n", div);
return -EINVAL;
}
dev_dbg(cpu_dai->dev, "SAI clock %d, divider %d\n", sai_clk_rate, div);
- mask = SAI_XCR1_MCKDIV_MASK;
+ mask = SAI_XCR1_MCKDIV_MASK(SAI_XCR1_MCKDIV_WIDTH(version));
cr1 = SAI_XCR1_MCKDIV_SET(div);
ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, mask, cr1);
if (ret < 0) {
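A worked example of the divider selection above, with figures assumed purely for illustration: for sai_ck = 49152000 Hz and a 48 kHz stream, an mclk-fs ratio of 256 gives mclk_rate = 12288000 Hz, so the F4 branch yields MCKDIV = 49152000 / (2 * 12288000) = 2 and the H7 branch with mclk on yields MCKDIV = 49152000 / 12288000 = 4; with the master clock off and fs_length = 64, the H7 branch yields MCKDIV = 49152000 / (64 * 48000) = 16. A standalone sketch of the H7 arithmetic (an assumed helper, not the driver function) using the same round-to-closest integer division:

/*
 * Sketch only: mirrors the MCKDIV comments above; mclk_rate == 0 means
 * NOMCK = 1, i.e. no master clock is generated and the divider is taken
 * from the frame length times the sample rate.
 */
static unsigned long sai_h7_mckdiv(unsigned long sai_ck,
				   unsigned long mclk_rate,
				   unsigned long rate,
				   unsigned long fs_length)
{
	unsigned long den = mclk_rate ? mclk_rate : fs_length * rate;

	return (sai_ck + den / 2) / den; /* DIV_ROUND_CLOSEST equivalent */
}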
@@ -629,12 +692,12 @@ static int stm32_sai_trigger(struct snd_pcm_substream *substream, int cmd,
dev_dbg(cpu_dai->dev, "Disable DMA and SAI\n");
regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
- SAI_XCR1_DMAEN,
- (unsigned int)~SAI_XCR1_DMAEN);
+ SAI_XCR1_SAIEN,
+ (unsigned int)~SAI_XCR1_SAIEN);
ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
- SAI_XCR1_SAIEN,
- (unsigned int)~SAI_XCR1_SAIEN);
+ SAI_XCR1_DMAEN,
+ (unsigned int)~SAI_XCR1_DMAEN);
if (ret < 0)
dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
break;
@@ -652,6 +715,9 @@ static void stm32_sai_shutdown(struct snd_pcm_substream *substream,
regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0);
+ regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_NODIV,
+ SAI_XCR1_NODIV);
+
clk_disable_unprepare(sai->sai_ck);
sai->substream = NULL;
}
@@ -761,16 +827,23 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev,
return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- dev_err(&pdev->dev, "res %pr\n", res);
-
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
sai->phys_addr = res->start;
- sai->regmap = devm_regmap_init_mmio(&pdev->dev, base,
- &stm32_sai_sub_regmap_config);
+
+ sai->regmap_config = &stm32_sai_sub_regmap_config_f4;
+ /* Note: PDM registers not available for H7 sub-block B */
+ if (STM_SAI_IS_H7(sai->pdata) && STM_SAI_IS_SUB_A(sai))
+ sai->regmap_config = &stm32_sai_sub_regmap_config_h7;
+
+ sai->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "sai_ck",
+ base, sai->regmap_config);
+ if (IS_ERR(sai->regmap)) {
+ dev_err(&pdev->dev, "Failed to initialize MMIO\n");
+ return PTR_ERR(sai->regmap);
+ }
/* Get direction property */
if (of_property_match_string(np, "dma-names", "tx") >= 0) {
@@ -784,7 +857,7 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev,
sai->sai_ck = devm_clk_get(&pdev->dev, "sai_ck");
if (IS_ERR(sai->sai_ck)) {
- dev_err(&pdev->dev, "missing kernel clock sai_ck\n");
+ dev_err(&pdev->dev, "Missing kernel clock sai_ck\n");
return PTR_ERR(sai->sai_ck);
}
@@ -849,7 +922,7 @@ static int stm32_sai_sub_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, sai->pdata->irq, stm32_sai_isr,
IRQF_SHARED, dev_name(&pdev->dev), sai);
if (ret) {
- dev_err(&pdev->dev, "irq request returned %d\n", ret);
+ dev_err(&pdev->dev, "IRQ request returned %d\n", ret);
return ret;
}
@@ -861,7 +934,7 @@ static int stm32_sai_sub_probe(struct platform_device *pdev)
ret = devm_snd_dmaengine_pcm_register(&pdev->dev,
&stm32_sai_pcm_config, 0);
if (ret) {
- dev_err(&pdev->dev, "could not register pcm dma\n");
+ dev_err(&pdev->dev, "Could not register pcm dma\n");
return ret;
}
@@ -879,6 +952,6 @@ static struct platform_driver stm32_sai_sub_driver = {
module_platform_driver(stm32_sai_sub_driver);
MODULE_DESCRIPTION("STM32 Soc SAI sub-block Interface");
-MODULE_AUTHOR("Olivier Moysan, <[email protected]>");
+MODULE_AUTHOR("Olivier Moysan <[email protected]>");
MODULE_ALIAS("platform:st,stm32-sai-sub");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/stm/stm32_spdifrx.c b/sound/soc/stm/stm32_spdifrx.c
new file mode 100644
index 000000000000..4e4250bdb75a
--- /dev/null
+++ b/sound/soc/stm/stm32_spdifrx.c
@@ -0,0 +1,998 @@
+/*
+ * STM32 ALSA SoC Digital Audio Interface (SPDIF-rx) driver.
+ *
+ * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
+ * Author(s): Olivier Moysan <[email protected]> for STMicroelectronics.
+ *
+ * License terms: GPL V2.0.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <sound/dmaengine_pcm.h>
+#include <sound/pcm_params.h>
+
+/* SPDIF-rx Register Map */
+#define STM32_SPDIFRX_CR 0x00
+#define STM32_SPDIFRX_IMR 0x04
+#define STM32_SPDIFRX_SR 0x08
+#define STM32_SPDIFRX_IFCR 0x0C
+#define STM32_SPDIFRX_DR 0x10
+#define STM32_SPDIFRX_CSR 0x14
+#define STM32_SPDIFRX_DIR 0x18
+
+/* Bit definition for SPDIF_CR register */
+#define SPDIFRX_CR_SPDIFEN_SHIFT 0
+#define SPDIFRX_CR_SPDIFEN_MASK GENMASK(1, SPDIFRX_CR_SPDIFEN_SHIFT)
+#define SPDIFRX_CR_SPDIFENSET(x) ((x) << SPDIFRX_CR_SPDIFEN_SHIFT)
+
+#define SPDIFRX_CR_RXDMAEN BIT(2)
+#define SPDIFRX_CR_RXSTEO BIT(3)
+
+#define SPDIFRX_CR_DRFMT_SHIFT 4
+#define SPDIFRX_CR_DRFMT_MASK GENMASK(5, SPDIFRX_CR_DRFMT_SHIFT)
+#define SPDIFRX_CR_DRFMTSET(x) ((x) << SPDIFRX_CR_DRFMT_SHIFT)
+
+#define SPDIFRX_CR_PMSK BIT(6)
+#define SPDIFRX_CR_VMSK BIT(7)
+#define SPDIFRX_CR_CUMSK BIT(8)
+#define SPDIFRX_CR_PTMSK BIT(9)
+#define SPDIFRX_CR_CBDMAEN BIT(10)
+#define SPDIFRX_CR_CHSEL_SHIFT 11
+#define SPDIFRX_CR_CHSEL BIT(SPDIFRX_CR_CHSEL_SHIFT)
+
+#define SPDIFRX_CR_NBTR_SHIFT 12
+#define SPDIFRX_CR_NBTR_MASK GENMASK(13, SPDIFRX_CR_NBTR_SHIFT)
+#define SPDIFRX_CR_NBTRSET(x) ((x) << SPDIFRX_CR_NBTR_SHIFT)
+
+#define SPDIFRX_CR_WFA BIT(14)
+
+#define SPDIFRX_CR_INSEL_SHIFT 16
+#define SPDIFRX_CR_INSEL_MASK GENMASK(18, SPDIFRX_CR_INSEL_SHIFT)
+#define SPDIFRX_CR_INSELSET(x) ((x) << SPDIFRX_CR_INSEL_SHIFT)
+
+#define SPDIFRX_CR_CKSEN_SHIFT 20
+#define SPDIFRX_CR_CKSEN BIT(20)
+#define SPDIFRX_CR_CKSBKPEN BIT(21)
+
+/* Bit definition for SPDIFRX_IMR register */
+#define SPDIFRX_IMR_RXNEI BIT(0)
+#define SPDIFRX_IMR_CSRNEIE BIT(1)
+#define SPDIFRX_IMR_PERRIE BIT(2)
+#define SPDIFRX_IMR_OVRIE BIT(3)
+#define SPDIFRX_IMR_SBLKIE BIT(4)
+#define SPDIFRX_IMR_SYNCDIE BIT(5)
+#define SPDIFRX_IMR_IFEIE BIT(6)
+
+#define SPDIFRX_XIMR_MASK GENMASK(6, 0)
+
+/* Bit definition for SPDIFRX_SR register */
+#define SPDIFRX_SR_RXNE BIT(0)
+#define SPDIFRX_SR_CSRNE BIT(1)
+#define SPDIFRX_SR_PERR BIT(2)
+#define SPDIFRX_SR_OVR BIT(3)
+#define SPDIFRX_SR_SBD BIT(4)
+#define SPDIFRX_SR_SYNCD BIT(5)
+#define SPDIFRX_SR_FERR BIT(6)
+#define SPDIFRX_SR_SERR BIT(7)
+#define SPDIFRX_SR_TERR BIT(8)
+
+#define SPDIFRX_SR_WIDTH5_SHIFT 16
+#define SPDIFRX_SR_WIDTH5_MASK GENMASK(30, SPDIFRX_SR_WIDTH5_SHIFT)
+#define SPDIFRX_SR_WIDTH5SET(x) ((x) << SPDIFRX_SR_WIDTH5_SHIFT)
+
+/* Bit definition for SPDIFRX_IFCR register */
+#define SPDIFRX_IFCR_PERRCF BIT(2)
+#define SPDIFRX_IFCR_OVRCF BIT(3)
+#define SPDIFRX_IFCR_SBDCF BIT(4)
+#define SPDIFRX_IFCR_SYNCDCF BIT(5)
+
+#define SPDIFRX_XIFCR_MASK GENMASK(5, 2)
+
+/* Bit definition for SPDIFRX_DR register (DRFMT = 0b00) */
+#define SPDIFRX_DR0_DR_SHIFT 0
+#define SPDIFRX_DR0_DR_MASK GENMASK(23, SPDIFRX_DR0_DR_SHIFT)
+#define SPDIFRX_DR0_DRSET(x) ((x) << SPDIFRX_DR0_DR_SHIFT)
+
+#define SPDIFRX_DR0_PE BIT(24)
+
+#define SPDIFRX_DR0_V BIT(25)
+#define SPDIFRX_DR0_U BIT(26)
+#define SPDIFRX_DR0_C BIT(27)
+
+#define SPDIFRX_DR0_PT_SHIFT 28
+#define SPDIFRX_DR0_PT_MASK GENMASK(29, SPDIFRX_DR0_PT_SHIFT)
+#define SPDIFRX_DR0_PTSET(x) ((x) << SPDIFRX_DR0_PT_SHIFT)
+
+/* Bit definition for SPDIFRX_DR register (DRFMT = 0b01) */
+#define SPDIFRX_DR1_PE BIT(0)
+#define SPDIFRX_DR1_V BIT(1)
+#define SPDIFRX_DR1_U BIT(2)
+#define SPDIFRX_DR1_C BIT(3)
+
+#define SPDIFRX_DR1_PT_SHIFT 4
+#define SPDIFRX_DR1_PT_MASK GENMASK(5, SPDIFRX_DR1_PT_SHIFT)
+#define SPDIFRX_DR1_PTSET(x) ((x) << SPDIFRX_DR1_PT_SHIFT)
+
+#define SPDIFRX_DR1_DR_SHIFT 8
+#define SPDIFRX_DR1_DR_MASK GENMASK(31, SPDIFRX_DR1_DR_SHIFT)
+#define SPDIFRX_DR1_DRSET(x) ((x) << SPDIFRX_DR1_DR_SHIFT)
+
+/* Bit definition for SPDIFRX_DR register (DRFMT = 0b10) */
+#define SPDIFRX_DR1_DRNL1_SHIFT 0
+#define SPDIFRX_DR1_DRNL1_MASK GENMASK(15, SPDIFRX_DR1_DRNL1_SHIFT)
+#define SPDIFRX_DR1_DRNL1SET(x) ((x) << SPDIFRX_DR1_DRNL1_SHIFT)
+
+#define SPDIFRX_DR1_DRNL2_SHIFT 16
+#define SPDIFRX_DR1_DRNL2_MASK GENMASK(31, SPDIFRX_DR1_DRNL2_SHIFT)
+#define SPDIFRX_DR1_DRNL2SET(x) ((x) << SPDIFRX_DR1_DRNL2_SHIFT)
+
+/* Bit definition for SPDIFRX_CSR register */
+#define SPDIFRX_CSR_USR_SHIFT 0
+#define SPDIFRX_CSR_USR_MASK GENMASK(15, SPDIFRX_CSR_USR_SHIFT)
+#define SPDIFRX_CSR_USRGET(x) (((x) & SPDIFRX_CSR_USR_MASK)\
+ >> SPDIFRX_CSR_USR_SHIFT)
+
+#define SPDIFRX_CSR_CS_SHIFT 16
+#define SPDIFRX_CSR_CS_MASK GENMASK(23, SPDIFRX_CSR_CS_SHIFT)
+#define SPDIFRX_CSR_CSGET(x) (((x) & SPDIFRX_CSR_CS_MASK)\
+ >> SPDIFRX_CSR_CS_SHIFT)
+
+#define SPDIFRX_CSR_SOB BIT(24)
+
+/* Bit definition for SPDIFRX_DIR register */
+#define SPDIFRX_DIR_THI_SHIFT 0
+#define SPDIFRX_DIR_THI_MASK GENMASK(12, SPDIFRX_DIR_THI_SHIFT)
+#define SPDIFRX_DIR_THI_SET(x) ((x) << SPDIFRX_DIR_THI_SHIFT)
+
+#define SPDIFRX_DIR_TLO_SHIFT 16
+#define SPDIFRX_DIR_TLO_MASK GENMASK(28, SPDIFRX_DIR_TLO_SHIFT)
+#define SPDIFRX_DIR_TLO_SET(x) ((x) << SPDIFRX_DIR_TLO_SHIFT)
+
+#define SPDIFRX_SPDIFEN_DISABLE 0x0
+#define SPDIFRX_SPDIFEN_SYNC 0x1
+#define SPDIFRX_SPDIFEN_ENABLE 0x3
+
+#define SPDIFRX_IN1 0x1
+#define SPDIFRX_IN2 0x2
+#define SPDIFRX_IN3 0x3
+#define SPDIFRX_IN4 0x4
+#define SPDIFRX_IN5 0x5
+#define SPDIFRX_IN6 0x6
+#define SPDIFRX_IN7 0x7
+#define SPDIFRX_IN8 0x8
+
+#define SPDIFRX_NBTR_NONE 0x0
+#define SPDIFRX_NBTR_3 0x1
+#define SPDIFRX_NBTR_15 0x2
+#define SPDIFRX_NBTR_63 0x3
+
+#define SPDIFRX_DRFMT_RIGHT 0x0
+#define SPDIFRX_DRFMT_LEFT 0x1
+#define SPDIFRX_DRFMT_PACKED 0x2
+
+/* 192 CS bits in S/PDIF frame, i.e. 24 CS bytes */
+#define SPDIFRX_CS_BYTES_NB 24
+#define SPDIFRX_UB_BYTES_NB 48
+
+/*
+ * The CSR register is retrieved as a 32-bit word.
+ * It contains 1 channel status byte and 2 user data bytes.
+ * 2 S/PDIF frames are acquired to get all CS/UB bits.
+ */
+#define SPDIFRX_CSR_BUF_LENGTH (SPDIFRX_CS_BYTES_NB * 4 * 2)
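In other words, each CSR read returns one channel status byte inside a 32-bit word, so retrieving the full 24-byte CS block over 2 S/PDIF frames needs 24 * 4 * 2 = 192 bytes, which is the value SPDIFRX_CSR_BUF_LENGTH expands to.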
+
+/**
+ * struct stm32_spdifrx_data - private data of SPDIFRX
+ * @pdev: device data pointer
+ * @base: mmio register base virtual address
+ * @regmap: SPDIFRX register map pointer
+ * @regmap_conf: SPDIFRX register map configuration pointer
+ * @cs_completion: channel status retrieving completion
+ * @kclk: kernel clock feeding the SPDIFRX clock generator
+ * @dma_params: dma configuration data for rx channel
+ * @substream: PCM substream data pointer
+ * @dmab: dma buffer info pointer
+ * @ctrl_chan: dma channel for S/PDIF control bits
+ * @desc: dma async transaction descriptor
+ * @slave_config: dma slave channel runtime config pointer
+ * @phys_addr: SPDIFRX registers physical base address
+ * @lock: synchronization enabling lock
+ * @cs: channel status buffer
+ * @ub: user data buffer
+ * @irq: SPDIFRX interrupt line
+ * @refcount: keep count of opened DMA channels
+ */
+struct stm32_spdifrx_data {
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct regmap *regmap;
+ const struct regmap_config *regmap_conf;
+ struct completion cs_completion;
+ struct clk *kclk;
+ struct snd_dmaengine_dai_dma_data dma_params;
+ struct snd_pcm_substream *substream;
+ struct snd_dma_buffer *dmab;
+ struct dma_chan *ctrl_chan;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_slave_config slave_config;
+ dma_addr_t phys_addr;
+ spinlock_t lock; /* Sync enabling lock */
+ unsigned char cs[SPDIFRX_CS_BYTES_NB];
+ unsigned char ub[SPDIFRX_UB_BYTES_NB];
+ int irq;
+ int refcount;
+};
+
+static void stm32_spdifrx_dma_complete(void *data)
+{
+ struct stm32_spdifrx_data *spdifrx = (struct stm32_spdifrx_data *)data;
+ struct platform_device *pdev = spdifrx->pdev;
+ u32 *p_start = (u32 *)spdifrx->dmab->area;
+ u32 *p_end = p_start + (2 * SPDIFRX_CS_BYTES_NB) - 1;
+ u32 *ptr = p_start;
+ u16 *ub_ptr = (u16 *)spdifrx->ub;
+ int i = 0;
+
+ regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_CR,
+ SPDIFRX_CR_CBDMAEN,
+ (unsigned int)~SPDIFRX_CR_CBDMAEN);
+
+ if (!spdifrx->dmab->area)
+ return;
+
+ while (ptr <= p_end) {
+ if (*ptr & SPDIFRX_CSR_SOB)
+ break;
+ ptr++;
+ }
+
+ if (ptr > p_end) {
+ dev_err(&pdev->dev, "Start of S/PDIF block not found\n");
+ return;
+ }
+
+ while (i < SPDIFRX_CS_BYTES_NB) {
+ spdifrx->cs[i] = (unsigned char)SPDIFRX_CSR_CSGET(*ptr);
+ *ub_ptr++ = SPDIFRX_CSR_USRGET(*ptr++);
+ if (ptr > p_end) {
+ dev_err(&pdev->dev, "Failed to get channel status\n");
+ return;
+ }
+ i++;
+ }
+
+ complete(&spdifrx->cs_completion);
+}
+
+static int stm32_spdifrx_dma_ctrl_start(struct stm32_spdifrx_data *spdifrx)
+{
+ dma_cookie_t cookie;
+ int err;
+
+ spdifrx->desc = dmaengine_prep_slave_single(spdifrx->ctrl_chan,
+ spdifrx->dmab->addr,
+ SPDIFRX_CSR_BUF_LENGTH,
+ DMA_DEV_TO_MEM,
+ DMA_CTRL_ACK);
+ if (!spdifrx->desc)
+ return -EINVAL;
+
+ spdifrx->desc->callback = stm32_spdifrx_dma_complete;
+ spdifrx->desc->callback_param = spdifrx;
+ cookie = dmaengine_submit(spdifrx->desc);
+ err = dma_submit_error(cookie);
+ if (err)
+ return -EINVAL;
+
+ dma_async_issue_pending(spdifrx->ctrl_chan);
+
+ return 0;
+}
+
+static void stm32_spdifrx_dma_ctrl_stop(struct stm32_spdifrx_data *spdifrx)
+{
+ dmaengine_terminate_async(spdifrx->ctrl_chan);
+}
+
+static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
+{
+ int cr, cr_mask, imr, ret;
+
+ /* Enable IRQs */
+ imr = SPDIFRX_IMR_IFEIE | SPDIFRX_IMR_SYNCDIE | SPDIFRX_IMR_PERRIE;
+ ret = regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_IMR, imr, imr);
+ if (ret)
+ return ret;
+
+ spin_lock(&spdifrx->lock);
+
+ spdifrx->refcount++;
+
+ regmap_read(spdifrx->regmap, STM32_SPDIFRX_CR, &cr);
+
+ if (!(cr & SPDIFRX_CR_SPDIFEN_MASK)) {
+ /*
+ * Start sync if SPDIFRX is still in idle state.
+ * SPDIFRX reception enabled when sync done
+ */
+ dev_dbg(&spdifrx->pdev->dev, "start synchronization\n");
+
+ /*
+ * SPDIFRX configuration:
+ * Wait for activity before starting sync process. This avoids
+ * sync errors when the S/PDIF signal is missing on the input.
+ * Preamble, CS, user, validity and parity error bits not copied
+ * to DR register.
+ */
+ cr = SPDIFRX_CR_WFA | SPDIFRX_CR_PMSK | SPDIFRX_CR_VMSK |
+ SPDIFRX_CR_CUMSK | SPDIFRX_CR_PTMSK | SPDIFRX_CR_RXSTEO;
+ cr_mask = cr;
+
+ cr |= SPDIFRX_CR_SPDIFENSET(SPDIFRX_SPDIFEN_SYNC);
+ cr_mask |= SPDIFRX_CR_SPDIFEN_MASK;
+ ret = regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_CR,
+ cr_mask, cr);
+ if (ret < 0)
+ dev_err(&spdifrx->pdev->dev,
+ "Failed to start synchronization\n");
+ }
+
+ spin_unlock(&spdifrx->lock);
+
+ return ret;
+}
+
+static void stm32_spdifrx_stop(struct stm32_spdifrx_data *spdifrx)
+{
+ int cr, cr_mask, reg;
+
+ spin_lock(&spdifrx->lock);
+
+ if (--spdifrx->refcount) {
+ spin_unlock(&spdifrx->lock);
+ return;
+ }
+
+ cr = SPDIFRX_CR_SPDIFENSET(SPDIFRX_SPDIFEN_DISABLE);
+ cr_mask = SPDIFRX_CR_SPDIFEN_MASK | SPDIFRX_CR_RXDMAEN;
+
+ regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_CR, cr_mask, cr);
+
+ regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_IMR,
+ SPDIFRX_XIMR_MASK, 0);
+
+ regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_IFCR,
+ SPDIFRX_XIFCR_MASK, SPDIFRX_XIFCR_MASK);
+
+ /* dummy read to clear CSRNE and RXNE in status register */
+ regmap_read(spdifrx->regmap, STM32_SPDIFRX_DR, &reg);
+ regmap_read(spdifrx->regmap, STM32_SPDIFRX_CSR, &reg);
+
+ spin_unlock(&spdifrx->lock);
+}
+
+static int stm32_spdifrx_dma_ctrl_register(struct device *dev,
+ struct stm32_spdifrx_data *spdifrx)
+{
+ int ret;
+
+ spdifrx->dmab = devm_kzalloc(dev, sizeof(struct snd_dma_buffer),
+ GFP_KERNEL);
+ if (!spdifrx->dmab)
+ return -ENOMEM;
+
+ spdifrx->dmab->dev.type = SNDRV_DMA_TYPE_DEV_IRAM;
+ spdifrx->dmab->dev.dev = dev;
+ ret = snd_dma_alloc_pages(spdifrx->dmab->dev.type, dev,
+ SPDIFRX_CSR_BUF_LENGTH, spdifrx->dmab);
+ if (ret < 0) {
+ dev_err(dev, "snd_dma_alloc_pages returned error %d\n", ret);
+ return ret;
+ }
+
+ spdifrx->ctrl_chan = dma_request_chan(dev, "rx-ctrl");
+ if (IS_ERR(spdifrx->ctrl_chan)) {
+ dev_err(dev, "dma_request_chan failed\n");
+ ret = PTR_ERR(spdifrx->ctrl_chan);
+ spdifrx->ctrl_chan = NULL;
+ return ret;
+ }
+
+ spdifrx->slave_config.direction = DMA_DEV_TO_MEM;
+ spdifrx->slave_config.src_addr = (dma_addr_t)(spdifrx->phys_addr +
+ STM32_SPDIFRX_CSR);
+ spdifrx->slave_config.dst_addr = spdifrx->dmab->addr;
+ spdifrx->slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ spdifrx->slave_config.src_maxburst = 1;
+
+ ret = dmaengine_slave_config(spdifrx->ctrl_chan,
+ &spdifrx->slave_config);
+ if (ret < 0) {
+ dev_err(dev, "dmaengine_slave_config returned error %d\n", ret);
+ dma_release_channel(spdifrx->ctrl_chan);
+ spdifrx->ctrl_chan = NULL;
+ }
+
+ return ret;
+}
+
+static const char * const spdifrx_enum_input[] = {
+ "in0", "in1", "in2", "in3"
+};
+
+/* By default CS bits are retrieved from channel A */
+static const char * const spdifrx_enum_cs_channel[] = {
+ "A", "B"
+};
+
+static SOC_ENUM_SINGLE_DECL(ctrl_enum_input,
+ STM32_SPDIFRX_CR, SPDIFRX_CR_INSEL_SHIFT,
+ spdifrx_enum_input);
+
+static SOC_ENUM_SINGLE_DECL(ctrl_enum_cs_channel,
+ STM32_SPDIFRX_CR, SPDIFRX_CR_CHSEL_SHIFT,
+ spdifrx_enum_cs_channel);
+
+static int stm32_spdifrx_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
+ uinfo->count = 1;
+
+ return 0;
+}
+
+static int stm32_spdifrx_ub_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
+ uinfo->count = 1;
+
+ return 0;
+}
+
+static int stm32_spdifrx_get_ctrl_data(struct stm32_spdifrx_data *spdifrx)
+{
+ int ret = 0;
+
+ memset(spdifrx->cs, 0, SPDIFRX_CS_BYTES_NB);
+ memset(spdifrx->ub, 0, SPDIFRX_UB_BYTES_NB);
+
+ ret = stm32_spdifrx_dma_ctrl_start(spdifrx);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(spdifrx->kclk);
+ if (ret) {
+ dev_err(&spdifrx->pdev->dev, "Enable kclk failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_CR,
+ SPDIFRX_CR_CBDMAEN, SPDIFRX_CR_CBDMAEN);
+ if (ret < 0)
+ goto end;
+
+ ret = stm32_spdifrx_start_sync(spdifrx);
+ if (ret < 0)
+ goto end;
+
+ if (wait_for_completion_interruptible_timeout(&spdifrx->cs_completion,
+ msecs_to_jiffies(100))
+ <= 0) {
+ dev_err(&spdifrx->pdev->dev, "Failed to get control data\n");
+ ret = -EAGAIN;
+ }
+
+ stm32_spdifrx_stop(spdifrx);
+ stm32_spdifrx_dma_ctrl_stop(spdifrx);
+
+end:
+ clk_disable_unprepare(spdifrx->kclk);
+
+ return ret;
+}
+
+static int stm32_spdifrx_capture_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai);
+
+ stm32_spdifrx_get_ctrl_data(spdifrx);
+
+ ucontrol->value.iec958.status[0] = spdifrx->cs[0];
+ ucontrol->value.iec958.status[1] = spdifrx->cs[1];
+ ucontrol->value.iec958.status[2] = spdifrx->cs[2];
+ ucontrol->value.iec958.status[3] = spdifrx->cs[3];
+ ucontrol->value.iec958.status[4] = spdifrx->cs[4];
+
+ return 0;
+}
+
+static int stm32_spdif_user_bits_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai);
+
+ stm32_spdifrx_get_ctrl_data(spdifrx);
+
+ ucontrol->value.iec958.status[0] = spdifrx->ub[0];
+ ucontrol->value.iec958.status[1] = spdifrx->ub[1];
+ ucontrol->value.iec958.status[2] = spdifrx->ub[2];
+ ucontrol->value.iec958.status[3] = spdifrx->ub[3];
+ ucontrol->value.iec958.status[4] = spdifrx->ub[4];
+
+ return 0;
+}
+
+static struct snd_kcontrol_new stm32_spdifrx_iec_ctrls[] = {
+ /* Channel status control */
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = SNDRV_CTL_NAME_IEC958("", CAPTURE, DEFAULT),
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = stm32_spdifrx_info,
+ .get = stm32_spdifrx_capture_get,
+ },
+ /* User bits control */
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = "IEC958 User Bit Capture Default",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = stm32_spdifrx_ub_info,
+ .get = stm32_spdif_user_bits_get,
+ },
+};
+
+static struct snd_kcontrol_new stm32_spdifrx_ctrls[] = {
+ SOC_ENUM("SPDIFRX input", ctrl_enum_input),
+ SOC_ENUM("SPDIFRX CS channel", ctrl_enum_cs_channel),
+};
+
+static int stm32_spdifrx_dai_register_ctrls(struct snd_soc_dai *cpu_dai)
+{
+ int ret;
+
+ ret = snd_soc_add_dai_controls(cpu_dai, stm32_spdifrx_iec_ctrls,
+ ARRAY_SIZE(stm32_spdifrx_iec_ctrls));
+ if (ret < 0)
+ return ret;
+
+ return snd_soc_add_component_controls(cpu_dai->component,
+ stm32_spdifrx_ctrls,
+ ARRAY_SIZE(stm32_spdifrx_ctrls));
+}
+
+static int stm32_spdifrx_dai_probe(struct snd_soc_dai *cpu_dai)
+{
+ struct stm32_spdifrx_data *spdifrx = dev_get_drvdata(cpu_dai->dev);
+
+ spdifrx->dma_params.addr = (dma_addr_t)(spdifrx->phys_addr +
+ STM32_SPDIFRX_DR);
+ spdifrx->dma_params.maxburst = 1;
+
+ snd_soc_dai_init_dma_data(cpu_dai, NULL, &spdifrx->dma_params);
+
+ return stm32_spdifrx_dai_register_ctrls(cpu_dai);
+}
+
+static bool stm32_spdifrx_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case STM32_SPDIFRX_CR:
+ case STM32_SPDIFRX_IMR:
+ case STM32_SPDIFRX_SR:
+ case STM32_SPDIFRX_IFCR:
+ case STM32_SPDIFRX_DR:
+ case STM32_SPDIFRX_CSR:
+ case STM32_SPDIFRX_DIR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool stm32_spdifrx_volatile_reg(struct device *dev, unsigned int reg)
+{
+ if (reg == STM32_SPDIFRX_DR)
+ return true;
+
+ return false;
+}
+
+static bool stm32_spdifrx_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case STM32_SPDIFRX_CR:
+ case STM32_SPDIFRX_IMR:
+ case STM32_SPDIFRX_IFCR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config stm32_h7_spdifrx_regmap_conf = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = STM32_SPDIFRX_DIR,
+ .readable_reg = stm32_spdifrx_readable_reg,
+ .volatile_reg = stm32_spdifrx_volatile_reg,
+ .writeable_reg = stm32_spdifrx_writeable_reg,
+ .fast_io = true,
+};
+
+static irqreturn_t stm32_spdifrx_isr(int irq, void *devid)
+{
+ struct stm32_spdifrx_data *spdifrx = (struct stm32_spdifrx_data *)devid;
+ struct snd_pcm_substream *substream = spdifrx->substream;
+ struct platform_device *pdev = spdifrx->pdev;
+ unsigned int cr, mask, sr, imr;
+ unsigned int flags;
+ int err = 0, err_xrun = 0;
+
+ regmap_read(spdifrx->regmap, STM32_SPDIFRX_SR, &sr);
+ regmap_read(spdifrx->regmap, STM32_SPDIFRX_IMR, &imr);
+
+ mask = imr & SPDIFRX_XIMR_MASK;
+ /* SERR, TERR, FERR IRQs are generated if IFEIE is set */
+ if (mask & SPDIFRX_IMR_IFEIE)
+ mask |= (SPDIFRX_IMR_IFEIE << 1) | (SPDIFRX_IMR_IFEIE << 2);
+
+ flags = sr & mask;
+ if (!flags) {
+ dev_err(&pdev->dev, "Unexpected IRQ. rflags=%#x, imr=%#x\n",
+ sr, imr);
+ return IRQ_NONE;
+ }
+
+ /* Clear IRQs */
+ regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_IFCR,
+ SPDIFRX_XIFCR_MASK, flags);
+
+ if (flags & SPDIFRX_SR_PERR) {
+ dev_dbg(&pdev->dev, "Parity error\n");
+ err_xrun = 1;
+ }
+
+ if (flags & SPDIFRX_SR_OVR) {
+ dev_dbg(&pdev->dev, "Overrun error\n");
+ err_xrun = 1;
+ }
+
+ if (flags & SPDIFRX_SR_SBD)
+ dev_dbg(&pdev->dev, "Synchronization block detected\n");
+
+ if (flags & SPDIFRX_SR_SYNCD) {
+ dev_dbg(&pdev->dev, "Synchronization done\n");
+
+ /* Enable spdifrx */
+ cr = SPDIFRX_CR_SPDIFENSET(SPDIFRX_SPDIFEN_ENABLE);
+ regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_CR,
+ SPDIFRX_CR_SPDIFEN_MASK, cr);
+ }
+
+ if (flags & SPDIFRX_SR_FERR) {
+ dev_dbg(&pdev->dev, "Frame error\n");
+ err = 1;
+ }
+
+ if (flags & SPDIFRX_SR_SERR) {
+ dev_dbg(&pdev->dev, "Synchronization error\n");
+ err = 1;
+ }
+
+ if (flags & SPDIFRX_SR_TERR) {
+ dev_dbg(&pdev->dev, "Timeout error\n");
+ err = 1;
+ }
+
+ if (err) {
+ /* SPDIFRX in STATE_STOP. Disable SPDIFRX to clear errors */
+ cr = SPDIFRX_CR_SPDIFENSET(SPDIFRX_SPDIFEN_DISABLE);
+ regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_CR,
+ SPDIFRX_CR_SPDIFEN_MASK, cr);
+
+ if (substream)
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
+
+ return IRQ_HANDLED;
+ }
+
+ if (err_xrun && substream)
+ snd_pcm_stop_xrun(substream);
+
+ return IRQ_HANDLED;
+}
+
+static int stm32_spdifrx_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai);
+ int ret;
+
+ spdifrx->substream = substream;
+
+ ret = clk_prepare_enable(spdifrx->kclk);
+ if (ret)
+ dev_err(&spdifrx->pdev->dev, "Enable kclk failed: %d\n", ret);
+
+ return ret;
+}
+
+static int stm32_spdifrx_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai);
+ int data_size = params_width(params);
+ int fmt;
+
+ switch (data_size) {
+ case 16:
+ fmt = SPDIFRX_DRFMT_PACKED;
+ spdifrx->dma_params.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ case 32:
+ fmt = SPDIFRX_DRFMT_LEFT;
+ spdifrx->dma_params.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ default:
+ dev_err(&spdifrx->pdev->dev, "Unexpected data format\n");
+ return -EINVAL;
+ }
+
+ snd_soc_dai_init_dma_data(cpu_dai, NULL, &spdifrx->dma_params);
+
+ return regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_CR,
+ SPDIFRX_CR_DRFMT_MASK,
+ SPDIFRX_CR_DRFMTSET(fmt));
+}
+
+static int stm32_spdifrx_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai);
+ int ret = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_IMR,
+ SPDIFRX_IMR_OVRIE, SPDIFRX_IMR_OVRIE);
+
+ regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_CR,
+ SPDIFRX_CR_RXDMAEN, SPDIFRX_CR_RXDMAEN);
+
+ ret = stm32_spdifrx_start_sync(spdifrx);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ case SNDRV_PCM_TRIGGER_STOP:
+ stm32_spdifrx_stop(spdifrx);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static void stm32_spdifrx_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai);
+
+ spdifrx->substream = NULL;
+ clk_disable_unprepare(spdifrx->kclk);
+}
+
+static const struct snd_soc_dai_ops stm32_spdifrx_pcm_dai_ops = {
+ .startup = stm32_spdifrx_startup,
+ .hw_params = stm32_spdifrx_hw_params,
+ .trigger = stm32_spdifrx_trigger,
+ .shutdown = stm32_spdifrx_shutdown,
+};
+
+static struct snd_soc_dai_driver stm32_spdifrx_dai[] = {
+ {
+ .name = "spdifrx-capture-cpu-dai",
+ .probe = stm32_spdifrx_dai_probe,
+ .capture = {
+ .stream_name = "CPU-Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE |
+ SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &stm32_spdifrx_pcm_dai_ops,
+ }
+};
+
+static const struct snd_pcm_hardware stm32_spdifrx_pcm_hw = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP,
+ .buffer_bytes_max = 8 * PAGE_SIZE,
+ .period_bytes_max = 2048, /* MDMA constraint */
+ .periods_min = 2,
+ .periods_max = 8,
+};
+
+static const struct snd_soc_component_driver stm32_spdifrx_component = {
+ .name = "stm32-spdifrx",
+};
+
+static const struct snd_dmaengine_pcm_config stm32_spdifrx_pcm_config = {
+ .pcm_hardware = &stm32_spdifrx_pcm_hw,
+ .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
+};
+
+static const struct of_device_id stm32_spdifrx_ids[] = {
+ {
+ .compatible = "st,stm32h7-spdifrx",
+ .data = &stm32_h7_spdifrx_regmap_conf
+ },
+ {}
+};
+
+static int stm_spdifrx_parse_of(struct platform_device *pdev,
+ struct stm32_spdifrx_data *spdifrx)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id;
+ struct resource *res;
+
+ if (!np)
+ return -ENODEV;
+
+ of_id = of_match_device(stm32_spdifrx_ids, &pdev->dev);
+ if (of_id)
+ spdifrx->regmap_conf =
+ (const struct regmap_config *)of_id->data;
+ else
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spdifrx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spdifrx->base))
+ return PTR_ERR(spdifrx->base);
+
+ spdifrx->phys_addr = res->start;
+
+ spdifrx->kclk = devm_clk_get(&pdev->dev, "kclk");
+ if (IS_ERR(spdifrx->kclk)) {
+ dev_err(&pdev->dev, "Could not get kclk\n");
+ return PTR_ERR(spdifrx->kclk);
+ }
+
+ spdifrx->irq = platform_get_irq(pdev, 0);
+ if (spdifrx->irq < 0) {
+ dev_err(&pdev->dev, "No irq for node %s\n", pdev->name);
+ return spdifrx->irq;
+ }
+
+ return 0;
+}
+
+static int stm32_spdifrx_probe(struct platform_device *pdev)
+{
+ struct stm32_spdifrx_data *spdifrx;
+ struct reset_control *rst;
+ const struct snd_dmaengine_pcm_config *pcm_config = NULL;
+ int ret;
+
+ spdifrx = devm_kzalloc(&pdev->dev, sizeof(*spdifrx), GFP_KERNEL);
+ if (!spdifrx)
+ return -ENOMEM;
+
+ spdifrx->pdev = pdev;
+ init_completion(&spdifrx->cs_completion);
+ spin_lock_init(&spdifrx->lock);
+
+ platform_set_drvdata(pdev, spdifrx);
+
+ ret = stm_spdifrx_parse_of(pdev, spdifrx);
+ if (ret)
+ return ret;
+
+ spdifrx->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "kclk",
+ spdifrx->base,
+ spdifrx->regmap_conf);
+ if (IS_ERR(spdifrx->regmap)) {
+ dev_err(&pdev->dev, "Regmap init failed\n");
+ return PTR_ERR(spdifrx->regmap);
+ }
+
+ ret = devm_request_irq(&pdev->dev, spdifrx->irq, stm32_spdifrx_isr, 0,
+ dev_name(&pdev->dev), spdifrx);
+ if (ret) {
+ dev_err(&pdev->dev, "IRQ request returned %d\n", ret);
+ return ret;
+ }
+
+ rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (!IS_ERR(rst)) {
+ reset_control_assert(rst);
+ udelay(2);
+ reset_control_deassert(rst);
+ }
+
+ ret = devm_snd_soc_register_component(&pdev->dev,
+ &stm32_spdifrx_component,
+ stm32_spdifrx_dai,
+ ARRAY_SIZE(stm32_spdifrx_dai));
+ if (ret)
+ return ret;
+
+ ret = stm32_spdifrx_dma_ctrl_register(&pdev->dev, spdifrx);
+ if (ret)
+ goto error;
+
+ pcm_config = &stm32_spdifrx_pcm_config;
+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, pcm_config, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "PCM DMA register returned %d\n", ret);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ if (spdifrx->ctrl_chan)
+ dma_release_channel(spdifrx->ctrl_chan);
+ if (spdifrx->dmab)
+ snd_dma_free_pages(spdifrx->dmab);
+
+ return ret;
+}
+
+static int stm32_spdifrx_remove(struct platform_device *pdev)
+{
+ struct stm32_spdifrx_data *spdifrx = platform_get_drvdata(pdev);
+
+ if (spdifrx->ctrl_chan)
+ dma_release_channel(spdifrx->ctrl_chan);
+
+ if (spdifrx->dmab)
+ snd_dma_free_pages(spdifrx->dmab);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, stm32_spdifrx_ids);
+
+static struct platform_driver stm32_spdifrx_driver = {
+ .driver = {
+ .name = "st,stm32-spdifrx",
+ .of_match_table = stm32_spdifrx_ids,
+ },
+ .probe = stm32_spdifrx_probe,
+ .remove = stm32_spdifrx_remove,
+};
+
+module_platform_driver(stm32_spdifrx_driver);
+
+MODULE_DESCRIPTION("STM32 Soc spdifrx Interface");
+MODULE_AUTHOR("Olivier Moysan, <[email protected]>");
+MODULE_ALIAS("platform:stm32-spdifrx");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
index c3aab10fa085..150069987c0c 100644
--- a/sound/soc/sunxi/sun4i-codec.c
+++ b/sound/soc/sunxi/sun4i-codec.c
@@ -1339,6 +1339,44 @@ static struct snd_soc_card *sun8i_h3_codec_create_card(struct device *dev)
return card;
};
+static struct snd_soc_card *sun8i_v3s_codec_create_card(struct device *dev)
+{
+ struct snd_soc_card *card;
+ int ret;
+
+ card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return ERR_PTR(-ENOMEM);
+
+ aux_dev.codec_of_node = of_parse_phandle(dev->of_node,
+ "allwinner,codec-analog-controls",
+ 0);
+ if (!aux_dev.codec_of_node) {
+ dev_err(dev, "Can't find analog controls for codec.\n");
+ return ERR_PTR(-EINVAL);
+ };
+
+ card->dai_link = sun4i_codec_create_link(dev, &card->num_links);
+ if (!card->dai_link)
+ return ERR_PTR(-ENOMEM);
+
+ card->dev = dev;
+ card->name = "V3s Audio Codec";
+ card->dapm_widgets = sun6i_codec_card_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
+ card->dapm_routes = sun8i_codec_card_routes;
+ card->num_dapm_routes = ARRAY_SIZE(sun8i_codec_card_routes);
+ card->aux_dev = &aux_dev;
+ card->num_aux_devs = 1;
+ card->fully_routed = true;
+
+ ret = snd_soc_of_parse_audio_routing(card, "allwinner,audio-routing");
+ if (ret)
+ dev_warn(dev, "failed to parse audio-routing: %d\n", ret);
+
+ return card;
+};
+
static const struct regmap_config sun4i_codec_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -1374,6 +1412,13 @@ static const struct regmap_config sun8i_h3_codec_regmap_config = {
.max_register = SUN8I_H3_CODEC_ADC_DBG,
};
+static const struct regmap_config sun8i_v3s_codec_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = SUN8I_H3_CODEC_ADC_DBG,
+};
+
struct sun4i_codec_quirks {
const struct regmap_config *regmap_config;
const struct snd_soc_codec_driver *codec;
@@ -1437,6 +1482,20 @@ static const struct sun4i_codec_quirks sun8i_h3_codec_quirks = {
.has_reset = true,
};
+static const struct sun4i_codec_quirks sun8i_v3s_codec_quirks = {
+ .regmap_config = &sun8i_v3s_codec_regmap_config,
+ /*
+ * TODO: The codec structure should be split out, as done for
+ * H3, when adding digital audio processing support.
+ */
+ .codec = &sun8i_a23_codec_codec,
+ .create_card = sun8i_v3s_codec_create_card,
+ .reg_adc_fifoc = REG_FIELD(SUN6I_CODEC_ADC_FIFOC, 0, 31),
+ .reg_dac_txdata = SUN8I_H3_CODEC_DAC_TXDATA,
+ .reg_adc_rxdata = SUN6I_CODEC_ADC_RXDATA,
+ .has_reset = true,
+};
+
static const struct of_device_id sun4i_codec_of_match[] = {
{
.compatible = "allwinner,sun4i-a10-codec",
@@ -1458,6 +1517,10 @@ static const struct of_device_id sun4i_codec_of_match[] = {
.compatible = "allwinner,sun8i-h3-codec",
.data = &sun8i_h3_codec_quirks,
},
+ {
+ .compatible = "allwinner,sun8i-v3s-codec",
+ .data = &sun8i_v3s_codec_quirks,
+ },
{}
};
MODULE_DEVICE_TABLE(of, sun4i_codec_of_match);
diff --git a/sound/soc/sunxi/sun8i-codec-analog.c b/sound/soc/sunxi/sun8i-codec-analog.c
index 6c17c99c2c8d..485e79f292c4 100644
--- a/sound/soc/sunxi/sun8i-codec-analog.c
+++ b/sound/soc/sunxi/sun8i-codec-analog.c
@@ -219,6 +219,22 @@ static const struct snd_kcontrol_new sun8i_codec_mixer_controls[] = {
SUN8I_ADDA_LOMIXSC_MIC2, 1, 0),
};
+/* mixer controls */
+static const struct snd_kcontrol_new sun8i_v3s_codec_mixer_controls[] = {
+ SOC_DAPM_DOUBLE_R("DAC Playback Switch",
+ SUN8I_ADDA_LOMIXSC,
+ SUN8I_ADDA_ROMIXSC,
+ SUN8I_ADDA_LOMIXSC_DACL, 1, 0),
+ SOC_DAPM_DOUBLE_R("DAC Reversed Playback Switch",
+ SUN8I_ADDA_LOMIXSC,
+ SUN8I_ADDA_ROMIXSC,
+ SUN8I_ADDA_LOMIXSC_DACR, 1, 0),
+ SOC_DAPM_DOUBLE_R("Mic1 Playback Switch",
+ SUN8I_ADDA_LOMIXSC,
+ SUN8I_ADDA_ROMIXSC,
+ SUN8I_ADDA_LOMIXSC_MIC1, 1, 0),
+};
+
/* ADC mixer controls */
static const struct snd_kcontrol_new sun8i_codec_adc_mixer_controls[] = {
SOC_DAPM_DOUBLE_R("Mixer Capture Switch",
@@ -243,6 +259,22 @@ static const struct snd_kcontrol_new sun8i_codec_adc_mixer_controls[] = {
SUN8I_ADDA_LADCMIXSC_MIC2, 1, 0),
};
+/* ADC mixer controls */
+static const struct snd_kcontrol_new sun8i_v3s_codec_adc_mixer_controls[] = {
+ SOC_DAPM_DOUBLE_R("Mixer Capture Switch",
+ SUN8I_ADDA_LADCMIXSC,
+ SUN8I_ADDA_RADCMIXSC,
+ SUN8I_ADDA_LADCMIXSC_OMIXRL, 1, 0),
+ SOC_DAPM_DOUBLE_R("Mixer Reversed Capture Switch",
+ SUN8I_ADDA_LADCMIXSC,
+ SUN8I_ADDA_RADCMIXSC,
+ SUN8I_ADDA_LADCMIXSC_OMIXRR, 1, 0),
+ SOC_DAPM_DOUBLE_R("Mic1 Capture Switch",
+ SUN8I_ADDA_LADCMIXSC,
+ SUN8I_ADDA_RADCMIXSC,
+ SUN8I_ADDA_LADCMIXSC_MIC1, 1, 0),
+};
+
/* volume / mute controls */
static const DECLARE_TLV_DB_SCALE(sun8i_codec_out_mixer_pregain_scale,
-450, 150, 0);
@@ -289,16 +321,12 @@ static const struct snd_soc_dapm_widget sun8i_codec_common_widgets[] = {
/* Microphone input */
SND_SOC_DAPM_INPUT("MIC1"),
- /* Microphone Bias */
- SND_SOC_DAPM_SUPPLY("MBIAS", SUN8I_ADDA_MIC1G_MICBIAS_CTRL,
- SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MMICBIASEN,
- 0, NULL, 0),
-
/* Mic input path */
SND_SOC_DAPM_PGA("Mic1 Amplifier", SUN8I_ADDA_MIC1G_MICBIAS_CTRL,
SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MIC1AMPEN, 0, NULL, 0),
+};
- /* Mixers */
+static const struct snd_soc_dapm_widget sun8i_codec_mixer_widgets[] = {
SND_SOC_DAPM_MIXER("Left Mixer", SUN8I_ADDA_DAC_PA_SRC,
SUN8I_ADDA_DAC_PA_SRC_LMIXEN, 0,
sun8i_codec_mixer_controls,
@@ -317,10 +345,31 @@ static const struct snd_soc_dapm_widget sun8i_codec_common_widgets[] = {
ARRAY_SIZE(sun8i_codec_adc_mixer_controls)),
};
+static const struct snd_soc_dapm_widget sun8i_v3s_codec_mixer_widgets[] = {
+ SND_SOC_DAPM_MIXER("Left Mixer", SUN8I_ADDA_DAC_PA_SRC,
+ SUN8I_ADDA_DAC_PA_SRC_LMIXEN, 0,
+ sun8i_v3s_codec_mixer_controls,
+ ARRAY_SIZE(sun8i_v3s_codec_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Right Mixer", SUN8I_ADDA_DAC_PA_SRC,
+ SUN8I_ADDA_DAC_PA_SRC_RMIXEN, 0,
+ sun8i_v3s_codec_mixer_controls,
+ ARRAY_SIZE(sun8i_v3s_codec_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Left ADC Mixer", SUN8I_ADDA_ADC_AP_EN,
+ SUN8I_ADDA_ADC_AP_EN_ADCLEN, 0,
+ sun8i_v3s_codec_adc_mixer_controls,
+ ARRAY_SIZE(sun8i_v3s_codec_adc_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Right ADC Mixer", SUN8I_ADDA_ADC_AP_EN,
+ SUN8I_ADDA_ADC_AP_EN_ADCREN, 0,
+ sun8i_v3s_codec_adc_mixer_controls,
+ ARRAY_SIZE(sun8i_v3s_codec_adc_mixer_controls)),
+};
+
static const struct snd_soc_dapm_route sun8i_codec_common_routes[] = {
/* Microphone Routes */
{ "Mic1 Amplifier", NULL, "MIC1"},
+};
+static const struct snd_soc_dapm_route sun8i_codec_mixer_routes[] = {
/* Left Mixer Routes */
{ "Left Mixer", "DAC Playback Switch", "Left DAC" },
{ "Left Mixer", "DAC Reversed Playback Switch", "Right DAC" },
@@ -453,6 +502,27 @@ static int sun8i_codec_add_headphone(struct snd_soc_component *cmpnt)
return 0;
}
+/* mbias specific widget */
+static const struct snd_soc_dapm_widget sun8i_codec_mbias_widgets[] = {
+ SND_SOC_DAPM_SUPPLY("MBIAS", SUN8I_ADDA_MIC1G_MICBIAS_CTRL,
+ SUN8I_ADDA_MIC1G_MICBIAS_CTRL_MMICBIASEN,
+ 0, NULL, 0),
+};
+
+static int sun8i_codec_add_mbias(struct snd_soc_component *cmpnt)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cmpnt);
+ struct device *dev = cmpnt->dev;
+ int ret;
+
+ ret = snd_soc_dapm_new_controls(dapm, sun8i_codec_mbias_widgets,
+ ARRAY_SIZE(sun8i_codec_mbias_widgets));
+ if (ret)
+ dev_err(dev, "Failed to add MBIAS DAPM widgets: %d\n", ret);
+
+ return ret;
+}
+
/* hmic specific widget */
static const struct snd_soc_dapm_widget sun8i_codec_hmic_widgets[] = {
SND_SOC_DAPM_SUPPLY("HBIAS", SUN8I_ADDA_MIC1G_MICBIAS_CTRL,
@@ -679,6 +749,7 @@ struct sun8i_codec_analog_quirks {
bool has_hmic;
bool has_linein;
bool has_lineout;
+ bool has_mbias;
bool has_mic2;
};
@@ -686,15 +757,64 @@ static const struct sun8i_codec_analog_quirks sun8i_a23_quirks = {
.has_headphone = true,
.has_hmic = true,
.has_linein = true,
+ .has_mbias = true,
.has_mic2 = true,
};
static const struct sun8i_codec_analog_quirks sun8i_h3_quirks = {
.has_linein = true,
.has_lineout = true,
+ .has_mbias = true,
.has_mic2 = true,
};
+static int sun8i_codec_analog_add_mixer(struct snd_soc_component *cmpnt,
+ const struct sun8i_codec_analog_quirks *quirks)
+{
+ struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cmpnt);
+ struct device *dev = cmpnt->dev;
+ int ret;
+
+ if (!quirks->has_mic2 && !quirks->has_linein) {
+ /*
+ * Apply the special widget set which uses controls
+ * without MIC2 and Line In, for SoCs without these inputs.
+ * TODO: not all special cases are supported yet; this one
+ * is present because it covers the V3s.
+ */
+ ret = snd_soc_dapm_new_controls(dapm,
+ sun8i_v3s_codec_mixer_widgets,
+ ARRAY_SIZE(sun8i_v3s_codec_mixer_widgets));
+ if (ret) {
+ dev_err(dev, "Failed to add V3s Mixer DAPM widgets: %d\n", ret);
+ return ret;
+ }
+ } else {
+ /* Apply the generic mixer widget set. */
+ ret = snd_soc_dapm_new_controls(dapm,
+ sun8i_codec_mixer_widgets,
+ ARRAY_SIZE(sun8i_codec_mixer_widgets));
+ if (ret) {
+ dev_err(dev, "Failed to add Mixer DAPM widgets: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = snd_soc_dapm_add_routes(dapm, sun8i_codec_mixer_routes,
+ ARRAY_SIZE(sun8i_codec_mixer_routes));
+ if (ret) {
+ dev_err(dev, "Failed to add Mixer DAPM routes: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct sun8i_codec_analog_quirks sun8i_v3s_quirks = {
+ .has_headphone = true,
+ .has_hmic = true,
+};
+
static int sun8i_codec_analog_cmpnt_probe(struct snd_soc_component *cmpnt)
{
struct device *dev = cmpnt->dev;
@@ -709,6 +829,9 @@ static int sun8i_codec_analog_cmpnt_probe(struct snd_soc_component *cmpnt)
quirks = of_device_get_match_data(dev);
/* Add controls, widgets, and routes for individual features */
+ ret = sun8i_codec_analog_add_mixer(cmpnt, quirks);
+ if (ret)
+ return ret;
if (quirks->has_headphone) {
ret = sun8i_codec_add_headphone(cmpnt);
@@ -734,6 +857,12 @@ static int sun8i_codec_analog_cmpnt_probe(struct snd_soc_component *cmpnt)
return ret;
}
+ if (quirks->has_mbias) {
+ ret = sun8i_codec_add_mbias(cmpnt);
+ if (ret)
+ return ret;
+ }
+
if (quirks->has_mic2) {
ret = sun8i_codec_add_mic2(cmpnt);
if (ret)
@@ -762,6 +891,10 @@ static const struct of_device_id sun8i_codec_analog_of_match[] = {
.compatible = "allwinner,sun8i-h3-codec-analog",
.data = &sun8i_h3_quirks,
},
+ {
+ .compatible = "allwinner,sun8i-v3s-codec-analog",
+ .data = &sun8i_v3s_quirks,
+ },
{}
};
MODULE_DEVICE_TABLE(of, sun8i_codec_analog_of_match);
diff --git a/sound/soc/zte/zx-i2s.c b/sound/soc/zte/zx-i2s.c
index a865f37c2a56..8bbad1d72bc5 100644
--- a/sound/soc/zte/zx-i2s.c
+++ b/sound/soc/zte/zx-i2s.c
@@ -203,13 +203,15 @@ static int zx_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
- i2s->master = 1;
- val |= ZX_I2S_TIMING_MAST;
- break;
- case SND_SOC_DAIFMT_CBS_CFS:
+ /* Codec is master, and I2S is slave. */
i2s->master = 0;
val |= ZX_I2S_TIMING_SLAVE;
break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ /* Codec is slave, and I2S is master. */
+ i2s->master = 1;
+ val |= ZX_I2S_TIMING_MAST;
+ break;
default:
dev_err(cpu_dai->dev, "Unknown master/slave format\n");
return -EINVAL;
@@ -226,11 +228,12 @@ static int zx_i2s_hw_params(struct snd_pcm_substream *substream,
struct zx_i2s_info *i2s = snd_soc_dai_get_drvdata(socdai);
struct snd_dmaengine_dai_dma_data *dma_data;
unsigned int lane, ch_num, len, ret = 0;
+ unsigned int ts_width = 32;
unsigned long val;
unsigned long chn_cfg;
dma_data = snd_soc_dai_get_dma_data(socdai, substream);
- dma_data->addr_width = params_width(params) >> 3;
+ dma_data->addr_width = ts_width >> 3;
val = readl_relaxed(i2s->reg_base + ZX_I2S_TIMING_CTRL);
val &= ~(ZX_I2S_TIMING_TS_WIDTH_MASK | ZX_I2S_TIMING_DATA_SIZE_MASK |
@@ -251,7 +254,7 @@ static int zx_i2s_hw_params(struct snd_pcm_substream *substream,
dev_err(socdai->dev, "Unknown data format\n");
return -EINVAL;
}
- val |= ZX_I2S_TIMING_TS_WIDTH(len) | ZX_I2S_TIMING_DATA_SIZE(len);
+ val |= ZX_I2S_TIMING_TS_WIDTH(ts_width) | ZX_I2S_TIMING_DATA_SIZE(len);
ch_num = params_channels(params);
switch (ch_num) {
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
index 30bdc971883b..3d7d425fbd24 100644
--- a/sound/sparc/cs4231.c
+++ b/sound/sparc/cs4231.c
@@ -200,12 +200,12 @@ static unsigned char freq_bits[14] = {
/* 48000 */ 0x0C | CS4231_XTAL1
};
-static unsigned int rates[14] = {
+static const unsigned int rates[14] = {
5510, 6620, 8000, 9600, 11025, 16000, 18900, 22050,
27042, 32000, 33075, 37800, 44100, 48000
};
-static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
+static const struct snd_pcm_hw_constraint_list hw_constraints_rates = {
.count = ARRAY_SIZE(rates),
.list = rates,
};
diff --git a/sound/synth/Kconfig b/sound/synth/Kconfig
new file mode 100644
index 000000000000..dfe8950e0556
--- /dev/null
+++ b/sound/synth/Kconfig
@@ -0,0 +1,2 @@
+config SND_SYNTH_EMUX
+ tristate
diff --git a/sound/synth/emux/Makefile b/sound/synth/emux/Makefile
index fb761c2c2b50..d1bac923eb1b 100644
--- a/sound/synth/emux/Makefile
+++ b/sound/synth/emux/Makefile
@@ -6,8 +6,8 @@
snd-emux-synth-objs := emux.o emux_synth.o emux_seq.o emux_nrpn.o \
emux_effect.o emux_hwdep.o soundfont.o
snd-emux-synth-$(CONFIG_SND_PROC_FS) += emux_proc.o
-snd-emux-synth-$(CONFIG_SND_SEQUENCER_OSS) += emux_oss.o
+ifneq ($(CONFIG_SND_SEQUENCER_OSS),)
+snd-emux-synth-y += emux_oss.o
+endif
-# Toplevel Module Dependencies
-obj-$(CONFIG_SND_SBAWE_SEQ) += snd-emux-synth.o
-obj-$(CONFIG_SND_EMU10K1_SEQ) += snd-emux-synth.o
+obj-$(CONFIG_SND_SYNTH_EMUX) += snd-emux-synth.o
diff --git a/sound/synth/emux/emux.c b/sound/synth/emux/emux.c
index 9312cd8a6fdd..b9981e8c0027 100644
--- a/sound/synth/emux/emux.c
+++ b/sound/synth/emux/emux.c
@@ -47,7 +47,7 @@ int snd_emux_new(struct snd_emux **remu)
mutex_init(&emu->register_mutex);
emu->client = -1;
-#ifdef CONFIG_SND_SEQUENCER_OSS
+#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
emu->oss_synth = NULL;
#endif
emu->max_voices = 0;
@@ -123,7 +123,7 @@ int snd_emux_register(struct snd_emux *emu, struct snd_card *card, int index, ch
snd_emux_init_voices(emu);
snd_emux_init_seq(emu, card, index);
-#ifdef CONFIG_SND_SEQUENCER_OSS
+#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
snd_emux_init_seq_oss(emu);
#endif
snd_emux_init_virmidi(emu, card);
@@ -150,7 +150,7 @@ int snd_emux_free(struct snd_emux *emu)
snd_emux_proc_free(emu);
snd_emux_delete_virmidi(emu);
-#ifdef CONFIG_SND_SEQUENCER_OSS
+#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
snd_emux_detach_seq_oss(emu);
#endif
snd_emux_detach_seq(emu);
diff --git a/sound/synth/emux/emux_effect.c b/sound/synth/emux/emux_effect.c
index a447218b6160..9ac0bf531b4b 100644
--- a/sound/synth/emux/emux_effect.c
+++ b/sound/synth/emux/emux_effect.c
@@ -150,7 +150,7 @@ effect_get_offset(struct snd_midi_channel *chan, int lo, int hi, int mode)
return addr;
}
-#ifdef CONFIG_SND_SEQUENCER_OSS
+#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
/* change effects - for OSS sequencer compatibility */
void
snd_emux_send_effect_oss(struct snd_emux_port *port,
diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
index 850fab4a8f3b..de19e108974a 100644
--- a/sound/synth/emux/emux_oss.c
+++ b/sound/synth/emux/emux_oss.c
@@ -23,8 +23,6 @@
*/
-#ifdef CONFIG_SND_SEQUENCER_OSS
-
#include <linux/export.h>
#include <linux/uaccess.h>
#include <sound/core.h>
@@ -505,5 +503,3 @@ fake_event(struct snd_emux *emu, struct snd_emux_port *port, int ch, int param,
ev.data.control.value = val;
snd_emux_event_input(&ev, 0, port, atomic, hop);
}
-
-#endif /* CONFIG_SND_SEQUENCER_OSS */
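The IS_ENABLED() conversions in the emux files (together with the Makefile change above) reflect that CONFIG_SND_SEQUENCER_OSS can now be built modularly, so a plain #ifdef no longer catches the =m case. A compact illustration, with CONFIG_FOO standing in for any tristate symbol:

	#include <linux/kconfig.h>

	/*
	 * For a tristate CONFIG_FOO:
	 *   =y defines CONFIG_FOO
	 *   =m defines CONFIG_FOO_MODULE (but not CONFIG_FOO)
	 */

	#ifdef CONFIG_FOO			/* built-in only; silently misses =m */
	static void foo_builtin_only(void) { }
	#endif

	#if IS_ENABLED(CONFIG_FOO)		/* true for both =y and =m */
	static void foo_any_build(void) { }
	#endif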
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index a452ad7cec40..f61b5662bb89 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -91,7 +91,7 @@ config SND_USB_CAIAQ_INPUT
config SND_USB_US122L
tristate "Tascam US-122L USB driver"
- depends on X86
+ depends on X86 || COMPILE_TEST
select SND_HWDEP
select SND_RAWMIDI
help
diff --git a/sound/usb/line6/driver.h b/sound/usb/line6/driver.h
index a5c2e9ae5f17..dc97895547be 100644
--- a/sound/usb/line6/driver.h
+++ b/sound/usb/line6/driver.h
@@ -117,6 +117,8 @@ enum {
LINE6_CAP_IN_NEEDS_OUT = 1 << 3,
/* device uses raw MIDI via USB (data endpoints) */
LINE6_CAP_CONTROL_MIDI = 1 << 4,
+ /* device provides low-level information */
+ LINE6_CAP_CONTROL_INFO = 1 << 5,
};
/*
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index 6ab23e5aee71..956f847a96e4 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2011 Stefan Hajnoczi <[email protected]>
* Copyright (C) 2015 Andrej Krutak <[email protected]>
+ * Copyright (C) 2017 Hans P. Moller <[email protected]>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -37,7 +38,8 @@ enum {
LINE6_PODHD500_0,
LINE6_PODHD500_1,
LINE6_PODX3,
- LINE6_PODX3LIVE
+ LINE6_PODX3LIVE,
+ LINE6_PODHD500X
};
struct usb_line6_podhd {
@@ -291,7 +293,7 @@ static void podhd_disconnect(struct usb_line6 *line6)
{
struct usb_line6_podhd *pod = (struct usb_line6_podhd *)line6;
- if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) {
+ if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL_INFO) {
struct usb_interface *intf;
del_timer_sync(&pod->startup_timer);
@@ -331,7 +333,9 @@ static int podhd_init(struct usb_line6 *line6,
pod->line6.properties->ctrl_if, err);
return err;
}
+ }
+ if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL_INFO) {
/* create sysfs entries: */
err = snd_card_add_dev_attr(line6->card, &podhd_dev_attr_group);
if (err < 0)
@@ -348,7 +352,7 @@ static int podhd_init(struct usb_line6 *line6,
return err;
}
- if (!(pod->line6.properties->capabilities & LINE6_CAP_CONTROL)) {
+ if (!(pod->line6.properties->capabilities & LINE6_CAP_CONTROL_INFO)) {
/* register USB audio system directly */
return podhd_startup_finalize(pod);
}
@@ -372,6 +376,7 @@ static const struct usb_device_id podhd_id_table[] = {
{ LINE6_IF_NUM(0x414D, 1), .driver_info = LINE6_PODHD500_1 },
{ LINE6_IF_NUM(0x414A, 0), .driver_info = LINE6_PODX3 },
{ LINE6_IF_NUM(0x414B, 0), .driver_info = LINE6_PODX3LIVE },
+ { LINE6_IF_NUM(0x4159, 0), .driver_info = LINE6_PODHD500X },
{}
};
@@ -425,7 +430,7 @@ static const struct line6_properties podhd_properties_table[] = {
[LINE6_PODX3] = {
.id = "PODX3",
.name = "POD X3",
- .capabilities = LINE6_CAP_CONTROL
+ .capabilities = LINE6_CAP_CONTROL | LINE6_CAP_CONTROL_INFO
| LINE6_CAP_PCM | LINE6_CAP_HWMON | LINE6_CAP_IN_NEEDS_OUT,
.altsetting = 1,
.ep_ctrl_r = 0x81,
@@ -437,7 +442,7 @@ static const struct line6_properties podhd_properties_table[] = {
[LINE6_PODX3LIVE] = {
.id = "PODX3LIVE",
.name = "POD X3 LIVE",
- .capabilities = LINE6_CAP_CONTROL
+ .capabilities = LINE6_CAP_CONTROL | LINE6_CAP_CONTROL_INFO
| LINE6_CAP_PCM | LINE6_CAP_HWMON | LINE6_CAP_IN_NEEDS_OUT,
.altsetting = 1,
.ep_ctrl_r = 0x81,
@@ -446,6 +451,18 @@ static const struct line6_properties podhd_properties_table[] = {
.ep_audio_r = 0x86,
.ep_audio_w = 0x02,
},
+ [LINE6_PODHD500X] = {
+ .id = "PODHD500X",
+ .name = "POD HD500X",
+ .capabilities = LINE6_CAP_CONTROL
+ | LINE6_CAP_PCM | LINE6_CAP_HWMON,
+ .altsetting = 1,
+ .ep_ctrl_r = 0x81,
+ .ep_ctrl_w = 0x01,
+ .ctrl_if = 1,
+ .ep_audio_r = 0x86,
+ .ep_audio_w = 0x02,
+ },
};
/*
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 4fa0053a40af..e3d1dec48ee4 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -362,7 +362,7 @@ static int snd_audigy2nx_led_resume(struct usb_mixer_elem_list *list)
}
/* name and private_value are set dynamically */
-static struct snd_kcontrol_new snd_audigy2nx_control = {
+static const struct snd_kcontrol_new snd_audigy2nx_control = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.info = snd_audigy2nx_led_info,
.get = snd_audigy2nx_led_get,
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index e118bdca983d..a33e31b2fc2f 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -46,8 +46,10 @@ MODULE_PARM_DESC(id, "ID string for "NAME_ALLCAPS".");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable "NAME_ALLCAPS".");
-static int snd_us122l_card_used[SNDRV_CARDS];
+/* driver_info flags */
+#define US122L_FLAG_US144 BIT(0)
+static int snd_us122l_card_used[SNDRV_CARDS];
static int us122l_create_usbmidi(struct snd_card *card)
{
@@ -198,8 +200,7 @@ static int usb_stream_hwdep_open(struct snd_hwdep *hw, struct file *file)
if (!us122l->first)
us122l->first = file;
- if (us122l->dev->descriptor.idProduct == USB_ID_US144 ||
- us122l->dev->descriptor.idProduct == USB_ID_US144MKII) {
+ if (us122l->is_us144) {
iface = usb_ifnum_to_if(us122l->dev, 0);
usb_autopm_get_interface(iface);
}
@@ -214,8 +215,7 @@ static int usb_stream_hwdep_release(struct snd_hwdep *hw, struct file *file)
struct usb_interface *iface;
snd_printdd(KERN_DEBUG "%p %p\n", hw, file);
- if (us122l->dev->descriptor.idProduct == USB_ID_US144 ||
- us122l->dev->descriptor.idProduct == USB_ID_US144MKII) {
+ if (us122l->is_us144) {
iface = usb_ifnum_to_if(us122l->dev, 0);
usb_autopm_put_interface(iface);
}
@@ -483,8 +483,7 @@ static bool us122l_create_card(struct snd_card *card)
int err;
struct us122l *us122l = US122L(card);
- if (us122l->dev->descriptor.idProduct == USB_ID_US144 ||
- us122l->dev->descriptor.idProduct == USB_ID_US144MKII) {
+ if (us122l->is_us144) {
err = usb_set_interface(us122l->dev, 0, 1);
if (err) {
snd_printk(KERN_ERR "usb_set_interface error \n");
@@ -503,8 +502,7 @@ static bool us122l_create_card(struct snd_card *card)
if (!us122l_start(us122l, 44100, 256))
return false;
- if (us122l->dev->descriptor.idProduct == USB_ID_US144 ||
- us122l->dev->descriptor.idProduct == USB_ID_US144MKII)
+ if (us122l->is_us144)
err = us144_create_usbmidi(card);
else
err = us122l_create_usbmidi(card);
@@ -536,7 +534,8 @@ static void snd_us122l_free(struct snd_card *card)
static int usx2y_create_card(struct usb_device *device,
struct usb_interface *intf,
- struct snd_card **cardp)
+ struct snd_card **cardp,
+ unsigned long flags)
{
int dev;
struct snd_card *card;
@@ -556,6 +555,7 @@ static int usx2y_create_card(struct usb_device *device,
US122L(card)->dev = device;
mutex_init(&US122L(card)->mutex);
init_waitqueue_head(&US122L(card)->sk.sleep);
+ US122L(card)->is_us144 = flags & US122L_FLAG_US144;
INIT_LIST_HEAD(&US122L(card)->midi_list);
strcpy(card->driver, "USB "NAME_ALLCAPS"");
sprintf(card->shortname, "TASCAM "NAME_ALLCAPS"");
@@ -579,7 +579,7 @@ static int us122l_usb_probe(struct usb_interface *intf,
struct snd_card *card;
int err;
- err = usx2y_create_card(device, intf, &card);
+ err = usx2y_create_card(device, intf, &card, device_id->driver_info);
if (err < 0)
return err;
@@ -607,9 +607,8 @@ static int snd_us122l_probe(struct usb_interface *intf,
struct snd_card *card;
int err;
- if ((device->descriptor.idProduct == USB_ID_US144 ||
- device->descriptor.idProduct == USB_ID_US144MKII)
- && device->speed == USB_SPEED_HIGH) {
+ if (id->driver_info & US122L_FLAG_US144 &&
+ device->speed == USB_SPEED_HIGH) {
snd_printk(KERN_ERR "disable ehci-hcd to run US-144 \n");
return -ENODEV;
}
@@ -703,8 +702,7 @@ static int snd_us122l_resume(struct usb_interface *intf)
mutex_lock(&us122l->mutex);
/* needed, doesn't restart without: */
- if (us122l->dev->descriptor.idProduct == USB_ID_US144 ||
- us122l->dev->descriptor.idProduct == USB_ID_US144MKII) {
+ if (us122l->is_us144) {
err = usb_set_interface(us122l->dev, 0, 1);
if (err) {
snd_printk(KERN_ERR "usb_set_interface error \n");
@@ -747,7 +745,8 @@ static struct usb_device_id snd_us122l_usb_id_table[] = {
{ /* US-144 only works at USB1.1! Disable module ehci-hcd. */
.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x0644,
- .idProduct = USB_ID_US144
+ .idProduct = USB_ID_US144,
+ .driver_info = US122L_FLAG_US144
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
@@ -757,7 +756,8 @@ static struct usb_device_id snd_us122l_usb_id_table[] = {
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x0644,
- .idProduct = USB_ID_US144MKII
+ .idProduct = USB_ID_US144MKII,
+ .driver_info = US122L_FLAG_US144
},
{ /* terminator */ }
};
diff --git a/sound/usb/usx2y/us122l.h b/sound/usb/usx2y/us122l.h
index f263b3f96c86..3e2a2d0041ee 100644
--- a/sound/usb/usx2y/us122l.h
+++ b/sound/usb/usx2y/us122l.h
@@ -16,6 +16,8 @@ struct us122l {
struct list_head midi_list;
atomic_t mmap_count;
+
+ bool is_us144;
};
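The US-122L rework above replaces repeated idProduct comparisons with a flag carried in usb_device_id.driver_info and cached as is_us144 at probe time. A stripped-down sketch of that pattern, using hypothetical example_* names and device IDs:

	#include <linux/bits.h>
	#include <linux/module.h>
	#include <linux/usb.h>

	#define EXAMPLE_FLAG_VARIANT_B	BIT(0)	/* per-device quirk flag */

	static const struct usb_device_id example_id_table[] = {
		{ USB_DEVICE(0x1234, 0x0001) },				/* variant A */
		{ USB_DEVICE(0x1234, 0x0002),
		  .driver_info = EXAMPLE_FLAG_VARIANT_B },		/* variant B */
		{ }
	};
	MODULE_DEVICE_TABLE(usb, example_id_table);

	static int example_probe(struct usb_interface *intf,
				 const struct usb_device_id *id)
	{
		/* stash the flag in driver state instead of re-checking idProduct */
		bool is_variant_b = id->driver_info & EXAMPLE_FLAG_VARIANT_B;

		(void)is_variant_b;
		return 0;
	}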
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index b11d3920b9a5..37f06ffdf1e6 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -1699,8 +1699,8 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
/* get resources */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "Could not get irq resource\n");
- return -ENODEV;
+ dev_err(&pdev->dev, "Could not get irq resource: %d\n", irq);
+ return irq;
}
res_mmio = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
index e279a71c650d..c46b20e4ad87 100644
--- a/tools/build/Makefile.build
+++ b/tools/build/Makefile.build
@@ -19,15 +19,9 @@ else
Q=@
endif
-ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
-ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
+ifneq ($(findstring s,$(filter-out --%,$(MAKEFLAGS))),)
quiet=silent_
endif
-else # make-3.8x
-ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
- quiet=silent_
-endif
-endif
build-dir := $(srctree)/tools/build
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index 8f74ed8e7237..dd8f00cfb8b4 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -295,114 +295,6 @@ class ArchS390(Arch):
ARCH = Arch.get_arch()
-def walkdir(path):
- """Returns os.walk() data for specified directory.
-
- As it is only a wrapper it returns the same 3-tuple of (dirpath,
- dirnames, filenames).
- """
- return next(os.walk(path))
-
-
-def parse_int_list(list_string):
- """Returns an int list from a string of comma separated integers and
- integer ranges."""
- integers = []
- members = list_string.split(',')
-
- for member in members:
- if '-' not in member:
- integers.append(int(member))
- else:
- int_range = member.split('-')
- integers.extend(range(int(int_range[0]),
- int(int_range[1]) + 1))
-
- return integers
-
-
-def get_pid_from_gname(gname):
- """Fuzzy function to convert guest name to QEMU process pid.
-
- Returns a list of potential pids, can be empty if no match found.
- Throws an exception on processing errors.
-
- """
- pids = []
- try:
- child = subprocess.Popen(['ps', '-A', '--format', 'pid,args'],
- stdout=subprocess.PIPE)
- except:
- raise Exception
- for line in child.stdout:
- line = line.lstrip().split(' ', 1)
- # perform a sanity check before calling the more expensive
- # function to possibly extract the guest name
- if ' -name ' in line[1] and gname == get_gname_from_pid(line[0]):
- pids.append(int(line[0]))
- child.stdout.close()
-
- return pids
-
-
-def get_gname_from_pid(pid):
- """Returns the guest name for a QEMU process pid.
-
- Extracts the guest name from the QEMU comma line by processing the '-name'
- option. Will also handle names specified out of sequence.
-
- """
- name = ''
- try:
- line = open('/proc/{}/cmdline'.format(pid), 'rb').read().split('\0')
- parms = line[line.index('-name') + 1].split(',')
- while '' in parms:
- # commas are escaped (i.e. ',,'), hence e.g. 'foo,bar' results in
- # ['foo', '', 'bar'], which we revert here
- idx = parms.index('')
- parms[idx - 1] += ',' + parms[idx + 1]
- del parms[idx:idx+2]
- # the '-name' switch allows for two ways to specify the guest name,
- # where the plain name overrides the name specified via 'guest='
- for arg in parms:
- if '=' not in arg:
- name = arg
- break
- if arg[:6] == 'guest=':
- name = arg[6:]
- except (ValueError, IOError, IndexError):
- pass
-
- return name
-
-
-def get_online_cpus():
- """Returns a list of cpu id integers."""
- with open('/sys/devices/system/cpu/online') as cpu_list:
- cpu_string = cpu_list.readline()
- return parse_int_list(cpu_string)
-
-
-def get_filters():
- """Returns a dict of trace events, their filter ids and
- the values that can be filtered.
-
- Trace events can be filtered for special values by setting a
- filter string via an ioctl. The string normally has the format
- identifier==value. For each filter a new event will be created, to
- be able to distinguish the events.
-
- """
- filters = {}
- filters['kvm_userspace_exit'] = ('reason', USERSPACE_EXIT_REASONS)
- if ARCH.exit_reasons:
- filters['kvm_exit'] = ('exit_reason', ARCH.exit_reasons)
- return filters
-
-libc = ctypes.CDLL('libc.so.6', use_errno=True)
-syscall = libc.syscall
-
-
class perf_event_attr(ctypes.Structure):
"""Struct that holds the necessary data to set up a trace event.
@@ -432,25 +324,6 @@ class perf_event_attr(ctypes.Structure):
self.read_format = PERF_FORMAT_GROUP
-def perf_event_open(attr, pid, cpu, group_fd, flags):
- """Wrapper for the sys_perf_evt_open() syscall.
-
- Used to set up performance events, returns a file descriptor or -1
- on error.
-
- Attributes are:
- - syscall number
- - struct perf_event_attr *
- - pid or -1 to monitor all pids
- - cpu number or -1 to monitor all cpus
- - The file descriptor of the group leader or -1 to create a group.
- - flags
-
- """
- return syscall(ARCH.sc_perf_evt_open, ctypes.pointer(attr),
- ctypes.c_int(pid), ctypes.c_int(cpu),
- ctypes.c_int(group_fd), ctypes.c_long(flags))
-
PERF_TYPE_TRACEPOINT = 2
PERF_FORMAT_GROUP = 1 << 3
@@ -495,6 +368,8 @@ class Event(object):
"""Represents a performance event and manages its life cycle."""
def __init__(self, name, group, trace_cpu, trace_pid, trace_point,
trace_filter, trace_set='kvm'):
+ self.libc = ctypes.CDLL('libc.so.6', use_errno=True)
+ self.syscall = self.libc.syscall
self.name = name
self.fd = None
self.setup_event(group, trace_cpu, trace_pid, trace_point,
@@ -511,6 +386,25 @@ class Event(object):
if self.fd:
os.close(self.fd)
+ def perf_event_open(self, attr, pid, cpu, group_fd, flags):
+ """Wrapper for the sys_perf_evt_open() syscall.
+
+ Used to set up performance events, returns a file descriptor or -1
+ on error.
+
+ Attributes are:
+ - syscall number
+ - struct perf_event_attr *
+ - pid or -1 to monitor all pids
+ - cpu number or -1 to monitor all cpus
+ - The file descriptor of the group leader or -1 to create a group.
+ - flags
+
+ """
+ return self.syscall(ARCH.sc_perf_evt_open, ctypes.pointer(attr),
+ ctypes.c_int(pid), ctypes.c_int(cpu),
+ ctypes.c_int(group_fd), ctypes.c_long(flags))
+
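The perf_event_open() method added to Event above is a thin ctypes shim over the raw system call. Its C counterpart, shown here purely for reference (the attr fields mirror what the script sets up for a kvm tracepoint; the helper names are hypothetical), would look like:

	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <sys/types.h>
	#include <unistd.h>

	static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
				   int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
	}

	static int example_open_kvm_tracepoint(unsigned long long tracepoint_id)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_TRACEPOINT;
		attr.config = tracepoint_id;	/* id read from .../events/kvm/<event>/id */
		attr.read_format = PERF_FORMAT_GROUP;

		/* pid = -1, cpu = 0: all tasks on CPU 0; group_fd = -1 starts a new group */
		return perf_event_open(&attr, -1, 0, -1, 0);
	}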
def setup_event_attribute(self, trace_set, trace_point):
"""Returns an initialized ctype perf_event_attr struct."""
@@ -539,8 +433,8 @@ class Event(object):
if group.events:
group_leader = group.events[0].fd
- fd = perf_event_open(event_attr, trace_pid,
- trace_cpu, group_leader, 0)
+ fd = self.perf_event_open(event_attr, trace_pid,
+ trace_cpu, group_leader, 0)
if fd == -1:
err = ctypes.get_errno()
raise OSError(err, os.strerror(err),
@@ -575,17 +469,53 @@ class Event(object):
fcntl.ioctl(self.fd, ARCH.ioctl_numbers['RESET'], 0)
-class TracepointProvider(object):
+class Provider(object):
+ """Encapsulates functionalities used by all providers."""
+ @staticmethod
+ def is_field_wanted(fields_filter, field):
+ """Indicate whether field is valid according to fields_filter."""
+ if not fields_filter:
+ return True
+ return re.match(fields_filter, field) is not None
+
+ @staticmethod
+ def walkdir(path):
+ """Returns os.walk() data for specified directory.
+
+ As it is only a wrapper it returns the same 3-tuple of (dirpath,
+ dirnames, filenames).
+ """
+ return next(os.walk(path))
+
+
+class TracepointProvider(Provider):
"""Data provider for the stats class.
Manages the events/groups from which it acquires its data.
"""
- def __init__(self):
+ def __init__(self, pid, fields_filter):
self.group_leaders = []
- self.filters = get_filters()
- self._fields = self.get_available_fields()
- self._pid = 0
+ self.filters = self.get_filters()
+ self.update_fields(fields_filter)
+ self.pid = pid
+
+ @staticmethod
+ def get_filters():
+ """Returns a dict of trace events, their filter ids and
+ the values that can be filtered.
+
+ Trace events can be filtered for special values by setting a
+ filter string via an ioctl. The string normally has the format
+ identifier==value. For each filter a new event will be created, to
+ be able to distinguish the events.
+
+ """
+ filters = {}
+ filters['kvm_userspace_exit'] = ('reason', USERSPACE_EXIT_REASONS)
+ if ARCH.exit_reasons:
+ filters['kvm_exit'] = ('exit_reason', ARCH.exit_reasons)
+ return filters
def get_available_fields(self):
"""Returns a list of available event's of format 'event name(filter
@@ -603,7 +533,7 @@ class TracepointProvider(object):
"""
path = os.path.join(PATH_DEBUGFS_TRACING, 'events', 'kvm')
- fields = walkdir(path)[1]
+ fields = self.walkdir(path)[1]
extra = []
for field in fields:
if field in self.filters:
@@ -613,6 +543,34 @@ class TracepointProvider(object):
fields += extra
return fields
+ def update_fields(self, fields_filter):
+ """Refresh fields, applying fields_filter"""
+ self._fields = [field for field in self.get_available_fields()
+ if self.is_field_wanted(fields_filter, field)]
+
+ @staticmethod
+ def get_online_cpus():
+ """Returns a list of cpu id integers."""
+ def parse_int_list(list_string):
+ """Returns an int list from a string of comma separated integers and
+ integer ranges."""
+ integers = []
+ members = list_string.split(',')
+
+ for member in members:
+ if '-' not in member:
+ integers.append(int(member))
+ else:
+ int_range = member.split('-')
+ integers.extend(range(int(int_range[0]),
+ int(int_range[1]) + 1))
+
+ return integers
+
+ with open('/sys/devices/system/cpu/online') as cpu_list:
+ cpu_string = cpu_list.readline()
+ return parse_int_list(cpu_string)
+
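The get_online_cpus() helper above, with its embedded parse_int_list(), decodes the "0-3,5"-style range list that /sys/devices/system/cpu/online exposes. A small illustrative C parser for the same format (not taken from the script):

	#include <ctype.h>
	#include <stdio.h>
	#include <stdlib.h>

	static void example_parse_cpu_list(const char *s)
	{
		while (isdigit((unsigned char)*s)) {
			char *end;
			long first = strtol(s, &end, 10);
			long last = first;

			if (*end == '-')		/* "a-b" range */
				last = strtol(end + 1, &end, 10);
			for (long cpu = first; cpu <= last; cpu++)
				printf("cpu%ld\n", cpu);
			if (*end != ',')
				break;
			s = end + 1;
		}
	}

	int main(void)
	{
		example_parse_cpu_list("0-3,5,7-8\n");	/* typical sysfs contents */
		return 0;
	}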
def setup_traces(self):
"""Creates all event and group objects needed to be able to retrieve
data."""
@@ -621,9 +579,9 @@ class TracepointProvider(object):
# Fetch list of all threads of the monitored pid, as qemu
# starts a thread for each vcpu.
path = os.path.join('/proc', str(self._pid), 'task')
- groupids = walkdir(path)[1]
+ groupids = self.walkdir(path)[1]
else:
- groupids = get_online_cpus()
+ groupids = self.get_online_cpus()
# The constant is needed as a buffer for python libs, std
# streams and other files that the script opens.
@@ -671,9 +629,6 @@ class TracepointProvider(object):
self.group_leaders.append(group)
- def available_fields(self):
- return self.get_available_fields()
-
@property
def fields(self):
return self._fields
@@ -707,7 +662,7 @@ class TracepointProvider(object):
self.setup_traces()
self.fields = self._fields
- def read(self):
+ def read(self, by_guest=0):
"""Returns 'event name: current value' for all enabled events."""
ret = defaultdict(int)
for group in self.group_leaders:
@@ -723,16 +678,17 @@ class TracepointProvider(object):
event.reset()
-class DebugfsProvider(object):
+class DebugfsProvider(Provider):
"""Provides data from the files that KVM creates in the kvm debugfs
folder."""
- def __init__(self):
- self._fields = self.get_available_fields()
+ def __init__(self, pid, fields_filter, include_past):
+ self.update_fields(fields_filter)
self._baseline = {}
- self._pid = 0
self.do_read = True
self.paths = []
- self.reset()
+ self.pid = pid
+ if include_past:
+ self.restore()
def get_available_fields(self):
""""Returns a list of available fields.
@@ -740,7 +696,12 @@ class DebugfsProvider(object):
The fields are all available KVM debugfs files
"""
- return walkdir(PATH_DEBUGFS_KVM)[2]
+ return self.walkdir(PATH_DEBUGFS_KVM)[2]
+
+ def update_fields(self, fields_filter):
+ """Refresh fields, applying fields_filter"""
+ self._fields = [field for field in self.get_available_fields()
+ if self.is_field_wanted(fields_filter, field)]
@property
def fields(self):
@@ -757,10 +718,9 @@ class DebugfsProvider(object):
@pid.setter
def pid(self, pid):
+ self._pid = pid
if pid != 0:
- self._pid = pid
-
- vms = walkdir(PATH_DEBUGFS_KVM)[1]
+ vms = self.walkdir(PATH_DEBUGFS_KVM)[1]
if len(vms) == 0:
self.do_read = False
@@ -771,8 +731,15 @@ class DebugfsProvider(object):
self.do_read = True
self.reset()
- def read(self, reset=0):
- """Returns a dict with format:'file name / field -> current value'."""
+ def read(self, reset=0, by_guest=0):
+ """Returns a dict with format:'file name / field -> current value'.
+
+ Parameter 'reset':
+ 0 plain read
+ 1 reset field counts to 0
+ 2 restore the original field counts
+
+ """
results = {}
# If no debugfs filtering support is available, then don't read.
@@ -789,12 +756,22 @@ class DebugfsProvider(object):
for field in self._fields:
value = self.read_field(field, path)
key = path + field
- if reset:
+ if reset == 1:
self._baseline[key] = value
+ if reset == 2:
+ self._baseline[key] = 0
if self._baseline.get(key, -1) == -1:
self._baseline[key] = value
- results[field] = (results.get(field, 0) + value -
- self._baseline.get(key, 0))
+ increment = (results.get(field, 0) + value -
+ self._baseline.get(key, 0))
+ if by_guest:
+ pid = key.split('-')[0]
+ if pid in results:
+ results[pid] += increment
+ else:
+ results[pid] = increment
+ else:
+ results[field] = increment
return results
@@ -813,6 +790,11 @@ class DebugfsProvider(object):
self._baseline = {}
self.read(1)
+ def restore(self):
+ """Reset field counters"""
+ self._baseline = {}
+ self.read(2)
+
class Stats(object):
"""Manages the data providers and the data they provide.
@@ -821,33 +803,32 @@ class Stats(object):
provider data.
"""
- def __init__(self, providers, pid, fields=None):
- self.providers = providers
- self._pid_filter = pid
- self._fields_filter = fields
+ def __init__(self, options):
+ self.providers = self.get_providers(options)
+ self._pid_filter = options.pid
+ self._fields_filter = options.fields
self.values = {}
- self.update_provider_pid()
- self.update_provider_filters()
+
+ @staticmethod
+ def get_providers(options):
+ """Returns a list of data providers depending on the passed options."""
+ providers = []
+
+ if options.debugfs:
+ providers.append(DebugfsProvider(options.pid, options.fields,
+ options.dbgfs_include_past))
+ if options.tracepoints or not providers:
+ providers.append(TracepointProvider(options.pid, options.fields))
+
+ return providers
def update_provider_filters(self):
"""Propagates fields filters to providers."""
- def wanted(key):
- if not self._fields_filter:
- return True
- return re.match(self._fields_filter, key) is not None
-
# As we reset the counters when updating the fields we can
# also clear the cache of old values.
self.values = {}
for provider in self.providers:
- provider_fields = [key for key in provider.get_available_fields()
- if wanted(key)]
- provider.fields = provider_fields
-
- def update_provider_pid(self):
- """Propagates pid filters to providers."""
- for provider in self.providers:
- provider.pid = self._pid_filter
+ provider.update_fields(self._fields_filter)
def reset(self):
self.values = {}
@@ -873,27 +854,52 @@ class Stats(object):
if pid != self._pid_filter:
self._pid_filter = pid
self.values = {}
- self.update_provider_pid()
+ for provider in self.providers:
+ provider.pid = self._pid_filter
- def get(self):
+ def get(self, by_guest=0):
"""Returns a dict with field -> (value, delta to last value) of all
provider data."""
for provider in self.providers:
- new = provider.read()
- for key in provider.fields:
+ new = provider.read(by_guest=by_guest)
+ for key in new if by_guest else provider.fields:
oldval = self.values.get(key, (0, 0))[0]
newval = new.get(key, 0)
newdelta = newval - oldval
self.values[key] = (newval, newdelta)
return self.values
-LABEL_WIDTH = 40
-NUMBER_WIDTH = 10
-DELAY_INITIAL = 0.25
-DELAY_REGULAR = 3.0
+ def toggle_display_guests(self, to_pid):
+ """Toggle between collection of stats by individual event and by
+ guest pid
+
+ Events reported by DebugfsProvider change when switching to/from
+ reading by guest values. Hence we have to remove the excess event
+ names from self.values.
+
+ """
+ if any(isinstance(ins, TracepointProvider) for ins in self.providers):
+ return 1
+ if to_pid:
+ for provider in self.providers:
+ if isinstance(provider, DebugfsProvider):
+ for key in provider.fields:
+ if key in self.values.keys():
+ del self.values[key]
+ else:
+ oldvals = self.values.copy()
+ for key in oldvals:
+ if key.isdigit():
+ del self.values[key]
+ # Update oldval (see get())
+ self.get(to_pid)
+ return 0
+
+DELAY_DEFAULT = 3.0
MAX_GUEST_NAME_LEN = 48
MAX_REGEX_LEN = 44
DEFAULT_REGEX = r'^[^\(]*$'
+SORT_DEFAULT = 0
class Tui(object):
@@ -901,7 +907,10 @@ class Tui(object):
def __init__(self, stats):
self.stats = stats
self.screen = None
- self.update_drilldown()
+ self._delay_initial = 0.25
+ self._delay_regular = DELAY_DEFAULT
+ self._sorting = SORT_DEFAULT
+ self._display_guests = 0
def __enter__(self):
"""Initialises curses for later use. Based on curses.wrapper
@@ -929,7 +938,7 @@ class Tui(object):
return self
def __exit__(self, *exception):
- """Resets the terminal to its normal state. Based on curses.wrappre
+ """Resets the terminal to its normal state. Based on curses.wrapper
implementation from the Python standard library."""
if self.screen:
self.screen.keypad(0)
@@ -937,6 +946,86 @@ class Tui(object):
curses.nocbreak()
curses.endwin()
+ def get_all_gnames(self):
+ """Returns a list of (pid, gname) tuples of all running guests"""
+ res = []
+ try:
+ child = subprocess.Popen(['ps', '-A', '--format', 'pid,args'],
+ stdout=subprocess.PIPE)
+ except:
+ raise Exception
+ for line in child.stdout:
+ line = line.lstrip().split(' ', 1)
+ # perform a sanity check before calling the more expensive
+ # function to possibly extract the guest name
+ if ' -name ' in line[1]:
+ res.append((line[0], self.get_gname_from_pid(line[0])))
+ child.stdout.close()
+
+ return res
+
+ def print_all_gnames(self, row):
+ """Print a list of all running guests along with their pids."""
+ self.screen.addstr(row, 2, '%8s %-60s' %
+ ('Pid', 'Guest Name (fuzzy list, might be '
+ 'inaccurate!)'),
+ curses.A_UNDERLINE)
+ row += 1
+ try:
+ for line in self.get_all_gnames():
+ self.screen.addstr(row, 2, '%8s %-60s' % (line[0], line[1]))
+ row += 1
+ if row >= self.screen.getmaxyx()[0]:
+ break
+ except Exception:
+ self.screen.addstr(row + 1, 2, 'Not available')
+
+ def get_pid_from_gname(self, gname):
+ """Fuzzy function to convert guest name to QEMU process pid.
+
+ Returns a list of potential pids, can be empty if no match found.
+ Throws an exception on processing errors.
+
+ """
+ pids = []
+ for line in self.get_all_gnames():
+ if gname == line[1]:
+ pids.append(int(line[0]))
+
+ return pids
+
+ @staticmethod
+ def get_gname_from_pid(pid):
+ """Returns the guest name for a QEMU process pid.
+
+ Extracts the guest name from the QEMU command line by processing the
+ '-name' option. Will also handle names specified out of sequence.
+
+ """
+ name = ''
+ try:
+ line = open('/proc/{}/cmdline'
+ .format(pid), 'rb').read().split('\0')
+ parms = line[line.index('-name') + 1].split(',')
+ while '' in parms:
+ # commas are escaped (i.e. ',,'), hence e.g. 'foo,bar' results
+ # in ['foo', '', 'bar'], which we revert here
+ idx = parms.index('')
+ parms[idx - 1] += ',' + parms[idx + 1]
+ del parms[idx:idx+2]
+ # the '-name' switch allows for two ways to specify the guest name,
+ # where the plain name overrides the name specified via 'guest='
+ for arg in parms:
+ if '=' not in arg:
+ name = arg
+ break
+ if arg[:6] == 'guest=':
+ name = arg[6:]
+ except (ValueError, IOError, IndexError):
+ pass
+
+ return name
+
def update_drilldown(self):
"""Sets or removes a filter that only allows fields without braces."""
if not self.stats.fields_filter:
@@ -954,7 +1043,7 @@ class Tui(object):
if pid is None:
pid = self.stats.pid_filter
self.screen.erase()
- gname = get_gname_from_pid(pid)
+ gname = self.get_gname_from_pid(pid)
if gname:
gname = ('({})'.format(gname[:MAX_GUEST_NAME_LEN] + '...'
if len(gname) > MAX_GUEST_NAME_LEN
@@ -970,13 +1059,13 @@ class Tui(object):
if len(regex) > MAX_REGEX_LEN:
regex = regex[:MAX_REGEX_LEN] + '...'
self.screen.addstr(1, 17, 'regex filter: {0}'.format(regex))
- self.screen.addstr(2, 1, 'Event')
- self.screen.addstr(2, 1 + LABEL_WIDTH + NUMBER_WIDTH -
- len('Total'), 'Total')
- self.screen.addstr(2, 1 + LABEL_WIDTH + NUMBER_WIDTH + 7 -
- len('%Total'), '%Total')
- self.screen.addstr(2, 1 + LABEL_WIDTH + NUMBER_WIDTH + 7 + 8 -
- len('Current'), 'Current')
+ if self._display_guests:
+ col_name = 'Guest Name'
+ else:
+ col_name = 'Event'
+ self.screen.addstr(2, 1, '%-40s %10s%7s %8s' %
+ (col_name, 'Total', '%Total', 'CurAvg/s'),
+ curses.A_STANDOUT)
self.screen.addstr(4, 1, 'Collecting data...')
self.screen.refresh()
@@ -984,16 +1073,25 @@ class Tui(object):
row = 3
self.screen.move(row, 0)
self.screen.clrtobot()
- stats = self.stats.get()
+ stats = self.stats.get(self._display_guests)
- def sortkey(x):
+ def sortCurAvg(x):
+ # sort by current events if available
if stats[x][1]:
return (-stats[x][1], -stats[x][0])
else:
return (0, -stats[x][0])
+
+ def sortTotal(x):
+ # sort by totals
+ return (0, -stats[x][0])
total = 0.
for val in stats.values():
total += val[0]
+ if self._sorting == SORT_DEFAULT:
+ sortkey = sortCurAvg
+ else:
+ sortkey = sortTotal
for key in sorted(stats.keys(), key=sortkey):
if row >= self.screen.getmaxyx()[0]:
@@ -1001,18 +1099,61 @@ class Tui(object):
values = stats[key]
if not values[0] and not values[1]:
break
- col = 1
- self.screen.addstr(row, col, key)
- col += LABEL_WIDTH
- self.screen.addstr(row, col, '%10d' % (values[0],))
- col += NUMBER_WIDTH
- self.screen.addstr(row, col, '%7.1f' % (values[0] * 100 / total,))
- col += 7
- if values[1] is not None:
- self.screen.addstr(row, col, '%8d' % (values[1] / sleeptime,))
+ if values[0] is not None:
+ cur = int(round(values[1] / sleeptime)) if values[1] else ''
+ if self._display_guests:
+ key = self.get_gname_from_pid(key)
+ self.screen.addstr(row, 1, '%-40s %10d%7.1f %8s' %
+ (key, values[0], values[0] * 100 / total,
+ cur))
row += 1
+ if row == 3:
+ self.screen.addstr(4, 1, 'No matching events reported yet')
self.screen.refresh()
+ def show_msg(self, text):
+ """Display message centered text and exit on key press"""
+ hint = 'Press any key to continue'
+ curses.cbreak()
+ self.screen.erase()
+ (x, term_width) = self.screen.getmaxyx()
+ row = 2
+ for line in text:
+ start = (term_width - len(line)) / 2
+ self.screen.addstr(row, start, line)
+ row += 1
+ self.screen.addstr(row + 1, (term_width - len(hint)) / 2, hint,
+ curses.A_STANDOUT)
+ self.screen.getkey()
+
+ def show_help_interactive(self):
+ """Display help with list of interactive commands"""
+ msg = (' b toggle events by guests (debugfs only, honors'
+ ' filters)',
+ ' c clear filter',
+ ' f filter by regular expression',
+ ' g filter by guest name',
+ ' h display interactive commands reference',
+ ' o toggle sorting order (Total vs CurAvg/s)',
+ ' p filter by PID',
+ ' q quit',
+ ' r reset stats',
+ ' s set update interval',
+ ' x toggle reporting of stats for individual child trace'
+ ' events',
+ 'Any other key refreshes statistics immediately')
+ curses.cbreak()
+ self.screen.erase()
+ self.screen.addstr(0, 0, "Interactive commands reference",
+ curses.A_BOLD)
+ self.screen.addstr(2, 0, "Press any key to exit", curses.A_STANDOUT)
+ row = 4
+ for line in msg:
+ self.screen.addstr(row, 0, line)
+ row += 1
+ self.screen.getkey()
+ self.refresh_header()
+
def show_filter_selection(self):
"""Draws filter selection mask.
@@ -1059,6 +1200,7 @@ class Tui(object):
'This might limit the shown data to the trace '
'statistics.')
self.screen.addstr(5, 0, msg)
+ self.print_all_gnames(7)
curses.echo()
self.screen.addstr(3, 0, "Pid [0 or pid]: ")
@@ -1077,10 +1219,40 @@ class Tui(object):
self.refresh_header(pid)
self.update_pid(pid)
break
-
except ValueError:
msg = '"' + str(pid) + '": Not a valid pid'
- continue
+
+ def show_set_update_interval(self):
+ """Draws update interval selection mask."""
+ msg = ''
+ while True:
+ self.screen.erase()
+ self.screen.addstr(0, 0, 'Set update interval (defaults to %fs).' %
+ DELAY_DEFAULT, curses.A_BOLD)
+ self.screen.addstr(4, 0, msg)
+ self.screen.addstr(2, 0, 'Change delay from %.1fs to ' %
+ self._delay_regular)
+ curses.echo()
+ val = self.screen.getstr()
+ curses.noecho()
+
+ try:
+ if len(val) > 0:
+ delay = float(val)
+ if delay < 0.1:
+ msg = '"' + str(val) + '": Value must be >=0.1'
+ continue
+ if delay > 25.5:
+ msg = '"' + str(val) + '": Value must be <=25.5'
+ continue
+ else:
+ delay = DELAY_DEFAULT
+ self._delay_regular = delay
+ break
+
+ except ValueError:
+ msg = '"' + str(val) + '": Invalid value'
+ self.refresh_header()
def show_vm_selection_by_guest_name(self):
"""Draws guest selection mask.
@@ -1098,6 +1270,7 @@ class Tui(object):
'This might limit the shown data to the trace '
'statistics.')
self.screen.addstr(5, 0, msg)
+ self.print_all_gnames(7)
curses.echo()
self.screen.addstr(3, 0, "Guest [ENTER or guest]: ")
gname = self.screen.getstr()
@@ -1110,7 +1283,7 @@ class Tui(object):
else:
pids = []
try:
- pids = get_pid_from_gname(gname)
+ pids = self.get_pid_from_gname(gname)
except:
msg = '"' + gname + '": Internal error while searching, ' \
'use pid filter instead'
@@ -1128,38 +1301,60 @@ class Tui(object):
def show_stats(self):
"""Refreshes the screen and processes user input."""
- sleeptime = DELAY_INITIAL
+ sleeptime = self._delay_initial
self.refresh_header()
+ start = 0.0 # result based on init value never appears on screen
while True:
- self.refresh_body(sleeptime)
+ self.refresh_body(time.time() - start)
curses.halfdelay(int(sleeptime * 10))
- sleeptime = DELAY_REGULAR
+ start = time.time()
+ sleeptime = self._delay_regular
try:
char = self.screen.getkey()
- if char == 'x':
+ if char == 'b':
+ self._display_guests = not self._display_guests
+ if self.stats.toggle_display_guests(self._display_guests):
+ self.show_msg(['Command not available with tracepoints'
+ ' enabled', 'Restart with debugfs only '
+ '(see option \'-d\') and try again!'])
+ self._display_guests = not self._display_guests
self.refresh_header()
- self.update_drilldown()
- sleeptime = DELAY_INITIAL
- if char == 'q':
- break
if char == 'c':
self.stats.fields_filter = DEFAULT_REGEX
self.refresh_header(0)
self.update_pid(0)
- sleeptime = DELAY_INITIAL
if char == 'f':
+ curses.curs_set(1)
self.show_filter_selection()
- sleeptime = DELAY_INITIAL
+ curses.curs_set(0)
+ sleeptime = self._delay_initial
if char == 'g':
+ curses.curs_set(1)
self.show_vm_selection_by_guest_name()
- sleeptime = DELAY_INITIAL
+ curses.curs_set(0)
+ sleeptime = self._delay_initial
+ if char == 'h':
+ self.show_help_interactive()
+ if char == 'o':
+ self._sorting = not self._sorting
if char == 'p':
+ curses.curs_set(1)
self.show_vm_selection_by_pid()
- sleeptime = DELAY_INITIAL
+ curses.curs_set(0)
+ sleeptime = self._delay_initial
+ if char == 'q':
+ break
if char == 'r':
- self.refresh_header()
self.stats.reset()
- sleeptime = DELAY_INITIAL
+ if char == 's':
+ curses.curs_set(1)
+ self.show_set_update_interval()
+ curses.curs_set(0)
+ sleeptime = self._delay_initial
+ if char == 'x':
+ self.update_drilldown()
+ # prevents display of current values on next refresh
+ self.stats.get()
except KeyboardInterrupt:
break
except curses.error:
@@ -1227,13 +1422,17 @@ Requirements:
the large number of files that are possibly opened.
Interactive Commands:
+ b toggle events by guests (debugfs only, honors filters)
c clear filter
f filter by regular expression
g filter by guest name
+ h display interactive commands reference
+ o toggle sorting order (Total vs CurAvg/s)
p filter by PID
q quit
- x toggle reporting of stats for individual child trace events
r reset stats
+ s set update interval
+ x toggle reporting of stats for individual child trace events
Press any other key to refresh statistics immediately.
"""
@@ -1246,7 +1445,7 @@ Press any other key to refresh statistics immediately.
def cb_guest_to_pid(option, opt, val, parser):
try:
- pids = get_pid_from_gname(val)
+ pids = Tui.get_pid_from_gname(val)
except:
raise optparse.OptionValueError('Error while searching for guest '
'"{}", use "-p" to specify a pid '
@@ -1268,6 +1467,13 @@ Press any other key to refresh statistics immediately.
dest='once',
help='run in batch mode for one second',
)
+ optparser.add_option('-i', '--debugfs-include-past',
+ action='store_true',
+ default=False,
+ dest='dbgfs_include_past',
+ help='include all available data on past events for '
+ 'debugfs',
+ )
optparser.add_option('-l', '--log',
action='store_true',
default=False,
@@ -1288,7 +1494,7 @@ Press any other key to refresh statistics immediately.
)
optparser.add_option('-f', '--fields',
action='store',
- default=None,
+ default=DEFAULT_REGEX,
dest='fields',
help='fields to display (regex)',
)
@@ -1311,20 +1517,6 @@ Press any other key to refresh statistics immediately.
return options
-def get_providers(options):
- """Returns a list of data providers depending on the passed options."""
- providers = []
-
- if options.tracepoints:
- providers.append(TracepointProvider())
- if options.debugfs:
- providers.append(DebugfsProvider())
- if len(providers) == 0:
- providers.append(TracepointProvider())
-
- return providers
-
-
def check_access(options):
"""Exits if the current user can't access all needed directories."""
if not os.path.exists('/sys/kernel/debug'):
@@ -1365,8 +1557,7 @@ def main():
sys.stderr.write('Did you use a (unsupported) tid instead of a pid?\n')
sys.exit('Specified pid does not exist.')
- providers = get_providers(options)
- stats = Stats(providers, options.pid, fields=options.fields)
+ stats = Stats(options)
if options.log:
log(stats)
diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt
index 109431bdc63c..e5cf836be8a1 100644
--- a/tools/kvm/kvm_stat/kvm_stat.txt
+++ b/tools/kvm/kvm_stat/kvm_stat.txt
@@ -29,18 +29,26 @@ meaning of events.
INTERACTIVE COMMANDS
--------------------
[horizontal]
+*b*:: toggle events by guests (debugfs only, honors filters)
+
*c*:: clear filter
*f*:: filter by regular expression
*g*:: filter by guest name
+*h*:: display interactive commands reference
+
+*o*:: toggle sorting order (Total vs CurAvg/s)
+
*p*:: filter by PID
*q*:: quit
*r*:: reset stats
+*s*:: set update interval
+
*x*:: toggle reporting of stats for child trace events
Press any other key to refresh statistics immediately.
@@ -64,6 +72,10 @@ OPTIONS
--debugfs::
retrieve statistics from debugfs
+-i::
+--debugfs-include-past::
+ include all available data on past events for debugfs
+
-p<pid>::
--pid=<pid>::
limit statistics to one virtual machine (pid)
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index fc74db62fef4..ccad8ce925e4 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -58,15 +58,9 @@ else
NO_SUBDIR = :
endif
-ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
-ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
+ifneq ($(findstring s,$(filter-out --%,$(MAKEFLAGS))),)
silent=1
endif
-else # make-3.8x
-ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
- silent=1
-endif
-endif
#
# Define a callable command for descending to a new directory
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 28859da78edf..4c2fa98ef39d 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -1943,7 +1943,7 @@ static __init int nfit_test_init(void)
nfit_test->setup = nfit_test0_setup;
break;
case 1:
- nfit_test->num_pm = 1;
+ nfit_test->num_pm = 2;
nfit_test->dcr_idx = NUM_DCR;
nfit_test->num_dcr = 2;
nfit_test->alloc = nfit_test1_alloc;
diff --git a/tools/testing/selftests/breakpoints/breakpoint_test.c b/tools/testing/selftests/breakpoints/breakpoint_test.c
index 120895ab5505..f63356151ad4 100644
--- a/tools/testing/selftests/breakpoints/breakpoint_test.c
+++ b/tools/testing/selftests/breakpoints/breakpoint_test.c
@@ -16,6 +16,8 @@
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
+#include <errno.h>
+#include <string.h>
#include "../kselftest.h"
@@ -42,10 +44,9 @@ static void set_breakpoint_addr(void *addr, int n)
ret = ptrace(PTRACE_POKEUSER, child_pid,
offsetof(struct user, u_debugreg[n]), addr);
- if (ret) {
- perror("Can't set breakpoint addr\n");
- ksft_exit_fail();
- }
+ if (ret)
+ ksft_exit_fail_msg("Can't set breakpoint addr: %s\n",
+ strerror(errno));
}
static void toggle_breakpoint(int n, int type, int len,
@@ -106,8 +107,8 @@ static void toggle_breakpoint(int n, int type, int len,
ret = ptrace(PTRACE_POKEUSER, child_pid,
offsetof(struct user, u_debugreg[7]), dr7);
if (ret) {
- perror("Can't set dr7");
- ksft_exit_fail();
+ ksft_print_msg("Can't set dr7: %s\n", strerror(errno));
+ exit(-1);
}
}
@@ -206,7 +207,7 @@ static void trigger_tests(void)
ret = ptrace(PTRACE_TRACEME, 0, NULL, 0);
if (ret) {
- perror("Can't be traced?\n");
+ ksft_print_msg("Can't be traced? %s\n", strerror(errno));
return;
}
@@ -261,29 +262,30 @@ static void trigger_tests(void)
static void check_success(const char *msg)
{
- const char *msg2;
int child_nr_tests;
int status;
+ int ret;
/* Wait for the child to SIGTRAP */
wait(&status);
- msg2 = "Failed";
+ ret = 0;
if (WSTOPSIG(status) == SIGTRAP) {
child_nr_tests = ptrace(PTRACE_PEEKDATA, child_pid,
&nr_tests, 0);
if (child_nr_tests == nr_tests)
- msg2 = "Ok";
- if (ptrace(PTRACE_POKEDATA, child_pid, &trapped, 1)) {
- perror("Can't poke\n");
- ksft_exit_fail();
- }
+ ret = 1;
+ if (ptrace(PTRACE_POKEDATA, child_pid, &trapped, 1))
+ ksft_exit_fail_msg("Can't poke: %s\n", strerror(errno));
}
nr_tests++;
- printf("%s [%s]\n", msg, msg2);
+ if (ret)
+ ksft_test_result_pass(msg);
+ else
+ ksft_test_result_fail(msg);
}
static void launch_instruction_breakpoints(char *buf, int local, int global)
@@ -294,7 +296,7 @@ static void launch_instruction_breakpoints(char *buf, int local, int global)
set_breakpoint_addr(dummy_funcs[i], i);
toggle_breakpoint(i, BP_X, 1, local, global, 1);
ptrace(PTRACE_CONT, child_pid, NULL, 0);
- sprintf(buf, "Test breakpoint %d with local: %d global: %d",
+ sprintf(buf, "Test breakpoint %d with local: %d global: %d\n",
i, local, global);
check_success(buf);
toggle_breakpoint(i, BP_X, 1, local, global, 0);
@@ -316,8 +318,9 @@ static void launch_watchpoints(char *buf, int mode, int len,
set_breakpoint_addr(&dummy_var[i], i);
toggle_breakpoint(i, mode, len, local, global, 1);
ptrace(PTRACE_CONT, child_pid, NULL, 0);
- sprintf(buf, "Test %s watchpoint %d with len: %d local: "
- "%d global: %d", mode_str, i, len, local, global);
+ sprintf(buf,
+ "Test %s watchpoint %d with len: %d local: %d global: %d\n",
+ mode_str, i, len, local, global);
check_success(buf);
toggle_breakpoint(i, mode, len, local, global, 0);
}
@@ -378,10 +381,12 @@ int main(int argc, char **argv)
pid_t pid;
int ret;
+ ksft_print_header();
+
pid = fork();
if (!pid) {
trigger_tests();
- return 0;
+ exit(0);
}
child_pid = pid;
@@ -392,5 +397,5 @@ int main(int argc, char **argv)
wait(NULL);
- return ksft_exit_pass();
+ ksft_exit_pass();
}
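This and the following selftest conversions replace ad-hoc perror()/printf() reporting with the kselftest TAP helpers. A minimal skeleton limited to the helpers these patches use (assuming the file sits under tools/testing/selftests so ../kselftest.h resolves):

	#include "../kselftest.h"

	int main(void)
	{
		ksft_print_header();		/* emits the TAP version line */

		if (2 + 2 == 4)
			ksft_test_result_pass("arithmetic works\n");
		else
			ksft_test_result_fail("arithmetic works\n");

		/* prints the pass/fail/skip counts and exits with the right code */
		ksft_exit_pass();
	}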
diff --git a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
index 3897e996541e..960d02100c26 100644
--- a/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
+++ b/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c
@@ -43,19 +43,25 @@ static void child(int size, int wr)
volatile uint8_t *addr = &var[32 + wr];
if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) != 0) {
- perror("ptrace(PTRACE_TRACEME) failed");
+ ksft_print_msg(
+ "ptrace(PTRACE_TRACEME) failed: %s\n",
+ strerror(errno));
_exit(1);
}
if (raise(SIGSTOP) != 0) {
- perror("raise(SIGSTOP) failed");
+ ksft_print_msg(
+ "raise(SIGSTOP) failed: %s\n", strerror(errno));
_exit(1);
}
if ((uintptr_t) addr % size) {
- perror("Wrong address write for the given size\n");
+ ksft_print_msg(
+ "Wrong address write for the given size: %s\n",
+ strerror(errno));
_exit(1);
}
+
switch (size) {
case 1:
*addr = 47;
@@ -100,12 +106,14 @@ static bool set_watchpoint(pid_t pid, int size, int wp)
if (ptrace(PTRACE_SETREGSET, pid, NT_ARM_HW_WATCH, &iov) == 0)
return true;
- if (errno == EIO) {
- printf("ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) "
- "not supported on this hardware\n");
- ksft_exit_skip();
- }
- perror("ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) failed");
+ if (errno == EIO)
+ ksft_print_msg(
+ "ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) not supported on this hardware: %s\n",
+ strerror(errno));
+
+ ksft_print_msg(
+ "ptrace(PTRACE_SETREGSET, NT_ARM_HW_WATCH) failed: %s\n",
+ strerror(errno));
return false;
}
@@ -117,7 +125,8 @@ static bool run_test(int wr_size, int wp_size, int wr, int wp)
pid_t wpid;
if (pid < 0) {
- perror("fork() failed");
+ ksft_test_result_fail(
+ "fork() failed: %s\n", strerror(errno));
return false;
}
if (pid == 0)
@@ -125,15 +134,17 @@ static bool run_test(int wr_size, int wp_size, int wr, int wp)
wpid = waitpid(pid, &status, __WALL);
if (wpid != pid) {
- perror("waitpid() failed");
+ ksft_print_msg(
+ "waitpid() failed: %s\n", strerror(errno));
return false;
}
if (!WIFSTOPPED(status)) {
- printf("child did not stop\n");
+ ksft_print_msg(
+ "child did not stop: %s\n", strerror(errno));
return false;
}
if (WSTOPSIG(status) != SIGSTOP) {
- printf("child did not stop with SIGSTOP\n");
+ ksft_print_msg("child did not stop with SIGSTOP\n");
return false;
}
@@ -141,42 +152,49 @@ static bool run_test(int wr_size, int wp_size, int wr, int wp)
return false;
if (ptrace(PTRACE_CONT, pid, NULL, NULL) < 0) {
- perror("ptrace(PTRACE_SINGLESTEP) failed");
+ ksft_print_msg(
+ "ptrace(PTRACE_SINGLESTEP) failed: %s\n",
+ strerror(errno));
return false;
}
alarm(3);
wpid = waitpid(pid, &status, __WALL);
if (wpid != pid) {
- perror("waitpid() failed");
+ ksft_print_msg(
+ "waitpid() failed: %s\n", strerror(errno));
return false;
}
alarm(0);
if (WIFEXITED(status)) {
- printf("child did not single-step\t");
+ ksft_print_msg("child did not single-step\n");
return false;
}
if (!WIFSTOPPED(status)) {
- printf("child did not stop\n");
+ ksft_print_msg("child did not stop\n");
return false;
}
if (WSTOPSIG(status) != SIGTRAP) {
- printf("child did not stop with SIGTRAP\n");
+ ksft_print_msg("child did not stop with SIGTRAP\n");
return false;
}
if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0) {
- perror("ptrace(PTRACE_GETSIGINFO)");
+ ksft_print_msg(
+ "ptrace(PTRACE_GETSIGINFO): %s\n",
+ strerror(errno));
return false;
}
if (siginfo.si_code != TRAP_HWBKPT) {
- printf("Unexpected si_code %d\n", siginfo.si_code);
+ ksft_print_msg(
+ "Unexpected si_code %d\n", siginfo.si_code);
return false;
}
kill(pid, SIGKILL);
wpid = waitpid(pid, &status, 0);
if (wpid != pid) {
- perror("waitpid() failed");
+ ksft_print_msg(
+ "waitpid() failed: %s\n", strerror(errno));
return false;
}
return true;
@@ -194,6 +212,8 @@ int main(int argc, char **argv)
int wr, wp, size;
bool result;
+ ksft_print_header();
+
act.sa_handler = sigalrm;
sigemptyset(&act.sa_mask);
act.sa_flags = 0;
@@ -201,14 +221,16 @@ int main(int argc, char **argv)
for (size = 1; size <= 32; size = size*2) {
for (wr = 0; wr <= 32; wr = wr + size) {
for (wp = wr - size; wp <= wr + size; wp = wp + size) {
- printf("Test size = %d write offset = %d watchpoint offset = %d\t", size, wr, wp);
result = run_test(size, MIN(size, 8), wr, wp);
- if ((result && wr == wp) || (!result && wr != wp)) {
- printf("[OK]\n");
- ksft_inc_pass_cnt();
- } else {
- printf("[FAILED]\n");
- ksft_inc_fail_cnt();
+ if ((result && wr == wp) ||
+ (!result && wr != wp))
+ ksft_test_result_pass(
+ "Test size = %d write offset = %d watchpoint offset = %d\n",
+ size, wr, wp);
+ else {
+ ksft_test_result_fail(
+ "Test size = %d write offset = %d watchpoint offset = %d\n",
+ size, wr, wp);
succeeded = false;
}
}
@@ -216,19 +238,18 @@ int main(int argc, char **argv)
}
for (size = 1; size <= 32; size = size*2) {
- printf("Test size = %d write offset = %d watchpoint offset = -8\t", size, -size);
-
- if (run_test(size, 8, -size, -8)) {
- printf("[OK]\n");
- ksft_inc_pass_cnt();
- } else {
- printf("[FAILED]\n");
- ksft_inc_fail_cnt();
+ if (run_test(size, 8, -size, -8))
+ ksft_test_result_pass(
+ "Test size = %d write offset = %d watchpoint offset = -8\n",
+ size, -size);
+ else {
+ ksft_test_result_fail(
+ "Test size = %d write offset = %d watchpoint offset = -8\n",
+ size, -size);
succeeded = false;
}
}
- ksft_print_cnts();
if (succeeded)
ksft_exit_pass();
else
diff --git a/tools/testing/selftests/breakpoints/step_after_suspend_test.c b/tools/testing/selftests/breakpoints/step_after_suspend_test.c
index 60b8a95dac26..3fece06e9f64 100644
--- a/tools/testing/selftests/breakpoints/step_after_suspend_test.c
+++ b/tools/testing/selftests/breakpoints/step_after_suspend_test.c
@@ -37,17 +37,19 @@ void child(int cpu)
CPU_ZERO(&set);
CPU_SET(cpu, &set);
if (sched_setaffinity(0, sizeof(set), &set) != 0) {
- perror("sched_setaffinity() failed");
+ ksft_print_msg("sched_setaffinity() failed: %s\n",
+ strerror(errno));
_exit(1);
}
if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) != 0) {
- perror("ptrace(PTRACE_TRACEME) failed");
+ ksft_print_msg("ptrace(PTRACE_TRACEME) failed: %s\n",
+ strerror(errno));
_exit(1);
}
if (raise(SIGSTOP) != 0) {
- perror("raise(SIGSTOP) failed");
+ ksft_print_msg("raise(SIGSTOP) failed: %s\n", strerror(errno));
_exit(1);
}
@@ -61,7 +63,7 @@ bool run_test(int cpu)
pid_t wpid;
if (pid < 0) {
- perror("fork() failed");
+ ksft_print_msg("fork() failed: %s\n", strerror(errno));
return false;
}
if (pid == 0)
@@ -69,57 +71,64 @@ bool run_test(int cpu)
wpid = waitpid(pid, &status, __WALL);
if (wpid != pid) {
- perror("waitpid() failed");
+ ksft_print_msg("waitpid() failed: %s\n", strerror(errno));
return false;
}
if (!WIFSTOPPED(status)) {
- printf("child did not stop\n");
+ ksft_print_msg("child did not stop: %s\n", strerror(errno));
return false;
}
if (WSTOPSIG(status) != SIGSTOP) {
- printf("child did not stop with SIGSTOP\n");
+ ksft_print_msg("child did not stop with SIGSTOP: %s\n",
+ strerror(errno));
return false;
}
if (ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL) < 0) {
if (errno == EIO) {
- printf("ptrace(PTRACE_SINGLESTEP) not supported on this architecture\n");
- ksft_exit_skip();
+ ksft_exit_skip(
+ "ptrace(PTRACE_SINGLESTEP) not supported on this architecture: %s\n",
+ strerror(errno));
}
- perror("ptrace(PTRACE_SINGLESTEP) failed");
+ ksft_print_msg("ptrace(PTRACE_SINGLESTEP) failed: %s\n",
+ strerror(errno));
return false;
}
wpid = waitpid(pid, &status, __WALL);
if (wpid != pid) {
- perror("waitpid() failed");
+ ksft_print_msg("waitpid() failed: $s\n", strerror(errno));
return false;
}
if (WIFEXITED(status)) {
- printf("child did not single-step\n");
+ ksft_print_msg("child did not single-step: %s\n",
+ strerror(errno));
return false;
}
if (!WIFSTOPPED(status)) {
- printf("child did not stop\n");
+ ksft_print_msg("child did not stop: %s\n", strerror(errno));
return false;
}
if (WSTOPSIG(status) != SIGTRAP) {
- printf("child did not stop with SIGTRAP\n");
+ ksft_print_msg("child did not stop with SIGTRAP: %s\n",
+ strerror(errno));
return false;
}
if (ptrace(PTRACE_CONT, pid, NULL, NULL) < 0) {
- perror("ptrace(PTRACE_CONT) failed");
+ ksft_print_msg("ptrace(PTRACE_CONT) failed: %s\n",
+ strerror(errno));
return false;
}
wpid = waitpid(pid, &status, __WALL);
if (wpid != pid) {
- perror("waitpid() failed");
+ ksft_print_msg("waitpid() failed: %s\n", strerror(errno));
return false;
}
if (!WIFEXITED(status)) {
- printf("child did not exit after PTRACE_CONT\n");
+ ksft_print_msg("child did not exit after PTRACE_CONT: %s\n",
+ strerror(errno));
return false;
}
@@ -135,28 +144,21 @@ void suspend(void)
struct itimerspec spec = {};
power_state_fd = open("/sys/power/state", O_RDWR);
- if (power_state_fd < 0) {
- perror("open(\"/sys/power/state\") failed (is this test running as root?)");
- ksft_exit_fail();
- }
+ if (power_state_fd < 0)
+ ksft_exit_fail_msg(
+ "open(\"/sys/power/state\") failed (is this test running as root?)\n");
timerfd = timerfd_create(CLOCK_BOOTTIME_ALARM, 0);
- if (timerfd < 0) {
- perror("timerfd_create() failed");
- ksft_exit_fail();
- }
+ if (timerfd < 0)
+ ksft_exit_fail_msg("timerfd_create() failed\n");
spec.it_value.tv_sec = 5;
err = timerfd_settime(timerfd, 0, &spec, NULL);
- if (err < 0) {
- perror("timerfd_settime() failed");
- ksft_exit_fail();
- }
+ if (err < 0)
+ ksft_exit_fail_msg("timerfd_settime() failed\n");
- if (write(power_state_fd, "mem", strlen("mem")) != strlen("mem")) {
- perror("entering suspend failed");
- ksft_exit_fail();
- }
+ if (write(power_state_fd, "mem", strlen("mem")) != strlen("mem"))
+ ksft_exit_fail_msg("Failed to enter Suspend state\n");
close(timerfd);
close(power_state_fd);
@@ -171,6 +173,8 @@ int main(int argc, char **argv)
int err;
int cpu;
+ ksft_print_header();
+
while ((opt = getopt(argc, argv, "n")) != -1) {
switch (opt) {
case 'n':
@@ -187,10 +191,8 @@ int main(int argc, char **argv)
suspend();
err = sched_getaffinity(0, sizeof(available_cpus), &available_cpus);
- if (err < 0) {
- perror("sched_getaffinity() failed");
- ksft_exit_fail();
- }
+ if (err < 0)
+ ksft_exit_fail_msg("sched_getaffinity() failed\n");
for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
bool test_success;
@@ -199,18 +201,14 @@ int main(int argc, char **argv)
continue;
test_success = run_test(cpu);
- printf("CPU %d: ", cpu);
if (test_success) {
- printf("[OK]\n");
- ksft_inc_pass_cnt();
+ ksft_test_result_pass("CPU %d\n", cpu);
} else {
- printf("[FAILED]\n");
- ksft_inc_fail_cnt();
+ ksft_test_result_fail("CPU %d\n", cpu);
succeeded = false;
}
}
- ksft_print_cnts();
if (succeeded)
ksft_exit_pass();
else
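
The per-CPU loop above is the general shape of a selftest after this
conversion: each unit of work reports through ksft_test_result_*() and the
overall outcome picks the exit helper. A minimal sketch, with a hypothetical
test body standing in for run_test() and assuming the usual "../kselftest.h"
include path:

	#include <stdbool.h>
	#include "../kselftest.h"

	int main(void)
	{
		bool all_ok = true;
		int cpu;

		ksft_print_header();		/* emits "TAP version 13" */
		for (cpu = 0; cpu < 4; cpu++) {
			bool ok = (cpu != 2);	/* stand-in for run_test(cpu) */

			if (ok) {
				ksft_test_result_pass("CPU %d\n", cpu);
			} else {
				ksft_test_result_fail("CPU %d\n", cpu);
				all_ok = false;
			}
		}
		/* the exit helpers print the "1..N" plan before exit() */
		return all_ok ? ksft_exit_pass() : ksft_exit_fail();
	}
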
diff --git a/tools/testing/selftests/capabilities/test_execve.c b/tools/testing/selftests/capabilities/test_execve.c
index 10a21a958aaf..763f37fecfb8 100644
--- a/tools/testing/selftests/capabilities/test_execve.c
+++ b/tools/testing/selftests/capabilities/test_execve.c
@@ -138,9 +138,6 @@ static void chdir_to_tmpfs(void)
if (chdir(cwd) != 0)
err(1, "chdir to private tmpfs");
-
- if (umount2(".", MNT_DETACH) != 0)
- err(1, "detach private tmpfs");
}
static void copy_fromat_to(int fromfd, const char *fromname, const char *toname)
@@ -248,7 +245,7 @@ static int do_tests(int uid, const char *our_path)
err(1, "chown");
if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0)
err(1, "chmod");
-}
+ }
capng_get_caps_process();
@@ -384,7 +381,7 @@ static int do_tests(int uid, const char *our_path)
} else {
printf("[RUN]\tNon-root +ia, sgidnonroot => i\n");
exec_other_validate_cap("./validate_cap_sgidnonroot",
- false, false, true, false);
+ false, false, true, false);
if (fork_wait()) {
printf("[RUN]\tNon-root +ia, sgidroot => i\n");
diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
index 717581145cfc..14a03ea1e21d 100755
--- a/tools/testing/selftests/ftrace/ftracetest
+++ b/tools/testing/selftests/ftrace/ftracetest
@@ -250,7 +250,7 @@ run_test() { # testfile
local testlog=`mktemp $LOG_DIR/${testname}-log.XXXXXX`
export TMPDIR=`mktemp -d /tmp/ftracetest-dir.XXXXXX`
testcase $1
- echo "execute: "$1 > $testlog
+ echo "execute$INSTANCE: "$1 > $testlog
SIG_RESULT=0
if [ $VERBOSE -ge 2 ]; then
__run_test $1 2>> $testlog | tee -a $testlog
diff --git a/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc b/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc
index 0bb5df3c00d4..15e2d3fe1731 100644
--- a/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc
+++ b/tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc
@@ -28,7 +28,9 @@ echo '*:*' > set_event
yield
-count=`cat trace | grep -v ^# | wc -l`
+echo 0 > tracing_on
+
+count=`head -n 128 trace | grep -v ^# | wc -l`
if [ $count -eq 0 ]; then
fail "none of events are recorded"
fi
@@ -36,10 +38,12 @@ fi
do_reset
echo 1 > events/enable
+echo 1 > tracing_on
yield
-count=`cat trace | grep -v ^# | wc -l`
+echo 0 > tracing_on
+count=`head -n 128 trace | grep -v ^# | wc -l`
if [ $count -eq 0 ]; then
fail "none of events are recorded"
fi
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
index 9dcd0ca1f49c..8095e122daa9 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
@@ -11,17 +11,6 @@ fi
disable_tracing
clear_trace
-# filter by ?, schedule is always good
-if ! echo "sch?dule" > set_ftrace_filter; then
- # test for powerpc 64
- if ! echo ".sch?dule" > set_ftrace_filter; then
- fail "can not enable schedule filter"
- fi
- cat set_ftrace_filter | grep '^.schedule$'
-else
- cat set_ftrace_filter | grep '^schedule$'
-fi
-
ftrace_filter_check() { # glob grep
echo "$1" > set_ftrace_filter
cut -f1 -d" " set_ftrace_filter > $TMPDIR/actual
@@ -39,11 +28,28 @@ ftrace_filter_check '*schedule*' '^.*schedule.*$'
# filter by *, end match
ftrace_filter_check 'schedule*' '^schedule.*$'
+# Advanced full-glob matching feature is recently supported.
+# Skip the tests if we are sure the kernel does not support it.
+if grep -q 'accepts: .* glob-matching-pattern' README ; then
+
# filter by *, both side match
ftrace_filter_check 'sch*ule' '^sch.*ule$'
# filter by char class.
ftrace_filter_check '[Ss]y[Ss]_*' '^[Ss]y[Ss]_.*$'
+# filter by ?, schedule is always good
+if ! echo "sch?dule" > set_ftrace_filter; then
+ # test for powerpc 64
+ if ! echo ".sch?dule" > set_ftrace_filter; then
+ fail "can not enable schedule filter"
+ fi
+ cat set_ftrace_filter | grep '^.schedule$'
+else
+ cat set_ftrace_filter | grep '^schedule$'
+fi
+
+fi
+
echo > set_ftrace_filter
enable_tracing
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
index aa31368851c9..77dfb6b48186 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
@@ -72,6 +72,15 @@ run_enable_disable() {
test_event_enabled $check_disable
echo "schedule:${enable}_event:$EVENT" > set_ftrace_filter
+ if [ -d ../../instances ]; then # Check instances
+ cur=`cat set_ftrace_filter`
+ top=`cat ../../set_ftrace_filter`
+ if [ "$cur" = "$top" ]; then
+ echo "This kernel is too old to support per instance filter"
+ reset_ftrace_filter
+ exit_unsupported
+ fi
+ fi
echo " make sure it works 5 times"
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc
index c8e02ec01eaf..7a9ab4ff83b6 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc
@@ -63,6 +63,10 @@ fi
# powerpc uses .schedule
func="schedule"
+available_file=available_filter_functions
+if [ -d ../../instances -a -f ../../available_filter_functions ]; then
+ available_file=../../available_filter_functions
+fi
x=`grep '^\.schedule$' available_filter_functions | wc -l`
if [ "$x" -eq 1 ]; then
func=".schedule"
@@ -71,6 +75,15 @@ fi
echo '** SET TRACEOFF'
echo "$func:traceoff" > set_ftrace_filter
+if [ -d ../../instances ]; then # Check instances
+ cur=`cat set_ftrace_filter`
+ top=`cat ../../set_ftrace_filter`
+ if [ "$cur" = "$top" ]; then
+ echo "This kernel is too old to support per instance filter"
+ reset_ftrace_filter
+ exit_unsupported
+ fi
+fi
cnt=`grep schedule set_ftrace_filter | wc -l`
if [ $cnt -ne 1 ]; then
@@ -90,11 +103,11 @@ if [ $on != "0" ]; then
fail "Tracing is not off"
fi
-line1=`cat trace | tail -1`
+csum1=`md5sum trace`
sleep $SLEEP_TIME
-line2=`cat trace | tail -1`
+csum2=`md5sum trace`
-if [ "$line1" != "$line2" ]; then
+if [ "$csum1" != "$csum2" ]; then
fail "Tracing file is still changing"
fi
diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
index c73db7863adb..8a353314dc9b 100644
--- a/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
+++ b/tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
@@ -82,7 +82,10 @@ rmdir foo
if [ -d foo ]; then
fail "foo still exists"
fi
-
+if grep -q "schedule:enable_event:sched:sched_switch" ../set_ftrace_filter; then
+ echo "Older kernel detected. Cleanup filter"
+ echo '!schedule:enable_event:sched:sched_switch' > ../set_ftrace_filter
+fi
instance_slam() {
while :; do
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc
index 57abdf1caabf..7ec6f2639ad6 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc
@@ -2,6 +2,7 @@
# description: Kretprobe dynamic event with maxactive
[ -f kprobe_events ] || exit_unsupported # this is configurable
+grep -q 'r\[maxactive\]' README || exit_unsupported # this is older version
echo > kprobe_events
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi.c b/tools/testing/selftests/futex/functional/futex_requeue_pi.c
index 3da06ad23996..d24ab7421e73 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi.c
@@ -32,6 +32,7 @@
#include "futextest.h"
#include "logging.h"
+#define TEST_NAME "futex-requeue-pi"
#define MAX_WAKE_ITERS 1000
#define THREAD_MAX 10
#define SIGNAL_PERIOD_US 100
@@ -404,6 +405,6 @@ int main(int argc, char *argv[])
*/
ret = unit_test(broadcast, locked, owner, timeout_ns);
- print_result(ret);
+ print_result(TEST_NAME, ret);
return ret;
}
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c b/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c
index d5e4f2c4da2a..e0a798ad0d21 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c
@@ -30,6 +30,8 @@
#include "futextest.h"
#include "logging.h"
+#define TEST_NAME "futex-requeue-pi-mismatched-ops"
+
futex_t f1 = FUTEX_INITIALIZER;
futex_t f2 = FUTEX_INITIALIZER;
int child_ret = 0;
@@ -130,6 +132,6 @@ int main(int argc, char *argv[])
out:
/* If the kernel crashes, we shouldn't return at all. */
- print_result(ret);
+ print_result(TEST_NAME, ret);
return ret;
}
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
index 3d7dc6afc3f8..982f83577501 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
@@ -32,6 +32,7 @@
#include "futextest.h"
#include "logging.h"
+#define TEST_NAME "futex-requeue-pi-signal-restart"
#define DELAY_US 100
futex_t f1 = FUTEX_INITIALIZER;
@@ -218,6 +219,6 @@ int main(int argc, char *argv[])
if (ret == RET_PASS && waiter_ret)
ret = waiter_ret;
- print_result(ret);
+ print_result(TEST_NAME, ret);
return ret;
}
diff --git a/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c b/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c
index 5f687f247454..bdc48dc047e5 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c
@@ -34,6 +34,7 @@
#include "logging.h"
#include "futextest.h"
+#define TEST_NAME "futex-wait-private-mapped-file"
#define PAGE_SZ 4096
char pad[PAGE_SZ] = {1};
@@ -60,7 +61,7 @@ void *thr_futex_wait(void *arg)
ret = futex_wait(&val, 1, &wait_timeout, 0);
if (ret && errno != EWOULDBLOCK && errno != ETIMEDOUT) {
error("futex error.\n", errno);
- print_result(RET_ERROR);
+ print_result(TEST_NAME, RET_ERROR);
exit(RET_ERROR);
}
@@ -120,6 +121,6 @@ int main(int argc, char **argv)
pthread_join(thr, NULL);
out:
- print_result(ret);
+ print_result(TEST_NAME, ret);
return ret;
}
diff --git a/tools/testing/selftests/futex/functional/futex_wait_timeout.c b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
index ab428ca894de..6aadd560366e 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_timeout.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_timeout.c
@@ -27,6 +27,8 @@
#include "futextest.h"
#include "logging.h"
+#define TEST_NAME "futex-wait-timeout"
+
static long timeout_ns = 100000; /* 100us default timeout */
void usage(char *prog)
@@ -81,6 +83,6 @@ int main(int argc, char *argv[])
ret = RET_FAIL;
}
- print_result(ret);
+ print_result(TEST_NAME, ret);
return ret;
}
diff --git a/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c b/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c
index fe7aee96844b..d237a8b702f0 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c
@@ -36,6 +36,7 @@
#include "logging.h"
#include "futextest.h"
+#define TEST_NAME "futex-wait-uninitialized-heap"
#define WAIT_US 5000000
static int child_blocked = 1;
@@ -119,6 +120,6 @@ int main(int argc, char **argv)
}
out:
- print_result(ret);
+ print_result(TEST_NAME, ret);
return ret;
}
diff --git a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
index b6b027448825..9a2c56fa7305 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
@@ -28,6 +28,7 @@
#include "futextest.h"
#include "logging.h"
+#define TEST_NAME "futex-wait-wouldblock"
#define timeout_ns 100000
void usage(char *prog)
@@ -74,6 +75,6 @@ int main(int argc, char *argv[])
ret = RET_FAIL;
}
- print_result(ret);
+ print_result(TEST_NAME, ret);
return ret;
}
diff --git a/tools/testing/selftests/futex/include/logging.h b/tools/testing/selftests/futex/include/logging.h
index e14469103f07..4e7944984fbb 100644
--- a/tools/testing/selftests/futex/include/logging.h
+++ b/tools/testing/selftests/futex/include/logging.h
@@ -107,7 +107,7 @@ void log_verbosity(int level)
*
* print_result() is primarily intended for functional tests.
*/
-void print_result(int ret)
+void print_result(const char *test_name, int ret)
{
const char *result = "Unknown return code";
@@ -124,7 +124,7 @@ void print_result(int ret)
result = FAIL;
break;
}
- printf("Result: %s\n", result);
+ printf("selftests: %s [%s]\n", test_name, result);
}
/* log level macros */
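
With logging.h changed this way, each futex test above only needs to carry a
TEST_NAME and pass it through. A condensed sketch of the resulting pattern
(hypothetical test, name made up; RET_PASS comes from the futex selftest
headers):

	#include "futextest.h"
	#include "logging.h"

	#define TEST_NAME "futex-example"

	int main(int argc, char *argv[])
	{
		int ret = RET_PASS;

		/* ... exercise the futex operation under test here ... */

		/* prints "selftests: futex-example [<result>]" */
		print_result(TEST_NAME, ret);
		return ret;
	}
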
diff --git a/tools/testing/selftests/intel_pstate/.gitignore b/tools/testing/selftests/intel_pstate/.gitignore
new file mode 100644
index 000000000000..3bfcbae5fa13
--- /dev/null
+++ b/tools/testing/selftests/intel_pstate/.gitignore
@@ -0,0 +1,2 @@
+aperf
+msr
diff --git a/tools/testing/selftests/intel_pstate/Makefile b/tools/testing/selftests/intel_pstate/Makefile
index 19678e90efb2..849a90ffe8dd 100644
--- a/tools/testing/selftests/intel_pstate/Makefile
+++ b/tools/testing/selftests/intel_pstate/Makefile
@@ -1,5 +1,5 @@
CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE
-LDFLAGS := $(LDFLAGS) -lm
+LDLIBS := $(LDLIBS) -lm
TEST_GEN_FILES := msr aperf
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index ef1c80d67ac7..08e90c2cc5cb 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -12,6 +12,7 @@
#include <stdlib.h>
#include <unistd.h>
+#include <stdarg.h>
/* define kselftest exit codes */
#define KSFT_PASS 0
@@ -31,38 +32,125 @@ struct ksft_count {
static struct ksft_count ksft_cnt;
+static inline int ksft_test_num(void)
+{
+ return ksft_cnt.ksft_pass + ksft_cnt.ksft_fail +
+ ksft_cnt.ksft_xfail + ksft_cnt.ksft_xpass +
+ ksft_cnt.ksft_xskip;
+}
+
static inline void ksft_inc_pass_cnt(void) { ksft_cnt.ksft_pass++; }
static inline void ksft_inc_fail_cnt(void) { ksft_cnt.ksft_fail++; }
static inline void ksft_inc_xfail_cnt(void) { ksft_cnt.ksft_xfail++; }
static inline void ksft_inc_xpass_cnt(void) { ksft_cnt.ksft_xpass++; }
static inline void ksft_inc_xskip_cnt(void) { ksft_cnt.ksft_xskip++; }
+static inline void ksft_print_header(void)
+{
+ printf("TAP version 13\n");
+}
+
static inline void ksft_print_cnts(void)
{
- printf("Pass: %d Fail: %d Xfail: %d Xpass: %d, Xskip: %d\n",
- ksft_cnt.ksft_pass, ksft_cnt.ksft_fail,
- ksft_cnt.ksft_xfail, ksft_cnt.ksft_xpass,
- ksft_cnt.ksft_xskip);
+ printf("1..%d\n", ksft_test_num());
+}
+
+static inline void ksft_print_msg(const char *msg, ...)
+{
+ va_list args;
+
+ va_start(args, msg);
+ printf("# ");
+ vprintf(msg, args);
+ va_end(args);
+}
+
+static inline void ksft_test_result_pass(const char *msg, ...)
+{
+ va_list args;
+
+ ksft_cnt.ksft_pass++;
+
+ va_start(args, msg);
+ printf("ok %d ", ksft_test_num());
+ vprintf(msg, args);
+ va_end(args);
+}
+
+static inline void ksft_test_result_fail(const char *msg, ...)
+{
+ va_list args;
+
+ ksft_cnt.ksft_fail++;
+
+ va_start(args, msg);
+ printf("not ok %d ", ksft_test_num());
+ vprintf(msg, args);
+ va_end(args);
+}
+
+static inline void ksft_test_result_skip(const char *msg, ...)
+{
+ va_list args;
+
+ ksft_cnt.ksft_xskip++;
+
+ va_start(args, msg);
+ printf("ok %d # skip ", ksft_test_num());
+ vprintf(msg, args);
+ va_end(args);
}
static inline int ksft_exit_pass(void)
{
+ ksft_print_cnts();
exit(KSFT_PASS);
}
+
static inline int ksft_exit_fail(void)
{
+ printf("Bail out!\n");
+ ksft_print_cnts();
+ exit(KSFT_FAIL);
+}
+
+static inline int ksft_exit_fail_msg(const char *msg, ...)
+{
+ va_list args;
+
+ va_start(args, msg);
+ printf("Bail out! ");
+ vprintf(msg, args);
+ va_end(args);
+
+ ksft_print_cnts();
exit(KSFT_FAIL);
}
+
static inline int ksft_exit_xfail(void)
{
+ ksft_print_cnts();
exit(KSFT_XFAIL);
}
+
static inline int ksft_exit_xpass(void)
{
+ ksft_print_cnts();
exit(KSFT_XPASS);
}
-static inline int ksft_exit_skip(void)
+
+static inline int ksft_exit_skip(const char *msg, ...)
{
+ if (msg) {
+ va_list args;
+
+ va_start(args, msg);
+ printf("1..%d # Skipped: ", ksft_test_num());
+ vprintf(msg, args);
+ va_end(args);
+ } else {
+ ksft_print_cnts();
+ }
exit(KSFT_SKIP);
}
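
Taken together, the helpers added above produce TAP 13 output along these
lines; a sketch (the strings in the comments follow the format strings in the
hunks above):

	#include "../kselftest.h"

	int main(void)
	{
		ksft_print_header();			/* TAP version 13 */
		ksft_test_result_pass("feature A\n");	/* ok 1 feature A */
		ksft_test_result_skip("feature B\n");	/* ok 2 # skip feature B */
		ksft_print_msg("extra diagnostic\n");	/* # extra diagnostic */
		return ksft_exit_pass();		/* 1..2, then exit(KSFT_PASS) */
	}
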
diff --git a/tools/testing/selftests/seccomp/test_harness.h b/tools/testing/selftests/kselftest_harness.h
index a786c69c7584..c56f72e07cd7 100644
--- a/tools/testing/selftests/seccomp/test_harness.h
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -2,44 +2,53 @@
* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
* Use of this source code is governed by the GPLv2 license.
*
- * test_harness.h: simple C unit test helper.
- *
- * Usage:
- * #include "test_harness.h"
- * TEST(standalone_test) {
- * do_some_stuff;
- * EXPECT_GT(10, stuff) {
- * stuff_state_t state;
- * enumerate_stuff_state(&state);
- * TH_LOG("expectation failed with state: %s", state.msg);
- * }
- * more_stuff;
- * ASSERT_NE(some_stuff, NULL) TH_LOG("how did it happen?!");
- * last_stuff;
- * EXPECT_EQ(0, last_stuff);
- * }
- *
- * FIXTURE(my_fixture) {
- * mytype_t *data;
- * int awesomeness_level;
- * };
- * FIXTURE_SETUP(my_fixture) {
- * self->data = mytype_new();
- * ASSERT_NE(NULL, self->data);
- * }
- * FIXTURE_TEARDOWN(my_fixture) {
- * mytype_free(self->data);
- * }
- * TEST_F(my_fixture, data_is_good) {
- * EXPECT_EQ(1, is_my_data_good(self->data));
- * }
- *
- * TEST_HARNESS_MAIN
+ * kselftest_harness.h: simple C unit test helper.
+ *
+ * See documentation in Documentation/dev-tools/kselftest.rst
*
* API inspired by code.google.com/p/googletest
*/
-#ifndef TEST_HARNESS_H_
-#define TEST_HARNESS_H_
+
+/**
+ * DOC: example
+ *
+ * .. code-block:: c
+ *
+ * #include "../kselftest_harness.h"
+ *
+ * TEST(standalone_test) {
+ * do_some_stuff;
+ * EXPECT_GT(10, stuff) {
+ * stuff_state_t state;
+ * enumerate_stuff_state(&state);
+ * TH_LOG("expectation failed with state: %s", state.msg);
+ * }
+ * more_stuff;
+ * ASSERT_NE(some_stuff, NULL) TH_LOG("how did it happen?!");
+ * last_stuff;
+ * EXPECT_EQ(0, last_stuff);
+ * }
+ *
+ * FIXTURE(my_fixture) {
+ * mytype_t *data;
+ * int awesomeness_level;
+ * };
+ * FIXTURE_SETUP(my_fixture) {
+ * self->data = mytype_new();
+ * ASSERT_NE(NULL, self->data);
+ * }
+ * FIXTURE_TEARDOWN(my_fixture) {
+ * mytype_free(self->data);
+ * }
+ * TEST_F(my_fixture, data_is_good) {
+ * EXPECT_EQ(1, is_my_data_good(self->data));
+ * }
+ *
+ * TEST_HARNESS_MAIN
+ */
+
+#ifndef __KSELFTEST_HARNESS_H
+#define __KSELFTEST_HARNESS_H
#define _GNU_SOURCE
#include <stdint.h>
@@ -50,158 +59,33 @@
#include <sys/wait.h>
#include <unistd.h>
-/* All exported functionality should be declared through this macro. */
-#define TEST_API(x) _##x
-/*
- * Exported APIs
- */
+/* Utilities exposed to the test definitions */
+#ifndef TH_LOG_STREAM
+# define TH_LOG_STREAM stderr
+#endif
-/* TEST(name) { implementation }
- * Defines a test by name.
- * Names must be unique and tests must not be run in parallel. The
- * implementation containing block is a function and scoping should be treated
- * as such. Returning early may be performed with a bare "return;" statement.
- *
- * EXPECT_* and ASSERT_* are valid in a TEST() { } context.
- */
-#define TEST TEST_API(TEST)
+#ifndef TH_LOG_ENABLED
+# define TH_LOG_ENABLED 1
+#endif
-/* TEST_SIGNAL(name, signal) { implementation }
- * Defines a test by name and the expected term signal.
- * Names must be unique and tests must not be run in parallel. The
- * implementation containing block is a function and scoping should be treated
- * as such. Returning early may be performed with a bare "return;" statement.
+/**
+ * TH_LOG(fmt, ...)
*
- * EXPECT_* and ASSERT_* are valid in a TEST() { } context.
- */
-#define TEST_SIGNAL TEST_API(TEST_SIGNAL)
-
-/* FIXTURE(datatype name) {
- * type property1;
- * ...
- * };
- * Defines the data provided to TEST_F()-defined tests as |self|. It should be
- * populated and cleaned up using FIXTURE_SETUP and FIXTURE_TEARDOWN.
- */
-#define FIXTURE TEST_API(FIXTURE)
-
-/* FIXTURE_DATA(datatype name)
- * This call may be used when the type of the fixture data
- * is needed. In general, this should not be needed unless
- * the |self| is being passed to a helper directly.
- */
-#define FIXTURE_DATA TEST_API(FIXTURE_DATA)
-
-/* FIXTURE_SETUP(fixture name) { implementation }
- * Populates the required "setup" function for a fixture. An instance of the
- * datatype defined with _FIXTURE_DATA will be exposed as |self| for the
- * implementation.
+ * @fmt: format string
+ * @...: optional arguments
*
- * ASSERT_* are valid for use in this context and will prempt the execution
- * of any dependent fixture tests.
+ * .. code-block:: c
*
- * A bare "return;" statement may be used to return early.
- */
-#define FIXTURE_SETUP TEST_API(FIXTURE_SETUP)
-
-/* FIXTURE_TEARDOWN(fixture name) { implementation }
- * Populates the required "teardown" function for a fixture. An instance of the
- * datatype defined with _FIXTURE_DATA will be exposed as |self| for the
- * implementation to clean up.
+ * TH_LOG(format, ...)
*
- * A bare "return;" statement may be used to return early.
- */
-#define FIXTURE_TEARDOWN TEST_API(FIXTURE_TEARDOWN)
-
-/* TEST_F(fixture, name) { implementation }
- * Defines a test that depends on a fixture (e.g., is part of a test case).
- * Very similar to TEST() except that |self| is the setup instance of fixture's
- * datatype exposed for use by the implementation.
- */
-#define TEST_F TEST_API(TEST_F)
-
-#define TEST_F_SIGNAL TEST_API(TEST_F_SIGNAL)
-
-/* Use once to append a main() to the test file. E.g.,
- * TEST_HARNESS_MAIN
- */
-#define TEST_HARNESS_MAIN TEST_API(TEST_HARNESS_MAIN)
-
-/*
- * Operators for use in TEST and TEST_F.
- * ASSERT_* calls will stop test execution immediately.
- * EXPECT_* calls will emit a failure warning, note it, and continue.
- */
-
-/* ASSERT_EQ(expected, measured): expected == measured */
-#define ASSERT_EQ TEST_API(ASSERT_EQ)
-/* ASSERT_NE(expected, measured): expected != measured */
-#define ASSERT_NE TEST_API(ASSERT_NE)
-/* ASSERT_LT(expected, measured): expected < measured */
-#define ASSERT_LT TEST_API(ASSERT_LT)
-/* ASSERT_LE(expected, measured): expected <= measured */
-#define ASSERT_LE TEST_API(ASSERT_LE)
-/* ASSERT_GT(expected, measured): expected > measured */
-#define ASSERT_GT TEST_API(ASSERT_GT)
-/* ASSERT_GE(expected, measured): expected >= measured */
-#define ASSERT_GE TEST_API(ASSERT_GE)
-/* ASSERT_NULL(measured): NULL == measured */
-#define ASSERT_NULL TEST_API(ASSERT_NULL)
-/* ASSERT_TRUE(measured): measured != 0 */
-#define ASSERT_TRUE TEST_API(ASSERT_TRUE)
-/* ASSERT_FALSE(measured): measured == 0 */
-#define ASSERT_FALSE TEST_API(ASSERT_FALSE)
-/* ASSERT_STREQ(expected, measured): !strcmp(expected, measured) */
-#define ASSERT_STREQ TEST_API(ASSERT_STREQ)
-/* ASSERT_STRNE(expected, measured): strcmp(expected, measured) */
-#define ASSERT_STRNE TEST_API(ASSERT_STRNE)
-/* EXPECT_EQ(expected, measured): expected == measured */
-#define EXPECT_EQ TEST_API(EXPECT_EQ)
-/* EXPECT_NE(expected, measured): expected != measured */
-#define EXPECT_NE TEST_API(EXPECT_NE)
-/* EXPECT_LT(expected, measured): expected < measured */
-#define EXPECT_LT TEST_API(EXPECT_LT)
-/* EXPECT_LE(expected, measured): expected <= measured */
-#define EXPECT_LE TEST_API(EXPECT_LE)
-/* EXPECT_GT(expected, measured): expected > measured */
-#define EXPECT_GT TEST_API(EXPECT_GT)
-/* EXPECT_GE(expected, measured): expected >= measured */
-#define EXPECT_GE TEST_API(EXPECT_GE)
-/* EXPECT_NULL(measured): NULL == measured */
-#define EXPECT_NULL TEST_API(EXPECT_NULL)
-/* EXPECT_TRUE(measured): 0 != measured */
-#define EXPECT_TRUE TEST_API(EXPECT_TRUE)
-/* EXPECT_FALSE(measured): 0 == measured */
-#define EXPECT_FALSE TEST_API(EXPECT_FALSE)
-/* EXPECT_STREQ(expected, measured): !strcmp(expected, measured) */
-#define EXPECT_STREQ TEST_API(EXPECT_STREQ)
-/* EXPECT_STRNE(expected, measured): strcmp(expected, measured) */
-#define EXPECT_STRNE TEST_API(EXPECT_STRNE)
-
-/* TH_LOG(format, ...)
* Optional debug logging function available for use in tests.
* Logging may be enabled or disabled by defining TH_LOG_ENABLED.
* E.g., #define TH_LOG_ENABLED 1
- * If no definition is provided, logging is enabled by default.
- */
-#define TH_LOG TEST_API(TH_LOG)
-
-/*
- * Internal implementation.
*
+ * If no definition is provided, logging is enabled by default.
*/
-
-/* Utilities exposed to the test definitions */
-#ifndef TH_LOG_STREAM
-# define TH_LOG_STREAM stderr
-#endif
-
-#ifndef TH_LOG_ENABLED
-# define TH_LOG_ENABLED 1
-#endif
-
-#define _TH_LOG(fmt, ...) do { \
+#define TH_LOG(fmt, ...) do { \
if (TH_LOG_ENABLED) \
__TH_LOG(fmt, ##__VA_ARGS__); \
} while (0)
@@ -211,10 +95,43 @@
fprintf(TH_LOG_STREAM, "%s:%d:%s:" fmt "\n", \
__FILE__, __LINE__, _metadata->name, ##__VA_ARGS__)
-/* Defines the test function and creates the registration stub. */
-#define _TEST(test_name) __TEST_IMPL(test_name, -1)
+/**
+ * TEST(test_name) - Defines the test function and creates the registration
+ * stub
+ *
+ * @test_name: test name
+ *
+ * .. code-block:: c
+ *
+ * TEST(name) { implementation }
+ *
+ * Defines a test by name.
+ * Names must be unique and tests must not be run in parallel. The block
+ * containing the implementation is a function, and scoping should be treated
+ * as such. Returning early may be performed with a bare "return;" statement.
+ *
+ * EXPECT_* and ASSERT_* are valid in a TEST() { } context.
+ */
+#define TEST(test_name) __TEST_IMPL(test_name, -1)
-#define _TEST_SIGNAL(test_name, signal) __TEST_IMPL(test_name, signal)
+/**
+ * TEST_SIGNAL(test_name, signal)
+ *
+ * @test_name: test name
+ * @signal: signal number
+ *
+ * .. code-block:: c
+ *
+ * TEST_SIGNAL(name, signal) { implementation }
+ *
+ * Defines a test by name and the expected term signal.
+ * Names must be unique and tests must not be run in parallel. The block
+ * containing the implementation is a function, and scoping should be treated
+ * as such. Returning early may be performed with a bare "return;" statement.
+ *
+ * EXPECT_* and ASSERT_* are valid in a TEST() { } context.
+ */
+#define TEST_SIGNAL(test_name, signal) __TEST_IMPL(test_name, signal)
#define __TEST_IMPL(test_name, _signal) \
static void test_name(struct __test_metadata *_metadata); \
@@ -228,50 +145,121 @@
static void test_name( \
struct __test_metadata __attribute__((unused)) *_metadata)
-/* Wraps the struct name so we have one less argument to pass around. */
-#define _FIXTURE_DATA(fixture_name) struct _test_data_##fixture_name
+/**
+ * FIXTURE_DATA(datatype_name) - Wraps the struct name so we have one less
+ * argument to pass around
+ *
+ * @datatype_name: datatype name
+ *
+ * .. code-block:: c
+ *
+ * FIXTURE_DATA(datatype name)
+ *
+ * This call may be used when the type of the fixture data
+ * is needed. In general, this should not be needed unless
+ * the *self* is being passed to a helper directly.
+ */
+#define FIXTURE_DATA(datatype_name) struct _test_data_##datatype_name
-/* Called once per fixture to setup the data and register. */
-#define _FIXTURE(fixture_name) \
+/**
+ * FIXTURE(fixture_name) - Called once per fixture to setup the data and
+ * register
+ *
+ * @fixture_name: fixture name
+ *
+ * .. code-block:: c
+ *
+ * FIXTURE(datatype name) {
+ * type property1;
+ * ...
+ * };
+ *
+ * Defines the data provided to TEST_F()-defined tests as *self*. It should be
+ * populated and cleaned up using FIXTURE_SETUP() and FIXTURE_TEARDOWN().
+ */
+#define FIXTURE(fixture_name) \
static void __attribute__((constructor)) \
_register_##fixture_name##_data(void) \
{ \
__fixture_count++; \
} \
- _FIXTURE_DATA(fixture_name)
+ FIXTURE_DATA(fixture_name)
-/* Prepares the setup function for the fixture. |_metadata| is included
- * so that ASSERT_* work as a convenience.
+/**
+ * FIXTURE_SETUP(fixture_name) - Prepares the setup function for the fixture.
+ * *_metadata* is included so that ASSERT_* work as a convenience
+ *
+ * @fixture_name: fixture name
+ *
+ * .. code-block:: c
+ *
+ * FIXTURE_SETUP(fixture name) { implementation }
+ *
+ * Populates the required "setup" function for a fixture. An instance of the
+ * datatype defined with FIXTURE_DATA() will be exposed as *self* for the
+ * implementation.
+ *
+ * ASSERT_* are valid for use in this context and will preempt the execution
+ * of any dependent fixture tests.
+ *
+ * A bare "return;" statement may be used to return early.
*/
-#define _FIXTURE_SETUP(fixture_name) \
+#define FIXTURE_SETUP(fixture_name) \
void fixture_name##_setup( \
struct __test_metadata __attribute__((unused)) *_metadata, \
- _FIXTURE_DATA(fixture_name) __attribute__((unused)) *self)
-#define _FIXTURE_TEARDOWN(fixture_name) \
+ FIXTURE_DATA(fixture_name) __attribute__((unused)) *self)
+/**
+ * FIXTURE_TEARDOWN(fixture_name)
+ *
+ * @fixture_name: fixture name
+ *
+ * .. code-block:: c
+ *
+ * FIXTURE_TEARDOWN(fixture name) { implementation }
+ *
+ * Populates the required "teardown" function for a fixture. An instance of the
+ * datatype defined with FIXTURE_DATA() will be exposed as *self* for the
+ * implementation to clean up.
+ *
+ * A bare "return;" statement may be used to return early.
+ */
+#define FIXTURE_TEARDOWN(fixture_name) \
void fixture_name##_teardown( \
struct __test_metadata __attribute__((unused)) *_metadata, \
- _FIXTURE_DATA(fixture_name) __attribute__((unused)) *self)
+ FIXTURE_DATA(fixture_name) __attribute__((unused)) *self)
-/* Emits test registration and helpers for fixture-based test
- * cases.
- * TODO(wad) register fixtures on dedicated test lists.
+/**
+ * TEST_F(fixture_name, test_name) - Emits test registration and helpers for
+ * fixture-based test cases
+ *
+ * @fixture_name: fixture name
+ * @test_name: test name
+ *
+ * .. code-block:: c
+ *
+ * TEST_F(fixture, name) { implementation }
+ *
+ * Defines a test that depends on a fixture (e.g., is part of a test case).
+ * Very similar to TEST() except that *self* is the setup instance of fixture's
+ * datatype exposed for use by the implementation.
*/
-#define _TEST_F(fixture_name, test_name) \
+/* TODO(wad) register fixtures on dedicated test lists. */
+#define TEST_F(fixture_name, test_name) \
__TEST_F_IMPL(fixture_name, test_name, -1)
-#define _TEST_F_SIGNAL(fixture_name, test_name, signal) \
+#define TEST_F_SIGNAL(fixture_name, test_name, signal) \
__TEST_F_IMPL(fixture_name, test_name, signal)
#define __TEST_F_IMPL(fixture_name, test_name, signal) \
static void fixture_name##_##test_name( \
struct __test_metadata *_metadata, \
- _FIXTURE_DATA(fixture_name) *self); \
+ FIXTURE_DATA(fixture_name) *self); \
static inline void wrapper_##fixture_name##_##test_name( \
struct __test_metadata *_metadata) \
{ \
/* fixture data is alloced, setup, and torn down per call. */ \
- _FIXTURE_DATA(fixture_name) self; \
- memset(&self, 0, sizeof(_FIXTURE_DATA(fixture_name))); \
+ FIXTURE_DATA(fixture_name) self; \
+ memset(&self, 0, sizeof(FIXTURE_DATA(fixture_name))); \
fixture_name##_setup(_metadata, &self); \
/* Let setup failure terminate early. */ \
if (!_metadata->passed) \
@@ -292,10 +280,18 @@
} \
static void fixture_name##_##test_name( \
struct __test_metadata __attribute__((unused)) *_metadata, \
- _FIXTURE_DATA(fixture_name) __attribute__((unused)) *self)
+ FIXTURE_DATA(fixture_name) __attribute__((unused)) *self)
-/* Exports a simple wrapper to run the test harness. */
-#define _TEST_HARNESS_MAIN \
+/**
+ * TEST_HARNESS_MAIN - Simple wrapper to run the test harness
+ *
+ * .. code-block:: c
+ *
+ * TEST_HARNESS_MAIN
+ *
+ * Use once to append a main() to the test file.
+ */
+#define TEST_HARNESS_MAIN \
static void __attribute__((constructor)) \
__constructor_order_last(void) \
{ \
@@ -306,54 +302,249 @@
return test_harness_run(argc, argv); \
}
-#define _ASSERT_EQ(_expected, _seen) \
- __EXPECT(_expected, _seen, ==, 1)
-#define _ASSERT_NE(_expected, _seen) \
- __EXPECT(_expected, _seen, !=, 1)
-#define _ASSERT_LT(_expected, _seen) \
- __EXPECT(_expected, _seen, <, 1)
-#define _ASSERT_LE(_expected, _seen) \
- __EXPECT(_expected, _seen, <=, 1)
-#define _ASSERT_GT(_expected, _seen) \
- __EXPECT(_expected, _seen, >, 1)
-#define _ASSERT_GE(_expected, _seen) \
- __EXPECT(_expected, _seen, >=, 1)
-#define _ASSERT_NULL(_seen) \
- __EXPECT(NULL, _seen, ==, 1)
-
-#define _ASSERT_TRUE(_seen) \
- _ASSERT_NE(0, _seen)
-#define _ASSERT_FALSE(_seen) \
- _ASSERT_EQ(0, _seen)
-#define _ASSERT_STREQ(_expected, _seen) \
- __EXPECT_STR(_expected, _seen, ==, 1)
-#define _ASSERT_STRNE(_expected, _seen) \
- __EXPECT_STR(_expected, _seen, !=, 1)
-
-#define _EXPECT_EQ(_expected, _seen) \
- __EXPECT(_expected, _seen, ==, 0)
-#define _EXPECT_NE(_expected, _seen) \
- __EXPECT(_expected, _seen, !=, 0)
-#define _EXPECT_LT(_expected, _seen) \
- __EXPECT(_expected, _seen, <, 0)
-#define _EXPECT_LE(_expected, _seen) \
- __EXPECT(_expected, _seen, <=, 0)
-#define _EXPECT_GT(_expected, _seen) \
- __EXPECT(_expected, _seen, >, 0)
-#define _EXPECT_GE(_expected, _seen) \
- __EXPECT(_expected, _seen, >=, 0)
-
-#define _EXPECT_NULL(_seen) \
- __EXPECT(NULL, _seen, ==, 0)
-#define _EXPECT_TRUE(_seen) \
- _EXPECT_NE(0, _seen)
-#define _EXPECT_FALSE(_seen) \
- _EXPECT_EQ(0, _seen)
-
-#define _EXPECT_STREQ(_expected, _seen) \
- __EXPECT_STR(_expected, _seen, ==, 0)
-#define _EXPECT_STRNE(_expected, _seen) \
- __EXPECT_STR(_expected, _seen, !=, 0)
+/**
+ * DOC: operators
+ *
+ * Operators for use in TEST() and TEST_F().
+ * ASSERT_* calls will stop test execution immediately.
+ * EXPECT_* calls will emit a failure warning, note it, and continue.
+ */
+
+/**
+ * ASSERT_EQ(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * ASSERT_EQ(expected, measured): expected == measured
+ */
+#define ASSERT_EQ(expected, seen) \
+ __EXPECT(expected, seen, ==, 1)
+
+/**
+ * ASSERT_NE(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * ASSERT_NE(expected, measured): expected != measured
+ */
+#define ASSERT_NE(expected, seen) \
+ __EXPECT(expected, seen, !=, 1)
+
+/**
+ * ASSERT_LT(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * ASSERT_LT(expected, measured): expected < measured
+ */
+#define ASSERT_LT(expected, seen) \
+ __EXPECT(expected, seen, <, 1)
+
+/**
+ * ASSERT_LE(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * ASSERT_LE(expected, measured): expected <= measured
+ */
+#define ASSERT_LE(expected, seen) \
+ __EXPECT(expected, seen, <=, 1)
+
+/**
+ * ASSERT_GT(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * ASSERT_GT(expected, measured): expected > measured
+ */
+#define ASSERT_GT(expected, seen) \
+ __EXPECT(expected, seen, >, 1)
+
+/**
+ * ASSERT_GE(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * ASSERT_GE(expected, measured): expected >= measured
+ */
+#define ASSERT_GE(expected, seen) \
+ __EXPECT(expected, seen, >=, 1)
+
+/**
+ * ASSERT_NULL(seen)
+ *
+ * @seen: measured value
+ *
+ * ASSERT_NULL(measured): NULL == measured
+ */
+#define ASSERT_NULL(seen) \
+ __EXPECT(NULL, seen, ==, 1)
+
+/**
+ * ASSERT_TRUE(seen)
+ *
+ * @seen: measured value
+ *
+ * ASSERT_TRUE(measured): measured != 0
+ */
+#define ASSERT_TRUE(seen) \
+ ASSERT_NE(0, seen)
+
+/**
+ * ASSERT_FALSE(seen)
+ *
+ * @seen: measured value
+ *
+ * ASSERT_FALSE(measured): measured == 0
+ */
+#define ASSERT_FALSE(seen) \
+ ASSERT_EQ(0, seen)
+
+/**
+ * ASSERT_STREQ(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * ASSERT_STREQ(expected, measured): !strcmp(expected, measured)
+ */
+#define ASSERT_STREQ(expected, seen) \
+ __EXPECT_STR(expected, seen, ==, 1)
+
+/**
+ * ASSERT_STRNE(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * ASSERT_STRNE(expected, measured): strcmp(expected, measured)
+ */
+#define ASSERT_STRNE(expected, seen) \
+ __EXPECT_STR(expected, seen, !=, 1)
+
+/**
+ * EXPECT_EQ(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * EXPECT_EQ(expected, measured): expected == measured
+ */
+#define EXPECT_EQ(expected, seen) \
+ __EXPECT(expected, seen, ==, 0)
+
+/**
+ * EXPECT_NE(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * EXPECT_NE(expected, measured): expected != measured
+ */
+#define EXPECT_NE(expected, seen) \
+ __EXPECT(expected, seen, !=, 0)
+
+/**
+ * EXPECT_LT(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * EXPECT_LT(expected, measured): expected < measured
+ */
+#define EXPECT_LT(expected, seen) \
+ __EXPECT(expected, seen, <, 0)
+
+/**
+ * EXPECT_LE(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * EXPECT_LE(expected, measured): expected <= measured
+ */
+#define EXPECT_LE(expected, seen) \
+ __EXPECT(expected, seen, <=, 0)
+
+/**
+ * EXPECT_GT(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * EXPECT_GT(expected, measured): expected > measured
+ */
+#define EXPECT_GT(expected, seen) \
+ __EXPECT(expected, seen, >, 0)
+
+/**
+ * EXPECT_GE(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * EXPECT_GE(expected, measured): expected >= measured
+ */
+#define EXPECT_GE(expected, seen) \
+ __EXPECT(expected, seen, >=, 0)
+
+/**
+ * EXPECT_NULL(seen)
+ *
+ * @seen: measured value
+ *
+ * EXPECT_NULL(measured): NULL == measured
+ */
+#define EXPECT_NULL(seen) \
+ __EXPECT(NULL, seen, ==, 0)
+
+/**
+ * EXPECT_TRUE(seen)
+ *
+ * @seen: measured value
+ *
+ * EXPECT_TRUE(measured): 0 != measured
+ */
+#define EXPECT_TRUE(seen) \
+ EXPECT_NE(0, seen)
+
+/**
+ * EXPECT_FALSE(seen)
+ *
+ * @seen: measured value
+ *
+ * EXPECT_FALSE(measured): 0 == measured
+ */
+#define EXPECT_FALSE(seen) \
+ EXPECT_EQ(0, seen)
+
+/**
+ * EXPECT_STREQ(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * EXPECT_STREQ(expected, measured): !strcmp(expected, measured)
+ */
+#define EXPECT_STREQ(expected, seen) \
+ __EXPECT_STR(expected, seen, ==, 0)
+
+/**
+ * EXPECT_STRNE(expected, seen)
+ *
+ * @expected: expected value
+ * @seen: measured value
+ *
+ * EXPECT_STRNE(expected, measured): strcmp(expected, measured)
+ */
+#define EXPECT_STRNE(expected, seen) \
+ __EXPECT_STR(expected, seen, !=, 0)
#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
@@ -532,4 +723,4 @@ static void __attribute__((constructor)) __constructor_order_first(void)
__constructor_order = _CONSTRUCTOR_ORDER_FORWARD;
}
-#endif /* TEST_HARNESS_H_ */
+#endif /* __KSELFTEST_HARNESS_H */
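
The EXPECT_*/ASSERT_* distinction documented above, as a small self-contained
sketch (hypothetical test; assumes the header sits one directory up, as in the
seccomp conversion below):

	#include "../kselftest_harness.h"

	TEST(expect_vs_assert)
	{
		int val = -1;

		/* EXPECT_* records the failure and keeps running the test body. */
		EXPECT_EQ(0, val) {
			TH_LOG("val is %d, continuing anyway", val);
		}
		/* ASSERT_* stops the test body as soon as it fails. */
		ASSERT_NE(-1, val);
		/* Not reached while val == -1. */
		EXPECT_EQ(1, val + 2);
	}

	TEST_HARNESS_MAIN
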
diff --git a/tools/testing/selftests/lib/bitmap.sh b/tools/testing/selftests/lib/bitmap.sh
index 2da187b6ddad..b073c22a3435 100755
--- a/tools/testing/selftests/lib/bitmap.sh
+++ b/tools/testing/selftests/lib/bitmap.sh
@@ -1,5 +1,9 @@
#!/bin/sh
# Runs bitmap infrastructure tests using test_bitmap kernel module
+if ! /sbin/modprobe -q -n test_bitmap; then
+ echo "bitmap: [SKIP]"
+ exit 77
+fi
if /sbin/modprobe -q test_bitmap; then
/sbin/modprobe -q -r test_bitmap
diff --git a/tools/testing/selftests/lib/printf.sh b/tools/testing/selftests/lib/printf.sh
index 4fdc70fe6980..cbf3b124bd94 100755
--- a/tools/testing/selftests/lib/printf.sh
+++ b/tools/testing/selftests/lib/printf.sh
@@ -1,5 +1,9 @@
#!/bin/sh
# Runs printf infrastructure using test_printf kernel module
+if ! /sbin/modprobe -q -n test_printf; then
+ echo "printf: [SKIP]"
+ exit 77
+fi
if /sbin/modprobe -q test_printf; then
/sbin/modprobe -q -r test_printf
diff --git a/tools/testing/selftests/membarrier/membarrier_test.c b/tools/testing/selftests/membarrier/membarrier_test.c
index 535f0fef4d0b..21399fcf1a59 100644
--- a/tools/testing/selftests/membarrier/membarrier_test.c
+++ b/tools/testing/selftests/membarrier/membarrier_test.c
@@ -7,56 +7,63 @@
#include "../kselftest.h"
-enum test_membarrier_status {
- TEST_MEMBARRIER_PASS = 0,
- TEST_MEMBARRIER_FAIL,
- TEST_MEMBARRIER_SKIP,
-};
-
static int sys_membarrier(int cmd, int flags)
{
return syscall(__NR_membarrier, cmd, flags);
}
-static enum test_membarrier_status test_membarrier_cmd_fail(void)
+static int test_membarrier_cmd_fail(void)
{
int cmd = -1, flags = 0;
if (sys_membarrier(cmd, flags) != -1) {
- printf("membarrier: Wrong command should fail but passed.\n");
- return TEST_MEMBARRIER_FAIL;
+ ksft_exit_fail_msg(
+ "sys membarrier invalid command test: command = %d, flags = %d. Should fail, but passed\n",
+ cmd, flags);
}
- return TEST_MEMBARRIER_PASS;
+
+ ksft_test_result_pass(
+ "sys membarrier invalid command test: command = %d, flags = %d. Failed as expected\n",
+ cmd, flags);
+ return 0;
}
-static enum test_membarrier_status test_membarrier_flags_fail(void)
+static int test_membarrier_flags_fail(void)
{
int cmd = MEMBARRIER_CMD_QUERY, flags = 1;
if (sys_membarrier(cmd, flags) != -1) {
- printf("membarrier: Wrong flags should fail but passed.\n");
- return TEST_MEMBARRIER_FAIL;
+ ksft_exit_fail_msg(
+ "sys membarrier MEMBARRIER_CMD_QUERY invalid flags test: flags = %d. Should fail, but passed\n",
+ flags);
}
- return TEST_MEMBARRIER_PASS;
+
+ ksft_test_result_pass(
+ "sys membarrier MEMBARRIER_CMD_QUERY invalid flags test: flags = %d. Failed as expected\n",
+ flags);
+ return 0;
}
-static enum test_membarrier_status test_membarrier_success(void)
+static int test_membarrier_success(void)
{
int cmd = MEMBARRIER_CMD_SHARED, flags = 0;
+ const char *test_name = "sys membarrier MEMBARRIER_CMD_SHARED\n";
if (sys_membarrier(cmd, flags) != 0) {
- printf("membarrier: Executing MEMBARRIER_CMD_SHARED failed. %s.\n",
- strerror(errno));
- return TEST_MEMBARRIER_FAIL;
+ ksft_exit_fail_msg(
+ "sys membarrier MEMBARRIER_CMD_SHARED test: flags = %d\n",
+ flags);
}
- printf("membarrier: MEMBARRIER_CMD_SHARED success.\n");
- return TEST_MEMBARRIER_PASS;
+ ksft_test_result_pass(
+ "sys membarrier MEMBARRIER_CMD_SHARED test: flags = %d\n",
+ flags);
+ return 0;
}
-static enum test_membarrier_status test_membarrier(void)
+static int test_membarrier(void)
{
- enum test_membarrier_status status;
+ int status;
status = test_membarrier_cmd_fail();
if (status)
@@ -67,52 +74,38 @@ static enum test_membarrier_status test_membarrier(void)
status = test_membarrier_success();
if (status)
return status;
- return TEST_MEMBARRIER_PASS;
+ return 0;
}
-static enum test_membarrier_status test_membarrier_query(void)
+static int test_membarrier_query(void)
{
int flags = 0, ret;
- printf("membarrier MEMBARRIER_CMD_QUERY ");
ret = sys_membarrier(MEMBARRIER_CMD_QUERY, flags);
if (ret < 0) {
- printf("failed. %s.\n", strerror(errno));
- switch (errno) {
- case ENOSYS:
+ if (errno == ENOSYS) {
/*
* It is valid to build a kernel with
* CONFIG_MEMBARRIER=n. However, this skips the tests.
*/
- return TEST_MEMBARRIER_SKIP;
- case EINVAL:
- default:
- return TEST_MEMBARRIER_FAIL;
+ ksft_exit_skip(
+ "sys membarrier (CONFIG_MEMBARRIER) is disabled.\n");
}
+ ksft_exit_fail_msg("sys_membarrier() failed\n");
}
- if (!(ret & MEMBARRIER_CMD_SHARED)) {
- printf("command MEMBARRIER_CMD_SHARED is not supported.\n");
- return TEST_MEMBARRIER_FAIL;
- }
- printf("syscall available.\n");
- return TEST_MEMBARRIER_PASS;
+ if (!(ret & MEMBARRIER_CMD_SHARED))
+ ksft_exit_fail_msg("sys_membarrier is not supported.\n");
+
+ ksft_test_result_pass("sys_membarrier available\n");
+ return 0;
}
int main(int argc, char **argv)
{
- switch (test_membarrier_query()) {
- case TEST_MEMBARRIER_FAIL:
- return ksft_exit_fail();
- case TEST_MEMBARRIER_SKIP:
- return ksft_exit_skip();
- }
- switch (test_membarrier()) {
- case TEST_MEMBARRIER_FAIL:
- return ksft_exit_fail();
- case TEST_MEMBARRIER_SKIP:
- return ksft_exit_skip();
- }
+ ksft_print_header();
+
+ test_membarrier_query();
+ test_membarrier();
- printf("membarrier: tests done!\n");
- return ksft_exit_pass();
+ ksft_exit_pass();
}
diff --git a/tools/testing/selftests/memfd/Makefile b/tools/testing/selftests/memfd/Makefile
index 79891d033de1..ad8a0897e47f 100644
--- a/tools/testing/selftests/memfd/Makefile
+++ b/tools/testing/selftests/memfd/Makefile
@@ -7,7 +7,7 @@ TEST_PROGS := run_fuse_test.sh
TEST_GEN_FILES := memfd_test fuse_mnt fuse_test
fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags)
-fuse_mnt: LDFLAGS += $(shell pkg-config fuse --libs)
include ../lib.mk
+$(OUTPUT)/fuse_mnt: LDLIBS += $(shell pkg-config fuse --libs)
diff --git a/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh b/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh
index 6cddde0b96f8..35025ce9ca66 100755
--- a/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh
+++ b/tools/testing/selftests/memory-hotplug/mem-on-off-test.sh
@@ -22,6 +22,11 @@ prerequisite()
echo $msg memory hotplug is not supported >&2
exit 0
fi
+
+ if ! grep -q 1 $SYSFS/devices/system/memory/memory*/removable; then
+ echo $msg no hot-pluggable memory >&2
+ exit 0
+ fi
}
#
@@ -39,7 +44,7 @@ hotpluggable_memory()
done
}
-hotplaggable_offline_memory()
+hotpluggable_offline_memory()
{
hotpluggable_memory offline
}
@@ -75,9 +80,12 @@ online_memory_expect_success()
if ! online_memory $memory; then
echo $FUNCNAME $memory: unexpected fail >&2
+ return 1
elif ! memory_is_online $memory; then
echo $FUNCNAME $memory: unexpected offline >&2
+ return 1
fi
+ return 0
}
online_memory_expect_fail()
@@ -86,9 +94,12 @@ online_memory_expect_fail()
if online_memory $memory 2> /dev/null; then
echo $FUNCNAME $memory: unexpected success >&2
+ return 1
elif ! memory_is_offline $memory; then
echo $FUNCNAME $memory: unexpected online >&2
+ return 1
fi
+ return 0
}
offline_memory_expect_success()
@@ -97,9 +108,12 @@ offline_memory_expect_success()
if ! offline_memory $memory; then
echo $FUNCNAME $memory: unexpected fail >&2
+ return 1
elif ! memory_is_offline $memory; then
echo $FUNCNAME $memory: unexpected offline >&2
+ return 1
fi
+ return 0
}
offline_memory_expect_fail()
@@ -108,14 +122,18 @@ offline_memory_expect_fail()
if offline_memory $memory 2> /dev/null; then
echo $FUNCNAME $memory: unexpected success >&2
+ return 1
elif ! memory_is_online $memory; then
echo $FUNCNAME $memory: unexpected offline >&2
+ return 1
fi
+ return 0
}
error=-12
priority=0
ratio=10
+retval=0
while getopts e:hp:r: opt; do
case $opt in
@@ -131,6 +149,10 @@ while getopts e:hp:r: opt; do
;;
r)
ratio=$OPTARG
+ if [ "$ratio" -gt 100 ] || [ "$ratio" -lt 0 ]; then
+ echo "The percentage should be an integer within 0~100 range"
+ exit 1
+ fi
;;
esac
done
@@ -143,35 +165,58 @@ fi
prerequisite
echo "Test scope: $ratio% hotplug memory"
-echo -e "\t online all hotplug memory in offline state"
-echo -e "\t offline $ratio% hotplug memory in online state"
-echo -e "\t online all hotplug memory in offline state"
#
# Online all hot-pluggable memory
#
-for memory in `hotplaggable_offline_memory`; do
- echo offline-online $memory
- online_memory_expect_success $memory
-done
+hotpluggable_num=`hotpluggable_offline_memory | wc -l`
+echo -e "\t online all hot-pluggable memory in offline state:"
+if [ "$hotpluggable_num" -gt 0 ]; then
+ for memory in `hotpluggable_offline_memory`; do
+ echo "offline->online memory$memory"
+ if ! online_memory_expect_success $memory; then
+ retval=1
+ fi
+ done
+else
+ echo -e "\t\t SKIPPED - no hot-pluggable memory in offline state"
+fi
#
# Offline $ratio percent of hot-pluggable memory
#
+hotpluggable_num=`hotpluggable_online_memory | wc -l`
+target=`echo "a=$hotpluggable_num*$ratio; if ( a%100 ) a/100+1 else a/100" | bc`
+echo -e "\t offline $ratio% hot-pluggable memory in online state"
+echo -e "\t trying to offline $target out of $hotpluggable_num memory block(s):"
for memory in `hotpluggable_online_memory`; do
- if [ $((RANDOM % 100)) -lt $ratio ]; then
- echo online-offline $memory
- offline_memory_expect_success $memory
+ if [ "$target" -gt 0 ]; then
+ echo "online->offline memory$memory"
+ if offline_memory_expect_success $memory; then
+ target=$(($target - 1))
+ fi
fi
done
+if [ "$target" -gt 0 ]; then
+ retval=1
+ echo -e "\t\t FAILED - unable to offline some memory blocks, device busy?"
+fi
#
# Online all hot-pluggable memory again
#
-for memory in `hotplaggable_offline_memory`; do
- echo offline-online $memory
- online_memory_expect_success $memory
-done
+hotpluggable_num=`hotpluggable_offline_memory | wc -l`
+echo -e "\t online all hot-pluggable memory in offline state:"
+if [ "$hotpluggable_num" -gt 0 ]; then
+ for memory in `hotpluggable_offline_memory`; do
+ echo "offline->online memory$memory"
+ if ! online_memory_expect_success $memory; then
+ retval=1
+ fi
+ done
+else
+ echo -e "\t\t SKIPPED - no hot-pluggable memory in offline state"
+fi
#
# Test with memory notifier error injection
@@ -189,15 +234,16 @@ prerequisite_extra()
if [ ! -d "$DEBUGFS" ]; then
echo $msg debugfs is not mounted >&2
- exit 0
+ exit $retval
fi
if [ ! -d $NOTIFIER_ERR_INJECT_DIR ]; then
echo $msg memory-notifier-error-inject module is not available >&2
- exit 0
+ exit $retval
fi
}
+echo -e "\t Test with memory notifier error injection"
prerequisite_extra
#
@@ -214,7 +260,7 @@ done
# Test memory hot-add error handling (offline => online)
#
echo $error > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_ONLINE/error
-for memory in `hotplaggable_offline_memory`; do
+for memory in `hotpluggable_offline_memory`; do
online_memory_expect_fail $memory
done
@@ -222,7 +268,7 @@ done
# Online all hot-pluggable memory
#
echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_ONLINE/error
-for memory in `hotplaggable_offline_memory`; do
+for memory in `hotpluggable_offline_memory`; do
online_memory_expect_success $memory
done
@@ -236,3 +282,5 @@ done
echo 0 > $NOTIFIER_ERR_INJECT_DIR/actions/MEM_GOING_OFFLINE/error
/sbin/modprobe -q -r memory-notifier-error-inject
+
+exit $retval
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 35cbb4cba410..f6c9dbf478f8 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -3,8 +3,6 @@
CFLAGS = -Wall -Wl,--no-as-needed -O2 -g
CFLAGS += -I../../../../usr/include/
-reuseport_bpf_numa: LDFLAGS += -lnuma
-
TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh
TEST_GEN_FILES = socket
TEST_GEN_FILES += psock_fanout psock_tpacket
@@ -13,3 +11,4 @@ TEST_GEN_FILES += reuseport_dualstack
include ../lib.mk
+$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma
diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
index 778f5fbfd784..f4241339edd2 100644
--- a/tools/testing/selftests/powerpc/benchmarks/context_switch.c
+++ b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
@@ -258,9 +258,14 @@ static unsigned long xchg(unsigned long *p, unsigned long val)
return __atomic_exchange_n(p, val, __ATOMIC_SEQ_CST);
}
+static int processes;
+
static int mutex_lock(unsigned long *m)
{
int c;
+ int flags = FUTEX_WAIT;
+ if (!processes)
+ flags |= FUTEX_PRIVATE_FLAG;
c = cmpxchg(m, 0, 1);
if (!c)
@@ -270,7 +275,7 @@ static int mutex_lock(unsigned long *m)
c = xchg(m, 2);
while (c) {
- sys_futex(m, FUTEX_WAIT, 2, NULL, NULL, 0);
+ sys_futex(m, flags, 2, NULL, NULL, 0);
c = xchg(m, 2);
}
@@ -279,12 +284,16 @@ static int mutex_lock(unsigned long *m)
static int mutex_unlock(unsigned long *m)
{
+ int flags = FUTEX_WAKE;
+ if (!processes)
+ flags |= FUTEX_PRIVATE_FLAG;
+
if (*m == 2)
*m = 0;
else if (xchg(m, 0) == 1)
return 0;
- sys_futex(m, FUTEX_WAKE, 1, NULL, NULL, 0);
+ sys_futex(m, flags, 1, NULL, NULL, 0);
return 0;
}
@@ -293,26 +302,32 @@ static unsigned long *m1, *m2;
static void futex_setup(int cpu1, int cpu2)
{
- int shmid;
- void *shmaddr;
+ if (!processes) {
+ static unsigned long _m1, _m2;
+ m1 = &_m1;
+ m2 = &_m2;
+ } else {
+ int shmid;
+ void *shmaddr;
- shmid = shmget(IPC_PRIVATE, getpagesize(), SHM_R | SHM_W);
- if (shmid < 0) {
- perror("shmget");
- exit(1);
- }
+ shmid = shmget(IPC_PRIVATE, getpagesize(), SHM_R | SHM_W);
+ if (shmid < 0) {
+ perror("shmget");
+ exit(1);
+ }
- shmaddr = shmat(shmid, NULL, 0);
- if (shmaddr == (char *)-1) {
- perror("shmat");
- shmctl(shmid, IPC_RMID, NULL);
- exit(1);
- }
+ shmaddr = shmat(shmid, NULL, 0);
+ if (shmaddr == (char *)-1) {
+ perror("shmat");
+ shmctl(shmid, IPC_RMID, NULL);
+ exit(1);
+ }
- shmctl(shmid, IPC_RMID, NULL);
+ shmctl(shmid, IPC_RMID, NULL);
- m1 = shmaddr;
- m2 = shmaddr + sizeof(*m1);
+ m1 = shmaddr;
+ m2 = shmaddr + sizeof(*m1);
+ }
*m1 = 0;
*m2 = 0;
@@ -352,8 +367,6 @@ static struct actions futex_actions = {
.thread2 = futex_thread2,
};
-static int processes;
-
static struct option options[] = {
{ "test", required_argument, 0, 't' },
{ "process", no_argument, &processes, 1 },
diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile
index 5fa6fd2246b1..aeb0c805f3ca 100644
--- a/tools/testing/selftests/seccomp/Makefile
+++ b/tools/testing/selftests/seccomp/Makefile
@@ -4,3 +4,5 @@ LDFLAGS += -lpthread
include ../lib.mk
+$(TEST_GEN_PROGS): seccomp_bpf.c ../kselftest_harness.h
+ $(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 00a928b833d0..73f5ea6778ce 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -37,7 +37,7 @@
#include <unistd.h>
#include <sys/syscall.h>
-#include "test_harness.h"
+#include "../kselftest_harness.h"
#ifndef PR_SET_PTRACER
# define PR_SET_PTRACER 0x59616d61
@@ -1310,7 +1310,7 @@ void change_syscall(struct __test_metadata *_metadata,
iov.iov_len = sizeof(regs);
ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov);
#endif
- EXPECT_EQ(0, ret);
+ EXPECT_EQ(0, ret) {}
#if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \
defined(__s390__) || defined(__hppa__)
diff --git a/tools/testing/selftests/size/get_size.c b/tools/testing/selftests/size/get_size.c
index 2d1af7cca463..d4b59ab979a0 100644
--- a/tools/testing/selftests/size/get_size.c
+++ b/tools/testing/selftests/size/get_size.c
@@ -75,26 +75,31 @@ void _start(void)
int ccode;
struct sysinfo info;
unsigned long used;
+ static const char *test_name = " get runtime memory use\n";
- print("Testing system size.\n");
- print("1..1\n");
+ print("TAP version 13\n");
+ print("# Testing system size.\n");
ccode = sysinfo(&info);
if (ccode < 0) {
- print("not ok 1 get runtime memory use\n");
- print("# could not get sysinfo\n");
+ print("not ok 1");
+ print(test_name);
+ print(" ---\n reason: \"could not get sysinfo\"\n ...\n");
_exit(ccode);
}
+ print("ok 1");
+ print(test_name);
+
/* ignore cache complexities for now */
used = info.totalram - info.freeram - info.bufferram;
- print_k_value("ok 1 get runtime memory use # size = ", used,
- info.mem_unit);
-
print("# System runtime memory report (units in Kilobytes):\n");
- print_k_value("# Total: ", info.totalram, info.mem_unit);
- print_k_value("# Free: ", info.freeram, info.mem_unit);
- print_k_value("# Buffer: ", info.bufferram, info.mem_unit);
- print_k_value("# In use: ", used, info.mem_unit);
+ print(" ---\n");
+ print_k_value(" Total: ", info.totalram, info.mem_unit);
+ print_k_value(" Free: ", info.freeram, info.mem_unit);
+ print_k_value(" Buffer: ", info.bufferram, info.mem_unit);
+ print_k_value(" In use: ", used, info.mem_unit);
+ print(" ...\n");
+ print("1..1\n");
_exit(0);
}
diff --git a/tools/testing/selftests/sync/sync_test.c b/tools/testing/selftests/sync/sync_test.c
index 9ea08d9f0b13..62fa666e501a 100644
--- a/tools/testing/selftests/sync/sync_test.c
+++ b/tools/testing/selftests/sync/sync_test.c
@@ -29,6 +29,7 @@
#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
+#include <sys/stat.h>
#include <sys/wait.h>
#include "synctest.h"
@@ -52,10 +53,22 @@ static int run_test(int (*test)(void), char *name)
exit(test());
}
+static int sync_api_supported(void)
+{
+ struct stat sbuf;
+
+ return 0 == stat("/sys/kernel/debug/sync/sw_sync", &sbuf);
+}
+
int main(void)
{
int err = 0;
+ if (!sync_api_supported()) {
+ printf("SKIP: Sync framework not supported by kernel\n");
+ return 0;
+ }
+
printf("[RUN]\tTesting sync framework\n");
err += RUN_TEST(test_alloc_timeline);
diff --git a/tools/testing/selftests/sysctl/common_tests b/tools/testing/selftests/sysctl/common_tests
index 17d534b1b7b4..b6862322962f 100644
--- a/tools/testing/selftests/sysctl/common_tests
+++ b/tools/testing/selftests/sysctl/common_tests
@@ -24,6 +24,14 @@ verify()
return 0
}
+exit_test()
+{
+ if [ ! -z ${old_strict} ]; then
+ echo ${old_strict} > ${WRITES_STRICT}
+ fi
+ exit $rc
+}
+
trap 'set_orig; rm -f "${TEST_FILE}"' EXIT
rc=0
@@ -63,6 +71,20 @@ else
echo "ok"
fi
+echo -n "Checking write strict setting ... "
+WRITES_STRICT="${SYSCTL}/kernel/sysctl_writes_strict"
+if [ ! -e ${WRITES_STRICT} ]; then
+ echo "FAIL, but skip in case of old kernel" >&2
+else
+ old_strict=$(cat ${WRITES_STRICT})
+ if [ "$old_strict" = "1" ]; then
+ echo "ok"
+ else
+ echo "FAIL, strict value is 0 but force to 1 to continue" >&2
+ echo "1" > ${WRITES_STRICT}
+ fi
+fi
+
# Now that we've validated the sanity of "set_test" and "set_orig",
# we can use those functions to set starting states before running
# specific behavioral tests.
diff --git a/tools/testing/selftests/sysctl/run_numerictests b/tools/testing/selftests/sysctl/run_numerictests
index 8510f93f2d14..e6e76c93d948 100755
--- a/tools/testing/selftests/sysctl/run_numerictests
+++ b/tools/testing/selftests/sysctl/run_numerictests
@@ -7,4 +7,4 @@ TEST_STR=$(( $ORIG + 1 ))
. ./common_tests
-exit $rc
+exit_test
diff --git a/tools/testing/selftests/sysctl/run_stringtests b/tools/testing/selftests/sysctl/run_stringtests
index 90a9293d520c..857ec667fb02 100755
--- a/tools/testing/selftests/sysctl/run_stringtests
+++ b/tools/testing/selftests/sysctl/run_stringtests
@@ -74,4 +74,4 @@ else
echo "ok"
fi
-exit $rc
+exit_test
diff --git a/tools/testing/selftests/vm/virtual_address_range.c b/tools/testing/selftests/vm/virtual_address_range.c
index 3b02aa6eb9da..1830d66a6f0e 100644
--- a/tools/testing/selftests/vm/virtual_address_range.c
+++ b/tools/testing/selftests/vm/virtual_address_range.c
@@ -10,7 +10,6 @@
#include <string.h>
#include <unistd.h>
#include <errno.h>
-#include <numaif.h>
#include <sys/mman.h>
#include <sys/time.h>
@@ -32,15 +31,33 @@
* different areas one below 128TB and one above 128TB
* till it reaches 512TB. One with size 128TB and the
* other being 384TB.
+ *
+ * On Arm64 the address space is 256TB and no high mappings
+ * are supported so far.
*/
+
#define NR_CHUNKS_128TB 8192UL /* Number of 16GB chunks for 128TB */
-#define NR_CHUNKS_384TB 24576UL /* Number of 16GB chunks for 384TB */
+#define NR_CHUNKS_256TB (NR_CHUNKS_128TB * 2UL)
+#define NR_CHUNKS_384TB (NR_CHUNKS_128TB * 3UL)
#define ADDR_MARK_128TB (1UL << 47) /* First address beyond 128TB */
+#define ADDR_MARK_256TB (1UL << 48) /* First address beyond 256TB */
+
+#ifdef __aarch64__
+#define HIGH_ADDR_MARK ADDR_MARK_256TB
+#define HIGH_ADDR_SHIFT 49
+#define NR_CHUNKS_LOW NR_CHUNKS_256TB
+#define NR_CHUNKS_HIGH 0
+#else
+#define HIGH_ADDR_MARK ADDR_MARK_128TB
+#define HIGH_ADDR_SHIFT 48
+#define NR_CHUNKS_LOW NR_CHUNKS_128TB
+#define NR_CHUNKS_HIGH NR_CHUNKS_384TB
+#endif
static char *hind_addr(void)
{
- int bits = 48 + rand() % 15;
+ int bits = HIGH_ADDR_SHIFT + rand() % (63 - HIGH_ADDR_SHIFT);
return (char *) (1UL << bits);
}
@@ -50,14 +67,14 @@ static int validate_addr(char *ptr, int high_addr)
unsigned long addr = (unsigned long) ptr;
if (high_addr) {
- if (addr < ADDR_MARK_128TB) {
+ if (addr < HIGH_ADDR_MARK) {
printf("Bad address %lx\n", addr);
return 1;
}
return 0;
}
- if (addr > ADDR_MARK_128TB) {
+ if (addr > HIGH_ADDR_MARK) {
printf("Bad address %lx\n", addr);
return 1;
}
@@ -79,12 +96,12 @@ static int validate_lower_address_hint(void)
int main(int argc, char *argv[])
{
- char *ptr[NR_CHUNKS_128TB];
- char *hptr[NR_CHUNKS_384TB];
+ char *ptr[NR_CHUNKS_LOW];
+ char *hptr[NR_CHUNKS_HIGH];
char *hint;
unsigned long i, lchunks, hchunks;
- for (i = 0; i < NR_CHUNKS_128TB; i++) {
+ for (i = 0; i < NR_CHUNKS_LOW; i++) {
ptr[i] = mmap(NULL, MAP_CHUNK_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
@@ -99,7 +116,7 @@ int main(int argc, char *argv[])
}
lchunks = i;
- for (i = 0; i < NR_CHUNKS_384TB; i++) {
+ for (i = 0; i < NR_CHUNKS_HIGH; i++) {
hint = hind_addr();
hptr[i] = mmap(hint, MAP_CHUNK_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
diff --git a/usr/Kconfig b/usr/Kconfig
index ad0543e21760..d53112fdbf5a 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -36,10 +36,8 @@ config INITRAMFS_ROOT_UID
depends on INITRAMFS_SOURCE!=""
default "0"
help
- This setting is only meaningful if the INITRAMFS_SOURCE is
- contains a directory. Setting this user ID (UID) to something
- other than "0" will cause all files owned by that UID to be
- owned by user root in the initial ramdisk image.
+ If INITRAMFS_SOURCE points to a directory, files owned by this UID
+ (-1 = current user) will be owned by root in the resulting image.
If you are not sure, leave it set to "0".
@@ -48,15 +46,13 @@ config INITRAMFS_ROOT_GID
depends on INITRAMFS_SOURCE!=""
default "0"
help
- This setting is only meaningful if the INITRAMFS_SOURCE is
- contains a directory. Setting this group ID (GID) to something
- other than "0" will cause all files owned by that GID to be
- owned by group root in the initial ramdisk image.
+ If INITRAMFS_SOURCE points to a directory, files owned by this GID
+ (-1 = current group) will be owned by root in the resulting image.
If you are not sure, leave it set to "0".
config RD_GZIP
- bool "Support initial ramdisks compressed using gzip"
+ bool "Support initial ramdisk/ramfs compressed using gzip"
depends on BLK_DEV_INITRD
default y
select DECOMPRESS_GZIP
@@ -65,7 +61,7 @@ config RD_GZIP
If unsure, say Y.
config RD_BZIP2
- bool "Support initial ramdisks compressed using bzip2"
+ bool "Support initial ramdisk/ramfs compressed using bzip2"
default y
depends on BLK_DEV_INITRD
select DECOMPRESS_BZIP2
@@ -74,7 +70,7 @@ config RD_BZIP2
If unsure, say N.
config RD_LZMA
- bool "Support initial ramdisks compressed using LZMA"
+ bool "Support initial ramdisk/ramfs compressed using LZMA"
default y
depends on BLK_DEV_INITRD
select DECOMPRESS_LZMA
@@ -83,7 +79,7 @@ config RD_LZMA
If unsure, say N.
config RD_XZ
- bool "Support initial ramdisks compressed using XZ"
+ bool "Support initial ramdisk/ramfs compressed using XZ"
depends on BLK_DEV_INITRD
default y
select DECOMPRESS_XZ
@@ -92,7 +88,7 @@ config RD_XZ
If unsure, say N.
config RD_LZO
- bool "Support initial ramdisks compressed using LZO"
+ bool "Support initial ramdisk/ramfs compressed using LZO"
default y
depends on BLK_DEV_INITRD
select DECOMPRESS_LZO
@@ -101,7 +97,7 @@ config RD_LZO
If unsure, say N.
config RD_LZ4
- bool "Support initial ramdisks compressed using LZ4"
+ bool "Support initial ramdisk/ramfs compressed using LZ4"
default y
depends on BLK_DEV_INITRD
select DECOMPRESS_LZ4
diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
index 528af4b2d09e..79c7c357804b 100644
--- a/virt/kvm/arm/aarch32.c
+++ b/virt/kvm/arm/aarch32.c
@@ -60,7 +60,7 @@ static const unsigned short cc_map[16] = {
/*
* Check if a trapped instruction should have been executed or not.
*/
-bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
+bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
{
unsigned long cpsr;
u32 cpsr_cond;
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 5976609ef27c..8e89d63005c7 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -21,6 +21,7 @@
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/uaccess.h>
#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
@@ -35,6 +36,16 @@ static struct timecounter *timecounter;
static unsigned int host_vtimer_irq;
static u32 host_vtimer_irq_flags;
+static const struct kvm_irq_level default_ptimer_irq = {
+ .irq = 30,
+ .level = 1,
+};
+
+static const struct kvm_irq_level default_vtimer_irq = {
+ .irq = 27,
+ .level = 1,
+};
+
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
vcpu_vtimer(vcpu)->active_cleared_last = false;
@@ -95,7 +106,7 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
* If the vcpu is blocked we want to wake it up so that it will see
* the timer has expired when entering the guest.
*/
- kvm_vcpu_kick(vcpu);
+ kvm_vcpu_wake_up(vcpu);
}
static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
@@ -215,7 +226,8 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
if (likely(irqchip_in_kernel(vcpu->kvm))) {
ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
timer_ctx->irq.irq,
- timer_ctx->irq.level);
+ timer_ctx->irq.level,
+ timer_ctx);
WARN_ON(ret);
}
}
@@ -445,23 +457,12 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
kvm_timer_update_state(vcpu);
}
-int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
- const struct kvm_irq_level *virt_irq,
- const struct kvm_irq_level *phys_irq)
+int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
{
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
/*
- * The vcpu timer irq number cannot be determined in
- * kvm_timer_vcpu_init() because it is called much before
- * kvm_vcpu_set_target(). To handle this, we determine
- * vcpu timer irq number when the vcpu is reset.
- */
- vtimer->irq.irq = virt_irq->irq;
- ptimer->irq.irq = phys_irq->irq;
-
- /*
* The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
* and to 0 for ARMv7. We provide an implementation that always
* resets the timer to be disabled and unmasked and is compliant with
@@ -496,6 +497,8 @@ static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+ struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
/* Synchronize cntvoff across all vtimers of a VM. */
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
@@ -504,6 +507,9 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
timer->timer.function = kvm_timer_expire;
+
+ vtimer->irq.irq = default_vtimer_irq.irq;
+ ptimer->irq.irq = default_ptimer_irq.irq;
}
static void kvm_timer_init_interrupt(void *info)
@@ -613,6 +619,30 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq);
}
+static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
+{
+ int vtimer_irq, ptimer_irq;
+ int i, ret;
+
+ vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
+ ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
+ if (ret)
+ return false;
+
+ ptimer_irq = vcpu_ptimer(vcpu)->irq.irq;
+ ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu));
+ if (ret)
+ return false;
+
+ kvm_for_each_vcpu(i, vcpu, vcpu->kvm) {
+ if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq ||
+ vcpu_ptimer(vcpu)->irq.irq != ptimer_irq)
+ return false;
+ }
+
+ return true;
+}
+
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -632,6 +662,11 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
if (!vgic_initialized(vcpu->kvm))
return -ENODEV;
+ if (!timer_irqs_are_valid(vcpu)) {
+ kvm_debug("incorrectly configured timer irqs\n");
+ return -EINVAL;
+ }
+
/*
* Find the physical IRQ number corresponding to the host_vtimer_irq
*/
@@ -681,3 +716,79 @@ void kvm_timer_init_vhe(void)
val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
write_sysreg(val, cnthctl_el2);
}
+
+static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
+{
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
+ vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
+ }
+}
+
+int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+{
+ int __user *uaddr = (int __user *)(long)attr->addr;
+ struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
+ struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+ int irq;
+
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return -EINVAL;
+
+ if (get_user(irq, uaddr))
+ return -EFAULT;
+
+ if (!(irq_is_ppi(irq)))
+ return -EINVAL;
+
+ if (vcpu->arch.timer_cpu.enabled)
+ return -EBUSY;
+
+ switch (attr->attr) {
+ case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
+ set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq);
+ break;
+ case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
+ set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq);
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+{
+ int __user *uaddr = (int __user *)(long)attr->addr;
+ struct arch_timer_context *timer;
+ int irq;
+
+ switch (attr->attr) {
+ case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
+ timer = vcpu_vtimer(vcpu);
+ break;
+ case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
+ timer = vcpu_ptimer(vcpu);
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ irq = timer->irq.irq;
+ return put_user(irq, uaddr);
+}
+
+int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+{
+ switch (attr->attr) {
+ case KVM_ARM_VCPU_TIMER_IRQ_VTIMER:
+ case KVM_ARM_VCPU_TIMER_IRQ_PTIMER:
+ return 0;
+ }
+
+ return -ENXIO;
+}
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 3417e184c8e1..a39a1e161e63 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -368,6 +368,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
kvm_timer_vcpu_put(vcpu);
}
+static void vcpu_power_off(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.power_off = true;
+ kvm_make_request(KVM_REQ_SLEEP, vcpu);
+ kvm_vcpu_kick(vcpu);
+}
+
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
@@ -387,7 +394,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
vcpu->arch.power_off = false;
break;
case KVM_MP_STATE_STOPPED:
- vcpu->arch.power_off = true;
+ vcpu_power_off(vcpu);
break;
default:
return -EINVAL;
@@ -520,6 +527,10 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
}
ret = kvm_timer_enable(vcpu);
+ if (ret)
+ return ret;
+
+ ret = kvm_arm_pmu_v3_enable(vcpu);
return ret;
}
@@ -536,21 +547,7 @@ void kvm_arm_halt_guest(struct kvm *kvm)
kvm_for_each_vcpu(i, vcpu, kvm)
vcpu->arch.pause = true;
- kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
-}
-
-void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.pause = true;
- kvm_vcpu_kick(vcpu);
-}
-
-void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
-{
- struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
-
- vcpu->arch.pause = false;
- swake_up(wq);
+ kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
}
void kvm_arm_resume_guest(struct kvm *kvm)
@@ -558,16 +555,23 @@ void kvm_arm_resume_guest(struct kvm *kvm)
int i;
struct kvm_vcpu *vcpu;
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvm_arm_resume_vcpu(vcpu);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ vcpu->arch.pause = false;
+ swake_up(kvm_arch_vcpu_wq(vcpu));
+ }
}
-static void vcpu_sleep(struct kvm_vcpu *vcpu)
+static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
{
struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
(!vcpu->arch.pause)));
+
+ if (vcpu->arch.power_off || vcpu->arch.pause) {
+ /* Awaken to handle a signal, request we sleep again later. */
+ kvm_make_request(KVM_REQ_SLEEP, vcpu);
+ }
}
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
@@ -575,6 +579,20 @@ static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
return vcpu->arch.target >= 0;
}
+static void check_vcpu_requests(struct kvm_vcpu *vcpu)
+{
+ if (kvm_request_pending(vcpu)) {
+ if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
+ vcpu_req_sleep(vcpu);
+
+ /*
+ * Clear IRQ_PENDING requests that were made to guarantee
+ * that a VCPU sees new virtual interrupts.
+ */
+ kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
+ }
+}
+
/**
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
* @vcpu: The VCPU pointer
@@ -620,8 +638,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
update_vttbr(vcpu->kvm);
- if (vcpu->arch.power_off || vcpu->arch.pause)
- vcpu_sleep(vcpu);
+ check_vcpu_requests(vcpu);
/*
* Preparing the interrupts to be injected also
@@ -650,8 +667,17 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
run->exit_reason = KVM_EXIT_INTR;
}
+ /*
+ * Ensure we set mode to IN_GUEST_MODE after we disable
+ * interrupts and before the final VCPU requests check.
+ * See the comment in kvm_vcpu_exiting_guest_mode() and
+ * Documentation/virtual/kvm/vcpu-requests.rst
+ */
+ smp_store_mb(vcpu->mode, IN_GUEST_MODE);
+
if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
- vcpu->arch.power_off || vcpu->arch.pause) {
+ kvm_request_pending(vcpu)) {
+ vcpu->mode = OUTSIDE_GUEST_MODE;
local_irq_enable();
kvm_pmu_sync_hwstate(vcpu);
kvm_timer_sync_hwstate(vcpu);
@@ -667,7 +693,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/
trace_kvm_entry(*vcpu_pc(vcpu));
guest_enter_irqoff();
- vcpu->mode = IN_GUEST_MODE;
ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
@@ -756,6 +781,7 @@ static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
* trigger a world-switch round on the running physical CPU to set the
* virtual IRQ/FIQ fields in the HCR appropriately.
*/
+ kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
return 0;
@@ -806,7 +832,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
return -EINVAL;
- return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
+ return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
case KVM_ARM_IRQ_TYPE_SPI:
if (!irqchip_in_kernel(kvm))
return -ENXIO;
@@ -814,7 +840,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
if (irq_num < VGIC_NR_PRIVATE_IRQS)
return -EINVAL;
- return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
+ return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
}
return -EINVAL;
@@ -884,7 +910,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
* Handle the "start in power-off" case.
*/
if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
- vcpu->arch.power_off = true;
+ vcpu_power_off(vcpu);
else
vcpu->arch.power_off = false;
@@ -1115,9 +1141,6 @@ static void cpu_init_hyp_mode(void *dummy)
__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
__cpu_init_stage2();
- if (is_kernel_in_hyp_mode())
- kvm_timer_init_vhe();
-
kvm_arm_init_debug();
}
@@ -1137,6 +1160,7 @@ static void cpu_hyp_reinit(void)
* event was cancelled before the CPU was reset.
*/
__cpu_init_stage2();
+ kvm_timer_init_vhe();
} else {
cpu_init_hyp_mode(NULL);
}
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 87940364570b..91728faa13fd 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -19,10 +19,12 @@
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#define vtr_to_max_lr_idx(v) ((v) & 0xf)
#define vtr_to_nr_pre_bits(v) ((((u32)(v) >> 26) & 7) + 1)
+#define vtr_to_nr_apr_regs(v) (1 << (vtr_to_nr_pre_bits(v) - 5))
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
@@ -118,6 +120,90 @@ static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
}
}
+static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
+{
+ switch (n) {
+ case 0:
+ write_gicreg(val, ICH_AP0R0_EL2);
+ break;
+ case 1:
+ write_gicreg(val, ICH_AP0R1_EL2);
+ break;
+ case 2:
+ write_gicreg(val, ICH_AP0R2_EL2);
+ break;
+ case 3:
+ write_gicreg(val, ICH_AP0R3_EL2);
+ break;
+ }
+}
+
+static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
+{
+ switch (n) {
+ case 0:
+ write_gicreg(val, ICH_AP1R0_EL2);
+ break;
+ case 1:
+ write_gicreg(val, ICH_AP1R1_EL2);
+ break;
+ case 2:
+ write_gicreg(val, ICH_AP1R2_EL2);
+ break;
+ case 3:
+ write_gicreg(val, ICH_AP1R3_EL2);
+ break;
+ }
+}
+
+static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
+{
+ u32 val;
+
+ switch (n) {
+ case 0:
+ val = read_gicreg(ICH_AP0R0_EL2);
+ break;
+ case 1:
+ val = read_gicreg(ICH_AP0R1_EL2);
+ break;
+ case 2:
+ val = read_gicreg(ICH_AP0R2_EL2);
+ break;
+ case 3:
+ val = read_gicreg(ICH_AP0R3_EL2);
+ break;
+ default:
+ unreachable();
+ }
+
+ return val;
+}
+
+static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
+{
+ u32 val;
+
+ switch (n) {
+ case 0:
+ val = read_gicreg(ICH_AP1R0_EL2);
+ break;
+ case 1:
+ val = read_gicreg(ICH_AP1R1_EL2);
+ break;
+ case 2:
+ val = read_gicreg(ICH_AP1R2_EL2);
+ break;
+ case 3:
+ val = read_gicreg(ICH_AP1R3_EL2);
+ break;
+ default:
+ unreachable();
+ }
+
+ return val;
+}
+
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
@@ -154,24 +240,27 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
switch (nr_pre_bits) {
case 7:
- cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
- cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);
+ cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
+ cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
case 6:
- cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2);
+ cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
default:
- cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
+ cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
}
switch (nr_pre_bits) {
case 7:
- cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
- cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);
+ cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
+ cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
case 6:
- cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2);
+ cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
default:
- cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2);
+ cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
}
} else {
+ if (static_branch_unlikely(&vgic_v3_cpuif_trap))
+ write_gicreg(0, ICH_HCR_EL2);
+
cpu_if->vgic_elrsr = 0xffff;
cpu_if->vgic_ap0r[0] = 0;
cpu_if->vgic_ap0r[1] = 0;
@@ -224,26 +313,34 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
switch (nr_pre_bits) {
case 7:
- write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
- write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
+ __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
+ __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
case 6:
- write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2);
+ __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
default:
- write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
+ __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
}
switch (nr_pre_bits) {
case 7:
- write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
- write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
+ __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
+ __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
case 6:
- write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
+ __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
default:
- write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
+ __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
}
for (i = 0; i < used_lrs; i++)
__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
+ } else {
+ /*
+ * If we need to trap system registers, we must write
+ * ICH_HCR_EL2 anyway, even if no interrupts are being
+ * injected.
+ */
+ if (static_branch_unlikely(&vgic_v3_cpuif_trap))
+ write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}
/*
@@ -287,3 +384,697 @@ void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
write_gicreg(vmcr, ICH_VMCR_EL2);
}
+
+#ifdef CONFIG_ARM64
+
+static int __hyp_text __vgic_v3_bpr_min(void)
+{
+ /* See Pseudocode for VPriorityGroup */
+ return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
+}
+
+static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
+{
+ u32 esr = kvm_vcpu_get_hsr(vcpu);
+ u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
+
+ return crm != 8;
+}
+
+#define GICv3_IDLE_PRIORITY 0xff
+
+static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
+ u32 vmcr,
+ u64 *lr_val)
+{
+ unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+ u8 priority = GICv3_IDLE_PRIORITY;
+ int i, lr = -1;
+
+ for (i = 0; i < used_lrs; i++) {
+ u64 val = __gic_v3_get_lr(i);
+ u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
+
+ /* Not pending in the state? */
+ if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
+ continue;
+
+ /* Group-0 interrupt, but Group-0 disabled? */
+ if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
+ continue;
+
+ /* Group-1 interrupt, but Group-1 disabled? */
+ if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
+ continue;
+
+ /* Not the highest priority? */
+ if (lr_prio >= priority)
+ continue;
+
+ /* This is a candidate */
+ priority = lr_prio;
+ *lr_val = val;
+ lr = i;
+ }
+
+ if (lr == -1)
+ *lr_val = ICC_IAR1_EL1_SPURIOUS;
+
+ return lr;
+}
+
+static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
+ int intid, u64 *lr_val)
+{
+ unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+ int i;
+
+ for (i = 0; i < used_lrs; i++) {
+ u64 val = __gic_v3_get_lr(i);
+
+ if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
+ (val & ICH_LR_ACTIVE_BIT)) {
+ *lr_val = val;
+ return i;
+ }
+ }
+
+ *lr_val = ICC_IAR1_EL1_SPURIOUS;
+ return -1;
+}
+
+static int __hyp_text __vgic_v3_get_highest_active_priority(void)
+{
+ u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
+ u32 hap = 0;
+ int i;
+
+ for (i = 0; i < nr_apr_regs; i++) {
+ u32 val;
+
+ /*
+ * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
+ * contain the active priority levels for this VCPU
+ * for the maximum number of supported priority
+ * levels, and we return the full priority level only
+ * if the BPR is programmed to its minimum, otherwise
+ * we return a combination of the priority level and
+ * subpriority, as determined by the setting of the
+ * BPR, but without the full subpriority.
+ */
+ val = __vgic_v3_read_ap0rn(i);
+ val |= __vgic_v3_read_ap1rn(i);
+ if (!val) {
+ hap += 32;
+ continue;
+ }
+
+ return (hap + __ffs(val)) << __vgic_v3_bpr_min();
+ }
+
+ return GICv3_IDLE_PRIORITY;
+}
+
+static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
+{
+ return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
+}
+
+static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr)
+{
+ unsigned int bpr;
+
+ if (vmcr & ICH_VMCR_CBPR_MASK) {
+ bpr = __vgic_v3_get_bpr0(vmcr);
+ if (bpr < 7)
+ bpr++;
+ } else {
+ bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
+ }
+
+ return bpr;
+}
+
+/*
+ * Convert a priority to a preemption level, taking the relevant BPR
+ * into account by zeroing the sub-priority bits.
+ */
+static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
+{
+ unsigned int bpr;
+
+ if (!grp)
+ bpr = __vgic_v3_get_bpr0(vmcr) + 1;
+ else
+ bpr = __vgic_v3_get_bpr1(vmcr);
+
+ return pri & (GENMASK(7, 0) << bpr);
+}
+
+/*
+ * The priority value is independent of any of the BPR values, so we
+ * normalize it using the minimal BPR value. This guarantees that no
+ * matter what the guest does with its BPR, we can always set/get the
+ * same value of a priority.
+ */
+static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
+{
+ u8 pre, ap;
+ u32 val;
+ int apr;
+
+ pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
+ ap = pre >> __vgic_v3_bpr_min();
+ apr = ap / 32;
+
+ if (!grp) {
+ val = __vgic_v3_read_ap0rn(apr);
+ __vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
+ } else {
+ val = __vgic_v3_read_ap1rn(apr);
+ __vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
+ }
+}
+
+static int __hyp_text __vgic_v3_clear_highest_active_priority(void)
+{
+ u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
+ u32 hap = 0;
+ int i;
+
+ for (i = 0; i < nr_apr_regs; i++) {
+ u32 ap0, ap1;
+ int c0, c1;
+
+ ap0 = __vgic_v3_read_ap0rn(i);
+ ap1 = __vgic_v3_read_ap1rn(i);
+ if (!ap0 && !ap1) {
+ hap += 32;
+ continue;
+ }
+
+ c0 = ap0 ? __ffs(ap0) : 32;
+ c1 = ap1 ? __ffs(ap1) : 32;
+
+ /* Always clear the LSB, which is the highest priority */
+ if (c0 < c1) {
+ ap0 &= ~BIT(c0);
+ __vgic_v3_write_ap0rn(ap0, i);
+ hap += c0;
+ } else {
+ ap1 &= ~BIT(c1);
+ __vgic_v3_write_ap1rn(ap1, i);
+ hap += c1;
+ }
+
+ /* Rescale to 8 bits of priority */
+ return hap << __vgic_v3_bpr_min();
+ }
+
+ return GICv3_IDLE_PRIORITY;
+}
+
+static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
+{
+ u64 lr_val;
+ u8 lr_prio, pmr;
+ int lr, grp;
+
+ grp = __vgic_v3_get_group(vcpu);
+
+ lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
+ if (lr < 0)
+ goto spurious;
+
+ if (grp != !!(lr_val & ICH_LR_GROUP))
+ goto spurious;
+
+ pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
+ lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
+ if (pmr <= lr_prio)
+ goto spurious;
+
+ if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
+ goto spurious;
+
+ lr_val &= ~ICH_LR_STATE;
+ /* No active state for LPIs */
+ if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
+ lr_val |= ICH_LR_ACTIVE_BIT;
+ __gic_v3_set_lr(lr_val, lr);
+ __vgic_v3_set_active_priority(lr_prio, vmcr, grp);
+ vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
+ return;
+
+spurious:
+ vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
+}
+
+static void __hyp_text __vgic_v3_clear_active_lr(int lr, u64 lr_val)
+{
+ lr_val &= ~ICH_LR_ACTIVE_BIT;
+ if (lr_val & ICH_LR_HW) {
+ u32 pid;
+
+ pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
+ gic_write_dir(pid);
+ }
+
+ __gic_v3_set_lr(lr_val, lr);
+}
+
+static void __hyp_text __vgic_v3_bump_eoicount(void)
+{
+ u32 hcr;
+
+ hcr = read_gicreg(ICH_HCR_EL2);
+ hcr += 1 << ICH_HCR_EOIcount_SHIFT;
+ write_gicreg(hcr, ICH_HCR_EL2);
+}
+
+static void __hyp_text __vgic_v3_write_dir(struct kvm_vcpu *vcpu,
+ u32 vmcr, int rt)
+{
+ u32 vid = vcpu_get_reg(vcpu, rt);
+ u64 lr_val;
+ int lr;
+
+ /* EOImode == 0, nothing to be done here */
+ if (!(vmcr & ICH_VMCR_EOIM_MASK))
+ return;
+
+ /* No deactivate to be performed on an LPI */
+ if (vid >= VGIC_MIN_LPI)
+ return;
+
+ lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
+ if (lr == -1) {
+ __vgic_v3_bump_eoicount();
+ return;
+ }
+
+ __vgic_v3_clear_active_lr(lr, lr_val);
+}
+
+static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
+{
+ u32 vid = vcpu_get_reg(vcpu, rt);
+ u64 lr_val;
+ u8 lr_prio, act_prio;
+ int lr, grp;
+
+ grp = __vgic_v3_get_group(vcpu);
+
+ /* Drop priority in any case */
+ act_prio = __vgic_v3_clear_highest_active_priority();
+
+ /* If EOIing an LPI, no deactivate to be performed */
+ if (vid >= VGIC_MIN_LPI)
+ return;
+
+ /* EOImode == 1, nothing to be done here */
+ if (vmcr & ICH_VMCR_EOIM_MASK)
+ return;
+
+ lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
+ if (lr == -1) {
+ __vgic_v3_bump_eoicount();
+ return;
+ }
+
+ lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
+
+ /* If priorities or group do not match, the guest has fscked-up. */
+ if (grp != !!(lr_val & ICH_LR_GROUP) ||
+ __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
+ return;
+
+ /* Let's now perform the deactivation */
+ __vgic_v3_clear_active_lr(lr, lr_val);
+}
+
+static void __hyp_text __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
+{
+ vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
+}
+
+static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
+{
+ vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
+}
+
+static void __hyp_text __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
+{
+ u64 val = vcpu_get_reg(vcpu, rt);
+
+ if (val & 1)
+ vmcr |= ICH_VMCR_ENG0_MASK;
+ else
+ vmcr &= ~ICH_VMCR_ENG0_MASK;
+
+ __vgic_v3_write_vmcr(vmcr);
+}
+
+static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
+{
+ u64 val = vcpu_get_reg(vcpu, rt);
+
+ if (val & 1)
+ vmcr |= ICH_VMCR_ENG1_MASK;
+ else
+ vmcr &= ~ICH_VMCR_ENG1_MASK;
+
+ __vgic_v3_write_vmcr(vmcr);
+}
+
+static void __hyp_text __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
+{
+ vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
+}
+
+static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
+{
+ vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
+}
+
+static void __hyp_text __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
+{
+ u64 val = vcpu_get_reg(vcpu, rt);
+ u8 bpr_min = __vgic_v3_bpr_min() - 1;
+
+ /* Enforce BPR limiting */
+ if (val < bpr_min)
+ val = bpr_min;
+
+ val <<= ICH_VMCR_BPR0_SHIFT;
+ val &= ICH_VMCR_BPR0_MASK;
+ vmcr &= ~ICH_VMCR_BPR0_MASK;
+ vmcr |= val;
+
+ __vgic_v3_write_vmcr(vmcr);
+}
+
+static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
+{
+ u64 val = vcpu_get_reg(vcpu, rt);
+ u8 bpr_min = __vgic_v3_bpr_min();
+
+ if (vmcr & ICH_VMCR_CBPR_MASK)
+ return;
+
+ /* Enforce BPR limiting */
+ if (val < bpr_min)
+ val = bpr_min;
+
+ val <<= ICH_VMCR_BPR1_SHIFT;
+ val &= ICH_VMCR_BPR1_MASK;
+ vmcr &= ~ICH_VMCR_BPR1_MASK;
+ vmcr |= val;
+
+ __vgic_v3_write_vmcr(vmcr);
+}
+
+static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
+{
+ u32 val;
+
+ if (!__vgic_v3_get_group(vcpu))
+ val = __vgic_v3_read_ap0rn(n);
+ else
+ val = __vgic_v3_read_ap1rn(n);
+
+ vcpu_set_reg(vcpu, rt, val);
+}
+
+static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
+{
+ u32 val = vcpu_get_reg(vcpu, rt);
+
+ if (!__vgic_v3_get_group(vcpu))
+ __vgic_v3_write_ap0rn(val, n);
+ else
+ __vgic_v3_write_ap1rn(val, n);
+}
+
+static void __hyp_text __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
+ u32 vmcr, int rt)
+{
+ __vgic_v3_read_apxrn(vcpu, rt, 0);
+}
+
+static void __hyp_text __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
+ u32 vmcr, int rt)
+{
+ __vgic_v3_read_apxrn(vcpu, rt, 1);
+}
+
+static void __hyp_text __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu,
+ u32 vmcr, int rt)
+{
+ __vgic_v3_read_apxrn(vcpu, rt, 2);
+}
+
+static void __hyp_text __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu,
+ u32 vmcr, int rt)
+{
+ __vgic_v3_read_apxrn(vcpu, rt, 3);
+}
+
+static void __hyp_text __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu,
+ u32 vmcr, int rt)
+{
+ __vgic_v3_write_apxrn(vcpu, rt, 0);
+}
+
+static void __hyp_text __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu,
+ u32 vmcr, int rt)
+{
+ __vgic_v3_write_apxrn(vcpu, rt, 1);
+}
+
+static void __hyp_text __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu,
+ u32 vmcr, int rt)
+{
+ __vgic_v3_write_apxrn(vcpu, rt, 2);
+}
+
+static void __hyp_text __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu,
+ u32 vmcr, int rt)
+{
+ __vgic_v3_write_apxrn(vcpu, rt, 3);
+}
+
+static void __hyp_text __vgic_v3_read_hppir(struct kvm_vcpu *vcpu,
+ u32 vmcr, int rt)
+{
+ u64 lr_val;
+ int lr, lr_grp, grp;
+
+ grp = __vgic_v3_get_group(vcpu);
+
+ lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
+ if (lr == -1)
+ goto spurious;
+
+ lr_grp = !!(lr_val & ICH_LR_GROUP);
+ if (lr_grp != grp)
+ lr_val = ICC_IAR1_EL1_SPURIOUS;
+
+spurious:
+ vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
+}
+
+static void __hyp_text __vgic_v3_read_pmr(struct kvm_vcpu *vcpu,
+ u32 vmcr, int rt)
+{
+ vmcr &= ICH_VMCR_PMR_MASK;
+ vmcr >>= ICH_VMCR_PMR_SHIFT;
+ vcpu_set_reg(vcpu, rt, vmcr);
+}
+
+static void __hyp_text __vgic_v3_write_pmr(struct kvm_vcpu *vcpu,
+ u32 vmcr, int rt)
+{
+ u32 val = vcpu_get_reg(vcpu, rt);
+
+ val <<= ICH_VMCR_PMR_SHIFT;
+ val &= ICH_VMCR_PMR_MASK;
+ vmcr &= ~ICH_VMCR_PMR_MASK;
+ vmcr |= val;
+
+ write_gicreg(vmcr, ICH_VMCR_EL2);
+}
+
+static void __hyp_text __vgic_v3_read_rpr(struct kvm_vcpu *vcpu,
+ u32 vmcr, int rt)
+{
+ u32 val = __vgic_v3_get_highest_active_priority();
+ vcpu_set_reg(vcpu, rt, val);
+}
+
+static void __hyp_text __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu,
+ u32 vmcr, int rt)
+{
+ u32 vtr, val;
+
+ vtr = read_gicreg(ICH_VTR_EL2);
+ /* PRIbits */
+ val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
+ /* IDbits */
+ val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
+ /* SEIS */
+ val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
+ /* A3V */
+ val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
+ /* EOImode */
+ val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
+ /* CBPR */
+ val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+
+ vcpu_set_reg(vcpu, rt, val);
+}
+
+static void __hyp_text __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu,
+ u32 vmcr, int rt)
+{
+ u32 val = vcpu_get_reg(vcpu, rt);
+
+ if (val & ICC_CTLR_EL1_CBPR_MASK)
+ vmcr |= ICH_VMCR_CBPR_MASK;
+ else
+ vmcr &= ~ICH_VMCR_CBPR_MASK;
+
+ if (val & ICC_CTLR_EL1_EOImode_MASK)
+ vmcr |= ICH_VMCR_EOIM_MASK;
+ else
+ vmcr &= ~ICH_VMCR_EOIM_MASK;
+
+ write_gicreg(vmcr, ICH_VMCR_EL2);
+}
+
+int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
+{
+ int rt;
+ u32 esr;
+ u32 vmcr;
+ void (*fn)(struct kvm_vcpu *, u32, int);
+ bool is_read;
+ u32 sysreg;
+
+ esr = kvm_vcpu_get_hsr(vcpu);
+ if (vcpu_mode_is_32bit(vcpu)) {
+ if (!kvm_condition_valid(vcpu))
+ return 1;
+
+ sysreg = esr_cp15_to_sysreg(esr);
+ } else {
+ sysreg = esr_sys64_to_sysreg(esr);
+ }
+
+ is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;
+
+ switch (sysreg) {
+ case SYS_ICC_IAR0_EL1:
+ case SYS_ICC_IAR1_EL1:
+ if (unlikely(!is_read))
+ return 0;
+ fn = __vgic_v3_read_iar;
+ break;
+ case SYS_ICC_EOIR0_EL1:
+ case SYS_ICC_EOIR1_EL1:
+ if (unlikely(is_read))
+ return 0;
+ fn = __vgic_v3_write_eoir;
+ break;
+ case SYS_ICC_IGRPEN1_EL1:
+ if (is_read)
+ fn = __vgic_v3_read_igrpen1;
+ else
+ fn = __vgic_v3_write_igrpen1;
+ break;
+ case SYS_ICC_BPR1_EL1:
+ if (is_read)
+ fn = __vgic_v3_read_bpr1;
+ else
+ fn = __vgic_v3_write_bpr1;
+ break;
+ case SYS_ICC_AP0Rn_EL1(0):
+ case SYS_ICC_AP1Rn_EL1(0):
+ if (is_read)
+ fn = __vgic_v3_read_apxr0;
+ else
+ fn = __vgic_v3_write_apxr0;
+ break;
+ case SYS_ICC_AP0Rn_EL1(1):
+ case SYS_ICC_AP1Rn_EL1(1):
+ if (is_read)
+ fn = __vgic_v3_read_apxr1;
+ else
+ fn = __vgic_v3_write_apxr1;
+ break;
+ case SYS_ICC_AP0Rn_EL1(2):
+ case SYS_ICC_AP1Rn_EL1(2):
+ if (is_read)
+ fn = __vgic_v3_read_apxr2;
+ else
+ fn = __vgic_v3_write_apxr2;
+ break;
+ case SYS_ICC_AP0Rn_EL1(3):
+ case SYS_ICC_AP1Rn_EL1(3):
+ if (is_read)
+ fn = __vgic_v3_read_apxr3;
+ else
+ fn = __vgic_v3_write_apxr3;
+ break;
+ case SYS_ICC_HPPIR0_EL1:
+ case SYS_ICC_HPPIR1_EL1:
+ if (unlikely(!is_read))
+ return 0;
+ fn = __vgic_v3_read_hppir;
+ break;
+ case SYS_ICC_IGRPEN0_EL1:
+ if (is_read)
+ fn = __vgic_v3_read_igrpen0;
+ else
+ fn = __vgic_v3_write_igrpen0;
+ break;
+ case SYS_ICC_BPR0_EL1:
+ if (is_read)
+ fn = __vgic_v3_read_bpr0;
+ else
+ fn = __vgic_v3_write_bpr0;
+ break;
+ case SYS_ICC_DIR_EL1:
+ if (unlikely(is_read))
+ return 0;
+ fn = __vgic_v3_write_dir;
+ break;
+ case SYS_ICC_RPR_EL1:
+ if (unlikely(!is_read))
+ return 0;
+ fn = __vgic_v3_read_rpr;
+ break;
+ case SYS_ICC_CTLR_EL1:
+ if (is_read)
+ fn = __vgic_v3_read_ctlr;
+ else
+ fn = __vgic_v3_write_ctlr;
+ break;
+ case SYS_ICC_PMR_EL1:
+ if (is_read)
+ fn = __vgic_v3_read_pmr;
+ else
+ fn = __vgic_v3_write_pmr;
+ break;
+ default:
+ return 0;
+ }
+
+ vmcr = __vgic_v3_read_vmcr();
+ rt = kvm_vcpu_sys_get_rt(vcpu);
+ fn(vcpu, vmcr, rt);
+
+ return 1;
+}
+
+#endif
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 1c44aa35f909..0e1fc75f3585 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -20,6 +20,7 @@
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
+#include <linux/sched/signal.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
@@ -1262,6 +1263,24 @@ static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
__coherent_cache_guest_page(vcpu, pfn, size);
}
+static void kvm_send_hwpoison_signal(unsigned long address,
+ struct vm_area_struct *vma)
+{
+ siginfo_t info;
+
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_MCEERR_AR;
+ info.si_addr = (void __user *)address;
+
+ if (is_vm_hugetlb_page(vma))
+ info.si_addr_lsb = huge_page_shift(hstate_vma(vma));
+ else
+ info.si_addr_lsb = PAGE_SHIFT;
+
+ send_sig_info(SIGBUS, &info, current);
+}
+
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_memory_slot *memslot, unsigned long hva,
unsigned long fault_status)
@@ -1331,6 +1350,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
smp_rmb();
pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
+ if (pfn == KVM_PFN_ERR_HWPOISON) {
+ kvm_send_hwpoison_signal(hva, vma);
+ return 0;
+ }
if (is_error_noslot_pfn(pfn))
return -EFAULT;
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 4b43e7f3b158..fc8a723ff387 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -203,6 +203,24 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
return reg;
}
+static void kvm_pmu_check_overflow(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ bool overflow = !!kvm_pmu_overflow_status(vcpu);
+
+ if (pmu->irq_level == overflow)
+ return;
+
+ pmu->irq_level = overflow;
+
+ if (likely(irqchip_in_kernel(vcpu->kvm))) {
+ int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+ pmu->irq_num, overflow,
+ &vcpu->arch.pmu);
+ WARN_ON(ret);
+ }
+}
+
/**
* kvm_pmu_overflow_set - set PMU overflow interrupt
* @vcpu: The vcpu pointer
@@ -210,37 +228,18 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
*/
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
{
- u64 reg;
-
if (val == 0)
return;
vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val;
- reg = kvm_pmu_overflow_status(vcpu);
- if (reg != 0)
- kvm_vcpu_kick(vcpu);
+ kvm_pmu_check_overflow(vcpu);
}
static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu *pmu = &vcpu->arch.pmu;
- bool overflow;
-
if (!kvm_arm_pmu_v3_ready(vcpu))
return;
-
- overflow = !!kvm_pmu_overflow_status(vcpu);
- if (pmu->irq_level == overflow)
- return;
-
- pmu->irq_level = overflow;
-
- if (likely(irqchip_in_kernel(vcpu->kvm))) {
- int ret;
- ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
- pmu->irq_num, overflow);
- WARN_ON(ret);
- }
+ kvm_pmu_check_overflow(vcpu);
}
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
@@ -451,34 +450,74 @@ bool kvm_arm_support_pmu_v3(void)
return (perf_num_counters() > 0);
}
-static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
+int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
- if (!kvm_arm_support_pmu_v3())
- return -ENODEV;
+ if (!vcpu->arch.pmu.created)
+ return 0;
/*
- * We currently require an in-kernel VGIC to use the PMU emulation,
- * because we do not support forwarding PMU overflow interrupts to
- * userspace yet.
+ * A valid interrupt configuration for the PMU is either to have a
+ * properly configured interrupt number and to use an in-kernel
+ * irqchip, or to not have an in-kernel GIC and not set an IRQ.
*/
- if (!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))
+ if (irqchip_in_kernel(vcpu->kvm)) {
+ int irq = vcpu->arch.pmu.irq_num;
+ if (!kvm_arm_pmu_irq_initialized(vcpu))
+ return -EINVAL;
+
+ /*
+ * If we are using an in-kernel vgic, at this point we know
+ * the vgic will be initialized, so we can check the PMU irq
+ * number against the dimensions of the vgic and make sure
+ * it's valid.
+ */
+ if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
+ return -EINVAL;
+ } else if (kvm_arm_pmu_irq_initialized(vcpu)) {
+ return -EINVAL;
+ }
+
+ kvm_pmu_vcpu_reset(vcpu);
+ vcpu->arch.pmu.ready = true;
+
+ return 0;
+}
+
+static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
+{
+ if (!kvm_arm_support_pmu_v3())
return -ENODEV;
- if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features) ||
- !kvm_arm_pmu_irq_initialized(vcpu))
+ if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
return -ENXIO;
- if (kvm_arm_pmu_v3_ready(vcpu))
+ if (vcpu->arch.pmu.created)
return -EBUSY;
- kvm_pmu_vcpu_reset(vcpu);
- vcpu->arch.pmu.ready = true;
+ if (irqchip_in_kernel(vcpu->kvm)) {
+ int ret;
+
+ /*
+ * If using the PMU with an in-kernel virtual GIC
+ * implementation, we require the GIC to be already
+ * initialized when initializing the PMU.
+ */
+ if (!vgic_initialized(vcpu->kvm))
+ return -ENODEV;
+
+ if (!kvm_arm_pmu_irq_initialized(vcpu))
+ return -ENXIO;
+ ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
+ &vcpu->arch.pmu);
+ if (ret)
+ return ret;
+ }
+
+ vcpu->arch.pmu.created = true;
return 0;
}
-#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)
-
/*
* For one VM the interrupt type must be same for each vcpu.
* As a PPI, the interrupt number is the same for all vcpus,
@@ -512,6 +551,9 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
int __user *uaddr = (int __user *)(long)attr->addr;
int irq;
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return -EINVAL;
+
if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
return -ENODEV;
@@ -519,7 +561,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
return -EFAULT;
/* The PMU overflow interrupt can be a PPI or a valid SPI. */
- if (!(irq_is_ppi(irq) || vgic_valid_spi(vcpu->kvm, irq)))
+ if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
return -EINVAL;
if (!pmu_irq_is_valid(vcpu->kvm, irq))
@@ -546,6 +588,9 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
int __user *uaddr = (int __user *)(long)attr->addr;
int irq;
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return -EINVAL;
+
if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
return -ENODEV;
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index a08d7a93aebb..f1e363bab5e8 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -57,6 +57,7 @@ static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
* for KVM will preserve the register state.
*/
kvm_vcpu_block(vcpu);
+ kvm_clear_request(KVM_REQ_UNHALT, vcpu);
return PSCI_RET_SUCCESS;
}
@@ -64,6 +65,8 @@ static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
vcpu->arch.power_off = true;
+ kvm_make_request(KVM_REQ_SLEEP, vcpu);
+ kvm_vcpu_kick(vcpu);
}
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
@@ -178,10 +181,9 @@ static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
* after this call is handled and before the VCPUs have been
* re-initialized.
*/
- kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm)
tmp->arch.power_off = true;
- kvm_vcpu_kick(tmp);
- }
+ kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
vcpu->run->system_event.type = type;
diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
index f138ed2e9c63..b7baf581611a 100644
--- a/virt/kvm/arm/vgic/vgic-irqfd.c
+++ b/virt/kvm/arm/vgic/vgic-irqfd.c
@@ -34,7 +34,7 @@ static int vgic_irqfd_set_irq(struct kvm_kernel_irq_routing_entry *e,
if (!vgic_valid_spi(kvm, spi_id))
return -EINVAL;
- return kvm_vgic_inject_irq(kvm, 0, spi_id, level);
+ return kvm_vgic_inject_irq(kvm, 0, spi_id, level, NULL);
}
/**
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 63e0bbdcddcc..37522e65eb53 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -308,34 +308,36 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
- vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
+ vgic_mmio_read_rao, vgic_mmio_write_wi, NULL, NULL, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
- vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
+ vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
- vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
+ vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
- vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
+ vgic_mmio_read_pending, vgic_mmio_write_spending, NULL, NULL, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
- vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
+ vgic_mmio_read_pending, vgic_mmio_write_cpending, NULL, NULL, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
- vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
+ vgic_mmio_read_active, vgic_mmio_write_sactive,
+ NULL, vgic_mmio_uaccess_write_sactive, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
- vgic_mmio_read_active, vgic_mmio_write_cactive, 1,
+ vgic_mmio_read_active, vgic_mmio_write_cactive,
+ NULL, vgic_mmio_uaccess_write_cactive, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
- vgic_mmio_read_priority, vgic_mmio_write_priority, 8,
- VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+ vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
+ 8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
- vgic_mmio_read_target, vgic_mmio_write_target, 8,
+ vgic_mmio_read_target, vgic_mmio_write_target, NULL, NULL, 8,
VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
- vgic_mmio_read_config, vgic_mmio_write_config, 2,
+ vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 201d5e2e973d..714fa3933546 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -456,11 +456,13 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = {
vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
- vgic_mmio_read_active, vgic_mmio_write_sactive, NULL, NULL, 1,
+ vgic_mmio_read_active, vgic_mmio_write_sactive,
+ NULL, vgic_mmio_uaccess_write_sactive, 1,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
- vgic_mmio_read_active, vgic_mmio_write_cactive, NULL, NULL, 1,
- VGIC_ACCESS_32bit),
+ vgic_mmio_read_active, vgic_mmio_write_cactive,
+ NULL, vgic_mmio_uaccess_write_cactive,
+ 1, VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
@@ -526,12 +528,14 @@ static const struct vgic_register_region vgic_v3_sgibase_registers[] = {
vgic_mmio_read_pending, vgic_mmio_write_cpending,
vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
VGIC_ACCESS_32bit),
- REGISTER_DESC_WITH_LENGTH(GICR_ISACTIVER0,
- vgic_mmio_read_active, vgic_mmio_write_sactive, 4,
- VGIC_ACCESS_32bit),
- REGISTER_DESC_WITH_LENGTH(GICR_ICACTIVER0,
- vgic_mmio_read_active, vgic_mmio_write_cactive, 4,
- VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ISACTIVER0,
+ vgic_mmio_read_active, vgic_mmio_write_sactive,
+ NULL, vgic_mmio_uaccess_write_sactive,
+ 4, VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_ICACTIVER0,
+ vgic_mmio_read_active, vgic_mmio_write_cactive,
+ NULL, vgic_mmio_uaccess_write_cactive,
+ 4, VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICR_IPRIORITYR0,
vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 1c17b2a2f105..c1e4bdd66131 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -231,56 +231,94 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
* be migrated while we don't hold the IRQ locks and we don't want to be
* chasing moving targets.
*
- * For private interrupts, we only have to make sure the single and only VCPU
- * that can potentially queue the IRQ is stopped.
+ * For private interrupts we don't have to do anything because userspace
+ * accesses to the VGIC state already require all VCPUs to be stopped, and
+ * only the VCPU itself can modify the active state of its private
+ * interrupts, which guarantees that the VCPU is not running.
*/
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
- if (intid < VGIC_NR_PRIVATE_IRQS)
- kvm_arm_halt_vcpu(vcpu);
- else
+ if (intid > VGIC_NR_PRIVATE_IRQS)
kvm_arm_halt_guest(vcpu->kvm);
}
/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
- if (intid < VGIC_NR_PRIVATE_IRQS)
- kvm_arm_resume_vcpu(vcpu);
- else
+ if (intid > VGIC_NR_PRIVATE_IRQS)
kvm_arm_resume_guest(vcpu->kvm);
}
-void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
- gpa_t addr, unsigned int len,
- unsigned long val)
+static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
int i;
- vgic_change_active_prepare(vcpu, intid);
for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vgic_mmio_change_active(vcpu, irq, false);
vgic_put_irq(vcpu->kvm, irq);
}
- vgic_change_active_finish(vcpu, intid);
}
-void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
+void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val)
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
- int i;
+ mutex_lock(&vcpu->kvm->lock);
vgic_change_active_prepare(vcpu, intid);
+
+ __vgic_mmio_write_cactive(vcpu, addr, len, val);
+
+ vgic_change_active_finish(vcpu, intid);
+ mutex_unlock(&vcpu->kvm->lock);
+}
+
+void vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ __vgic_mmio_write_cactive(vcpu, addr, len, val);
+}
+
+static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ int i;
+
for_each_set_bit(i, &val, len * 8) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
vgic_mmio_change_active(vcpu, irq, true);
vgic_put_irq(vcpu->kvm, irq);
}
+}
+
+void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+
+ mutex_lock(&vcpu->kvm->lock);
+ vgic_change_active_prepare(vcpu, intid);
+
+ __vgic_mmio_write_sactive(vcpu, addr, len, val);
+
vgic_change_active_finish(vcpu, intid);
+ mutex_unlock(&vcpu->kvm->lock);
+}
+
+void vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ __vgic_mmio_write_sactive(vcpu, addr, len, val);
}
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
index ea4171acdef3..5693f6df45ec 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.h
+++ b/virt/kvm/arm/vgic/vgic-mmio.h
@@ -75,7 +75,7 @@ extern struct kvm_io_device_ops kvm_io_gic_ops;
* The _WITH_LENGTH version instantiates registers with a fixed length
* and is mutually exclusive with the _PER_IRQ version.
*/
-#define REGISTER_DESC_WITH_BITS_PER_IRQ(off, rd, wr, bpi, acc) \
+#define REGISTER_DESC_WITH_BITS_PER_IRQ(off, rd, wr, ur, uw, bpi, acc) \
{ \
.reg_offset = off, \
.bits_per_irq = bpi, \
@@ -83,6 +83,8 @@ extern struct kvm_io_device_ops kvm_io_gic_ops;
.access_flags = acc, \
.read = rd, \
.write = wr, \
+ .uaccess_read = ur, \
+ .uaccess_write = uw, \
}
#define REGISTER_DESC_WITH_LENGTH(off, rd, wr, length, acc) \
@@ -165,6 +167,14 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val);
+void vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val);
+
+void vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val);
+
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len);
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 030248e669f6..96ea597db0e7 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -21,6 +21,10 @@
#include "vgic.h"
+static bool group0_trap;
+static bool group1_trap;
+static bool common_trap;
+
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
@@ -258,6 +262,12 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu)
/* Get the show on the road... */
vgic_v3->vgic_hcr = ICH_HCR_EN;
+ if (group0_trap)
+ vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
+ if (group1_trap)
+ vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
+ if (common_trap)
+ vgic_v3->vgic_hcr |= ICH_HCR_TC;
}
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
@@ -429,6 +439,26 @@ out:
return ret;
}
+DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);
+
+static int __init early_group0_trap_cfg(char *buf)
+{
+ return strtobool(buf, &group0_trap);
+}
+early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);
+
+static int __init early_group1_trap_cfg(char *buf)
+{
+ return strtobool(buf, &group1_trap);
+}
+early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);
+
+static int __init early_common_trap_cfg(char *buf)
+{
+ return strtobool(buf, &common_trap);
+}
+early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);
+
/**
* vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
* @node: pointer to the DT node
@@ -480,6 +510,21 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
if (kvm_vgic_global_state.vcpu_base == 0)
kvm_info("disabling GICv2 emulation\n");
+#ifdef CONFIG_ARM64
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
+ group0_trap = true;
+ group1_trap = true;
+ }
+#endif
+
+ if (group0_trap || group1_trap || common_trap) {
+ kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n",
+ group0_trap ? "G0" : "",
+ group1_trap ? "G1" : "",
+ common_trap ? "C" : "");
+ static_branch_enable(&vgic_v3_cpuif_trap);
+ }
+
kvm_vgic_global_state.vctrl_base = NULL;
kvm_vgic_global_state.type = VGIC_V3;
kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;
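The three early_param() hooks make the trapping behaviour selectable from the kernel command line, with strtobool() accepting the usual boolean spellings ("1"/"0", "y"/"n"). For example, booting with

	kvm-arm.vgic_v3_group1_trap=1 kvm-arm.vgic_v3_common_trap=1

would set group1_trap and common_trap, and vgic_v3_probe() would then log something like "GICv3 sysreg trapping enabled ([G1C], reduced performance)" and enable the vgic_v3_cpuif_trap static key.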
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 83b24d20ff8f..fed717e07938 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -35,11 +35,12 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
/*
* Locking order is always:
- * its->cmd_lock (mutex)
- * its->its_lock (mutex)
- * vgic_cpu->ap_list_lock
- * kvm->lpi_list_lock
- * vgic_irq->irq_lock
+ * kvm->lock (mutex)
+ * its->cmd_lock (mutex)
+ * its->its_lock (mutex)
+ * vgic_cpu->ap_list_lock
+ * kvm->lpi_list_lock
+ * vgic_irq->irq_lock
*
* If you need to take multiple locks, always take the upper lock first,
* then the lower ones, e.g. first take the its_lock, then the irq_lock.
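A minimal sketch of what the updated ordering means for a path that needs both the VM-wide mutex and a per-IRQ lock (function name invented for illustration):

	static void example_update_irq_state(struct kvm *kvm, struct vgic_irq *irq)
	{
		mutex_lock(&kvm->lock);		/* outermost lock in the hierarchy */
		spin_lock(&irq->irq_lock);	/* innermost lock in the hierarchy */

		/* ... modify state that must be stable VM-wide ... */

		spin_unlock(&irq->irq_lock);
		mutex_unlock(&kvm->lock);	/* release in reverse order */
	}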
@@ -234,10 +235,14 @@ static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
/*
* Only valid injection if changing level for level-triggered IRQs or for a
- * rising edge.
+ * rising edge, and in-kernel connected IRQ lines can only be controlled by
+ * their owner.
*/
-static bool vgic_validate_injection(struct vgic_irq *irq, bool level)
+static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
+ if (irq->owner != owner)
+ return false;
+
switch (irq->config) {
case VGIC_CONFIG_LEVEL:
return irq->line_level != level;
@@ -285,8 +290,10 @@ retry:
* won't see this one until it exits for some other
* reason.
*/
- if (vcpu)
+ if (vcpu) {
+ kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
+ }
return false;
}
@@ -332,6 +339,7 @@ retry:
spin_unlock(&irq->irq_lock);
spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+ kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
return true;
@@ -346,13 +354,16 @@ retry:
* false: to ignore the call
* Level-sensitive true: raise the input signal
* false: lower the input signal
+ * @owner: The opaque pointer to the owner of the IRQ being raised to verify
+ * that the caller is allowed to inject this IRQ. Userspace
+ * injections will have owner == NULL.
*
* The VGIC is not concerned with devices being active-LOW or active-HIGH for
* level-sensitive interrupts. You can think of the level parameter as 1
* being HIGH and 0 being LOW and all devices being active-HIGH.
*/
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
- bool level)
+ bool level, void *owner)
{
struct kvm_vcpu *vcpu;
struct vgic_irq *irq;
@@ -374,7 +385,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
spin_lock(&irq->irq_lock);
- if (!vgic_validate_injection(irq, level)) {
+ if (!vgic_validate_injection(irq, level, owner)) {
/* Nothing to see here, move along... */
spin_unlock(&irq->irq_lock);
vgic_put_irq(kvm, irq);
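A hedged sketch of how an in-kernel device would drive a level-triggered line with the new owner argument (names invented; userspace-originated injections keep passing NULL as the owner):

	static int example_set_line(struct kvm_vcpu *vcpu, void *owner_cookie,
				    unsigned int intid, bool asserted)
	{
		/*
		 * For a level-triggered interrupt this only takes effect when
		 * the line level actually changes and when owner_cookie matches
		 * the owner registered for intid (both being NULL also matches).
		 */
		return kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, intid,
					   asserted, owner_cookie);
	}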
@@ -431,6 +442,39 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
}
/**
+ * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
+ *
+ * @vcpu: Pointer to the VCPU (used for PPIs)
+ * @intid: The virtual INTID identifying the interrupt (PPI or SPI)
+ * @owner: Opaque pointer to the owner
+ *
+ * Returns 0 if intid is not already used by another in-kernel device and the
+ * owner is set, otherwise returns an error code.
+ */
+int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
+{
+ struct vgic_irq *irq;
+ int ret = 0;
+
+ if (!vgic_initialized(vcpu->kvm))
+ return -EAGAIN;
+
+ /* SGIs and LPIs cannot be wired up to any device */
+ if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
+ return -EINVAL;
+
+ irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
+ spin_lock(&irq->irq_lock);
+ if (irq->owner && irq->owner != owner)
+ ret = -EEXIST;
+ else
+ irq->owner = owner;
+ spin_unlock(&irq->irq_lock);
+
+ return ret;
+}
+
+/**
* vgic_prune_ap_list - Remove non-relevant interrupts from the list
*
* @vcpu: The VCPU pointer
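A minimal sketch of the intended pairing with kvm_vgic_inject_irq() (hypothetical device, invented names): claim the line once at init time, then always inject with the same cookie:

	static int example_claim_ppi(struct kvm_vcpu *vcpu,
				     struct example_device *dev,
				     unsigned int ppi_intid)
	{
		int ret;

		ret = kvm_vgic_set_owner(vcpu, ppi_intid, dev);
		if (ret)
			return ret;	/* -EAGAIN: vgic not ready; -EEXIST/-EINVAL otherwise */

		/* from here on, injections for ppi_intid must pass 'dev' as owner */
		return 0;
	}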
@@ -721,8 +765,10 @@ void vgic_kick_vcpus(struct kvm *kvm)
* a good kick...
*/
kvm_for_each_vcpu(c, vcpu, kvm) {
- if (kvm_vgic_vcpu_pending_irq(vcpu))
+ if (kvm_vgic_vcpu_pending_irq(vcpu)) {
+ kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
+ }
}
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f0fe9d02f6bb..19f0ecb9b93e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -73,17 +73,17 @@ MODULE_LICENSE("GPL");
/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
-module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);
+module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);
/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
-module_param(halt_poll_ns_grow, uint, S_IRUGO | S_IWUSR);
+module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
/* Default resets per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink;
-module_param(halt_poll_ns_shrink, uint, S_IRUGO | S_IWUSR);
+module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
/*
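For reference on the module_param() changes above: the octal form is a straight substitution, since with the standard <linux/stat.h> definitions S_IRUGO is 0444 and S_IWUSR is 0200:

	/* S_IRUGO | S_IWUSR == 0444 | 0200 == 0644  (owner rw-, group/other r--) */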
@@ -3191,6 +3191,12 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
return PTR_ERR(file);
}
+ /*
+ * Don't call kvm_put_kvm anymore at this point; file->f_op is
+ * already set, with ->release() being kvm_vm_release(). In error
+ * cases it will be called by the final fput(file) and will take
+ * care of doing kvm_put_kvm(kvm).
+ */
if (kvm_create_vm_debugfs(kvm, r) < 0) {
put_unused_fd(r);
fput(file);